Dataset schema: text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars). Each record's text field (the source file) is shown in full, followed by its metadata row:

| repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RRtracklayer(RPackage):
"""Extensible framework for interacting with multiple genome browsers
(currently UCSC built-in) and manipulating annotation tracks in various
formats (currently GFF, BED, bedGraph, BED15, WIG, BigWig and 2bit
built-in). The user may export/import tracks to/from the supported
browsers, as well as query and modify the browser state, such as the
current viewport."""
homepage = "http://bioconductor.org/packages/rtracklayer/"
url = "https://git.bioconductor.org/packages/rtracklayer"
list_url = homepage
version('1.36.6', git='https://git.bioconductor.org/packages/rtracklayer', commit='8c0ac7230f94e0c5a981acbb178c8de70e968131')
depends_on('r-xml', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-xvector', type=('build', 'run'))
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('r-zlibbioc', type=('build', 'run'))
depends_on('r-rcurl', type=('build', 'run'))
depends_on('r-rsamtools', type=('build', 'run'))
depends_on('r-genomicalignments', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@1.36.6')
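
# Hedged usage sketch (a comment, not part of the recipe itself): with this
# file in a Spack repo, `spack install r-rtracklayer@1.36.6` fetches the
# pinned commit and builds the R dependencies declared above first.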
| EmreAtes/spack | var/spack/repos/builtin/packages/r-rtracklayer/package.py | Python | lgpl-2.1 | 2,610 | ["Bioconductor"] | ffb1c635e0ff41956c824b5ec3ad0cacb902857e172341aef081f59b3341b124 |
#!/usr/bin/env python
import sys
from os import path
import os
import argparse
import urllib.request
from types import ModuleType
from collections import OrderedDict
from inspect import cleandoc
import ast
from codegen import SourceGenerator
from lxml import html
from slugify import slugify
import html2text
HTMLCONVERTER = html2text.HTML2Text()
HTMLCONVERTER.ignore_links = False
html2text = HTMLCONVERTER.handle
class SourceGenerator(SourceGenerator):
def visit_arg(self, node):
self.write(node.arg)
def get_argparser():
parser = argparse.ArgumentParser(description='Update tests with LDPR docs')
parser.add_argument('--url', type=str,
help='rfc url',
default='http://www.w3.org/TR/2014/WD-ldp-20140916/',
)
parser.add_argument('--rfc2119-class', type=str,
help='rfc2119 requirement classes',
nargs='+',
default=['MUST', 'SHOULD'])
parser.add_argument('--testbases', type=str,
help='Bases for tests',
nargs='+',
default=['LdpTestCase',])
parser.add_argument('--path', type=str,
help='modules path',
default=path.abspath(path.dirname(__file__)))
    parser.add_argument('--header', type=str,
                        help='header prepended to newly created test modules',
                        default='from test.requirements.base import LdpTestCase')
return parser
class AppendDocToMethods(ast.NodeTransformer):
def __init__(self, testmethods):
super(AppendDocToMethods, self).__init__()
self.testmethods = testmethods
def visit_FunctionDef(self, node):
if node.name in self.testmethods:
rewrite_doc(node, self.testmethods[node.name])
del self.testmethods[node.name]
return node
def get_doc_nodes(doc):
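    # Note: the doc is emitted as three bare expressions (opening quotes,
    # cleaned text, closing quotes) smuggled through ast.Name nodes; this
    # only round-trips via the SourceGenerator subclass above and is not a
    # well-formed AST docstring constant.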
return [ast.Expr(value=ast.Name('"""', ast.Load())),
ast.Expr(value=ast.Name(cleandoc(doc).replace('\t', ''), ast.Load())),
ast.Expr(value=ast.Name('"""', ast.Load()))]
def rewrite_doc(node, doc):
doc_nodes = get_doc_nodes(doc)
original_docstring = ast.get_docstring(node)
if original_docstring is not None:
node.body = node.body[1:]
for doc_node in reversed(doc_nodes):
node.body.insert(0, doc_node)
def cases_dict(section, required_types):
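    # Builds {TestCaseName: {test_method_name: doc_text}}: each sub-section
    # whose h5 heading carries an rfc2119 em tag of a requested type (MUST,
    # SHOULD, ...) becomes a test method, grouped under a CamelCase case name
    # derived from the slugified id of its parent section.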
testcases = OrderedDict()
rfc2119_types = ' or '.join(['@title="%s"'%st for st in required_types])
for testmethod in section.xpath('.//section[h5[em[@class="rfc2119"][%s]]]'%rfc2119_types):
casename = ''.join((subname.capitalize()
for subname
in slugify(testmethod.xpath('../@id').pop()).split('-')))
if not casename in testcases:
testcases[casename] = OrderedDict()
testname = 'test_%s'%testmethod.xpath('.//span[@class="secno"]/text()')[0].strip().replace('.', '_')
testcases[casename][testname] = testmethod.text_content()
return testcases
def create_testmethod(name, doc):
method_node = ast.FunctionDef(name=name,
args=ast.arguments(
args=[ast.arg(arg='self', annotation='self')],
vararg=None,
varargannotation=None,
kwonlyargs=[],
kwarg=None,
kwargannotation=None,
defaults=[],
kw_defaults=[]),
body=get_doc_nodes(doc)+[Pass(),],
decorator_list=[],
returns=None)
# print(to_source(method_node))
return method_node
def to_source(node, indent_with=' ' * 4, add_line_information=False):
generator = SourceGenerator(indent_with, add_line_information)
generator.visit(node)
def node_repr(node):
if isinstance(node, str):
return node
if isinstance(node, ast.alias):
return node.name
return ''.join([node_repr(r) for r in generator.result])
def update_module_with_section(module_node, section, section_types, testbases):
section_html = html.tostring(section).decode()
doc = html2text(section_html)
rewrite_doc(module_node, doc)
testcases = cases_dict(section, section_types)
for node in module_node.body[3:]:
if isinstance(node, ast.ClassDef) and node.name in testcases:
#rewrite docs for existing methods in existing case
docs_transformer = AppendDocToMethods(testcases[node.name])
docs_transformer.visit(node)
#append uncreated methods to existing case
for (testmethod, doc) in docs_transformer.testmethods.items():
node.body.append(create_testmethod(testmethod, doc))
del testcases[node.name]
#create uncreated testcases
for testcase, testmethods in testcases.items():
if not testmethods:
continue
casedef = ast.ClassDef( name=testcase,
body=[],
bases=(ast.Name(base, ast.Load()) for base in testbases),
decorator_list=[])
for testmethod, doc in testmethods.items():
casedef.body.append(create_testmethod(testmethod, doc))
module_node.body.append(casedef)
return module_node
def main(args=sys.argv[1:], argparser=get_argparser()):
args = argparser.parse_args(args)
    print(args)
opener = urllib.request.build_opener()
# opener.addheaders = [('User-agent', 'Mozilla/5.0')]
with opener.open(args.url) as response:
et = html.parse(response)
sections = []
for chapterid in ('ldpr', 'ldpc'):
sections.extend(et.xpath('//section[@id="%s"]/section[@class=not("informative")]'%chapterid))
for section in sections:
module_name = 'test_%s'%slugify(section.xpath('./h3')[0]\
.text_content().split(' ', 1)[1])\
.replace('-','_')
try:
existing_test_module = __import__(module_name)
with open(existing_test_module.__file__, 'r') as module_file:
module_source = module_file.read()
        except ImportError as e:
            if e.name == module_name:
                module_source = args.header
            else:
                raise
module_node = ast.parse(module_source)
module_node = update_module_with_section(module_node,
section,
args.rfc2119_class,
args.testbases)
with open(path.join(args.path, '%s.py'%module_name), 'wb') as module_file:
module_file.write(bytes(to_source(module_node), 'utf8'))
from ast import *
def dump(node, annotate_fields=True, include_attributes=False, indent=' '):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node, level=0):
if isinstance(node, AST):
fields = [(a, _format(b, level)) for a, b in iter_fields(node)]
if include_attributes and node._attributes:
fields.extend([(a, _format(getattr(node, a), level))
for a in node._attributes])
return ''.join([
node.__class__.__name__,
'(',
', '.join(('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)),
')'])
elif isinstance(node, list):
lines = ['[']
lines.extend((indent * (level + 2) + _format(x, level + 2) + ','
for x in node))
if len(lines) > 1:
lines.append(indent * (level + 1) + ']')
else:
lines[-1] += ']'
return '\n'.join(lines)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
def parseprint(code, filename="<string>", mode="exec", **kwargs):
"""Parse some code from a string and pretty-print it."""
node = parse(code, mode=mode) # An ode to the code
print(dump(node, **kwargs))
ccc = """
class A(object):
def test_method(self, x):
pass
"""
if __name__ == '__main__':
main()
# parseprint(ccc)
| denz/ldp | test/requirements/w3tests_update.py | Python | bsd-3-clause | 9,216 | ["VisIt"] | 9e6510312ef11493f86b3069af91a2947239b9353f6a749e3b1897688f330ff2 |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
from pyscf.nao.m_libnao import libnao
from ctypes import POINTER, c_int64, byref
libnao.init_dens_libnao.argtypes = ( POINTER(c_int64), ) # info
def init_dens_libnao():
""" Initilize the auxiliary for computing the density on libnao site """
info = c_int64(-999)
libnao.init_dens_libnao( byref(info))
return info.value
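
if __name__ == '__main__':
  # Minimal usage sketch (an assumption, not part of the pyscf API surface):
  # run the initializer once and print the status code libnao writes back;
  # 0 is assumed to mean success.
  print('init_dens_libnao returned', init_dens_libnao())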
| gkc1000/pyscf | pyscf/nao/m_init_dens_libnao.py | Python | apache-2.0 | 991 | ["PySCF"] | 378b443a79dc089dba55f996d7512eadb6c834822de17cf97233a00248861b30 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
url(r'^contact/$', TemplateView.as_view(template_name='pages/contact.html'), name="contact"),
url(r'^recommendations/$', TemplateView.as_view(template_name='recommend_all/front.html'), name="recommendations"),
url(r'^recommendations/all$', 'goodbooks.recommend_all.views.index', name="all"),
# url(r'^recommend_all/all$', TemplateView.as_view(template_name='recommend_all/all.html'), name="recommend_all"),
url(r'^recommendations/books$', 'goodbooks.recommend_all.views.books', name="books"),
url(r'^recommendations/movies$', 'goodbooks.recommend_all.views.movies', name="movies"),
url(r'^recommendations/music$', 'goodbooks.recommend_all.views.music', name="music"),
url(r'^recommendations/suggest_book$', 'goodbooks.recommend_all.views.suggest_book', name="suggest_book"),
url(r'^recommendations/suggest_movie$', 'goodbooks.recommend_all.views.suggest_movie', name="suggest_movie"),
url(r'^recommendations/suggest_music$', 'goodbooks.recommend_all.views.suggest_music', name="suggest_music"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("goodbooks.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just
    # visit these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
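# Note: the dotted-string view references above (e.g.
# 'goodbooks.recommend_all.views.index') use the pre-Django-1.10 calling
# convention; on Django >= 1.10 they must be replaced with imported callables.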
| joanneko/goodbooks | config/urls.py | Python | bsd-3-clause | 2,273 | ["VisIt"] | f65cdff8a5bebf82380ce62e2a330ddf079e086647b21b3c463c3d2175b5818d |
""" Module holding function(s) creating the pilot wrapper.
    This is a DIRAC-free module, so it could also be used outside of DIRAC installations.
    The main client of this module is the SiteDirector, which invokes the functions here more or less like this::
pilotFilesCompressedEncodedDict = getPilotFilesCompressedEncodedDict(pilotFiles)
localPilot = pilotWrapperScript(pilotFilesCompressedEncodedDict,
pilotOptions,
pilotExecDir)
_writePilotWrapperFile(localPilot=localPilot)
"""
import os
import tempfile
import base64
import bz2
pilotWrapperContent = """#!/bin/bash
/usr/bin/env python << EOF
# imports
from __future__ import print_function
import os
import stat
import tempfile
import sys
import shutil
import base64
import bz2
import logging
import time
import tarfile
# setting up the logging
formatter = logging.Formatter(fmt='%%(asctime)s UTC %%(levelname)-8s %%(message)s', datefmt='%%Y-%%m-%%d %%H:%%M:%%S')
logging.Formatter.converter = time.gmtime
try:
screen_handler = logging.StreamHandler(stream=sys.stdout)
except TypeError: # python2.6
screen_handler = logging.StreamHandler(strm=sys.stdout)
screen_handler.setFormatter(formatter)
logger = logging.getLogger('pilotLogger')
logger.setLevel(logging.DEBUG)
logger.addHandler(screen_handler)
# just logging the environment as first thing
logger.debug('===========================================================')
logger.debug('Environment of execution host\\n')
for key, val in os.environ.items():
logger.debug(key + '=' + val)
logger.debug('===========================================================\\n')
# putting ourselves in the right directory
pilotExecDir = '%(pilotExecDir)s'
if not pilotExecDir:
pilotExecDir = os.getcwd()
pilotWorkingDirectory = tempfile.mkdtemp(suffix='pilot', prefix='DIRAC_', dir=pilotExecDir)
pilotWorkingDirectory = os.path.realpath(pilotWorkingDirectory)
os.chdir(pilotWorkingDirectory)
logger.info("Launching dirac-pilot script from %%s" %%os.getcwd())
"""
def pilotWrapperScript(pilotFilesCompressedEncodedDict=None,
pilotOptions='',
pilotExecDir='',
envVariables=None,
location=''):
""" Returns the content of the pilot wrapper script.
The pilot wrapper script is a bash script that invokes the system python. Linux only.
:param pilotFilesCompressedEncodedDict: this is a possible dict of name:compressed+encoded content files.
the proxy can be part of this, and of course the pilot files
:type pilotFilesCompressedEncodedDict: dict
:param pilotOptions: options with which to start the pilot
:type pilotOptions: basestring
:param pilotExecDir: pilot execution directory
:type pilotExecDir: basestring
:param envVariables: dictionary of environment variables
:type envVariables: dict
:param location: location where to get the pilot files
:type location: basestring
:returns: content of the pilot wrapper
:rtype: basestring
"""
if pilotFilesCompressedEncodedDict is None:
pilotFilesCompressedEncodedDict = {}
if envVariables is None:
envVariables = {}
compressedString = ""
# are there some pilot files to unpack? Then we create the unpacking string
for pfName, encodedPf in pilotFilesCompressedEncodedDict.items():
compressedString += """
try:
with open('%(pfName)s', 'w') as fd:
fd.write(bz2.decompress(base64.b64decode(\"\"\"%(encodedPf)s\"\"\")))
os.chmod('%(pfName)s', stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
except BaseException as x:
print(x, file=sys.stderr)
logger.error(x)
shutil.rmtree(pilotWorkingDirectory)
sys.exit(-1)
""" % {'encodedPf': encodedPf,
'pfName': pfName}
envVariablesString = ""
for name, value in envVariables.items(): # are there some environment variables to add?
envVariablesString += """
os.environ[\"%(name)s\"]=\"%(value)s\"
""" % {'name': name,
'value': value}
  # add X509_USER_PROXY to establish the pilot environment on cluster WNs
if 'proxy' in pilotFilesCompressedEncodedDict:
envVariablesString += """
os.environ['X509_USER_PROXY'] = os.path.join(pilotWorkingDirectory, 'proxy')
"""
# now building the actual pilot wrapper
localPilot = pilotWrapperContent % {'pilotExecDir': pilotExecDir}
if compressedString:
localPilot += """
# unpacking lines
logger.info("But first unpacking pilot files")
%s
""" % compressedString
if envVariablesString:
localPilot += """
# Modifying the environment
%s
""" % envVariablesString
if location:
localPilot += """
# Getting the pilot files
logger.info("Getting the pilot files from %(location)s")
location = '%(location)s'.replace(' ', '').split(',')
import random
random.shuffle(location)
# we try from the available locations
for loc in location:
# Getting the json file and the tar file
try:
# urllib is different between python 2 and 3
if sys.version_info < (3,):
from urllib2 import urlopen as url_library_urlopen
from urllib2 import URLError as url_library_URLError
else:
from urllib.request import urlopen as url_library_urlopen
from urllib.error import URLError as url_library_URLError
      # needs to distinguish whether the urlopen method accepts the 'context' param
      # in theory, it should be available from python 2.7.9
      # in practice, some earlier versions may ship a recent urllib that already contains the param
if 'context' in url_library_urlopen.__code__.co_varnames:
import ssl
context = ssl._create_unverified_context()
rJson = url_library_urlopen('https://' + loc + '/pilot/pilot.json',
timeout=10,
context=context)
rTar = url_library_urlopen('https://' + loc + '/pilot/pilot.tar',
timeout=10,
context=context)
break
else:
rJson = url_library_urlopen('https://' + loc + '/pilot/pilot.json',
timeout=10)
rTar = url_library_urlopen('https://' + loc + '/pilot/pilot.tar',
timeout=10)
break
except url_library_URLError:
    print('%%s unreachable' %% loc, file=sys.stderr)
    logger.error('%%s unreachable' %% loc)
else:
print("None of the locations of the pilot files is reachable", file=sys.stderr)
logger.error("None of the locations of the pilot files is reachable")
sys.exit(-1)
pj = open('pilot.json', 'wb')
pj.write(rJson.read())
pj.close()
pt = open('pilot.tar', 'wb')
pt.write(rTar.read())
pt.close()
pt = tarfile.open('pilot.tar', 'r')
pt.extractall()
pt.close()
""" % {'location': location}
localPilot += """
# now finally launching the pilot script (which should be called dirac-pilot.py)
cmd = "python dirac-pilot.py %s"
logger.info('Executing: %%s' %% cmd)
sys.stdout.flush()
os.system(cmd)
# and cleaning up
shutil.rmtree(pilotWorkingDirectory)
EOF
""" % pilotOptions
return localPilot
def getPilotFilesCompressedEncodedDict(pilotFiles, proxy=None):
""" this function will return the dictionary of pilot files names : encodedCompressedContent
that we are going to send
:param pilotFiles: list of pilot files (list of location on the disk)
:type pilotFiles: list
:param proxy: the proxy to send
:type proxy: X509Chain
"""
pilotFilesCompressedEncodedDict = {}
for pf in pilotFiles:
with open(pf, "r") as fd:
pfContent = fd.read()
pfContentEncoded = base64.b64encode(bz2.compress(pfContent, 9))
pilotFilesCompressedEncodedDict[os.path.basename(pf)] = pfContentEncoded
if proxy is not None:
compressedAndEncodedProxy = base64.b64encode(bz2.compress(proxy.dumpAllToString()['Value']))
pilotFilesCompressedEncodedDict['proxy'] = compressedAndEncodedProxy
return pilotFilesCompressedEncodedDict
def _writePilotWrapperFile(workingDirectory=None, localPilot=''):
""" write the localPilot string to a file, rurn the file name
:param workingDirectory: the directory where to store the pilot wrapper file
:type workingDirectory: basestring
:param localPilot: content of the pilot wrapper
:type localPilot: basestring
:returns: file name of the pilot wrapper
:rtype: basestring
"""
fd, name = tempfile.mkstemp(suffix='_pilotwrapper.py', prefix='DIRAC_', dir=workingDirectory)
with os.fdopen(fd, 'w') as pilotWrapper:
pilotWrapper.write(localPilot)
return name
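
if __name__ == '__main__':
  # Hedged smoke test, not part of the DIRAC API: compress and encode this
  # very file, render a wrapper around it, and write the wrapper out. Note
  # that getPilotFilesCompressedEncodedDict() reads in text mode and feeds
  # str to bz2.compress, which assumes a Python 2 runtime.
  encoded = getPilotFilesCompressedEncodedDict([__file__])
  wrapper = pilotWrapperScript(pilotFilesCompressedEncodedDict=encoded,
                               pilotOptions='--debug')
  print(_writePilotWrapperFile(localPilot=wrapper))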
| fstagni/DIRAC | WorkloadManagementSystem/Utilities/PilotWrapper.py | Python | gpl-3.0 | 8,620 | ["DIRAC"] | dcf60c3f00b6bc0f6e92d6dc08e430fbe17b575ef3a84cce56bd88ee69000306 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tempfile
from unittest import TestCase
import numpy as np
import tensorflow as tf
from bigdl.orca.learn.trigger import SeveralIteration
from pyspark.sql.context import SQLContext
import bigdl.orca.data.pandas
from bigdl.dllib.nncontext import init_nncontext
from bigdl.orca import OrcaContext
from bigdl.orca.data.tf.data import Dataset
from bigdl.orca.learn.tf.estimator import Estimator
from bigdl.dllib.utils.tf import save_tf_checkpoint, load_tf_checkpoint, get_checkpoint_state
resource_path = os.path.join(os.path.split(__file__)[0], "../../resources")
class SimpleModel(object):
def __init__(self):
self.user = tf.placeholder(dtype=tf.int32, shape=(None,))
self.item = tf.placeholder(dtype=tf.int32, shape=(None,))
self.label = tf.placeholder(dtype=tf.int32, shape=(None,))
feat = tf.stack([self.user, self.item], axis=1)
self.logits = tf.layers.dense(tf.to_float(feat), 2)
self.loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(logits=self.logits,
labels=self.label))
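# SimpleModel above is a minimal two-class classifier over (user, item) id
# pairs: the two integer placeholders are stacked, cast to float, passed
# through one dense layer, and trained with sparse softmax cross-entropy.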
class TestEstimatorForGraph(TestCase):
def setup_method(self, method):
OrcaContext.train_data_store = "DRAM"
def test_estimator_graph(self):
import bigdl.orca.data.pandas
tf.reset_default_graph()
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
"y": df['label'].to_numpy()
}
return result
data_shard = data_shard.transform_shard(transform)
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
outputs=[model.logits],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss})
est.fit(data=data_shard,
batch_size=8,
epochs=10,
validation_data=data_shard)
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
}
return result
data_shard = data_shard.transform_shard(transform)
predictions = est.predict(data_shard).collect()
assert 'prediction' in predictions[0]
print(predictions)
def test_estimator_graph_fit(self):
import bigdl.orca.data.pandas
tf.reset_default_graph()
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
"y": df['label'].to_numpy()
}
return result
data_shard = data_shard.transform_shard(transform)
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss})
est.fit(data=data_shard,
batch_size=8,
epochs=10,
validation_data=data_shard)
def test_estimator_graph_evaluate(self):
import bigdl.orca.data.pandas
tf.reset_default_graph()
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
"y": df['label'].to_numpy()
}
return result
data_shard = data_shard.transform_shard(transform)
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss})
result = est.evaluate(data_shard)
assert "loss" in result
print(result)
def test_estimator_graph_predict(self):
import bigdl.orca.data.pandas
tf.reset_default_graph()
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
est = Estimator.from_graph(
inputs=[model.user, model.item],
outputs=[model.logits])
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
}
return result
data_shard = data_shard.transform_shard(transform)
predictions = est.predict(data_shard).collect()
print(predictions)
def test_estimator_graph_pandas_dataframe(self):
import bigdl.orca.data.pandas
tf.reset_default_graph()
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss})
est.fit(data=data_shard,
batch_size=8,
epochs=10,
feature_cols=['user', 'item'],
label_cols=['label'],
validation_data=data_shard)
result = est.evaluate(data_shard, feature_cols=['user', 'item'], label_cols=['label'])
assert "loss" in result
print(result)
est = Estimator.from_graph(
inputs=[model.user, model.item],
outputs=[model.logits])
predictions = est.predict(data_shard, feature_cols=['user', 'item']).collect()
print(predictions)
def test_estimator_graph_fit_clip(self):
import bigdl.orca.data.pandas
tf.reset_default_graph()
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
"y": df['label'].to_numpy()
}
return result
data_shard = data_shard.transform_shard(transform)
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
clip_norm=1.2,
metrics={"loss": model.loss})
est.fit(data=data_shard,
batch_size=8,
epochs=10,
validation_data=data_shard)
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
clip_value=0.2,
metrics={"loss": model.loss})
est.fit(data=data_shard,
batch_size=8,
epochs=10,
validation_data=data_shard)
def test_estimator_graph_checkpoint(self):
import bigdl.orca.data.pandas
tf.reset_default_graph()
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
"y": df['label'].to_numpy()
}
return result
data_shard = data_shard.transform_shard(transform)
temp = tempfile.mkdtemp()
model_dir = os.path.join(temp, "test_model")
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss},
model_dir=model_dir
)
est.fit(data=data_shard,
batch_size=8,
epochs=6,
validation_data=data_shard,
checkpoint_trigger=SeveralIteration(4))
est.sess.close()
tf.reset_default_graph()
model = SimpleModel()
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss},
model_dir=model_dir
)
est.load_orca_checkpoint(model_dir)
est.fit(data=data_shard,
batch_size=8,
epochs=10,
validation_data=data_shard)
result = est.evaluate(data_shard)
assert "loss" in result
print(result)
shutil.rmtree(temp)
def test_estimator_graph_fit_dataset(self):
import bigdl.orca.data.pandas
tf.reset_default_graph()
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
"y": df['label'].to_numpy()
}
return result
data_shard = data_shard.transform_shard(transform)
dataset = Dataset.from_tensor_slices(data_shard)
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss})
est.fit(data=dataset,
batch_size=8,
epochs=10,
validation_data=dataset)
result = est.evaluate(dataset, batch_size=4)
assert 'loss' in result
def test_estimator_graph_predict_dataset(self):
tf.reset_default_graph()
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
est = Estimator.from_graph(
inputs=[model.user, model.item],
outputs=[model.logits])
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
}
return result
data_shard = data_shard.transform_shard(transform)
dataset = Dataset.from_tensor_slices(data_shard)
predictions = est.predict(dataset).collect()
assert len(predictions) == 48
def test_estimator_graph_dataframe(self):
tf.reset_default_graph()
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
sc = init_nncontext()
sqlcontext = SQLContext(sc)
df = sqlcontext.read.csv(file_path, header=True, inferSchema=True)
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
outputs=[model.logits],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss})
est.fit(data=df,
batch_size=8,
epochs=10,
feature_cols=['user', 'item'],
label_cols=['label'],
validation_data=df)
result = est.evaluate(df, batch_size=4, feature_cols=['user', 'item'],
label_cols=['label'])
print(result)
prediction_df = est.predict(df, batch_size=4, feature_cols=['user', 'item'])
assert 'prediction' in prediction_df.columns
predictions = prediction_df.collect()
assert len(predictions) == 48
def test_estimator_graph_dataframe_exception(self):
tf.reset_default_graph()
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
sc = init_nncontext()
sqlcontext = SQLContext(sc)
df = sqlcontext.read.csv(file_path, header=True, inferSchema=True)
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
outputs=[model.logits],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss})
with self.assertRaises(Exception) as context:
est.fit(data=df,
batch_size=8,
epochs=10,
feature_cols=['user', 'item'],
validation_data=df)
self.assertTrue('label columns is None; it should not be None in training'
in str(context.exception))
est.fit(data=df,
batch_size=8,
epochs=10,
feature_cols=['user', 'item'],
label_cols=['label']
)
with self.assertRaises(Exception) as context:
predictions = est.predict(df, batch_size=4).collect()
self.assertTrue('feature columns is None; it should not be None in prediction'
in str(context.exception))
with self.assertRaises(Exception) as context:
est.fit(data=df,
batch_size=8,
epochs=10,
feature_cols=['user', 'item'],
label_cols=['label'],
validation_data=[1, 2, 3])
self.assertTrue('train data and validation data should be both Spark DataFrame'
in str(context.exception))
def test_checkpoint_remote(self):
tf.reset_default_graph()
model = SimpleModel()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
temp = tempfile.mkdtemp()
save_tf_checkpoint(sess, os.path.join(temp, "simple.ckpt"), saver)
ckpt = get_checkpoint_state(temp)
assert ckpt.model_checkpoint_path == os.path.join(temp, "simple.ckpt")
assert ckpt.all_model_checkpoint_paths[0] == os.path.join(temp, "simple.ckpt")
load_tf_checkpoint(sess, os.path.join(temp, "simple.ckpt"), saver)
shutil.rmtree(temp)
def _test_estimator_graph_tf_dataset(self, dataset_creator):
tf.reset_default_graph()
model = SimpleModel()
dataset = dataset_creator()
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
outputs=[model.logits],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss})
est.fit(data=dataset,
batch_size=8,
epochs=10,
validation_data=dataset)
result = est.evaluate(dataset, batch_size=4)
assert 'loss' in result
def test_estimator_graph_tf_dataset(self):
def dataset_creator():
dataset = tf.data.Dataset.from_tensor_slices((np.random.randint(0, 200, size=(100,)),
np.random.randint(0, 50, size=(100,)),
np.ones(shape=(100,), dtype=np.int32)))
return dataset
self._test_estimator_graph_tf_dataset(dataset_creator)
def test_estimator_graph_tf_dataset_v2(self):
def dataset_creator():
dataset = tf.data.Dataset.from_tensor_slices((np.random.randint(0, 200, size=(100,)),
np.random.randint(0, 50, size=(100,)),
np.ones(shape=(100,), dtype=np.int32)))
return dataset._dataset
self._test_estimator_graph_tf_dataset(dataset_creator)
def test_estimator_graph_tensorboard(self):
tf.reset_default_graph()
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
"y": df['label'].to_numpy()
}
return result
data_shard = data_shard.transform_shard(transform)
temp = tempfile.mkdtemp()
        # only set the model dir; summaries are generated under the model dir
model_dir = os.path.join(temp, "test_model")
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss},
model_dir=model_dir
)
est.fit(data=data_shard,
batch_size=8,
epochs=5,
validation_data=data_shard)
train_tp = est.get_train_summary("Throughput")
val_scores = est.get_validation_summary("loss")
assert len(train_tp) > 0
assert len(val_scores) > 0
# set tensorboard dir to different directory
est.set_tensorboard("model", "test")
est.fit(data=data_shard,
batch_size=8,
epochs=5,
validation_data=data_shard)
train_tp = est.get_train_summary("Throughput")
val_scores = est.get_validation_summary("loss")
assert len(train_tp) > 0
assert len(val_scores) > 0
# no model dir, no tensorboard dir, no summary saved
est2 = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss}
)
est2.fit(data=data_shard,
batch_size=8,
epochs=5,
validation_data=data_shard)
train_tp = est2.get_train_summary("Throughput")
val_scores = est2.get_validation_summary("loss")
assert train_tp is None
assert val_scores is None
shutil.rmtree(temp)
def test_estimator_graph_save_load(self):
import bigdl.orca.data.pandas
tf.reset_default_graph()
# save
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
"y": df['label'].to_numpy()
}
return result
data_shard = data_shard.transform_shard(transform)
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
outputs=[model.logits],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss},
sess=None
)
est.fit(data=data_shard,
batch_size=8,
epochs=10,
validation_data=data_shard)
temp = tempfile.mkdtemp()
model_checkpoint = os.path.join(temp, 'test.ckpt')
est.save_tf_checkpoint(model_checkpoint)
est.sess.close()
tf.reset_default_graph()
# load
with tf.Session() as sess:
model = SimpleModel()
saver = tf.train.Saver(tf.global_variables())
saver.restore(sess, model_checkpoint)
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
outputs=[model.logits],
loss=model.loss,
metrics={"loss": model.loss},
sess=sess
)
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
}
return result
data_shard = data_shard.transform_shard(transform)
predictions = est.predict(data_shard).collect()
assert 'prediction' in predictions[0]
print(predictions)
shutil.rmtree(temp)
def test_estimator_save_load(self):
import bigdl.orca.data.pandas
tf.reset_default_graph()
# save
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
"y": df['label'].to_numpy()
}
return result
data_shard = data_shard.transform_shard(transform)
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
outputs=[model.logits],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss},
sess=None
)
est.fit(data=data_shard,
batch_size=8,
epochs=5,
validation_data=data_shard)
temp = tempfile.mkdtemp()
model_checkpoint = os.path.join(temp, 'tmp.ckpt')
est.save(model_checkpoint)
est.shutdown()
tf.reset_default_graph()
# load
with tf.Session() as sess:
model = SimpleModel()
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
outputs=[model.logits],
loss=model.loss,
metrics={"loss": model.loss},
sess=sess
)
est.load(model_checkpoint)
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
}
return result
data_shard = data_shard.transform_shard(transform)
predictions = est.predict(data_shard).collect()
assert 'prediction' in predictions[0]
print(predictions)
shutil.rmtree(temp)
def test_estimator_graph_with_bigdl_optim_method(self):
import bigdl.orca.data.pandas
tf.reset_default_graph()
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
"y": df['label'].to_numpy()
}
return result
data_shard = data_shard.transform_shard(transform)
from bigdl.orca.learn.optimizers import SGD
from bigdl.orca.learn.optimizers.schedule import Plateau
sgd = SGD(learningrate=0.1,
learningrate_schedule=Plateau("score",
factor=0.1,
patience=10,
mode="min", ))
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
outputs=[model.logits],
loss=model.loss,
optimizer=sgd,
metrics={"loss": model.loss})
est.fit(data=data_shard,
batch_size=8,
epochs=10,
validation_data=data_shard)
def test_estimator_graph_fit_mem_type(self):
import bigdl.orca.data.pandas
tf.reset_default_graph()
model = SimpleModel()
file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
data_shard = bigdl.orca.data.pandas.read_csv(file_path)
def transform(df):
result = {
"x": (df['user'].to_numpy(), df['item'].to_numpy()),
"y": df['label'].to_numpy()
}
return result
data_shard = data_shard.transform_shard(transform)
est = Estimator.from_graph(
inputs=[model.user, model.item],
labels=[model.label],
loss=model.loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"loss": model.loss})
OrcaContext.train_data_store = "DISK_2"
est.fit(data=data_shard,
batch_size=4,
epochs=10,
validation_data=data_shard
)
OrcaContext.train_data_store = "DRAM"
if __name__ == "__main__":
import pytest
pytest.main([__file__])
| intel-analytics/BigDL | python/orca/test/bigdl/orca/learn/spark/test_estimator_for_spark.py | Python | apache-2.0 | 26,014 | ["ORCA"] | 16f8c5af440a797117156e94da8dd8c01977decfa4c286e12250e586bca9683e |
import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
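    # With a diagonal covariance the density factorizes over features, so the
    # log-pdf for component i is the per-feature univariate normal log-pdfs
    # summed along the feature axis.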
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
        # covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
    # Test that multiple inits do not do much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
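    # For standard-normal data the expected NLL is roughly n_samples*n_dim*SGH,
    # so the references below are AIC ~ 2*NLL + 2*k and
    # BIC ~ 2*NLL + log(n_samples)*k, with k = g._n_parameters().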
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to raise a ``LinAlgError``
    exception when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
    # we build a dataset with two 2d components. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/sklearn/mixture/tests/test_gmm.py | Python | gpl-2.0 | 17,414 | ["Gaussian"] | affaa24724c36f637864b98d192e29dd25f1794b4678017a0f0381c5c8ddd168 |
#!/home/GLBRCORG/mplace/anaconda3/bin/python
"""
Program: MotifxPreAlign.py
Purpose: Use a list of phosphorylated peptide information to create a pre-aligned
peptide file for the motifx website.
         The program is called from Motifx.py, but can also be run standalone from
         the command line; command-line output is a text file: motifx_pre_align_results.txt
If called from another python program, the data will be in a dictionary
called pepInfo.
Input: A peptide information file and a orf fasta file (optional)
peptide info file:
YBR162W-A_T5,Induced,AVQtPR,AVQT*PR
YBL005W-B_S960,Induced,AVsPTDSTPPSTHTEDSKR,AVS*PTDSTPPSTHTEDSKR
YBL005W-B_T962,Induced,AVSPtDSTPPSTHTEDSKR,AVSPT*DSTPPSTHTEDSKR
orf fasta file:
The default is SGD R64-2-1, but user may optionally use their own.
       width: the default width to center the phosphorylated peptide on is 13; the user
              may optionally change this to any odd number.
output: A list of peptide sequences aligned to a specific orf, all the same
length.
* Only unique peptides will be used, any duplicates will be
thrown out.
            * Any peptide which cannot be extended because it is too close to the
              end will be discarded. For instance, if the width is 13 but the
              central residue is at the end of the gene, the sequence cannot be
              extended and the peptide is thrown out.
Removed peptides will be listed in Peptides-removed.txt
Dependencies: Python 3
BioPython
Output: gene name with amino acid sequence and motif
author: Mike Place
Date: 12/09/2016
version: 1.0
"""
from Bio import SeqIO
from collections import defaultdict
import argparse
import sys
class MotifxPreAlign(object):
""" An exact peptide sequence matching object.
Attributes:
genes - Gene set, taken from the peptide info file, used to limit orf search space by excluding genes not in set
orfRec - Fasta file to search for exact match, i.e. the orf fasta file
pepInfo - dictionary, key is the gene name
example:
'YGL076C_T8_S11', {'group': 'Induced',
'motif': '......S.SP...',
'origPep' : 'AAEKILtPEsQLKK',
'phosPep' : 'AAEKILT*PES*QLKK',
'extended': [ Seq('AAEKILTPESQLK',SingleLetterAlphabet())]} # extended is a list of Seq objects
where 'extended' is a list of peptides with the centered residue
having 6 peptides on either side, (default peptide length of 13)
window - specified width of peptide string, defaults to 13
"""
def __init__(self, fasta, pep, window):
""" Contruct exactFastaMatch object """
self.window = window # set width of peptide string
self.wing = int((window -1)/2) # number of amino acids on either side of phospho site
self.genes = set() # gene name set, for filtering orf fasta
self.pepInfo = self.getPepInfo(pep) # get peptide information
self.orfRec = self.filterFasta(fasta) # make a reduced list of seqIO objects (fasta sequences)
self.genesToRemove = [] # list of genes to remove from final results, i.e. no match or its been excluded
self.duplicatePeps = set() # used to find duplicate peptides
def getPepInfo(self, pep):
""" parse and return a dict of peptide information & sequence """
peplst = defaultdict(dict)
with open(pep, 'r') as file:
for ln in file:
if ln.startswith('Ppep'):
continue
ln = ln.rstrip()
row = ln.split(',')
peplst[row[0]]['group'] = row[1]
peplst[row[0]]['origPep'] = row[2].upper() # capitalize peptide
peplst[row[0]]['phosPep'] = row[3]
peplst[row[0]]['extended']= []
peplst[row[0]]['motif'] = '' # intentionally blank; expected to be filled in by the calling program.
gene = row[0].split('_')
self.genes.add(gene[0]) #Get gene name
return peplst
def filterFasta(self,fasta):
""" select only those fasta sequences that match the names in the gene list """
# open and create a list of all input fasta sequences
record = list(SeqIO.parse( fasta, 'fasta'))
filteredRec = []
# loop through all sequence records and match name to gene name
# if names match keep record
for rec in record:
if rec.name in self.genes:
# if the * appears at the end of the peptide remove it
if rec[-1] == '*':
rec = rec[0:-1]
filteredRec.append(rec)
return filteredRec
def matchSeq(self):
""" Attempt to match the sequences to the reduced gene list"""
# loop through short sequence to match to fasta records
for gene, info in self.pepInfo.items():
# locate each * index and subtract 1 to get the phosphorylated site; handles multiple sites w/in a peptide
idx = [ i-1 for i, ltr in enumerate(info['phosPep']) if ltr == '*']
count = 0
for i in idx:
# match the peptide to a gene
for subject in self.orfRec:
start = subject.seq.find(info['origPep']) # the index of the start position
if start >= 0: # if there is an exact match
orfLength = len(subject.seq) # get the length of gene
end = start + i + self.wing # add start index to the full peptide length
# exclude peptides which cannot be extended to the appropriate length
# peptide on right end
if end >= orfLength:
with open('Peptides-removed.txt', 'a') as rm:
removed = gene + ',' + info['phosPep'] + ',' + str(idx) + ' ' + 'Right, past end\n'
rm.write(removed)
rm.close()
self.genesToRemove.append(gene)
continue
elif (start + i) - self.wing < 0: # exclude peptides which are unable to extend at left end
with open('Peptides-removed.txt', 'a') as rm:
removed = gene + ',' + info['phosPep'] + ',' + str(idx) + ' ' + 'Left, past start\n'
rm.write(removed)
rm.close()
self.genesToRemove.append(gene)
continue
## extend peptide around phosphorylated amino acid site
phosPepPos = start + i - count
count += 1
extendPep = subject.seq[(phosPepPos - self.wing):(phosPepPos +1 + self.wing)]
# check for duplicate peptides from the same gene and remove duplicate, allow one to pass
# for example: YOR051C_S33 and YOR051C_S33_T34 -- produce 3 peptides in total
# we remove one of the YOR051C_S33, so it is not given extra weight by motifx
name = gene.split('_') # example split gene name YOR051C_S33
dupName = ('%s\t%s' %(name[0] ,str(extendPep) )) # 'YOR051C DEPSRESTPVRSQ'
# check if this pattern has already been seen
if dupName not in self.duplicatePeps:
info['extended'].append(extendPep)
self.duplicatePeps.add(dupName)
else:
with open('Peptides-removed.txt', 'a') as rm:
removed = ('%s\t DUPLICATE PEPTIDE FROM THE SAME GENE' %(dupName))
rm.write('%s\n' %(removed))
rm.close()
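# Minimal sketch (illustrative, not executed) of the windowing arithmetic
# used in matchSeq above: for a phospho site at index p in the orf
# sequence, the extended peptide is seq[p - wing : p + 1 + wing], which is
# exactly `window` residues long whenever both bounds stay inside the gene.
#   window = 13; wing = (window - 1) // 2    # wing == 6
#   seq = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIE"  # hypothetical orf sequence
#   p = 10                                   # phospho site index within seq
#   peptide = seq[p - wing:p + 1 + wing]     # 13 residues centered on p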
def cleanResult(self):
""" Remove gene names from self.pepInfo, for genes that have been excluded"""
for gn in self.genesToRemove:
if gn in self.pepInfo:
del self.pepInfo[gn]
def writeToFile(self):
""" Write the extended peptides to a file, as a table, example:
Gene Group Motif originalPep phosphoPep Motifx_extended_pep
YNL103W_T302 Induced TTHTPNR TTHT*PNR LHRTTHTPNRSSP
YJL020C_S893_T894 Induced RSTTHDVGEISNNVK RS*T*THDVGEISNNVK VASIRRSTTHDVG,ASIRRSTTHDVGE
"""
with open('motifx_pre_align_results.txt', 'w') as out:
# write table header
out.write("Gene\tGroup\tMotif\toriginalPep\tphosphoPep\tMotifx_extended_pep\n")
for gene,info in self.pepInfo.items():
zpep = []
# merge the new extended peptides into a single comma delimited string
for i in info['extended']:
zpep.append(str(i))
extd = ','.join(zpep)
out.write("%s\t%s\t%s\t%s\t%s\t%s\n" %(gene, info['group'], info['motif'], info['origPep'], info['phosPep'], extd))
def main():
"""
Main, parse cmd line args and call writeRefFasta.
"""
#******************************************************************************
# Command line args
#******************************************************************************
cmdparser = argparse.ArgumentParser(description="Pre-align peptides to an orf fasta file for use with Motifx website.",
usage='%(prog)s -f <fasta file> -p <peptide info file>' ,prog='MotifxPreAlign.py' )
cmdparser.add_argument('-f', '--fasta',action='store', dest='FASTA',help='Optional: orf fasta input file (default SGD R64-2-1)', metavar='')
cmdparser.add_argument('-i', '--info', action='store_true',dest='INFO', help='Print a more detailed description')
cmdparser.add_argument('-p', '--pep', action='store', dest='PEP', help='Required: Peptide info file', metavar='')
cmdparser.add_argument('-w', '--width',action='store', dest='WID', help='Optional: width of peptide, (13)', metavar='')
cmdResults = vars(cmdparser.parse_args())
# if no args print help
if len(sys.argv) == 1:
print("")
cmdparser.print_help()
sys.exit(1)
#******************************************************************************
# Check and define input variables
#******************************************************************************
# Print detailed program information to screen
if cmdResults['INFO']:
print("\n\tMotifxPreAlign.py\n")
print("\tPurpose: Pre-align peptides to an orf fasta file for use with Motifx website.")
print("\n\tRequired parameters:")
print("\n\t-p peptide info file,")
print("\n\t Example, one per line of the following:")
print("\n\t YBR162W-A_T5,Induced,AVQtPR,AVQT*PR ")
print("\t YGL076C_T8_S11,Induced,AAEKILtPEsQLKK,AAEKILT*PES*QLKK")
print("\n\tOptional parameters:\n")
print("\t-f fasta file, expected to be an orf file.")
print("\t-w width of peptides, all peptides will be the same length.")
print("\n\tOutut: A list of peptides centered on the phosphorylated amino acid.")
print("\tList is meant to be used as input for the motifx website.")
print("\n\tTo see Python Docs and get a better explaination of the program:")
print("\n\tOpen python console and enter")
print("\timport sys")
print("\tsys.path.append('/full/path/to/script')")
print("\timport MotifxPreAlign")
print("\thelp(MotifxPreAlign)")
print("\n\tSee Mike Place for help or suggestions.\n")
sys.exit(1)
# get fasta sequence file to query
if cmdResults['FASTA'] is not None:
fastaFile = cmdResults['FASTA']
# get short sequences to find in the fasta sequences.
if cmdResults['PEP'] is not None:
pepFile = cmdResults['PEP']
# check for alternate window size
if cmdResults['WID'] is not None:
window = int(cmdResults['WID'])
else:
window = 13
pepd = MotifxPreAlign(fastaFile, pepFile, window) # create object
pepd.matchSeq()
pepd.cleanResult()
pepd.writeToFile()
if __name__ == "__main__":
main()
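# Usage sketch (hypothetical file names; the CSV layout follows the module
# docstring above):
#   python MotifxPreAlign.py -f orf_trans.fasta -p peptide_info.csv -w 13
# or from another python program:
#   pre = MotifxPreAlign('orf_trans.fasta', 'peptide_info.csv', 13)
#   pre.matchSeq(); pre.cleanResult(); pre.writeToFile()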
|
mmacgilvray18/Phospho_Network
|
MotifxPreAlign.py
|
Python
|
gpl-3.0
| 13,261
|
[
"Biopython"
] |
0a537a7844c3701ed37e97f1bdbcae9936180307b77418321f3a05bb5e698531
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all files contain proper licensing information."""
import json
import optparse
import os.path
import subprocess
import sys
def PrintUsage():
print """Usage: python checklicenses.py [--root <root>] [tocheck]
--root Specifies the repository root. This defaults to "../.." relative
to the script file. This will be correct given the normal location
of the script in "<root>/tools/checklicenses".
--ignore-suppressions Ignores path-specific license whitelist. Useful when
trying to remove a suppression/whitelist entry.
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python checklicenses.py
python checklicenses.py --root ~/chromium/src third_party"""
WHITELISTED_LICENSES = [
'Anti-Grain Geometry',
'Apache (v2.0)',
'Apache (v2.0) BSD (2 clause)',
'Apache (v2.0) GPL (v2)',
'Apple MIT', # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License
'APSL (v2)',
'APSL (v2) BSD (4 clause)',
'BSD',
'BSD (2 clause)',
'BSD (2 clause) ISC',
'BSD (2 clause) MIT/X11 (BSD like)',
'BSD (3 clause)',
'BSD (3 clause) GPL (v2)',
'BSD (3 clause) ISC',
'BSD (3 clause) LGPL (v2 or later)',
'BSD (3 clause) LGPL (v2.1 or later)',
'BSD (3 clause) MIT/X11 (BSD like)',
'BSD (4 clause)',
'BSD-like',
# TODO(phajdan.jr): Make licensecheck not print BSD-like twice.
'BSD-like MIT/X11 (BSD like)',
'BSL (v1.0)',
'FreeType (BSD like)',
'FreeType (BSD like) with patent clause',
'GPL (v2) LGPL (v2.1 or later)',
'GPL (v2 or later) with Bison parser exception',
'GPL (v2 or later) with libtool exception',
'GPL (v3 or later) with Bison parser exception',
'GPL with Bison parser exception',
'Independent JPEG Group License',
'ISC',
'LGPL (unversioned/unknown version)',
'LGPL (v2)',
'LGPL (v2 or later)',
'LGPL (v2.1)',
'LGPL (v2.1 or later)',
'LGPL (v3 or later)',
'MIT/X11 (BSD like)',
'MIT/X11 (BSD like) LGPL (v2.1 or later)',
'MPL (v1.0) LGPL (v2 or later)',
'MPL (v1.1)',
'MPL (v1.1) BSD (3 clause) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) BSD (3 clause) LGPL (v2.1 or later)',
'MPL (v1.1) BSD-like',
'MPL (v1.1) BSD-like GPL (unversioned/unknown version)',
'MPL (v1.1) BSD-like GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (v2)',
'MPL (v1.1) GPL (v2) LGPL (v2 or later)',
'MPL (v1.1) GPL (v2) LGPL (v2.1 or later)',
'MPL (v1.1) GPL (unversioned/unknown version)',
'MPL (v1.1) LGPL (v2 or later)',
'MPL (v1.1) LGPL (v2.1 or later)',
'MPL (v2.0)',
'Ms-PL',
'Public domain',
'Public domain BSD',
'Public domain BSD (3 clause)',
'Public domain BSD-like',
'Public domain LGPL (v2.1 or later)',
'libpng',
'zlib/libpng',
'SGI Free Software License B',
'SunSoft (BSD like)',
'University of Illinois/NCSA Open Source License (BSD like)',
('University of Illinois/NCSA Open Source License (BSD like) '
'MIT/X11 (BSD like)'),
]
PATH_SPECIFIC_WHITELISTED_LICENSES = {
'base/third_party/icu': [ # http://crbug.com/98087
'UNKNOWN',
],
# http://code.google.com/p/google-breakpad/issues/detail?id=450
'breakpad/src': [
'UNKNOWN',
],
'buildtools/third_party/libc++/trunk/test': [
# http://llvm.org/bugs/show_bug.cgi?id=18291
'UNKNOWN',
],
'chrome/common/extensions/docs/examples': [ # http://crbug.com/98092
'UNKNOWN',
],
# This contains files copied from elsewhere in the tree. Since the copied
# directories might have suppressions below (like simplejson), whitelist the
# whole directory. This is also not shipped code.
'chrome/common/extensions/docs/server2/third_party': [
'UNKNOWN',
],
'courgette/third_party/bsdiff_create.cc': [ # http://crbug.com/98095
'UNKNOWN',
],
# This code is not shipped.
'mojo/public/third_party/ply': [
'UNKNOWN',
],
'native_client': [ # http://crbug.com/98099
'UNKNOWN',
],
'native_client/toolchain': [
'BSD GPL (v2 or later)',
'BSD (2 clause) GPL (v2 or later)',
'BSD (3 clause) GPL (v2 or later)',
'BSL (v1.0) GPL',
'BSL (v1.0) GPL (v3.1)',
'GPL',
'GPL (unversioned/unknown version)',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3.1)',
'GPL (v3 or later)',
],
'third_party/WebKit': [
'UNKNOWN',
],
# http://code.google.com/p/angleproject/issues/detail?id=217
'third_party/angle': [
'UNKNOWN',
],
# http://crbug.com/222828
# http://bugs.python.org/issue17514
'third_party/chromite/third_party/argparse.py': [
'UNKNOWN',
],
# http://crbug.com/326117
# https://bitbucket.org/chrisatlee/poster/issue/21
'third_party/chromite/third_party/poster': [
'UNKNOWN',
],
# http://crbug.com/333508
'third_party/clang_format/script': [
'UNKNOWN',
],
# http://crbug.com/333508
'buildtools/clang_format/script': [
'UNKNOWN',
],
# https://mail.python.org/pipermail/cython-devel/2014-July/004062.html
'third_party/cython': [
'UNKNOWN',
],
'third_party/devscripts': [
'GPL (v2 or later)',
],
'third_party/expat/files/lib': [ # http://crbug.com/98121
'UNKNOWN',
],
'third_party/ffmpeg': [
'GPL',
'GPL (v2)',
'GPL (v2 or later)',
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98123
],
'third_party/fontconfig': [
# https://bugs.freedesktop.org/show_bug.cgi?id=73401
'UNKNOWN',
],
'third_party/freetype2': [ # http://crbug.com/177319
'UNKNOWN',
],
'third_party/hunspell': [ # http://crbug.com/98134
'UNKNOWN',
],
'third_party/iccjpeg': [ # http://crbug.com/98137
'UNKNOWN',
],
'third_party/icu': [ # http://crbug.com/98301
'UNKNOWN',
],
'third_party/lcov': [ # http://crbug.com/98304
'UNKNOWN',
],
'third_party/lcov/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/libc++/trunk/test': [
# http://llvm.org/bugs/show_bug.cgi?id=18291
'UNKNOWN',
],
'third_party/libevent': [ # http://crbug.com/98309
'UNKNOWN',
],
'third_party/libjingle/source/talk': [ # http://crbug.com/98310
'UNKNOWN',
],
'third_party/libjpeg_turbo': [ # http://crbug.com/98314
'UNKNOWN',
],
# Many liblouis files are mirrored but not used in the NaCl module.
# They are not excluded from the mirror because of lack of infrastructure
# support. Getting license headers added to the files where missing is
# tracked in https://github.com/liblouis/liblouis/issues/22.
'third_party/liblouis/src': [
'GPL (v3 or later)',
'UNKNOWN',
],
'third_party/libpng': [ # http://crbug.com/98318
'UNKNOWN',
],
# The following files lack license headers, but are trivial.
'third_party/libusb/src/libusb/os/poll_posix.h': [
'UNKNOWN',
],
'third_party/libvpx/source': [ # http://crbug.com/98319
'UNKNOWN',
],
'third_party/libxml': [
'UNKNOWN',
],
'third_party/libxslt': [
'UNKNOWN',
],
'third_party/lzma_sdk': [
'UNKNOWN',
],
'third_party/mesa/src': [
'GPL (v2)',
'GPL (v3 or later)',
'MIT/X11 (BSD like) GPL (v3 or later) with Bison parser exception',
'UNKNOWN', # http://crbug.com/98450
],
'third_party/modp_b64': [
'UNKNOWN',
],
'third_party/openmax_dl/dl' : [
'Khronos Group',
],
'third_party/openssl': [ # http://crbug.com/98451
'UNKNOWN',
],
'third_party/boringssl': [
# There are some files in BoringSSL which came from OpenSSL and have no
# license in them. We don't wish to add the license header ourselves
# thus we don't expect to pass license checks.
'UNKNOWN',
],
'third_party/ots/tools/ttf-checksum.py': [ # http://code.google.com/p/ots/issues/detail?id=2
'UNKNOWN',
],
'third_party/molokocacao': [ # http://crbug.com/98453
'UNKNOWN',
],
'third_party/ocmock/OCMock': [ # http://crbug.com/98454
'UNKNOWN',
],
'third_party/protobuf': [ # http://crbug.com/98455
'UNKNOWN',
],
# https://bitbucket.org/ned/coveragepy/issue/313/add-license-file-containing-2-3-or-4
# BSD 2-clause license.
'third_party/pycoverage': [
'UNKNOWN',
],
'third_party/pyelftools': [ # http://crbug.com/222831
'UNKNOWN',
],
'third_party/scons-2.0.1/engine/SCons': [ # http://crbug.com/98462
'UNKNOWN',
],
'third_party/simplejson': [
'UNKNOWN',
],
'third_party/skia': [ # http://crbug.com/98463
'UNKNOWN',
],
'third_party/snappy/src': [ # http://crbug.com/98464
'UNKNOWN',
],
'third_party/smhasher/src': [ # http://crbug.com/98465
'UNKNOWN',
],
'third_party/speech-dispatcher/libspeechd.h': [
'GPL (v2 or later)',
],
'third_party/sqlite': [
'UNKNOWN',
],
# http://crbug.com/334668
# MIT license.
'tools/swarming_client/third_party/httplib2': [
'UNKNOWN',
],
# http://crbug.com/334668
# Apache v2.0.
'tools/swarming_client/third_party/oauth2client': [
'UNKNOWN',
],
# https://github.com/kennethreitz/requests/issues/1610
'tools/swarming_client/third_party/requests': [
'UNKNOWN',
],
'third_party/swig/Lib/linkruntime.c': [ # http://crbug.com/98585
'UNKNOWN',
],
'third_party/talloc': [
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98588
],
'third_party/tcmalloc': [
'UNKNOWN', # http://crbug.com/98589
],
'third_party/tlslite': [
'UNKNOWN',
],
'third_party/webdriver': [ # http://crbug.com/98590
'UNKNOWN',
],
# https://github.com/html5lib/html5lib-python/issues/125
# https://github.com/KhronosGroup/WebGL/issues/435
'third_party/webgl/src': [
'UNKNOWN',
],
'third_party/webrtc': [ # http://crbug.com/98592
'UNKNOWN',
],
'third_party/xdg-utils': [ # http://crbug.com/98593
'UNKNOWN',
],
'third_party/yasm/source': [ # http://crbug.com/98594
'UNKNOWN',
],
'third_party/zlib/contrib/minizip': [
'UNKNOWN',
],
'third_party/zlib/trees.h': [
'UNKNOWN',
],
'tools/emacs': [ # http://crbug.com/98595
'UNKNOWN',
],
'tools/gyp/test': [
'UNKNOWN',
],
'tools/python/google/__init__.py': [
'UNKNOWN',
],
'tools/stats_viewer/Properties/AssemblyInfo.cs': [
'UNKNOWN',
],
'tools/symsrc/pefile.py': [
'UNKNOWN',
],
# Not shipped, downloaded on trybots sometimes.
'tools/telemetry/third_party/gsutil': [
'BSD MIT/X11 (BSD like)',
'UNKNOWN',
],
'tools/telemetry/third_party/pyserial': [
# https://sourceforge.net/p/pyserial/feature-requests/35/
'UNKNOWN',
],
'v8/test/cctest': [ # http://crbug.com/98597
'UNKNOWN',
],
'v8/src/third_party/kernel/tools/perf/util/jitdump.h': [ # http://crbug.com/391716
'UNKNOWN',
],
}
def check_licenses(options, args):
# Figure out which directory we have to check.
if len(args) == 0:
# No directory to check specified, use the repository root.
start_dir = options.base_directory
elif len(args) == 1:
# Directory specified. Start here. It's supposed to be relative to the
# base directory.
start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
else:
# More than one argument, we don't handle this.
PrintUsage()
return 1
print "Using base directory:", options.base_directory
print "Checking:", start_dir
print
licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
'third_party',
'devscripts',
'licensecheck.pl'))
licensecheck = subprocess.Popen([licensecheck_path,
'-l', '100',
'-r', start_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = licensecheck.communicate()
if options.verbose:
print '----------- licensecheck stdout -----------'
print stdout
print '--------- end licensecheck stdout ---------'
if licensecheck.returncode != 0 or stderr:
print '----------- licensecheck stderr -----------'
print stderr
print '--------- end licensecheck stderr ---------'
print "\nFAILED\n"
return 1
used_suppressions = set()
errors = []
for line in stdout.splitlines():
filename, license = line.split(':', 1)
filename = os.path.relpath(filename.strip(), options.base_directory)
# All files in the build output directory are generated one way or another.
# There's no need to check them.
if filename.startswith('out/'):
continue
# For now we're just interested in the license.
license = license.replace('*No copyright*', '').strip()
# Skip generated files.
if 'GENERATED FILE' in license:
continue
if license in WHITELISTED_LICENSES:
continue
if not options.ignore_suppressions:
matched_prefixes = [
prefix for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES
if filename.startswith(prefix) and
license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]]
if matched_prefixes:
used_suppressions.update(set(matched_prefixes))
continue
errors.append({'filename': filename, 'license': license})
if options.json:
with open(options.json, 'w') as f:
json.dump(errors, f)
if errors:
for error in errors:
print "'%s' has non-whitelisted license '%s'" % (
error['filename'], error['license'])
print "\nFAILED\n"
print "Please read",
print "http://www.chromium.org/developers/adding-3rd-party-libraries"
print "for more info how to handle the failure."
print
print "Please respect OWNERS of checklicenses.py. Changes violating"
print "this requirement may be reverted."
# Do not print unused suppressions so that above message is clearly
# visible and gets proper attention. Too much unrelated output
# would be distracting and make the important points easier to miss.
return 1
print "\nSUCCESS\n"
if not len(args):
unused_suppressions = set(
PATH_SPECIFIC_WHITELISTED_LICENSES.iterkeys()).difference(
used_suppressions)
if unused_suppressions:
print "\nNOTE: unused suppressions detected:\n"
print '\n'.join(unused_suppressions)
return 0
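# Minimal sketch (illustrative, unused) of the suppression lookup performed
# in check_licenses above: a file's license is suppressed when some
# whitelist key is a path prefix of the file AND the license is listed
# under that prefix.
def _is_suppressed(filename, license):
  return any(filename.startswith(prefix) and
             license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]
             for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES)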
def main():
default_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
option_parser = optparse.OptionParser()
option_parser.add_option('--root', default=default_root,
dest='base_directory',
help='Specifies the repository root. This defaults '
'to "../.." relative to the script file, which '
'will normally be the repository root.')
option_parser.add_option('-v', '--verbose', action='store_true',
default=False, help='Print debug logging')
option_parser.add_option('--ignore-suppressions',
action='store_true',
default=False,
help='Ignore path-specific license whitelist.')
option_parser.add_option('--json', help='Path to JSON output file')
options, args = option_parser.parse_args()
return check_licenses(options, args)
if '__main__' == __name__:
sys.exit(main())
|
M4sse/chromium.src
|
tools/checklicenses/checklicenses.py
|
Python
|
bsd-3-clause
| 16,482
|
[
"Galaxy"
] |
2534eb56fe3ba8067b1a6811a35f64503344f8ddb692099e8a742eada4a6ae3e
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from py.path import local
import os
class ClassRegistry(dict):
"hold and register classes by nickname, to select later"
def register(self, name):
def inner(cls):
self[name] = cls
return cls
return inner
def files_in_dir(target):
for l in local(target).visit(sort=True):
if l.isfile():
yield l
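# Usage sketch (hypothetical class, for illustration):
#   registry = ClassRegistry()
#
#   @registry.register('markdown')
#   class MarkdownRenderer(object):
#       pass
#
#   registry['markdown']  # -> MarkdownRenderer, selected later by nickname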
|
BrianHicks/perch
|
perch/utils.py
|
Python
|
bsd-3-clause
| 414
|
[
"VisIt"
] |
8925af4f5dc0c327cd9a7bb757e4fab6222a1e1461943a45a0ebd3012b1cf711
|
# -*- coding: utf-8 -*-
#coding=utf-8
import sys,os
reload(sys)
sys.setdefaultencoding('utf8')
import logging
import urllib
import hashlib
import socket
import urllib2
from suds.client import Client
import xmltodict
from datetime import datetime
from collections import defaultdict
from lxml import html
import base64
from Crypto.Cipher import DES
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from edxmako.shortcuts import render_to_response, render_to_string
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from markupsafe import escape
import django.utils
from courseware import grades
from courseware.access import has_access
from courseware.courses import (get_courses, get_course_with_access, sort_by_announcement, sort_and_audited_items, get_course_info_section, filter_audited_items,
get_course_by_id, get_course, course_image_url, get_course_about_section, get_courses_by_search)
import courseware.tabs as tabs
from courseware.masquerade import setup_masquerade
from courseware.model_data import FieldDataCache
from .module_render import toc_for_course, get_module_for_descriptor, get_module
from courseware.models import StudentModule, StudentModuleHistory
from course_modes.models import CourseMode
from student.models import UserTestGroup, CourseEnrollment,UserProfile
from student.views import course_from_id, single_course_reverification_info
from util.cache import cache, cache_if_anonymous
from util.json_request import JsonResponse
from xblock.fragment import Fragment
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore, loc_mapper
from xmodule.modulestore.exceptions import InvalidLocationError, ItemNotFoundError, NoPathToItem
from xmodule.modulestore.search import path_to_location
from xmodule.course_module import CourseDescriptor
from xmodule.contentstore.content import StaticContent
import shoppingcart
from microsite_configuration import microsite
from student.roles import CourseRole, CourseInstructorRole, CourseStaffRole, GlobalStaff
import simplejson
log = logging.getLogger("edx.courseware")
template_imports = {'urllib': urllib}
def user_groups(user):
"""
TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
"""
if not user.is_authenticated():
return []
# TODO: Rewrite in Django
key = 'user_group_names_{user.id}'.format(user=user)
cache_expiration = 60 * 60 # one hour
# Kill caching on dev machines -- we switch groups a lot
group_names = cache.get(key)
if settings.DEBUG:
group_names = None
if group_names is None:
group_names = [u.name for u in UserTestGroup.objects.filter(users=user)]
cache.set(key, group_names, cache_expiration)
return group_names
#@ensure_csrf_cookie
#@cache_if_anonymous
def courses(request):
"""
Render "find courses" page. The course selection work is done in courseware.courses.
"""
q = request.GET.get('query', '')
courses_aa = get_courses_by_search(request.META.get('HTTP_HOST'))
courses_list = []
if q != "":
for course in courses_aa:
if q in course.org or q in course.id or q in course.display_name_with_default:
courses_list.append(course)
else:
continue
else:
courses_list = courses_aa
courses = sort_by_announcement(courses_list)
return render_to_response("courseware/courses.html", {'courses': filter_audited_items(courses)})
def return_fixed_courses(request, courses, action=None):
default_length = 8
course_id = request.GET.get("course_id")
if course_id:
course_id = course_id.replace(".", '/',2)
try:
# index_course = get_course_by_id(course_id)
index_course = 0
for i in courses:
if i.id != None:
if i.id == course_id:
index_course = i
break
print '============index_course==============='
print index_course.id
course_index = (courses.index(index_course) + 1)
except:
course_index = 0
current_list = courses[course_index:]
print '============current_list======================'
print current_list
if len(current_list) > default_length:
current_list = current_list[0:default_length]
course_list = []
for course in current_list:
try:
course_json = mobi_course_info(request, course, action)
course_list.append(course_json)
except:
continue
return JsonResponse({"count": len(courses), "course-list": course_list})
#def course_attr_list_handler(request, course_category, course_level=None):
#
# courses = get_courses(request.user, request.META.get('HTTP_HOST'))
# courses = sort_and_audited_items(courses)
# courses_list = []
#
# for course in courses:
# if course_level:
# if course.course_level == course_level and course.course_category == course_category:
# courses_list.append(course)
# elif course.course_category == course_category:
# courses_list.append(course)
# else:
# continue
#
# return return_fixed_courses(request, courses_list, None)
def course_attr_list_handler(request,course_category, course_level=None):
courses = get_courses(request.user, request.META.get('HTTP_HOST'))
courses = sort_and_audited_items(courses)
courses_list = []
for course in courses:
if course_level:
if course.course_level == course_level :
courses_list.append(course)
else:
continue
return return_fixed_courses(request, courses_list, None)
def courses_list_handler(request, action):
"""
Return courses based on request params
"""
try:
user = request.user
except:
user = AnonymousUser()
if action not in ["homefalls", "all", "hot", "latest", "my", "search", "rolling", "sync"]:
return JsonResponse({"success": False, "errmsg": "not support other actions except homefalls all hot latest rolling and my"})
def get_courses_depend_action(courses):
"""
Return courses depend on action
action: [homefalls, hot, latest, my, search]
homefalls: get all courses
hot: Number of attended people > ?
latest: the most recently announced courses
my: I registered
all: like 'homefalls'
"""
courses = sort_and_audited_items(courses)
courses_list = []
if action == "latest":
default_count = 20
if len(courses) < default_count:
default_count = len(courses)
courses_list = courses[0:default_count]
elif action == "my":
# filter my registered courses
for course in courses:
if registered_for_course(course, user):
courses_list.append(course)
elif action == "rolling":
default_count = 5
courses_list = courses[0:default_count]
elif action == 'search':
keyword = request.GET.get("keyword")
if keyword:
for c in courses:
if keyword in c.org or keyword in c.id or keyword in c.display_name_with_default:
courses_list.append(c)
else:
courses_list = courses
return courses_list
courses = get_courses(user, request.META.get('HTTP_HOST'))
courses = sort_by_announcement(courses)
if action != "sync":
courses = get_courses_depend_action(courses)
return return_fixed_courses(request, courses, action)
def _course_json(course, course_id, url_name, position=0):
locator = loc_mapper().translate_location(course_id, course.location, published=False, add_entry_if_missing=True)
is_container = course.has_children
category = course.category
result = {
'display_name': course.display_name,
'id': unicode(locator),
'category': category,
'is_draft': getattr(course, 'is_draft', False),
'is_container': is_container
}
if category in ['sequential', 'chapter']:
url_name = url_name + '/' + course.url_name
elif category == "vertical":
result['unit_url'] = url_name + '/' + str(position)
elif category == "video":
result[category + '_url'] = course.html5_sources[0] if len(course.html5_sources) > 0 else ""
if is_container:
children = []
for idx, child in enumerate(course.get_children()):
try:
children.append(_course_json(child, course_id, url_name, (idx + 1)))
except:
continue
result['children'] = children
return result
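# Illustrative shape of the tree built by _course_json above (hypothetical
# display names; real 'id' values come from the locator mapping):
#   {'display_name': 'Week 1', 'id': '...', 'category': 'chapter',
#    'is_draft': False, 'is_container': True,
#    'children': [{'display_name': 'Lesson 1', 'category': 'sequential',
#                  'is_container': True, 'children': [...]}]}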
def mobi_course_info(request, course, action=None):
course_logo = course_image_url(course)
host = request.get_host()
try:
user = request.user
except:
user = AnonymousUser()
cp = 0.0
if not hasattr(course,"display_course_price_with_default"):
cp = 0.0
else:
if not course.display_course_price_with_default:
cp = 0.0
else:
cp = course.display_course_price_with_default
result = {
"id": course.id.replace('/', '.'),
"name": course.display_name_with_default,
"logo": host + course_logo,
"org": course.display_org_with_default,
"course_number": course.display_number_with_default,
"start_date": course.start.strftime("%Y-%m-%d"),
"course_category": course.course_category,
"course_level": course.course_level,
"registered": registered_for_course(course, user),
"about": get_course_about_section(course, 'short_description'),
"category": course.category,
"course_price": float('%0.2f' % int(cp))
}
def compute_action_imgurl(imgname):
course_mini_info = course.id.split('/')
asset_location = StaticContent.compute_location(course_mini_info[0], course_mini_info[1], imgname)
return host + StaticContent.get_url_path_from_location(asset_location)
for imgname in ['mobi', 'mobi_r', 'ott_r']:
try:
result[imgname] = compute_action_imgurl(imgname + '_logo.jpg')
except:
result[imgname] = host + course_logo
return result
def bs_mobi_course_info(request, course, action=None):
course_logo = course_image_url(course)
host = 'http://mooc.xiaodun.cn'
try:
user = request.user
except:
user = AnonymousUser()
cp = 0.0
if not hasattr(course,"display_course_price_with_default"):
cp = 0.0
else:
if not course.display_course_price_with_default:
cp = 0.0
else:
cp = course.display_course_price_with_default
result = {
"id": course.id.replace('/', '.'),
"name": course.display_name_with_default,
"logo": host + course_logo,
"org": course.display_org_with_default,
"course_number": course.display_number_with_default,
"start_date": course.start.strftime("%Y-%m-%d"),
"course_category": course.course_category,
"course_level": course.course_level,
"registered": registered_for_course(course, user),
"about": get_course_about_section(course, 'short_description'),
"category": course.category,
"course_price": float('%0.2f' % int(cp)),
"course_audit":course.course_audit
}
def compute_action_imgurl(imgname):
course_mini_info = course.id.split('/')
asset_location = StaticContent.compute_location(course_mini_info[0], course_mini_info[1], imgname)
return host + StaticContent.get_url_path_from_location(asset_location)
for imgname in ['mobi', 'mobi_r', 'ott_r']:
try:
result[imgname] = compute_action_imgurl(imgname + '_logo.jpg')
except:
result[imgname] = host + course_logo
return result
def _course_info_content(html_parsed):
"""
Constructs the HTML for the course info update, not including the header.
"""
if len(html_parsed) == 1:
# could enforce that update[0].tag == 'h2'
content = html_parsed[0].tail
else:
content = html_parsed[0].tail if html_parsed[0].tail is not None else ""
content += "\n".join([html.tostring(ele) for ele in html_parsed[1:]])
return content
def parse_updates_html_str(html_str):
course_upd_collection = []
if html_str == '':
return {"updates": course_upd_collection}
try:
course_html_parsed = html.fromstring(html_str)
except:
escaped = django.utils.html.escape(html_str)
course_html_parsed = html.fromstring(escaped)
print type(course_html_parsed)
if course_html_parsed.tag == 'section':
for index, update in enumerate(course_html_parsed):
if len(update) > 0:
content = _course_info_content(update)
computer_id = len(course_html_parsed) - index
payload = {
"id": computer_id,
"date": update.findtext("h2"),
"content": content
}
course_upd_collection.append(payload)
return {"updates": course_upd_collection}
def mobi_course_action(request, course_id, action):
try:
course_id_bak = course_id.replace('.', '/')
if action in ["updates", "handouts", "structure"]:
user = request.user
if not user:
user = AnonymousUser()
course = get_course_with_access(user, course_id_bak, 'load')
registered = registered_for_course(course, user)
if action == "updates" and registered:
# course_updates = get_course_info_section(request, course, action)
loc = Location(course.location.tag, course.location.org, course.location.course, 'course_info', action)
field_data_cache = FieldDataCache([], course.id, request.user)
course_module = get_module(
user,
request,
loc,
field_data_cache,
course.id,
wrap_xmodule_display=False,
static_asset_path=course.static_asset_path
)
return JsonResponse({'updates': [item for item in course_module.items if item["status"] != "deleted"]})
elif action == "handouts" and registered:
course_handouts = get_course_info_section(request, course, action)
return JsonResponse({"handouts": course_handouts})
elif action == "structure":
url_name = request.get_host() + '/m/courses/' + course_id_bak + '/courseware'
return JsonResponse(_course_json(course=course, course_id=course.location.course_id, url_name=url_name))
else:
raise Exception
else:
course = get_course_with_access(request.user, course_id_bak, 'see_exists')
return JsonResponse(mobi_course_info(request, course))
except:
return JsonResponse({"success": False, "errmsg": "access denied!"})
def render_accordion(request, course, chapter, section, field_data_cache):
"""
Draws navigation bar. Takes current position in accordion as
parameter.
If chapter and section are '' or None, renders a default accordion.
course, chapter, and section are the url_names.
Returns the html string
"""
# grab the table of contents
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
toc = toc_for_course(user, request, course, chapter, section, field_data_cache)
context = dict([('toc', toc),
('course_id', course.id),
('csrf', csrf(request)['csrf_token']),
('due_date_display_format', course.due_date_display_format)] + template_imports.items())
return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule):
"""
Get the xmodule.position's display item of an xmodule that has a position and
children. If xmodule has no position or is out of bounds, return the first child.
Returns None only if there are no children at all.
"""
if not hasattr(xmodule, 'position'):
return None
if xmodule.position is None:
pos = 0
else:
# position is 1-indexed.
pos = xmodule.position - 1
children = xmodule.get_display_items()
if 0 <= pos < len(children):
child = children[pos]
elif len(children) > 0:
# Something is wrong. Default to first child
child = children[0]
else:
child = None
return child
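# Example (illustrative): with xmodule.position == 3 and five children this
# returns children[2]; with position == 9 (out of bounds) it falls back to
# children[0]; with no children at all it returns None.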
def redirect_to_course_position(course_module):
"""
Return a redirect to the user's current place in the course.
If this is the user's first time, redirects to COURSE/CHAPTER/SECTION.
If this isn't the user's first time, redirects to COURSE/CHAPTER,
and the view will find the current section and display a message
about reusing the stored position.
If there is no current position in the course or chapter, then selects
the first child.
"""
urlargs = {'course_id': course_module.id}
chapter = get_current_child(course_module)
if chapter is None:
# oops. Something bad has happened.
raise Http404("No chapter found when loading current position in course")
urlargs['chapter'] = chapter.url_name
if course_module.position is not None:
return redirect(reverse('courseware_chapter', kwargs=urlargs))
# Relying on default of returning first child
section = get_current_child(chapter)
if section is None:
raise Http404("No section found when loading current position in course")
urlargs['section'] = section.url_name
return redirect(reverse('courseware_section', kwargs=urlargs))
def save_child_position(seq_module, child_name):
"""
child_name: url_name of the child
"""
for position, c in enumerate(seq_module.get_display_items(), start=1):
if c.url_name == child_name:
# Only save if position changed
if position != seq_module.position:
seq_module.position = position
# Save this new position to the underlying KeyValueStore
seq_module.save()
def chat_settings(course, user):
"""
Returns a dict containing the settings required to connect to a
Jabber chat server and room.
"""
domain = getattr(settings, "JABBER_DOMAIN", None)
if domain is None:
log.warning('You must set JABBER_DOMAIN in the settings to '
'enable the chat widget')
return None
return {
'domain': domain,
# Jabber doesn't like slashes, so replace with dashes
'room': "{ID}_class".format(ID=course.id.replace('/', '-')),
'username': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
# TODO: clearly this needs to be something other than the username
# should also be something that's not necessarily tied to a
# particular course
'password': "{USER}@{DOMAIN}".format(
USER=user.username, DOMAIN=domain
),
}
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def index(request, course_id, chapter=None, section=None,
position=None):
"""
Displays courseware accordion and associated content. If course, chapter,
and section are all specified, renders the page, or returns an error if they
are invalid.
If section is not specified, displays the accordion opened to the right chapter.
If neither chapter nor section is specified, redirects to the user's most recent
chapter, or the first chapter if this is the user's first visit.
Arguments:
- request : HTTP request
- course_id : course id (str: ORG/course/URL_NAME)
- chapter : chapter url_name (str)
- section : section url_name (str)
- position : position in module, eg of <sequential> module (str)
Returns:
- HTTPresponse
"""
user = User.objects.prefetch_related("groups").get(id=request.user.id)
request.user = user # keep just one instance of User
course = get_course_with_access(user, course_id, 'load', depth=2)
staff_access = has_access(user, course, 'staff')
registered = registered_for_course(course, user)
if not registered:
# TODO (vshnayder): do course instructors need to be registered to see course?
log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.url())
return redirect(reverse('about_course', args=[course.id]))
masq = setup_masquerade(request, staff_access)
try:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, user, course, depth=2)
course_module = get_module_for_descriptor(user, request, course, field_data_cache, course.id)
if course_module is None:
log.warning(u'If you see this, something went wrong: if we got this'
u' far, should have gotten a course module for this user')
return redirect(reverse('about_course', args=[course.id]))
if chapter is None:
return redirect_to_course_position(course_module)
context = {
'csrf': csrf(request)['csrf_token'],
'accordion': render_accordion(request, course, chapter, section, field_data_cache),
'COURSE_TITLE': course.display_name_with_default,
'course': course,
'init': '',
'fragment': Fragment(),
'staff_access': staff_access,
'masquerade': masq,
'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa'),
'reverifications': fetch_reverify_banner_info(request, course_id),
}
# Only show the chat if it's enabled by the course and in the
# settings.
show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
if show_chat:
context['chat'] = chat_settings(course, user)
# If we couldn't load the chat settings, then don't show
# the widget in the courseware.
if context['chat'] is None:
show_chat = False
context['show_chat'] = show_chat
chapter_descriptor = course.get_child_by(lambda m: m.url_name == chapter)
if chapter_descriptor is not None:
save_child_position(course_module, chapter)
else:
raise Http404('No chapter descriptor found with name {}'.format(chapter))
chapter_module = course_module.get_child_by(lambda m: m.url_name == chapter)
if chapter_module is None:
# User may be trying to access a chapter that isn't live yet
if masq=='student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masq as student: no chapter %s' % chapter)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
if section is not None:
section_descriptor = chapter_descriptor.get_child_by(lambda m: m.url_name == section)
if section_descriptor is None:
# Specifically asked-for section doesn't exist
if masq=='student': # if staff is masquerading as student be kinder, don't 404
log.debug('staff masq as student: no section %s' % section)
return redirect(reverse('courseware', args=[course.id]))
raise Http404
# cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
# which will prefetch the children more efficiently than doing a recursive load
section_descriptor = modulestore().get_instance(course.id, section_descriptor.location, depth=None)
# Load all descendants of the section, because we're going to display its
# html, which in general will need all of its children
section_field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course_id, user, section_descriptor, depth=None)
section_module = get_module_for_descriptor(request.user,
request,
section_descriptor,
section_field_data_cache,
course_id,
position
)
if section_module is None:
# User may be trying to be clever and access something
# they don't have access to.
raise Http404
# Save where we are in the chapter
save_child_position(chapter_module, section)
context['fragment'] = section_module.render('student_view')
context['section_title'] = section_descriptor.display_name_with_default
else:
# section is none, so display a message
prev_section = get_current_child(chapter_module)
if prev_section is None:
# Something went wrong -- perhaps this chapter has no sections visible to the user
raise Http404
prev_section_url = reverse('courseware_section', kwargs={'course_id': course_id,
'chapter': chapter_descriptor.url_name,
'section': prev_section.url_name})
context['fragment'] = Fragment(content=render_to_string(
'courseware/welcome-back.html',
{
'course': course,
'chapter_module': chapter_module,
'prev_section': prev_section,
'prev_section_url': prev_section_url
}
))
result = render_to_response('courseware/courseware.html', context)
except Exception as e:
if isinstance(e, Http404):
# let it propagate
raise
# In production, don't want to let a 500 out for any reason
if settings.DEBUG:
raise
else:
log.exception("Error in index view: user={user}, course={course},"
" chapter={chapter} section={section}"
"position={position}".format(
user=user,
course=course,
chapter=chapter,
section=section,
position=position
))
try:
result = render_to_response('courseware/courseware-error.html',
{'staff_access': staff_access,
'course': course})
except:
# Let the exception propagate, relying on global config to
# at least return a nice error message
log.exception("Error while rendering courseware-error page")
raise
return result
@ensure_csrf_cookie
def jump_to_id(request, course_id, module_id):
"""
This entry point allows for a shorter version of a jump to where just the id of the element is
passed in. This assumes that id is unique within the course_id namespace
"""
course_location = CourseDescriptor.id_to_location(course_id)
items = modulestore().get_items(
Location('i4x', course_location.org, course_location.course, None, module_id),
course_id=course_id
)
if len(items) == 0:
raise Http404("Could not find id = {0} in course_id = {1}. Referer = {2}".
format(module_id, course_id, request.META.get("HTTP_REFERER", "")))
if len(items) > 1:
log.warning("Multiple items found with id = {0} in course_id = {1}. Referer = {2}. Using first found {3}...".
format(module_id, course_id, request.META.get("HTTP_REFERER", ""), items[0].location.url()))
return jump_to(request, course_id, items[0].location.url())
@ensure_csrf_cookie
def jump_to(request, course_id, location):
"""
Show the page that contains a specific location.
If the location is invalid or not in any class, return a 404.
Otherwise, delegates to the index view to figure out whether this user
has access, and what they should see.
"""
# Complain if the location isn't valid
try:
location = Location(location)
except InvalidLocationError:
raise Http404("Invalid location")
# Complain if there's no data for this location
try:
(course_id, chapter, section, position) = path_to_location(modulestore(), course_id, location)
except ItemNotFoundError:
raise Http404(u"No data at this location: {0}".format(location))
except NoPathToItem:
raise Http404(u"This location is not in any class: {0}".format(location))
# choose the appropriate view (and provide the necessary args) based on the
# args provided by the redirect.
# Rely on index to do all error handling and access control.
if chapter is None:
return redirect('courseware', course_id=course_id)
elif section is None:
return redirect('courseware_chapter', course_id=course_id, chapter=chapter)
elif position is None:
return redirect('courseware_section', course_id=course_id, chapter=chapter, section=section)
else:
return redirect('courseware_position', course_id=course_id, chapter=chapter, section=section, position=position)
def prn_obj(obj):
print ', '.join(['%s:%s' % item for item in obj.__dict__.items()])
@ensure_csrf_cookie
def course_info(request, course_id):
"""
Display the course's info.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
# for business system, counting course open times
# with instructor and clusteruser
locator = loc_mapper().translate_location(course_id, course.location, published=False, add_entry_if_missing=True)
instructors = CourseInstructorRole(locator).users_with_role()
staff = CourseStaffRole(locator).users_with_role()
instructor = 'admin'
if len(instructors)>0:
instructor = instructors[0].username
elif len(staff)>0:
instructor = staff[0].username
# for business system, counting course open times
staff_access = has_access(request.user, course, 'staff')
masq = setup_masquerade(request, staff_access) # allow staff to toggle masquerade on info page
reverifications = fetch_reverify_banner_info(request, course_id)
context = {
'request': request,
'course_id': course_id,
'cache': None,
'course': course,
'staff_access': staff_access,
'masquerade': masq,
'reverifications': reverifications,
'instructor': instructor,
'clusteruser': request.user
}
return render_to_response('courseware/info.html', context)
@ensure_csrf_cookie
def static_tab(request, course_id, tab_slug):
"""
Display the courses tab with the given name.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
tab = tabs.get_static_tab_by_slug(course, tab_slug)
if tab is None:
raise Http404
contents = tabs.get_static_tab_contents(
request,
course,
tab
)
if contents is None:
raise Http404
staff_access = has_access(request.user, course, 'staff')
return render_to_response('courseware/static_tab.html',
{'course': course,
'tab': tab,
'tab_contents': contents,
'staff_access': staff_access, })
# TODO arjun: remove when custom tabs in place, see courseware/syllabus.py
@ensure_csrf_cookie
def syllabus(request, course_id):
"""
Display the course's syllabus.html, or 404 if there is no such course.
Assumes the course_id is in a valid format.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
return render_to_response('courseware/syllabus.html', {'course': course,
'staff_access': staff_access, })
def registered_for_course(course, user):
"""
Return True if user is registered for course, else False
"""
if user is None:
return False
if user.is_authenticated():
return CourseEnrollment.is_enrolled(user, course.id)
else:
return False
def demd5_webservicestr(srstr):
if not isinstance(srstr, str):
return ""
md5obj = hashlib.md5()
md5obj.update(srstr)
return md5obj.hexdigest()
def md5_str(srstr):
md5obj = hashlib.md5()
md5obj.update(srstr)
return md5obj.hexdigest()
def purchase_authenticate(request, course_id):
user = request.user
course = get_course_with_access(request.user, course_id, 'see_exists')
# course.course_uid= str(course.course_uuid).replace('-','')
course.course_uid= md5_str(course_id)
re_jsondict = {'authenticated': False}
if not (user is None or isinstance(user, AnonymousUser)):
xml_params = render_to_string('xmls/auth_purchase.xml', {'username': user.username, 'course_uuid': course.course_uid})
try:
url = "{}/services/OssWebService?wsdl".format(settings.OPER_SYS_DOMAIN)
client = Client(url)
aresult = client.service.confirmBillEvent(xml_params, demd5_webservicestr(xml_params + "VTEC_#^)&*("))
print aresult.encode('utf-8')
redict = xmltodict.parse(aresult.encode('utf-8'))
if int(redict['EVENTRETURN']['RESULT']) in [0, 1,11]:
# if int(redict['EVENTRETURN']['RESULT']) in [1]:
re_jsondict['authenticated'] = True
pdate = datetime.now().strftime("%Y-%m-%d")
# push course trade data to business system
xml_data_str = render_to_string('xmls/pushed_course_data.xml', {'course': course, 'user': user, 'pdate': pdate})
# DES encode data
pad = lambda s: s + (8 - len(s) % 8) * chr(8 - len(s) % 8)
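# (note) pads the plaintext to a multiple of 8 bytes, PKCS#5-style: each
# pad byte equals the number of bytes added, since DES in ECB mode only
# accepts full 8-byte blocks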
# print '=-=-=-=-=-=-=-=-3'
# print pad
des_enxml_str = base64.b64encode(DES.new(settings.SSO_KEY[0:8], DES.MODE_ECB).encrypt(pad(xml_data_str.encode('utf-8'))))
bs_host = settings.XIAODUN_BACK_HOST # test dev "http://192.168.1.78:8081/xiaodun"
push_url = "{}/service/course/add?data={}".format(bs_host, des_enxml_str)
socket.setdefaulttimeout(2)
req = urllib2.Request(push_url)
# TODO: record the push result in a column; on failure, package the data and resend it via a backend scheduler
urllib2.urlopen(req)
else:
errmsg = redict['EVENTRETURN']['DESCRIPTION']['DESC'].strip()
re_jsondict['errmsg'] = errmsg if errmsg else ""
except Exception,e:
print e
re_jsondict['errmsg'] = '服务器错误,稍后再试!'  # "Server error, please try again later!"
return JsonResponse(re_jsondict)
def course_is_enroll(request, course_id):
re_jsondict = {'authenticated': False}
user = request.user
course = get_course_with_access(user, course_id, 'see_exists')
registered = registered_for_course(course, user)
if registered:
re_jsondict = {'authenticated': True}
return JsonResponse(re_jsondict)
@ensure_csrf_cookie
@cache_if_anonymous
def course_about(request, course_id):
print '--------------------------------'
if microsite.get_value(
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
):
raise Http404
bs_course_id = course_id.replace('/','.')
course = get_course_with_access(request.user, course_id, 'see_exists')
print '-==================='
print course_id.encode('utf-8')
# course.course_uid = str(course.course_uuid)[-12:]
# course.course_uid = str(course.course_uuid).replace('-','')
course.course_uid = md5_str(course_id)
print course.course_uid
registered = registered_for_course(course, request.user)
if has_access(request.user, course, 'load'):
course_target = reverse('info', args=[course.id])
else:
course_target = reverse('about_course', args=[course.id])
show_courseware_link = (has_access(request.user, course, 'load') or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
# Note: this is a flow for payment for course registration, not the Verified Certificate flow.
registration_price = 0
in_cart = False
reg_then_add_to_cart_link = ""
if (settings.FEATURES.get('ENABLE_SHOPPING_CART') and
settings.FEATURES.get('ENABLE_PAID_COURSE_REGISTRATION')):
registration_price = CourseMode.min_course_price_for_currency(course_id,
settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
if request.user.is_authenticated():
cart = shoppingcart.models.Order.get_cart_for_user(request.user)
in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_id)
reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
reg_url=reverse('register_user'), course_id=course.id)
# see if we have already filled up all allowed enrollments
is_course_full = CourseEnrollment.is_course_full(course)
# load wsdl client
# TODO setting operation system url to common setting which load when sys boot
print '=============course_about======================='
print registered
print course_target
print registration_price
print in_cart
print reg_then_add_to_cart_link
print show_courseware_link
print is_course_full
print '{}/order/order!pay.do?username={}&courseid={}'.format(settings.XIAODUN_BACK_HOST_EXTERNAL, request.user.username,bs_course_id)
print '*********************'
return render_to_response('courseware/course_about.html',
{'course': course,
'registered': registered,
'course_target': course_target,
'registration_price': registration_price,
'in_cart': in_cart,
'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
'show_courseware_link': show_courseware_link,
'is_course_full': is_course_full,
'purchase_link': '{}/order/order!pay.do?username={}&courseid={}'.format(settings.XIAODUN_BACK_HOST_EXTERNAL, request.user.username,bs_course_id),
'purchased': registered})
@ensure_csrf_cookie
@cache_if_anonymous
def mktg_course_about(request, course_id):
"""
This is the button that gets put into an iframe on the Drupal site
"""
try:
course = get_course_with_access(request.user, course_id, 'see_exists')
except (ValueError, Http404) as e:
# if a course does not exist yet, display a coming
# soon button
return render_to_response(
'courseware/mktg_coming_soon.html', {'course_id': course_id}
)
registered = registered_for_course(course, request.user)
if has_access(request.user, course, 'load'):
course_target = reverse('info', args=[course.id])
else:
course_target = reverse('about_course', args=[course.id])
allow_registration = has_access(request.user, course, 'enroll')
show_courseware_link = (has_access(request.user, course, 'load') or
settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
course_modes = CourseMode.modes_for_course(course.id)
return render_to_response(
'courseware/mktg_course_about.html',
{
'course': course,
'registered': registered,
'allow_registration': allow_registration,
'course_target': course_target,
'show_courseware_link': show_courseware_link,
'course_modes': course_modes,
}
)
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
def calendar(request, course_id, student_id=None):
"""
Wraps "_progress" with the manual_transaction context manager just in case
there are unanticipated errors.
"""
with grades.manual_transaction():
return _calendar(request, course_id, student_id)
def cal_getevents(request, course_id):
course_id = course_id.replace("/",".")
events_json = []
for event in modulestore("course_calendar").range_events(course_id,request.GET.get("start"),request.GET.get("end")):
events_json.append({"id":event["id"],"title":event["calendar"]["title"],"start":event["calendar"]["start"],"end":event["calendar"]["end"]})
return JsonResponse(events_json)
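# Illustrative response shape for cal_getevents (values here are hypothetical):
# [{"id": 3, "title": "Quiz review", "start": "2014-03-01", "end": "2014-03-02"}]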
def _calendar(request, course_id, student_id):
"""
    Unwrapped version of "calendar".
User progress. We show the grade bar and every problem score.
Course staff are allowed to see the progress of students in their class.
"""
course = get_course_with_access(request.user, course_id, 'load', depth=None)
staff_access = has_access(request.user, course, 'staff')
if student_id is None or student_id == request.user.id:
# always allowed to see your own profile
student = request.user
else:
# Requesting access to a different student's profile
if not staff_access:
raise Http404
student = User.objects.get(id=int(student_id))
# NOTE: To make sure impersonation by instructor works, use
# student instead of request.user in the rest of the function.
# The pre-fetching of groups is done to make auth checks not require an
# additional DB lookup (this kills the Progress page in particular).
student = User.objects.prefetch_related("groups").get(id=student.id)
courseware_summary = grades.progress_summary(student, request, course)
grade_summary = grades.grade(student, request, course)
if courseware_summary is None:
#This means the student didn't have access to the course (which the instructor requested)
raise Http404
context = {
'course': course,
'courseware_summary': courseware_summary,
'grade_summary': grade_summary,
'staff_access': staff_access,
'student': student,
'reverifications': fetch_reverify_banner_info(request, course_id)
}
with grades.manual_transaction():
response = render_to_response('courseware/calendar.html', context)
return response
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
def progress(request, course_id, student_id=None):
"""
Wraps "_progress" with the manual_transaction context manager just in case
there are unanticipated errors.
"""
with grades.manual_transaction():
return _progress(request, course_id, student_id)
def _progress(request, course_id, student_id):
"""
Unwrapped version of "progress".
User progress. We show the grade bar and every problem score.
Course staff are allowed to see the progress of students in their class.
"""
course = get_course_with_access(request.user, course_id, 'load', depth=None)
staff_access = has_access(request.user, course, 'staff')
if student_id is None or student_id == request.user.id:
# always allowed to see your own profile
student = request.user
else:
# Requesting access to a different student's profile
if not staff_access:
raise Http404
student = User.objects.get(id=int(student_id))
# NOTE: To make sure impersonation by instructor works, use
# student instead of request.user in the rest of the function.
# The pre-fetching of groups is done to make auth checks not require an
# additional DB lookup (this kills the Progress page in particular).
student = User.objects.prefetch_related("groups").get(id=student.id)
courseware_summary = grades.progress_summary(student, request, course)
grade_summary = grades.grade(student, request, course)
if courseware_summary is None:
#This means the student didn't have access to the course (which the instructor requested)
raise Http404
context = {
'course': course,
'courseware_summary': courseware_summary,
'grade_summary': grade_summary,
'staff_access': staff_access,
'student': student,
'reverifications': fetch_reverify_banner_info(request, course_id)
}
with grades.manual_transaction():
response = render_to_response('courseware/progress.html', context)
return response
def fetch_reverify_banner_info(request, course_id):
"""
Fetches needed context variable to display reverification banner in courseware
"""
reverifications = defaultdict(list)
user = request.user
if not user.id:
return reverifications
enrollment = CourseEnrollment.get_or_create_enrollment(request.user, course_id)
course = course_from_id(course_id)
info = single_course_reverification_info(user, course, enrollment)
if info:
reverifications[info.status].append(info)
return reverifications
@login_required
def submission_history(request, course_id, student_username, location):
"""Render an HTML fragment (meant for inclusion elsewhere) that renders a
history of all state changes made by this user for this problem location.
Right now this only works for problems because that's all
StudentModuleHistory records.
"""
course = get_course_with_access(request.user, course_id, 'load')
staff_access = has_access(request.user, course, 'staff')
# Permission Denied if they don't have staff access and are trying to see
# somebody else's submission history.
if (student_username != request.user.username) and (not staff_access):
raise PermissionDenied
try:
student = User.objects.get(username=student_username)
student_module = StudentModule.objects.get(course_id=course_id,
module_state_key=location,
student_id=student.id)
except User.DoesNotExist:
return HttpResponse(escape("User {0} does not exist.".format(student_username)))
except StudentModule.DoesNotExist:
return HttpResponse(escape("{0} has never accessed problem {1}".format(student_username, location)))
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
# If no history records exist, let's force a save to get history started.
if not history_entries:
student_module.save()
history_entries = StudentModuleHistory.objects.filter(
student_module=student_module
).order_by('-id')
context = {
'history_entries': history_entries,
'username': student.username,
'location': location,
'course_id': course_id
}
return render_to_response('courseware/submission_history.html', context)
def get_courses_id(request):
"""
Return courses based on request params
"""
try:
user = request.user
except:
user = AnonymousUser()
courses = get_courses(user, request.META.get('HTTP_HOST'))
courses = sort_by_announcement(courses)
print len(courses)
courses = sorted(courses,key=lambda course: course.start)
all_course_id = [c.id.replace('/','.') for c in courses]
return JsonResponse(all_course_id)
def get_course_info(request):
course_id = request.GET.get("course_id")
print '===============course_id==============='
print course_id.encode('utf-8')
course_id = course_id.replace('.','/')
course = get_course_by_id(course_id)
course_list = []
try:
course_json = bs_mobi_course_info(request, course, 'sync')
course_list.append(course_json)
except:
pass
return JsonResponse({"course-list": course_list})
def record_login_info(request):
user = request.user
user_profile = UserProfile.objects.get(user=user)
re_json = {"success": False}
role = 1
if user_profile.profile_role == "th":
role = 2
request_host = settings.XIAODUN_BACK_HOST
request_url = '{}/check/check!check.do?username={}&role={}'.format(request_host, user.username,role)
socket.setdefaulttimeout(10)
try:
req = urllib2.Request(request_url)
resp = urllib2.urlopen(req)
content = resp.read()
request_json = simplejson.loads(content)
if request_json.get('success', ''):
re_json = {"success": True}
except:
pass
return JsonResponse(re_json)
|
XiaodunServerGroup/xiaodun-platform
|
lms/djangoapps/courseware/views.py
|
Python
|
agpl-3.0
| 50,293
|
[
"VisIt"
] |
af92c62ad7e9e1730d1ed6a06b8ed9a39d8140e8d240a9f1537ac8fd1a9d24a1
|
import peel
import pymolecule
#import numpy
my_params = peel.defaultParams
my_protein = pymolecule.Molecule()
my_protein.fileio.load_pdb_into('2gfc_reduced.pdb', serial_reindex=True, resseq_reindex=False)
my_peel = peel.peel(my_protein, my_params)
my_peel.write_vmd_script('visualize_2gfc.vmd', peel.defaultParams)
#povmeMap = numpy.ones((13,13,13))
#my_peel.color_povme_map(povmeMap, [0,13,0,13,0,13], 1)
# If given no "features" argument, create_feature_maps will create maps of all features
my_feature_maps = my_peel.create_feature_maps([10,80,-30,45,-30,30], 1)
my_feature_maps['hbondAcceptor'].write_pdb('HBA.pdb')
my_feature_maps['hbondAcceptor'].write_dx_file('HBA.dx')
my_feature_maps['hbondDonor'].write_pdb('HBD.pdb')
my_feature_maps['hbondDonor'].write_dx_file('HBD.dx')
my_feature_maps['aromatic'].write_pdb('ARO.pdb')
my_feature_maps['aromatic'].write_dx_file('ARO.dx')
my_feature_maps['hydrophobic'].write_pdb('HPB.pdb')
my_feature_maps['hydrophobic'].write_dx_file('HPB.dx')
my_feature_maps['hydrophilic'].write_pdb('HPL.pdb')
my_feature_maps['hydrophilic'].write_dx_file('HPL.dx')
#my_feature_maps['hydrophobicity'].write_pdb('HPBTY.pdb')
#my_feature_maps['hydrophobicity'].write_dx_file('HPBTY.dx')
print "Done!"
|
j-wags/POVME
|
binana/tests/peel_2gfc_basic/2gfcTest.py
|
Python
|
gpl-3.0
| 1,236
|
[
"VMD"
] |
ace9a9cf2f61e28d75a7b5b4b078d72868b88706001036405225d5e479af478a
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import abc
import logging
import re
import sys
from io import IOBase
from logging import Handler, Logger, StreamHandler
from typing import IO, Optional
# 7-bit C1 ANSI escape sequences
ANSI_ESCAPE = re.compile(r'\x1B[@-_][0-?]*[ -/]*[@-~]')
def remove_escape_codes(text: str) -> str:
"""
    Remove ANSI escape codes from a string. It's used to remove
    "colors" from log messages.
"""
return ANSI_ESCAPE.sub("", text)
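# Illustrative behaviour (not taken from this module's tests):
#   remove_escape_codes("\x1b[31mERROR\x1b[0m") -> "ERROR"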
class LoggingMixin:
"""Convenience super-class to have a logger configured with the class name"""
_log: Optional[logging.Logger] = None
def __init__(self, context=None):
self._set_context(context)
@property
def log(self) -> Logger:
"""Returns a logger."""
if self._log is None:
self._log = logging.getLogger(self.__class__.__module__ + '.' + self.__class__.__name__)
return self._log
def _set_context(self, context):
if context is not None:
set_context(self.log, context)
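# Illustrative use of LoggingMixin (the subclass here is hypothetical):
#   class MyHook(LoggingMixin):
#       def run(self):
#           self.log.info("running")   # logger is named <module>.MyHook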
class ExternalLoggingMixin:
"""Define a log handler based on an external service (e.g. ELK, StackDriver)."""
@property
@abc.abstractmethod
def log_name(self) -> str:
"""Return log name"""
@abc.abstractmethod
def get_external_log_url(self, task_instance, try_number) -> str:
"""Return the URL for log visualization in the external service."""
@property
@abc.abstractmethod
def supports_external_link(self) -> bool:
"""Return whether handler is able to support external links."""
# We have to ignore typing errors here because Python's I/O classes are a mess and do not
# share the type hierarchy defined by `typing.IO` - they violate the Liskov Substitution Principle.
# While it is fine to derive from IOBase (and a good thing to do, since it provides a base
# implementation for IO-implementing classes), it is impossible to make such classes work with
# the IO generics (and apparently that was never intended).
# See more: https://giters.com/python/typeshed/issues/6077
class StreamLogWriter(IOBase, IO[str]): # type: ignore[misc]
"""Allows to redirect stdout and stderr to logger"""
encoding: None = None
def __init__(self, logger, level):
"""
:param log: The log level method to write to, ie. log.debug, log.warning
:return:
"""
self.logger = logger
self.level = level
self._buffer = ''
def close(self):
"""
Provide close method, for compatibility with the io.IOBase interface.
This is a no-op method.
"""
@property
def closed(self):
"""
Returns False to indicate that the stream is not closed, as it will be
open for the duration of Airflow's lifecycle.
For compatibility with the io.IOBase interface.
"""
return False
def _propagate_log(self, message):
"""Propagate message removing escape codes."""
self.logger.log(self.level, remove_escape_codes(message))
def write(self, message):
"""
Do whatever it takes to actually log the specified logging record
:param message: message to log
"""
if not message.endswith("\n"):
self._buffer += message
else:
self._buffer += message.rstrip()
self.flush()
def flush(self):
"""Ensure all logging output has been flushed"""
buf = self._buffer
if len(buf) > 0:
self._buffer = ''
self._propagate_log(buf)
def isatty(self):
"""
Returns False to indicate the fd is not connected to a tty(-like) device.
For compatibility reasons.
"""
return False
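# Illustrative use of StreamLogWriter (a sketch, not part of this module's API):
#   logger = logging.getLogger("airflow.task")
#   sys.stdout = StreamLogWriter(logger, logging.INFO)
#   print("hello")   # buffered, then emitted as an INFO record on flush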
class RedirectStdHandler(StreamHandler):
"""
    This class is like a StreamHandler using sys.stderr/stdout, but always uses
    whatever sys.stderr/stdout is currently set to rather than the value at
    handler construction time.
"""
def __init__(self, stream):
if not isinstance(stream, str):
raise Exception(
"Cannot use file like objects. Use 'stdout' or 'stderr' as a str and without 'ext://'."
)
self._use_stderr = True
if 'stdout' in stream:
self._use_stderr = False
# StreamHandler tries to set self.stream
Handler.__init__(self)
@property
def stream(self):
"""Returns current stream."""
if self._use_stderr:
return sys.stderr
return sys.stdout
def set_context(logger, value):
"""
Walks the tree of loggers and tries to set the context for each handler
:param logger: logger
:param value: value to set
"""
_logger = logger
while _logger:
for handler in _logger.handlers:
try:
handler.set_context(value)
except AttributeError:
# Not all handlers need to have context passed in so we ignore
# the error when handlers do not have set_context defined.
pass
if _logger.propagate is True:
_logger = _logger.parent
else:
_logger = None
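# Illustrative use of set_context (handlers that define set_context, such as
# Airflow's file task handlers, pick up the value; shown here only as a sketch):
#   logger = logging.getLogger("airflow.task")
#   set_context(logger, task_instance)   # handlers without set_context are skipped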
|
Acehaidrey/incubator-airflow
|
airflow/utils/log/logging_mixin.py
|
Python
|
apache-2.0
| 6,009
|
[
"Elk"
] |
6c0367a1b8d051aa38d6d010833f539a8b02ba73e0de6fbb469f70c1bf68b1c0
|
#!/usr/bin/python
# =============================================================================================
# COPYRIGHT NOTICE
#
# Written by Michael R. Shirts <mrshirts@gmail.com>.
#
# Copyright (c) 2012 The University of Virginia. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it under the terms of
# the GNU General Public License as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
# =============================================================================================
import numpy
import numpy.random
import scipy
import scipy.optimize
import scipy.stats
import scipy.special  # used by MaxwellBoltzFit below
#==========================
# HELPER FUNCTIONS
#=========================
def check_twodtype(type): # True for single-slope (1-D) types; False for the joint 2-D fits
if (type=='dbeta-dpressure') or (type=='dbeta-dmu') or (type=='dbeta-ddmu'):
#print 'Warning: can\'t do 3D fits currently'
# throw an exception?
return False
else:
return True
def PrepConversionFactors(eunits='kJ/mol',punits='bar',vunits='nm^3'):
if (vunits == 'nm^3') and (punits == 'bar'):
# default conversion is gromacs nm3*bar to kJ/mol
# 1 nm3.bar = 0.00060221415 m3.bar / mol, 0.01 m3.bar/mol = 1 kJ/mol --> 6.0221415x10^-2 kJ/mol / nm3/bar
        pvconvert = 0.060221415
elif (vunits == 'kT' and punits == 'kT'):
pvconvert = 1
else:
print "I don't know the conversion factor for %s volume units and %s pressure units" % (vunits,punits)
    if (eunits == 'kJ/mol') or (eunits == 'kT'):
        econvert = 1
    elif (eunits == 'kcal/mol'):
        econvert = 1  # energies are assumed to already be in kcal/mol
        pvconvert /= 4.184 # change pvconvert to kcal/mol / nm3/bar
    else:
        print "I don't know those energy units"
        econvert = 1  # fall back so that muconvert below is still defined
    muconvert = -1*econvert
return econvert,pvconvert,muconvert
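# Illustrative call with the gromacs-style defaults (a sketch, not a test):
#   econvert, pvconvert, muconvert = PrepConversionFactors('kJ/mol','bar','nm^3')
#   # -> (1, 0.060221415, -1)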
def PrepStrings(type,vunits='kT'):
if (type == 'dbeta-constV'):
vt = 'E'
plinfit = r'$-(\beta_2-\beta_1)E$'
pnlfit = r'$\exp(-(\beta_2-\beta_1)E)$'
varstring = r'$E (kT)$'
legend_location = 'upper left'
elif (type == 'dbeta-constP'):
vt = 'H'
plinfit = r'$-(\beta_2-\beta_1)H$'
pnlfit = r'$\exp(-(\beta_2-\beta_1)H)$'
varstring = r'$H (kT)$'
legend_location = 'upper left'
elif (type == 'dbeta-constmu'):
#averages are A = <E>-mu<N>.
vt = 'A'
plinfit = r'$-(\beta_2-\beta_1)A$'
pnlfit = r'$\exp(-(\beta_2-\beta_1)A)$'
varstring = r'$A (kT)$'
legend_location = 'upper left'
elif (type == 'dbeta-constdmu'):
#averages are <E>-dmu<N>.
        vt = r'\langle E \rangle - \Delta \mu \langle N_2\rangle'
plinfit = r'$-(\beta_2-\beta_1)A$'
pnlfit = r'$\exp(-(\beta_2-\beta_1)A)$'
varstring = r'$A (kT)$'
legend_location = 'upper left'
elif (type == 'dpressure-constB'):
vt = 'V'
plinfit = r'$-\beta(P_2-P_1)V$'
pnlfit = r'$\exp(-\beta(P_2-P_1)V)$'
varstring = r'$V (' + vunits + r')$'
legend_location = 'upper right'
elif (type == 'dmu-constB'):
vt = 'N'
plinfit = r'$\beta(\mu_2-\mu_1)N$'
pnlfit = r'$\exp(\beta(\mu_2-\mu_1)N)$'
varstring = r'$N (' + 'number' + r')$'
legend_location = 'upper left'
elif (type == 'ddmu-constB'):
vt = 'N_2'
        plinfit = r'$\beta(\Delta \mu_2-\Delta \mu_1)N_2$'
        pnlfit = r'$\exp(\beta(\Delta \mu_2-\Delta \mu_1)N_2)$'
varstring = r'$N (' + 'number' + r')$'
legend_location = 'upper left'
elif (type == 'dbeta-dpressure') or (type == 'dbeta-dmu') or (type == 'dbeta-ddmu'):
vt = ''
plinfit = ''
pnlfit = ''
varstring = ''
legend_location = ''
else:
print "Type is not defined for plotting!"
pstring = 'ln(P_2(' + vt + ')/P_1(' + vt + '))'
return vt,pstring,plinfit,pnlfit,varstring,legend_location
def PrepInputs(N_k,conversions,type='dbeta-constV',beta=None,beta_ave=None,P=None,P_ave=None,mu=None,mu_ave=None,U_kn=None,V_kn=None,N_kn=None):
"""
useg can be either "scale", where uncertainties are scaled, or "subsample" resulting in subsampled data.
"""
pvconvert = conversions[1]
# convenience variables
N0 = N_k[0]
N1 = N_k[1]
maxN = numpy.max(N_k);
    # Currently ten types; fitting parameters are:
# NVT
# 1) free energy, dbeta, no P, N - constants are beta_ave, variables (vectors) are E
# NPT
# 2) free energy, dpressure - constants are p_ave, variables (vectors) are V
# 3) free energy, dbeta, constP - constants are beta_ave, variables (vectors) are H
# 4) free energy, dbeta, dpressure - constants are beta_ave, p_ave, variables (vectors) are E and V
# muPT
# 5) free energy, dmu = constants are mu_ave, variables (vectors) are N
# 6) free energy, dbeta, constmu - constants are beta_ave, variables (vectors) are A
# 7) free energy, dbeta, dmu - constants are beta_ave, mu_ave, variables (vectors) are E and N
#
# 8) free energy, ddmu = constants are dmu_ave, variables (vectors) are N_2
# 9) free energy, dbeta, constdmu = constants are beta_ave, variables (vectors) are E-\delta\mu\N
# 10) free energy, dbeta, ddmu - constants are beta_ave, dmu_ave, variables (vectors) are E and N_2
# NVT types
if (type == 'dbeta-constV'):
# allocate space
v = numpy.zeros([1,2,maxN],float) # the variables
vr = numpy.zeros([1,2,maxN],float)
const = numpy.zeros(1,float) # parameter constants
dp = numpy.zeros(1,float) # "true" change in constants
v[0,0,0:N0] = U_kn[0,0:N0]
v[0,1,0:N1] = U_kn[1,0:N1]
const[0] = 0.5*(beta[0] + beta[1])
dp[0] = beta[0] - beta[1]
# NPT types
elif (type == 'dbeta-constP'):
# allocate space
v = numpy.zeros([1,2,maxN],float) # the variables
vr = numpy.zeros([1,2,maxN],float)
const = numpy.zeros(1,float) # parameter constants
dp = numpy.zeros(1,float) # "true" change in constants
v[0,0,0:N0] = U_kn[0,0:N0] + conversions[1]*P_ave*V_kn[0,0:N0] # everything goes into energy units
v[0,1,0:N1] = U_kn[1,0:N1] + conversions[1]*P_ave*V_kn[1,0:N1]
const[0] = 0.5*(beta[0] + beta[1])
dp[0] = beta[0] - beta[1]
elif (type == 'dpressure-constB'):
# allocate space
v = numpy.zeros([1,2,maxN],float)
vr = numpy.zeros([1,2,maxN],float)
const = numpy.zeros(1,float)
dp = numpy.zeros(1,float)
v[0,0,0:N0] = V_kn[0,0:N0]
v[0,1,0:N1] = V_kn[1,0:N1]
const[0] = 0.5*pvconvert*beta_ave*(P[0] + P[1]) # units of 1/volume
dp[0] = pvconvert*beta_ave*(P[0] - P[1]) # units of 1/volume
elif (type == 'dbeta-dpressure'):
# allocate space
v = numpy.zeros([2,2,maxN],float)
vr = numpy.zeros([2,2,maxN],float)
const = numpy.zeros(2,float)
dp = numpy.zeros(2,float)
v[0,0,0:N0] = U_kn[0,0:N0]
v[0,1,0:N1] = U_kn[1,0:N1]
v[1,0,0:N0] = V_kn[0,0:N0]
v[1,1,0:N1] = V_kn[1,0:N1]
const[0] = 0.5*(beta[0] + beta[1]) # units of 1/E
const[1] = 0.5*pvconvert*(P[0] + P[1]) # units of E/V?
dp[0] = beta[0] - beta[1] # units of 1/Energy
dp[1] = pvconvert*(beta[0]*P[0] - beta[1]*P[1]) # units of 1/Volume
# mu V T types
elif (type == 'dbeta-constmu'):
# allocate space
v = numpy.zeros([1,2,maxN],float) # the variables
vr = numpy.zeros([1,2,maxN],float)
const = numpy.zeros(1,float) # parameter constants
dp = numpy.zeros(1,float) # "true" change in constants
v[0,0,0:N0] = U_kn[0,0:N0] - mu_ave*N_kn[0,0:N0] # everything goes into energy units
v[0,1,0:N1] = U_kn[1,0:N1] - mu_ave*N_kn[1,0:N1] # everything is in energy units.
const[0] = 0.5*(beta[0] + beta[1])
dp[0] = beta[0] - beta[1]
elif (type == 'dmu-constB'):
# allocate space
v = numpy.zeros([1,2,maxN],float)
vr = numpy.zeros([1,2,maxN],float)
const = numpy.zeros(1,float)
dp = numpy.zeros(1,float)
v[0,0,0:N0] = N_kn[0,0:N0]
v[0,1,0:N1] = N_kn[1,0:N1]
const[0] = -1*(beta_ave*(mu[0] + mu[1])) # units of 1/energy?
dp[0] = -1*(beta_ave*(mu[0] - mu[1])) # units of 1/energy?
elif (type == 'dbeta-dmu'):
# allocate space
v = numpy.zeros([2,2,maxN],float)
vr = numpy.zeros([2,2,maxN],float)
const = numpy.zeros(2,float)
dp = numpy.zeros(2,float)
v[0,0,0:N0] = U_kn[0,0:N0]
v[0,1,0:N1] = U_kn[1,0:N1]
v[1,0,0:N0] = N_kn[0,0:N0]
v[1,1,0:N1] = N_kn[1,0:N1]
const[0] = 0.5*(beta[0] + beta[1]) # units of 1/E
const[1] = -0.5*(mu[0] + mu[1]) # units of E/V?
dp[0] = beta[0] - beta[1] # units of 1/Energy
dp[1] = -(beta[0]*mu[0] - beta[1]*mu[1]) # units of 1/number?
else:
print "Warning: Type of analysis %s is not defined!" % (type)
return dp,const,v,vr
def LogLikelihood(x,N_k,const,v):
L = len(x)
N0 = N_k[0]
N1 = N_k[1]
N = N0+N1
M = numpy.log((1.0*N1)/(1.0*N0))
#D0 = M + beta_ave*x[0] + U0*x[1]
#D1 = M + beta_ave*x[0] + U1*x[1]
D0 = D1 = M + const[0]*x[0]
for i in range(L-1):
D0 = D0 + v[i,0,0:N0]*x[i+1]
D1 = D1 + v[i,1,0:N1]*x[i+1]
E0 = 1 + numpy.exp(D0)
E1 = 1 + numpy.exp(-D1)
# this is the negative of the log likelihood, since we want to maximize it using fmin
of = ((numpy.sum(numpy.log(E0)) + numpy.sum(numpy.log(E1))))/N
return of
def dLogLikelihood(x,N_k,const,v):
"""
Derivative with respect to the parameters, to aid the minimization.
"""
L = len(x)
N0 = N_k[0]
N1 = N_k[1]
N = N0+N1
M = numpy.log((1.0*N1)/(1.0*N0))
D0 = D1 = M + const[0]*x[0]
for i in range(L-1):
D0 = D0 + v[i,0,0:N0]*x[i+1]
D1 = D1 + v[i,1,0:N1]*x[i+1]
E0 = 1/(1 + numpy.exp(-D0))
E1 = 1/(1 + numpy.exp(D1))
g = numpy.zeros(L,dtype=numpy.float64)
#this is the gradient of -log likelihood
#g[0] = (1.0/N)*(numpy.sum(beta*E0) - numpy.sum(beta*E1))
#g[1] = (1.0/N)*(numpy.sum(U0*E0) - numpy.sum(U1*E1))
#g[2] = (1.0/N)*(numpy.sum(V0*E0) - numpy.sum(V1*E1))
g[0] = const[0]*(numpy.sum(E0) - numpy.sum(E1))
for i in range(L-1):
g[i+1] = numpy.sum(v[i,0,0:N0]*E0) - numpy.sum(v[i,1,0:N1]*E1)
return (1.0/N)*g
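# A finite-difference sanity check of the gradient above (illustrative only):
#   eps = 1e-6
#   for i in range(len(x)):
#       xp = x.copy(); xp[i] += eps
#       xm = x.copy(); xm[i] -= eps
#       approx = (LogLikelihood(xp,N_k,const,v)-LogLikelihood(xm,N_k,const,v))/(2*eps)
#       # approx should agree with dLogLikelihood(x,N_k,const,v)[i]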
def d2LogLikelihood(x,N_k,const,v):
"""
beta = const[0]
pave = const[1]
if D = M + beta*x[0] + x[1]*U
I = \sum_{i=1}^N [[-beta^2/S,-beta*U/S],[-beta*U/S,-U^2/S]] where S = [(1+exp(-D))*(1+exp(D))]
if D = M + beta*x[0] + x[1]*U + x[2]*V
    I = \sum_{i=1}^N [[-beta^2/S,-beta*U/S,-beta*V/S],[-beta*U/S,-U^2/S,-U*V/S],[-beta*V/S,-U*V/S,-V^2/S]] where S = [(1+exp(-D))*(1+exp(D))]
"""
L = len(x)
N0 = N_k[0]
N1 = N_k[1]
N = N0+N1
M = numpy.log((1.0*N1)/(1.0*N0))
vall = numpy.zeros([L-1,N],dtype=numpy.float64)
for i in range(L-1):
vall[i,0:N0] = v[i,0,0:N0]
vall[i,N0:N] = v[i,1,0:N1]
D = M + const[0]*x[0]
for i in range(L-1):
D = D + vall[i,:]*x[i+1]
E = (1 + numpy.exp(-D)) * (1 + numpy.exp(D))
hf = numpy.zeros([L,L,N],dtype=numpy.float64)
cones = const[0] * numpy.ones(N,dtype=numpy.float64)
# fix this to match the
for i in range(L):
if (i == 0):
a = cones
else:
a = vall[i-1,:]
for j in range(L):
if (j == 0):
b = cones
else:
b = vall[j-1,:]
hf[i,j,:] = a*b
# this is the hessian of the minimum function (not the max)
h = -numpy.sum(hf/E,axis=2)/N
return h
def SolveMaxLike(x, N_k, const, v, tol = 1e-10, maxiter=20):
converge = False
itol = 1e-2
rtol = 1e-2
lasttol = 100000;
for i in range(maxiter):
        lastx = x.copy()  # x is updated in place below, so keep a real copy
gx = numpy.transpose(dLogLikelihood(x,N_k,const,v))
nh = d2LogLikelihood(x,N_k,const,v)
dx = numpy.linalg.solve(nh,gx)
x += dx # adding, not subtracting because of the handling of negatives
rx = dx/x
checktol = numpy.sqrt(numpy.dot(dx,dx))
checkrtol = numpy.sqrt(numpy.dot(rx,rx))
        if (checkrtol < tol):
            converge = True
            break
        if (checkrtol > 1.0) and (checktol > lasttol): # we are possibly diverging. Switch to cg for a bit.
            x = scipy.optimize.fmin_cg(LogLikelihood,lastx,fprime=dLogLikelihood,gtol=itol,args=(N_k,const,v),disp=1)
            itol *= rtol
        lasttol = checktol
    if (not converge):
        print "Too many iterations, convergence failing"
return x
def MaxLikeUncertain(x,N_k,const,v,vave):
L = len(x)
d = numpy.zeros(L,float)
# multiply back by N, since we were dealing with smaller numbers for numerical robustness.
fi = -(N_k[0] + N_k[1])*d2LogLikelihood(x,N_k,const,v)
d2 = numpy.linalg.inv(fi)
    # We have a fit to the line y = m(x-Uave) + b, so to get the uncertainty in the free energy back, we need
    # to add m*Uave back to b. The uncertainty is var(b + m*Uave) = var(b) + Uave**2*var(m) + 2*Uave*cov(b,m)
    # For two dimensions, we have the line y = m1(x1-vave1) + m2(x2-vave2) + b
    # Uncertainty will be var(b + m1*vave1 + m2*vave2) = var(b) + vave1^2 var(m1) + vave2^2 var(m2)
    #                                                    + 2vave1 cov(m1,b) + 2vave2 cov(m2,b)
    #                                                    + 2vave1 vave2 cov(m1,m2)
d[0] = const[0]**2*d2[0,0]
for i in range(1,L):
d[0] += vave[i-1]**2*d2[i,i] + 2*vave[i-1]*d2[0,i] # should this last one be plus or minus
d[i] = d2[i,i]
for j in range(i+1,L-1):
d[0] += 2*vave[i-1]*vave[j-1]*d2[i,j]
d = numpy.sqrt(d)
return d
def MaxLikeParams(N_k,dp,const,v,df=0,analytic_uncertainty=False,g=1):
L = len(const)
optimum = numpy.zeros(L+1,float)
vave = numpy.zeros(L,dtype=numpy.float64)
vmod = numpy.zeros([L,2,numpy.max(N_k)],dtype=numpy.float64)
# for numerical stability, we need to translate the curve
for i in range(L):
vave[i] = (numpy.sum(v[i,0,0:N_k[0]]) + numpy.sum(v[i,1,0:N_k[1]]))/numpy.sum(N_k)
vmod[i,0,0:N_k[0]] = v[i,0,0:N_k[0]] - vave[i]
vmod[i,1,0:N_k[1]] = v[i,1,0:N_k[1]] - vave[i]
xstart = numpy.zeros(L+1,float)
for i in range(L):
xstart[0] += vave[i]*dp[i]
xstart[i+1] = dp[i]
xstart[0] += df
xstart[0] /= const[0]
ofit = SolveMaxLike(xstart,N_k,const,vmod,tol=1e-10)
optimum[0] = ofit[0]*const[0]
for i in range(L):
optimum[i+1] = ofit[i+1]
optimum[0] -= (vave[i]*ofit[i+1])
results = []
results.append(optimum)
if (analytic_uncertainty):
doptimum = MaxLikeUncertain(ofit,N_k,const,vmod,vave)*numpy.sqrt(numpy.average(g))
results.append(doptimum)
return results
#========================================================================================
# Functions for computing Bennett acceptance ratio
#==========================================================================================
def logsum(a_n):
"""
Compute the log of a sum of exponentiated terms exp(a_n) in a numerically-stable manner:
logsum a_n = max_arg + \log \sum_{n=1}^N \exp[a_n - max_arg]
where max_arg = max_n a_n. This is mathematically (but not numerically) equivalent to
logsum a_n = \log \sum_{n=1}^N \exp[a_n]
ARGUMENTS
a_n (numpy array) - a_n[n] is the nth exponential argument
RETURNS
log_sum (float) - the log of the sum of exponentiated a_n, log (\sum_n exp(a_n))
EXAMPLE
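    logsum(numpy.array([1000.0, 1000.0])) returns 1000 + log(2) ~= 1000.6931,
    whereas the naive numpy.log(numpy.sum(numpy.exp(a_n))) would overflow.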
"""
# Compute the maximum argument.
max_log_term = numpy.max(a_n)
# Compute the reduced terms.
terms = numpy.exp(a_n - max_log_term)
# Compute the log sum.
log_sum = numpy.log(sum(terms)) + max_log_term
return log_sum
#=============================================================================================
# Bennett acceptance ratio function to be zeroed to solve for BAR.
#=============================================================================================
def BARzero(w_F,w_R,DeltaF):
"""
ARGUMENTS
w_F (numpy.array) - w_F[t] is the forward work value from snapshot t.
t = 0...(T_F-1) Length T_F is deduced from vector.
w_R (numpy.array) - w_R[t] is the reverse work value from snapshot t.
t = 0...(T_R-1) Length T_R is deduced from vector.
DeltaF (float) - Our current guess
RETURNS
fzero - a variable that is zeroed when DeltaF satisfies BAR.
"""
# Recommended stable implementation of BAR.
# Determine number of forward and reverse work values provided.
T_F = float(w_F.size) # number of forward work values
T_R = float(w_R.size) # number of reverse work values
# Compute log ratio of forward and reverse counts.
M = numpy.log(T_F / T_R)
# Compute log numerator.
# log f(W) = - log [1 + exp((M + W - DeltaF))]
# = - log ( exp[+maxarg] [exp[-maxarg] + exp[(M + W - DeltaF) - maxarg]] )
# = - maxarg - log[exp[-maxarg] + (T_F/T_R) exp[(M + W - DeltaF) - maxarg]]
# where maxarg = max( (M + W - DeltaF) )
exp_arg_F = (M + w_F - DeltaF)
max_arg_F = numpy.choose(numpy.greater(0.0, exp_arg_F), (0.0, exp_arg_F))
log_f_F = - max_arg_F - numpy.log( numpy.exp(-max_arg_F) + numpy.exp(exp_arg_F - max_arg_F) )
log_numer = logsum(log_f_F) - numpy.log(T_F)
# Compute log_denominator.
# log_denom = log < f(-W) exp[-W] >_R
# NOTE: log [f(-W) exp(-W)] = log f(-W) - W
exp_arg_R = (M - w_R - DeltaF)
max_arg_R = numpy.choose(numpy.greater(0.0, exp_arg_R), (0.0, exp_arg_R))
log_f_R = - max_arg_R - numpy.log( numpy.exp(-max_arg_R) + numpy.exp(exp_arg_R - max_arg_R) ) - w_R
log_denom = logsum(log_f_R) - numpy.log(T_R)
# This function must be zeroed to find a root
fzero = DeltaF - (log_denom - log_numer)
return fzero
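# Note: BARzero defines a scalar root problem, so an equivalent (illustrative)
# approach, once LowerB and UpperB bracket the root as BAR() below arranges, is:
#   scipy.optimize.brentq(lambda F: BARzero(w_F, w_R, F), LowerB, UpperB)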
def BAR(w_F, w_R, DeltaF=0.0, maximum_iterations=500, relative_tolerance=1.0e-10, verbose=False):
"""
Compute free energy difference using the Bennett acceptance ratio (BAR) method using false position
ARGUMENTS
w_F (numpy.array) - w_F[t] is the forward work value from snapshot t.
t = 0...(T_F-1) Length T_F is deduced from vector.
w_R (numpy.array) - w_R[t] is the reverse work value from snapshot t.
t = 0...(T_R-1) Length T_R is deduced from vector.
OPTIONAL ARGUMENTS
DeltaF (float) - DeltaF can be set to initialize the free energy difference with a guess (default 0.0)
maximum_iterations (int) - can be set to limit the maximum number of iterations performed (default 500)
    relative_tolerance (float) - can be set to determine the relative tolerance convergence criteria (default 1.0e-10)
    verbose (boolean) - should be set to True if verbose debug output is desired (default False)
RETURNS
[DeltaF, dDeltaF] where dDeltaF is the estimated std dev uncertainty
REFERENCE
[1] Shirts MR, Bair E, Hooker G, and Pande VS. Equilibrium free energies from nonequilibrium
measurements using maximum-likelihood methods. PRL 91(14):140601, 2003.
EXAMPLES
Compute free energy difference between two specified samples of work values.
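    (illustrative only; the work values below are synthetic)
    >>> numpy.random.seed(0)
    >>> w_F = numpy.random.normal(loc=1.0, scale=1.0, size=1000)
    >>> w_R = numpy.random.normal(loc=1.0, scale=1.0, size=1000)
    >>> (DeltaF, dDeltaF) = BAR(w_F, w_R)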
"""
UpperB = numpy.average(w_F)
LowerB = -numpy.average(w_R)
FUpperB = BARzero(w_F,w_R,UpperB)
FLowerB = BARzero(w_F,w_R,LowerB)
nfunc = 2;
if (numpy.isnan(FUpperB) or numpy.isnan(FLowerB)):
# this data set is returning NAN -- will likely not work. Return 0, print a warning:
print "Warning: BAR is likely to be inaccurate because of poor sampling. Guessing 0."
return [0.0, 0.0]
while FUpperB*FLowerB > 0:
# if they have the same sign, they do not bracket. Widen the bracket until they have opposite signs.
# There may be a better way to do this, and the above bracket should rarely fail.
if verbose:
print 'Initial brackets did not actually bracket, widening them'
FAve = (UpperB+LowerB)/2
UpperB = UpperB - max(abs(UpperB-FAve),0.1)
LowerB = LowerB + max(abs(LowerB-FAve),0.1)
FUpperB = BARzero(w_F,w_R,UpperB)
FLowerB = BARzero(w_F,w_R,LowerB)
nfunc += 2
# Iterate to convergence or until maximum number of iterations has been exceeded.
for iteration in range(maximum_iterations):
DeltaF_old = DeltaF
# Predict the new value
if (LowerB==0.0) and (UpperB==0.0):
DeltaF = 0.0
FNew = 0.0
else:
DeltaF = UpperB - FUpperB*(UpperB-LowerB)/(FUpperB-FLowerB)
FNew = BARzero(w_F,w_R,DeltaF)
nfunc += 1
if FNew == 0:
# Convergence is achieved.
if verbose:
print "Convergence achieved."
            relative_change = 1e-15
break
# Check for convergence.
if (DeltaF == 0.0):
# The free energy difference appears to be zero -- return.
if verbose: print "The free energy difference appears to be zero."
return [0.0, 0.0]
relative_change = abs((DeltaF - DeltaF_old)/DeltaF)
if verbose:
print "relative_change = %12.3f" % relative_change
if ((iteration > 0) and (relative_change < relative_tolerance)):
# Convergence is achieved.
if verbose:
print "Convergence achieved."
break
if FUpperB*FNew < 0:
# these two now bracket the root
LowerB = DeltaF
FLowerB = FNew
elif FLowerB*FNew <= 0:
# these two now bracket the root
UpperB = DeltaF
FUpperB = FNew
else:
message = 'WARNING: Cannot determine bound on free energy'
raise BoundsError(message)
if verbose:
print "iteration %5d : DeltaF = %16.3f" % (iteration, DeltaF)
# Report convergence, or warn user if not achieved.
if iteration < maximum_iterations:
if verbose:
print 'Converged to tolerance of %e in %d iterations (%d function evaluations)' % (relative_change, iteration,nfunc)
else:
        message = 'WARNING: Did not converge to within specified tolerance. max_delta = %f, TOLERANCE = %f, MAX_ITS = %d' % (relative_change, relative_tolerance, maximum_iterations)
raise ConvergenceException(message)
    # Compute asymptotic variance estimate using Eq. 10a of Bennett, 1976 (except with n_1<f>_1^2 in
    # the second denominator; that term is an error in the original paper).
# NOTE: The numerical stability of this computation may need to be improved.
# Determine number of forward and reverse work values provided.
T_F = float(w_F.size) # number of forward work values
T_R = float(w_R.size) # number of reverse work values
# Compute log ratio of forward and reverse counts.
M = numpy.log(T_F / T_R)
T_tot = T_F + T_R
C = M-DeltaF
fF = 1/(1+numpy.exp(w_F + C))
fR = 1/(1+numpy.exp(w_R - C))
afF2 = (numpy.average(fF))**2
afR2 = (numpy.average(fR))**2
vfF = numpy.var(fF)/T_F
vfR = numpy.var(fR)/T_R
variance = vfF/afF2 + vfR/afR2
dDeltaF = numpy.sqrt(variance)
if verbose:
print "DeltaF = %8.3f +- %8.3f" % (DeltaF, dDeltaF)
return (DeltaF, dDeltaF)
def Print1DStats(title,type,fitvals,convert,trueslope,const,dfitvals='N/A'):
# if dB, 'convert' is kB
# if dP, 'convert' is beta*PV_convert
# first element in fitvals is free energies df
dfs = fitvals[0]
# second element in fitvals is the slope
slopes = fitvals[1]
# Need to fix this so that uncertainties aren't printed when ddf is 'N/A'
df = numpy.average(dfs) # true even if there is only one per slope
if (numpy.size(dfs) > 1):
ddf = numpy.std(dfs)
else:
ddf = dfitvals[0]
slope = numpy.average(slopes) # true even if there is only one
if (numpy.size(slopes) > 1):
dslope = numpy.std(slopes)
else:
dslope = dfitvals[1]
print ""
print "---------------------------------------------"
print " %20s " % (title)
print "---------------------------------------------"
print " df = %.5f +/- %.5f " % (df,ddf)
print "---------------------------------------------"
print " Estimated slope vs. True slope"
print "---------------------------------------------"
print "%11.6f +/- %11.6f | %11.6f" % (slope, dslope, trueslope)
print "---------------------------------------------"
quant = numpy.abs((slope-trueslope)/dslope)
print ""
print "(That's %.2f quantiles from true slope=%5f, FYI.)" % (quant,trueslope)
if (quant > 5):
print " (Ouch!)"
else:
print ""
if (type[0:5] == 'dbeta'):
#trueslope = B1 - B0, const = (B1 + B0)/2, B = 1/(k_B T)
# so B0 = (const-trueslope/2), T0 = 1/(k_B*B0)
# so B1 = (const+trueslope/2), T1 = 1/(k_B*B1)
T0 = (convert*(const-trueslope/2))**(-1)
T1 = (convert*(const+trueslope/2))**(-1)
print "---------------------------------------------"
print " True dT = %7.3f, Eff. dT = %7.3f+/-%.3f" % (T0-T1, convert*T0*T1*slope,convert*dslope*T0*T1)
print "---------------------------------------------"
elif (type == 'dpressure-constB'):
# trueslope = B*PV_conv*(P1-P0), const = B*PV_conv*(P1+P0)/2,
# we need to convert this slope to a pressure. This should just be dividing by pvconvert*beta
#
print "---------------------------------------------"
print " True dP = %7.3f, Eff. dP = %7.3f+/-%.3f" % (-trueslope/convert, -slope/convert, numpy.abs(dslope/convert))
print "---------------------------------------------"
elif (type == 'dmu-constB'):
# trueslope = B*(mu1-mu0), const = B*(mu1+mu0)/2,
# we need to convert this slope to a chemical potential. This should just be dividing by beta
#
print "---------------------------------------------"
print " True dmu = %7.3f, Eff. dmu = %7.3f+/-%.3f" % (-trueslope/convert, -slope/convert, numpy.abs(dslope/convert))
print "---------------------------------------------"
elif (type == 'ddmu-constB'):
# trueslope = B*(dmu1-dmu0), const = B*(dmu1+dmu0)/2,
# we need to convert this slope to a chemical potential. This should just be dividing by beta
#
print "---------------------------------------------"
print " True ddmu = %7.3f, Eff. ddmu = %7.3f+/-%.3f" % (-trueslope/convert, -slope/convert, numpy.abs(dslope/convert))
print "---------------------------------------------"
def Print2DStats(title,type,fitvals,kB,convertback,trueslope,const,dfitvals='N/A'):
# first element in fitvals is free energies df
dfs = fitvals[0]
# Need to fix this so that uncertainties aren't printed when ddf is 'N/A'
df = numpy.average(dfs) # true even if there is only one per slope
if (numpy.size(dfs) > 1):
ddf = numpy.std(dfs)
else:
ddf = dfitvals[0]
slopes = []
# second element in fitvals is the energy slope
# third element in fitvals is the PV slope
for i in range(2):
slopes.append(fitvals[i+1])
slope = numpy.zeros(2,float)
dslope = numpy.zeros(2,float)
for i in range(2):
slope[i] = numpy.average(slopes[i]) # true even if there is only one
if (numpy.size(slopes[i]) > 1):
dslope[i] = numpy.std(slopes[i])
else:
dslope[i] = dfitvals[i+1]
print ""
print "---------------------------------------------------"
print " %20s " % (title)
print "---------------------------------------------------"
print " df = %.5f +/- %.5f " % (df,ddf)
for i in range(2):
print "---------------------------------------------------"
print " Estimated slope[%d] vs. True slope[%d]" % (i,i)
print "---------------------------------------------------"
print "%11.6f +/- %11.6f | %11.6f" % (slope[i], dslope[i], trueslope[i])
quant = numpy.abs((slope[i]-trueslope[i])/dslope[i])
print ""
print "(That's %.2f quantiles from true slope=%5f, FYI.)" % (quant,trueslope[i])
if (quant > 5):
print " (Ouch!)"
else:
print ""
#dp = B1 - B0, const = (B1 + B0)/2, B = 1/kbT
# so B0 = (const[0]-trueslope[0]/2), T0 = 1/(kB*B0)
# so B1 = (const[0]+trueslope[0]/2), T1 = 1/(kB*B1)
T0 = (kB*(const[0]-trueslope[0]/2))**(-1)
T1 = (kB*(const[0]+trueslope[0]/2))**(-1)
print "---------------------------------------------"
print " True dT = %7.3f, Eff. dT = %7.3f+/-%.3f" % (T0-T1, kB*T0*T1*slope[0],kB*dslope[0]*T0*T1)
print "---------------------------------------------"
if (type == 'dbeta-dpressure'):
text = 'dP'
elif (type == 'dbeta-dmu'):
text = 'dmu'
elif (type == 'dbeta-ddmu'):
text = 'ddmu'
print "---------------------------------------------"
print " True %s = %7.3f, Eff. %s = %7.3f+/-%.3f" % (text,-trueslope[1]/convertback,text, -slope[1]/convertback, dslope[1]/convertback)
def PrintPicture(xaxis,true,y,dy,fit,type,name,figname,fittype,vunits='kT',show=False):
try:
import matplotlib
except:
print '*************'
print 'Note: Figures not generated because matplotlib not found. ',
print 'Please install matplotlib to allow generation of pictures'
return
import matplotlib.pyplot as plt
matplotlib.rc('lines',lw=2)
font = {'family' : 'serif',
'weight' : 'bold',
'size' : '14'}
matplotlib.rc('font',**font)
[vt,pstring,plinfit,pnlfit,varstring,legend_location] = PrepStrings(type,vunits=vunits)
pstringtex = r'$\frac{P_2(' + vt + r')}{P_1(' + vt + r')}$'
pstringlntex = r'$\ln\frac{P_2(' + vt + r')}{P_1(' + vt + r')}$'
print "Now printing figure %s" % (figname)
plt.clf()
plt.xlabel(varstring)
if (fittype == 'linear'):
plt.title(vt + ' vs. log probability ratio \n for ' + name)
plt.errorbar(xaxis,y,fmt='b-',yerr=dy,label = pstringlntex) # make this general!
plt.errorbar(xaxis,true,fmt='k-',label = plinfit)
plt.errorbar(xaxis,fit,fmt='r-',label = 'Fit to $y = b+aB$')
plt.ylabel(pstringlntex)
elif (fittype == 'nonlinear'):
plt.title(vt + ' vs. probability ratio \n for ' + name)
plt.errorbar(xaxis,y,fmt='b-',yerr=dy,label = pstringtex)
plt.errorbar(xaxis,true,fmt='k-',label = pnlfit)
plt.errorbar(xaxis,fit,fmt='r-',label = 'Fit to $y = \exp(b+aE)$')
plt.ylabel(pstringtex)
elif (fittype == 'maxwell'):
# only valid for kinetic energy
        plt.title('E_kin vs. probability \n for ' + name)
plt.errorbar(xaxis,y,fmt='b-',yerr=dy,label = r'$P(E_{\mathrm{kin}})$')
if (true is not None): # sometimes, true will be none.
plt.errorbar(xaxis,true,fmt='k-',label = 'Fit to Analytical')
plt.errorbar(xaxis,fit,fmt='r-',label = 'Fit to Normal')
plt.ylabel(r'$P(E_{\mathrm{kin}})$')
else:
print "I'm crying foul! %s is not an allowed chart type!" % (fittype)
plt.legend(loc=legend_location)
if show:
plt.show()
plt.savefig(figname + '.pdf')
def GenHistogramProbs(N_k,bins,v,g):
K = len(N_k)
hlist = []
dhlist = []
for k in range(0,K):
hstat = numpy.histogram(v[0,k,0:N_k[k]], bins = bins)
h = (1.0*hstat[0])/N_k[k]
hlist.append(h)
dh = numpy.sqrt(g[k]*h*(1.0-h)/N_k[k])
dhlist.append(dh)
return hlist,dhlist
def LinFit(bins,N_k,dp,const,v,df=0,analytic_uncertainty=False,bGraph=False,name="",figname='lin_figure',g=[1,1],type='dbeta-constV',vunits='kT'):
[hlist,dhlist] = GenHistogramProbs(N_k,bins,v,g)
ratio = numpy.log(hlist[1]/hlist[0]) # this should have the proper exponential distribution
dratio = numpy.sqrt((dhlist[0]/hlist[0])**2 + (dhlist[1]/hlist[1])**2)
usedat = numpy.isfinite(ratio)
y = ratio[usedat]
nuse = len(y)
weights = 1.0/dratio[usedat]
xaxis = (bins[0:len(bins)-1] + bins[1:len(bins)])/2
x = xaxis[usedat]
X = numpy.ones([nuse,2],float)
X[:,1] = x
w = numpy.diag(weights)
WX = numpy.dot(w,X)
WY = numpy.dot(w,y)
WXT = numpy.transpose(WX)
Z = numpy.dot(WXT,WX)
WXY = numpy.dot(WXT,WY)
a = numpy.linalg.solve(Z,WXY)
da_matrix = numpy.transpose(numpy.linalg.inv(Z))
da = numpy.zeros(2,float)
da[0] = numpy.sqrt(da_matrix[0,0])
da[1] = numpy.sqrt(da_matrix[1,1])
    # the true line is y = df + dp*x, where y is ln P_2(X)/P_1(X)
if (bGraph):
trueslope = dp
true = df+trueslope*xaxis
fit = a[0] + a[1]*xaxis
PrintData(xaxis,true,fit,ratio,dratio,'linear')
name = name + ' (linear)'
PrintPicture(xaxis,true,ratio,dratio,fit,type,name,figname,'linear',vunits)
results = []
results.append(a)
if (analytic_uncertainty):
results.append(da)
return results
def SolveNonLin(f,df,a,data,ddata,xaxis,maxiter=20,tol=1e-10):
K = numpy.size(a)
usedat = numpy.isfinite(data)
y = data[usedat]
nuse = len(y)
weights = 1.0/ddata[usedat]
w = numpy.diag(weights)
x = xaxis[usedat]
J = numpy.zeros([nuse,K],dtype = numpy.float64)
# do the newton-raphson solution
    endnext = False
    endnow = False
for n in range(maxiter):
expt = f(a,x)
J = numpy.transpose(df(a,x))
WJ = numpy.dot(w,J)
JTW = numpy.transpose(WJ)
dy = y - expt
Z = numpy.dot(JTW,WJ)
incr_a = numpy.linalg.solve(Z,numpy.dot(JTW,dy))
a += incr_a
ra = incr_a/a
chtol = numpy.sqrt(numpy.dot(ra,ra))
        if (chtol < tol):
            # converged; take one extra pass so that Z is evaluated at the
            # minimum before the covariance matrix is computed below
            if endnext:
                endnow = True
            endnext = True
            if endnow:
                break
    if (n == maxiter-1) and (not endnow):
print "Too many iterations for nonlinear least squares"
da_matrix = numpy.linalg.inv(Z)
da = numpy.zeros(K,float)
for k in range(K):
da[k] = numpy.sqrt(da_matrix[k,k])
return a,da
def ExpFit(a,x): # assume only 2D, since we are not generating histograms
return numpy.exp(a[0] + a[1]*x)
def dExpFit(a,x):
s = a[0] + a[1]*x
e = numpy.exp(s)
return numpy.array([e,x*e])
def NonLinFit(bins,N_k,dp,const,v,df=0,analytic_uncertainty=False,bGraph=False,name="",
figname='nonlin_figure', tol=1e-10,g=[1,1], type = 'dbeta-constV',vunits='kT'):
# nonlinear model is exp(A + B*E_i), where the i are the bin energies.
# residuals are y_i - exp(A + B*E_i)
# dS/dbeta_j = 2\sum_i r_i dr_i/dB_j = 0
#
# dr_i/dA = exp(A + B*E_i)
# dr_i/dB = E_i*exp(A + B*E_i)
[hlist,dhlist] = GenHistogramProbs(N_k,bins,v,g)
ratio = (hlist[1]/hlist[0]) # this should have the proper exponential distribution
dratio = ratio*(numpy.sqrt((dhlist[0]/hlist[0])**2 + (dhlist[1]/hlist[1])**2))
xaxis = (bins[0:len(bins)-1] + bins[1:len(bins)])/2
# starting point for nonlinear fit
L = numpy.size(dp)+1
a = numpy.zeros(L)
a[0] = df
a[1:L] = dp[:]
(a,da) = SolveNonLin(ExpFit,dExpFit,a,ratio,dratio,xaxis,tol=tol)
if (bGraph):
trueslope = dp
true = numpy.exp(df+trueslope*xaxis)
fit = ExpFit(a,xaxis)
PrintData(xaxis,true,fit,ratio,dratio,'nonlinear')
name = name + ' (nonlinear)'
PrintPicture(xaxis,true,ratio,dratio,fit,type,name,figname,'nonlinear',vunits=vunits)
results = []
results.append(a)
if (analytic_uncertainty):
results.append(da)
return results
def MaxwellBoltzFit(bins,U,N,kT,figname,name="",ndof=None,g=1):
# generate histogram
hstat = numpy.histogram(U, bins = bins)
# normalize the histogram
h = (1.0*hstat[0])/N
# compute the error bars
dh = numpy.sqrt(g*h*(1.0-h)/N)
xaxis = (bins[0:len(bins)-1] + bins[1:len(bins)])/2
# we assume we have the correct mean for now, since presumably the temperature works
mean = numpy.mean(U)
std_fit = numpy.std(U)
std_true = numpy.sqrt(mean*kT)
    if (mean > 25*kT): #if too big, we use a normal distribution -- we'll use the limit of 50 DOF (i.e. mean > 25*kT) as suggested (by Wikipedia!)
# note that for a normal distribution, the sample mean and standard deviation give the maximum likelihood information.
fit = numpy.exp(-(xaxis-mean)**2/(2*std_fit**2))/(numpy.sqrt(2*numpy.pi*std_fit**2))
true = numpy.exp(-(xaxis-mean)**2/(2*std_true**2))/(numpy.sqrt(2*numpy.pi*std_true**2))
# check this with paper?
else:
# should be a gamma distribution; no std fit
fit = (xaxis/kT)**(mean/kT-1)*numpy.exp(-xaxis/kT)/(kT*scipy.special.gamma(mean/kT))
if (ndof is not None):
mean_true = 0.5*ndof*kT
            true = (xaxis/kT)**(mean_true/kT-1)*numpy.exp(-xaxis/kT)/(kT*scipy.special.gamma(mean_true/kT))
else:
true = None # unless we know the number of DOF, we don't know the true distribution:
print "--- Kinetic energy analysis ---"
print ""
print "kT = %10.4f" % (kT)
if (ndof is None):
print "Effective # of DOF = %10.4f" % (2*mean/kT)
else:
print "Reported # of DOF = %10.4f" % ndof
    if (mean > 25*kT):
        print "Approximating the Maxwell-Boltzmann with a normal distribution, as # DOF > 50"
print "Direct Std = %10.4f, Std from sqrt(U*kT) = %10.4f" % (std_fit,std_true)
print ""
# name = name + str
# normalize histogram and error bars
width = bins[1]-bins[0] # assumes equal spacing (currently true)
h /= width
dh /= width
PrintPicture(xaxis,true,h,dh,fit,'dbeta-constV',name,figname,'maxwell')
def PrintData(xaxis,true,fit,collected,dcollected,type):
if (type == 'linear'):
print "---- Linear Fit ----"
elif (type == 'nonlinear'):
print "---- Nonlinear Fit ----"
elif (type == 'maxwell'):
print "---- fit to Maxwell-Boltzmann ----"
else:
quit("Sorry, no allowed type of fit (%s) specified!" % (type))
print " X True Observed Error d(true/obs) sig(true/obs) Fit "
print "---------------------------------------------------------------------------------------"
for i in range(len(collected)):
diff = collected[i]-true[i]
sig = numpy.abs(collected[i]-true[i])/dcollected[i]
print "%10.3f %10.3f %10.3f %10.3f %10.3f %10.3f %10.3f" % (xaxis[i],true[i],collected[i],dcollected[i],diff,sig,fit[i])
def ProbabilityAnalysis(N_k,type='dbeta-constV',T_k=None,P_k=None,mu_k=None,U_kn=None,V_kn=None,N_kn=None,kB=0.0083144624,title=None,figname=None,nbins=40,bMaxLikelihood=True,bLinearFit=True,bNonLinearFit=True,reptype=None,nboots=200,g=[1,1],reps=None,cuttails=0.001,bMaxwell=False,eunits='kJ/mol',vunits='nm^3',punits='bar',seed=None):
K = len(N_k) # should be 2 pretty much always . . .
# decide if we are printing figures:
if (figname is None):
bGraph = False
figname = ''
else:
bGraph = True
# get correct conversion terms between different units.
conversions = PrepConversionFactors(eunits,punits,vunits)
if (seed):
numpy.random.seed(seed) # so there is the ability to make the RNG repeatable
print "setting random number seed for bootstrapping %d" % (seed)
# initialize constant terms
beta_ave = None
P_ave = None
mu_ave = None
    if (T_k is None):
        T_k = numpy.zeros(2,float)
        beta_k = numpy.zeros(2,float)  # keep beta_k defined for PrepInputs below
else:
beta_k = (1.0/(kB*T_k))
beta_ave = numpy.average(beta_k)
if (P_k is None):
P_k = numpy.zeros(2,float)
else:
P_ave = numpy.average(P_k)
if (mu_k is None):
mu_k = numpy.zeros(2,float)
else:
mu_ave = numpy.average(mu_k)
# prepare the variables we are going to work with
[dp,const,v,vr] = PrepInputs(N_k,conversions,type,beta_k,beta_ave,P_k,P_ave,mu_k,mu_ave,U_kn,V_kn,N_kn)
[vt,pstring,plinfit,pnlfit,varstring,legend_location] = PrepStrings(type)
if (check_twodtype(type)): # if it's 2D, we can graph, otherwise, there is too much histogram error
# determine the bin widths
maxk = numpy.zeros(K,float)
mink = numpy.zeros(K,float)
# cuttails indicates how many we leave out on each tail
# for now, we choose the range that cuts 0.1% from the tails of the smallest distribution.
prange = cuttails
for k in range(K):
maxk[k] = scipy.stats.scoreatpercentile(v[0,k,0:N_k[k]],100*(1-prange))
mink[k] = scipy.stats.scoreatpercentile(v[0,k,0:N_k[k]],100*(prange))
binmax = numpy.min(maxk)
binmin = numpy.max(mink)
if (binmax < binmin):
quit("\nThere is no overlap between the two ensembles; high distribution min is %f and low distribution max is %f; quitting!" %(binmin,binmax));
if (type == 'dmu-constB'): # special code for N, since it's discrete
# if the spread is greater than
if ((binmax-binmin) < nbins):
bins = numpy.arange(binmin-0.5,binmax+0.6,1)
nbins = len(bins)
else:
print "Warning: since the range of particle number is greater than the",
print "number of bins specified, particle number is not discrete for ",
print "methods using histograms. Set nbins larger (using --nbins) to "
print "obtain discrete N distributions"
bins = numpy.zeros(nbins+1,float)
for i in range(nbins+1):
bins[i] = binmin + (binmax-binmin)*(i/(1.0*nbins))
else:
bins = numpy.zeros(nbins+1,float)
for i in range(nbins+1):
bins[i] = binmin + (binmax-binmin)*(i/(1.0*nbins))
#===================================================================================================
# Calculate free energies with different methods
#===================================================================================================
if (type == 'dbeta-dpressure') or (type == 'dbeta-dmu'):
if (dp[0] == 0):
quit("Warning: two input temperatures are equal, can't do joint variable fit!");
if (dp[1] == 0):
if (type == 'dbeta-dpressure'):
quit("Warning: two input pressures are equal, can't do joint E,V fit!");
elif (type == 'dbeta-dmu'):
quit("Warning: two input chemical potentials are equal, can't do joint E,N fit!");
else:
trueslope = dp
print "True slope of %s should be %.8f" % (pstring,trueslope)
if type == 'dpressure-constB' or type == 'dbeta-dpressure':
convertback = beta_ave*conversions[1] # this is the pv component
elif type == 'dmu-constB' or type == 'dbeta-dmu':
convertback = -1*kB
else:
convertback = kB
w_F = (beta_k[1]-beta_k[0])*U_kn[0,0:N_k[0]]
w_R = -((beta_k[1]-beta_k[0])*U_kn[1,0:N_k[1]])
if (type == 'dbeta-constP') or (type == 'dpressure-constB') or (type == 'dbeta-dpressure'):
w_F += conversions[1]*(beta_k[1]*P_k[1]-beta_k[0]*P_k[0])*V_kn[0,0:N_k[0]]
w_R += -conversions[1]*(beta_k[1]*P_k[1]-beta_k[0]*P_k[0])*V_kn[1,0:N_k[1]]
if (type == 'dbeta-constmu') or (type == 'dmu-constB') or (type == 'dbeta-dmu'):
w_F += -(beta_k[1]*mu_k[1]-beta_k[0]*mu_k[0])*N_kn[0,0:N_k[0]]
w_R += (beta_k[1]*mu_k[1]-beta_k[0]*mu_k[0])*N_kn[1,0:N_k[1]]
    # it's not entirely clear if this is right because of lack of overlap in phase space between different N's!
print "Now computing log of partition functions using BAR"
(df,ddf) = BAR(w_F,w_R)
print "using %.5f for log of partition functions computed from BAR" % (df)
print "Uncertainty in quantity is %.5f" % (ddf)
print "Assuming this is negligible compared to sampling error at individual points"
if (bMaxwell): # only applies for kinetic energies
print "Now fitting to a Maxwell-Boltzmann distribution"
for k in range(2):
fn = figname + '_maxboltz' + str(T_k[k])
MaxwellBoltzFit(bins,U_kn[k,0:N_k[k]],N_k[k],kB*T_k[k],fn)
if (bLinearFit and check_twodtype(type)):
print "Now computing the linear fit parameters"
fn = figname + '_linear'
(fitvals,dfitvals) = LinFit(bins,N_k,dp,const,v,df=df,name=title,figname=fn,bGraph=bGraph,analytic_uncertainty=True,g=g,type=type,vunits=vunits)
Print1DStats('Linear Fit Analysis (analytical error)',type,fitvals,convertback,dp,const,dfitvals=dfitvals)
if (bNonLinearFit and check_twodtype(type)):
print "Now computing the nonlinear fit parameters"
fn = figname + '_nonlinear'
(fitvals,dfitvals) = NonLinFit(bins,N_k,dp,const,v,df=df,name=title,figname=fn,bGraph=bGraph,analytic_uncertainty=True,g=g,type=type,vunits=vunits)
Print1DStats('Nonlinear Fit Analysis (analytical error)',type,fitvals,convertback,dp,const,dfitvals=dfitvals)
if (bMaxLikelihood):
print "Now computing the maximum likelihood parameters"
(fitvals,dfitvals) = MaxLikeParams(N_k,dp,const,v,df=df,analytic_uncertainty=True,g=numpy.average(g))
if (check_twodtype(type)):
Print1DStats('Maximum Likelihood Analysis (analytical error)',type,fitvals,convertback,dp,const,dfitvals=dfitvals)
else:
Print2DStats('2D-Maximum Likelihood Analysis (analytical error)',type,fitvals,kB,convertback,dp,const,dfitvals=dfitvals)
if (reptype is None):
return
if (reptype == 'bootstrap'):
nreps = nboots
if (nreps < 50):
if (nreps > 1):
print "Warning, less than 50 bootstraps (%d requested) is likely not a good statistical idea" % (nreps)
else:
print "Cannot provide bootstrap statisics, only %d requested" % (nreps)
return
print "Now bootstrapping (n=%d) for uncertainties . . . could take a bit of time!" % (nreps)
elif (reptype == 'independent'):
nreps = len(reps)
print "Now analyzing %d independent samples . . . could take a bit of time!" % (nreps)
else:
quit("Don't understand reptype = %s; quitting" % (reptype))
if check_twodtype(type): # how many values do we have to deal with?
rval = 2
else:
rval = 3
linvals = numpy.zeros([rval,nreps],float)
nlvals = numpy.zeros([rval,nreps],float)
mlvals = numpy.zeros([rval,nreps],float)
for n in range(nreps):
if (n%10 == 0):
print "Finished %d samples . . ." % (n)
if (reptype == 'bootstrap'):
for k in range(K):
if ((g is None) or (g[0]==1 and g[1]==1)):
#do normal bootstrap
rindex = numpy.random.randint(0,high=N_k[k],size=N_k[k]); # bootstrap it
for i in range(len(const)):
vr[i,k,0:N_k[k]] = v[i,k,rindex]
else:
# we are given correlation times. Do block bootstrapping.
gk = int(numpy.ceil(g[k]))
nblocks = int(numpy.floor(N_k[k]/gk))
# moving blocks bootstrap; all contiguous segments of length gk
rindex = numpy.random.randint(0,high=gk*(nblocks-1),size=nblocks);
for nb in range(nblocks):
for i in range(len(const)):
vr[i,k,nb*gk:(nb+1)*gk] = v[i,k,rindex[nb]:rindex[nb]+gk]
N_k[k] = nblocks*gk # we could have a few samples less now
if (reptype == 'independent'):
for k in range(K):
for i in range(len(const)):
vr[i,k,0:N_k[k]] = reps[n][i][k,0:N_k[k]]
if (bLinearFit and check_twodtype(type)):
fitvals = LinFit(bins,N_k,dp,const,vr)
for i in range(rval):
linvals[i,n] = fitvals[0][i]
if (bNonLinearFit and check_twodtype(type)):
fitvals = NonLinFit(bins,N_k,dp,const,vr,df=df)
for i in range(rval):
nlvals[i,n] = fitvals[0][i]
if (bMaxLikelihood):
fitvals = MaxLikeParams(N_k,dp,const,vr,df=df)
for i in range(rval):
mlvals[i,n] = fitvals[0][i]
if (bLinearFit and check_twodtype(type)):
Print1DStats('Linear Fit Analysis',type,[linvals[0],linvals[1]],convertback,dp,const)
if (bNonLinearFit and check_twodtype(type)):
Print1DStats('Nonlinear Fit Analysis',type,[nlvals[0],nlvals[1]],convertback,dp,const)
if (bMaxLikelihood):
if check_twodtype(type):
Print1DStats('Maximum Likelihood Analysis',type,[mlvals[0],mlvals[1]],convertback,dp,const)
else:
Print2DStats('2D-Maximum Likelihood Analysis',type,[mlvals[0],mlvals[1],mlvals[2]],kB,convertback,dp,const)
return
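# Illustrative sketch (hypothetical helper, assuming numpy is imported at the
# module top and len(samples) >= 2*g): a minimal standalone version of the
# moving-blocks bootstrap used in the resampling loop above.
def _moving_blocks_bootstrap_sketch(samples, g):
    gk = int(numpy.ceil(g))
    nblocks = int(numpy.floor(len(samples)/gk))
    # draw nblocks contiguous segments of length gk at uniformly random
    # starting positions, then concatenate them into one resampled series
    starts = numpy.random.randint(0, high=gk*(nblocks-1), size=nblocks)
    return numpy.concatenate([samples[s:s+gk] for s in starts])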
# to do: fix the drawing directions so that correct data has the legend in the right place.
|
shirtsgroup/checkensemble
|
checkensemble/checkensemble.py
|
Python
|
gpl-2.0
| 50,763
|
[
"Gromacs"
] |
24ebca8109c6abc26bc704ea03c3a29633e017abb0573cef61971761f9b7a4a9
|
from .schemas import (
schema_absorption_spectrum, schema_derivative_couplings,
schema_distribute_absorption_spectrum, schema_distribute_derivative_couplings,
schema_distribute_single_points, schema_cp2k_general_settings, schema_single_points)
from .templates import (create_settings_from_template, valence_electrons)
from nac.common import DictConfig
from pathlib import Path
from os.path import join
from scm.plams import Molecule
from qmflows.settings import Settings
from qmflows.utils import settings2Dict
from schema import SchemaError
from typing import Dict
import logging
import os
import yaml
logger = logging.getLogger(__name__)
schema_workflows = {
'absorption_spectrum': schema_absorption_spectrum,
'derivative_couplings': schema_derivative_couplings,
'single_points': schema_single_points,
'cp2k_general_settings': schema_cp2k_general_settings,
'distribute_derivative_couplings': schema_distribute_derivative_couplings,
'distribute_absorption_spectrum': schema_distribute_absorption_spectrum,
'distribute_single_points': schema_distribute_single_points
}
def process_input(input_file: str, workflow_name: str) -> Dict:
"""
Read the `input_file` in YAML format, validate it against the
corresponding `workflow_name` schema and return a nested dictionary with the input.
    :param str input_file: path to the input file
    :param str workflow_name: name of the workflow whose schema is applied
:return: Input as dictionary
:raise SchemaError: If the input is not valid
"""
schema = schema_workflows[workflow_name]
with open(input_file, 'r') as f:
dict_input = yaml.load(f.read(), Loader=yaml.FullLoader)
try:
d = schema.validate(dict_input)
return DictConfig(create_settings(d))
    except SchemaError as e:
        msg = f"There was an error in the input yaml provided:\n{e}"
        print(msg)
        raise
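# Illustrative usage (hypothetical file name):
#   config = process_input('input_single_points.yml', 'single_points')
# returns a DictConfig with the validated, post-processed settings, or
# raises SchemaError when validation fails.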
def create_settings(d: Dict) -> Dict:
"""
Transform the input dict into Cp2K settings.
:param d: input dict
:return: dictionary with Settings to call Cp2k
"""
# Convert cp2k definitions to settings
general = d['cp2k_general_settings']
general['cp2k_settings_main'] = Settings(
general['cp2k_settings_main'])
general['cp2k_settings_guess'] = Settings(
general['cp2k_settings_guess'])
apply_templates(general, d['path_traj_xyz'])
input_parameters = add_missing_keywords(d)
print_final_input(input_parameters)
return input_parameters
def apply_templates(general: Dict, path_traj_xyz: str) -> None:
"""
Apply a template for CP2K if the user request so.
"""
for s in [general[x] for x in ['cp2k_settings_main', 'cp2k_settings_guess']]:
val = s['specific']
if "template" in val:
s['specific'] = create_settings_from_template(
general, val['template'], path_traj_xyz)
def add_missing_keywords(d: Dict) -> Dict:
"""
and add the `added_mos` and `mo_index_range` keywords
"""
general = d['cp2k_general_settings']
# Add keywords if missing
if d.get('nHOMO') is None:
d['nHOMO'] = compute_HOMO_index(
d['path_traj_xyz'], general['basis'], general['charge'])
# Added_mos keyword
add_mo_index_range(d)
# Add restart point
add_restart_point(general)
# Add basis sets
add_basis(general)
# add cell parameters
add_cell_parameters(general)
# Add Periodic properties
add_periodic(general)
# Add charge
add_charge(general)
# Add multiplicity
add_multiplicity(general)
return d
def add_basis(general: dict) -> None:
"""
Add path to the basis and potential
"""
setts = [general[p] for p in ['cp2k_settings_main', 'cp2k_settings_guess']]
# add basis and potential path
if general["path_basis"] is not None:
logger.info("path to basis added to cp2k settings")
for x in setts:
x.basis = general['basis']
x.potential = general['potential']
x.specific.cp2k.force_eval.dft.potential_file_name = os.path.abspath(
join(general['path_basis'], "GTH_POTENTIALS"))
# Choose the file basis to use
select_basis_file(x, general)
def select_basis_file(sett: Settings, general: dict) -> str:
"""
Choose the right basis set based on the potential and basis name
"""
dft = sett.specific.cp2k.force_eval.dft
dft["basis_set_file_name"] = os.path.abspath(
join(general['path_basis'], "BASIS_MOLOPT"))
if dft.xc.get("xc_functional pbe") is None:
# USE ADMM
        # We need to write one key lowercase and the other uppercase,
        # otherwise Settings would overwrite the value
dft["Basis_Set_File_Name"] = os.path.abspath(
join(general['path_basis'], "BASIS_ADMM_MOLOPT"))
dft["BASIS_SET_FILE_NAME"] = os.path.abspath(
join(general['path_basis'], "BASIS_ADMM"))
def add_cell_parameters(general: dict) -> None:
"""
Add the Unit cell information to both the main and the guess settings
"""
# Search for a file containing the cell parameters
file_cell_parameters = general["file_cell_parameters"]
for s in (general[p] for p in ['cp2k_settings_main', 'cp2k_settings_guess']):
if file_cell_parameters is None:
s.cell_parameters = general['cell_parameters']
s.cell_angles = None
else:
s.cell_parameters = None
s.cell_angles = None
def add_periodic(general: dict) -> None:
"""
Add the keyword for the periodicity of the system
"""
for s in (general[p] for p in ['cp2k_settings_main', 'cp2k_settings_guess']):
s.specific.cp2k.force_eval.subsys.cell.periodic = general['periodic']
def add_charge(general: dict) -> None:
"""
Add the keyword for the charge of the system
"""
for s in (general[p] for p in ['cp2k_settings_main', 'cp2k_settings_guess']):
s.specific.cp2k.force_eval.dft.charge = general['charge']
def add_multiplicity(general: dict) -> None:
"""
Add the keyword for the multiplicity of the system only if greater than 1
"""
if general['multiplicity'] > 1:
for s in (general[p] for p in ['cp2k_settings_main', 'cp2k_settings_guess']):
s.specific.cp2k.force_eval.dft.multiplicity = general['multiplicity']
s.specific.cp2k.force_eval.dft.uks = ""
def add_restart_point(general: dict) -> None:
"""
add a restart file if the user provided it
"""
guess = general['cp2k_settings_guess']
wfn = general['wfn_restart_file_name']
    if wfn:
dft = guess.specific.cp2k.force_eval.dft
dft.wfn_restart_file_name = Path(wfn).absolute().as_posix()
def add_mo_index_range(dict_input: dict) -> None:
"""
Compute the MO range to print
"""
active_space = dict_input['active_space']
nHOMO = dict_input["nHOMO"]
mo_index_range = nHOMO - active_space[0], nHOMO + active_space[1]
dict_input['mo_index_range'] = mo_index_range
# mo_index_range keyword
cp2k_main = dict_input['cp2k_general_settings']['cp2k_settings_main']
dft_main_print = cp2k_main.specific.cp2k.force_eval.dft.print
dft_main_print.mo.mo_index_range = f"{mo_index_range[0] + 1} {mo_index_range[1]}"
# added_mos
cp2k_main.specific.cp2k.force_eval.dft.scf.added_mos = mo_index_range[1] - nHOMO
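# Illustrative note (assumed values): with nHOMO = 50 and active_space = (10, 10)
# the range becomes (40, 60); the printed CP2K range is "41 60" (CP2K orbital
# indices are 1-based, hence the +1) and added_mos = 60 - 50 = 10 virtuals.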
def compute_HOMO_index(path_traj_xyz: str, basis: str, charge: int) -> int:
"""
Compute the HOMO index
"""
mol = Molecule(path_traj_xyz, 'xyz')
number_of_electrons = sum(
valence_electrons['-'.join((at.symbol, basis))] for at in mol.atoms)
""" Correct for total charge of the system """
number_of_electrons = number_of_electrons - charge
if (number_of_electrons % 2) != 0:
raise RuntimeError(
"Unpair number of electrons detected when computing the HOMO")
return number_of_electrons // 2
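# Illustrative note (hypothetical molecule): a neutral system whose atoms
# contribute 26 valence electrons in the chosen basis gives
# nHOMO = 26 // 2 = 13; an odd electron count raises the RuntimeError above.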
def recursive_traverse(val):
"""
Check if the value of a key is a Settings instance a transform it to plain dict.
"""
if isinstance(val, dict):
if isinstance(val, Settings):
return settings2Dict(val)
else:
return {k: recursive_traverse(v) for k, v in val.items()}
else:
return val
def print_final_input(d: dict) -> None:
"""
Print the input after post-processing
"""
xs = d.copy()
for k, v in d.items():
xs[k] = recursive_traverse(v)
with open("input_parameters.yml", "w") as f:
yaml.dump(xs, f, indent=4)
|
felipeZ/nonAdiabaticCoupling
|
nac/workflows/input_validation.py
|
Python
|
mit
| 8,570
|
[
"CP2K"
] |
c6d19e6530b052127c35110ed882e2eb9aa743a387ec012f1a2a9d6483fa1dbf
|
# -*- coding: utf-8 -*-
"""Tests for the umbrella node-link JSON exporter."""
import unittest
from pybel.io.umbrella_nodelink import to_umbrella_nodelink
from tests.test_io.test_cx.examples import example_graph
class TestUmbrellaNodeLinkExporter(unittest.TestCase):
"""Tests for the umbrella node-link JSON exporter."""
def test_exporter_new_nodes(self):
"""Test new nodes created."""
# Check original number of nodes and edges in the example BEL Graph
self.assertEqual(29, example_graph.number_of_nodes())
self.assertEqual(32, example_graph.number_of_edges())
custom_json_dict = to_umbrella_nodelink(example_graph)
self.assertEqual(32, len(custom_json_dict["nodes"]))
# 3 new nodes are created:
self.assertIn(
'act(p(hgnc:MAPK1), ma(go:0016301 ! "kinase activity"))',
custom_json_dict["nodes"],
)
self.assertIn(
'act(p(hgnc:PTK2, pmod(go:0006468 ! "protein phosphorylation", Tyr, 925)), ma(go:0016301 ! "kinase activity"))',
custom_json_dict["nodes"],
)
self.assertIn(
'act(p(fus(hgnc:BCR, "?", hgnc:ABL1, "?")), ma(go:0016301 ! "kinase activity"))',
custom_json_dict["nodes"],
)
def test_exporter_edges(self):
"""Test no new edges created."""
# Check original number of nodes and edges in the example BEL Graph
self.assertEqual(29, example_graph.number_of_nodes(), msg="Wrong number of nodes")
self.assertEqual(32, example_graph.number_of_edges(), msg="Wrong number of edges")
custom_json_dict = to_umbrella_nodelink(example_graph)
# Number of edges is maintained
self.assertEqual(
32,
len(custom_json_dict["links"]),
msg="Wrong number of links in Umbrella JSON",
)
|
pybel/pybel
|
tests/test_io/test_umbrella_nodelink.py
|
Python
|
mit
| 1,868
|
[
"Pybel"
] |
800796712ed0b1cb63e382810d5ba3c17d0ec1c4864df89c80a7bbfb0969ef85
|
#!/usr/bin/env python
#Schedule is Call, Long, Short, MVSC, Vacation, Regina
#1:'Jason', 2:'Brian', 3:'Dick', 4:'Mark', 5:'Tim', 6:'Ryan'
import numpy as np
import itertools
import random
import sys
import math
import logging
import two_opt
from cost import cost
def gen_permutations(n, mvsc_list, regina_list): #Generate all possible combinations for a week's schedule
p = itertools.permutations(list(range(1,n+1)))
temp = np.array([list(w) for w in p if w[3] in mvsc_list])
temp2 = np.array([list(w) for w in temp if w[-1] in regina_list])
return temp2
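# Illustrative note: with n=6 there are 720 raw permutations; for the lists
# used in run_schedule (mvsc_list=[1,2,3,6], regina_list=[1,2,5,6]) there are
# 4*4 - 3 = 13 valid (MVSC, Regina) pairs, so 13 * 4! = 312 weekly schedules
# survive the two filters.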
def get_cands(week):
cands = list()
filled = list(itertools.chain.from_iterable(np.nonzero(master_cand[week])))
for perm in perm_list:
truth_array = (master_cand[week]==perm)
if (np.array([truth_array[f] for f in filled]).all()):
if (master_cand[week-1][4] != perm[0]): #No call following vacation
cands.append(perm)
rand_index = random.randint(0, len(cands) - 1)
return cands[rand_index]
def accept(new_cost, old_cost, temperature):
if new_cost < old_cost:
return 1.0
result = math.exp((old_cost - new_cost) / temperature)
return result
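# Illustrative note: accept() is the Metropolis criterion, e.g. (assumed
# values) accept(90, 100, 10.0) == 1.0 because the cost improved, while
# accept(110, 100, 10.0) == math.exp(-1) ~ 0.368 for a move that worsens
# the cost by 10 at temperature 10.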
def rand_fill(schedule_cand):
for i in range(1,53):
schedule_cand[i,:] = get_cands(i)
return schedule_cand
def gen_schedules(schedule_cand): #Generate schedule candidates using simulated annealing
global master_cand, final
master_cand = schedule_cand.copy()
final = False
init_temp = 1000
temperature = init_temp
base_temp = 0.01
cooling_factor = 0.99
iters_per_temp = 100
reheating_frequency = 0.002
reheating_factor = 10
reset_frequency = 0.0003
two_opt_freq = 0.01
overall_best_cost = 99999999
schedule_cand = rand_fill(schedule_cand)
best_schedule = schedule_cand.copy()
current_cost = cost(schedule_cand)
while temperature > base_temp:
for q in range(iters_per_temp):
week = random.randint(1,52)
temp = np.array(schedule_cand[week])
schedule_cand[week] = get_cands(week)
new_cost = cost(schedule_cand)
if (accept(new_cost, current_cost, temperature) > random.random()):
current_cost = new_cost
#print 'Current cost: %d\tTemp: %.3f' % (current_cost, temperature)
if current_cost < overall_best_cost:
overall_best_cost = current_cost
best_schedule = schedule_cand.copy()
#print 'new overall best: %d\tTemperature = %.2f' % (overall_best_cost, temperature)
#print best_schedule
else:
schedule_cand[week] = temp
temperature *= cooling_factor
if random.random() < reheating_frequency:
temperature *= reheating_factor
if temperature > init_temp:
temperature = init_temp
#print 'Reheating, Temperature = %.2f\tCost = %d' % (temperature, current_cost)
if random.random() < reset_frequency:
schedule_cand = best_schedule.copy()
current_cost = overall_best_cost
#print 'Reset to best_schedule. Temperature = %.2f\tCost = %d' % (temperature, overall_best_cost)
if random.random() < two_opt_freq:
schedule_cand = two_opt.gen_schedules(schedule_cand, 'schedule_2015.csv')
current_cost = cost(schedule_cand)
if current_cost < overall_best_cost:
overall_best_cost = current_cost
best_schedule = schedule_cand.copy()
#print 'new overall best: %d\tTemperature = %.2f\tTwo_opted' % (overall_best_cost, temperature)
#print best_schedule
#print 'Totals:'
#for i in range(1,7):
#print 'Person %d' % i
#print sum(best_schedule[:,:]==i)
#final = True
#cost(best_schedule)
best_schedule = two_opt.gen_schedules(best_schedule, 'schedule_2015.csv')
return best_schedule
def read_input(filename):
schedule_cand = np.genfromtxt(filename, delimiter=',')
assert schedule_cand.shape==(53,6)
return schedule_cand
def save_result(filename, schedule_cand):
np.savetxt('schedules/' + filename, schedule_cand, delimiter=',')
logging.info('saved schedule to schedules/%s', filename)
def run_schedule(s_filename, savename):
global perm_list
    logging.basicConfig(filename='log/' + savename + '.log', format='%(asctime)s %(message)s', \
                        datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO, filemode='w')
regina_list = [1, 2, 5, 6]
mvsc_list = [1, 2, 3, 6]
perm_list = gen_permutations(6, mvsc_list, regina_list)
schedule_cand = read_input(s_filename)
#print schedule_cand
logging.info('beginning schedule generation using simulated annealing')
schedule_cand = gen_schedules(schedule_cand)
logging.info('finished schedule generation, best cost %d', cost(schedule_cand))
save_result(savename, schedule_cand)
if __name__ == "__main__":
try:
s_filename = sys.argv[1]
savename = sys.argv[2]
    except IndexError:
        print 'Please supply the name of the input file and output file'
sys.exit()
run_schedule(s_filename, savename)
|
jbolinge/scheduler
|
sim_anneal.py
|
Python
|
lgpl-3.0
| 4,665
|
[
"Brian"
] |
722df513c53b4bdfd245d03a9b444728e690b515e1359e3f0d9cc30aa89f1b6e
|
"""An implementation of the Zephyr Abstract Syntax Definition Language.
See http://asdl.sourceforge.net/ and
http://www.cs.princeton.edu/research/techreps/TR-554-97
Only supports top level module decl, not view. I'm guessing that view
is intended to support the browser and I'm not interested in the
browser.
Changes for Python: Add support for module versions
"""
import os
import traceback
import spark
class Token(object):
# spark seems to dispatch in the parser based on a token's
# type attribute
def __init__(self, type, lineno):
self.type = type
self.lineno = lineno
def __str__(self):
return self.type
def __repr__(self):
return str(self)
class Id(Token):
def __init__(self, value, lineno):
self.type = 'Id'
self.value = value
self.lineno = lineno
def __str__(self):
return self.value
class String(Token):
def __init__(self, value, lineno):
self.type = 'String'
self.value = value
self.lineno = lineno
class ASDLSyntaxError(Exception):
def __init__(self, lineno, token=None, msg=None):
self.lineno = lineno
self.token = token
self.msg = msg
def __str__(self):
if self.msg is None:
return "Error at '%s', line %d" % (self.token, self.lineno)
else:
return "%s, line %d" % (self.msg, self.lineno)
class ASDLScanner(spark.GenericScanner, object):
def tokenize(self, input):
self.rv = []
self.lineno = 1
super(ASDLScanner, self).tokenize(input)
return self.rv
def t_id(self, s):
r"[\w\.]+"
# XXX doesn't distinguish upper vs. lower, which is
# significant for ASDL.
self.rv.append(Id(s, self.lineno))
def t_string(self, s):
r'"[^"]*"'
self.rv.append(String(s, self.lineno))
def t_xxx(self, s): # not sure what this production means
r"<="
self.rv.append(Token(s, self.lineno))
def t_punctuation(self, s):
r"[\{\}\*\=\|\(\)\,\?\:]"
self.rv.append(Token(s, self.lineno))
def t_comment(self, s):
r"\-\-[^\n]*"
pass
def t_newline(self, s):
r"\n"
self.lineno += 1
def t_whitespace(self, s):
r"[ \t]+"
pass
def t_default(self, s):
r" . +"
raise ValueError, "unmatched input: %s" % `s`
class ASDLParser(spark.GenericParser, object):
def __init__(self):
super(ASDLParser, self).__init__("module")
def typestring(self, tok):
return tok.type
def error(self, tok):
raise ASDLSyntaxError(tok.lineno, tok)
def p_module_0(self, (module, name, version, _0, _1)):
" module ::= Id Id version { } "
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, None, version)
def p_module(self, (module, name, version, _0, definitions, _1)):
" module ::= Id Id version { definitions } "
if module.value != "module":
raise ASDLSyntaxError(module.lineno,
msg="expected 'module', found %s" % module)
return Module(name, definitions, version)
def p_version(self, (version, V)):
"version ::= Id String"
if version.value != "version":
raise ASDLSyntaxError(version.lineno,
msg="expected 'version', found %" % version)
return V
def p_definition_0(self, (definition,)):
" definitions ::= definition "
return definition
def p_definition_1(self, (definitions, definition)):
" definitions ::= definition definitions "
return definitions + definition
def p_definition(self, (id, _, type)):
" definition ::= Id = type "
return [Type(id, type)]
def p_type_0(self, (product,)):
" type ::= product "
return product
def p_type_1(self, (sum,)):
" type ::= sum "
return Sum(sum)
def p_type_2(self, (sum, id, _0, attributes, _1)):
" type ::= sum Id ( fields ) "
if id.value != "attributes":
raise ASDLSyntaxError(id.lineno,
msg="expected attributes, found %s" % id)
if attributes:
attributes.reverse()
return Sum(sum, attributes)
def p_product(self, (_0, fields, _1)):
" product ::= ( fields ) "
# XXX can't I just construct things in the right order?
fields.reverse()
return Product(fields)
def p_sum_0(self, (constructor,)):
" sum ::= constructor "
return [constructor]
def p_sum_1(self, (constructor, _, sum)):
" sum ::= constructor | sum "
return [constructor] + sum
def p_sum_2(self, (constructor, _, sum)):
" sum ::= constructor | sum "
return [constructor] + sum
def p_constructor_0(self, (id,)):
" constructor ::= Id "
return Constructor(id)
def p_constructor_1(self, (id, _0, fields, _1)):
" constructor ::= Id ( fields ) "
# XXX can't I just construct things in the right order?
fields.reverse()
return Constructor(id, fields)
def p_fields_0(self, (field,)):
" fields ::= field "
return [field]
def p_fields_1(self, (field, _, fields)):
" fields ::= field , fields "
return fields + [field]
def p_field_0(self, (type,)):
" field ::= Id "
return Field(type)
def p_field_1(self, (type, name)):
" field ::= Id Id "
return Field(type, name)
def p_field_2(self, (type, _, name)):
" field ::= Id * Id "
return Field(type, name, seq=True)
def p_field_3(self, (type, _, name)):
" field ::= Id ? Id "
return Field(type, name, opt=True)
def p_field_4(self, (type, _)):
" field ::= Id * "
return Field(type, seq=True)
def p_field_5(self, (type, _)):
" field ::= Id ? "
return Field(type, opt=True)
builtin_types = ("identifier", "string", "int", "bool", "object")
# below is a collection of classes to capture the AST of an AST :-)
# not sure if any of the methods are useful yet, but I'm adding them
# piecemeal as they seem helpful
class AST(object):
pass # a marker class
class Module(AST):
def __init__(self, name, dfns, version):
self.name = name
self.dfns = dfns
self.version = version
self.types = {} # maps type name to value (from dfns)
for type in dfns:
self.types[type.name.value] = type.value
def __repr__(self):
return "Module(%s, %s)" % (self.name, self.dfns)
class Type(AST):
def __init__(self, name, value):
self.name = name
self.value = value
def __repr__(self):
return "Type(%s, %s)" % (self.name, self.value)
class Constructor(AST):
def __init__(self, name, fields=None):
self.name = name
self.fields = fields or []
def __repr__(self):
return "Constructor(%s, %s)" % (self.name, self.fields)
class Field(AST):
def __init__(self, type, name=None, seq=False, opt=False):
self.type = type
self.name = name
self.seq = seq
self.opt = opt
def __repr__(self):
if self.seq:
extra = ", seq=True"
elif self.opt:
extra = ", opt=True"
else:
extra = ""
if self.name is None:
return "Field(%s%s)" % (self.type, extra)
else:
return "Field(%s, %s%s)" % (self.type, self.name, extra)
class Sum(AST):
def __init__(self, types, attributes=None):
self.types = types
self.attributes = attributes or []
def __repr__(self):
if self.attributes is None:
return "Sum(%s)" % self.types
else:
return "Sum(%s, %s)" % (self.types, self.attributes)
class Product(AST):
def __init__(self, fields):
self.fields = fields
def __repr__(self):
return "Product(%s)" % self.fields
class VisitorBase(object):
def __init__(self, skip=False):
self.cache = {}
self.skip = skip
def visit(self, object, *args):
meth = self._dispatch(object)
if meth is None:
return
try:
meth(object, *args)
except Exception, err:
print "Error visiting", repr(object)
print err
traceback.print_exc()
# XXX hack
if hasattr(self, 'file'):
self.file.flush()
os._exit(1)
def _dispatch(self, object):
assert isinstance(object, AST), repr(object)
klass = object.__class__
meth = self.cache.get(klass)
if meth is None:
methname = "visit" + klass.__name__
if self.skip:
meth = getattr(self, methname, None)
else:
meth = getattr(self, methname)
self.cache[klass] = meth
return meth
class Check(VisitorBase):
def __init__(self):
super(Check, self).__init__(skip=True)
self.cons = {}
self.errors = 0
self.types = {}
def visitModule(self, mod):
for dfn in mod.dfns:
self.visit(dfn)
def visitType(self, type):
self.visit(type.value, str(type.name))
def visitSum(self, sum, name):
for t in sum.types:
self.visit(t, name)
def visitConstructor(self, cons, name):
key = str(cons.name)
conflict = self.cons.get(key)
if conflict is None:
self.cons[key] = name
else:
print "Redefinition of constructor %s" % key
print "Defined in %s and %s" % (conflict, name)
self.errors += 1
for f in cons.fields:
self.visit(f, key)
def visitField(self, field, name):
key = str(field.type)
l = self.types.setdefault(key, [])
l.append(name)
def visitProduct(self, prod, name):
for f in prod.fields:
self.visit(f, name)
def check(mod):
v = Check()
v.visit(mod)
for t in v.types:
if t not in mod.types and not t in builtin_types:
v.errors += 1
uses = ", ".join(v.types[t])
print "Undefined type %s, used in %s" % (t, uses)
return not v.errors
def parse(file):
scanner = ASDLScanner()
parser = ASDLParser()
buf = open(file).read()
tokens = scanner.tokenize(buf)
try:
return parser.parse(tokens)
except ASDLSyntaxError, err:
print err
lines = buf.split("\n")
print lines[err.lineno - 1] # lines starts at 0, files at 1
if __name__ == "__main__":
import glob
import sys
if len(sys.argv) > 1:
files = sys.argv[1:]
else:
testdir = "tests"
files = glob.glob(testdir + "/*.asdl")
for file in files:
print file
mod = parse(file)
print "module", mod.name
print len(mod.dfns), "definitions"
if not check(mod):
print "Check failed"
else:
for dfn in mod.dfns:
print dfn.type
|
google/google-ctf
|
third_party/edk2/AppPkg/Applications/Python/Python-2.7.2/Parser/asdl.py
|
Python
|
apache-2.0
| 11,733
|
[
"VisIt"
] |
63bb05dee87dc3b96b5017a78c808a2f286920327c855c6692c89f4f2bdeae56
|
#!/usr/bin/python
########################################################################
# 27 April 2015
# Patrick Lombard, Centre for Stem Cell Research
# Core Bioinformatics Group
# University of Cambridge
# All right reserved.
########################################################################
import subprocess
import sys, re, os
import ConfigParser
import tempfile
import argparse
import pkg_resources
def run_rcode(rscript):
rcode = tempfile.NamedTemporaryFile(delete = False)
rcode.write(rscript)
rcode.close()
print rcode.name
try:
subprocess.call(['Rscript', rcode.name])
except:
error("Error in running R script\n")
error("Error: %s\n" % str(sys.exc_info()[1]))
error( "[Exception type: %s, raised in %s:%d]\n" % ( sys.exc_info()[1].__class__.__name__,
os.path.basename(traceback.extract_tb( sys.exc_info()[2] )[-1][0]),
traceback.extract_tb( sys.exc_info()[2] )[-1][1] ) )
sys.exit(1)
def create_design_for_R(idict):
output = tempfile.NamedTemporaryFile(delete = False)
output.write("sampleName\tfileName\tcondition\n"),
for key in sorted(idict.keys()):
bam_name = os.path.basename(key)
name = re.sub(".bam$", "", bam_name)
output.write("{}\t{}\t{}\n".format(name, key, idict[key]))
output.close()
return output.name
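# Illustrative note (hypothetical input): for {'/data/a.bam': 'ctrl'} the
# temporary design file contains the tab-separated header
# "sampleName fileName condition" followed by "a /data/a.bam ctrl";
# the ".bam" suffix is stripped to form the sample name.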
def ConfigSectionMap(section, Config):
dict1 = {}
options = Config.options(section)
for option in options:
try:
dict1[option] = Config.get(section, option)
if dict1[option] == -1:
DebugPrint("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
def write_deseq(ifile, sample_dict, cond1, cond2, padj, f, outdir, gc_file, tmpdesign, cqn):
print "==> Running differental expression analysis...\n"
rscript = "suppressMessages(library(cqn)); suppressMessages(library(scales))\n"
rscript = "suppressMessages(library(DESeq2))\n"
rscript += "suppressMessages(library(RColorBrewer)); suppressMessages(library(ggplot2)); suppressMessages(library(gplots))\n"
rscript += "pdata <- read.table('{}', header=T)\n".format(tmpdesign)
#Make sure they match!
rscript += "counts <- read.table('{}', sep='\\t', header=T, row.names=1)\n".format(ifile)
if f:
rscript += "counts <- counts[,6:dim(counts)[2]]\n"
else:
pass
if cqn:
rscript += "x <- read.table('{}', sep=\"\\t\", header=T)\n".format(gc_file)
rscript += "g_length <- x[,4]-x[,3]+1; gc <- x[,2]/100; y <- cbind(g_length, gc); rownames(y) <- x[,1]\n"
rscript += "id <- match(rownames(counts), rownames(y)); z <- y[id,]; z2 <- z[complete.cases(z),]; id <- match(rownames(z2), rownames(counts)); counts2 <- counts[id,]\n"
rscript += "cqn.subset <- cqn(counts2, lengths = z2[,1], x = z2[,2], sizeFactors = colSums(counts))\n"
rscript += "cqnOffset <- cqn.subset$glm.offset; cqnNormFactors <- exp(cqnOffset)\n"
rscript += "rnaseq_dds <- DESeqDataSetFromMatrix(countData = counts2, colData = data.frame(pdata), design = ~ condition)\n"
rscript += "rnaseq_dds$condition <- factor(rnaseq_dds$condition, levels=unique(pdata[,3]))\n"
rscript += "normalizationFactors(rnaseq_dds) <- cqnNormFactors"
else:
rscript += "rnaseq_dds <- DESeqDataSetFromMatrix(countData = counts, colData = data.frame(pdata), design = ~ condition)\n"
rscript += "rnaseq_dds$condition <- factor(rnaseq_dds$condition, levels=unique(pdata[,3]))\n"
rscript += "rnaseq_dds <- DESeq(rnaseq_dds)\n"
rscript += "rnaseq_res <- results(rnaseq_dds, contrast=c('condition','{0}','{1}'))\n".format(cond1, cond2)
rscript += "rnaseq_sig <- rnaseq_res[which(rnaseq_res$padj <= {}),]\n".format(padj)
rscript += "write.table(rnaseq_sig, file='{2}/{0}_vs_{1}_deseq2_significant.tsv', sep='\\t', quote=F)\n".format(cond1, cond2, outdir)
rscript += "write.table(rnaseq_res, file='{2}/{0}_vs_{1}_deseq2_analysis.tsv', sep='\\t', quote=F)\n".format(cond1, cond2, outdir)
rscript += "rld<-rlog(rnaseq_dds); colnames(rld) <- pdata$sampleName; rlogMat<-assay(rld); distsRL<-dist(t(assay(rld)))\n"
rscript += "pdf('{0}/Sample-RLD-plots.pdf'); hmcol<-colorRampPalette(brewer.pal(9,'GnBu'))(100); mat<-as.matrix(distsRL)\n".format(outdir)
rscript += "hc<-hclust(distsRL); par(cex.main=1);\n";
rscript += "heatmap.2(mat,Rowv=as.dendrogram(hc),symm=TRUE,trace='none',col=rev(hmcol),margin=c(13,13),main='Sample-to-sample distances',cexRow=1,cexCol=1)\n"
rscript += "data<-plotPCA(rld,intgroup=c('condition'),returnData=TRUE, ntop = nrow(rld)); percentVar<-round(100*attr(data,'percentVar'))\n"
rscript += "ggplot(data,aes(PC1, PC2,color=condition,label=names))+geom_point(size=3)+xlab(paste0('PC1: ',percentVar[1],'% variance'))+ylab(paste0('PC2: ',percentVar[2],'% variance')) +geom_text(colour = 'black', alpha = 0.8, size = 2)\n"
rscript += "dev.off()\n"
return rscript
def get_cqn(ifile, sample_dict, f, outfile, gc_file, tmpdesign):
print "==> Running differental expression analysis...\n"
rscript = "suppressMessages(library(cqn)); suppressMessages(library(scales))\n"
rscript += "pdata <- read.table('{}', header=T)\n".format(tmpdesign)
#Make sure they match!
rscript += "counts <- read.table('{}', sep='\\t', header=T, row.names=1)\n".format(ifile)
if f:
rscript += "counts <- counts[,6:dim(counts)[2]]\n"
else:
pass
rscript += "x <- read.table('{}', sep=\"\\t\", header=T)\n".format(gc_file)
rscript += "g_length <- x[,4]-x[,3]+1; gc <- x[,2]/100; y <- cbind(g_length, gc); rownames(y) <- x[,1]\n"
rscript += "id <- match(rownames(counts), rownames(y)); z <- y[id,]; z2 <- z[complete.cases(z),]; id <- match(rownames(z2), rownames(counts)); counts2 <- counts[id,]\n"
rscript += "cqn.subset <- cqn(counts2, lengths = z2[,1], x = z2[,2], sizeFactors = colSums(counts))\n"
rscript += "cqnOffset <- cqn.subset$glm.offset; cqnNormFactors <- exp(cqnOffset)\n"
rscript += "write.table(cqnNormFactors, file='{}', sep='\\t', quote=F)\n".format(outfile)
return rscript
def main():
parser = argparse.ArgumentParser(description='Differential expression for ATAC-seq experiments. Runs DESEQ2\n')
subparsers = parser.add_subparsers(help='Programs included',dest="subparser_name")
deseq2_parser = subparsers.add_parser('deseq', help="Runs DESEQ")
deseq2_parser.add_argument('-c','--config', help='Config file containing parameters, please see documentation for usage!', required=False)
deseq2_parser.add_argument('-i','--input', help='Combined counts file from HTSeq or pyrna_count.py',required=True)
deseq2_parser.add_argument('-p','--padj', help='Option for DESEQ2, default=0.05', default=0.05, required=False)
deseq2_parser.add_argument('-n', action='store_true', help='Use CQN normalisation instead of DESEQ2', required=False)
deseq2_parser.add_argument('-f', action='store_true', help='Use if featureCounts used as input', required=False)
deseq2_parser.add_argument('-o','--output', help='Output counts file directory, default is current directory', required=False)
norm_parser = subparsers.add_parser('norm', help="Outputs normalisation factors for samples in counts matrix")
norm_parser.add_argument('-c','--config', help='Config file containing parameters, please see documentation for usage!', required=False)
norm_parser.add_argument('-i','--input', help='Combined counts file from HTSeq or pyrna_count.py',required=True)
norm_parser.add_argument('-f', action='store_true', help='Use if featureCounts used as input', required=False)
norm_parser.add_argument('-o','--output', help='Output counts file', required=False)
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = vars(parser.parse_args())
gc_values= pkg_resources.resource_filename('pyatactools', 'data/gc_values.txt')
Config = ConfigParser.ConfigParser()
Config.optionxform = str
Config.read(args["config"])
conditions = ConfigSectionMap("Conditions", Config)
if args["output"]:
output = args["output"]
else:
output = os.getcwd()
if args["subparser_name"] == "deseq":
design = create_design_for_R(conditions)
comparisons = ConfigSectionMap("Comparisons", Config)
for comp in comparisons:
c = comparisons[comp].split(",")
comps = [x.strip(' ') for x in c]
rscript = write_deseq(args["input"], conditions, comps[0], comps[1], args["padj"], args["f"], output, gc_values, design, args["n"]) ##Needs changing!!!
run_rcode(rscript)
elif args["subparser_name"] == "norm":
design = create_design_for_R(conditions)
rscript = get_cqn(args["input"], conditions, args["f"], output, gc_values, design) ##Needs changing!!!
run_rcode(rscript)
|
pdl30/pyatactools
|
pyatactools/atac_norm.py
|
Python
|
gpl-2.0
| 8,449
|
[
"HTSeq"
] |
bc67a0ae1ce1c10ace085fefbb8e345bedd4a2cb2f43462cc2f0e3544a3cce32
|
import re
import mdtraj
from mdtraj.testing.docstrings import docstring_verifiers
from mdtraj.testing.docstrings import import_all_modules
SKIP_MODULES = [
    r'mdtraj\.utils\.external',
    r'mdtraj\.utils\.six',
    r'mdtraj\.utils\.unit\.unit_math',
    r'mdtraj\.utils\.unit\.baseunit',
    r'mdtraj\.utils\.unit\.prefix',
    r'mdtraj\.utils\.unit\.unit',
    r'mdtraj\.utils\.unit\.quantity',
    r'mdtraj\.utils\.unit\.mymatrix',
    r'mdtraj\.formats\.lh5',
    r'mdtraj\.formats\.hdf5',
    r'mdtraj\.formats\.pdb\.pdbstructure',
    r'mdtraj\.scripts',
    r'mdtraj\.testing\.docscrape',
    r'mdtraj\.io',
]
SKIP_FUNCTIONS = [
]
def test_all_docstrings():
for module in import_all_modules(mdtraj):
if any(re.search(s, module.__name__) for s in SKIP_MODULES):
continue
for test in docstring_verifiers(module):
if test.fname in SKIP_FUNCTIONS:
continue
yield test
|
casawa/mdtraj
|
mdtraj/tests/test_docstrings.py
|
Python
|
lgpl-2.1
| 937
|
[
"MDTraj"
] |
0014e13cbda8b8d0625cba5991a43888d4f16d075df0d3b32153e66263ecdb0d
|
'''
Defines the main analysis functions which are called by the
PyGeoNS executable.
'''
from __future__ import division
import numpy as np
import logging
import subprocess as sp
from pygeons.main.fit import fit
from pygeons.main.reml import reml
from pygeons.main.strain import strain
from pygeons.main.autoclean import autoclean
from pygeons.main.gptools import composite_units
from pygeons.main import gpnetwork
from pygeons.main import gpstation
from pygeons.mjd import mjd_inv,mjd
from pygeons.basemap import make_basemap
from pygeons.io.convert import dict_from_hdf5,hdf5_from_dict
logger = logging.getLogger(__name__)
def _params_dict(b):
'''
coerce the list *b* into a dictionary of hyperparameters for each
direction. The dictionary keys are 'east', 'north', and 'vertical'.
The dictionary values are each an N array of hyperparameters.
>>> b1 = [1.0,2.0]
>>> b2 = ['1.0','2.0']
>>> b3 = ['east','1.0','2.0','north','1.0','2.0','vertical','1.0','2.0']
'''
b = list(b)
msg = ('the hyperparameters must be a list of N floats or 3 lists '
'of N floats where each list is preceded by "east", "north", '
'or "vertical"')
  if ('east' in b) and ('north' in b) and ('vertical' in b):
if (len(b) % 3) != 0:
raise ValueError(msg)
arr = np.reshape(b,(3,-1))
dirs = arr[:,0].astype(str) # directions
vals = arr[:,1:].astype(float) # hyperparameter array
out = dict(zip(dirs,vals))
# make sure the keys contain 'east', 'north', and 'vertical'
if set(out.keys()) != set(['east','north','vertical']):
raise ValueError(msg)
else:
try:
arr = np.array(b,dtype=float)
except ValueError:
raise ValueError(msg)
out = {'east':arr,
'north':arr,
'vertical':arr}
return out
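# Illustrative note, completing the docstring examples above (array reprs
# abbreviated):
#   _params_dict(b1) -> {'east': array([1., 2.]), 'north': array([1., 2.]),
#                        'vertical': array([1., 2.])}
#   _params_dict(b2) behaves like b1 (strings are cast to float), and
#   _params_dict(b3) builds the same dictionary from per-direction triples
#   instead of broadcasting one list.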
def _remove_extension(f):
'''remove file extension if one exists'''
if '.' not in f:
return f
else:
return '.'.join(f.split('.')[:-1])
def _log_fit(input_file,
network_model,network_params,
station_model,station_params,
output_file):
msg = '\n'
msg += '---------------- PYGEONS FIT RUN INFORMATION -----------------\n\n'
msg += 'input file : %s\n' % input_file
msg += 'network :\n'
msg += ' model : %s\n' % ', '.join(network_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(network_model,gpnetwork.CONSTRUCTORS))
msg += ' east parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['east']])
msg += ' north parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['north']])
msg += ' vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['vertical']])
msg += 'station :\n'
msg += ' model : %s\n' % ', '.join(station_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(station_model,gpstation.CONSTRUCTORS))
msg += ' east parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['east']])
msg += ' north parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['north']])
msg += ' vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['vertical']])
msg += 'output file : %s\n\n' % output_file
msg += '--------------------------------------------------------------\n'
logger.info(msg)
return
def _log_autoclean(input_file,
network_model,network_params,
station_model,station_params,
outlier_tol,
output_file):
msg = '\n'
msg += '------------- PYGEONS AUTOCLEAN RUN INFORMATION --------------\n\n'
msg += 'input file : %s\n' % input_file
msg += 'network :\n'
msg += ' model : %s\n' % ', '.join(network_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(network_model,gpnetwork.CONSTRUCTORS))
msg += ' east parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['east']])
msg += ' north parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['north']])
msg += ' vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['vertical']])
msg += 'station :\n'
msg += ' model : %s\n' % ', '.join(station_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(station_model,gpstation.CONSTRUCTORS))
msg += ' east parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['east']])
msg += ' north parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['north']])
msg += ' vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['vertical']])
msg += 'outlier tolerance : %s\n' % outlier_tol
msg += 'output file : %s\n\n' % output_file
msg += '--------------------------------------------------------------\n'
logger.info(msg)
return
def _log_reml(input_file,
network_model,network_params,network_fix,
station_model,station_params,station_fix,
output_file):
msg = '\n'
msg += '---------------- PYGEONS REML RUN INFORMATION ----------------\n\n'
msg += 'input file : %s\n' % input_file
msg += 'network :\n'
msg += ' model : %s\n' % ', '.join(network_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(network_model,gpnetwork.CONSTRUCTORS))
msg += ' fixed parameters : %s\n' % ', '.join(network_fix.astype(str))
msg += ' initial east parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['east']])
msg += ' initial north parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['north']])
msg += ' initial vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['vertical']])
msg += 'station :\n'
msg += ' model : %s\n' % ', '.join(station_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(station_model,gpstation.CONSTRUCTORS))
msg += ' fixed parameters : %s\n' % ', '.join(station_fix.astype(str))
msg += ' initial east parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['east']])
msg += ' initial north parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['north']])
msg += ' initial vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['vertical']])
msg += 'output file : %s\n\n' % output_file
msg += '--------------------------------------------------------------\n'
logger.info(msg)
return msg
def _log_reml_results(input_file,
network_model,network_params,network_fix,
station_model,station_params,station_fix,
likelihood,output_file):
msg = '\n'
msg += '-------------------- PYGEONS REML RESULTS --------------------\n\n'
msg += 'input file : %s\n' % input_file
msg += 'network :\n'
msg += ' model : %s\n' % ' '.join(network_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(network_model,gpnetwork.CONSTRUCTORS))
msg += ' fixed parameters : %s\n' % ', '.join(network_fix.astype(str))
msg += ' optimal east parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['east']])
msg += ' optimal north parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['north']])
msg += ' optimal vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_params['vertical']])
msg += 'station :\n'
msg += ' model : %s\n' % ' '.join(station_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(station_model,gpstation.CONSTRUCTORS))
msg += ' fixed parameters : %s\n' % ', '.join(station_fix.astype(str))
msg += ' optimal east parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['east']])
msg += ' optimal north parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['north']])
msg += ' optimal vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_params['vertical']])
msg += 'log likelihood :\n'
msg += ' east : %s\n' % likelihood['east']
msg += ' north : %s\n' % likelihood['north']
msg += ' vertical : %s\n' % likelihood['vertical']
msg += 'output file : %s\n\n' % output_file
msg += '--------------------------------------------------------------\n'
logger.info(msg)
return msg
def _log_strain(input_file,
network_prior_model,network_prior_params,
network_noise_model,network_noise_params,
station_noise_model,station_noise_params,
start_date,stop_date,output_id,rate,vertical,
covariance,output_dx_file,output_dy_file):
msg = '\n'
msg += '--------------- PYGEONS STRAIN RUN INFORMATION ---------------\n\n'
msg += 'input file : %s\n' % input_file
msg += 'network prior :\n'
msg += ' model : %s\n' % ', '.join(network_prior_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(network_prior_model,gpnetwork.CONSTRUCTORS))
msg += ' east parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_prior_params['east']])
msg += ' north parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_prior_params['north']])
msg += ' vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_prior_params['vertical']])
msg += 'network noise :\n'
msg += ' model : %s\n' % ' '.join(network_noise_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(network_noise_model,gpnetwork.CONSTRUCTORS))
msg += ' east parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_noise_params['east']])
msg += ' north parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_noise_params['north']])
msg += ' vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in network_noise_params['vertical']])
msg += 'station noise :\n'
msg += ' model : %s\n' % ', '.join(station_noise_model)
msg += ' parameter units : %s\n' % ', '.join(composite_units(station_noise_model,gpstation.CONSTRUCTORS))
msg += ' east parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_noise_params['east']])
msg += ' north parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_noise_params['north']])
msg += ' vertical parameters : %s\n' % ', '.join(['%0.4e' % i for i in station_noise_params['vertical']])
msg += 'start date for output : %s\n' % start_date
msg += 'stop date for output : %s\n' % stop_date
msg += 'number of output positions : %s\n' % len(output_id)
msg += 'ignore vertical deformation : %s\n' % (not vertical)
msg += 'return strain rates : %s\n' % rate
msg += 'return covariances : %s\n' % covariance
msg += 'output east derivative file : %s\n' % output_dx_file
msg += 'output north derivative file : %s\n\n' % output_dy_file
msg += '--------------------------------------------------------------\n'
logger.info(msg)
def pygeons_fit(input_file,
network_model=('spwen12-se',),
network_params=(1.0,0.1,100.0),
station_model=('linear',),
station_params=(),
output_stem=None):
'''
Condition the Gaussian process to the observations and evaluate the
posterior at the observation points.
'''
logger.info('Running pygeons fit ...')
data = dict_from_hdf5(input_file)
if data['time_exponent'] != 0:
raise ValueError('input dataset must have units of displacement')
if data['space_exponent'] != 1:
raise ValueError('input dataset must have units of displacement')
# create output dictionary
out = dict((k,np.copy(v)) for k,v in data.iteritems())
# convert params to a dictionary of hyperparameters for each direction
network_params = _params_dict(network_params)
station_params = _params_dict(station_params)
# make output file name
if output_stem is None:
output_stem = _remove_extension(input_file) + '.fit'
output_file = output_stem + '.h5'
# convert geodetic positions to cartesian
bm = make_basemap(data['longitude'],data['latitude'])
x,y = bm(data['longitude'],data['latitude'])
xy = np.array([x,y]).T
_log_fit(input_file,
network_model,network_params,
station_model,station_params,
output_file)
for dir in ['east','north','vertical']:
u,su = fit(t=data['time'][:,None],
x=xy,
d=data[dir],
sd=data[dir+'_std_dev'],
network_model=network_model,
network_params=network_params[dir],
station_model=station_model,
station_params=station_params[dir])
out[dir] = u
out[dir+'_std_dev'] = su
hdf5_from_dict(output_file,out)
logger.info('Posterior fit written to %s' % output_file)
return
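# Illustrative usage (hypothetical file name): pygeons_fit('network.h5')
# conditions the default 'spwen12-se' network model and 'linear' station
# model on the displacements in network.h5 and writes the posterior mean and
# standard deviation for each direction to network.fit.h5.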
def pygeons_autoclean(input_file,
network_model=('spwen12-se',),
network_params=(1.0,0.1,100.0),
station_model=('linear',),
station_params=(),
output_stem=None,
outlier_tol=4.0):
'''
Remove outliers with a data editing algorithm
'''
logger.info('Running pygeons autoclean ...')
data = dict_from_hdf5(input_file)
if data['time_exponent'] != 0:
raise ValueError('input dataset must have units of displacement')
if data['space_exponent'] != 1:
raise ValueError('input dataset must have units of displacement')
# dictionary which will contain the edited data
out = dict((k,np.copy(v)) for k,v in data.iteritems())
# convert params to a dictionary of hyperparameters for each direction
network_params = _params_dict(network_params)
station_params = _params_dict(station_params)
# make output file name
if output_stem is None:
output_stem = _remove_extension(input_file) + '.autoclean'
output_file = output_stem + '.h5'
# convert geodetic positions to cartesian
bm = make_basemap(data['longitude'],data['latitude'])
x,y = bm(data['longitude'],data['latitude'])
xy = np.array([x,y]).T
_log_autoclean(input_file,
network_model,network_params,
station_model,station_params,
outlier_tol,
output_file)
for dir in ['east','north','vertical']:
de,sde = autoclean(t=data['time'][:,None],
x=xy,
d=data[dir],
sd=data[dir+'_std_dev'],
network_model=network_model,
network_params=network_params[dir],
station_model=station_model,
station_params=station_params[dir],
tol=outlier_tol)
out[dir] = de
out[dir+'_std_dev'] = sde
hdf5_from_dict(output_file,out)
logger.info('Edited data written to %s' % output_file)
return
def pygeons_reml(input_file,
network_model=('spwen12-se',),
network_params=(1.0,0.1,100.0),
network_fix=(),
station_model=('linear',),
station_params=(),
station_fix=(),
output_stem=None):
'''
Restricted maximum likelihood estimation
'''
logger.info('Running pygeons reml ...')
data = dict_from_hdf5(input_file)
if data['time_exponent'] != 0:
raise ValueError('input dataset must have units of displacement')
if data['space_exponent'] != 1:
raise ValueError('input dataset must have units of displacement')
# convert params to a dictionary of hyperparameters for each direction
network_params = _params_dict(network_params)
network_fix = np.asarray(network_fix,dtype=int)
station_params = _params_dict(station_params)
station_fix = np.asarray(station_fix,dtype=int)
# make output file name
if output_stem is None:
output_stem = _remove_extension(input_file) + '.reml'
output_file = output_stem + '.txt'
# convert geodetic positions to cartesian
bm = make_basemap(data['longitude'],data['latitude'])
x,y = bm(data['longitude'],data['latitude'])
xy = np.array([x,y]).T
# call "pygeons info" on the input data file. pipe the results to
# the output file
sp.call('pygeons info %s > %s' % (input_file,output_file),shell=True)
msg = _log_reml(input_file,
network_model,network_params,network_fix,
station_model,station_params,station_fix,
output_file)
# write log entry to file
with open(output_file,'a') as fout:
fout.write(msg)
# make a dictionary storing likelihoods
likelihood = {}
for dir in ['east','north','vertical']:
net_opt,sta_opt,like = reml(t=data['time'][:,None],
x=xy,
d=data[dir],
sd=data[dir+'_std_dev'],
network_model=network_model,
network_params=network_params[dir],
network_fix=network_fix,
station_model=station_model,
station_params=station_params[dir],
station_fix=station_fix)
# update the parameter dict with the optimal values
network_params[dir] = net_opt
station_params[dir] = sta_opt
likelihood[dir] = like
msg = _log_reml_results(input_file,
network_model,network_params,network_fix,
station_model,station_params,station_fix,
likelihood,output_file)
# write log entry to file
with open(output_file,'a') as fout:
fout.write(msg)
logger.info('Optimal parameters written to %s' % output_file)
return
def pygeons_strain(input_file,
network_prior_model=('spwen12-se',),
network_prior_params=(1.0,0.1,100.0),
network_noise_model=(),
network_noise_params=(),
station_noise_model=('linear',),
station_noise_params=(),
start_date=None,stop_date=None,
positions=None,positions_file=None,
rate=True,vertical=True,covariance=False,
output_stem=None):
'''
calculates strain
'''
logger.info('Running pygeons strain ...')
data = dict_from_hdf5(input_file)
if data['time_exponent'] != 0:
raise ValueError('input dataset must have units of displacement')
if data['space_exponent'] != 1:
raise ValueError('input dataset must have units of displacement')
out_dx = dict((k,np.copy(v)) for k,v in data.iteritems())
out_dy = dict((k,np.copy(v)) for k,v in data.iteritems())
# convert params to a dictionary of hyperparameters for each direction
network_prior_params = _params_dict(network_prior_params)
network_noise_params = _params_dict(network_noise_params)
station_noise_params = _params_dict(station_noise_params)
# convert geodetic input positions to cartesian
bm = make_basemap(data['longitude'],data['latitude'])
x,y = bm(data['longitude'],data['latitude'])
xy = np.array([x,y]).T
# set output positions
  if (positions is None) and (positions_file is None):
# no output positions were specified so return the solution at the
# input data positions
output_id = np.array(data['id'],copy=True)
output_lon = np.array(data['longitude'],copy=True)
output_lat = np.array(data['latitude'],copy=True)
else:
output_id = np.zeros((0,),dtype=str)
output_lon = np.zeros((0,),dtype=float)
output_lat = np.zeros((0,),dtype=float)
if positions_file is not None:
# if positions file was specified
pos = np.loadtxt(positions_file,dtype=str,ndmin=2)
if pos.shape[1] != 3:
raise ValueError(
'positions file must contain a column for IDs, longitudes, '
'and latitudes')
output_id = np.hstack((output_id,pos[:,0]))
output_lon = np.hstack((output_lon,pos[:,1].astype(float)))
output_lat = np.hstack((output_lat,pos[:,2].astype(float)))
if positions is not None:
# if positions were specified via the command line
pos = np.array(positions,dtype=str).reshape((-1,3))
output_id = np.hstack((output_id,pos[:,0]))
output_lon = np.hstack((output_lon,pos[:,1].astype(float)))
output_lat = np.hstack((output_lat,pos[:,2].astype(float)))
# convert geodetic output positions to cartesian
output_x,output_y = bm(output_lon,output_lat)
output_xy = np.array([output_x,output_y]).T
# set output times
if start_date is None:
start_date = mjd_inv(np.min(data['time']),'%Y-%m-%d')
if stop_date is None:
stop_date = mjd_inv(np.max(data['time']),'%Y-%m-%d')
start_time = mjd(start_date,'%Y-%m-%d')
stop_time = mjd(stop_date,'%Y-%m-%d')
output_time = np.arange(start_time,stop_time+1)
# set output file names
if output_stem is None:
output_stem = _remove_extension(input_file) + '.strain'
output_dx_file = output_stem + '.dudx.h5'
output_dy_file = output_stem + '.dudy.h5'
_log_strain(input_file,
network_prior_model,network_prior_params,
network_noise_model,network_noise_params,
station_noise_model,station_noise_params,
start_date,stop_date,output_id,rate,vertical,
covariance,output_dx_file,output_dy_file)
for dir in ['east','north','vertical']:
    if (dir == 'vertical') and (not vertical):
logger.debug('Not computing vertical deformation gradients')
# do not compute the deformation gradients for vertical. Just
# return zeros.
dx = np.zeros((output_time.shape[0],output_xy.shape[0]))
sdx = np.zeros((output_time.shape[0],output_xy.shape[0]))
dy = np.zeros((output_time.shape[0],output_xy.shape[0]))
sdy = np.zeros((output_time.shape[0],output_xy.shape[0]))
if covariance:
# if covariance is True then create an empty array of
# covariances
cdx = np.zeros((output_time.shape[0],output_xy.shape[0],
output_time.shape[0],output_xy.shape[0]))
cdy = np.zeros((output_time.shape[0],output_xy.shape[0],
output_time.shape[0],output_xy.shape[0]))
soln = (dx,sdx,cdx,dy,sdy,cdy)
else:
soln = (dx,sdx,dy,sdy)
else:
soln = strain(t=data['time'][:,None],
x=xy,
d=data[dir],
sd=data[dir+'_std_dev'],
network_prior_model=network_prior_model,
network_prior_params=network_prior_params[dir],
network_noise_model=network_noise_model,
network_noise_params=network_noise_params[dir],
station_noise_model=station_noise_model,
station_noise_params=station_noise_params[dir],
out_t=output_time[:,None],
out_x=output_xy,
rate=rate,
covariance=covariance)
if covariance:
# soln contains six entries when covariance is True
dx,sdx,cdx,dy,sdy,cdy = soln
out_dx[dir] = dx
out_dx[dir+'_std_dev'] = sdx
out_dx[dir+'_covariance'] = cdx
out_dy[dir] = dy
out_dy[dir+'_std_dev'] = sdy
out_dy[dir+'_covariance'] = cdy
else:
# soln contains four entries when covariance is False
dx,sdx,dy,sdy = soln
out_dx[dir] = dx
out_dx[dir+'_std_dev'] = sdx
out_dy[dir] = dy
out_dy[dir+'_std_dev'] = sdy
out_dx['time'] = output_time
out_dx['longitude'] = output_lon
out_dx['latitude'] = output_lat
out_dx['id'] = output_id
out_dx['time_exponent'] = -int(rate)
out_dx['space_exponent'] = 0
out_dy['time'] = output_time
out_dy['longitude'] = output_lon
out_dy['latitude'] = output_lat
out_dy['id'] = output_id
out_dy['time_exponent'] = -int(rate)
out_dy['space_exponent'] = 0
hdf5_from_dict(output_dx_file,out_dx)
hdf5_from_dict(output_dy_file,out_dy)
if rate:
logger.info('Posterior velocity gradients written to %s and %s' % (output_dx_file,output_dy_file))
else:
logger.info('Posterior displacement gradients written to %s and %s' % (output_dx_file,output_dy_file))
return
|
treverhines/PyGeoNS
|
pygeons/main/main.py
|
Python
|
mit
| 24,474
|
[
"Gaussian"
] |
69398f889b6a74f739d022e2d2eb448231b46b41837bff7d865918bf5f9c1dba
|
# module based on EGADS/netcdf_io.py created by Matt Freer in the EUFAR framework
import netCDF4
class FileCore(object):
def __init__(self, filename=None, perms='r', **kwargs):
self.f = None
self.filename = filename
self.perms = perms
for key, val in iter(kwargs.items()):
setattr(self, key, val)
if filename is not None:
self._open_file(filename, perms)
def open(self, filename, perms=None):
if perms is not None:
self.perms = perms
else:
perms = self.perms
self._open_file(filename, perms)
def close(self):
if self.f is not None:
self.f.close()
self.f = None
self.filename = None
def get_perms(self):
if self.f is not None:
return self.perms
else:
return
def get_filename(self):
return self.filename
class NetCdf(FileCore):
def __del__(self):
if self.f is not None:
self.f.close()
def open(self, filename, perms=None):
FileCore.open(self, filename, perms)
def get_attribute_list(self, varname=None):
return self._get_attribute_list(varname)
def get_attribute_value(self, attrname, varname=None):
attrs = self._get_attribute_list(varname)
return attrs[attrname]
def get_dimension_list(self, varname=None):
return self._get_dimension_list(varname)
def get_variable_list(self):
return self._get_variable_list()
def get_perms(self):
if self.f is not None:
return self.perms
else:
return
def read_variable(self, varname, input_range=None):
try:
varin = self.f.variables[varname]
except KeyError:
raise KeyError("ERROR: Variable %s does not exist in %s" % (varname, self.filename))
except Exception:
print("Error: Unexpected error")
raise
if input_range is None:
value = varin[:]
else:
obj = 'slice(input_range[0], input_range[1])'
for i in range(2, len(input_range), 2):
obj = obj + ', slice(input_range[%i], input_range[%i])' % (i, i + 1)
value = varin[eval(obj)]
return value
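    # Illustrative alternative (behavior-equivalent sketch, not used above):
    # the eval-based indexing builds a tuple of slice objects, which can be
    # constructed directly without eval:
    #   slices = tuple(slice(input_range[i], input_range[i + 1])
    #                  for i in range(0, len(input_range), 2))
    #   value = varin[slices]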
def _open_file(self, filename, perms):
self.close()
try:
self.f = netCDF4.Dataset(filename, perms) # @UndefinedVariable
self.filename = filename
self.perms = perms
except RuntimeError:
raise RuntimeError("ERROR: File %s doesn't exist" % (filename))
except Exception:
print("ERROR: Unexpected error")
raise
def _get_attribute_list(self, var=None):
if self.f is not None:
if var is not None:
varin = self.f.variables[var]
return varin.__dict__
else:
return self.f.__dict__
        else:
            raise RuntimeError("ERROR: No file is currently open")
def _get_dimension_list(self, var=None):
dimdict = {}
if self.f is not None:
file_dims = self.f.dimensions
if var is not None:
varin = self.f.variables[var]
dims = varin.dimensions
for dimname in dims:
dimobj = file_dims[dimname]
dimdict[dimname] = len(dimobj)
else:
dims = file_dims
                for dimname, dimobj in dims.items():
dimdict[dimname] = len(dimobj)
return dimdict
        else:
            raise RuntimeError("ERROR: No file is currently open")
def _get_variable_list(self):
if self.f is not None:
return self.f.variables.keys()
        else:
            raise RuntimeError("ERROR: No file is currently open")
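# Illustrative usage sketch (hypothetical file and variable names):
#   nc = NetCdf('obs.nc')                # opens read-only by default
#   names = nc.get_variable_list()
#   t = nc.read_variable('temperature', input_range=[0, 10])  # first 10 rows
#   nc.close()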
|
eufarn7sp/asmm-eufar
|
functions/netcdf_lite.py
|
Python
|
bsd-3-clause
| 3,788
|
[
"NetCDF"
] |
728ed1591895b0238aaa3b9eeaec0e773a323f0ad3a93302f9a9ce3d645acce6
|
# coding: utf-8
#
# drums-backend a simple interactive audio sampler that plays vorbis samples
# Copyright (C) 2009 C.D. Immanuel Albrecht
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from math import *
import random
import copy
class pgrammar(object):
"""a p-grammar is a grammar that works in a probabilistic way"""
def __init__(self, rules):
""" initialize the pgrammar object, where rules is a dictionary
mapping non-terminals to lists of (omega, p) tuples """
self.rules = rules
self.nonterminals = self.rules.keys()
self.p_sums = {}
self.lower_bounds = {}
self.random_maps = {}
for A in self.nonterminals:
total = sum( map( lambda x: x[1], self.rules[A] ) )
self.p_sums[A] = total
if total <= 0.0:
total = 1.0
self.lower_bounds[A] = []
bound = 0.0
boundmapstring = "0"
count = 0
for r in self.rules[A]:
self.lower_bounds[A].append(bound)
boundmapstring = repr(count) + " if (x>=" + repr(bound/total) \
+ ") else " + boundmapstring
bound += r[1]
count += 1
boundmapstring = "lambda x: " + boundmapstring
#print A,':',boundmapstring
self.random_maps[A] = lambda x,A=A,fn=eval(boundmapstring):\
self.rules[A][fn(x)][0]
def __call__(self, word):
tokens_list = filter(lambda x: x[1], word)
tokens = {}
for t in tokens_list:
T = repr(t)
if not T in tokens:
def rulemap(x):
if x[1]:
return (x[0],t[1]+[x[1]])
else:
return (x[0],[])
tokens[T] = map(rulemap, \
self.random_maps[t[0]](random.uniform(0.0,1.0)))
w = []
for x in word:
X = repr(x)
if X in tokens:
w.extend(tokens[X])
else: # make unknown non-terminals terminals
w.append((x[0],[]))
return w
def projectTerminals(word):
return map(lambda x: x[0], filter(lambda x: not x[1], word))
def hasNonTerminals(word):
for k in word:
if k[1]:
return True
return False
def definiteTerminals(word):
terms = []
for k in word:
if k[1]:
return terms
terms.append(k[0])
return terms
def countDefiniteTerminals(word):
count = 0
for k in word:
if k[1]:
return count
count += 1
return count
def makeRule(code):
""" make a rule data structure from code:
NONTERMINAL: *TERMINAL(NUMBER) ... *TERMINAL(NUMBER): PVALUE
where NUMBER=0 means terminal, and all nonterminals with
the same NUMBER refer to the same instances of nonterminals. """
c = map(lambda x: x.strip(), code.split(':'))
if len(c) == 2:
c.append("1.0")
if not len(c) == 3:
raise Exception("Wrong rule format: "+str(code))
lefthand = c[0].strip()
pvalue = float(c[2])
termnums = filter(lambda x:x, c[1].split(")"))
def int0(x):
try:
return int(x)
except ValueError:
return 0
def mk_sym(x):
s = x.split("(")
if not len(s) == 2:
raise Exception("Wrong bracket format: " + str(code))
return (s[0].strip(),int0(s[1]))
symbols = map(mk_sym, termnums)
righthand = [(symbols, pvalue)]
return {lefthand: righthand}
def makeWord(code):
termnums = filter(lambda x:x, code.split(")"))
def int0(x):
try:
return int(x)
except ValueError:
return 0
def lint0(x):
q = int0(x)
if q:
return [q]
else:
return []
def mk_sym(x):
s = x.split("(")
if not len(s) == 2:
raise Exception("Wrong bracket format: " + str(code))
return (s[0].strip(),lint0(s[1]))
symbols = map(mk_sym, termnums)
return symbols
def simplifyWord(word):
simplificator = {tuple(): []}
varc = 1
new_word = []
for x in word:
try:
v = simplificator[tuple(x[1])]
except KeyError:
v = simplificator[tuple(x[1])] = [varc]
varc += 1
new_word.append((x[0], v))
return new_word
def addRules(x, add):
for k in add:
x.setdefault(k,[]).extend(add[k])
def makeRules(rules, default=None):
rlist = filter(lambda x:x, map(lambda x:x.strip(), rules.split("\n")))
if default:
r = copy.deepcopy(default)
else:
r = {}
for x in rlist:
addRules(r, makeRule(x))
return r
if __name__ == '__main__':
rules = {}
rules['A'] = [([('A',1),('B',1),('A',1),('A',2)], 0.5),\
([('C',1)], 1.1)]
rules['B'] = [([('!',0)],0)]
rules['C'] = [([('_',0)],0)]
g = pgrammar(rules)
initial = [('A',[1]),('A',[])]
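    # Hedged completion of this demo (added; not in the original file). It
    # relies on Python 2 map/filter returning lists, as the module does.
    # Rewrite until only terminals remain, renumbering variable tags between
    # steps, then print the resulting terminal string (capped at 1000 steps).
    word = initial
    for _ in range(1000):
        if not hasNonTerminals(word):
            break
        word = simplifyWord(g(word))
    print(''.join(projectTerminals(word)))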
class neuron(object):
"""a neuron has an inner level, an input level and output synapses"""
def __init__(self, level=None, synapses=None, delta=None):
""" initialize a neuron with its level and output synapses-weighting """
if level:
self.level = level
else:
self.level = 0.5
if synapses:
self.synapses = synapses
else:
self.synapses = {}
#
# self.synapses maps function objects on sets of neurons
#
self.input = 0
if delta:
self.delta = delta
else:
self.delta = lambda x: min(1.0,max(0.0,(x+self.level)))
def react(self):
self.level = self.delta(self.input)
self.input = 0
def feed(self):
for fn in self.synapses:
y = fn(self.level)
for n in self.synapses[fn]:
n.input += y
class cluster(object):
"""a cluster is a set of neurons that may have synapse connections"""
def __init__(self, count=32, renormalization=None):
""" initialize a neuron cluster with count neurons """
self.neurons = [neuron() for i in range(count)]
self.renormalization = renormalization
def feedback(self):
for n in self.neurons:
n.feed()
for n in self.neurons:
n.react()
if self.renormalization:
factor = self.renormalization / sum(map(lambda n:n.level, self.neurons))
for n in self.neurons:
n.level *= factor
def shock(self, energy):
for n in self.neurons:
n.level += energy
def jolt(self, energy):
for n in self.neurons:
n.input += energy
def connect(self, i, fn, j):
self.neurons[i].synapses.setdefault(fn, set()).add(self.neurons[j])
def levels(self):
return map(lambda n: n.level, self.neurons)
def trigger_fn(limit,high,low=0.0):
def fn(x,limit=limit,high=high,low=low):
if x >= limit:
return high
return low
return fn
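# Hedged usage sketch for the neuron/cluster machinery above (added; not part
# of the original file): a 3-neuron ring where each neuron sends 0.3 to the
# next one once its own level reaches 0.6.
def demo_ring(steps=5):
    ring = cluster(count=3)
    for i in range(3):
        ring.connect(i, trigger_fn(0.6, 0.3), (i + 1) % 3)
    ring.shock(0.2)  # lift all levels from the default 0.5 to 0.7
    for _ in range(steps):
        ring.feedback()
    return list(ring.levels())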
|
immo/pyTOM
|
df/df_experimental.py
|
Python
|
gpl-3.0
| 7,868
|
[
"NEURON"
] |
2a5b8db4673ae754ea16ae5b87867d26d9aef6a210ce567ebfb590e98113f09d
|
import os
import time
from core.data.data_migrations import insert_all_permissions
from django.conf import settings
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.common.exceptions import WebDriverException, TimeoutException, ElementNotVisibleException, StaleElementReferenceException
from faker import Factory as FakerFactory
from django.utils import timezone
from django.core.urlresolvers import reverse
from core.authentication import is_production_server, is_staging_server
from core.models.general import Icon, SiteConfiguration
from core.models.users import AgripoUser as User
from functional_tests.page_home_page import HomePage
DEFAULT_TIMEOUT = 15
SCREEN_DUMP_LOCATION = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'screendumps'
)
def quit_if_possible(browser):
try:
browser.quit()
except ProcessLookupError:
pass
class FunctionalTest(StaticLiveServerTestCase):
def populate_db(self, news_count=0, products_count=0, categories_count=0):
from core.data.icons import import_icons
import_icons(Icon)
url = reverse('populate_db', kwargs=dict(
news_count=news_count, products_count=products_count, categories_count=categories_count))
self.show_page(url, searched_element="ok")
@classmethod
def setUpClass(cls):
if is_production_server():
cls.server_url = 'http://not_a_valid_domain_name/at_all'
elif is_staging_server():
cls.server_url = 'http://' + settings.SERVER_URL
else:
super().setUpClass()
cls.server_url = cls.live_server_url
def setUp(self):
if is_production_server():
self.fail("Tests should never be launched on production server")
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(0)
self.faker = FakerFactory.create('fr_FR')
self.faker.seed(None)
        # @todo remove this when it is not needed anymore (should be done by the migration #3 and some others)
from django.contrib.auth.models import Group
if not Group.objects.all():
insert_all_permissions()
# End of the removable stuff
self.config = SiteConfiguration.objects.get()
def tearDown(self):
if self._test_has_failed():
if not os.path.exists(SCREEN_DUMP_LOCATION):
os.makedirs(SCREEN_DUMP_LOCATION)
for ix, handle in enumerate(self.browser.window_handles):
self._windowid = ix
self.browser.switch_to.window(handle)
self.take_screenshot()
self.dump_html()
self.browser.quit()
super().tearDown()
def _test_has_failed(self):
# for 3.4. In 3.3, can just use self._outcomeForDoCleanups.success:
for method, error in self._outcome.errors:
no_real_errors = ['Active development pointer', 'Test not implemented yet']
if error and (str(error[1]) not in no_real_errors):
return True
return False
def not_implemented(self):
self.fail("Test not implemented yet")
def dev_point(self, delay=0):
if delay:
time.sleep(delay)
self.fail("Active development pointer")
def insert_flat_pages_contents(self):
from core.data.data_migrations import insert_flatpages_contents
insert_flatpages_contents()
def assert_is_hidden(self, element_value, by='id'):
try:
self.assert_is_displayed(element_value, by=by)
self.fail("Element with {}=\"{}\" should have been hidden".format(by, element_value))
except ElementNotVisibleException:
pass
def assert_is_displayed(self, element_id, by='id'):
if by == 'id':
el = self.browser.find_element_by_id(element_id)
elif by == 'class':
el = self.browser.find_element_by_class_name(element_id)
el.click() # should not raise
def admin_save(self, next_page=None):
self.browser.find_element_by_css_selector('input[name="_save"]').click()
# We wait for the confirmation to show up
self.wait_for(lambda: self.browser.find_element_by_css_selector("li.success"), 10)
if next_page:
self.assertEqual(self.browser.current_url, "{}{}".format(self.server_url, next_page))
return self.browser.find_element_by_css_selector("li.success").get_attribute('innerHTML')
def take_screenshot(self):
filename = self._get_filename() + '.png'
print('screenshotting to', filename)
self.browser.get_screenshot_as_file(filename)
def dump_html(self):
filename = self._get_filename() + '.html'
print('dumping page HTML to', filename)
with open(filename, 'w') as f:
f.write(self.browser.page_source)
def _get_filename(self):
timestamp = timezone.now().isoformat().replace(':', '.')[:19]
return '{folder}/{classname}.{method}-window{windowid}-{timestamp}'.format(
folder=SCREEN_DUMP_LOCATION,
classname=self.__class__.__name__,
method=self._testMethodName,
windowid=self._windowid,
timestamp=timestamp
)
def wait(self, duration):
time.sleep(duration)
def wait_for_element_with_id(self, element_id, timeout=DEFAULT_TIMEOUT):
selector = '#{}'.format(element_id)
return self.wait_for_element_with_selector(selector, timeout)
def wait_for_element_with_selector(self, selector, timeout=DEFAULT_TIMEOUT):
WebDriverWait(self.browser, timeout=timeout).until(
lambda b: b.find_element_by_css_selector(selector),
'Could not find element with selector "{}".'.format(selector)
)
return self.browser.find_element_by_css_selector(selector)
def wait_for_stale(self, existing_element, timeout=DEFAULT_TIMEOUT):
def element_has_gone_stale(element):
try:
element.is_displayed()
return False
except StaleElementReferenceException:
return True
while not element_has_gone_stale(existing_element):
timeout -= 0.1
self.wait(0.1)
if timeout <= 0:
raise Exception("The element {} never became stale".format(self._get_readable_identifier(existing_element)))
def click_link(self, link, timeout=DEFAULT_TIMEOUT, search_in=None, changes_page=True):
if timeout > 0:
self.wait_for_link_with_destination(link, timeout=timeout, search_in=search_in)
link = self.get_link_by_destination(link, search_in=search_in)
link.click()
if changes_page:
            self.wait_for_stale(link)
def _get_readable_identifier(self, element):
id = "#{}".format(element.id)
if "{" in id:
id = ""
classes = element.get_attribute("class").replace(" ", ".")
if classes:
classes = "." + classes
return "{}{}{}".format(element.tag_name, id, classes)
def wait_for_element_to_be_displayed(self, element, timeout=DEFAULT_TIMEOUT):
self.wait_for(
lambda: self.assertTrue(
element.is_displayed(),
"{} should have been displayed by now".format(self._get_readable_identifier(element))),
timeout, exception=AssertionError)
return element
def get_link_by_destination(self, destination, search_in=None):
if not search_in:
search_in = self.browser
return search_in.find_element_by_css_selector('a[href="{}"]'.format(destination))
def wait_for_link_with_destination(self, destination, timeout=DEFAULT_TIMEOUT, search_in=None):
WebDriverWait(self, timeout=timeout).until(
lambda b: b.get_link_by_destination(destination, search_in=search_in),
'Could not find link with href={}'.format(destination)
)
def get_element_content_by_id(self, id_element):
return self.browser.find_element_by_id(id_element).text
def get_body_content(self):
return self.browser.find_element_by_css_selector('body').text
def logout(self):
HomePage(self).get_logout_button().click()
self.wait_to_be_logged_out()
def wait_to_be_logged_in(self, timeout=DEFAULT_TIMEOUT):
self.wait_for_element_with_id(HomePage(self).id_logout, timeout=timeout)
def wait_to_be_logged_out(self):
self.wait_for_element_with_id(HomePage(self).id_login)
def assertElementNotFoundById(self, element_id):
try:
self.wait_for_element_with_id(element_id, 2)
except TimeoutException:
return True
self.fail("The element with id=#{} shouldn't have been found in the dom".format(element_id))
def wait_for(self, function_with_assertion, timeout=DEFAULT_TIMEOUT, exception=(AssertionError, WebDriverException)):
start_time = time.time()
while time.time() - start_time < timeout:
try:
return function_with_assertion()
except exception:
time.sleep(0.1)
# one more try, which will raise any errors if they are outstanding
return function_with_assertion()
def show_page(self, page, timeout=DEFAULT_TIMEOUT, searched_element='id_top_container'):
if page[0] != '/':
page = '/{}'.format(page)
self.browser.get("{}{}".format(self.server_url, page))
return self.wait_for(lambda: self.browser.find_element_by_id(searched_element), timeout)
def show_admin_page(self, *args, directly=False):
if not directly:
self.show_page('admin/', searched_element="user-tools")
next_page = '/admin/'
for arg in args:
next_page += arg + "/"
self.click_link(next_page, timeout=DEFAULT_TIMEOUT)
else:
self.show_page('admin/{}/'.format("/".join(args)), searched_element="user-tools")
def create_autoconnected_session(self, email, as_manager=False, as_farmer=False):
active_page = self.browser.current_url
# We visit the page that immediately creates the session :
if as_manager:
page = 'auto_manager_login'
connected_as = "manager"
else:
page = 'auto_login'
connected_as = "user"
page = "{}/core/{}/{}?as_farmer={}".format(self.server_url, page, email, as_farmer)
print("Getting page {}".format(page))
self.browser.get(page)
# We check that we are authenticated
body = self.browser.find_element_by_css_selector('body')
self.assertEqual("{} is connected as {}".format(email, connected_as), body.text)
# We go back to previous page or home page if there was none
if active_page != "about:blank":
self.browser.get(active_page)
else:
HomePage(self).show()
self.wait_to_be_logged_in()
return self
def select_option_by_text(self, select_id, option_text, raise_error_if_not_found=True):
return self.select_option(select_id, "text", option_text, raise_error_if_not_found)
def select_option_by_index(self, select_id, index, raise_error_if_not_found=True):
return self.select_option(select_id, "index", index, raise_error_if_not_found)
def select_option(self, select_id, by, value, raise_error_if_not_found=True):
select = Select(self.browser.find_element_by_id(select_id))
index = 0
for opt in select.options:
if by == "text" and opt.text == value:
opt.click()
return
if by == "index" and index == value:
opt.click()
return
index += 1
if raise_error_if_not_found:
if by == "index":
error_message = 'No option with index {} in the select #{}'
else:
error_message = 'Option with value "{}" not found in the select #{}'
raise ValueError(error_message.format(value, select_id))
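# Hedged illustration of how this base class is meant to be subclassed (added;
# the test and element id below are examples, not part of the real suite):
class ExampleSmokeTest(FunctionalTest):
    def test_home_page_displays_top_container(self):
        self.show_page('/')
        self.assert_is_displayed('id_top_container')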
|
agripo/website
|
functional_tests/base.py
|
Python
|
gpl-2.0
| 12,329
|
[
"VisIt"
] |
ff3a755d524f8154f55d555f22ecd8ee234c42ae05e420423880da4b2654d73c
|
from astropy.io import fits
from astropy import units as u
import numpy as np
from astropy import constants
from FITS_tools import cube_regrid
def extract_subcube(cubefilename, outfilename, linefreq=218.22219*u.GHz,
debug=False, smooth=False, vsmooth=False):
ffile = fits.open(cubefilename)
# I don't know why this is necessary, but there were weird bugs showing up
# if I did not add this (some parts of the cube defaulted to 3e-319)
ffile[0].data = ffile[0].data.astype('float32')
hdr = ffile[0].header
cdk = 'CD3_3' if 'CD3_3' in hdr else 'CDELT3'
if debug:
xarr = (np.arange(hdr['NAXIS3'])+1-hdr['CRPIX3']) * hdr[cdk] + hdr['CRVAL3']
print xarr.min(),xarr.max()
hdr['CTYPE3'] = 'VELO'
cdfrq = hdr[cdk]
crvfreq = hdr['CRVAL3']
crpixf = hdr['CRPIX3']
hdr[cdk] = -((cdfrq/crvfreq)*constants.c).to(u.km/u.s).value
hdr['CRVAL3'] = 0.0
hdr['CRPIX3'] = (linefreq.to(u.Hz).value-crvfreq)/cdfrq + crpixf
hdr['CUNIT3'] = 'km/s'
if debug:
xarr = (np.arange(hdr['NAXIS3'])+1-hdr['CRPIX3']) * hdr[cdk] + hdr['CRVAL3']
print xarr.min(),xarr.max()
for k in ['CTYPE3','CRVAL3',cdk,'CRPIX3','CUNIT3']:
assert ffile[0].header[k] == hdr[k]
outhdr = hdr.copy()
outhdr[cdk] = 1.0
outhdr['RESTFREQ'] = linefreq.to(u.Hz).value
outhdr['RESTFRQ'] = linefreq.to(u.Hz).value
outhdr['CRVAL3'] = 50
outhdr['CRPIX3'] = 199.5
outhdr['NAXIS3'] = 400
if smooth:
#cubesm = gsmooth_cube(ffile[0].data, [3,2,2], use_fft=True,
# psf_pad=False, fft_pad=False)
# smoothed with 2 pixels -> sigma=10", fwhm=23"
# this is an "optimal smooth", boosting s/n and smoothing to 36"
# resolution.
kw = 2 if not vsmooth else 4
cubesm = cube_regrid.spatial_smooth_cube(ffile[0].data, kw,
use_fft=False,
numcores=4)
cubesm = cube_regrid.spectral_smooth_cube(cubesm, 3/2.35,
use_fft=False,
numcores=4)
ffile[0].data = cubesm
outhdr[cdk] = 3.0
outhdr['CRVAL3'] = 50
outhdr['CRPIX3'] = 70
outhdr['NAXIS3'] = 140
if debug:
xarr = (np.arange(outhdr['NAXIS3'])+1-outhdr['CRPIX3']) * outhdr[cdk] + outhdr['CRVAL3']
print xarr.min(),xarr.max()
xarr = (-xarr/3e5) * linefreq + linefreq
print xarr.min(),xarr.max()
return hdr,outhdr
newhdu = cube_regrid.regrid_cube_hdu(ffile[0], outhdr, order=1,
prefilter=False)
newhdu.writeto(outfilename, clobber=True)
return newhdu
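# Hedged usage sketch (added; not in the original module). Both file names are
# hypothetical; this writes a velocity-regridded subcube around the default
# 218.22219 GHz rest frequency, plus a spatially/spectrally smoothed version.
if __name__ == '__main__':
    extract_subcube('input_cube.fits', 'subcube.fits')
    extract_subcube('input_cube.fits', 'subcube_smooth.fits', smooth=True)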
|
keflavich/apex_otf_tools
|
apex_otf_tools/extract_subcube.py
|
Python
|
bsd-3-clause
| 2,814
|
[
"CDK"
] |
c8cb359ff3bda6a511400cd89a03e0fda66a1cf271867bcd3dcadf26e46f6666
|
###### WELCOME THIS IS THE SOURCE CODE OF Snakez ########
print( "\033[1;34m _____ __" )
print( " / ___/____ ____ _/ /_____ ____")
print( " \__ \/ __ \/ __ `/ //_/ _ \/_ / ")
print( " ___/ / / / / /_/ / ,< / __/ / /_ ")
print("/____/_/ /_/\__,_/_/|_|\___/ /___/ FRAMEWORK CREATED BY : \033[1;37m\033[1;45msplinterk\033[1;m\r\n")
print("\033[1;34mMORE INFO AT : \033[1;37m\033[1;45mhttp://localhost:43110/1GaZYNPuLFN9KPz3nVAs2tojwiJjyGuA1Q\033[1;m\r\n")
print ("\r\n\033[1;34m[*] Welcome to Snakez, the web framework to create HTML,CSS,Javascript and PHP fast and easily\r\n\033[1;37m")
print("\033[1;32m[!] You have to install ZeroNet (\033[1;m\033[1;45mhttps://zeronet.io/\033[1;m\033[1;32m) and then visit Snakez's official site (\033[1;m\033[1;45mhttp://localhost:43110/1GaZYNPuLFN9KPz3nVAs2tojwiJjyGuA1Q\033[1;m\033[1;32m)\033[1;m\r\n")
|
splinterk/snakez
|
snakez.py
|
Python
|
gpl-3.0
| 876
|
[
"VisIt"
] |
a0421db123c99fd1cf93ff7472bfb2bc8875061326a7f6c126c2045e60571a73
|
"""
DIRAC.StorageManagementSystem.Service package
"""
|
DIRACGrid/DIRAC
|
src/DIRAC/StorageManagementSystem/Service/__init__.py
|
Python
|
gpl-3.0
| 57
|
[
"DIRAC"
] |
6e0d0d94d9ce0a759eea763fa33c668d1b77492b04690788b2f1d88e64badb6f
|
"""
[8/22/2012] Challenge #90 [difficult] (Alien P=NP reductions)
https://www.reddit.com/r/dailyprogrammer/comments/ynw9q/8222012_challenge_90_difficult_alien_pnp/
Ancient aliens have come down from earth and announced that, clearly, P=NP, and they've been trying to explain it to us
for years. Something about it being encoded in the pyramids they helped us build...or...something.
In order to help human scientific progress, the aliens have begun distributing crystal alien technology nodes that are
oracle solutions to NP-complete problems. Despite being very strange looking, they have USB cable ports and provide an
API for all human computer languages and operating systems. This API provides a single
function "alien_knapsack()" which takes in an instance of the knapsack problem and immediately returns the solution (it
uses alien time-travel technology or something).
Write a program demonstrating how you can solve your favorite NP-complete problem by calling "alien_knapsack" as a
black-box. You can pick any problem you want and show how to reduce it to an instance of the knapsack problem. If you
are finding your code difficult to test, then see the bonus problem.
BONUS: Write an implementation of the alien_knapsack() function in a human programming language. Obviously this does
not have to be a polynomial-time solution.
"""
def main():
    # Minimal self-check of the hedged sketch above (hand-picked numbers):
    assert subset_sum([3, 34, 4, 12, 5, 2], 9)        # 4 + 5 == 9
    assert not subset_sum([3, 34, 4, 12, 5, 2], 30)
    print("subset-sum -> knapsack reduction holds on the examples")
if __name__ == "__main__":
main()
|
DayGitH/Python-Challenges
|
DailyProgrammer/DP20120822C.py
|
Python
|
mit
| 1,417
|
[
"CRYSTAL"
] |
926219bd00aa1d75e2055a54f4f3008925189ab35883c99da342ac955894a06c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Produce word vectors with deep learning via word2vec's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_ [2]_.
NOTE: There are more ways to get word vectors in Gensim than just Word2Vec. See wrappers for FastText, VarEmbed and WordRank.
The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality.
For a blog tutorial on gensim word2vec, with an interactive web app trained on GoogleNews, visit http://radimrehurek.com/2014/02/word2vec-tutorial/
**Make sure you have a C compiler before installing gensim, to use optimized (compiled) word2vec training**
(70x speedup compared to plain NumPy implementation [3]_).
Initialize a model with e.g.::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Word2Vec.load(fname) # you can continue training with the loaded model!
The word vectors are stored in a KeyedVectors instance in model.wv. This separates the read-only word vector lookup operations in KeyedVectors from the training code in Word2Vec.
>>> model.wv['computer'] # numpy vector of a word
array([-0.00449447, -0.00310097, 0.02421786, ...], dtype=float32)
The word vectors can also be instantiated from an existing file on disk in the word2vec C format as a KeyedVectors instance::
NOTE: It is impossible to continue training the vectors loaded from the C format because hidden weights, vocabulary frequency and the binary tree is missing.
>>> from gensim.models.keyedvectors import KeyedVectors
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various NLP word tasks with the model. Some of them
are already built-in::
>>> model.wv.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> model.wv.most_similar_cosmul(positive=['woman', 'king'], negative=['man'])
[('queen', 0.71382287), ...]
>>> model.wv.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> model.wv.similarity('woman', 'man')
0.73723527
Probability of a text under the model::
>>> model.score(["The fox jumped over a lazy dog".split()])
0.2158356
Correlation with human opinion on word similarity::
>>> model.wv.evaluate_word_pairs(os.path.join(module_path, 'test_data','wordsim353.tsv'))
0.51, 0.62, 0.13
And on analogies::
>>> model.wv.accuracy(os.path.join(module_path, 'test_data', 'questions-words.txt'))
and so on.
If you're finished training a model (=no more updates, only querying), then switch to the :mod:`gensim.models.KeyedVectors` instance in wv
>>> word_vectors = model.wv
>>> del model
to trim unneeded model memory = use (much) less RAM.
Note that there is a :mod:`gensim.models.phrases` module which lets you automatically
detect phrases longer than one word. Using phrases, you can learn a word2vec model
where "words" are actually multiword expressions, such as `new_york_times` or `financial_crisis`:
>>> bigram_transformer = gensim.models.Phrases(sentences)
>>> model = Word2Vec(bigram_transformer[sentences], size=100, ...)
.. [1] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality.
In Proceedings of NIPS, 2013.
.. [3] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
from __future__ import division # py3 "true division"
import logging
import sys
import os
import heapq
from timeit import default_timer
from copy import deepcopy
from collections import defaultdict
import threading
import itertools
from gensim.utils import keep_vocab_item, call_on_class_only
from gensim.models.keyedvectors import KeyedVectors, Vocab
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
from numpy import exp, log, dot, zeros, outer, random, dtype, float32 as REAL,\
    double, uint32, seterr, array, uint8, vstack, fromstring, sqrt, newaxis,\
    ndarray, empty, sum as np_sum, prod, ones, ascontiguousarray, logaddexp
from scipy.special import expit
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.corpora.dictionary import Dictionary
from six import iteritems, itervalues, string_types
from six.moves import xrange
from types import GeneratorType
from scipy import stats
logger = logging.getLogger(__name__)
try:
from gensim.models.word2vec_inner import train_batch_sg, train_batch_cbow
from gensim.models.word2vec_inner import score_sentence_sg, score_sentence_cbow
from gensim.models.word2vec_inner import FAST_VERSION, MAX_WORDS_IN_BATCH
except ImportError:
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
MAX_WORDS_IN_BATCH = 10000
def train_batch_sg(model, sentences, alpha, work=None):
"""
Update skip-gram model by training on a sequence of sentences.
Each sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
result = 0
for sentence in sentences:
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab and
model.wv.vocab[w].sample_int > model.random.rand() * 2**32]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
# now go over all words from the (reduced) window, predicting each one in turn
start = max(0, pos - model.window + reduced_window)
for pos2, word2 in enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start):
# don't train on the `word` itself
if pos2 != pos:
train_sg_pair(model, model.wv.index2word[word.index], word2.index, alpha)
result += len(word_vocabs)
return result
def train_batch_cbow(model, sentences, alpha, work=None, neu1=None):
"""
Update CBOW model by training on a sequence of sentences.
Each sentence is a list of string tokens, which are looked up in the model's
vocab dictionary. Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
result = 0
for sentence in sentences:
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab and
model.wv.vocab[w].sample_int > model.random.rand() * 2**32]
for pos, word in enumerate(word_vocabs):
reduced_window = model.random.randint(model.window) # `b` in the original word2vec code
start = max(0, pos - model.window + reduced_window)
window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.wv.syn0[word2_indices], axis=0) # 1 x vector_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
train_cbow_pair(model, word, word2_indices, l1, alpha)
result += len(word_vocabs)
return result
def score_sentence_sg(model, sentence, work=None):
"""
        Obtain likelihood score for a single sentence in a fitted skip-gram representation.
The sentence is a list of Vocab objects (or None, when the corresponding
word is not in the vocabulary). Called internally from `Word2Vec.score()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
# now go over all words from the window, predicting each one in turn
start = max(0, pos - model.window)
for pos2, word2 in enumerate(word_vocabs[start : pos + model.window + 1], start):
# don't train on OOV words and on the `word` itself
if word2 is not None and pos2 != pos:
log_prob_sentence += score_sg_pair(model, word, word2)
return log_prob_sentence
def score_sentence_cbow(model, sentence, alpha, work=None, neu1=None):
"""
        Obtain likelihood score for a single sentence in a fitted CBOW representation.
        The sentence is a list of Vocab objects (or None, where the corresponding
        word is not in the vocabulary). Called internally from `Word2Vec.score()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
log_prob_sentence = 0.0
if model.negative:
raise RuntimeError("scoring is only available for HS=True")
word_vocabs = [model.wv.vocab[w] for w in sentence if w in model.wv.vocab]
for pos, word in enumerate(word_vocabs):
if word is None:
continue # OOV word in the input sentence => skip
start = max(0, pos - model.window)
window_pos = enumerate(word_vocabs[start:(pos + model.window + 1)], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.wv.syn0[word2_indices], axis=0) # 1 x layer1_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
log_prob_sentence += score_cbow_pair(model, word, word2_indices, l1)
return log_prob_sentence
def train_sg_pair(model, word, context_index, alpha, learn_vectors=True, learn_hidden=True,
context_vectors=None, context_locks=None):
if context_vectors is None:
context_vectors = model.wv.syn0
if context_locks is None:
context_locks = model.syn0_lockf
if word not in model.wv.vocab:
return
predict_word = model.wv.vocab[word] # target word (NN output)
l1 = context_vectors[context_index] # input word (NN input/projection layer)
lock_factor = context_locks[context_index]
neu1e = zeros(l1.shape)
if model.hs:
# work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
l2a = deepcopy(model.syn1[predict_word.point]) # 2d matrix, codelen x layer1_size
fa = expit(dot(l1, l2a.T)) # propagate hidden -> output
ga = (1 - predict_word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1[predict_word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [predict_word.index]
while len(word_indices) < model.negative + 1:
w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
if w != predict_word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
fb = expit(dot(l1, l2b.T)) # propagate hidden -> output
gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
if learn_vectors:
l1 += neu1e * lock_factor # learn input -> hidden (mutates model.wv.syn0[word2.index], if that is l1)
return neu1e
def train_cbow_pair(model, word, input_word_indices, l1, alpha, learn_vectors=True, learn_hidden=True):
neu1e = zeros(l1.shape)
if model.hs:
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
fa = expit(dot(l1, l2a.T)) # propagate hidden -> output
ga = (1. - word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1[word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [word.index]
while len(word_indices) < model.negative + 1:
w = model.cum_table.searchsorted(model.random.randint(model.cum_table[-1]))
if w != word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
fb = expit(dot(l1, l2b.T)) # propagate hidden -> output
gb = (model.neg_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
if learn_hidden:
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
if learn_vectors:
# learn input -> hidden, here for all words in the window separately
if not model.cbow_mean and input_word_indices:
neu1e /= len(input_word_indices)
for i in input_word_indices:
model.wv.syn0[i] += neu1e * model.syn0_lockf[i]
return neu1e
def score_sg_pair(model, word, word2):
l1 = model.wv.syn0[word2.index]
l2a = deepcopy(model.syn1[word.point]) # 2d matrix, codelen x layer1_size
sgn = (-1.0)**word.code # ch function, 0-> 1, 1 -> -1
lprob = -logaddexp(0, -sgn * dot(l1, l2a.T))
return sum(lprob)
def score_cbow_pair(model, word, word2_indices, l1):
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
sgn = (-1.0)**word.code # ch function, 0-> 1, 1 -> -1
lprob = -logaddexp(0, -sgn * dot(l1, l2a.T))
return sum(lprob)
class Word2Vec(utils.SaveLoad):
"""
Class for training, using and evaluating neural networks described in https://code.google.com/p/word2vec/
The model can be stored/loaded via its `save()` and `load()` methods, or stored/loaded in a format
compatible with the original word2vec implementation via `wv.save_word2vec_format()` and `KeyedVectors.load_word2vec_format()`.
"""
def __init__(
self, sentences=None, size=100, alpha=0.025, window=5, min_count=5,
max_vocab_size=None, sample=1e-3, seed=1, workers=3, min_alpha=0.0001,
sg=0, hs=0, negative=5, cbow_mean=1, hashfxn=hash, iter=5, null_word=0,
trim_rule=None, sorted_vocab=1, batch_words=MAX_WORDS_IN_BATCH):
"""
Initialize the model from an iterable of `sentences`. Each sentence is a
list of words (unicode strings) that will be used for training.
The `sentences` iterable can be simply a list, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`BrownCorpus`, :class:`Text8Corpus` or :class:`LineSentence` in
this module for such examples.
If you don't supply `sentences`, the model is left uninitialized -- use if
you plan to initialize it in some other way.
`sg` defines the training algorithm. By default (`sg=0`), CBOW is used.
Otherwise (`sg=1`), skip-gram is employed.
`size` is the dimensionality of the feature vectors.
`window` is the maximum distance between the current and predicted word within a sentence.
`alpha` is the initial learning rate (will linearly drop to `min_alpha` as training progresses).
`seed` = for the random number generator. Initial vectors for each
word are seeded with a hash of the concatenation of word + str(seed).
Note that for a fully deterministically-reproducible run, you must also limit the model to
a single worker thread, to eliminate ordering jitter from OS thread scheduling. (In Python
3, reproducibility between interpreter launches also requires use of the PYTHONHASHSEED
environment variable to control hash randomization.)
`min_count` = ignore all words with total frequency lower than this.
`max_vocab_size` = limit RAM during vocabulary building; if there are more unique
words than this, then prune the infrequent ones. Every 10 million word types
need about 1GB of RAM. Set to `None` for no limit (default).
`sample` = threshold for configuring which higher-frequency words are randomly downsampled;
default is 1e-3, useful range is (0, 1e-5).
`workers` = use this many worker threads to train the model (=faster training with multicore machines).
`hs` = if 1, hierarchical softmax will be used for model training.
If set to 0 (default), and `negative` is non-zero, negative sampling will be used.
`negative` = if > 0, negative sampling will be used, the int for negative
specifies how many "noise words" should be drawn (usually between 5-20).
        Default is 5. If set to 0, no negative sampling is used.
`cbow_mean` = if 0, use the sum of the context word vectors. If 1 (default), use the mean.
Only applies when cbow is used.
`hashfxn` = hash function to use to randomly initialize weights, for increased
training reproducibility. Default is Python's rudimentary built in hash function.
`iter` = number of iterations (epochs) over the corpus. Default is 5.
`trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and
returns either `utils.RULE_DISCARD`, `utils.RULE_KEEP` or `utils.RULE_DEFAULT`.
        Note: The rule, if given, is only used to prune vocabulary during build_vocab() and is not stored as part
of the model.
`sorted_vocab` = if 1 (default), sort the vocabulary by descending frequency before
assigning word indexes.
`batch_words` = target size (in words) for batches of examples passed to worker threads (and
thus cython routines). Default is 10000. (Larger batches will be passed if individual
texts are longer than 10000 words, but the standard cython code truncates to that maximum.)
"""
self.load = call_on_class_only
if FAST_VERSION == -1:
logger.warning('Slow version of {0} is being used'.format(__name__))
else:
logger.debug('Fast version of {0} is being used'.format(__name__))
self.initialize_word_vectors()
self.sg = int(sg)
self.cum_table = None # for negative sampling
self.vector_size = int(size)
self.layer1_size = int(size)
if size % 4 != 0:
logger.warning("consider setting layer size to a multiple of 4 for greater performance")
self.alpha = float(alpha)
self.min_alpha_yet_reached = float(alpha) # To warn user if alpha increases
self.window = int(window)
self.max_vocab_size = max_vocab_size
self.seed = seed
self.random = random.RandomState(seed)
self.min_count = min_count
self.sample = sample
self.workers = int(workers)
self.min_alpha = float(min_alpha)
self.hs = hs
self.negative = negative
self.cbow_mean = int(cbow_mean)
self.hashfxn = hashfxn
self.iter = iter
self.null_word = null_word
self.train_count = 0
self.total_train_time = 0
self.sorted_vocab = sorted_vocab
self.batch_words = batch_words
self.model_trimmed_post_training = False
if sentences is not None:
if isinstance(sentences, GeneratorType):
raise TypeError("You can't pass a generator as the sentences argument. Try an iterator.")
self.build_vocab(sentences, trim_rule=trim_rule)
self.train(sentences)
def initialize_word_vectors(self):
self.wv = KeyedVectors()
def make_cum_table(self, power=0.75, domain=2**31 - 1):
"""
Create a cumulative-distribution table using stored vocabulary word counts for
drawing random words in the negative-sampling training routines.
To draw a word index, choose a random integer up to the maximum value in the
table (cum_table[-1]), then finding that integer's sorted insertion point
(as if by bisect_left or ndarray.searchsorted()). That insertion point is the
drawn index, coming up in proportion equal to the increment at that slot.
Called internally from 'build_vocab()'.
"""
vocab_size = len(self.wv.index2word)
self.cum_table = zeros(vocab_size, dtype=uint32)
# compute sum of all power (Z in paper)
train_words_pow = 0.0
for word_index in xrange(vocab_size):
train_words_pow += self.wv.vocab[self.wv.index2word[word_index]].count**power
cumulative = 0.0
for word_index in xrange(vocab_size):
cumulative += self.wv.vocab[self.wv.index2word[word_index]].count**power
self.cum_table[word_index] = round(cumulative / train_words_pow * domain)
if len(self.cum_table) > 0:
assert self.cum_table[-1] == domain
def create_binary_tree(self):
"""
Create a binary Huffman tree using stored vocabulary word counts. Frequent words
will have shorter binary codes. Called internally from `build_vocab()`.
"""
logger.info("constructing a huffman tree from %i words", len(self.wv.vocab))
# build the huffman tree
heap = list(itervalues(self.wv.vocab))
heapq.heapify(heap)
for i in xrange(len(self.wv.vocab) - 1):
min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + len(self.wv.vocab), left=min1, right=min2))
# recurse over the tree, assigning a binary code to each vocabulary word
if heap:
max_depth, stack = 0, [(heap[0], [], [])]
while stack:
node, codes, points = stack.pop()
if node.index < len(self.wv.vocab):
# leaf node => store its path from the root
node.code, node.point = codes, points
max_depth = max(len(codes), max_depth)
else:
# inner node => continue recursion
points = array(list(points) + [node.index - len(self.wv.vocab)], dtype=uint32)
stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))
logger.info("built huffman tree with maximum node depth %i", max_depth)
def build_vocab(self, sentences, keep_raw_vocab=False, trim_rule=None, progress_per=10000, update=False):
"""
Build vocabulary from a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
"""
self.scan_vocab(sentences, progress_per=progress_per, trim_rule=trim_rule) # initial survey
self.scale_vocab(keep_raw_vocab=keep_raw_vocab, trim_rule=trim_rule, update=update) # trim by min_count & precalculate downsampling
self.finalize_vocab(update=update) # build tables & arrays
def scan_vocab(self, sentences, progress_per=10000, trim_rule=None):
"""Do an initial scan of all words appearing in sentences."""
logger.info("collecting all words and their counts")
sentence_no = -1
total_words = 0
min_reduce = 1
vocab = defaultdict(int)
checked_string_types = 0
for sentence_no, sentence in enumerate(sentences):
if not checked_string_types:
if isinstance(sentence, string_types):
logger.warn("Each 'sentences' item should be a list of words (usually unicode strings)."
"First item here is instead plain %s.", type(sentence))
checked_string_types += 1
if sentence_no % progress_per == 0:
logger.info("PROGRESS: at sentence #%i, processed %i words, keeping %i word types",
sentence_no, sum(itervalues(vocab)) + total_words, len(vocab))
for word in sentence:
vocab[word] += 1
if self.max_vocab_size and len(vocab) > self.max_vocab_size:
total_words += utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
min_reduce += 1
total_words += sum(itervalues(vocab))
logger.info("collected %i word types from a corpus of %i raw words and %i sentences",
len(vocab), total_words, sentence_no + 1)
self.corpus_count = sentence_no + 1
self.raw_vocab = vocab
def scale_vocab(self, min_count=None, sample=None, dry_run=False, keep_raw_vocab=False, trim_rule=None, update=False):
"""
Apply vocabulary settings for `min_count` (discarding less-frequent words)
and `sample` (controlling the downsampling of more-frequent words).
Calling with `dry_run=True` will only simulate the provided settings and
report the size of the retained vocabulary, effective corpus length, and
estimated memory requirements. Results are both printed via logging and
returned as a dict.
Delete the raw vocabulary after the scaling is done to free up RAM,
unless `keep_raw_vocab` is set.
"""
min_count = min_count or self.min_count
sample = sample or self.sample
drop_total = drop_unique = 0
if not update:
logger.info("Loading a fresh vocabulary")
retain_total, retain_words = 0, []
# Discard words less-frequent than min_count
if not dry_run:
self.wv.index2word = []
# make stored settings match these applied settings
self.min_count = min_count
self.sample = sample
self.wv.vocab = {}
for word, v in iteritems(self.raw_vocab):
if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):
retain_words.append(word)
retain_total += v
if not dry_run:
self.wv.vocab[word] = Vocab(count=v, index=len(self.wv.index2word))
self.wv.index2word.append(word)
else:
drop_unique += 1
drop_total += v
original_unique_total = len(retain_words) + drop_unique
retain_unique_pct = len(retain_words) * 100 / max(original_unique_total, 1)
logger.info("min_count=%d retains %i unique words (%i%% of original %i, drops %i)",
min_count, len(retain_words), retain_unique_pct, original_unique_total, drop_unique)
original_total = retain_total + drop_total
retain_pct = retain_total * 100 / max(original_total, 1)
logger.info("min_count=%d leaves %i word corpus (%i%% of original %i, drops %i)",
min_count, retain_total, retain_pct, original_total, drop_total)
else:
logger.info("Updating model with new vocabulary")
new_total = pre_exist_total = 0
new_words = pre_exist_words = []
for word, v in iteritems(self.raw_vocab):
if keep_vocab_item(word, v, min_count, trim_rule=trim_rule):
if word in self.wv.vocab:
pre_exist_words.append(word)
pre_exist_total += v
if not dry_run:
self.wv.vocab[word].count += v
else:
new_words.append(word)
new_total += v
if not dry_run:
self.wv.vocab[word] = Vocab(count=v, index=len(self.wv.index2word))
self.wv.index2word.append(word)
else:
drop_unique += 1
drop_total += v
original_unique_total = len(pre_exist_words) + len(new_words) + drop_unique
pre_exist_unique_pct = len(pre_exist_words) * 100 / max(original_unique_total, 1)
new_unique_pct = len(new_words) * 100 / max(original_unique_total, 1)
logger.info("""New added %i unique words (%i%% of original %i)
and increased the count of %i pre-existing words (%i%% of original %i)""",
len(new_words), new_unique_pct, original_unique_total,
len(pre_exist_words), pre_exist_unique_pct, original_unique_total)
retain_words = new_words + pre_exist_words
retain_total = new_total + pre_exist_total
# Precalculate each vocabulary item's threshold for sampling
if not sample:
# no words downsampled
threshold_count = retain_total
elif sample < 1.0:
# traditional meaning: set parameter as proportion of total
threshold_count = sample * retain_total
else:
# new shorthand: sample >= 1 means downsample all words with higher count than sample
threshold_count = int(sample * (3 + sqrt(5)) / 2)
downsample_total, downsample_unique = 0, 0
for w in retain_words:
v = self.raw_vocab[w]
word_probability = (sqrt(v / threshold_count) + 1) * (threshold_count / v)
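            # Worked example (added comment): if v == 100 * threshold_count,
            # word_probability = (10 + 1) * 0.01 = 0.11, so such a word is
            # kept in roughly 11% of its occurrences.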
if word_probability < 1.0:
downsample_unique += 1
downsample_total += word_probability * v
else:
word_probability = 1.0
downsample_total += v
if not dry_run:
self.wv.vocab[w].sample_int = int(round(word_probability * 2**32))
if not dry_run and not keep_raw_vocab:
logger.info("deleting the raw counts dictionary of %i items", len(self.raw_vocab))
self.raw_vocab = defaultdict(int)
logger.info("sample=%g downsamples %i most-common words", sample, downsample_unique)
logger.info("downsampling leaves estimated %i word corpus (%.1f%% of prior %i)",
downsample_total, downsample_total * 100.0 / max(retain_total, 1), retain_total)
# return from each step: words-affected, resulting-corpus-size
report_values = {'drop_unique': drop_unique, 'retain_total': retain_total,
'downsample_unique': downsample_unique, 'downsample_total': int(downsample_total)}
# print extra memory estimates
report_values['memory'] = self.estimate_memory(vocab_size=len(retain_words))
return report_values
def finalize_vocab(self, update=False):
"""Build tables and model weights based on final vocabulary settings."""
if not self.wv.index2word:
self.scale_vocab()
if self.sorted_vocab and not update:
self.sort_vocab()
if self.hs:
# add info about each word's Huffman encoding
self.create_binary_tree()
if self.negative:
# build the table for drawing random words (for negative sampling)
self.make_cum_table()
if self.null_word:
# create null pseudo-word for padding when using concatenative L1 (run-of-words)
# this word is only ever input – never predicted – so count, huffman-point, etc doesn't matter
word, v = '\0', Vocab(count=1, sample_int=0)
v.index = len(self.wv.vocab)
self.wv.index2word.append(word)
self.wv.vocab[word] = v
# set initial input/projection and hidden weights
if not update:
self.reset_weights()
else:
self.update_weights()
def sort_vocab(self):
"""Sort the vocabulary so the most frequent words have the lowest indexes."""
if len(self.wv.syn0):
raise RuntimeError("must sort before initializing vectors/weights")
self.wv.index2word.sort(key=lambda word: self.wv.vocab[word].count, reverse=True)
for i, word in enumerate(self.wv.index2word):
self.wv.vocab[word].index = i
def reset_from(self, other_model):
"""
Borrow shareable pre-built structures (like vocab) from the other_model. Useful
if testing multiple models in parallel on the same corpus.
"""
self.wv.vocab = other_model.vocab
self.wv.index2word = other_model.index2word
self.cum_table = other_model.cum_table
self.corpus_count = other_model.corpus_count
self.reset_weights()
def _do_train_job(self, sentences, alpha, inits):
"""
Train a single batch of sentences. Return 2-tuple `(effective word count after
ignoring unknown words and sentence length trimming, total word count)`.
"""
work, neu1 = inits
tally = 0
if self.sg:
tally += train_batch_sg(self, sentences, alpha, work)
else:
tally += train_batch_cbow(self, sentences, alpha, work, neu1)
return tally, self._raw_word_count(sentences)
def _raw_word_count(self, job):
"""Return the number of words in a given job."""
return sum(len(sentence) for sentence in job)
def train(self, sentences, total_words=None, word_count=0,
total_examples=None, queue_factor=2, report_delay=1.0):
"""
Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
For Word2Vec, each sentence must be a list of unicode strings. (Subclasses may accept other examples.)
To support linear learning-rate decay from (initial) alpha to min_alpha, either total_examples
(count of sentences) or total_words (count of raw words in sentences) should be provided, unless the
sentences are the same as those that were used to initially build the vocabulary.
"""
if (self.model_trimmed_post_training):
raise RuntimeError("Parameters for training were discarded using model_trimmed_post_training method")
if FAST_VERSION < 0:
import warnings
warnings.warn("C extension not loaded for Word2Vec, training will be slow. "
"Install a C compiler and reinstall gensim for fast training.")
self.neg_labels = []
if self.negative > 0:
# precompute negative labels optimization for pure-python training
self.neg_labels = zeros(self.negative + 1)
self.neg_labels[0] = 1.
logger.info(
"training model with %i workers on %i vocabulary and %i features, "
"using sg=%s hs=%s sample=%s negative=%s window=%s",
self.workers, len(self.wv.vocab), self.layer1_size, self.sg,
self.hs, self.sample, self.negative, self.window)
if not self.wv.vocab:
raise RuntimeError("you must first build vocabulary before training the model")
if not len(self.wv.syn0):
raise RuntimeError("you must first finalize vocabulary before training the model")
if not hasattr(self, 'corpus_count'):
raise ValueError(
"The number of sentences in the training corpus is missing. Did you load the model via KeyedVectors.load_word2vec_format?"
"Models loaded via load_word2vec_format don't support further training. "
"Instead start with a blank model, scan_vocab on the new corpus, intersect_word2vec_format with the old model, then train.")
if total_words is None and total_examples is None:
if self.corpus_count:
total_examples = self.corpus_count
logger.info("expecting %i sentences, matching count from corpus used for vocabulary survey", total_examples)
else:
raise ValueError("you must provide either total_words or total_examples, to enable alpha and progress calculations")
job_tally = 0
if self.iter > 1:
sentences = utils.RepeatCorpusNTimes(sentences, self.iter)
total_words = total_words and total_words * self.iter
total_examples = total_examples and total_examples * self.iter
def worker_loop():
"""Train the model, lifting lists of sentences from the job_queue."""
work = matutils.zeros_aligned(self.layer1_size, dtype=REAL) # per-thread private work memory
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
jobs_processed = 0
while True:
job = job_queue.get()
if job is None:
progress_queue.put(None)
break # no more jobs => quit this worker
sentences, alpha = job
tally, raw_tally = self._do_train_job(sentences, alpha, (work, neu1))
progress_queue.put((len(sentences), tally, raw_tally)) # report back progress
jobs_processed += 1
logger.debug("worker exiting, processed %i jobs", jobs_processed)
def job_producer():
"""Fill jobs queue using the input `sentences` iterator."""
job_batch, batch_size = [], 0
pushed_words, pushed_examples = 0, 0
next_alpha = self.alpha
if next_alpha > self.min_alpha_yet_reached:
logger.warn("Effective 'alpha' higher than previous training cycles")
self.min_alpha_yet_reached = next_alpha
job_no = 0
for sent_idx, sentence in enumerate(sentences):
sentence_length = self._raw_word_count([sentence])
# can we fit this sentence into the existing job batch?
if batch_size + sentence_length <= self.batch_words:
# yes => add it to the current job
job_batch.append(sentence)
batch_size += sentence_length
else:
# no => submit the existing job
logger.debug(
"queueing job #%i (%i words, %i sentences) at alpha %.05f",
job_no, batch_size, len(job_batch), next_alpha)
job_no += 1
job_queue.put((job_batch, next_alpha))
# update the learning rate for the next job
if self.min_alpha < next_alpha:
if total_examples:
# examples-based decay
pushed_examples += len(job_batch)
progress = 1.0 * pushed_examples / total_examples
else:
# words-based decay
pushed_words += self._raw_word_count(job_batch)
progress = 1.0 * pushed_words / total_words
next_alpha = self.alpha - (self.alpha - self.min_alpha) * progress
next_alpha = max(self.min_alpha, next_alpha)
# add the sentence that didn't fit as the first item of a new job
job_batch, batch_size = [sentence], sentence_length
# add the last job too (may be significantly smaller than batch_words)
if job_batch:
logger.debug(
"queueing job #%i (%i words, %i sentences) at alpha %.05f",
job_no, batch_size, len(job_batch), next_alpha)
job_no += 1
job_queue.put((job_batch, next_alpha))
if job_no == 0 and self.train_count == 0:
logger.warning(
"train() called with an empty iterator (if not intended, "
"be sure to provide a corpus that offers restartable "
"iteration = an iterable)."
)
# give the workers heads up that they can finish -- no more work!
for _ in xrange(self.workers):
job_queue.put(None)
logger.debug("job loop exiting, total %i jobs", job_no)
# buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
job_queue = Queue(maxsize=queue_factor * self.workers)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)]
unfinished_worker_count = len(workers)
workers.append(threading.Thread(target=job_producer))
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
example_count, trained_word_count, raw_word_count = 0, 0, word_count
start, next_report = default_timer() - 0.00001, 1.0
while unfinished_worker_count > 0:
report = progress_queue.get() # blocks if workers too slow
if report is None: # a thread reporting that it finished
unfinished_worker_count -= 1
logger.info("worker thread finished; awaiting finish of %i more threads", unfinished_worker_count)
continue
examples, trained_words, raw_words = report
job_tally += 1
# update progress stats
example_count += examples
trained_word_count += trained_words # only words in vocab & sampled
raw_word_count += raw_words
# log progress once every report_delay seconds
elapsed = default_timer() - start
if elapsed >= next_report:
if total_examples:
# examples-based progress %
logger.info(
"PROGRESS: at %.2f%% examples, %.0f words/s, in_qsize %i, out_qsize %i",
100.0 * example_count / total_examples, trained_word_count / elapsed,
utils.qsize(job_queue), utils.qsize(progress_queue))
else:
# words-based progress %
logger.info(
"PROGRESS: at %.2f%% words, %.0f words/s, in_qsize %i, out_qsize %i",
100.0 * raw_word_count / total_words, trained_word_count / elapsed,
utils.qsize(job_queue), utils.qsize(progress_queue))
next_report = elapsed + report_delay
# all done; report the final stats
elapsed = default_timer() - start
logger.info(
"training on %i raw words (%i effective words) took %.1fs, %.0f effective words/s",
raw_word_count, trained_word_count, elapsed, trained_word_count / elapsed)
if job_tally < 10 * self.workers:
logger.warn("under 10 jobs per worker: consider setting a smaller `batch_words' for smoother alpha decay")
# check that the input corpus hasn't changed during iteration
if total_examples and total_examples != example_count:
logger.warn("supplied example count (%i) did not equal expected count (%i)", example_count, total_examples)
if total_words and total_words != raw_word_count:
logger.warn("supplied raw word count (%i) did not equal expected count (%i)", raw_word_count, total_words)
self.train_count += 1 # number of times train() has been called
self.total_train_time += elapsed
self.clear_sims()
return trained_word_count
# basics copied from the train() function
def score(self, sentences, total_sentences=int(1e6), chunksize=100, queue_factor=2, report_delay=1):
"""
Score the log probability for a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
This does not change the fitted model in any way (see Word2Vec.train() for that).
We have currently only implemented score for the hierarchical softmax scheme,
so you need to have run word2vec with hs=1 and negative=0 for this to work.
        Note that you should specify `total_sentences`; we'll run into problems if you ask to
        score more sentences than this, but it is inefficient to set the value too high.
See the article by [taddy]_ and the gensim demo at [deepir]_ for examples of how to use such scores in document classification.
.. [taddy] Taddy, Matt. Document Classification by Inversion of Distributed Language Representations, in Proceedings of the 2015 Conference of the Association of Computational Linguistics.
.. [deepir] https://github.com/piskvorky/gensim/blob/develop/docs/notebooks/deepir.ipynb
"""
if FAST_VERSION < 0:
import warnings
warnings.warn("C extension compilation failed, scoring will be slow. "
"Install a C compiler and reinstall gensim for fastness.")
logger.info(
"scoring sentences with %i workers on %i vocabulary and %i features, "
"using sg=%s hs=%s sample=%s and negative=%s",
self.workers, len(self.wv.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative)
if not self.wv.vocab:
raise RuntimeError("you must first build vocabulary before scoring new data")
if not self.hs:
raise RuntimeError("We have currently only implemented score \
for the hierarchical softmax scheme, so you need to have \
run word2vec with hs=1 and negative=0 for this to work.")
def worker_loop():
"""Train the model, lifting lists of sentences from the jobs queue."""
work = zeros(1, dtype=REAL) # for sg hs, we actually only need one memory loc (running sum)
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
while True:
job = job_queue.get()
if job is None: # signal to finish
break
ns = 0
for sentence_id, sentence in job:
if sentence_id >= total_sentences:
break
if self.sg:
score = score_sentence_sg(self, sentence, work)
else:
score = score_sentence_cbow(self, sentence, work, neu1)
sentence_scores[sentence_id] = score
ns += 1
progress_queue.put(ns) # report progress
start, next_report = default_timer(), 1.0
        # buffer ahead only a limited number of jobs... this is the reason we can't simply use ThreadPool :(
job_queue = Queue(maxsize=queue_factor * self.workers)
progress_queue = Queue(maxsize=(queue_factor + 1) * self.workers)
workers = [threading.Thread(target=worker_loop) for _ in xrange(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
sentence_count = 0
sentence_scores = matutils.zeros_aligned(total_sentences, dtype=REAL)
push_done = False
done_jobs = 0
jobs_source = enumerate(utils.grouper(enumerate(sentences), chunksize))
# fill jobs queue with (id, sentence) job items
while True:
try:
job_no, items = next(jobs_source)
if (job_no - 1) * chunksize > total_sentences:
logger.warning(
"terminating after %i sentences (set higher total_sentences if you want more).",
total_sentences)
job_no -= 1
raise StopIteration()
logger.debug("putting job #%i in the queue", job_no)
job_queue.put(items)
except StopIteration:
logger.info(
"reached end of input; waiting to finish %i outstanding jobs",
job_no - done_jobs + 1)
for _ in xrange(self.workers):
job_queue.put(None) # give the workers heads up that they can finish -- no more work!
push_done = True
try:
while done_jobs < (job_no + 1) or not push_done:
ns = progress_queue.get(push_done) # only block after all jobs pushed
sentence_count += ns
done_jobs += 1
elapsed = default_timer() - start
if elapsed >= next_report:
                    logger.info(
                        "PROGRESS: at %.2f%% sentences, %.0f sentences/s",
                        100.0 * sentence_count / total_sentences, sentence_count / elapsed)
next_report = elapsed + report_delay # don't flood log, wait report_delay seconds
else:
# loop ended by job count; really done
break
except Empty:
pass # already out of loop; continue to next push
elapsed = default_timer() - start
self.clear_sims()
logger.info(
"scoring %i sentences took %.1fs, %.0f sentences/s",
sentence_count, elapsed, sentence_count / elapsed)
return sentence_scores[:sentence_count]
def clear_sims(self):
self.wv.syn0norm = None
def update_weights(self):
"""
Copy all the existing weights, and reset the weights for the newly
added vocabulary.
"""
logger.info("updating layer weights")
gained_vocab = len(self.wv.vocab) - len(self.wv.syn0)
newsyn0 = empty((gained_vocab, self.vector_size), dtype=REAL)
# randomize the remaining words
for i in xrange(len(self.wv.syn0), len(self.wv.vocab)):
# construct deterministic seed from word AND seed argument
newsyn0[i-len(self.wv.syn0)] = self.seeded_vector(self.wv.index2word[i] + str(self.seed))
self.wv.syn0 = vstack([self.wv.syn0, newsyn0])
if self.hs:
self.syn1 = vstack([self.syn1, zeros((gained_vocab, self.layer1_size), dtype=REAL)])
if self.negative:
self.syn1neg = vstack([self.syn1neg, zeros((gained_vocab, self.layer1_size), dtype=REAL)])
self.wv.syn0norm = None
# do not suppress learning for already learned words
self.syn0_lockf = ones(len(self.wv.vocab), dtype=REAL) # zeros suppress learning
def reset_weights(self):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
logger.info("resetting layer weights")
self.wv.syn0 = empty((len(self.wv.vocab), self.vector_size), dtype=REAL)
# randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
for i in xrange(len(self.wv.vocab)):
# construct deterministic seed from word AND seed argument
self.wv.syn0[i] = self.seeded_vector(self.wv.index2word[i] + str(self.seed))
if self.hs:
self.syn1 = zeros((len(self.wv.vocab), self.layer1_size), dtype=REAL)
if self.negative:
self.syn1neg = zeros((len(self.wv.vocab), self.layer1_size), dtype=REAL)
self.wv.syn0norm = None
self.syn0_lockf = ones(len(self.wv.vocab), dtype=REAL) # zeros suppress learning
def seeded_vector(self, seed_string):
"""Create one 'random' vector (but deterministic by seed_string)"""
# Note: built-in hash() may vary by Python version or even (in Py3.x) per launch
once = random.RandomState(self.hashfxn(seed_string) & 0xffffffff)
return (once.rand(self.vector_size) - 0.5) / self.vector_size
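    # Illustrative property (assuming the default hashfxn): the same string
    # always yields the same vector, which is what makes reset_weights() and
    # update_weights() reproducible for a fixed seed:
    #
    #   v1 = model.seeded_vector('word' + str(model.seed))
    #   v2 = model.seeded_vector('word' + str(model.seed))
    #   assert (v1 == v2).all()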
def intersect_word2vec_format(self, fname, lockf=0.0, binary=False, encoding='utf8', unicode_errors='strict'):
"""
Merge the input-hidden weight matrix from the original C word2vec-tool format
given, where it intersects with the current vocabulary. (No words are added to the
existing vocabulary, but intersecting words adopt the file's weights, and
non-intersecting words are left alone.)
`binary` is a boolean indicating whether the data is in binary word2vec format.
`lockf` is a lock-factor value to be set for any imported word-vectors; the
default value of 0.0 prevents further updating of the vector during subsequent
training. Use 1.0 to allow further training updates of merged vectors.
"""
overlap_count = 0
logger.info("loading projection weights from %s" % (fname))
with utils.smart_open(fname) as fin:
header = utils.to_unicode(fin.readline(), encoding=encoding)
vocab_size, vector_size = map(int, header.split()) # throws for invalid file format
if not vector_size == self.vector_size:
raise ValueError("incompatible vector size %d in file %s" % (vector_size, fname))
# TOCONSIDER: maybe mismatched vectors still useful enough to merge (truncating/padding)?
if binary:
binary_len = dtype(REAL).itemsize * vector_size
for line_no in xrange(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch != b'\n': # ignore newlines in front of words (some binary files have)
word.append(ch)
word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
weights = fromstring(fin.read(binary_len), dtype=REAL)
if word in self.wv.vocab:
overlap_count += 1
self.wv.syn0[self.wv.vocab[word].index] = weights
self.syn0_lockf[self.wv.vocab[word].index] = lockf # lock-factor: 0.0 stops further changes
else:
for line_no, line in enumerate(fin):
parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no))
word, weights = parts[0], list(map(REAL, parts[1:]))
if word in self.wv.vocab:
overlap_count += 1
self.wv.syn0[self.wv.vocab[word].index] = weights
logger.info("merged %d vectors into %s matrix from %s" % (overlap_count, self.wv.syn0.shape, fname))
def most_similar(self, positive=[], negative=[], topn=10, restrict_vocab=None, indexer=None):
return self.wv.most_similar(positive, negative, topn, restrict_vocab, indexer)
def wmdistance(self, document1, document2):
return self.wv.wmdistance(document1, document2)
def most_similar_cosmul(self, positive=[], negative=[], topn=10):
return self.wv.most_similar_cosmul(positive, negative, topn)
def similar_by_word(self, word, topn=10, restrict_vocab=None):
return self.wv.similar_by_word(word, topn, restrict_vocab)
def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
return self.wv.similar_by_vector(vector, topn, restrict_vocab)
def doesnt_match(self, words):
return self.wv.doesnt_match(words)
def __getitem__(self, words):
return self.wv.__getitem__(words)
def __contains__(self, word):
return self.wv.__contains__(word)
def similarity(self, w1, w2):
return self.wv.similarity(w1, w2)
def n_similarity(self, ws1, ws2):
return self.wv.n_similarity(ws1, ws2)
def init_sims(self, replace=False):
"""
        init_sims() resides in KeyedVectors because it deals mainly with syn0; but since syn1 is not an attribute
        of KeyedVectors, it has to be deleted in this class. The normalization of syn0 itself happens inside KeyedVectors.
"""
if replace and hasattr(self, 'syn1'):
del self.syn1
return self.wv.init_sims(replace)
def estimate_memory(self, vocab_size=None, report=None):
"""Estimate required memory for a model using current settings and provided vocabulary size."""
vocab_size = vocab_size or len(self.wv.vocab)
report = report or {}
report['vocab'] = vocab_size * (700 if self.hs else 500)
report['syn0'] = vocab_size * self.vector_size * dtype(REAL).itemsize
if self.hs:
report['syn1'] = vocab_size * self.layer1_size * dtype(REAL).itemsize
if self.negative:
report['syn1neg'] = vocab_size * self.layer1_size * dtype(REAL).itemsize
report['total'] = sum(report.values())
logger.info("estimated required memory for %i words and %i dimensions: %i bytes",
vocab_size, self.vector_size, report['total'])
return report
@staticmethod
def log_accuracy(section):
return KeyedVectors.log_accuracy(section)
def accuracy(self, questions, restrict_vocab=30000, most_similar=None, case_insensitive=True):
most_similar = most_similar or KeyedVectors.most_similar
return self.wv.accuracy(questions, restrict_vocab, most_similar, case_insensitive)
@staticmethod
def log_evaluate_word_pairs(pearson, spearman, oov, pairs):
return KeyedVectors.log_evaluate_word_pairs(pearson, spearman, oov, pairs)
def evaluate_word_pairs(self, pairs, delimiter='\t', restrict_vocab=300000, case_insensitive=True, dummy4unknown=False):
return self.wv.evaluate_word_pairs(pairs, delimiter, restrict_vocab, case_insensitive, dummy4unknown)
def __str__(self):
return "%s(vocab=%s, size=%s, alpha=%s)" % (self.__class__.__name__, len(self.wv.index2word), self.vector_size, self.alpha)
def _minimize_model(self, save_syn1 = False, save_syn1neg = False, save_syn0_lockf = False):
if hasattr(self, 'syn1') and not save_syn1:
del self.syn1
if hasattr(self, 'syn1neg') and not save_syn1neg:
del self.syn1neg
if hasattr(self, 'syn0_lockf') and not save_syn0_lockf:
del self.syn0_lockf
self.model_trimmed_post_training = True
def delete_temporary_training_data(self, replace_word_vectors_with_normalized=False):
"""
        Discard parameters that are used in training and scoring. Use this if you're sure you're done training a model.
        If `replace_word_vectors_with_normalized` is set, forget the original vectors and only keep the normalized
        ones; this saves lots of memory!
"""
if replace_word_vectors_with_normalized:
self.init_sims(replace=True)
self._minimize_model()
def save(self, *args, **kwargs):
# don't bother storing the cached normalized vectors, recalculable table
kwargs['ignore'] = kwargs.get('ignore', ['syn0norm', 'table', 'cum_table'])
super(Word2Vec, self).save(*args, **kwargs)
save.__doc__ = utils.SaveLoad.save.__doc__
@classmethod
def load(cls, *args, **kwargs):
model = super(Word2Vec, cls).load(*args, **kwargs)
# update older models
if hasattr(model, 'table'):
delattr(model, 'table') # discard in favor of cum_table
if model.negative and hasattr(model.wv, 'index2word'):
model.make_cum_table() # rebuild cum_table from vocabulary
if not hasattr(model, 'corpus_count'):
model.corpus_count = None
for v in model.wv.vocab.values():
if hasattr(v, 'sample_int'):
break # already 0.12.0+ style int probabilities
elif hasattr(v, 'sample_probability'):
v.sample_int = int(round(v.sample_probability * 2**32))
del v.sample_probability
if not hasattr(model, 'syn0_lockf') and hasattr(model, 'syn0'):
model.syn0_lockf = ones(len(model.wv.syn0), dtype=REAL)
if not hasattr(model, 'random'):
model.random = random.RandomState(model.seed)
if not hasattr(model, 'train_count'):
model.train_count = 0
model.total_train_time = 0
return model
def _load_specials(self, *args, **kwargs):
super(Word2Vec, self)._load_specials(*args, **kwargs)
# loading from a pre-KeyedVectors word2vec model
if not hasattr(self, 'wv'):
wv = KeyedVectors()
wv.syn0 = self.__dict__.get('syn0', [])
wv.syn0norm = self.__dict__.get('syn0norm', None)
wv.vocab = self.__dict__.get('vocab', {})
wv.index2word = self.__dict__.get('index2word', [])
self.wv = wv
@classmethod
def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
limit=None, datatype=REAL):
"""Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead."""
raise DeprecationWarning("Deprecated. Use gensim.models.KeyedVectors.load_word2vec_format instead.")
def save_word2vec_format(self, fname, fvocab=None, binary=False):
"""Deprecated. Use model.wv.save_word2vec_format instead."""
raise DeprecationWarning("Deprecated. Use model.wv.save_word2vec_format instead.")
class BrownCorpus(object):
"""Iterate over sentences from the Brown corpus (part of NLTK data)."""
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
fname = os.path.join(self.dirname, fname)
if not os.path.isfile(fname):
continue
for line in utils.smart_open(fname):
line = utils.to_unicode(line)
# each file line is a single sentence in the Brown corpus
# each token is WORD/POS_TAG
token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
# ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
if not words: # don't bother sending out empty sentences
continue
yield words
class Text8Corpus(object):
"""Iterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip ."""
def __init__(self, fname, max_sentence_length=MAX_WORDS_IN_BATCH):
self.fname = fname
self.max_sentence_length = max_sentence_length
def __iter__(self):
# the entire corpus is one gigantic line -- there are no sentence marks at all
        # so just split the sequence of tokens arbitrarily: 1 sentence = max_sentence_length tokens
sentence, rest = [], b''
with utils.smart_open(self.fname) as fin:
while True:
text = rest + fin.read(8192) # avoid loading the entire file (=1 line) into RAM
if text == rest: # EOF
words = utils.to_unicode(text).split()
sentence.extend(words) # return the last chunk of words, too (may be shorter/longer)
if sentence:
yield sentence
break
last_token = text.rfind(b' ') # last token may have been split in two... keep for next iteration
words, rest = (utils.to_unicode(text[:last_token]).split(),
text[last_token:].strip()) if last_token >= 0 else ([], text)
sentence.extend(words)
while len(sentence) >= self.max_sentence_length:
yield sentence[:self.max_sentence_length]
sentence = sentence[self.max_sentence_length:]
class LineSentence(object):
"""
Simple format: one sentence = one line; words already preprocessed and separated by whitespace.
"""
def __init__(self, source, max_sentence_length=MAX_WORDS_IN_BATCH, limit=None):
"""
        `source` can be either a string or a file object. Clip the file to the first
        `limit` lines (or no clipping if limit is None, the default).
Example::
sentences = LineSentence('myfile.txt')
Or for compressed files::
sentences = LineSentence('compressed_text.txt.bz2')
sentences = LineSentence('compressed_text.txt.gz')
"""
self.source = source
self.max_sentence_length = max_sentence_length
self.limit = limit
def __iter__(self):
"""Iterate through the lines in the source."""
try:
# Assume it is a file-like object and try treating it as such
# Things that don't have seek will trigger an exception
self.source.seek(0)
for line in itertools.islice(self.source, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i : i + self.max_sentence_length]
i += self.max_sentence_length
except AttributeError:
# If it didn't work like a file, use it as a string filename
with utils.smart_open(self.source) as fin:
for line in itertools.islice(fin, self.limit):
line = utils.to_unicode(line).split()
i = 0
while i < len(line):
yield line[i : i + self.max_sentence_length]
i += self.max_sentence_length
# Example: ./word2vec.py -train data.txt -output vec.txt -size 200 -window 5 -sample 1e-4 -negative 5 -hs 0 -binary 0 -cbow 1 -iter 3
if __name__ == "__main__":
import argparse
logging.basicConfig(
format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
level=logging.INFO)
logging.info("running %s", " ".join(sys.argv))
logging.info("using optimization %s", FAST_VERSION)
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
from gensim.models.word2vec import Word2Vec # avoid referencing __main__ in pickle
seterr(all='raise') # don't ignore numpy errors
parser = argparse.ArgumentParser()
parser.add_argument("-train", help="Use text data from file TRAIN to train the model", required=True)
parser.add_argument("-output", help="Use file OUTPUT to save the resulting word vectors")
parser.add_argument("-window", help="Set max skip length WINDOW between words; default is 5", type=int, default=5)
parser.add_argument("-size", help="Set size of word vectors; default is 100", type=int, default=100)
parser.add_argument("-sample", help="Set threshold for occurrence of words. Those that appear with higher frequency in the training data will be randomly down-sampled; default is 1e-3, useful range is (0, 1e-5)", type=float, default=1e-3)
parser.add_argument("-hs", help="Use Hierarchical Softmax; default is 0 (not used)", type=int, default=0, choices=[0, 1])
parser.add_argument("-negative", help="Number of negative examples; default is 5, common values are 3 - 10 (0 = not used)", type=int, default=5)
parser.add_argument("-threads", help="Use THREADS threads (default 12)", type=int, default=12)
parser.add_argument("-iter", help="Run more training iterations (default 5)", type=int, default=5)
parser.add_argument("-min_count", help="This will discard words that appear less than MIN_COUNT times; default is 5", type=int, default=5)
parser.add_argument("-cbow", help="Use the continuous bag of words model; default is 1 (use 0 for skip-gram model)", type=int, default=1, choices=[0, 1])
parser.add_argument("-binary", help="Save the resulting vectors in binary mode; default is 0 (off)", type=int, default=0, choices=[0, 1])
parser.add_argument("-accuracy", help="Use questions from file ACCURACY to evaluate the model")
args = parser.parse_args()
    skipgram = 1 if args.cbow == 0 else 0
corpus = LineSentence(args.train)
model = Word2Vec(
corpus, size=args.size, min_count=args.min_count, workers=args.threads,
window=args.window, sample=args.sample, sg=skipgram, hs=args.hs,
negative=args.negative, cbow_mean=1, iter=args.iter)
if args.output:
outfile = args.output
model.wv.save_word2vec_format(outfile, binary=args.binary)
else:
outfile = args.train
model.save(outfile + '.model')
if args.binary == 1:
model.wv.save_word2vec_format(outfile + '.model.bin', binary=True)
else:
model.wv.save_word2vec_format(outfile + '.model.txt', binary=False)
if args.accuracy:
model.accuracy(args.accuracy)
logger.info("finished running %s", program)
|
duyet-website/api.duyet.net
|
lib/gensim/models/word2vec.py
|
Python
|
mit
| 71,168
|
[
"VisIt"
] |
f656a5c6f29c7de5395367b7ed60798a05bcf37542455c4e19026e5b32b76e59
|
#
# Copyright (C) 2001-2008 greg Landrum
#
""" unit testing code for the Doctests
"""
import doctest
import unittest
from rdkit.ML.Data import SplitData, DataUtils
from rdkit.TestRunner import redirect_stdout
from io import StringIO
def load_tests(loader, tests, ignore):
""" Add the Doctests from the module """
tests.addTests(doctest.DocTestSuite(SplitData, optionflags=doctest.ELLIPSIS))
tests.addTests(doctest.DocTestSuite(DataUtils, optionflags=doctest.ELLIPSIS))
return tests
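# Note (hedged): load_tests is the stdlib unittest discovery hook, so running
# e.g. `python -m unittest rdkit.ML.Data.UnitTestDoctests` picks up both the
# doctests registered above and the TestCase below.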
class TestCaseSplitData(unittest.TestCase):
def test_exceptions(self):
self.assertRaises(ValueError, SplitData.SplitIndices, 10, -0.1)
self.assertRaises(ValueError, SplitData.SplitIndices, 10, 1.1)
f = StringIO()
with redirect_stdout(f):
SplitData.SplitIndices(10, 0.5, replacement=True, silent=False)
s = f.getvalue()
self.assertIn('Training', s)
self.assertIn('hold-out', s)
def test_SplitData(self):
self.assertRaises(ValueError, SplitData.SplitDataSet, None, -1.1)
self.assertRaises(ValueError, SplitData.SplitDataSet, None, 1.1)
data = list(range(10))
DataUtils.InitRandomNumbers((23, 42))
f = StringIO()
with redirect_stdout(f):
result = SplitData.SplitDataSet(data, 0.5)
self.assertEqual(set(result[0]).intersection(result[1]), set())
self.assertEqual(len(result[0]), 5)
s = f.getvalue()
self.assertIn('Training', s)
self.assertIn('hold-out', s)
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
greglandrum/rdkit
|
rdkit/ML/Data/UnitTestDoctests.py
|
Python
|
bsd-3-clause
| 1,607
|
[
"RDKit"
] |
48aeed741905a8463d8e5411f854876ae5f47569d24cadf6cc3dca379a798c91
|
"""Define all units and constants
"""
from unties.units_group import UnitsGroup
UnitsGroup._locals = globals()
# I don't want to import math just for this...
pi = 3.14159265358979323846264338327950288419716939937510
UnitsGroup.add_prefixes({
# 'Y': [10**24, 'Yotta'],
# 'Z': [10**21, 'Zetta'],
# 'E': [10**18, 'Exa'],
# 'P': [10**15, 'Peta'],
# 'T': [10**12, 'Tera'],
'G': [10**9, 'Giga'],
'M': [10**6, 'Mega'],
'k': [10**3, 'Kilo'],
'h': [10**2, 'Hecto'],
'da': [10**1, 'Deca'],
'd': [10**-1, 'Deci'],
'c': [10**-2, 'Centi'],
'm': [10**-3, 'Milli'],
'u': [10**-6, 'Micro'],
'n': [10**-9, 'Nano'],
'p': [10**-12, 'Pico'],
'f': [10**-15, 'Femto'],
# 'a': [10**-18, 'Atto'],
# 'z': [10**-21, 'Zepto'],
# 'y': [10**-24, 'Yocto'],
})
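# A hedged note on what add_prefixes enables: once a prefixable unit is
# registered, the prefixed names appear to land in this module's globals
# (inferred from the `UnitsGroup._locals = globals()` line above), e.g.:
#
#   km    # kilometer  = 1000 * m
#   GPa   # gigapascal = 10**9 * Pa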
# Initialize Base Units #
#########################
UnitsGroup.base('m', 'Meter')
UnitsGroup.base('kg', 'Kilogram', prefix=False)
UnitsGroup.base('s', 'Second')
UnitsGroup.base('A', 'Ampere')
UnitsGroup.base('K', 'Kelvin')
UnitsGroup.base('cd', 'Candela')
UnitsGroup.base('mol', 'Mole')
# Initialize Derived Units #
############################
# Official SI derived units
(m / m).derived('rad', 'Radian', _manual_quantity='angle')
(s**-1).derived('Hz', 'Hertz')
(kg * m / s**2).derived('N', 'Newton')
(N / m**2).derived('Pa', 'Pascal')
(N * m).derived('J', 'Joule')
(J / s).derived('W', 'Watt')
(s * A).derived('C', 'Coulomb')
(W / A).derived('V', 'Volt')
(C / V).derived('F', 'Farad')
(V / A).derived('ohm', 'Ohm')
(1 / ohm).derived('S', 'Siemens')
(J / A).derived('Wb', 'Weber')
(V * s / m**2).derived('T', 'Tesla')
(V * s / A).derived('H', 'Henry')
# Unofficial Units
(1000*mol/m**3).derived('M', 'Molar')
# Initialize Conversion Units #
###############################
# Conversions for m:
m.conversion('Ang', 'Angstrom', 10**-10)
m.conversion('au', 'Astronomical Unit', 149597900000)
m.conversion('pc', 'Parsec', 3.08567758149137*10**16)
m.conversion('fath', 'Fathom', 1.8288)
m.conversion('inch', 'Inch', 0.0254)
m.conversion('ft', 'Foot', 0.3048)
m.conversion('yd', 'Yard', 0.9144)
m.conversion('mi', 'Mile', 1609.344)
m.conversion('fur', 'Furlong', 201.168)
m.conversion('ltyr', 'Lightyear', 9.46052840488*10**15)
m.conversion('nmi', 'Nautical Mile', 1852)
m.conversion('rod', 'Rod', 5.0292)
# Conversions for m**2:
(m**2).conversion('acre', 'Acre', 4046.8564224)
(m**2).conversion('ha', 'Hectare', 10**4)
# Conversions for m**3:
(m**3).conversion('cup', 'Cup', 2.365882365*10**-4)
(m**3).conversion('floz', 'Fluid ounce', 2.95735295625*10**-5)
(m**3).conversion('flozUK', 'British fluid ounce', 2.84130625*10**-5)
(m**3).conversion('gal', 'Gallon', 0.003785411784)
(m**3).conversion('galUK', 'British gallon', 0.00454609)
(m**3).conversion('l', 'Liter', 0.001, prefix=True)
(m**3).conversion('pt', 'Pint', 4.73176473*10**-4)
(m**3).conversion('qt', 'Quart', 9.46352946*10**-4)
(m**3).conversion('tbsp', 'Tablespoon', 1.47867647813*10**-5)
(m**3).conversion('tsp', 'Teaspoon', 4.92892159375*10**-6)
# Conversions for m/s:
(m/s).conversion('knot', 'Knot', 0.514444444444)
(m/s).conversion('kph', 'Kilometers per hour', 0.277777777778)
(m/s).conversion('mph', 'Miles per hour', 0.44704)
# Conversions for m/s**2:
(m/s**2).conversion('Gal', 'gal (galileo)', 0.01, prefix=True)
# Conversions for kg/(m*s):
(kg/(m*s)).conversion('P', 'poise', 0.1, prefix=True)
# Conversions for m**2/s:
(m**2/s).conversion('St', 'stokes', 10**-4, prefix=True)
# Conversions for mol:
mol.conversion('lbmol', 'Pound-mole', 453.59237)
# Conversions for kg:
kg.conversion('amu', 'Atomic mass unit', 1.6605402*10**-27)
# Use `g` to add prefixes (kg, mg, etc), but use `gm` for the actual symbol of
# gram. `g` is used for the acceleration of gravity
kg.conversion('g', 'Gram', 0.001, prefix=True)
kg.conversion('gm', 'Gram', 0.001)
kg.conversion('lb', 'Pound mass', 0.45359237)
kg.conversion('mton', 'Metric ton', 1000)
kg.conversion('oz', 'Ounce', 0.028349523125)
kg.conversion('slug', 'Slug', 14.5939029372)
kg.conversion('ton', 'Ton', 907.18474)
kg.conversion('tonUK', 'Long ton', 1016.047)
# Conversions for N:
N.conversion('dyn', 'Dyne', 10**-5)
N.conversion('kgf', 'Kilogram force', 9.80665)
N.conversion('lbf', 'Pound force', 4.44822161526)
N.conversion('tonf', 'Ton force', 8896.44323052)
# Conversions for J:
J.conversion('Btu', 'British thermal unit', 1055.05585262)
# There are multiple definitions for the calorie. This is the current standard.
J.conversion('cal', 'Gram calorie', 4.184)
J.conversion('Cal', 'Kilogram calorie', 4184)
J.conversion('kcal', 'Kilogram calorie', 4184)
J.conversion('erg', 'Erg', 10**-7)
J.conversion('eV', 'Electron volt', 1.60217733*10**-19)
J.conversion('ftlb', 'Foot-pound', 1.35581794833)
# Use prefixes to create kWh & MWh
J.conversion('Wh', 'Watt-hour', 3600, prefix=True)
J.conversion('latm', 'Liter-atmosphere', 101.325)
# Conversions for W:
W.conversion('hp', 'Horsepower', 745.699871582)
# Conversions for Pa:
Pa.conversion('atm', 'Atmosphere', 101325)
Pa.conversion('bar', 'Bar', 10**5)
Pa.conversion('inH2O', 'Inches of water', 249.08891)
Pa.conversion('inHg', 'Inches of mercury', 3386.38815789)
Pa.conversion('mmH2O', 'Millimeters of water', 9.80665)
Pa.conversion('mmHg', 'Millimeters of mercury', 133.322387415)
Pa.conversion('psi', 'Pounds per square inch', 6894.75729317)
Pa.conversion('torr', 'Torr', 101325 / 760)
Pa.conversion('Ba', 'Barye', 0.1)
# Conversions for s:
s.conversion('minute', 'Minute', 60)
s.conversion('hr', 'Hour', 60*60)
s.conversion('day', 'Day', 60*60*24)
s.conversion('week', 'Week', 60*60*24*7)
s.conversion('fortnight', 'Fortnight', 60*60*24*7*2)
s.conversion('yr', 'Year', 60*60*24*365.242198781)
# Conversions for rad:
rad.conversion('deg', 'Degree', pi / 180)
rad.conversion('arcmin', 'Arcminute', pi / (180 * 60))
rad.conversion('arcsec', 'Arcsecond', pi / (180 * 60 * 60))
rad.conversion('mas', 'Milliarcsecond', pi / (180 * 60 * 60 * 1000))
rad.conversion('uas', 'Microarcsecond', pi / (180 * 60 * 60 * 1000 * 1000))
# Conversions for K:
# Celcius and Fahrenheit require offsets, so we won't do those here
K.conversion('R', 'Rankine', 5 / 9)
# Conversions for A:
# Conversions for cd:
# Conversions for Hz:
# Conversions for C:
# Conversions for V:
# Conversions for F:
# Conversions for ohm:
# Conversions for S:
# Conversions for Wb:
# Conversions for T:
# Conversions for H:
# Conversions for M:
# Initialize Constants #
########################
# Measured Constants
(8.3144598 * J / (mol * K)).constant("Rc", "Gas constant")
(299792458 * m / s).constant("c", "Speed of light")
(9.80665 * m / s**2).constant("g", "Acceleration of gravity")
(6.67408 * 10**-11 * N * m**2 / kg**2).constant("Gc", "Gravitational constant")
(6.626070040 * 10**-34 * J * s).constant("h", "Planck's constant")
(9.10938356 * 10**-31 * kg).constant("Me", "Electron rest mass")
(1.674927471 * 10**-27 * kg).constant("Mn", "Neutron rest mass")
(1.672621777 * 10**-27 * kg).constant("Mp", "Proton rest mass")
(6.022140857 * 10**23 / mol).constant("Na", "Avogadro constant")
(1.6021766208 * 10**-19 * C).constant("q", "Electron charge")
# Defined constants
(c**2 * 10**-7 * H / m)(m/F).constant("Cc", "Coulomb's constant")
(h / (2 * pi)).constant("hbar", "Reduced Planck's constant")
(4 * pi * 10**-7 * N / A**2).constant("u0", "Vacuum permeability")
(u0**-1 * c**-2)(F/m).constant("e0", "Vacuum permittivity")
(Rc / Na).constant("kb", "Boltzmann's constant")
(pi**2 * kb**4 / (60 * hbar**3 * c**2)).constant("sbc",
"Stefan-Boltzmann constant")
(q * hbar / (2 * Me))(J/T).constant("ub", "Bohr magneton")
(4 * pi * e0 * hbar**2 / (Me * q**2))(m).constant("Rb", "Bohr radius")
(Me * q**4 / 8 / e0**2 / h**3 / c)(1/m).constant("Rdb", "Rydberg Constant")
(h / (2 * q))(Wb).constant("mfq", "Magnetic flux quantum")
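# Note on the pattern above (inferred from usage, not a documented contract):
# calling a UnitsGroup with another group, as in (expr)(m/F), appears to
# express the quantity in the given units before .constant() registers it.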
# Initialize Quantities #
#########################
m.add_quantity('length')
kg.add_quantity('mass')
s.add_quantity('time')
A.add_quantity('electric current')
K.add_quantity('temperature')
cd.add_quantity('luminous intensity')
mol.add_quantity('amount of substance')
Hz.add_quantity('frequency')
N.add_quantity('force/weight')
Pa.add_quantity('pressure/stress')
J.add_quantity('energy/work/heat')
W.add_quantity('power/radiant flux')
C.add_quantity('electric charge')
V.add_quantity('voltage/electromotive force')
F.add_quantity('electric capacitance')
ohm.add_quantity('electrical resistance/impedance/reactance')
S.add_quantity('electrical conductance')
Wb.add_quantity('magnetic flux')
T.add_quantity('magnetic flux density')
H.add_quantity('inductance')
M.add_quantity('concentration')
(m**2).add_quantity('area')
(m**3).add_quantity('volume')
(m / s).add_quantity('speed/velocity')
(m / s**2).add_quantity('acceleration')
(m**3 / s).add_quantity('volumetric flow')
(N * s).add_quantity('momentum/impulse')
(N * m * s).add_quantity('angular momentum')
(mol / s).add_quantity('molar flow rate/catalytic activity')
# I grabbed these from a table at:
# https://en.wikipedia.org/wiki/SI_derived_unit
(N * m).add_quantity('torque')
(N / s).add_quantity('yank')
(m**-1).add_quantity('wavenumber/optical power/curvature/spatial frequency')
(kg / m**2).add_quantity('area density')
(kg / m**3).add_quantity('density/mass density')
(m**3 / kg).add_quantity('specific volume')
(mol / m**3).add_quantity('molarity')
(m**3 / mol).add_quantity('molar volume')
(J * s).add_quantity('action')
(J / K).add_quantity('heat capacity/entropy')
(J / (K * mol)).add_quantity('molar heat capacity/molar entropy')
(J / (K * kg)).add_quantity('specific heat capacity/specific entropy')
(J / mol).add_quantity('molar energy')
(J / kg).add_quantity('specific energy')
(J / m**3).add_quantity('energy density')
(N / m).add_quantity('surface tension/stiffness')
(W / m**2).add_quantity('heat flux density/irradiance')
(W / (m * K)).add_quantity('thermal conductivity')
(m**2 / s).add_quantity('kinematic viscosity/thermal diffusivity')
(m**2 / s).add_quantity('diffusion coefficient')
(Pa * s).add_quantity('dynamic viscosity')
(C / m**2).add_quantity('electric displacement field/polarization density')
(C / m**3).add_quantity('electric charge density')
(A / m**2).add_quantity('electric current density')
(S / m).add_quantity('electrical conductivity')
(S * m**2 / mol).add_quantity('molar conductivity')
(F / m).add_quantity('permittivity')
(H / m).add_quantity('magnetic permeability')
(V / m).add_quantity('electric field strength')
(A / m).add_quantity('magnetization/magnetic field strength')
(cd / m**2).add_quantity('luminance')
(C / kg).add_quantity('exposure (X and gamma rays)')
(ohm * m).add_quantity('resistivity')
(kg / m).add_quantity('linear mass density')
(C / m).add_quantity('linear charge density')
(mol / kg).add_quantity('molality')
(kg / mol).add_quantity('molar mass')
(m / m**3).add_quantity('fuel efficiency')
(kg / s).add_quantity('mass flow rate')
(J / T).add_quantity('magnetic dipole moment')
(W / m**3).add_quantity('spectral irradiance/power density')
(K / W).add_quantity('thermal resistance')
(K**-1).add_quantity('thermal expansion coefficient')
(K / m).add_quantity('temperature gradient')
(m**2 / (V * s)).add_quantity('electron mobility')
(Pa**-1).add_quantity('compressibility')
(H**-1).add_quantity('magnetic reluctance')
(Wb / m).add_quantity('magnetic vector potential')
(Wb * m).add_quantity('magnetic moment')
(T * m).add_quantity('magnetic rigidity')
(J / m**2).add_quantity('radiant exposure')
(m**3 / (mol * s)).add_quantity('catalytic efficiency')
(kg * m**2).add_quantity('moment of inertia')
(N * m * s / kg).add_quantity('specific angular momentum')
(Hz / s).add_quantity('frequency drift')
(m / H).add_quantity('magnetic susceptibility')
(W / m).add_quantity('spectral power')
def deg_c(num):
"""Return temperature in Kelvin
Accepts a number representing the temperature in Celsius.
"""
return (num + 273.15) * K
def deg_f(num):
"""Return temperature in Rankine
Accepts a number representing the temperature in Fahrenheit.
"""
return (num + 459.67) * R
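# Illustrative usage of the temperature helpers:
#
#   deg_c(25)     # => 298.15 * K
#   deg_f(212)    # => 671.67 * R  (water's boiling point)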
|
spejamchr/unties
|
unties/units.py
|
Python
|
mit
| 12,062
|
[
"Avogadro"
] |
7bb71c75663111704bd5a4e78944e3935487758a71173309030385cce40056d4
|
# gen_deffile.py
# Drew Levin
# August 30, 2015
#
# Generates a .def file for model definition in CyCells. Takes in a list of
# parameters and a file name. The file creation is a side effect.
# parameters is a dictionary of parameter values with keys as defined in
# hypercube.py
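# A minimal usage sketch (values illustrative only; gen_file reads the keys
# v_dif, v_decay, igm, c_dif, c_decay, t_inc, v_inf, c_sec, v_sec, t_kill,
# t_exp, t_apop, speed, foi_age, circ_age, t_circ and sigma):
#
#   params = dict(v_dif=..., v_decay=..., igm=..., c_dif=..., c_decay=...,
#                 t_inc=..., v_inf=..., c_sec=..., v_sec=..., t_kill=...,
#                 t_exp=..., t_apop=..., speed=..., foi_age=..., circ_age=...,
#                 t_circ=..., sigma=...)
#   gen_file(params, 'model.def', is_large=False, is_avian=False)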
import os
def gen_file(params, filename, is_large, is_avian):
# Get OS compatible new line string
    # eof = os.linesep
eof = '\n'
# Percent of lung represented
p_in = 0.005625 if is_large else 0.0025
p_out = 1.0 - p_in
    # Chemokine onset time: 8 h (28800 s) for IP10, ~15.75 h (56700 s) for RANTES
chem_onset = 56700 if is_avian else 28800
# Write the output to the string
output = ''
output += '#DefFormat 8' + eof
output += eof
output += 'cell_names { healthy infected expressing dead apoptosis t_found t_search t_circ oracle }' + eof
output += eof
output += 'molecule_type virus {' + eof
output += ' diffusion_rate ' + str(params['v_dif']) + eof
output += ' decay_until ' + str(params['v_decay']/86400.0) + ' 345600' + eof
output += ' decay_rate ' + str(params['v_decay']*params['igm']/86400.0) + eof
output += '}' + eof
output += eof
output += 'molecule_type chemokine {' + eof
output += ' diffusion_rate ' + str(params['c_dif']) + eof
output += ' decay_rate ' + str(params['c_decay']) + eof
output += '}' + eof
output += eof
output += 'cell_type healthy {' + eof
output += ' radius 5' + eof
output += eof
output += ' attribute age fixed 0 fixed 0' + eof
output += ' attribute apop_age fixed 0 fixed 0' + eof
output += ' attribute virusC fixed 0 fixed 0' + eof
output += ' attribute infection_age gaussian ' + str(params['t_inc']*3600) + ' ' + str(params['t_inc']*360) + ' gaussian ' + str(params['t_inc']*3600) + ' ' + str(params['t_inc']*360) + eof
output += ' attribute sense_tcell fixed 0 fixed 0' + eof
output += eof
output += ' sense virusC copy_conc virus' + eof
output += eof
output += ' action infect virus infected calc_prob_single linear virusC ' + str(1.044e15/(60.0*params['v_inf']))+ ' 0' + eof
output += '}' + eof
output += eof
output += 'cell_type infected {' + eof
output += ' radius 5' + eof
output += eof
output += ' attribute age fixed 0 fixed 0' + eof
output += ' attribute apop_age fixed 0 fixed 0' + eof
output += ' attribute virusC fixed 0 fixed 0' + eof
output += ' attribute infection_age gaussian ' + str(params['t_inc']*3600) + ' ' + str(params['t_inc']*360) + ' gaussian ' + str(params['t_inc']*3600) + ' ' + str(params['t_inc']*360) + eof
output += ' attribute sense_tcell fixed 0 fixed 0' + eof
output += eof
output += ' process age update fixed 1' + eof
output += eof
output += ' action secrete_fixed chemokine ' + str(params['c_sec']) + ' gte age ' + str(chem_onset) + eof
output += ' action change expressing gte_var age infection_age' + eof
output += '}' + eof
output += eof
output += 'cell_type expressing {' + eof
output += ' radius 5' + eof
output += eof
output += ' attribute age fixed ' + str(params['t_inc']*3600) + ' fixed ' + str(params['t_inc']*3600) + eof
output += ' attribute apop_age fixed 0 fixed 0' + eof
output += ' attribute virusC fixed 0 fixed 0' + eof
output += ' attribute infection_age gaussian ' + str(params['t_inc']*3600) + ' ' + str(params['t_inc']*360) + ' gaussian ' + str(params['t_inc']*3600) + ' ' + str(params['t_inc']*360) + eof
output += ' attribute sense_tcell fixed 0 fixed 0' + eof
output += eof
output += ' sense sense_tcell cognate t_found 12' + eof
output += eof
output += ' process age update fixed 1' + eof
output += eof
output += ' action secrete_fixed chemokine ' + str(params['c_sec']) + ' gte age ' + str(chem_onset) + eof
output += ' action secrete_fixed virus ' + str(params['v_sec']) + ' always' + eof
output += eof
output += ' action change apoptosis composite gte sense_tcell 1 fixed_single ' + str(1.0/(params['t_kill']*60.0)) + eof
output += ' action change dead gte age ' + str(params['t_inc']*3600 + params['t_exp']*60) + eof
output += '}' + eof
output += eof
output += 'cell_type dead {' + eof
output += ' radius 5' + eof
output += eof
output += ' attribute age fixed 0 fixed 0' + eof
output += ' attribute apop_age fixed 0 fixed 0' + eof
output += ' attribute virusC fixed 0 fixed 0' + eof
output += ' attribute infection_age gaussian ' + str(params['t_inc']*3600) + ' ' + str(params['t_inc']*360) + ' gaussian ' + str(params['t_inc']*3600) + ' ' + str(params['t_inc']*360) + eof
output += ' attribute sense_tcell fixed 0 fixed 0' + eof
output += '}' + eof
output += eof
output += 'cell_type apoptosis {' + eof
output += ' radius 5' + eof
output += eof
output += ' attribute age fixed ' + str(params['t_inc']*3600) + ' fixed ' + str(params['t_inc']*3600) + eof
output += ' attribute apop_age fixed 0 fixed 0' + eof
output += ' attribute virusC fixed 0 fixed 0' + eof
output += ' attribute infection_age gaussian ' + str(params['t_inc']*3600) + ' ' + str(params['t_inc']*360) + ' gaussian ' + str(params['t_inc']*3600) + ' ' + str(params['t_inc']*360) + eof
output += ' attribute sense_tcell fixed 0 fixed 0' + eof
output += eof
output += ' process age update fixed 1' + eof
output += ' process apop_age update fixed 1' + eof
output += eof
output += ' action secrete_fixed chemokine ' + str(params['c_sec']) + ' gte age ' + str(chem_onset) + eof
output += ' action secrete_fixed virus ' + str(params['v_sec']) + ' always' + eof
output += ' action change dead or gte apop_age ' + str(params['t_apop']*3600.0) + ' gte age ' + str(params['t_inc']*3600 + params['t_exp']*60) + eof
output += '}' + eof
output += eof
output += 'cell_type t_found {' + eof
output += ' radius 5' + eof
output += ' speed ' + str(params['speed']/60.0) + eof
output += eof
output += ' attribute age fixed 0 fixed 0' + eof
output += ' attribute inflammationC fixed 0 fixed 0' + eof
output += ' attribute virusC fixed 0 fixed 0' + eof
output += ' attribute p uniform 0 1 uniform 0 1' + eof
output += eof
output += ' sense virusC copy_conc virus' + eof
output += eof
output += ' action die fixed_single ' + str(1.0/(params['foi_age']*60.0)) + eof
output += ' action move_chem chemokine 1.66e-19 always' + eof
output += '}' + eof
output += eof
output += 'cell_type t_search {' + eof
output += ' radius 5' + eof
output += eof
output += ' attribute age fixed 0 fixed 0' + eof
output += ' attribute inflammationC fixed 0 fixed 0' + eof
output += ' attribute virusC fixed 0 fixed 0' + eof
output += ' attribute p uniform 0 1 uniform 0 1' + eof
output += eof
output += ' sense inflammationC copy_conc chemokine' + eof
output += eof
output += ' action die fixed_single ' + str(1.0/(86400*params['circ_age'])) + eof
output += ' action change t_circ lte inflammationC 1.66e-19' + eof
output += ' action change t_found gte inflammationC 1.66e-19' + eof
output += '}' + eof
output += eof
output += 'cell_type t_circ {' + eof
output += ' radius 5' + eof
output += eof
output += ' attribute age fixed 0 fixed 0' + eof
output += ' attribute inflammationC fixed 0 fixed 0' + eof
output += ' attribute virusC fixed 0 fixed 0' + eof
output += ' attribute p uniform 0 1 uniform 0 1' + eof
output += eof
output += ' process age update fixed 1' + eof
output += eof
output += ' action die fixed_single ' + str(1.0/(86400.*params['circ_age'])) + eof
output += ' action recirculate2D t_search init composite gte age ' + str(params['t_circ']) + ' lte p ' + str(p_in) + eof
output += ' action recirculate2D0 t_circ init composite gte age ' + str(params['t_circ']) + ' gte p ' + str(p_in) + eof
    output += '}' + eof
output += eof
output += 'cell_type oracle {' + eof
output += ' radius 1' + eof
output += eof
output += ' attribute age fixed 0 fixed 0' + eof
output += eof
output += ' process age update fixed 1' + eof
output += eof
output += ' action admit_rand2D t_search init fixed ' + str(p_in*params['sigma']/3600.0) + ' gte age 432000' + eof
output += ' action admit_mult t_circ init fixed ' + str(p_out*params['sigma']/3600.0) + ' gte age 432000' + eof
output += '}' + eof
# Write the output string to the given file
with open(filename, 'w') as f:
f.write(output)
|
drewlevin/chemokine
|
sensitivity/testing/gen_deffile.py
|
Python
|
gpl-3.0
| 8,368
|
[
"Gaussian"
] |
05d851f8dde2c7361f8e5295f6cb6098af4f00f062890507070aacbd75a4a393
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train NMT Models on WMT'14 English-German machine translation task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.compat as tf
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import ml_perf_tokenizer
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo import model_registry
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import base_model_params
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import layers
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import program as program_lib
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import py_utils
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import schedule
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.tasks.mt import base_config
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.tasks.mt import input_generator
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.tasks.mt import model
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.google import model_helper
@model_registry.RegisterSingleTaskModel
class WmtEnDeMlPerf(base_model_params.SingleTaskModelParams):
DATADIR = '/REDACTED/jk-d/home/tpu-perf-team/dehao/transformer_sharded_512/'
ID_PAD = 0
ID_SOS = 0
ID_EOS = 1
MODEL_DIM = 1024
HIDDEN_DIM = 4096
NUM_HEADS = 16
VOCAB_SIZE = 33708
WARMUP_STEPS = 1107
LEARNING_RATE = 1.875
MAX_STEPS = 10000
ADAM_BETA1 = 0.9
ADAM_BETA2 = 0.98
# Per-core batch_size
TRAIN_BATCH_SIZE = 16
DECODE_BATCH_SIZE = 96
XLA_NUM_PARTITIONS = None
NUM_HOSTS = None
TEST_FILE_PATTERN = 'translate_ende_wmt32k-dev-3072-69-zero-eval-weight-?????-of-00512'
def Train(self):
p = input_generator.MlPerfInput.Params()
p.tokenizer = ml_perf_tokenizer.MlPerfTokenizer.Params()
p.tokenizer.vocab_size = self.VOCAB_SIZE
p.tokenizer.vocab_filepath = os.path.join(
self.DATADIR, 'vocab.translate_ende_wmt32k.32768.subwords')
p.tokenizer.target_eos_id = self.ID_EOS
p.file_pattern = 'tfrecord:' + os.path.join(
self.DATADIR, 'translate_ende_wmt32k-train-?????-of-00512')
p = model_helper.FixateInputShape(p, self.TRAIN_BATCH_SIZE, 96, 96)
p.num_samples = 0
p.file_random_seed = 0
p.use_per_host_infeed = True
p.num_hosts = self.NUM_HOSTS
return p
def Test(self):
p = input_generator.MlPerfInput.Params()
p.tokenizer = ml_perf_tokenizer.MlPerfTokenizer.Params()
p.tokenizer.vocab_size = self.VOCAB_SIZE
p.tokenizer.vocab_filepath = os.path.join(
self.DATADIR, 'vocab.translate_ende_wmt32k.32768.subwords')
p.tokenizer.target_eos_id = self.ID_EOS
p.file_pattern = 'tfrecord:' + os.path.join(self.DATADIR,
self.TEST_FILE_PATTERN)
p = model_helper.FixateInputShape(p, self.DECODE_BATCH_SIZE, 97, 97)
p.num_samples = 0
    # Note: Per host infeed for the test set evaluation does not seem
    # to be worth it: the sharded files are so tiny that the
    # variance/overhead makes it a bad trade-off.
p.use_per_host_infeed = True
p.file_random_seed = 0
p.num_hosts = self.NUM_HOSTS
return p
def _CommonParams(self, packed_input):
p = base_config.SetupTransformerBatchMajorParams(
name='en_de_ml_perf_transformer',
vocab_size=self.VOCAB_SIZE,
model_dim=self.MODEL_DIM,
hidden_dim=self.HIDDEN_DIM,
num_heads=self.NUM_HEADS,
num_layers=6,
inference_source_language='en',
inference_target_language='de',
input_dropout_prob=0.1,
residual_dropout_prob=0.1,
atten_dropout_prob=0.1,
relu_dropout_prob=0.1,
add_unnormalized_residuals=True,
learning_rate=self.LEARNING_RATE,
warmup_steps=self.WARMUP_STEPS,
packed_input=packed_input,
use_fast_projection_layer=True,
enable_per_dim_scale=False,
use_fused_layernorm=True,
use_bf16_activations=py_utils.use_tpu(),
use_bias=False,
xla_num_partitions=self.XLA_NUM_PARTITIONS)
for pp in [p.encoder, p.decoder]:
pp.token_emb = model_helper.ChangeToSimpleEmbedding(pp.token_emb)
p.decoder.softmax = model_helper.ChangeToSimpleSoftmax(p.decoder.softmax)
sm_params = p.decoder.softmax.Copy()
sm_params.input_dim = self.MODEL_DIM
shared_emb = layers.SharedSoftmaxLayer.Params().Set(
**dict(sm_params.IterParams()))
shared_emb.params_init = py_utils.WeightInit.Gaussian(
1.0 / math.sqrt(self.MODEL_DIM))
shared_emb.scale_sqrt_depth = True
shared_emb.use_num_classes_major_weight = True
p.decoder.shared_emb = shared_emb
p.decoder.shared_emb.cls = layers.SharedSoftmaxLayer
# Directly sharing encoder embedding with decoder results in worse model
# quality, which requires more tuning.
p.encoder.shared_emb = shared_emb
p.encoder.shared_emb.cls = layers.SharedSoftmaxLayer
p.train.lr_schedule = schedule.TransformerMLPerfSchedule.Params().Set(
warmup_steps=self.WARMUP_STEPS, model_dim=self.MODEL_DIM)
p.train.max_steps = self.MAX_STEPS
p.train.scale_gradients = False
p.train.optimizer.beta1 = self.ADAM_BETA1
p.train.optimizer.beta2 = self.ADAM_BETA2
# Fix this
#p.eval.ml_perf_metrics_only = True
p.decoder.beam_search.target_sos_id = self.ID_SOS
p.decoder.beam_search.target_eos_id = self.ID_EOS
p.decoder.beam_search.beam_size = 4.0
p.decoder.beam_search.num_hyps_per_beam = 4
p.decoder.target_sos_id = self.ID_SOS
p.decoder.target_eos_id = self.ID_EOS
p.decoder.use_fast_softmax = True
p.decoder.target_seq_len = 147
if py_utils.use_tpu():
p.encoder.input_dropout_tpl.fprop_dtype = tf.bfloat16
p.decoder.trans_decoder_tpl.fprop_dtype = tf.bfloat16
p.decoder.input_dropout_tpl.fprop_dtype = tf.bfloat16
p.train.optimizer.use_bf16_gradients_ar = True
return p
def Task(self):
return self._CommonParams(packed_input=False)
@model_registry.RegisterSingleTaskModel
class WmtEnDeMlPerfTpuExecutor(WmtEnDeMlPerf):
"""TPU Executor params for MLPerf."""
def Task(self):
p = super(WmtEnDeMlPerfTpuExecutor, self).Task()
p.decoder.beam_search = model_helper.ChangeToBeamSearchTpuHelper(
p.decoder.beam_search)
return p
def ProgramSchedule(self):
p = program_lib.SimpleProgramScheduleForTask(
train_dataset_name='Train',
# Sets train_steps_per_loop to the number of steps of an epoch based on
# reference dataset size and 131k tokens global batch size (a typical
# batch size from MLPerf 0.6 submission).
train_steps_per_loop=1107,
eval_dataset_names=['Test'],
eval_steps_per_loop=0,
decode_steps_per_loop=1,
)
p.train_executions_per_eval = 3
# For compliance logging.
p.ml_perf.benchmark_name = 'transformer'
p.ml_perf.steps_per_epoch = 1107
p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
p.ml_perf.decoder_metric_success_threshold = 0.25
p.ml_perf.max_sequence_length = 96
p.ml_perf.global_batch_size = 512
p.ml_perf.optimizer_name = 'adam'
p.ml_perf.opt_adam_beta_1 = 0.9
p.ml_perf.opt_adam_beta_2 = 0.98
p.ml_perf.opt_adam_epsilon = 1e-9
p.ml_perf.base_learning_rate = self.LEARNING_RATE
p.ml_perf.warmup_steps = self.WARMUP_STEPS
p.ml_perf.train_samples = 580000
p.ml_perf.eval_samples = 3003
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeMlPerfTpuExecutorTuning(WmtEnDeMlPerfTpuExecutor):
"""TPU Executor tuning run for MLPerf.
  This run will take longer than a final run because we pause training
  to decode more often; however, that makes it easier to tune.
"""
def ProgramSchedule(self):
p = program_lib.SimpleProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=200,
eval_dataset_names=['Test'],
eval_steps_per_loop=0,
decode_steps_per_loop=1,
)
p.train_executions_per_eval = 1
# For compliance logging.
p.ml_perf.benchmark_name = 'transformer'
p.ml_perf.steps_per_epoch = 1107
p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
p.ml_perf.decoder_metric_success_threshold = 0.25
p.ml_perf.max_sequence_length = 96
p.ml_perf.global_batch_size = 512
p.ml_perf.optimizer_name = 'adam'
p.ml_perf.opt_adam_beta_1 = 0.9
p.ml_perf.opt_adam_beta_2 = 0.98
p.ml_perf.opt_adam_epsilon = 1e-9
p.ml_perf.base_learning_rate = self.LEARNING_RATE
p.ml_perf.warmup_steps = self.WARMUP_STEPS
p.ml_perf.train_samples = 580000
p.ml_perf.eval_samples = 3003
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeMlPerfTpuExecutorPackedInputTuning(WmtEnDeMlPerfTpuExecutorTuning):
"""TPU Executor tuning run for MLPerf.
"""
PACKED_DATADIR = '/REDACTED/pw-d/home/tpu-perf-team/blee/transformer_sharded_512/'
PACKED_TRAIN_FILE_PATTERN = 'translate_ende_wmt32k_packed-train-?????-of-00256'
def ProgramSchedule(self):
p = program_lib.SimpleProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=200,
eval_dataset_names=['Test'],
eval_steps_per_loop=0,
decode_steps_per_loop=1,
)
p.train_executions_per_eval = 1
# For compliance logging.
p.ml_perf.benchmark_name = 'transformer'
p.ml_perf.steps_per_epoch = 1107
p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
p.ml_perf.decoder_metric_success_threshold = 0.25
p.ml_perf.max_sequence_length = 80
p.ml_perf.global_batch_size = 512
p.ml_perf.optimizer_name = 'adam'
p.ml_perf.opt_adam_beta_1 = 0.9
p.ml_perf.opt_adam_beta_2 = 0.98
p.ml_perf.opt_adam_epsilon = 1e-9
p.ml_perf.base_learning_rate = self.LEARNING_RATE
p.ml_perf.warmup_steps = self.WARMUP_STEPS
p.ml_perf.train_samples = 566340
p.ml_perf.eval_samples = 3003
return p
def Train(self):
p = input_generator.MlPerfInput.Params()
p.tokenizer = ml_perf_tokenizer.MlPerfTokenizer.Params()
p.tokenizer.vocab_size = self.VOCAB_SIZE
p.tokenizer.vocab_filepath = os.path.join(
self.PACKED_DATADIR, 'vocab.translate_ende_wmt32k.32768.subwords')
p.tokenizer.target_eos_id = self.ID_EOS
p.file_pattern = 'tfrecord:' + os.path.join(self.PACKED_DATADIR,
self.PACKED_TRAIN_FILE_PATTERN)
p = model_helper.FixateInputShape(p, self.TRAIN_BATCH_SIZE, 256, 256)
p.num_samples = 0
p.packed_input = True
p.file_random_seed = 0
p.use_per_host_infeed = True
p.num_hosts = self.NUM_HOSTS
return p
def Task(self):
p = self._CommonParams(packed_input=True)
p.decoder.beam_search = model_helper.ChangeToBeamSearchTpuHelper(
p.decoder.beam_search)
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeMlPerfTpuExecutorOneShot(WmtEnDeMlPerfTpuExecutorPackedInputTuning
):
"""Run for e2e timing.
This should hit .25 BLEU in ~3 epochs.
"""
MAX_STEPS = 6 * 1107
def ProgramSchedule(self):
p = program_lib.MLPerfProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=1107,
decode_dataset_name='Test',
decode_steps_per_loop=1,
num_epochs_per_session_run=4)
p.train_executions_per_eval = 1
# For compliance logging.
p.ml_perf.benchmark_name = 'transformer'
p.ml_perf.steps_per_epoch = 1107
p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
p.ml_perf.decoder_metric_success_threshold = 0.25
p.ml_perf.max_sequence_length = 80
p.ml_perf.global_batch_size = 512
p.ml_perf.optimizer_name = 'adam'
p.ml_perf.opt_adam_beta_1 = 0.9
p.ml_perf.opt_adam_beta_2 = 0.98
p.ml_perf.opt_adam_epsilon = 1e-9
p.ml_perf.base_learning_rate = self.LEARNING_RATE
p.ml_perf.warmup_steps = self.WARMUP_STEPS
p.ml_perf.train_samples = 566340
p.ml_perf.eval_samples = 3003
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeMlPerfTpuExecutorOneShot16x16Partitioned(
WmtEnDeMlPerfTpuExecutorOneShot):
"""Run for 16x16 DF."""
MAX_STEPS = 6 * 1107
TRAIN_BATCH_SIZE = 2
DECODE_BATCH_SIZE = 12
XLA_NUM_PARTITIONS = 2
NUM_HOSTS = 32
def Task(self):
p = super(WmtEnDeMlPerfTpuExecutorOneShot16x16Partitioned, self).Task()
p.xla_num_partitions = self.XLA_NUM_PARTITIONS
return p
def ProgramSchedule(self):
p = program_lib.MLPerfProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=1107,
decode_dataset_name='Test',
decode_steps_per_loop=1,
num_epochs_per_session_run=4)
p.train_executions_per_eval = 1
# For compliance logging.
p.ml_perf.benchmark_name = 'transformer'
p.ml_perf.steps_per_epoch = 1107
p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
p.ml_perf.decoder_metric_success_threshold = 0.25
p.ml_perf.max_sequence_length = 80
p.ml_perf.global_batch_size = 512
p.ml_perf.optimizer_name = 'adam'
p.ml_perf.opt_adam_beta_1 = 0.9
p.ml_perf.opt_adam_beta_2 = 0.98
p.ml_perf.opt_adam_epsilon = 1e-9
p.ml_perf.base_learning_rate = self.LEARNING_RATE
p.ml_perf.warmup_steps = self.WARMUP_STEPS
p.ml_perf.train_samples = 566340
p.ml_perf.eval_samples = 3003
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeMlPerfTpuExecutorOneShot16x16Tuning(WmtEnDeMlPerfTpuExecutorOneShot
):
"""Tuning runs."""
WARMUP_STEPS = 831
LEARNING_RATE = 1.875
ADAM_BETA1 = 0.88
ADAM_BETA2 = 0.92
MAX_STEPS = 10 * 277
TRAIN_BATCH_SIZE = 4
DECODE_BATCH_SIZE = 6
NUM_HOSTS = 32
PACKED_DATADIR = '/REDACTED/je-d/home/staging-REDACTED-gpu-dedicated/blee/transformer_new_sharded_train80_data/'
def ProgramSchedule(self):
p = program_lib.MLPerfProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=277,
decode_dataset_name='Test',
decode_steps_per_loop=1,
num_epochs_per_session_run=7,
warmup_seconds=0)
p.train_executions_per_eval = 1
# For compliance logging.
p.ml_perf.benchmark_name = 'transformer'
p.ml_perf.steps_per_epoch = 277
p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
p.ml_perf.decoder_metric_success_threshold = 0.25
p.ml_perf.max_sequence_length = 80
p.ml_perf.global_batch_size = 2048
p.ml_perf.optimizer_name = 'adam'
p.ml_perf.opt_adam_beta_1 = self.ADAM_BETA1
p.ml_perf.opt_adam_beta_2 = self.ADAM_BETA2
p.ml_perf.opt_adam_epsilon = 1e-9
p.ml_perf.base_learning_rate = self.LEARNING_RATE
p.ml_perf.warmup_steps = self.WARMUP_STEPS
p.ml_perf.train_samples = 566340
p.ml_perf.eval_samples = 3003
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeMlPerfTpuExecutorOneShot32x32Partitioned4(
WmtEnDeMlPerfTpuExecutorOneShot):
"""Run for 32x32 DF."""
WARMUP_STEPS = 550
LEARNING_RATE = 1.3
ADAM_BETA1 = 0.86
ADAM_BETA2 = 0.92
MAX_STEPS = 10 * 277
TRAIN_BATCH_SIZE = 4
DECODE_BATCH_SIZE = 6
XLA_NUM_PARTITIONS = 4
NUM_HOSTS = 128
PACKED_DATADIR = '/REDACTED/je-d/home/staging-REDACTED-gpu-dedicated/blee/transformer_new_sharded_train80_data/'
DATADIR = '/REDACTED/je-d/home/staging-REDACTED-gpu-dedicated/blee/transformer_test_data/'
def Task(self):
p = super(WmtEnDeMlPerfTpuExecutorOneShot32x32Partitioned4, self).Task()
p.xla_num_partitions = self.XLA_NUM_PARTITIONS
return p
def ProgramSchedule(self):
p = program_lib.MLPerfProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=277,
decode_dataset_name='Test',
decode_steps_per_loop=1,
num_epochs_per_session_run=7)
p.train_executions_per_eval = 1
# For compliance logging.
p.ml_perf.benchmark_name = 'transformer'
p.ml_perf.steps_per_epoch = 277
p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
p.ml_perf.decoder_metric_success_threshold = 0.25
p.ml_perf.max_sequence_length = 80
p.ml_perf.global_batch_size = 2048
p.ml_perf.optimizer_name = 'adam'
p.ml_perf.opt_adam_beta_1 = self.ADAM_BETA1
p.ml_perf.opt_adam_beta_2 = self.ADAM_BETA2
p.ml_perf.opt_adam_epsilon = 1e-9
p.ml_perf.base_learning_rate = self.LEARNING_RATE
p.ml_perf.warmup_steps = self.WARMUP_STEPS
p.ml_perf.train_samples = 566340
p.ml_perf.eval_samples = 3003
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeMlPerfTpuExecutorOneShotTwoPod(WmtEnDeMlPerfTpuExecutorOneShot):
"""Run for 64x32 DF."""
WARMUP_STEPS = 831
LEARNING_RATE = 1.875
ADAM_BETA1 = 0.88
ADAM_BETA2 = 0.92
MAX_STEPS = 10 * 277
TRAIN_BATCH_SIZE = 2
DECODE_BATCH_SIZE = 3
XLA_NUM_PARTITIONS = 4
NUM_HOSTS = 256
PACKED_DATADIR = '/REDACTED/je-d/home/staging-REDACTED-gpu-dedicated/blee/transformer_new_sharded_train80_data/'
DATADIR = '/REDACTED/je-d/home/staging-REDACTED-gpu-dedicated/blee/transformer_test_data/'
def Task(self):
p = super(WmtEnDeMlPerfTpuExecutorOneShotTwoPod, self).Task()
p.xla_num_partitions = self.XLA_NUM_PARTITIONS
return p
def ProgramSchedule(self):
p = program_lib.MLPerfProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=277,
decode_dataset_name='Test',
decode_steps_per_loop=1,
num_epochs_per_session_run=7)
p.train_executions_per_eval = 1
# For compliance logging.
p.ml_perf.benchmark_name = 'transformer'
p.ml_perf.steps_per_epoch = 277
p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
p.ml_perf.decoder_metric_success_threshold = 0.25
p.ml_perf.max_sequence_length = 80
p.ml_perf.global_batch_size = 2048
p.ml_perf.optimizer_name = 'adam'
p.ml_perf.opt_adam_beta_1 = self.ADAM_BETA1
p.ml_perf.opt_adam_beta_2 = self.ADAM_BETA2
p.ml_perf.opt_adam_epsilon = 1e-9
p.ml_perf.base_learning_rate = self.LEARNING_RATE
p.ml_perf.warmup_steps = self.WARMUP_STEPS
p.ml_perf.train_samples = 566340
p.ml_perf.eval_samples = 3003
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeMlPerfTpuExecutorOneShotFourPod(WmtEnDeMlPerfTpuExecutorOneShot):
"""Run for 128x32 DF."""
WARMUP_STEPS = 831
LEARNING_RATE = 1.875
ADAM_BETA1 = 0.88
ADAM_BETA2 = 0.92
TRAIN_BATCH_SIZE = 1
DECODE_BATCH_SIZE = 2
XLA_NUM_PARTITIONS = 4
NUM_HOSTS = 512
PACKED_DATADIR = '/REDACTED/je-d/home/staging-REDACTED-gpu-dedicated/blee/transformer_new_sharded_train80_data/'
DATADIR = '/REDACTED/je-d/home/staging-REDACTED-gpu-dedicated/blee/transformer_test_data/'
PACKED_TRAIN_FILE_PATTERN = 'translate_ende_wmt32k_packed-train-?????-of-00512'
TEST_FILE_PATTERN = 'translate_ende_wmt32k-dev-4096-zero-eval-weight-00???-of-00512'
def Task(self):
p = super(WmtEnDeMlPerfTpuExecutorOneShotFourPod, self).Task()
p.decoder.beam_search.short_seq_limit = 0
p.xla_num_partitions = self.XLA_NUM_PARTITIONS
return p
def ProgramSchedule(self):
p = program_lib.MLPerfProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=277,
decode_dataset_name='Test',
decode_steps_per_loop=1,
num_epochs_per_session_run=7,
warmup_seconds=240)
p.train_executions_per_eval = 1
# For compliance logging.
p.ml_perf.benchmark_name = 'transformer'
p.ml_perf.steps_per_epoch = 277
p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
p.ml_perf.decoder_metric_success_threshold = 0.25
p.ml_perf.max_sequence_length = 80
p.ml_perf.global_batch_size = 2048
p.ml_perf.optimizer_name = 'adam'
p.ml_perf.opt_adam_beta_1 = self.ADAM_BETA1
p.ml_perf.opt_adam_beta_2 = self.ADAM_BETA2
p.ml_perf.opt_adam_epsilon = 1e-9
p.ml_perf.base_learning_rate = self.LEARNING_RATE
p.ml_perf.warmup_steps = self.WARMUP_STEPS
p.ml_perf.train_samples = 566340
p.ml_perf.eval_samples = 3003
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeMlPerfTpuExecutorOneShot16x16(WmtEnDeMlPerfTpuExecutorOneShot):
"""Run for 16x16 DF."""
MAX_STEPS = 6 * 1107
TRAIN_BATCH_SIZE = 1
DECODE_BATCH_SIZE = 6
NUM_HOSTS = 32
def ProgramSchedule(self):
p = program_lib.MLPerfProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=1107,
decode_dataset_name='Test',
decode_steps_per_loop=1,
num_epochs_per_session_run=4)
p.train_executions_per_eval = 1
# For compliance logging.
p.ml_perf.benchmark_name = 'transformer'
p.ml_perf.steps_per_epoch = 1107
p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
p.ml_perf.decoder_metric_success_threshold = 0.25
p.ml_perf.max_sequence_length = 80
p.ml_perf.global_batch_size = 512
p.ml_perf.optimizer_name = 'adam'
p.ml_perf.opt_adam_beta_1 = 0.9
p.ml_perf.opt_adam_beta_2 = 0.98
p.ml_perf.opt_adam_epsilon = 1e-9
p.ml_perf.base_learning_rate = self.LEARNING_RATE
p.ml_perf.warmup_steps = self.WARMUP_STEPS
p.ml_perf.train_samples = 566340
p.ml_perf.eval_samples = 3003
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeMlPerfTpuExecutorOneShotPfc(WmtEnDeMlPerfTpuExecutorOneShot):
"""Run for 4x4x4 PFC."""
STEPS_PER_EPOCH = 1107
MAX_STEPS = 6 * 1107
TRAIN_BATCH_SIZE = 4
DECODE_BATCH_SIZE = 24
NUM_HOSTS = 16
PACKED_DATADIR = '/REDACTED/jk-d/home/tpu-perf-team/dehao/transformer_sharded_512/'
PACKED_TRAIN_FILE_PATTERN = 'translate_ende_wmt32k_packed-train-?????-of-00016'
TEST_FILE_PATTERN = 'translate_ende_wmt32k-dev-3072-69-zero-eval-weight-?????-of-00016'
def ProgramSchedule(self):
p = program_lib.MLPerfProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=1107,
decode_dataset_name='Test',
decode_steps_per_loop=1,
num_epochs_per_session_run=4)
p.train_executions_per_eval = 1
# For compliance logging.
p.ml_perf.benchmark_name = 'transformer'
p.ml_perf.steps_per_epoch = self.STEPS_PER_EPOCH
p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
p.ml_perf.decoder_metric_success_threshold = 0.25
p.ml_perf.max_sequence_length = 80
p.ml_perf.global_batch_size = 512
p.ml_perf.optimizer_name = 'adam'
p.ml_perf.opt_adam_beta_1 = 0.9
p.ml_perf.opt_adam_beta_2 = 0.98
p.ml_perf.opt_adam_epsilon = 1e-9
p.ml_perf.base_learning_rate = self.LEARNING_RATE
p.ml_perf.warmup_steps = self.WARMUP_STEPS
p.ml_perf.train_samples = 566340
p.ml_perf.eval_samples = 3003
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeMlPerfTpuExecutorOneShotPfcEightCube(
WmtEnDeMlPerfTpuExecutorOneShot):
"""Run for 2x2x2 PFC."""
STEPS_PER_EPOCH = 277
WARMUP_STEPS = 831
LEARNING_RATE = 1.9
ADAM_BETA1 = 0.88
ADAM_BETA2 = 0.94
MAX_STEPS = 10 * 277
TRAIN_BATCH_SIZE = 4
DECODE_BATCH_SIZE = 2
NUM_HOSTS = 64
PACKED_DATADIR = '/REDACTED/jk-d/home/tpu-perf-team/dehao/transformer_sharded_512/'
PACKED_TRAIN_FILE_PATTERN = 'translate_ende_wmt32k_packed-train-?????-of-00512'
TEST_FILE_PATTERN = 'translate_ende_wmt32k-dev-4096-zero-eval-weight-00???-of-00512'
def ProgramSchedule(self):
p = program_lib.MLPerfProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=self.STEPS_PER_EPOCH,
decode_dataset_name='Test',
decode_steps_per_loop=1,
num_epochs_per_session_run=7)
p.train_executions_per_eval = 1
# For compliance logging.
p.ml_perf.benchmark_name = 'transformer'
p.ml_perf.steps_per_epoch = self.STEPS_PER_EPOCH
p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
p.ml_perf.decoder_metric_success_threshold = 0.25
p.ml_perf.max_sequence_length = 80
p.ml_perf.global_batch_size = 2048
p.ml_perf.optimizer_name = 'adam'
p.ml_perf.opt_adam_beta_1 = self.ADAM_BETA1
p.ml_perf.opt_adam_beta_2 = self.ADAM_BETA2
p.ml_perf.opt_adam_epsilon = 1e-9
p.ml_perf.base_learning_rate = self.LEARNING_RATE
p.ml_perf.warmup_steps = self.WARMUP_STEPS
p.ml_perf.train_samples = 566340
p.ml_perf.eval_samples = 3003
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeMlPerfTpuExecutorOneShotPfcTwoCube(WmtEnDeMlPerfTpuExecutorOneShot
):
"""Run for 2x2x2 PFC."""
STEPS_PER_EPOCH = 1107
MAX_STEPS = 6 * 1107
TRAIN_BATCH_SIZE = 32
DECODE_BATCH_SIZE = 24 * 8
NUM_HOSTS = 2
PACKED_DATADIR = '/REDACTED/jk-d/home/tpu-perf-team/dehao/transformer_sharded_512/'
PACKED_TRAIN_FILE_PATTERN = 'translate_ende_wmt32k_packed-train-?????-of-00016'
TEST_FILE_PATTERN = 'translate_ende_wmt32k-dev-3072-69-zero-eval-weight-?????-of-00016'
def ProgramSchedule(self):
p = program_lib.MLPerfProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=1107,
decode_dataset_name='Test',
decode_steps_per_loop=1,
num_epochs_per_session_run=4)
p.train_executions_per_eval = 1
# For compliance logging.
p.ml_perf.benchmark_name = 'transformer'
p.ml_perf.steps_per_epoch = self.STEPS_PER_EPOCH
p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
p.ml_perf.decoder_metric_success_threshold = 0.25
p.ml_perf.max_sequence_length = 80
p.ml_perf.global_batch_size = 512
p.ml_perf.optimizer_name = 'adam'
p.ml_perf.opt_adam_beta_1 = 0.9
p.ml_perf.opt_adam_beta_2 = 0.98
p.ml_perf.opt_adam_epsilon = 1e-9
p.ml_perf.base_learning_rate = self.LEARNING_RATE
p.ml_perf.warmup_steps = self.WARMUP_STEPS
p.ml_perf.train_samples = 566340
p.ml_perf.eval_samples = 3003
return p
@model_registry.RegisterSingleTaskModel
class WmtEnDeMLPerfTinyTPUExecutor(WmtEnDeMlPerfTpuExecutor):
"""Tiny Transformer for TPUExecutor test."""
MODEL_DIM = 4
HIDDEN_DIM = 4
NUM_HEADS = 2
def ProgramSchedule(self):
p = program_lib.MLPerfProgramScheduleForTask(
train_dataset_name='Train',
train_steps_per_loop=20,
decode_dataset_name='Test',
decode_steps_per_loop=1,
num_epochs_per_session_run=4,
warmup_seconds=0)
p.train_executions_per_eval = 1
p.ml_perf.benchmark_name = 'transformer'
p.ml_perf.steps_per_epoch = 1
p.ml_perf.decoder_metric_name = 'ml_perf_bleu'
# Dummy value just to see run_stop/success.
p.ml_perf.decoder_metric_success_threshold = -10.0
p.ml_perf.max_sequence_length = 80
p.ml_perf.global_batch_size = 64
p.ml_perf.optimizer_name = 'adam'
p.ml_perf.opt_adam_beta_1 = 0.9
p.ml_perf.opt_adam_beta_2 = 0.98
p.ml_perf.opt_adam_epsilon = 1e-9
p.ml_perf.base_learning_rate = 2.0
p.ml_perf.warmup_steps = self.WARMUP_STEPS
p.ml_perf.train_samples = 566340
p.ml_perf.eval_samples = 3003
return p
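# Editor's note (added sanity check, not part of the original file): the
# steps_per_epoch values hard-coded above follow directly from
# ceil(train_samples / global_batch_size).
import math
def _steps_per_epoch(train_samples, global_batch_size):
  # Training steps needed to consume every sample once at this batch size.
  return int(math.ceil(train_samples / float(global_batch_size)))
assert _steps_per_epoch(566340, 512) == 1107
assert _steps_per_epoch(566340, 2048) == 277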
|
mlperf/training_results_v0.7
|
Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v4-16/lingvo/tasks/mt/params/mlperf.py
|
Python
|
apache-2.0
| 27,995
|
[
"Gaussian"
] |
ec2211bb206f8c53029b92b10a23144b5fb20251f4b5d917a44207ef29ee3c28
|
#simulated neuron scalar data provider
import os
import sys
import numpy as np
import scipy.misc
import glob
sys.path.append("../convnet-folk_master")
from w_util import readLines
# define default parameters
DATA_NUM=1000
class SimSet:
def __init__(self, paramfile):
print "SimNeuPrvd: parsing", paramfile
plines = readLines(paramfile)
self.param = {'paramfile': paramfile}
for l in plines:
l=l.rstrip().split()
self.param[l[0]]=l[1]
print self.param
self.indim=1
self.outdim=1
# draw data
self.input=[]
self.output=[]
for i in range(DATA_NUM):
if DATA_NUM==1:
m=np.ones([1])
else:
m=np.random.random(1)*2-1 #random in [-1, 1)
self.input+=[m]
mm=np.zeros([1])
if self.param['train']=='1':
#mm[0]=max(0, m[0]) #relu
mm[0]=max(0, abs(m[0])-0.5)*(1 if m[0]>=0 else -1) #shlu
else:
mm[0]=0
self.output+=[mm]
def get_num_images(self):
return DATA_NUM
def get_input_dim(self):
return self.indim
def get_output_dim(self):
return self.outdim
def get_input(self, idx):
return self.input[idx]
def get_output(self, idx):
return self.output[idx]
def getmeta(self, idx):
return self.param
def getStore(param):
return SimSet(param)
def test(param):
ts = SimSet(param)
print "{} images, {} classes".format(ts.get_num_images(), ts.get_num_classes())
for i in range(0,20,10):
im=ts.get_input(i)
y=ts.get_output(i)
meta=ts.getmeta(i)
print "i={}, input={},\toutput={}".format(i, im, y)
print 'image shape:', np.shape(im)
print 'meta', meta
if __name__ == '__main__':
print 'testing SimPrvd.py!'
assert(len(sys.argv)==2)
test(sys.argv[1])
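# Editor's sketch (added, not in the original file): the "shlu" target computed
# above is a soft-shrinkage nonlinearity, sign(x) * max(0, |x| - 0.5), shown
# standalone here for reference:
def _shlu(x, thresh=0.5):
    return max(0.0, abs(x) - thresh) * (1.0 if x >= 0 else -1.0)
assert _shlu(0.3) == 0.0
assert abs(_shlu(0.8) - 0.3) < 1e-12 and abs(_shlu(-0.8) + 0.3) < 1e-12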
|
whereaswhile/DLSR
|
common/SimNeuPrvd.py
|
Python
|
gpl-2.0
| 1,824
|
[
"NEURON"
] |
fa69f74f6fc87a1ffaba00d422e10546e7fd9748998d34150e1559a5882295ea
|
import sap.if1
import sap.symbol_table
import sap.error
class CompilerBase(object):
placeholder = object()
def __init__(self):
self.m = sap.if1.Module()
self.symtab = sap.symbol_table.SymbolTable()
self.error = sap.error.CompilerError(self)
self.graphs = []
for sap_type in ('boolean', 'character', 'doublereal',
'integer', 'null', 'real', 'wild'):
setattr(self, sap_type, getattr(self.m, sap_type))
return
def set_base_attr(self, f):
self.function = f
self.filename = f.func_code.co_filename
return
def visit(self, node, *args):
mname = type(node).__name__
method = getattr(self, mname, self.default)
return method(node, *args)
def default(self, node):
self.error.not_supported_error(
node, 'not implemented %s' % type(node).__name__)
return
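# Editor's sketch (standalone illustration, not part of sap): visit() above is a
# name-based visitor -- it dispatches on type(node).__name__, so a subclass
# handles a node class simply by defining a method with that class's name.
class _DemoVisitor(object):
    def visit(self, node, *args):
        method = getattr(self, type(node).__name__, self.default)
        return method(node, *args)
    def int(self, node):  # invoked for any node whose type name is 'int'
        return node * 2
    def default(self, node):
        raise NotImplementedError(type(node).__name__)
assert _DemoVisitor().visit(21) == 42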
|
patmiller/SAP
|
sap/compiler_base.py
|
Python
|
bsd-3-clause
| 930
|
[
"VisIt"
] |
f2e449f5f0c46380bfbfb22eb69e166d8e0b48c11fcf10961d63f1e2e64c456b
|
"""
Helper Classes and Functions for docking fingerprint computation.
"""
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar and Jacob Durrant"
__license__ = "GNU General Public License"
import logging
import math
import os
import subprocess
import numpy as np
import deepchem.utils.rdkit_util as rdkit_util
def force_partial_charge_computation(mol):
"""Force computation of partial charges for molecule.
Parameters
----------
mol: Rdkit Mol
Molecule on which we compute partial charges.
"""
rdkit_util.compute_charges(mol)
def pdbqt_to_pdb(input_file, output_directory):
"""Convert pdbqt file to pdb file.
Parameters
----------
input_file: String
Path to input file.
output_directory: String
Path to desired output directory.
"""
  logging.info("%s %s", input_file, output_directory)
raise ValueError("Not yet implemented")
def hydrogenate_and_compute_partial_charges(input_file,
input_format,
hyd_output=None,
pdbqt_output=None,
protein=True,
verbose=True):
"""Outputs a hydrogenated pdb and a pdbqt with partial charges.
Takes an input file in specified format. Generates two outputs:
-) A pdb file that contains a hydrogenated (at pH 7.4) version of
original compound.
-) A pdbqt file that has computed Gasteiger partial charges. This pdbqt
file is build from the hydrogenated pdb.
TODO(rbharath): Can do a bit of refactoring between this function and
pdbqt_to_pdb.
Parameters
----------
input_file: String
Path to input file.
input_format: String
Name of input format.
"""
mol = rdkit_util.load_molecule(
input_file, add_hydrogens=True, calc_charges=True)[1]
if verbose:
logging.info("Create pdb with hydrogens added")
rdkit_util.write_molecule(mol, str(hyd_output), is_protein=protein)
if verbose:
logging.info("Create a pdbqt file from the hydrogenated pdb above.")
rdkit_util.write_molecule(mol, str(pdbqt_output), is_protein=protein)
if protein:
logging.info("Removing ROOT/ENDROOT/TORSDOF")
with open(pdbqt_output) as f:
pdbqt_lines = f.readlines()
    filtered_lines = []
    for line in pdbqt_lines:
      # Actually drop the torsion-tree records named in the log message above.
      if not any(tag in line for tag in ("ROOT", "ENDROOT", "TORSDOF")):
        filtered_lines.append(line)
with open(pdbqt_output, "w") as f:
f.writelines(filtered_lines)
class AromaticRing(object):
"""Holds information about an aromatic ring."""
def __init__(self, center, indices, plane_coeff, radius):
"""
Initializes an aromatic.
Parameters
----------
center: float
Center of the ring.
indices: list
List of the atom indices for ring atoms.
plane_coeff: list
A list of elements [a, b, c, d] that define a plane by equation
a x + b y + c z = d.
radius: float
Ring radius from center.
"""
self.center = center
self.indices = indices
    # a*x + b*y + c*z = d
self.plane_coeff = plane_coeff
self.radius = radius
def average_point(points):
"""Returns the point with averaged coordinates of arguments.
Parameters
----------
points: list
List of point objects.
Returns
-------
pavg: Point object
Has coordinates the arithmetic average of those of p1 and p2.
"""
  coords = np.zeros(3)
  for point in points:
    coords += point.as_array()
if len(points) > 0:
return Point(coords=coords / len(points))
else:
return Point(coords=coords)
class Point(object):
"""
Simple implementation for a point in 3-space.
"""
def __init__(self, x=None, y=None, z=None, coords=None):
"""
Inputs can be specified either by explicitly providing x, y, z coords
or by providing a numpy array of length 3.
Parameters
----------
x: float
X-coord.
y: float
Y-coord.
z: float
Z-coord.
coords: np.ndarray
Should be of length 3 in format np.array([x, y, z])
Raises
------
ValueError: If no arguments are provided.
"""
    if x is not None and y is not None and z is not None:
#self.x, self.y, self.z = x, y, z
self.coords = np.array([x, y, z])
elif coords is not None: # Implicit eval doesn't work on numpy arrays.
#self.x, self.y, self.z = coords[0], coords[1], coords[2]
self.coords = coords
else:
raise ValueError("Must specify coordinates for Point!")
# TODO(bramsundar): Should this be __copy__?
def copy_of(self):
"""Return a copy of this point."""
return Point(coords=np.copy(self.coords))
def dist_to(self, point):
"""Distance (in 2-norm) from this point to another."""
return np.linalg.norm(self.coords - point.coords)
def magnitude(self):
"""Magnitude of this point (in 2-norm)."""
return np.linalg.norm(self.coords)
#return self.dist_to(Point(coords=np.array([0, 0, 0])))
def as_array(self):
"""Return the coordinates of this point as array."""
#return np.array([self.x, self.y, self.z])
return self.coords
class Atom(object):
"""
Implements a container class for atoms. This class contains useful
annotations about the atom.
"""
def __init__(self,
atomname="",
residue="",
coordinates=Point(coords=np.array([99999, 99999, 99999])),
element="",
pdb_index="",
line="",
atomtype="",
indices_of_atoms_connecting=None,
charge=0,
resid=0,
chain="",
structure="",
comment=""):
"""
Initializes an atom.
Assumes that atom is loaded from a PDB file.
Parameters
----------
atomname: string
Name of atom. Note that atomname is not the same as residue since
atomnames often have extra annotations (e.g., CG, NZ, etc).
residue: string:
Name of protein residue this atom belongs to.
element: string
Name of atom's element.
coordinate: point
A point object (x, y, z are in Angstroms).
pdb_index: string
Index of the atom in source PDB file.
line: string
The line in the PDB file which specifies this atom.
atomtype: string
Element of atom. This differs from atomname which typically has extra
annotations (e.g. CA, OA, HD, etc)
IndicesOfAtomConnecting: list
The indices (in a PDB object) of all atoms bonded to this one.
charge: float
Associated electrostatic charge.
resid: int
The residue number in the receptor (listing the protein as a chain from
N-Terminus to C-Terminus). Assumes this is a protein atom.
chain: string
Chain identifier for molecule. See PDB spec.
structure: string
One of ALPHA, BETA, or OTHER for the type of protein secondary
structure this atom resides in (assuming this is a receptor atom).
comment: string
Either LIGAND or RECEPTOR depending on whether this is a ligand or
receptor atom.
"""
self.atomname = atomname
self.residue = residue
self.coordinates = coordinates
self.element = element
self.pdb_index = pdb_index
self.line = line
self.atomtype = atomtype
if indices_of_atoms_connecting is not None:
self.indices_of_atoms_connecting = indices_of_atoms_connecting
else:
self.indices_of_atoms_connecting = []
self.charge = charge
self.resid = resid
self.chain = chain
self.structure = structure
self.comment = comment
def copy_of(self):
"""Make a copy of this atom."""
theatom = Atom()
theatom.atomname = self.atomname
theatom.residue = self.residue
theatom.coordinates = self.coordinates.copy_of()
theatom.element = self.element
theatom.pdb_index = self.pdb_index
theatom.line = self.line
theatom.atomtype = self.atomtype
theatom.indices_of_atoms_connecting = self.indices_of_atoms_connecting[:]
theatom.charge = self.charge
theatom.resid = self.resid
theatom.chain = self.chain
theatom.structure = self.structure
theatom.comment = self.comment
return theatom
def create_pdb_line(self, index):
"""
Generates appropriate ATOM line for pdb file.
Parameters
----------
index: int
Index in associated PDB file.
"""
output = "ATOM "
output = (
output + str(index).rjust(6) + self.atomname.rjust(5) +
self.residue.rjust(4) + self.chain.rjust(2) + str(self.resid).rjust(4))
coords = self.coordinates.as_array() # [x, y, z]
output = output + ("%.3f" % coords[0]).rjust(12)
output = output + ("%.3f" % coords[1]).rjust(8)
output = output + ("%.3f" % coords[2]).rjust(8)
output = output + self.element.rjust(24)
return output
def number_of_neighbors(self):
"""Reports number of neighboring atoms."""
return len(self.indices_of_atoms_connecting)
def add_neighbor_atom_indices(self, indices):
"""
Adds atoms with provided PDB indices as neighbors.
Parameters
----------
index: list
List of indices of neighbors in PDB object.
"""
for index in indices:
if index not in self.indices_of_atoms_connecting:
self.indices_of_atoms_connecting.append(index)
def side_chain_or_backbone(self):
"""Determine whether receptor atom belongs to residue sidechain or backbone.
"""
# TODO(rbharath): Should this be an atom function?
if (self.atomname.strip() == "CA" or self.atomname.strip() == "C" or
self.atomname.strip() == "O" or self.atomname.strip() == "N"):
return "BACKBONE"
else:
return "SIDECHAIN"
def read_atom_pdb_line(self, line):
"""
TODO(rbharath): This method probably belongs in the PDB class, and not
in the Atom class.
Reads an ATOM or HETATM line from PDB and instantiates fields.
Atoms in PDBs are represented by ATOM or HETATM statements. ATOM and
HETATM statements follow the following record format:
(see ftp://ftp.wwpdb.org/pub/pdb/doc/format_descriptions/Format_v33_Letter.pdf)
COLUMNS DATA TYPE FIELD DEFINITION
-------------------------------------------------------------------------------------
1 - 6 Record name "ATOM "/"HETATM"
7 - 11 Integer serial Atom serial number.
13 - 16 Atom name Atom name.
17 Character altLoc Alternate location indicator.
18 - 20 Residue name resName Residue name.
22 Character chainID Chain identifier.
23 - 26 Integer resSeq Residue sequence number.
27 AChar iCode Code for insertion of residues.
31 - 38 Real(8.3) x Orthogonal coordinates for X in Angstroms.
39 - 46 Real(8.3) y Orthogonal coordinates for Y in Angstroms.
47 - 54 Real(8.3) z Orthogonal coordinates for Z in Angstroms.
55 - 60 Real(6.2) occupancy Occupancy.
61 - 66 Real(6.2) tempFactor Temperature factor.
77 - 78 LString(2) element Element symbol, right-justified.
79 - 80 LString(2) charge Charge on the atom.
"""
self.line = line
self.atomname = line[11:16].strip()
if len(self.atomname) == 1:
self.atomname = self.atomname + " "
elif len(self.atomname) == 2:
self.atomname = self.atomname + " "
elif len(self.atomname) == 3:
# This line is necessary for babel to work, though many PDBs in
# the PDB would have this line commented out
self.atomname = self.atomname + " "
self.coordinates = Point(
coords=np.array(
[float(line[30:38]),
float(line[38:46]),
float(line[46:54])]))
# now atom type (for pdbqt)
if line[77:79].strip():
self.atomtype = line[77:79].strip().upper()
elif self.atomname:
# If atomtype is not specified, but atomname is, set atomtype to the
# first letter of atomname. This heuristic suffices for proteins,
# since no two-letter elements appear in standard amino acids.
self.atomtype = self.atomname[:1]
else:
self.atomtype = ""
if line[69:76].strip() != "":
self.charge = float(line[69:76])
else:
self.charge = 0.0
if self.element == "": # try to guess at element from name
two_letters = self.atomname[0:2].strip().upper()
valid_two_letters = [
"BR", "CL", "BI", "AS", "AG", "LI", "HG", "MG", "MN", "RH", "ZN", "FE"
]
if two_letters in valid_two_letters:
self.element = two_letters
else: #So, just assume it's the first letter.
        # Any digit (and '@') needs to be removed from the element name
        # before taking the leading character as the element symbol.
        self.element = self.atomname
        for char in '0123456789@':
          self.element = self.element.replace(char, '')
self.element = self.element[0:1].strip().upper()
self.pdb_index = line[6:12].strip()
self.residue = line[16:20]
# this only uses the rightmost three characters, essentially
# removing unique rotamer identification
self.residue = " " + self.residue[-3:]
if line[23:26].strip() != "":
self.resid = int(line[23:26])
else:
self.resid = 1
self.chain = line[21:22]
if self.residue.strip() == "":
self.residue = " MOL"
class Charged(object):
"""
  A class that represents a charged atom.
"""
def __init__(self, coordinates, indices, positive):
"""
Parameters
----------
coordinates: point
Coordinates of atom.
indices: list
Contains boolean true or false entries for self and neighbors to
specify if positive or negative charge
positive: bool
Whether this atom is positive or negative.
"""
self.coordinates = coordinates
self.indices = indices
self.positive = positive
def vector_subtraction(point1, point2): # point1 - point2
"""Subtracts the coordinates of the provided points."""
return Point(coords=point1.as_array() - point2.as_array())
def cross_product(point1, point2): # never tested
"""Calculates the cross-product of provided points."""
return Point(coords=np.cross(point1.as_array(), point2.as_array()))
def vector_scalar_multiply(point, scalar):
"""Multiplies the provided point by scalar."""
return Point(coords=scalar * point.as_array())
def dot_product(point1, point2):
"""Dot product of points."""
return np.dot(point1.as_array(), point2.as_array())
def dihedral(point1, point2, point3, point4): # never tested
"""Compute dihedral angle between 4 points.
TODO(rbharath): Write a nontrivial test for this.
"""
b1 = vector_subtraction(point2, point1)
b2 = vector_subtraction(point3, point2)
b3 = vector_subtraction(point4, point3)
b2Xb3 = cross_product(b2, b3)
b1Xb2 = cross_product(b1, b2)
b1XMagb2 = vector_scalar_multiply(b1, b2.magnitude())
radians = math.atan2(dot_product(b1XMagb2, b2Xb3), dot_product(b1Xb2, b2Xb3))
return radians
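# Editor's note: a planar sanity check for dihedral() (hypothetical test, not in
# the original source). Four coplanar points give 0 (cis) or pi (trans).
def _check_dihedral():
  p = lambda x, y, z: Point(coords=np.array([x, y, z], dtype=float))
  trans = dihedral(p(0, 0, 0), p(1, 0, 0), p(1, 1, 0), p(2, 1, 0))
  cis = dihedral(p(0, 0, 0), p(1, 0, 0), p(1, 1, 0), p(0, 1, 0))
  assert abs(abs(trans) - math.pi) < 1e-12 and abs(cis) < 1e-12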
def angle_between_three_points(point1, point2, point3):
"""Computes the angle (in radians) between the three provided points."""
return angle_between_points(
vector_subtraction(point1, point2), vector_subtraction(point3, point2))
def angle_between_points(point1, point2):
"""Computes the angle (in radians) between two points."""
return math.acos(
dot_product(point1, point2) / (point1.magnitude() * point2.magnitude()))
def normalized_vector(point):
"""Normalize provided point."""
return Point(coords=point.as_array() / np.linalg.norm(point.as_array()))
def distance(point1, point2):
"""Computes distance between two points."""
return point1.dist_to(point2)
def project_point_onto_plane(point, plane_coefficients):
"""Finds nearest point on specified plane to given point.
Parameters
----------
point: Point
Given point
plane_coefficients: list
[a, b, c, d] where place equation is ax + by + cz = d
"""
# The normal vector to plane is n = [a, b, c]
offset = plane_coefficients[3]
normal = np.array(plane_coefficients[:3])
# We first shift by basepoint (a point on given plane) to make math
# simpler. basepoint is given by d/||n||^2 * n
basepoint = (offset / np.linalg.norm(normal)**2) * normal
diff = point.as_array() - basepoint
# The perpendicular component of diff to plane is
# (n^T diff / ||n||^2) * n
perp = (np.dot(normal, diff) / np.linalg.norm(normal)**2) * normal
closest = basepoint + (diff - perp)
return Point(coords=np.array(closest))
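# Editor's sketch (hypothetical check, not in the original module): projecting
# (1, 2, 3) onto the plane z = 0 (coefficients [0, 0, 1, 0]) lands on (1, 2, 0).
def _check_plane_projection():
  proj = project_point_onto_plane(
      Point(coords=np.array([1.0, 2.0, 3.0])), [0.0, 0.0, 1.0, 0.0])
  assert np.allclose(proj.as_array(), [1.0, 2.0, 0.0])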
|
Agent007/deepchem
|
deepchem/feat/nnscore_utils.py
|
Python
|
mit
| 17,258
|
[
"RDKit"
] |
2884a4e660b63444c1d422a59309d4f4bb764d67dc5948f34e9a1ce2cae39a3d
|
"""Tests for TURBOMOLE cauclator interface."""
import os
import numpy as np
from phonopy.interface.phonopy_yaml import read_cell_yaml
from phonopy.interface.turbomole import read_turbomole
data_dir = os.path.dirname(os.path.abspath(__file__))
def test_read_turbomole():
"""Test read_turbomole."""
cell = read_turbomole(os.path.join(data_dir, "Si-TURBOMOLE-control"))
filename = os.path.join(data_dir, "Si-TURBOMOLE.yaml")
cell_ref = read_cell_yaml(filename)
assert (np.abs(cell.cell - cell_ref.cell) < 1e-5).all()
diff_pos = cell.scaled_positions - cell_ref.scaled_positions
diff_pos -= np.rint(diff_pos)
assert (np.abs(diff_pos) < 1e-5).all()
for s, s_r in zip(cell.symbols, cell_ref.symbols):
assert s == s_r
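def test_fractional_wrap():
    """Editor's note (hypothetical extra test): the rint subtraction above wraps
    fractional-coordinate differences into [-0.5, 0.5], so positions differing
    only by a lattice translation compare as equal."""
    d = np.array([0.99, -0.49])
    d -= np.rint(d)
    assert np.allclose(d, [-0.01, -0.49])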
|
atztogo/phonopy
|
test/interface/test_TURBOMOLE.py
|
Python
|
bsd-3-clause
| 760
|
[
"TURBOMOLE",
"phonopy"
] |
7aa7f1c7e84f7e6d2b0dc74f8e9cde06ede1fb25b47f7445d833ae925874809a
|
# SYSTEM LIBS
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt; plt.ion();
import matplotlib
import numpy as np
import pandas as pd
# from root_pandas import read_root
import sys
from itertools import islice
from scipy import signal # To find peaks, for edge-detecting the crystal
from scipy.optimize import curve_fit
import os
from sklearn import mixture
import random
import math
# MY LIBS
# import editable_input as ei # My script for editable text input
# from bin_dataframe import bin2D_dataframe
import mie_utils as my
# def gaussian(x, mu, sig):
# return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
def gaussian(x, mu, sig, c):
return c*matplotlib.mlab.normpdf(x, mu, sig)
def gaussian_sum(x, c1, mu1, sig1, mu2, sig2):
return c1*matplotlib.mlab.normpdf(x, mu1, sig1) + \
(1-c1)*matplotlib.mlab.normpdf(x, mu2, sig2)
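# Editor's sketch (added check, not in the original script): gaussian_sum is a
# two-component mixture density, c1*N(mu1, sig1) + (1 - c1)*N(mu2, sig2), so it
# should integrate to ~1 for any 0 <= c1 <= 1:
_x_check = np.linspace(-100.0, 200.0, 6001)
assert abs(np.trapz(gaussian_sum(_x_check, 0.3, 0.0, 5.0, 70.0, 10.0), _x_check) - 1.0) < 1e-3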
def fit_channeling(input_df,lowest_percentage, highest_percentage,
fit_tolerance,max_iterations):
"""
Fit the histogram for two gaussian peaks (CH for channeling and AM for
amorphous), then return the fit object for further processing.
data: input dataset.
    return: dict containing the parameters:
"weight_AM"
"weight_CH"
"mean_AM"
"mean_CH"
"sigma_AM"
"sigma_CH"
"""
clf = mixture.GaussianMixture(
n_components=2,
covariance_type='full',
verbose=0,
verbose_interval=10,
random_state=random.SystemRandom().randrange(0,2147483647), # 2**31-1
# means_init=[[-5], [77]],
# weights_init=[0.5, 0.5],
init_params="kmeans",
n_init = 5,
tol=fit_tolerance,
# precisions_init = [[[1/10**2]],[[1/10**2]]],
#warm_start=True,
max_iter=max_iterations)
################# GET THE DATA FROM THE DATAFRAME
first_percentile = np.percentile(input_df, lowest_percentage)
last_percentile = np.percentile(input_df, highest_percentage)
data_reduced = input_df.values[(input_df.values >= \
first_percentile) & (input_df.values <= last_percentile)]
data = data_reduced.reshape(-1, 1)
################# FIT THE DATA
    clf.fit(data)
    if not clf.converged_:
        print("[LOG]: Fit did not converge in this bin, bin ignored")
r_m1, r_m2 = clf.means_
w1, w2 = clf.weights_
m1, m2 = r_m1[0], r_m2[0]
r_c1, r_c2 = clf.covariances_
#r_c1 = clf.covariances_
#r_c2 = clf.covariances_
c1, c2 = np.sqrt(r_c1[0][0]), np.sqrt(r_c2[0][0])
# print("Means: ", clf.means_, "\n")
# print("Weights: ", clf.weights_, "\n")
# print("Precisions: ", 1/c1, " ", 1/c2, "\n")
# print("Covariances: ", c1, " ", c2, "\n")
fit_results = {}
# Save the weights in the right array
# Lower delta_thetax is the AM peak, higher CH
fit_results["nevents"] = len(data)
if (m1 < m2):
fit_results["weight_AM"] = w1
fit_results["weight_CH"] = w2
fit_results["mean_AM"] = m1
fit_results["mean_CH"] = m2
fit_results["sigma_AM"] = c1
fit_results["sigma_CH"] = c2
else:
fit_results["weight_AM"] = w2
fit_results["weight_CH"]= w1
fit_results["mean_AM"] = m2
fit_results["mean_CH"] = m1
fit_results["sigma_AM"] = c2
fit_results["sigma_CH"] = c1
# Calculate errors plugging the found parameters in a chi2 fit.
data_histo = np.histogram(data,bins=200,normed=True)
histo_bin_centers = (data_histo[1] + (data_histo[1][1] - data_histo[1][0])/2)[:-1]
initial_guess = [fit_results["weight_AM"], fit_results["mean_AM"], fit_results["sigma_AM"],
fit_results["mean_CH"], fit_results["sigma_CH"]]
popt, pcov = curve_fit(gaussian_sum, histo_bin_centers, data_histo[0],
p0=initial_guess)
print(popt)
# # Plot the chi2 fit, for debug purposes
# plt.figure()
# plt.plot(histo_bin_centers,data_histo[0],".")
# plt.plot(histo_bin_centers,gaussian_sum(histo_bin_centers,*popt))
# plt.plot(histo_bin_centers,gaussian_sum(histo_bin_centers,*initial_guess))
#
# plt.show()
perr = np.sqrt(np.diag(pcov))
# Should be in same order as in p0 of curve_fit
fit_results["weight_AM_err"] = perr[0]
fit_results["weight_CH_err"] = perr[0] # c2=1-c1, by propagation same error
fit_results["mean_AM_err"] = perr[1]
fit_results["sigma_AM_err"] = perr[2]
fit_results["mean_CH_err"] = perr[3]
fit_results["sigma_CH_err"] = perr[4]
return fit_results,data
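# Editor's sketch (hypothetical smoke test, not in the original script): on
# synthetic data drawn from two known Gaussians, fit_channeling should roughly
# recover the mixture weights:
#   _sim = pd.Series(np.concatenate([np.random.normal(0, 5, 7000),
#                                    np.random.normal(70, 10, 3000)]))
#   _res, _ = fit_channeling(_sim, 0.5, 99.5, 1e-4, 500)
#   assert abs(_res["weight_CH"] - 0.30) < 0.05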
######################################
################# MAIN
file_name = sys.argv[1]
crystal_name = sys.argv[2]
run_number = sys.argv[3]
particle_name = sys.argv[4]
particle_energy_input = sys.argv[5] # [GeV]
run_date = sys.argv[6]
# Use a run specific params file, otherwise look for a crystal specific one,
# otherwise use the general one.
if os.path.isfile(run_number + '_analysis_configuration_params.csv'):
analysis_configuration_params_file = run_number + '_analysis_configuration_params.csv'
elif os.path.isfile(crystal_name + '_analysis_configuration_params.csv'):
analysis_configuration_params_file = crystal_name + '_analysis_configuration_params.csv'
else:
analysis_configuration_params_file = 'analysis_configuration_params.csv'
print("[LOG]: Reading crystal analysis parameters from ", analysis_configuration_params_file)
# Check if the run number is in the actual data file name, otherwise print a
# warning
if '_'+run_number+'_' not in file_name:
print("[WARNING]: '_{}_' not found in file name '{}', maybe check if "
"correct run number or correct file.".format(run_number, file_name))
# Read by chunk not needed by now probably.
events = pd.read_hdf(file_name)
# Angles in microradians from torsion_correction.py lines 171-174
# events["Delta_Theta_x"] = events.loc[:,'Tracks_thetaOut_x'].values - \
# events.loc[:,'Tracks_thetaIn_x'].values
# # Read crystal parameters
cpars = pd.read_csv("crystal_physical_characteristics.csv", index_col=0)
crystal_params = cpars[~cpars.index.isnull()] # Remove empty (like ,,,,,,) lines
crystal_lenght = float(crystal_params.loc[crystal_name,"Lenght (z) (mm)"])*1e-3 # [m]
# Taken from my thesis code
# Initial guesses for crystal parameter, uses either data from
# crystal_physical_characteristics.csv or a default value if the latter is not
# found.
particle_energy = float(particle_energy_input)*1e9 # [eV] TODO generalize to pions!
critical_radius = particle_energy / 550e9 # [m] 550 GeV/m electric field strength from Biryukov
pot_well = 21.34 # [eV] Potential well between crystal planes
theta_bending = float(crystal_params.loc[crystal_name,"H8 bending angle (urad)"]) * 1e-6 # [rad]
crystal_curvature_radius = crystal_lenght / theta_bending
theta_c = math.sqrt(2*pot_well/particle_energy) * (1 - critical_radius/crystal_curvature_radius)*1e6 # [murad]
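# Editor's note (added): for a 400 GeV proton and R >> R_c the leading factor is
# sqrt(2 * 21.34 / 400e9) ~ 1.03e-5 rad ~ 10.3 murad, the usual Lindhard
# critical-angle scale for planar channeling in silicon.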
# c1_thetavr, c2_thetavr = (-1.5, 1.66666)
# theta_vr = c1_thetavr * theta_c * (1 - c2_thetavr*critical_radius/crystal_curvature_radius) # [murad]
################# FIT USING 5 and 10 AS CUTS
# ang_cut_low = [-5,-10] # [murad]
# ang_cut_high = [5,10] # [murad]
# for low_cut, high_cut in zip(ang_cut_low,ang_cut_high):
# plt.figure()
# geocut_df = events.loc[(events.loc[:,'Tracks_thetaIn_x'] > low_cut) & \
# (events.loc[:,'Tracks_thetaIn_x'] < high_cut)]
# # plt.hist2d(geocut_df.loc[:,'Tracks_thetaIn_x'].values, \
# # geocut_df.loc[:,'Tracks_thetaOut_x'].values - geocut_df.loc[:,'Tracks_thetaIn_x'].values,\
# # bins=[400,200], norm=LogNorm(), range=[[-100,100], [-80,120]])
# fit_results = fit_channeling(geocut_df.Delta_Theta_x)[0]
# filtered_data = fit_channeling(geocut_df.Delta_Theta_x)[1]
# plt.hist(filtered_data, bins=200, range=[-100,100], normed=False) # [murad]
#
#
# total_number_of_events = fit_results["nevents"]
# gauss_AM = total_number_of_events * fit_results["weight_AM"] * matplotlib.mlab.normpdf(x_histo, fit_results["mean_AM"], fit_results["sigma_AM"])
# gauss_CH = total_number_of_events * fit_results["weight_CH"] * matplotlib.mlab.normpdf(x_histo, fit_results["mean_CH"], fit_results["sigma_CH"])
#
# plt.plot(x_histo, gauss_AM, label="Amorphous Peak", color='r')
# plt.plot(x_histo, gauss_CH, label="Channeling Peak", color='Orange')
# plt.suptitle(r"Crystal {}, run {} — Channeling, cut ±{:.3} ".format(crystal_name, run_number, float(high_cut)),fontweight='bold')
# plt.title(r"Efficiency {:.3}% — bending angle {:.3} ".format(fit_results["weight_CH"]*100, fit_results["mean_CH"]) + r"$[\mu rad]$")
# plt.xlabel(r'$\Delta \theta_{x}\ [\mu rad]$')
# plt.ylabel('Frequency')
# plt.legend()
# #plt.tight_layout()
# plt.savefig("latex/img/" + str(high_cut) + "_chan_histo.pdf")
# plt.show()
#
#
# print("\nCut: +-",low_cut)
# print(pd.Series(fit_results))
#################
################# FIT USING CRITICAL ANGLE AS CUT
# theta_bending = fit_results["mean_CH"]
# crystal_curvature_radius = crystal_lenght / (theta_bending*1e-6)
# theta_c = math.sqrt(2*pot_well/particle_energy)*1e6 * (1 - critical_radius/crystal_curvature_radius) # [murad]
#### How much to move the absolute position of the cuts
# Example, with cuts [-5,5] and offset +3, we have an actual cut of [-2,8]
# Useful if torsion correction is not employed, to center the cuts
center_offset = float(my.get_from_csv(analysis_configuration_params_file,
"chan_center_offset"
))
ang_cut_low = [center_offset - theta_c / 2, center_offset - theta_c]
ang_cut_high = [center_offset + theta_c / 2, center_offset + theta_c]
dtx_low, dtx_high = my.get_from_csv(analysis_configuration_params_file,
"chan_hist_range_dtx_low",
"chan_hist_range_dtx_high",
)
dtx_nbins = int(my.get_from_csv(analysis_configuration_params_file,
"chan_hist_tx_nbins"))
x_histo = np.linspace(dtx_low,dtx_high,dtx_nbins + 1) # [murad]
print("New Thetac: ", theta_c)
lowest_percentage, highest_percentage = my.get_from_csv(analysis_configuration_params_file,
"chan_low_percentage",
"chan_high_percentage")
dech_start, dech_end = my.get_from_csv(analysis_configuration_params_file,
"dech_start",
"dech_end")
chan_fit_tolerance = my.get_from_csv(analysis_configuration_params_file,
"chan_fit_tolerance")
max_iterations = int(my.get_from_csv(analysis_configuration_params_file,
"chan_max_iterations"))
i = 0
plt.figure()
plt.hist2d(events.loc[:,'Tracks_thetaIn_x'].values, \
events.loc[:,'Tracks_thetaOut_x'].values - events.loc[:,'Tracks_thetaIn_x'].values,\
bins=[200,dtx_nbins], norm=LogNorm(), range=[[-100,100], [dtx_low,dtx_high]])
#plt.figure(); plt.hist2d(events.loc[:,'Tracks_thetaIn_x'].values, events.loc[:,'Tracks_thetaOut_x'].values - events.loc[:,'Tracks_thetaIn_x'].values, bins=[400,200], norm=LogNorm(), range=[[-100,100], [-80,120]])
for low_cut, high_cut in zip(ang_cut_low,ang_cut_high):
plt.figure()
geocut_df = events.loc[(events.loc[:,'Tracks_thetaIn_x'] > low_cut) & \
(events.loc[:,'Tracks_thetaIn_x'] < high_cut)]
totcut_df = geocut_df.loc[(events.loc[:,'Delta_Theta_x'] < dech_start) | \
(events.loc[:,'Delta_Theta_x'] > dech_end)]
# plt.hist2d(geocut_df.loc[:,'Tracks_thetaIn_x'].values, \
# geocut_df.loc[:,'Tracks_thetaOut_x'].values - geocut_df.loc[:,'Tracks_thetaIn_x'].values,\
# bins=[400,200], norm=LogNorm(), range=[[-100,100], [-80,120]])
fit_and_data = fit_channeling(totcut_df.Delta_Theta_x,
lowest_percentage, highest_percentage,
chan_fit_tolerance, max_iterations)
fit_results = fit_and_data[0]
filtered_data = fit_and_data[1]
#plt.yscale('log', nonposy='clip')
plt.hist(geocut_df.Delta_Theta_x, bins=dtx_nbins, range=[dtx_low,dtx_high], normed=False) # [murad]
#plt.hist(filtered_data, bins=dtx_nbins, range=[dtx_low,dtx_high], normed=False) # [murad]
total_number_of_events = len(filtered_data)#fit_results["nevents"]
area_bin = (dtx_high-dtx_low)/dtx_nbins * 1
gauss_AM = area_bin*total_number_of_events * fit_results["weight_AM"] * matplotlib.mlab.normpdf(x_histo, fit_results["mean_AM"], fit_results["sigma_AM"])
gauss_CH = area_bin*total_number_of_events * fit_results["weight_CH"] * matplotlib.mlab.normpdf(x_histo, fit_results["mean_CH"], fit_results["sigma_CH"])
# gauss_AM = fit_results["weight_AM"] * matplotlib.mlab.normpdf(x_histo, fit_results["mean_AM"], fit_results["sigma_AM"])
# gauss_CH = fit_results["weight_CH"] * matplotlib.mlab.normpdf(x_histo, fit_results["mean_CH"], fit_results["sigma_CH"])
plt.plot(x_histo, gauss_AM, label="Amorphous Peak", color='r')
plt.plot(x_histo, gauss_CH, label="Channeling Peak", color='Orange')
thetac_title = r"$\theta_c/2$" if i == 0 else r"$\theta_c$"
cut_value = theta_c/2 if i == 0 else theta_c
plt.suptitle(r"{} run {}, {} {} GeV — Channeling, cut ± {} = ±{:.3}".format(crystal_name,run_number,particle_name,particle_energy_input,thetac_title,cut_value),fontweight='bold')
plt.title(r"Efficiency {:.3}% ± {:.1f}% — Bending Angle {:.1f} ± {:.1f} {}".format(fit_results["weight_CH"]*100, fit_results["weight_CH_err"]*100,
fit_results["mean_CH"],fit_results["mean_CH_err"],r"$[\mu rad]$"))
plt.xlabel(r'$\Delta \theta_{x}\ [\mu rad]$')
plt.ylabel('Frequency')
plt.legend()
#plt.tight_layout()
thetac_filename = 'half_thetac' if i == 0 else 'thetac'
plt.savefig("latex/img/"+ thetac_filename + "_chan_histo.pdf")
plt.show()
print("\nCut low: ", low_cut)
print("\nCut high: ", high_cut)
print(pd.Series(fit_results))
# my.save_parameters_in_csv("crystal_analysis_parameters.csv",**fit_results)
i=i+1
#################
################# WRITE TO LATEX THE PARAMS
cut_x_left, cut_x_right = my.get_from_csv("crystal_analysis_parameters.csv", "xmin", "xmax")
tor_m,tor_q,tor_m_err,tor_q_err = my.get_from_csv("crystal_analysis_parameters.csv",\
"torsion_m", "torsion_q", "torsion_m_err", "torsion_q_err")
cut_y_low, cut_y_high = my.get_from_csv(analysis_configuration_params_file, "cut_y_low", "cut_y_high")
#Example \newcommand{\myname}{Francesco Forcher}
#with open("latex/text_gen-definitions.tex", "a") as myfile:
file_name = sys.argv[1]
crystal_name = sys.argv[2]
run_number = sys.argv[3]
particle_name = sys.argv[4]
particle_energy_input = sys.argv[5] # [GeV]
run_date = sys.argv[6]
with open("latex/test_gen-definitions.tex","w") as myfile:
myfile.write(r"% FILE GENERATED AUTOMATICALLY")
myfile.write("\n\n")
myfile.write("\\newcommand{{{}}}{{{}}}\n".format("\\myname","Francesco Forcher"))
myfile.write("\\newcommand{{{}}}{{{}}}\n".format("\\crystalname",crystal_name))
myfile.write("\\newcommand{{{}}}{{{}}}\n".format("\\runnumber", run_number))
myfile.write("\\newcommand{{{}}}{{{}}}\n".format("\\rundate", run_date))
myfile.write("\\newcommand{{{}}}{{{}}}\n".format("\\particletype", particle_name))
myfile.write("\\newcommand{{{}}}{{{}}}\n".format("\\particleenergy", particle_energy_input + " GeV"))
myfile.write("\n")
myfile.write("\\newcommand{{{}}}{{{:.3f}}}\n".format("\\xmin",float(cut_x_left)))
myfile.write("\\newcommand{{{}}}{{{:.3f}}}\n".format("\\xmax",float(cut_x_right)))
myfile.write("\\newcommand{{{}}}{{{:.3f}}}\n".format("\\ymin",float(cut_y_low)))
myfile.write("\\newcommand{{{}}}{{{:.3f}}}\n".format("\\ymax",float(cut_y_high)))
myfile.write("\n")
myfile.write("\\newcommand{{{}}}{{{:.1f}}}\n".format("\\torsionm", float(tor_m)))
myfile.write("\\newcommand{{{}}}{{{:.1f}}}\n".format("\\torsionq", float(tor_q)))
myfile.write("\\newcommand{{{}}}{{{:.1f}}}\n".format("\\torsionmerr",float(tor_m_err)))
myfile.write("\\newcommand{{{}}}{{{:.1f}}}\n".format("\\torsionqerr",float(tor_q_err)))
#################
|
f-forcher/crystal-channeling-analysis
|
channeling_efficiency.py
|
Python
|
gpl-3.0
| 16,737
|
[
"CRYSTAL",
"Gaussian"
] |
f1dff7234aa1da8e34025c4bcaac5cd4d345998fb71a7982e44e82fef487168a
|
class SOM:
"""
    A Self-Organising Map aka Kohonen map.
This class is a wrapper around SOM_PAK.
"""
def __init__(self, data, shape=(12,8), topology='rect', neighbourhood='gaussian'):
"""
Initialises a SOM using the input data set (a list of lists,
or a Numpy array).
shape is a tuple with the size of the map in x and y
topology can be 'rect' or 'hexa'
neighbourhood can be 'bubble' or 'gaussian'
"""
import time
t0 = time.time()
print " initialising SOM..."
self.data = data
nvars = len(data[0])
self.shape = shape
self.topol = topology
self.neigh = neighbourhood
from tempfile import mkdtemp
import os
self.bindir = os.path.dirname(os.path.abspath(__file__))+os.sep+'bin'+os.sep
self.tmpdir = mkdtemp() + os.sep
# create data file
f = open(self.tmpdir + 'som.dat', 'w')
f.write('%i\n' % nvars)
for d in data:
f.write(' '.join(map(str, d)) + '\n')
f.close()
# initialise code vector file
from subprocess import call
call([
self.bindir + 'randinit',
'-din','%ssom.dat' % self.tmpdir,
'-cout','%ssom.cod' % self.tmpdir,
'-xdim','%i' % self.shape[0],
'-ydim','%i' % self.shape[1],
'-topol','%s' % self.topol,
'-neigh','%s' % self.neigh
])
print " SOM initialisation done in %i seconds" % (int(time.time() - t0),)
def __del__(self):
from subprocess import call
call(['rm', '-R', self.tmpdir])
def train(self, rlen=1000, alpha=0.05, radius=10):
"""
Trains the SOM using 'rlen' iterations.
alpha is the learning rate that is linearly decreasing to 0
radius is the update radius that is linearly decreasing to 0
"""
import time
t0 = time.time()
print " SOM training starting..."
from subprocess import call
call([
self.bindir + 'vsom',
'-din','%ssom.dat' % self.tmpdir,
'-cin','%ssom.cod' % self.tmpdir,
'-cout','%ssom.cod' % self.tmpdir,
'-rlen','%i' % rlen,
'-alpha','%f' % alpha,
'-radius','%i' % radius,
])
print " SOM training done in %i seconds" % int(time.time() - t0)
def code_vectors(self):
import numpy as np
f = open(self.tmpdir + 'som.cod')
h = f.readline()
cod = np.array([map(float, l.strip().split()) for l in f])
f.close()
nvars = len(cod[0])
c = np.zeros([self.shape[0], self.shape[1], nvars])
cnt = 0
for j in range(self.shape[1]):
for i in range(self.shape[0]):
c[i,j,:] = cod[cnt,:]
cnt += 1
return c
def qerror(self):
"""
Return the quantization error.
"""
from subprocess import call
f = open(self.tmpdir + 'stdout.tmp', 'w')
call([
self.bindir + 'qerror',
'-din', '%ssom.dat' % self.tmpdir,
'-cin', '%ssom.cod' % self.tmpdir
], stdout=f)
f.close()
f = open(self.tmpdir + 'stdout.tmp')
q = f.readline().strip()
f.close()
return float(q.split()[-5])
def mappings(self, data=None):
"""
Returns the mappings of the provided data. If data is None, the
mappings of the training data will be used instead.
The mappings are a list of coordinates of the BMU (best matching
unit) and can be used to access the unit vector using the result
from code_vectors.
"""
if data is None: data = self.data
nvars = len(data[0])
# create data file
f = open(self.tmpdir + 'mappings.dat', 'w')
f.write('%i\n' % nvars)
for d in data:
f.write(' '.join(map(str, d)) + '\n')
f.close()
from subprocess import call
call([
self.bindir + 'visual',
'-din', self.tmpdir + 'mappings.dat',
'-cin', self.tmpdir + 'som.cod',
'-dout', self.tmpdir + 'mappings.vis'
])
f = open(self.tmpdir + 'mappings.vis')
h = f.readline()
m = []
for l in f:
v = l.strip().split()
m.append([int(v[0]), int(v[1])])
f.close()
return m
def plane(self, plane, outfile):
"""
Creates an EPS file for the provided plane number.
"""
if plane < 1:
raise Exception("Error. plane must be at least 1.")
if outfile[-4:] != '.eps': outfile += '.eps'
import os
if os.path.exists(outfile):
raise Exception("Error. File '%s' already exists." % outfile)
from subprocess import call
call([
self.bindir + 'planes',
'-cin', self.tmpdir + 'som.cod',
'-din', self.tmpdir + 'som.dat',
'-plane', '%i' % plane
])
call(['mv', self.tmpdir + 'som_p%i.eps' % plane, outfile])
def umat(self, outfile):
if outfile[-4:] != '.eps': outfile += '.eps'
import os
if os.path.exists(outfile):
raise Exception("Error. File '%s' already exists." % outfile)
f = open(outfile, 'w')
from subprocess import call
call([
self.bindir + 'umat',
'-cin', self.tmpdir + 'som.cod'
], stdout=f)
f.close()
def testSom():
colors = [
[0., 0., 0.], [0., 0., 1.], [0., 0., 0.5], [0.125, 0.529, 1.0],
[0.33, 0.4, 0.67], [0.6, 0.5, 1.0], [0., 1., 0.], [1., 0., 0.],
[0., 1., 1.], [1., 0., 1.], [1., 1., 0.], [1., 1., 1.],
[.33, .33, .33], [.5, .5, .5], [.66, .66, .66]
]
    som = SOM(data=colors, shape=(20,30), topology='rect', neighbourhood='gaussian')
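    # Editor's sketch (hypothetical continuation, not in the original): a full
    # session would then train the map and inspect its quality, e.g.
    #   som.train(rlen=2000, alpha=0.05, radius=10)
    #   print som.qerror()
    #   bmus = som.mappings()
    return som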
|
christiankaiser/sompak-py
|
sompak/__init__.py
|
Python
|
mit
| 6,023
|
[
"Gaussian"
] |
ea65c17823b101e2e56e595dd5282847584e1dda6fda0b6519a338e347dc247b
|
# Copyright (c) Geoffrey Lentner 2015. All Rights Reserved.
# See LICENSE (GPLv3)
# slipy/SLiPy/Profile.py
"""
Profile fitting tasks for spectra.
"""
import numpy as np
from scipy.optimize import curve_fit
from scipy.interpolate.interpolate import interp1d
from scipy.special import wofz as w
from scipy.integrate import simps as Integrate
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from matplotlib import widgets
from astropy import units as u
from astropy.constants import m_e, e, c
from .. import SlipyError
from .Observatory import Observatory
from .Spectrum import Spectrum, SpectrumError
from .Plot import SPlot, PlotError
from ..Framework.Options import Options, OptionsError
from ..Framework.Measurement import Measurement
from ..Algorithms.Functions import Gaussian, Lorentzian, Voigt, InvertedLorentzian
from ..Algorithms.KernelFit import KernelFit1D
from ..Data import Atomic
class ProfileError(SlipyError):
"""
Exception specific to Profile module.
"""
pass
def Pick(event):
"""
    Used to handle selection events.
"""
if isinstance(event.artist, Line2D):
# get the x, y data from the pick event
thisline = event.artist
xdata = thisline.get_xdata()
ydata = thisline.get_ydata()
ind = event.ind
# update the selection dictionary
global selected
selected['wave'].append(np.take(xdata,ind)[0])
selected['data'].append(np.take(ydata,ind)[0])
# display points as a visual aid
plt.scatter(np.take(xdata,ind)[0], np.take(ydata,ind)[0],
marker = 'o', s=75, edgecolor='red', facecolor='None', lw=2)
plt.draw()
# empty selection dictionary is filled with the Select() function
selected = {'wave': [], 'data': []}
def Select(splot):
"""
Select points from the `splot`. This should be of type SPlot
(or it can optionally be a Spectrum type, for which a SPlot will be
created). The splot will be rendered and the user clicks on the
figure. When finished, return to the terminal prompt. A dictionary is
returned with two entries, `wave` and `data`, representing the x-y
locations selected by the user. This can always be retrieved later by
accessing the module member `Profile.selected`.
"""
if type(splot) is Spectrum:
splot = SPlot(splot)
elif type(splot) is not SPlot:
raise ProfileError('Select() requires either a Spectrum or SPlot '
'object as an argument')
# reset the selection dictionary
global selected
selected = { 'wave': [], 'data': []}
splot.draw(picker = True)
splot.fig.canvas.mpl_connect('pick_event', Pick)
input(' Press <Return> after making your selections ... ')
return selected
def AutoFit(splot, function = InvertedLorentzian, params = None):
"""
Given `splot` of type SPlot, the user selects four points on the
spectrum and a parameterized function is fit (an inverted Lorentzian by
default). Optionally, `splot` can be of type spectrum and a basic SPlot
will be created for you. If the user gives an alternative `function`,
`params` (parameters) must be provided. `params` is to be the first guess,
`p0` given to scipy...curve_fit; the user can provide them expicitely,
or in the form of functions with the template `function(xarray, yarray)`
where `xarray` and `yarray` are the `wave` and `data` arrays extracted
between the two points selected by the user.
"""
print(' Please select four points identifying the spectral line.')
print(' Outer points mark the domain of the line.')
print(' Inner points mark the sample of the line to fit.')
# make selections
selected = Select(splot)
if len( selected['wave'] ) != 4:
raise ProfileError('Exactly 4 locations should be selected for '
'the Profile.AutoFit() routine!')
# order the selected wavelength locations
points = selected['wave']
points.sort()
# get data and wavelength arrays
if type(splot) is SPlot:
wave, data = splot.wave[0], splot.data[0]
else:
wave, data = splot.wave, splot.data
# extract the domains `selected` (i.e., the sample interval)
x_inner = wave[np.where(np.logical_and(points[1] < wave, wave < points[2]))]
x_outer = wave[np.where(np.logical_and(points[0] < wave, wave < points[3]))]
y_inner = data[np.where(np.logical_and(points[1] < wave, wave < points[2]))]
y_outer = data[np.where(np.logical_and(points[0] < wave, wave < points[3]))]
if function.__name__ == 'InvertedLorentzian':
# First guess for default behavior
params = [ y_inner.min().value, x_inner[ y_inner.argmin() ].value,
(x_inner[-1] - x_inner[0]).value / 2]
elif not params:
# the user gave a different function but not parameters!
raise ProfileError('The user must provide `params` when giving an '
'alternative `function` in Profile.Fit()!')
else:
if not hasattr(params, '__iter__'):
raise ProfileError('`params` must be an iterable type in '
'Profile.AutoFit()!')
try:
for a, parameter in enumerate(params):
if type(parameter) is type(InvertedLorentzian):
# replace parameter with function evaluation
params[a] = parameter(x_inner, y_inner)
except TypeError as err:
print(' --> TypeError:', err)
raise ProfileError('Profile.AutoFit() failed to call user functions '
'correctly in `params`!')
# fit a parameterized curve
coeff, var_matrix = curve_fit(
function, # function to call, default is InvertedLorentzian
x_inner.value, # domain (without units)
y_inner.value, # data (without units)
p0 = params # list of parameters to try as a first guess
)
# display visual aids ...
# evaluation of the fit profile over larger domain
plt.plot(x_outer, function(x_outer.value, *coeff) * y_outer.unit,
'b--', linewidth = 4)
plt.plot(x_inner, function(x_inner.value, *coeff) * y_inner.unit,
'r-', linewidth = 4)
# return the larger domain, evaluated and of type Spectrum
return Spectrum(function(x_outer.value, *coeff) * y_outer.unit, x_outer)
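# A minimal usage sketch (hedged; `spectrum` is a hypothetical Spectrum).
# With the default InvertedLorentzian no `params` are needed; the user marks
# the four guiding points interactively:
#
#     fit = AutoFit(SPlot(spectrum))
#     # `fit` is a Spectrum of the fitted profile over the outer domain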
def Extract(splot, kernel = Gaussian, **kwargs):
"""
Select locations in the `splot` figure, expected to be of type SPlot.
Exactly four points should be selected. These are used to extract a
line profile from the spectrum plotted in the splot figure. The inner
section is used for the line, and the outer selection is used to model
the continuum; both are returned, in that order, as Spectrum objects.
The gap is bridged using 1D interpolation (scipy...interp1d).
"""
try:
options = Options( kwargs, {
'kind' : 'cubic' , # given to scipy...interp1d for continuum
'bandwidth' : 0.1*u.nm # user should provide this!
# 'rms' : False # measure the RMS of the line, continuum
})
kind = options('kind')
bandwidth = options('bandwidth')
# rms = options('rms')
except OptionsError as err:
print(' --> OptionsError:', err)
raise ProfileError('Unrecognized option given to Extract()!')
print(' Please select four points identifying the spectral line.')
print(' Outer intervals sample the continuum.')
print(' Center interval contains the line.')
# make selections
selected = Select(splot)
if len( selected['wave'] ) != 4:
raise ProfileError('Exactly 4 locations should be selected for '
'the profile modeling to work!')
# order the selected wavelength locations
wave = selected['wave']
wave.sort()
# create `line` profile
xl = splot.wave[0].copy()
yl = splot.data[0].copy()
yl = yl[ xl[ xl < wave[2] ] > wave[1] ]
xl = xl[ xl[ xl < wave[2] ] > wave[1] ]
line = Spectrum(yl, xl)
# extract continuum arrays
xc = splot.wave[0].copy()
yc = splot.data[0].copy()
# inside outer-most selections
yc = yc[ xc[ xc < wave[3] ] > wave[0] ]
xc = xc[ xc[ xc < wave[3] ] > wave[0] ]
# keep wavelengths whole domain for later
xx = xc.copy()
yy = yc.copy()
# but not the line
yc = yc[np.where(np.logical_or(xc < wave[1], xc > wave[2]))]
xc = xc[np.where(np.logical_or(xc < wave[1], xc > wave[2]))]
# use `kernel smoothing` to model the continuum
model = KernelFit1D(xc, yc, kernel = kernel, bandwidth = bandwidth)
# interpolate to cross `the gap`
interp = interp1d(xc, model.mean(xc), kind = kind)
# continuum model outside the line
cont_outside = interp(xc)
# continuum inside the line
cont_inside = interp(xl)
# continuum model for whole domain
cont_domain = interp(xx)
# build a spectrum from the arrays
continuum = Spectrum(cont_domain, xx)
# display visual aid
plt.plot(xx, cont_domain, 'r--', linewidth = 3)
plt.fill_between(xl, yl, cont_inside, color = 'blue', alpha = 0.25)
plt.draw()
# if not rms:
# return line, continuum
continuum.rms = np.sqrt( np.sum( (cont_outside * yc.unit - yc)**2 ) / len(yc) )
continuum.rms *= continuum.data.unit
return line, continuum
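# A minimal usage sketch (hedged; `spectrum` and the bandwidth value are
# hypothetical). Four selections split the figure into continuum and line:
#
#     line, continuum = Extract(SPlot(spectrum), bandwidth=0.05*u.nm)
#     continuum.rms    # RMS of the continuum fit, attached above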
def OpticalDepth(line, continuum, error=None, boost=None):
"""
Given an absorption `line` and its background `continuum`, compute the
apparent optical depth Spectrum. Both the line and the continuum must
be of Spectrum type. The continuum will be resampled to the pixel space of the
line if it is not already.
If provided an `error` spectrum, it should either have the same units as the `line`
or be in percent units (they should have identical wavelength arrays). An upper and
lower uncertainty will be computed by adding and subtracting before the calculation.
Further, if an `rms` value is attached to the `continuum` giving the percent error in
the continuum fit, this will be added in quadrature to the error spectrum beforehand.
`boost` allows for artificially increasing the resolution by resampling to more pixels.
If the spectrum is not of sufficiently high resolution, the integration could suffer
numerical errors. If provided, this should be a factor by which to increase the resolution
(e.g., `boost=2` would double the resolution by interpolating between the pixels).
This function returns a Spectrum with `upper` and `lower` bounds attached.
"""
if not isinstance(line, Spectrum):
raise ProfileError('OpticalDepth() expects the first argument to be of '
'`Spectrum` type!')
if not isinstance(continuum, Spectrum):
raise ProfileError('OpticalDepth() expects the second argument to be of '
'`Spectrum` type!')
line = line.copy()
continuum = continuum.copy()
if error:
if not isinstance(error, Spectrum):
raise ProfileError('OpticalDepth() expects the `error` to be of `Spectrum` type!')
if error.data.unit not in [line.data.unit, u.percent]:
raise ProfileError('OpticalDepth() expects the `error` spectrum to have either '
'the same units as the `line` or units of `percent`.')
error = error.copy()
if error.data.unit == u.percent:
if np.logical_and(error.data.value < 0, error.data.value > 100).any():
raise ProfileError('OpticalDepth() was given an `error` spectrum in '
'units of `percent` with values outside of 0 and 100!')
error.resample(line)
error.data = (error.data * line.data).to(line.data.unit)
if hasattr(continuum, 'rms'):
if not hasattr(continuum.rms, 'unit') or (continuum.rms.unit not in
[continuum.data.unit, u.percent]):
raise ProfileError('OpticalDepth() expects a `continuum` with an `rms` '
'attribute to have the same units or `percent` units.')
rms = continuum.rms
if rms.unit == u.percent:
rms = rms * continuum.data
rms = rms.to(continuum.data.unit)
if not error:
error = rms
else:
# add the two error components in quadrature
error.resample(line)
error = Spectrum(np.sqrt(rms**2 + error.data**2) * line.data.unit, line.wave)
# boost the resolution of the spectrum if requested
if boost: line.resample( line.wave[0], line.wave[-1], len(line) * boost )
# resample the continuum spectrum, nothing happens if they are already the same
continuum.resample(line)
# compute the apparent optical depth
tau = Spectrum(np.log((continuum / line).data.decompose()), line.wave)
if error:
tau.lower = Spectrum(np.log((continuum / (line - error)).data.decompose()), line.wave)
tau.upper = Spectrum(np.log((continuum / (line + error)).data.decompose()), line.wave)
return tau
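# A minimal usage sketch (hedged): continuing from Extract() above, where
# `line` and `continuum` are Spectrum objects and the continuum carries an
# `rms` estimate, so the bounds are attached:
#
#     tau = OpticalDepth(line, continuum)
#     tau.upper, tau.lower    # optical-depth bounds from the error estimate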
def EquivalentWidth(line, continuum, error=None, boost=None):
"""
Given an absorption `line` and its background `continuum`, compute the
equivalent width, `W`, of the feature. Both the line and the continuum must
be of Spectrum type. The continuum will be resampled to the pixel space of the
line if it is not already.
If provided an `error` spectrum, it should either have the same units as the `line`
or be in percent units (they should have identical wavelength arrays). An upper and
lower uncertainty will be computed by adding and subtracting before the calculation.
Further, if an `rms` value is attached to the `continuum` giving the percent error in
the continuum fit, this will be added in quadrature to the error spectrum beforehand.
`boost` allows for artificially increasing the resolution by resampling to more pixels.
If the spectrum is not of sufficiently high resolution, the integration could suffer
numerical errors. If provided, this should be a factor by which to increase the resolution
(e.g., `boost=2` would double the resolution by interpolating between the pixels).
Integration is performed using the composite Simpson's rule (scipy.integrate.simps).
This function returns a `Measurement` object (..Framework.Measurement.Measurement).
"""
tau = OpticalDepth(line, continuum, error=error, boost=boost)
W = Integrate(1 - np.exp(-tau.data.decompose()), tau.wave) * line.wave.unit
if error:
upper = Integrate(1 - np.exp(-tau.lower.data.decompose()), tau.wave) * line.wave.unit
lower = Integrate(1 - np.exp(-tau.upper.data.decompose()), tau.wave) * line.wave.unit
# join upper/lower into +/- set
uncertainty = np.array([(upper - W).value, (lower - W).value]) * tau.wave.unit
else: uncertainty = None
return Measurement(W, error = uncertainty, name = 'Equivalent Width',
notes = 'Measured using Profile.EquivalentWidth() from SLiPy')
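# A minimal usage sketch (hedged; assumes Measurement behaves like a Quantity
# with the +/- uncertainty attached as `error`):
#
#     W = EquivalentWidth(line, continuum)
#     W, W.error    # equivalent width and its asymmetric uncertainty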
def ColumnDensity(line, continuum, ion, error=None, boost=None, weakline=None, integrate=True):
"""
Given an absorption `line` and its background `continuum`, compute the
apparent column density, `N`, of the feature. Both the line and the continuum must
be of Spectrum type. The continuum will be resampled to the pixel space of the
line if it is not already. An `ion` must be provided (type Atomic.Ion) with
members, `fvalue` (oscillator strength) and `wavelength` (will be converted to
Angstroms).
If provided an `error` spectrum, it should either have the same units as the `line`
or be in percent units (they should have identical wavelength arrays). An upper and
lower uncertainty will be computed by adding and subtracting before the calculation.
Further, if an `rms` value is attached to the `continuum` giving the percent error in
the continuum fit, this will be added in quadrature to the error spectrum beforehand.
With `integrate` set to True (the default) the integrated apparent column density will
be returned (as type Measurement and in units of cm-2). Otherwise, a Spectrum is
returned as a function of wavelength and the `.upper` and `.lower` bounds are attached.
`boost` allows for artificially increasing the resolution by resampling to more pixels.
If the spectrum is not of sufficiently high resolution, the integration could suffer
numerical errors. If provided, this should be a factor by which to increase the resolution
(e.g., `boost=2` would double the resolution by interpolating between the pixels).
With a `weakline` provided (the results of this function on the weaker absorption
line for a doublet) a correction gives an approximate `true` column density (attached
to the returned Measurement as `.true`), see Savage & Sembach, 1991.
Integration is performed using the composite Simpson's rule (scipy.integrate.simps).
This function returns a `Measurement` object (..Framework.Measurement.Measurement).
"""
if (not isinstance(ion, Atomic.Ion) or not hasattr(ion, 'fvalue') or
not hasattr(ion, 'wavelength')): raise ProfileError("From ColumnDensity(), `ion` is "
"expected to be of type Atomic.Ion and have members `fvalue` and `wavelength`!")
if weakline and not hasattr(weakline, 'unit'):
raise ProfileError("From ColumnDensity(), `weakline` is expected to have units!")
const = (1.13e12 / u.cm) / (ion.fvalue * ion.wavelength.to(u.cm))
tau = OpticalDepth(line, continuum, error=error, boost=boost)
N = tau * const / ion.wavelength.to(u.AA).value
N.upper = tau.upper * const / ion.wavelength.to(u.AA).value
N.lower = tau.lower * const / ion.wavelength.to(u.AA).value
if not integrate and not weakline:
return N
elif weakline and not integrate:
raise ProfileError("From ColumnDensity(), you gave `integrate` as False but we "
"need to integrate to solve for the correct N using the `weakline`!")
upper = Integrate(N.upper.data, N.wave) / u.cm**2
lower = Integrate(N.lower.data, N.wave) / u.cm**2
N = Integrate(N.data, N.wave) / u.cm**2
uncertainty = np.array([(N - upper).value, (lower - N).value]) / u.cm**2
N = Measurement(N, name="Column Density (Apparent)",
error=uncertainty, notes="Measured using Profile.ColumnDensity() from SLiPy")
if not weakline:
return N
# attach the `correction` given the already computed `weakline` column density.
diff = np.log10(weakline.to(1 / u.cm**2).value) - np.log10(N.value)
if diff < 0.0 or diff > 0.24:
raise ProfileError("From ColumnDensity(), the difference between the doublet "
"lines is too great to solve for a correction using published results!")
# Table 4: Column Density Corrections for an Isolated Gaussian Component
# Savage & Sembach (1991)
table = np.array([ [0.000, 0.000], [0.010, 0.010], [0.020, 0.020], [0.030, 0.030],
[0.040, 0.040], [0.050, 0.051], [0.060, 0.061], [0.070, 0.073], [0.080, 0.085],
[0.090, 0.097], [0.100, 0.111], [0.110, 0.125], [0.120, 0.140], [0.130, 0.157],
[0.140, 0.175], [0.150, 0.195], [0.160, 0.217], [0.170, 0.243], [0.180, 0.273],
[0.190, 0.307], [0.200, 0.348], [0.210, 0.396], [0.220, 0.453], [0.230, 0.520],
[0.240, 0.600]])
N.correction = interp1d(table[:,0], table[:,1], kind='cubic')(diff)
N.true = 10**(np.log10(weakline.to(1/u.cm**2).value) + N.correction) / u.cm**2
return N
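# A minimal usage sketch (hedged; `CaII_K` stands in for a real Atomic.Ion
# with `fvalue` and `wavelength` members, and `N_weak` for a prior result on
# the weaker line of a doublet):
#
#     N = ColumnDensity(line, continuum, CaII_K)
#     N_corr = ColumnDensity(line, continuum, CaII_K, weakline=N_weak)
#     N_corr.true    # corrected column density (Savage & Sembach 1991)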
class FittingGUI:
"""
Graphical Interface (keeps references alive) for fitting analytical
profiles to spectral data.
"""
def __init__(self, splot, obs=None, ion=None, function='Voigt', **kwargs):
"""
Build widget elements based on a spectrum plotted in `splot` (type SPlot).
`splot` may also be a Spectrum object (for which a SPlot will be created).
Make initial guesses at parameters, build widgets, render the figure.
"""
try:
options = Options( kwargs, {
'kind' : 'cubic' , # given to interp1d for continuum
'bandwidth' : 0.1*u.nm, # user should provide this!
'linecolor' : 'k' # color passed to plt.plot() for lines
})
kind = options('kind')
bandwidth = options('bandwidth')
linecolor = options('linecolor')
except OptionsError as err:
print(' --> OptionsError:', err)
raise ProfileError('Unrecognized option given to FittingGUI.__init__()!')
if function not in ['Voigt', 'Lorentzian', 'Gaussian']:
raise ProfileError('The only currently implemented functions '
'for fitting profiles are `Voigt`, `Lorentzian` and `Gaussian`!')
# grab and/or create the SPlot
if isinstance(splot, Spectrum):
splot = SPlot(splot, marker='k-', label='spectrum')
elif not isinstance(splot, SPlot):
raise ProfileError('FittingGUI() expects the `splot` argument to '
'either be a Spectrum or a SPlot!')
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# if the profile `function` is `Voigt` and we have necessary parameters,
# show a preview for `b` and `N` and `v`.
self.any_line_parameters = True if obs or ion else False
self.has_line_parameters = True if obs and ion else False
if self.any_line_parameters and not self.has_line_parameters:
raise ProfileError('From FittingGUI.__init__(), if the absorption line '
'parameters `obs` or `ion` are provided, both must be given!')
if self.has_line_parameters:
if function != 'Voigt':
raise ProfileError('From FittingGUI.__init__(), in order to compute the '
'broadening from the instrument profile of the `obs`ervatory and the '
'column density for the `ion`, it is expected that we try to fit a `Voigt` '
'profile `function`!')
if not isinstance(obs, Observatory):
raise ProfileError('From FittingGUI.__init__(), if `obs` is provided it should '
'be of type Observatory!')
if not hasattr(obs, 'resolution'):
raise ProfileError('From FittingGUI.__init__(), if an observatory `obs` is '
'provided, it needs to have a member `resolution`!')
if not hasattr(ion, 'wavelength') or not hasattr(ion.wavelength, 'unit'):
raise ProfileError('From FittingGUI.__init__(), the provided `ion` either '
'did not have a `wavelength` attribute or it has one without units!')
if not hasattr(ion, 'fvalue'):
raise ProfileError('From FittingGUI.__init__(), the provided `ion` does not '
'have an `fvalue` (oscillator strength) attribute!')
if hasattr(ion.fvalue, 'unit') and ion.fvalue.unit != u.dimensionless_unscaled:
raise ProfileError('From FittingGUI.__init__(), the oscillator strength, '
'`fvalue` for an ion must be a dimensionless Quantity!')
if not hasattr(ion, 'A') or not ion.A:
raise ProfileError('From FittingGUI.__init__(), the provided `ion` does not '
'have an `A` (transition probability) attribute!')
if hasattr(ion.A, 'unit') and ion.A.unit != u.Unit('s-1'):
raise ProfileError('From FittingGUI.__init__(), the transition probability, '
'`A`, from the `ion` must be in units of `s-1` if units are present!')
# the instrument broadening is from R = lambda / delta_lambda
# FWHM = 2 sqrt( 2 log 2) sigma for the Gaussian instrument profile
self.R = obs.resolution
self.sigma_instrument = (ion.wavelength / self.R) / (2 * np.sqrt(2 * np.log(2)))
self.sigma_instrument_squared = self.sigma_instrument.value ** 2
# save parameters for later
self.wavelength = ion.wavelength
self.fvalue = ion.fvalue
self.A = ion.A
# the FWHM of the intrinsic line profile (Lorentzian) is proportional to the
# transition probability (Einstein coeff.) `A`...
# convert from km s-1 to wavelength units
self.gamma = (ion.wavelength * (ion.wavelength * ion.A / (2 * np.pi)).to(u.km / u.s) /
c.si).to(ion.wavelength.unit).value
# the leading constant in the computation of `N` (per Angstrom per cm-2)
# self.leading_constant = (m_e.si * c.si / (np.sqrt(np.pi) * e.si**2 * ion.fvalue *
# ion.wavelength.to(u.Angstrom))).value
self.leading_constant = 1 / (ion.fvalue * ion.wavelength.to(u.AA)**2 * np.pi *
e.emu**2 / (m_e.si * c.to(u.km/u.s)**2)).value
# setting `function` to `ModifiedVoigt` only makes a change in how the `Voigt`
# profile is evaluated by using `self.gamma` instead of self.Params[...]['Gamma']
function = 'ModifiedVoigt'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
print('\n We need to extract the lines from the continuum before we '
'begin the fitting process.')
# extract the line, continuum, rms from the spectrum
self.line, self.continuum = Extract(splot, bandwidth=bandwidth, kind=kind)
self.rms = self.continuum.rms
print('\n Now select the peaks of each line to be fit.')
print(' Initial guesses will be made for each line marked.')
input(' Press <Return> after making your selections ... ')
# grab all the selected points
global selected
points = np.array([
[ entry.value for entry in selected['wave'] ],
[ entry.value for entry in selected['data'] ]
])
# point pairs in ascending order by wavelength
points = points[:, points[0].argsort()]
# domain size of the line and the number of components
self.domainsize = self.line.wave.value[-1] - self.line.wave.value[0]
self.numlines = np.shape(points)[1] - 4
if self.numlines < 1:
raise ProfileError('FittingGUI() expects at least one line to '
'be selected for fitting!')
# final spectral line profile is convolution of the LSP and the
# line profile function. Gaussian on Gaussian, Gaussian on Lorentzian,
# etc ... Set the functional form given requested profile function
self.Evaluate = self.SetFunction(function)
# initial guesses for parameters given the line locations,
# containing `L1`, `L2`, etc ... the values of which are themselves
# dictionaries of parameters (e.g., `FWHM`, `Depth`, etc...) whose values
# are the initial guesses for those parameters given the location of
# its peak and the number of peaks within the `line`'s domain.
self.Params = {
'L' + str(line + 1) : self.Parameterize(function, loc)
for line, loc in enumerate(points[:, 2:-2].T)
}
# grab the actual Figure object and its axis to keep references
self.splot = splot
self.fig = splot.fig
self.ax = splot.ax
# refresh image, but keep any changes in axis limits
splot.xlim( *splot.ax.get_xlim() )
splot.ylim( *splot.ax.get_ylim() )
splot.draw()
# bring up plot to make room for sliders
plt.subplots_adjust(bottom = 0.30)
# resample continuum onto line domain
self.continuum = self.continuum.copy()
self.continuum.resample(self.line)
# common domain for plotting, strip units, wavelengths from continuum
self.x = self.continuum.wave.value
self.continuum = self.continuum.data.value
# copy of continuum allows for adjustments
self.y = self.continuum.copy()
# save the mid-level of the continuum to avoid over-calculation
self.continuum_level = self.continuum.mean()
# add plots of each line, keep dictionary of handles
self.Component = {
line : plt.plot(self.x, self.y - self.Evaluate(self.x, **parameters),
linecolor+'--')[0] for line, parameters in self.Params.items()
}
# add plot of the continuum
self.Continuum, = plt.plot(self.x, self.y, 'r-', lw=2)
# add plot of superposition of each component
self.Combination, = plt.plot(self.x, self.y - self.SuperPosition(), 'g-')
# fix the limits on plot
xmin, xmax = splot.ax.get_xlim()
ymin, ymax = splot.ax.get_ylim()
plt.axis([xmin, xmax, ymin, ymax])
# color scheme for sliders
theme = {
'light': {'bg': 'white', 'fg': 'blue'},
'dark' : {'bg': 'black', 'fg': 'red'}
}
# axes for parameter sliders
self.Axis = {
# key : axis xpos , ypos + dy , xsize, ysize
line : plt.axes([0.10, 0.05 + (k+1) * 0.03, 0.65, 0.02], axisbg = 'black')
for k, line in enumerate( self.Params['L1'].keys() )
}
# add an axis to make adjustments to the continuum
self.Axis['Continuum'] = plt.axes([0.10, 0.05, 0.65, 0.02], axisbg='black')
# create the sliders for the parameters
self.Slider = {
# Slider `key` and widget
param : widgets.Slider(
self.Axis[param], # which axis to put slider on
param, # name of parameter (and the slider)
self.Minimum(param), # set the minimum of the slider
self.Maximum(param), # set the maximum of the slider
valinit = self.Params['L1'][param], # initial value
facecolor = 'c'
)
# create a slider for each parameter
for param in self.Params['L1'].keys()
}
# create the slider for the continuum (10% adjustments)
self.Slider['Continuum'] = widgets.Slider(self.Axis['Continuum'],
'Continuum', 0.90 * self.continuum_level, 1.10 *
self.continuum_level, valinit = self.continuum_level,
facecolor = 'c')
# connect sliders to update function
for slider in self.Slider.values():
slider.on_changed(self.Update)
# create axis for radio buttons
self.RadioAxis = plt.axes([0.85, 0.01, 0.1, 0.2],
axisbg = 'black', frameon = False)
# create the radio button widget
self.Radio = widgets.RadioButtons(self.RadioAxis,
tuple(['L' + str(i+1) for i in range(self.numlines)]),
active = 0, activecolor = 'c')
# connect the radio button to its update function
self.Radio.on_clicked(self.ToggleComponent)
# set current component as the first line
self.current_component = 'L1'
# make currently selected line bold
self.Component['L1'].set_linewidth(2)
# this flag keeps the radio buttons from disrupting the sliders/graphs
# when switching between lines
self.stall = False
# add the initial text for `N` and `b` if applicable.
# text is along 20% below the y-axis
if self.has_line_parameters:
# self.b, self.N, self.v = self.solve_b(), self.solve_N(), self.solve_v()
self.preview = self.ax.text(xmin, ymin - 0.1 * (ymax - ymin),
'v: {:>10.4f} km s-1 b: {:>10.4f} km s-1'
' W: {:>10.4f}\nN: {:>10.4e} cm-2 tau_0: {:>10.4f}'.format(
self.solve_v().value, self.solve_b().value, self.solve_W(), self.solve_N().value,
self.solve_tau0()), va = 'top')
# display the text
self.fig.canvas.draw_idle()
def Parameterize(self, function, loc):
"""
Choose the initial parameters of `function` given the peak `loc`.
"""
if function == 'Voigt':
return {'Gamma': 0.10 * self.domainsize / self.numlines,
'Sigma': 0.20 * self.domainsize / self.numlines, 'Peak' : loc[0],
'Depth': self.continuum[ loc[0] ].value - loc[1]}
if function == 'Lorentzian':
return {'FWHM': 0.5 * self.domainsize / self.numlines, 'Peak': loc[0],
'Depth': self.continuum[ loc[0] ].value - loc[1]}
elif function == 'Gaussian':
return {'FWHM': 0.5 * self.domainsize / self.numlines, 'Peak': loc[0],
'Depth': self.continuum[ loc[0] ].value - loc[1]}
elif function == 'ModifiedVoigt':
return {'Sigma': (self.Maximum('Sigma') - self.Minimum('Sigma'))/2,
'Peak' : loc[0], 'Depth': self.continuum[ loc[0] ].value - loc[1]}
else:
raise ProfileError('From FittingGUI.Parameterize(), the only '
'currently implemented functions are the `Gaussian`, `Lorentzian`, and '
'`Voigt` profiles!')
def SetFunction(self, function):
"""
Return how to `Evaluate` the profile given the `function`.
"""
options = {'Voigt': self.__Voigt, 'Lorentzian': self.__Lorentzian,
'Gaussian': self.__Gaussian, 'ModifiedVoigt': self.__ModifiedVoigt}
if function not in options:
raise ProfileError('From FittingGUI.SetFunction(), the only '
'currently implemented functions are the `Voigt`, `Lorentzian`, and '
'the `Gaussian` profiles!!')
return options[function]
def __Gaussian(self, x, **params):
"""
A Gaussian profile. See ..Algorithms.Functions.Gaussian
"""
return Gaussian(x, params['Depth'], params['Peak'], params['FWHM'] / 2.3548200450309493)
def __Lorentzian(self, x, **params):
"""
A Lorentzian profile. See ..Algorithms.Functions.Lorentzian
"""
return params['Depth'] * Lorentzian(x, params['Peak'], params['FWHM'])
def __Voigt(self, x, **params):
"""
The Voigt profile. See ..Algorithms.Functions.Voigt
"""
return Voigt(x, params['Depth'], params['Peak'], params['Sigma'], params['Gamma'])
def __ModifiedVoigt(self, x, **params):
"""
This is the Voigt profile, but `gamma` was already set.
"""
return Voigt(x, params['Depth'], params['Peak'], params['Sigma'], self.gamma)
def SuperPosition(self):
"""
Superposition of each line component (blended line)
"""
combined = np.zeros(np.shape(self.x))
for parameters in self.Params.values():
combined += self.Evaluate(self.x, **parameters)
return combined
def Minimum(self, param):
"""
Set the lower bound on the `param`eter for its slider.
"""
if param == 'Peak':
return self.x[0]
elif param == 'FWHM':
return 1e-6
elif param == 'Depth':
return 1e-6
elif param == 'Sigma':
if self.Evaluate != self.__ModifiedVoigt:
return 1e-6
else:
return self.sigma_instrument.value
elif param == 'Gamma':
return 1e-6
else:
raise ProfileError('From FittingGUI.Minimum(), `{}` is not '
'currently implemented as a parameter to set the minimum '
'for!'.format(param))
def Maximum(self, param):
"""
Set the upper bound on the `param`eter for its slider.
"""
if param == 'Peak':
return self.x[-1]
elif param == 'FWHM':
return 0.9 * self.domainsize
elif param == 'Depth':
return 1.5 * self.continuum.max()
elif param == 'Sigma':
return 0.9 * self.domainsize / self.numlines
elif param == 'Gamma':
return 0.9 * self.domainsize / self.numlines
else:
raise ProfileError('From FittingGUI.Maximum(), `{}` is not '
'currently implemented as a parameter to set the maximum '
'for!'.format(param))
def Update(self, val):
"""
Cycle through the Sliders and update the Parameter dictionary. Re-draw graphs.
"""
# `self.stall` suppresses this procedure while inside `ToggleComponent`
if not self.stall:
# the currently selected line component
line = self.current_component
# update the appropriate parameters in the dictionary
for parameter, slider in self.Slider.items():
if parameter == 'Continuum':
self.y = self.continuum + (slider.val - self.continuum_level)
else:
self.Params[line][parameter] = slider.val
# update the appropriate graph data, based on new parameters
for line, parameters in self.Params.items():
self.Component[line].set_ydata(self.y - self.Evaluate(self.x, **parameters))
# update the continuum graph
self.Continuum.set_ydata(self.y)
# update the combined graph
self.Combination.set_ydata(self.y - self.SuperPosition())
# update the displayed `N` and `b` if present
if self.has_line_parameters:
self.Update_Preview()
# push updates to graph
self.fig.canvas.draw_idle()
def ToggleComponent(self, label):
"""
Toggle function for the radio buttons. Switch between line components
`L1`, `L2`, etc. Update the sliders to reflect changing parameters.
"""
# reassign the current component that is selected
self.current_component = label
# don't update the parameter dictionary or draw the graph!!!!!!
self.stall = True
# make current feature bold and the rest thin
for line in self.Component.keys():
if line == label:
self.Component[line].set_linewidth(2)
else:
self.Component[line].set_linewidth(1)
# update the sliders to reflect the current component
for parameter, slider in self.Slider.items():
if parameter != 'Continuum':
slider.set_val(self.Params[label][parameter])
# update the displayed `b`, `N` and `z` if present
if self.has_line_parameters:
self.Update_Preview()
# give control back to sliders
self.stall = False
# push updates to graph
self.fig.canvas.draw_idle()
def solve_b(self):
"""
Given the current line component, solve for the broadening parameter, `b`,
given the instrument profile and the observed broadening.
"""
# b = sqrt(2) * sigma_v
sigma_obs = self.Params[self.current_component]['Sigma']
sigma_v = np.sqrt(sigma_obs**2 - self.sigma_instrument_squared) * self.wavelength.unit
b = np.sqrt(2) * (c * sigma_v / self.wavelength)
return b.to(u.km / u.second)
# return (c.si * (1.4142135623730951 *
# np.sqrt(self.Params[self.current_component]['Sigma']**2
# - self.sigma_instrument_squared) * self.wavelength.unit) / self.wavelength).to(u.km /
# u.second)
def solve_tau0(self):
"""
Solve for the apparent optical depth at line center.
"""
line_data = self.Component[self.current_component].get_ydata()
line_wave = self.Component[self.current_component].get_xdata()
line_center = line_data.argmin()
return np.log(self.y[line_center] / line_data[line_center])
def solve_W(self):
"""
Solve for the equivalent width of the currently selected line.
"""
line_data = self.Component[self.current_component].get_ydata()
line_wave = self.Component[self.current_component].get_xdata()
# apparent optical depth
tau = np.log(self.y / line_data)
return Integrate(1 - np.exp(-tau), line_wave) * self.wavelength.unit
def solve_N(self):
"""
Solve for the column density of the currently selected line component.
"""
# equivalent width (dimensionless)
W = self.solve_W() / self.wavelength
# the 1.13e12 cm-1 factor below is m_e c^2 / (pi e^2) evaluated in cgs units
const = (1.13e12 / u.cm) / (self.fvalue * self.wavelength.to(u.cm))
return const * W
def solve_v(self):
"""
Solve for the velocity of the line given the expected wavelength.
The result is returned in km s-1.
"""
line_center = self.Params[self.current_component]['Peak'] * self.wavelength.unit
return c.to(u.km / u.second) * (line_center - self.wavelength) / self.wavelength
def Update_Preview(self):
"""
Re-compute the `b`, `N`, and `v` values, update the text in the plot.
"""
# self.b, self.N, self.v = self.solve_b(), self.solve_N(), self.solve_v()
self.preview.set_text('v: {:>10.4f} km s-1 b: {:>10.4f} km s-1'
' W: {:>10.4f}\nN: {:>10.4e} cm-2 tau_0: {:>10.4f}'.format(
self.solve_v().value, self.solve_b().value, self.solve_W(), self.solve_N().value,
self.solve_tau0()))
def GetSpectra(self):
"""
Return the current graphs as Spectrum objects.
"""
return [Spectrum(self.Component[line].get_ydata() * self.line.data.unit,
self.x * self.line.wave.unit) for line in sorted(self.Component.keys())]
def GetContinuum(self):
"""
Return the current graph of the continuum.
"""
continuum = Spectrum(self.y * self.line.data.unit, self.x * self.line.wave.unit)
continuum.rms = self.rms
return continuum
def kill(self):
"""
Close the plot to destroy widgets and restore the `splot` to its original state.
"""
plt.clf()
self.splot.fig = plt.figure("Spectral-Plot (SLiPy)")
self.splot.ax = self.splot.fig.add_subplot(111)
self.splot.draw()
plt.tight_layout()
plt.draw()
#del(self)
def MultiFit(splot, error=None, obs=None, ion=None, ion2=None, function='Voigt', measure=True,
boost=None, **kwargs):
"""
The MultiFit routine takes a `splot` figure (type SPlot) and allows the
user to interactively fit line profiles. `splot` may optionally be of type
Spectrum, in which case a SPlot figure will be created for you. This function
creates a *FittingGUI* object (not documented here) which uses the Profile.Extract()
routine first (`kwargs` are passed to this function). As in the Extract routine, the user
will select four points that mark the boundaries of the line (blended or otherwise)
and some surrounding continuum to sample. The KernelFit1D (..Algorithms.KernelFit) routine
is applied with the user-specified `bandwidth` to smooth the noise out of the continuum
and to interpolate (with interpolation type `kind`) across the line gap. After this, the user is asked to
further select points marking the peaks of the line(s). The number of selection points
indicates the number of lines to be fit. If you wish to deblend two or more lines, select
all suspected locations. These are not only used to determine the number of lines to fit
but to make initial guesses at the parameters for each line and determine reasonable scales
for the sliders.
After these selections, a slider for each parameter appears along with radio buttons for
each line. The user can interactively adjust the parameter(s) for a given line by
selecting it (via the radio buttons) and dragging a slider. The parameters available to
adjust depend on the `function` argument. There are currently three available line shapes:
'Gaussian', 'Lorentzian', and 'Voigt'. See ..Algorithms.Functions for information on
these.
Each line is represented by a black, dashed curve in the plot. The currently selected line
is bold. The combined (i.e., blended) line is plotted as a solid green curve.
If an Observatory with a `.resolution` is provided via the `obs` argument then the
thermal broadening parameter `b` can be computed (and displayed). This is only applicable
with either a `Gaussian` or `Voigt` profile. Further, an `ion` needs to be provided in
this scenario (type ..Data.Atomic.Ion) with an oscillator strength (fvalue), transition
probability (A) and wavelength as members. With these data, we can interactively show
the broadening parameter, the velocity, and the apparent column density during the fitting
process. Note: the `gamma` slider will disappear in this context because it has already
been determined via the transition probability.
Each slider is labeled on the left side with the name of the parameter it controls. At
the right of each slider is the current value of that parameter. The radio buttons are
labeled "L1", "L2", etc. for each line.
This routine returns both a list of Spectrum `lines` as well as the `continuum` that was
fit using the FittingGUI. This function acts as both a single-line fitting tool and
a deblending tool. By default, the final parameterizations are attached to each spectrum
as a dictionary, `.parameters`.
If `measure` is passed as True (the default), the equivalent width is computed and attached
to each spectrum (`L1`, `L2`, etc...) and can be accessed with `.W`. If `b` and/or `N` are
available for each line these will be attached as well, accessible as `.b` and `.N`
respectively. This functionality can be suppressed by setting `measure` to False. `boost`
and `error` are passed to EquivalentWidth() and ColumnDensity().
"""
# running the user interface
gui = FittingGUI(splot, obs=obs, ion=ion, function=function, **kwargs)
input('\n Press <Return> when you are finished fitting lines ...')
# attach the parameter dictionaries to each spectrum
lines = gui.GetSpectra()
for a, parameterization in enumerate( sorted(gui.Params.keys()) ):
lines[a].parameters = gui.Params[parameterization]
# attach the continuum to the line list
continuum = gui.GetContinuum()
# kill the attached rms to suppress auto-calculation of errors
rms = continuum.rms
# continuum.rms = None
if not measure:
return lines, continuum
if gui.has_line_parameters:
print("Computing line measurements ", end="", flush=True)
for line, key in zip(lines, sorted(gui.Params.keys())):
print(".", end="", flush=True)
# set the line to `L1`, `L2`, etc...
gui.current_component = key
# The error estimates from here using the EquivalentWidth and
# ColumnDensity (from OpticalDepth) are broken. The continuum
# rises and falls with added/subtracted error levels. Approximate
# errors by boosting/lowering the `Depth` of the line.
# compute the errors for the line from the depth only
depth = gui.Params[key]['Depth']
err = np.sqrt(rms.value**2 + (depth*error.data.mean()).to(gui.line.data.unit).value**2)
# solve for the line with upper error bound
gui.Params[key]['Depth'] = depth + err
lu = Spectrum(gui.y - gui.Evaluate(gui.x, **gui.Params[key]) * gui.line.data.unit,
gui.x * gui.line.wave.unit)
# solve for the line with lower error bound
gui.Params[key]['Depth'] = depth - err
ll = Spectrum(gui.y - gui.Evaluate(gui.x, **gui.Params[key]) * gui.line.data.unit,
gui.x * gui.line.wave.unit)
W = EquivalentWidth(line, continuum, boost=boost) * 1.0 # decay to a Quantity
W.upp = EquivalentWidth(lu , continuum, boost=boost) * 1.0 # decay to a Quantity
W.low = EquivalentWidth(ll , continuum, boost=boost) * 1.0 # decay to a Quantity
N = ColumnDensity(line, continuum, ion, boost=boost) * 1.0 # decay to a Quantity
N.upp = ColumnDensity(lu , continuum, ion, boost=boost) * 1.0 # decay to a Quantity
N.low = ColumnDensity(ll , continuum, ion, boost=boost) * 1.0 # decay to a Quantity
# attach the measured quantities
notes = 'Measurement made using Profile.MultiFit() from SLiPy'
line.W = Measurement(W, error=np.array([(W-W.upp).value, (W-W.low).value])*W.unit,
name="Equivalent Width", notes=notes)
line.N = Measurement(N, error=np.array([(N-N.upp).value, (N-N.low).value])*N.unit,
name="Column Density (Apparent)", notes=notes)
line.b = Measurement(gui.solve_b(), name='Doppler Broadening Parameter', notes=notes)
line.v = Measurement(gui.solve_v(), name='Velocity', notes=notes)
gui.kill()
return lines, continuum
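# A minimal usage sketch (hedged; `spectrum`, `err`, `elodie`, and `CaII_K`
# are hypothetical stand-ins for a Spectrum, an error Spectrum, an Observatory
# with a `.resolution`, and an Atomic.Ion respectively):
#
#     lines, continuum = MultiFit(SPlot(spectrum), error=err, obs=elodie,
#         ion=CaII_K, bandwidth=0.05*u.nm)
#     lines[0].W, lines[0].N, lines[0].b, lines[0].v    # attached measurements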
|
gLENTNER/SLiPy
|
SLiPy/Profile.py
|
Python
|
gpl-2.0
| 45,178
|
[
"Gaussian"
] |
416ae8525fed907526f4e7f498547ee5d9518fae3a27016165a4dc01da881a93
|
########################################################################
# $HeadURL$
# File : BuildCloudinitScript.py
# Author : Victor Mendez
########################################################################
"""
This class constructs a cloudinit script for a DIRAC image and IaaS endpoint
"""
import os
import sys
# DIRAC
from DIRAC import gLogger, S_OK, S_ERROR, gConfig
__RCSID__ = "$Id: $"
class BuildCloudinitScript:
def buildCloudinitScript( self, imageConfig, endpointConfig, runningPodRequirements, instanceID = None ):
# logger
self.log = gLogger.getSubLogger( self.__class__.__name__ )
contextMethod = imageConfig[ 'contextMethod' ]
if contextMethod == 'cloudinit':
cvmfs_http_proxy = endpointConfig.get( 'cvmfs_http_proxy' )
siteName = endpointConfig.get( 'siteName' )
cloudDriver = endpointConfig.get( 'cloudDriver' )
vmStopPolicy = endpointConfig.get( 'vmStopPolicy' )
imageName = imageConfig.get( 'DIRACImageName' )
contextConfig = imageConfig.get( 'contextConfig' )
vmKeyPath = contextConfig[ 'vmKeyPath' ]
vmCertPath = contextConfig[ 'vmCertPath' ]
vmContextualizeScriptPath = contextConfig[ 'vmContextualizeScriptPath' ]
vmRunJobAgentURL = contextConfig[ 'vmRunJobAgentURL' ]
vmRunVmMonitorAgentURL = contextConfig[ 'vmRunVmMonitorAgentURL' ]
vmRunVmUpdaterAgentURL = contextConfig[ 'vmRunVmUpdaterAgentURL' ]
vmRunLogAgentURL = contextConfig[ 'vmRunLogAgentURL' ]
vmCvmfsContextURL = contextConfig[ 'vmCvmfsContextURL' ]
vmDiracContextURL = contextConfig[ 'vmDiracContextURL' ]
result = self.__buildCloudinitScript( DIRACImageName = imageName,
siteName = siteName,
cloudDriver = cloudDriver,
cvmfs_http_proxy = cvmfs_http_proxy,
vmStopPolicy = vmStopPolicy,
contextMethod = contextMethod,
vmCertPath = vmCertPath,
vmKeyPath = vmKeyPath,
vmContextualizeScriptPath = vmContextualizeScriptPath,
vmRunJobAgentURL = vmRunJobAgentURL,
vmRunVmMonitorAgentURL = vmRunVmMonitorAgentURL,
vmRunVmUpdaterAgentURL = vmRunVmUpdaterAgentURL,
vmRunLogAgentURL = vmRunLogAgentURL,
vmCvmfsContextURL = vmCvmfsContextURL,
vmDiracContextURL = vmDiracContextURL,
runningPodRequirements = runningPodRequirements,
instanceID = instanceID
)
elif contextMethod == 'ssh':
result = S_ERROR( 'ssh context method found instead of cloudinit method' )
elif contextMethod == 'adhoc':
result = S_ERROR( 'adhoc context method found instead of cloudinit method' )
elif contextMethod == 'amiconfig':
result = S_ERROR( 'amiconfig context method found instead of cloudinit method' )
else:
result = S_ERROR( '%s is not a known NovaContext method' % contextMethod )
return result
def __buildCloudinitScript( self,
DIRACImageName,
siteName,
cloudDriver,
cvmfs_http_proxy,
vmStopPolicy,
contextMethod,
vmCertPath,
vmKeyPath,
vmContextualizeScriptPath,
vmRunJobAgentURL,
vmRunVmMonitorAgentURL,
vmRunVmUpdaterAgentURL,
vmRunLogAgentURL,
vmCvmfsContextURL,
vmDiracContextURL,
runningPodRequirements,
instanceID
):
# The function returns S_OK with the name of the created cloudinit script.
# If the cloudinit context script was previously created, it is overwritten.
cloudinitPath = '/tmp/cloudinit_' + DIRACImageName + '_' + siteName + '.sh'
file=open(cloudinitPath, 'w')
#start writing the script
file.write('#!/bin/bash\n')
# building the necessary arguments
putCertPath = "/root/vmservicecert.pem"
file.write('putCertPath=%s\n' % (putCertPath))
putKeyPath = "/root/vmservicekey.pem"
file.write('putKeyPath=%s\n' % (putKeyPath))
file.write('vmRunJobAgentURL=%s\n' % (vmRunJobAgentURL))
file.write('vmRunVmMonitorAgentURL=%s\n' % (vmRunVmMonitorAgentURL))
file.write('vmRunVmUpdaterAgentURL=%s\n' % (vmRunVmUpdaterAgentURL))
file.write('vmRunLogAgentURL=%s\n' % (vmRunLogAgentURL))
file.write('vmCvmfsContextURL=%s\n' % (vmCvmfsContextURL))
file.write('vmDiracContextURL=%s\n' % (vmDiracContextURL))
file.write('cvmfs_http_proxy=%s\n' % (cvmfs_http_proxy))
file.write('siteName=%s\n' % (siteName))
file.write('cloudDriver=%s\n' % (cloudDriver))
file.write('vmStopPolicy=%s\n' % (vmStopPolicy))
file.write('instanceID=%s\n' % (instanceID))
# dynamic runningPod requirements for LocalSite
file.write("cat << 'EOF' > /root/LocalSiteRequirements\n")
for key, value in runningPodRequirements.items():
if type(value) is list:
file.write('%s=%s\n' % (key,','.join(value)))
else:
file.write('%s=%s\n' % (key,value))
file.write("EOF\n")
# 0) Prior copying of necessary files using the built-in cloudinit script
# 0.1) DIRAC service public key
pubkeyPath = os.path.expanduser( '~/.ssh/id_rsa.pub' )
file.write("cat << 'EOF' > /root/.ssh/authorized_keys\n")
try:
with open(pubkeyPath) as fp:
for line in fp:
file.write(line)
except Exception, errmsg:
return S_ERROR( errmsg )
file.write("EOF\n")
# VM DIRAC service cert
file.write("cat << 'EOF' > %s\n" % (putCertPath))
try:
with open(vmCertPath) as fp:
for line in fp:
file.write(line)
except Exception, errmsg:
return S_ERROR( errmsg )
file.write("EOF\n")
# VM DIRAC service key
file.write("cat << 'EOF' > %s\n" % (putKeyPath))
try:
with open(vmKeyPath) as fp:
for line in fp:
file.write(line)
except Exception, errmsg:
return S_ERROR( errmsg )
file.write("EOF\n")
#now the static part of the cloudinit
try:
with open(vmContextualizeScriptPath) as fp:
for line in fp:
file.write(line)
except Exception, errmsg:
return S_ERROR( errmsg )
file.close()
return S_OK(cloudinitPath)
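# A hedged usage sketch: `imageConfig`, `endpointConfig` and
# `runningPodRequirements` are assumed to be assembled elsewhere from the
# DIRAC configuration system; only the keys consumed above are required.
#
#     builder = BuildCloudinitScript()
#     result = builder.buildCloudinitScript( imageConfig, endpointConfig,
#                                            runningPodRequirements, instanceID = 42 )
#     if result[ 'OK' ]:
#         print result[ 'Value' ]   # e.g. /tmp/cloudinit_<image>_<site>.sh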
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
myco/VMDIRAC
|
WorkloadManagementSystem/Client/BuildCloudinitScript.py
|
Python
|
gpl-3.0
| 7,487
|
[
"DIRAC"
] |
ec16ae6c9b940f6b953bfaafcde1d99ab6e040b46852be0757ae7f4855bf6b74
|
#!/usr/bin/python
import numpy as np
import Scientific.IO.NetCDF as nc
import pylab as pl
dFile = nc.NetCDFFile("vScatter.nc", "r")
for hName in dFile.variables.keys():
print dFile.variables[hName].getValue()
# rho = dFile.variables[mName].porosity
# h = dFile.variables[mName].porosityHeight
# print np.mean(rho[50:150]), np.std(rho[50:150])
# pl.plot(h,rho)
#pl.show()
|
dronir/EM
|
python/plotHemisphere.py
|
Python
|
gpl-3.0
| 387
|
[
"NetCDF"
] |
c03caf7182da1398a1755f8eba1f636a2a401aad14946016970e992377d5237b
|
# -*- coding: utf-8 -*-
'''
IMProToo
Improved MRR Processing Tool
Python toolkit to read write and process MRR Data. Raw Data, Average and
Instantaneous Data are supported.
This file includes some helper functions
Copyright (C) 2011-2021 Maximilian Maahn, U Leipzig
maximilian.maahn_AT_uni-leipzig.de
https://github.com/maahn/IMProToo
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
IMProTooTools: various helper functions
'''
from __future__ import division
from __future__ import print_function
import numpy as np
import time
import datetime
import calendar
import warnings
#warnings.filterwarnings('always', '.*', UserWarning,)
def getManualQualityArray(inputFile, timeVector):
'''
Returns a boolean quality array for `timeVector` using `inputFile`. See readQualityFile for details about the inputFile format.
'''
startTimes, endTimes, comments = readQualityFile(inputFile)
quality = np.zeros(timeVector.shape, dtype=bool)
for startTime, endTime in zip(startTimes, endTimes):
startTime = date2unix(startTime)
endTime = date2unix(endTime)
quality = quality + ((timeVector >= startTime)
* (timeVector < endTime))
return quality
def readQualityFile(inputFile):
'''
reads manual quality control files; the format is like the ancient 'Hatpro' format:
file format is:
date, no. of entries for this day, starttime (00.00 - 23.59), endtime (00.01 - 24.00), comment
120105 3 11.00 17.00 snow on dish
19.00 20.00 snow on dish
22.00 24.00 interference
120106 1 00.00 21.00 snow on dish
#this is a comment
120107 1 00.00 24.00 maintenance
'''
startTimes = list()
endTimes = list()
comments = list()
belongsToOldDate = 1
ll = -1
f = open(inputFile, "r")
for line in f.readlines():
ll += 1
fields = line.split()
try:
# check for comments
if fields[0][0:1] == '#':
print('comment', ' '.join(fields))
continue
# does the line have its own timestamp?
if belongsToOldDate == 1:
startDate = fields[0]
belongsToOldDate = int(fields[1])
startTime = fields[2]
endTime = fields[3]
comment = ' '.join(fields[4:])
else:
startTime = fields[0]
endTime = fields[1]
comment = ' '.join(fields[2:])
belongsToOldDate -= 1
# yes, 24.00 is a weird timestamp, correct this to 00.00 the next day
if endTime == '24.00':
#import pdb;pdb.set_trace()
endDate = datetime.datetime.strftime((datetime.datetime.strptime(
startDate, '%y%m%d')+datetime.timedelta(1)), '%y%m%d')
endTime = '00.00'
else:
endDate = startDate
startTimes.append(datetime.datetime.strptime(
startDate+' '+startTime, '%y%m%d %H.%M'))
endTimes.append(datetime.datetime.strptime(
endDate+' '+endTime, '%y%m%d %H.%M'))
comments.append(comment)
except:
belongsToOldDate = 1
warnings.warn('Could not read line no. '+str(ll) + str(fields))
f.close()
return startTimes, endTimes, comments
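# A minimal usage sketch (hedged; 'quality.txt' and `mrrTimes`, a numpy array
# of seconds since 1970, are hypothetical):
#
#     starts, ends, notes = readQualityFile('quality.txt')
#     flagged = getManualQualityArray('quality.txt', mrrTimes)  # boolean mask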
def date2unix(date):
'''
converts datetime object to seconds since 01-01-1970
'''
return calendar.timegm(date.timetuple())
def unix2date(unix):
'''
converts seconds since 01-01-1970 to datetime object
'''
return datetime.datetime.utcfromtimestamp(unix)
def quantile(x, q, qtype=7, issorted=False):
"""
Args:
x - input data
q - quantile
qtype - algorithm
issorted - True if x is already sorted.
Compute quantiles from input array x given q. For median,
specify q=0.5.
References:
http://reference.wolfram.com/mathematica/ref/Quantile.html
http://wiki.r-project.org/rwiki/doku.php?id=rdoc:stats:quantile
Author:
Ernesto P.Adorio Ph.D.
UP Extension Program in Pampanga, Clark Field.
"""
if not issorted:
y = sorted(x)
else:
y = x
if not (1 <= qtype <= 9):
return None # error!
# Parameters for the Hyndman and Fan algorithm
abcd = [(0, 0, 1, 0), # inverse empirical distrib.function., R type 1
(0.5, 0, 1, 0), # similar to type 1, averaged, R type 2
(0.5, 0, 0, 0), # nearest order statistic,(SAS) R type 3
(0, 0, 0, 1), # California linear interpolation, R type 4
(0.5, 0, 0, 1), # hydrologists method, R type 5
# mean-based estimate(Weibull method), (SPSS,Minitab), type 6
(0, 1, 0, 1),
(1, -1, 0, 1), # mode-based method,(S, S-Plus), R type 7
(1.0/3, 1.0/3, 0, 1), # median-unbiased , R type 8
(3/8.0, 0.25, 0, 1) # normal-unbiased, R type 9.
]
a, b, c, d = abcd[qtype-1]
n = len(x)
g, j = np.modf(a + (n+b) * q - 1)
if j < 0:
return y[0]
elif j >= n:
return y[n-1] # oct. 8, 2010 y[n]???!! uncaught off by 1 error!!!
j = int(np.floor(j))
if g == 0:
return y[j]
else:
return y[j] + (y[j+1] - y[j]) * (c + d * g)
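# A minimal usage sketch: with the default qtype=7 (the S/S-Plus convention,
# also numpy's default interpolation), the median of an even-length sample is
# linearly interpolated:
#
#     quantile([1, 2, 3, 4], 0.5)    # -> 2.5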
def oneD2twoD(vector, shape2, axis):
'''
helper function to convert 1D to 2D data
'''
if axis == 0:
matrix = np.zeros((shape2, len(vector)))
for h in np.arange(shape2):
matrix[h] = vector
elif axis == 1:
matrix = np.zeros((len(vector), shape2))
for h in np.arange(shape2):
matrix[:, h] = vector
else:
raise ValueError("wrong axis")
return matrix
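# A minimal usage sketch: broadcast a length-3 vector into a (2, 3) matrix by
# repeating it along axis 0:
#
#     oneD2twoD(np.array([1., 2., 3.]), 2, 0)
#     # -> array([[1., 2., 3.],
#     #           [1., 2., 3.]])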
def limitMaInidces(iArray, max):
'''
helper function to limit indices to a certain interval
lower limit 0
max in Python style -> actually max-1
'''
reArray = np.ma.masked_all_like(iArray.ravel())
shape = iArray.shape
for n, i in enumerate(iArray.ravel()):
if i >= max:
reArray[n] = i - max
elif i < 0:
reArray[n] = i + max
else:
reArray[n] = i
return reArray.reshape(shape)
def _get_netCDF_module(ncForm="NETCDF3"):
'''
helper function to determine which netCDF module is loaded, to enable
consistency between the various writeNetCDF methods.
Returns both 'nc' (the netCDF module) and 'pyNC', a bool variable which
controls how some attribute data is set, and how to create the netCDF file
(since the function is different in the various netCDF modules.)
'''
# most syntax is identical, but there is one nasty difference regarding the fillValue...
if ncForm in ["NETCDF3_CLASSIC", "NETCDF3_64BIT", "NETCDF4_CLASSIC", "NETCDF4"]:
import netCDF4 as nc
pyNc = True
elif ncForm in ["NETCDF3"]:
try:
import Scientific.IO.NetCDF as nc
pyNc = False
except:
# fallback for netcdf3 with the same syntax as netcdf4!
import netCDF3 as nc
pyNc = True
else:
raise ValueError("Unknown nc form "+ncForm)
return nc, pyNc
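# A minimal usage sketch: request the netCDF4 backend and branch on the flag
# that tells the caller which API flavour was loaded:
#
#     nc, pyNc = _get_netCDF_module(ncForm="NETCDF4")
#     # pyNc is True for the netCDF4-style API, False for Scientific.IO.NetCDF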
|
maahn/IMProToo
|
IMProToo/tools.py
|
Python
|
gpl-3.0
| 7,861
|
[
"NetCDF"
] |
724fd0ece4697c608bffeadbd9b6242066ec3f2d2b211d10a26ddfc5e52ce832
|
'''
fs.contrib.tahoefs
==================
Example (it will use publicly available, but slow-as-hell Tahoe-LAFS cloud):
from fs.contrib.tahoefs import TahoeFS, Connection
dircap = TahoeFS.createdircap(webapi='http://pubgrid.tahoe-lafs.org')
print "Your dircap (unique key to your storage directory) is", dircap
print "Keep it safe!"
fs = TahoeFS(dircap, autorun=False, timeout=300, webapi='http://pubgrid.tahoe-lafs.org')
f = fs.open("foo.txt", "a")
f.write('bar!')
f.close()
print "Now visit %s and enjoy :-)" % fs.getpathurl('foo.txt')
When any problem occurred, you can turn on internal debugging messages:
import logging
l = logging.getLogger()
l.setLevel(logging.DEBUG)
l.addHandler(logging.StreamHandler(sys.stdout))
... your Python code using TahoeFS ...
TODO:
x unicode support
x try network errors / bad happiness
x colon hack (all occurrences of ':' in filenames transparently convert to __colon__)
x logging / cleaning sources from print()
x exceptions
x tests
x create ticket and send initial code
x sanitize all path types (., /)
x rewrite listdir, add listdirinfo
x support for extra large file uploads (poster module)
x Possibility to block write until upload done (Tahoe mailing list)
x Report something sane when Tahoe crashed/unavailable
x solve failed unit tests (makedir_winner, ...)
filetimes
docs & author
python3 support
python 2.3 support
remove creating blank files (depends on FileUploadManager)
TODO (Not TahoeFS specific tasks):
x DebugFS
x RemoteFileBuffer on the fly buffering support
x RemoteFileBuffer unit tests
x RemoteFileBuffer submit to trunk
colon hack -> move outside tahoe, should be in windows-specific FS (maybe in Dokan?)
autorun hack -> move outside tahoe, -||-
Implement FileUploadManager + faking isfile/exists of just processing file
pyfilesystem docs is outdated (rename, movedir, ...)
'''
import logging
from logging import DEBUG, INFO, ERROR, CRITICAL
import fs.errors as errors
from fs.path import abspath, relpath, normpath, dirname, pathjoin
from fs import FS, NullFile, _thread_synchronize_default, SEEK_END
from fs.remote import CacheFS, _cached_method, RemoteFileBuffer
from fs.base import fnmatch
from util import TahoeUtil
from connection import Connection
#from .debugfs import DebugFS
logger = logging.getLogger('fs.tahoefs')
def _fix_path(func):
def wrapper(self, *args, **kwds):
if len(args):
args = list(args)
args[0] = abspath(normpath(args[0]))
return func(self, *args, **kwds)
return wrapper
class TahoeFS(CacheFS):
_meta = { 'virtual' : False,
'read_only' : False,
'unicode_paths' : True,
'case_insensitive_paths' : False,
'network' : True
}
def __init__(self, dircap, timeout=60, autorun=True, largefilesize=10*1024*1024, webapi='http://127.0.0.1:3456'):
'''
Creates instance of TahoeFS.
:param dircap: special hash allowing user to work with TahoeLAFS directory.
:param timeout: how long should underlying CacheFS keep information about files
before asking TahoeLAFS node again.
:param autorun: Allow listing autorun files? Can be very dangerous on Windows!.
This is a temporary hack, as it should be part of Windows-specific middleware,
not Tahoe itself.
:param largefilesize: - Create a placeholder file for files larger than this threshold.
Uploading and processing of large files can take extremely long (many hours),
so placing this placeholder can help you remember that an upload is in progress.
Setting this to None will skip creating placeholder files for any uploads.
'''
fs = _TahoeFS(dircap, autorun=autorun, largefilesize=largefilesize, webapi=webapi)
super(TahoeFS, self).__init__(fs, timeout)
def __str__(self):
return "<TahoeFS: %s>" % self.dircap
@classmethod
def createdircap(cls, webapi='http://127.0.0.1:3456'):
return TahoeUtil(webapi).createdircap()
@_fix_path
def open(self, path, mode='r', **kwargs):
self.wrapped_fs._log(INFO, 'Opening file %s in mode %s' % (path, mode))
newfile = False
if not self.exists(path):
if 'w' in mode or 'a' in mode:
newfile = True
else:
self.wrapped_fs._log(DEBUG, "File %s not found while opening for reads" % path)
raise errors.ResourceNotFoundError(path)
elif self.isdir(path):
self.wrapped_fs._log(DEBUG, "Path %s is directory, not a file" % path)
raise errors.ResourceInvalidError(path)
if 'w' in mode:
newfile = True
if newfile:
self.wrapped_fs._log(DEBUG, 'Creating empty file %s' % path)
if self.wrapped_fs.readonly:
raise errors.UnsupportedError('read only filesystem')
self.setcontents(path, '')
handler = NullFile()
else:
self.wrapped_fs._log(DEBUG, 'Opening existing file %s for reading' % path)
handler = self.wrapped_fs._get_file_handler(path)
return RemoteFileBuffer(self, path, mode, handler,
write_on_flush=False)
@_fix_path
def desc(self, path):
try:
return self.getinfo(path)
except:
return ''
@_fix_path
def exists(self, path):
try:
self.getinfo(path)
self.wrapped_fs._log(DEBUG, "Path %s exists" % path)
return True
except errors.ResourceNotFoundError:
self.wrapped_fs._log(DEBUG, "Path %s does not exists" % path)
return False
except errors.ResourceInvalidError:
self.wrapped_fs._log(DEBUG, "Path %s does not exists, probably misspelled URI" % path)
return False
@_fix_path
def getsize(self, path):
try:
size = self.getinfo(path)['size']
self.wrapped_fs._log(DEBUG, "Size of %s is %d" % (path, size))
return size
except errors.ResourceNotFoundError:
return 0
@_fix_path
def isfile(self, path):
try:
isfile = (self.getinfo(path)['type'] == 'filenode')
except errors.ResourceNotFoundError:
#isfile = not path.endswith('/')
isfile = False
self.wrapped_fs._log(DEBUG, "Path %s is file: %d" % (path, isfile))
return isfile
@_fix_path
def isdir(self, path):
try:
isdir = (self.getinfo(path)['type'] == 'dirnode')
except errors.ResourceNotFoundError:
isdir = False
self.wrapped_fs._log(DEBUG, "Path %s is directory: %d" % (path, isdir))
return isdir
@_fix_path
@_cached_method
def listdirinfo(self, path="/", wildcard=None, full=False, absolute=False,
dirs_only=False, files_only=False):
self.wrapped_fs._log(DEBUG, "Listing directory (listdirinfo) %s" % path)
_fixpath = self.wrapped_fs._fixpath
_path = _fixpath(path)
if dirs_only and files_only:
raise errors.ValueError("dirs_only and files_only can not both be True")
result = []
for item in self.wrapped_fs.tahoeutil.list(self.dircap, _path):
if dirs_only and item['type'] == 'filenode':
continue
elif files_only and item['type'] == 'dirnode':
continue
if wildcard is not None and \
not fnmatch.fnmatch(item['name'], wildcard):
continue
if full:
item_path = relpath(pathjoin(_path, item['name']))
elif absolute:
item_path = abspath(pathjoin(_path, item['name']))
else:
item_path = item['name']
cache_name = self.wrapped_fs._fixpath(u"%s/%s" % \
(path, item['name']))
self._cache_set(cache_name, 'getinfo', (), {}, (True, item))
result.append((item_path, item))
return result
def listdir(self, *args, **kwargs):
return [ item[0] for item in self.listdirinfo(*args, **kwargs) ]
@_fix_path
def remove(self, path):
self.wrapped_fs._log(INFO, 'Removing file %s' % path)
if self.wrapped_fs.readonly:
raise errors.UnsupportedError('read only filesystem')
if not self.isfile(path):
if not self.isdir(path):
raise errors.ResourceNotFoundError(path)
raise errors.ResourceInvalidError(path)
try:
self.wrapped_fs.tahoeutil.unlink(self.dircap, path)
except Exception, e:
raise errors.ResourceInvalidError(path)
finally:
self._uncache(path, removed=True)
@_fix_path
def removedir(self, path, recursive=False, force=False):
self.wrapped_fs._log(INFO, "Removing directory %s" % path)
if self.wrapped_fs.readonly:
raise errors.UnsupportedError('read only filesystem')
if not self.isdir(path):
if not self.isfile(path):
raise errors.ResourceNotFoundError(path)
raise errors.ResourceInvalidError(path)
if not force and self.listdir(path):
raise errors.DirectoryNotEmptyError(path)
try:
self.wrapped_fs.tahoeutil.unlink(self.dircap, path)
finally:
self._uncache(path, removed=True)
if recursive and path != '/':
try:
self.removedir(dirname(path), recursive=True)
except errors.DirectoryNotEmptyError:
pass
@_fix_path
def makedir(self, path, recursive=False, allow_recreate=False):
self.wrapped_fs._log(INFO, "Creating directory %s" % path)
if self.wrapped_fs.readonly:
raise errors.UnsupportedError('read only filesystem')
if self.exists(path):
if not self.isdir(path):
raise errors.ResourceInvalidError(path)
if not allow_recreate:
raise errors.DestinationExistsError(path)
if not recursive and not self.exists(dirname(path)):
raise errors.ParentDirectoryMissingError(path)
try:
self.wrapped_fs.tahoeutil.mkdir(self.dircap, path)
finally:
self._uncache(path,added=True)
def movedir(self, src, dst, overwrite=False):
self.move(src, dst, overwrite)
def move(self, src, dst, overwrite=False):
# FIXME: overwrite not documented
self.wrapped_fs._log(INFO, "Moving file from %s to %s" % (src, dst))
if self.wrapped_fs.readonly:
raise errors.UnsupportedError('read only filesystem')
src = self.wrapped_fs._fixpath(src)
dst = self.wrapped_fs._fixpath(dst)
if not self.exists(dirname(dst)):
# FIXME: Why raise exception when it is legal construct?
raise errors.ParentDirectoryMissingError(dst)
if not overwrite and self.exists(dst):
raise errors.DestinationExistsError(dst)
try:
self.wrapped_fs.tahoeutil.move(self.dircap, src, dst)
finally:
self._uncache(src,removed=True)
self._uncache(dst,added=True)
@_fix_path
def setcontents(self, path, data, chunk_size=64*1024):
try:
self.wrapped_fs.setcontents(path, data, chunk_size=chunk_size)
finally:
self._uncache(path, added=True)
def rename(self, src, dst):
self.move(src, dst)
def copy(self, src, dst, overwrite=False, chunk_size=16384):
if self.wrapped_fs.readonly:
raise errors.UnsupportedError('read only filesystem')
# FIXME: Workaround because isfile() not exists on _TahoeFS
FS.copy(self, src, dst, overwrite, chunk_size)
def copydir(self, src, dst, overwrite=False, ignore_errors=False, chunk_size=16384):
if self.wrapped_fs.readonly:
raise errors.UnsupportedError('read only filesystem')
# FIXME: Workaround because isfile() not exists on _TahoeFS
FS.copydir(self, src, dst, overwrite, ignore_errors, chunk_size)
class _TahoeFS(FS):
def __init__(self, dircap, autorun, largefilesize, webapi):
self.dircap = dircap if not dircap.endswith('/') else dircap[:-1]
self.autorun = autorun
self.largefilesize = largefilesize
self.connection = Connection(webapi)
self.tahoeutil = TahoeUtil(webapi)
self.readonly = dircap.startswith('URI:DIR2-RO')
super(_TahoeFS, self).__init__(thread_synchronize=_thread_synchronize_default)
def _log(self, level, message):
if not logger.isEnabledFor(level): return
logger.log(level, u'(%d) %s' % (id(self),
unicode(message).encode('ASCII', 'replace')))
def _fixpath(self, path):
return abspath(normpath(path))
def _get_file_handler(self, path):
if not self.autorun:
if path.lower().startswith('/autorun.'):
self._log(DEBUG, 'Access to file %s denied' % path)
return NullFile()
return self.getrange(path, 0)
@_fix_path
def getpathurl(self, path, allow_none=False, webapi=None):
'''
Retrieve URL where the file/directory is stored
'''
if webapi is None:
webapi = self.connection.webapi
self._log(DEBUG, "Retrieving URL for %s over %s" % (path, webapi))
path = self.tahoeutil.fixwinpath(path, False)
return u"%s/uri/%s%s" % (webapi, self.dircap, path)
@_fix_path
def getrange(self, path, offset, length=None):
path = self.tahoeutil.fixwinpath(path, False)
return self.connection.get(u'/uri/%s%s' % (self.dircap, path),
offset=offset, length=length)
@_fix_path
def setcontents(self, path, file, chunk_size=64*1024):
self._log(INFO, 'Uploading file %s' % path)
path = self.tahoeutil.fixwinpath(path, False)
size=None
if self.readonly:
raise errors.UnsupportedError('read only filesystem')
# Workaround for large files:
# First create zero file placeholder, then
# upload final content.
if self.largefilesize is not None and getattr(file, 'read', None):
# As 'file' can be also a string, need to check,
# if 'file' looks like duck. Sorry, file.
file.seek(0, SEEK_END)
size = file.tell()
file.seek(0)
if size > self.largefilesize:
self.connection.put(u'/uri/%s%s' % (self.dircap, path),
"PyFilesystem.TahoeFS: Upload started, final size %d" % size)
self.connection.put(u'/uri/%s%s' % (self.dircap, path), file, size=size)
@_fix_path
def getinfo(self, path):
self._log(INFO, 'Reading meta for %s' % path)
info = self.tahoeutil.info(self.dircap, path)
#import datetime
#info['created_time'] = datetime.datetime.now()
#info['modified_time'] = datetime.datetime.now()
#info['accessed_time'] = datetime.datetime.now()
return info
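# Illustrative usage sketch (not part of the original module). It assumes a
# reachable Tahoe-LAFS web API and a valid directory capability; the dircap
# and webapi values below are placeholders only:
#
# fs = _TahoeFS(dircap='URI:DIR2:exampleexampleexample',
#               autorun=False, largefilesize=10*1024*1024,
#               webapi='http://127.0.0.1:3456')
# print fs.getpathurl('/readme.txt')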
|
atty303/pyfilesystem
|
fs/contrib/tahoefs/__init__.py
|
Python
|
bsd-3-clause
| 16,384
|
[
"VisIt"
] |
577a835400c1c778a678077e8c3494bccce343a2a5b0301c1afa94ace6ce4eb1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# transistorAmp.py
#
# Copyright 2015 Roberto Tavares <roberto.tavares.filho@gmail.com>
#
# Version: 0.1, on 07/09/2015
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# inputs: vg voltage gain
# rlw load resistance
# Ecmax maximum collector voltage
# tmax maximum operating temperature
# s stability factor
# dIc quiescent-current variation (delta Ic)
# Eopp peak-to-peak output voltage
import argparse
import gettext
import sys
import os
import wx
import socket
from subprocess import call
import math
import cmath
import time
import traceback
from math import pi, sin, cos, log, sqrt, atan2
#imports for internationalization
import gettext
import __builtin__
__builtin__.__dict__['_'] = wx.GetTranslation
#creation of the object identifier IDs
BTN_AMPLIFIER_TR_G_1_10 = wx.NewId() # transistor amplifier, gain 1-10
BTN_AMPLIFIER_TR_G_10_50 = wx.NewId()
BTN_AMPLIFIER_TR_G_50_100 = wx.NewId()
ID_VG = wx.NewId()
ID_RL = wx.NewId()
ID_ECMAX = wx.NewId()
ID_TMAX = wx.NewId()
ID_S = wx.NewId()
ID_BETA = wx.NewId()
ID_DIC = wx.NewId()
ID_E0PP = wx.NewId()
ID_CIR = wx.NewId()
ID_VCC = wx.NewId()
ID_FMIN = wx.NewId()
ID_ICMAX = wx.NewId()
def retornaDeltaVbe(fQic):
"""
returns delta Vbe for a given quiescent current; iQc in A, delta Vbe in V/degree Celsius
"""
retorno= 0.003
if fQic < 0.1:
retorno= 0.002
if fQic < 0.01:
retorno= 0.0025
if fQic < 0.001:
retorno= 0.00275
if fQic < 0.0001:
retorno= 0.003
return retorno
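# Illustrative values for the thresholds above (sketch, not in the original
# file); the returned coefficient is in V/degree Celsius:
#
# retornaDeltaVbe(0.5) == 0.003     # Ic >= 100 mA
# retornaDeltaVbe(0.05) == 0.002    # 10 mA <= Ic < 100 mA
# retornaDeltaVbe(0.005) == 0.0025  # 1 mA <= Ic < 10 mA
# retornaDeltaVbe(0.0005) == 0.00275  # 0.1 mA <= Ic < 1 mA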
class saidaAmplificadorTransistorizado(wx.Dialog):
"""
Implements the window showing the calculated transistor amplifier values
"""
def __init__(self,Re,Rl,Rb1,Rb2,Cin,Cout,Icq,Zin,statusMsg,status):
wx.Dialog.__init__(self, None, -1, _('Amplifier values'),size=(300, 150)) #dialog initialization
self.sizer1 = wx.BoxSizer(wx.VERTICAL)
self.sizer30 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer20 = wx.BoxSizer(wx.VERTICAL);
self.sizer21 = wx.BoxSizer(wx.VERTICAL);
self.sizer10 = wx.BoxSizer(wx.VERTICAL);
self.sizer11 = wx.BoxSizer(wx.VERTICAL);
self.sizer12 = wx.BoxSizer(wx.VERTICAL);
self.sizer13 = wx.BoxSizer(wx.VERTICAL);
self.sizer14 = wx.BoxSizer(wx.VERTICAL);
self.sizer15 = wx.BoxSizer(wx.VERTICAL);
self.staticTxtTitulo= wx.StaticText(self, -1, _("Amplifier design output data"))
self.staticTxtTitulo.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer1.Add(self.staticTxtTitulo, flag= wx.ALIGN_CENTER_HORIZONTAL)
if status == True:
self.texto= "Re= " + str(Re) + " ohms"
self.static1= wx.StaticText(self, -1, self.texto)
self.static1.SetToolTipString(_("Emiter resistor"))
self.sizer10.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.texto= "RL= " + str(Rl) + " ohms"
self.static1= wx.StaticText(self, -1, self.texto)
self.static1.SetToolTipString(_("Colector resistor"))
self.sizer10.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.texto= "Rb1= " + str(Rb1) + " ohms"
self.static1= wx.StaticText(self, -1, self.texto)
self.static1.SetToolTipString(_("Upper base resistor"))
self.sizer10.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.texto= "Rb2= " + str(Rb2) + " ohms"
self.static1= wx.StaticText(self, -1, self.texto)
self.static1.SetToolTipString(_("Lower base resistor"))
self.sizer10.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.texto= "Cin= " + str(Cin) + " microfarads"
self.static1= wx.StaticText(self, -1, self.texto)
self.static1.SetToolTipString(_("Input capacitor"))
self.sizer10.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.texto= "Cout= " + str(Cout) + " microfarads"
self.static1= wx.StaticText(self, -1, self.texto)
self.static1.SetToolTipString(_("Output capacitor"))
self.sizer10.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.texto= "Zin= " + str(int(Zin)) + " ohms"
self.static1= wx.StaticText(self, -1, self.texto)
self.static1.SetToolTipString(_("Input impedance"))
self.sizer10.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.texto= "Icq= " + str(int(Icq * 1000)) + " miliamperes"
self.static1= wx.StaticText(self, -1, self.texto)
self.static1.SetToolTipString(_("Quiescente current"))
self.sizer10.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
else:
self.static1= wx.StaticText(self, -1, statusMsg)
self.static1.SetForegroundColour((255,0,0)) # set text color
self.sizer10.Add(self.static1,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.static2= wx.StaticText(self, -1, _("Please change values and try again"))
self.static2.SetForegroundColour((255,0,0)) # set text color
self.sizer10.Add(self.static2,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.okButton= wx.Button(self, wx.ID_OK, _("CLOSE")) #objeto botao OK
self.sizer15.Add(self.okButton,flag= wx.EXPAND | wx.ALL, border = 5)
#insert the figure with the associated schematic
self.img1 = wx.Image("amp1.png", wx.BITMAP_TYPE_ANY)
self.bitmap1 = wx.StaticBitmap(self, -1, wx.BitmapFromImage(self.img1))
self.sizer21.Add(self.bitmap1,flag= wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border = 10)
#assemble the sizer structure
self.sizer30.Add(self.sizer20,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.sizer30.Add(self.sizer21,flag= wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border = 10)
self.sizer1.Add(self.sizer30,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.sizer1.Add(self.sizer15,flag= wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border = 10)
self.sizer20.Add(self.sizer14,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.sizer20.Add(self.sizer13,flag= wx.ALL | wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.sizer20.Add(self.sizer12,flag= wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border = 10)
self.sizer20.Add(self.sizer10,flag= wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border = 10) #insert the horizontal sizers into the vertical one
self.sizer20.Add(self.sizer11,flag= wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border = 10)
self.SetSizer(self.sizer1) #set the top-level sizer for the dialog area
self.Fit()
class entradaAmplificadorTransistorizado(wx.Dialog):
"""
Implements the input screen for the transistor amplifier design parameters
"""
def __init__(self):
wx.Dialog.__init__(self, None, -1, _('Amplifier Specifications'),size=(300, 150)) #dialog initialization
self.sizer1 = wx.BoxSizer(wx.VERTICAL)
self.sizer10 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer11 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer12 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer13 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer14 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer15 = wx.BoxSizer(wx.HORIZONTAL);
self.staticTxtTitulo= wx.StaticText(self, -1, _("Amplifier input data"))
self.staticTxtTitulo.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer1.Add(self.staticTxtTitulo, flag= wx.ALIGN_CENTER_HORIZONTAL) #add contents to the horizontal sizers
self.static1= wx.StaticText(self, -1, "Vg:") #static text object
self.static1.SetToolTipString(_("Amplifier gain"))
self.vgTxt = wx.TextCtrl(self, ID_VG,'5',size=(40,20), style=wx.TE_CENTRE)
self.sizer10.Add(self.static1,flag= wx.RIGHT , border = 2) #add contents to the horizontal sizers
self.sizer10.Add(self.vgTxt,flag= wx.RIGHT , border = 10) #add contents to the horizontal sizers
self.static2= wx.StaticText(self, -1, "RLW:") #static text object
self.static2.SetToolTipString(_("Load resistance in ohms"))
self.rlTxt = wx.TextCtrl(self, ID_RL,'10000',size=(60,20),style=wx.TE_CENTRE)
self.sizer10.Add(self.static2,flag= wx.RIGHT , border = 2) #add contents to the horizontal sizers
self.sizer10.Add(self.rlTxt,flag= wx.RIGHT , border = 10) #add contents to the horizontal sizers
self.static3= wx.StaticText(self, -1, "Ecmax:") #static text object
self.static3.SetToolTipString(_("Maximum collector voltage in volts"))
self.ecmTxt = wx.TextCtrl(self, ID_ECMAX,'30',size=(40,20),style=wx.TE_CENTRE)
self.sizer10.Add(self.static3,flag= wx.RIGHT , border = 2) #add contents to the horizontal sizers
self.sizer10.Add(self.ecmTxt,flag= wx.RIGHT , border = 10) #add contents to the horizontal sizers
self.static4= wx.StaticText(self, -1, "Tmax:") #static text object
self.static4.SetToolTipString(_("Maximum junction temperature, in degrees Celsius"))
self.tmaxTxt = wx.TextCtrl(self, ID_TMAX,'70',size=(40,20),style=wx.TE_CENTRE)
self.sizer10.Add(self.static4,flag= wx.RIGHT , border = 2) #add contents to the horizontal sizers
self.sizer10.Add(self.tmaxTxt,flag= wx.RIGHT , border = 10) #add contents to the horizontal sizers
self.static5= wx.StaticText(self, -1, "S:") #static text object
self.static5.SetToolTipString(_("Stability factor, 1-10; 10 for AC coupled circuits, 2 for DC coupled"))
self.sTxt = wx.TextCtrl(self, ID_S,'10',size=(40,20),style=wx.TE_CENTRE)
self.sizer11.Add(self.static5,flag= wx.RIGHT , border = 2) #add contents to the horizontal sizers
self.sizer11.Add(self.sTxt,flag= wx.RIGHT , border = 10) #add contents to the horizontal sizers
self.static6= wx.StaticText(self, -1, "Beta:") #objeto texto statico
self.static6.SetToolTipString(_("Transistor beta parameter"))
self.betaTxt = wx.TextCtrl(self, ID_BETA,'100',size=(40,20),style=wx.TE_CENTRE)
self.sizer11.Add(self.static6,flag= wx.RIGHT , border = 2) #insere os conteudos dos sizer horizontais
self.sizer11.Add(self.betaTxt,flag= wx.RIGHT , border = 10) #insere os conteudos dos sizer horizontais
self.static7= wx.StaticText(self, -1, "Delta Ic:") #objeto texto statico
self.static7.SetToolTipString(_("Allowable Ic current variation (%)"))
self.dicTxt = wx.TextCtrl(self, ID_DIC,'10',size=(40,20),style=wx.TE_CENTRE)
self.sizer11.Add(self.static7,flag= wx.RIGHT , border = 2) #insere os conteudos dos sizer horizontais
self.sizer11.Add(self.dicTxt,flag= wx.RIGHT , border = 10) #insere os conteudos dos sizer horizontais
self.static8= wx.StaticText(self, -1, "Eopp:") #objeto texto statico
self.static8.SetToolTipString(_("Peak to peak output voltage (%)"))
self.e0ppTxt = wx.TextCtrl(self, ID_E0PP,'2',size=(40,20),style=wx.TE_CENTRE)
self.sizer11.Add(self.static8,flag= wx.RIGHT , border = 2) #insere os conteudos dos sizer horizontais
self.sizer11.Add(self.e0ppTxt,flag= wx.RIGHT , border = 10) #insere os conteudos dos sizer horizontais
self.static9= wx.StaticText(self, -1, "Cir:") #objeto texto statico
self.static9.SetToolTipString(_("Circuit topology (1,2,or 3"))
self.cirTxt = wx.TextCtrl(self, ID_CIR,'1',size=(30,20),style=wx.TE_CENTRE)
self.sizer12.Add(self.static9,flag= wx.RIGHT , border = 2) #insere os conteudos dos sizer horizontais
self.sizer12.Add(self.cirTxt,flag= wx.RIGHT , border = 10) #insere os conteudos dos sizer horizontais
self.static10= wx.StaticText(self, -1, "Vcc:") #objeto texto statico
self.static10.SetToolTipString(_("Battery voltage (volts)"))
self.vccTxt = wx.TextCtrl(self, ID_VCC,'12',size=(40,20),style=wx.TE_CENTRE)
self.sizer12.Add(self.static10,flag= wx.RIGHT , border = 2) #insere os conteudos dos sizer horizontais
self.sizer12.Add(self.vccTxt,flag= wx.RIGHT , border = 10) #insere os conteudos dos sizer horizontais
self.static11= wx.StaticText(self, -1, "Fmin:") #objeto texto statico
self.static11.SetToolTipString(_("Minimux operational frequency (Hz)"))
self.fminTxt = wx.TextCtrl(self, ID_FMIN,'20',size=(60,20),style=wx.TE_CENTRE)
self.sizer12.Add(self.static11,flag= wx.RIGHT , border = 2) #insere os conteudos dos sizer horizontais
self.sizer12.Add(self.fminTxt,flag= wx.RIGHT , border = 10) #insere os conteudos dos sizer horizontais
self.static12= wx.StaticText(self, -1, "Icmax:") #objeto texto statico
self.static12.SetToolTipString(_("Maximum colector current (A)"))
self.icmaxTxt = wx.TextCtrl(self, ID_ICMAX,'0.5',size=(60,20),style=wx.TE_CENTRE)
self.sizer12.Add(self.static12,flag= wx.RIGHT , border = 2) #insere os conteudos dos sizer horizontais
self.sizer12.Add(self.icmaxTxt,flag= wx.RIGHT , border = 10) #insere os conteudos dos sizer horizontais
self.static13= wx.StaticText(self, -1, _("For theorical explanation, please visit http://www.cadernodelaboratorio.com.br")) #objeto texto statico
self.sizer14.Add(self.static13,flag= wx.RIGHT , border = 2) #insere os conteudos dos sizer horizontais
self.okButton= wx.Button(self, wx.ID_OK, "OK") #objeto botao OK
self.cancelButton = wx.Button(self, wx.ID_CANCEL, "Cancela") #objeto botao cancela
self.sizer15.Add(self.okButton,flag= wx.EXPAND | wx.ALL, border = 5)
self.sizer15.Add(self.cancelButton,flag= wx.EXPAND | wx.ALL, border = 5)
self.sizer1.Add(self.sizer13,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.sizer1.Add(self.sizer10,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10) #inserimos os sizers horizontais no vertical
self.sizer1.Add(self.sizer11,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL, border = 10)
self.sizer1.Add(self.sizer12,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.sizer1.Add(self.sizer14,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL, border = 10)
self.sizer1.Add(self.sizer15,flag= wx.ALL| wx.ALIGN_CENTER_HORIZONTAL , border = 10)
self.SetSizer(self.sizer1) #insere o sizer de maior nivel na área ocupada pelo dialogo
self.Fit()
def GetVg(self):
return self.vgTxt.GetValue()
def GetRL(self):
return self.rlTxt.GetValue()
def GetEcmax(self):
return self.ecmTxt.GetValue()
def GetTmax(self):
return self.tmaxTxt.GetValue()
def GetS(self):
return self.sTxt.GetValue()
def GetBeta(self):
return self.betaTxt.GetValue()
def GetDeltaIc(self):
return self.dicTxt.GetValue()
def GetE0pp(self):
return self.e0ppTxt.GetValue()
def GetCir(self):
return self.cirTxt.GetValue()
def GetVcc(self):
return self.vccTxt.GetValue()
def GetFmin(self):
return self.fminTxt.GetValue()
def GetIcmax(self):
return self.icmaxTxt.GetValue()
class SelAmplifiersDesign(wx.Dialog):
"""
Implements the selection dialog for the transistor amplifier topology
"""
def __init__(self):
wx.Dialog.__init__(self, None, -1, _("Amplifier Design Selection"),size=(300, 150)) #inicializacao do dialogo
self.sizer1 = wx.BoxSizer(wx.VERTICAL)
self.sizer10 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer11 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer12 = wx.BoxSizer(wx.HORIZONTAL);
self.sizer13 = wx.BoxSizer(wx.HORIZONTAL);
self.staticTxtTitulo= wx.StaticText(self, -1, _("Select the amplifier type"))
self.staticTxtTitulo.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer10.Add(self.staticTxtTitulo) #add contents to the horizontal sizers
self.rb1 = wx.RadioButton(self, BTN_AMPLIFIER_TR_G_1_10 , _("Circuit gain 1-10") ,style=wx.RB_GROUP)
#self.rb2 = wx.RadioButton(self, BTN_AMPLIFIER_TR_G_10_50 , _("Circuit gain 10-50"))
#self.rb3 = wx.RadioButton(self, BTN_AMPLIFIER_TR_G_50_400 , _("Circuit max possible gain"))
self.sizer11.Add(self.rb1,flag= wx.EXPAND | wx.ALL, border = 5) #add contents to the horizontal sizers
#self.sizer11.Add(self.rb2,flag= wx.EXPAND | wx.ALL, border = 5)
#self.sizer11.Add(self.rb3,flag= wx.EXPAND | wx.ALL, border = 5)
self.okButton= wx.Button(self, wx.ID_OK, "OK") #objeto botao OK
self.cancelButton = wx.Button(self, wx.ID_CANCEL, "Cancela") #objeto botao cancela
self.sizer13.Add(self.okButton,flag= wx.EXPAND | wx.ALL, border = 5)
self.sizer13.Add(self.cancelButton,flag= wx.EXPAND | wx.ALL, border = 5)
self.sizer1.Add(self.sizer10,flag= wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border = 10) #inserimos os sizers horizontais no vertical
self.sizer1.Add(self.sizer11,flag= wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border = 10)
self.sizer1.Add(self.sizer12,flag= wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, border = 10)
self.sizer1.Add(self.sizer13,flag= wx.ALL |wx.ALIGN_CENTER_HORIZONTAL ,border = 10)
self.SetSizer(self.sizer1) #insere o sizer de maior nivel na área ocupada pelo dialogo
self.Fit()
def GetSelection(self):
if self.rb1.GetValue() == True:
return BTN_AMPLIFIER_TR_G_1_10
#if self.rb2.GetValue() == True:
# return BTN_AMPLIFIER_TR_G_10_50
#if self.rb3.GetValue() == True:
# return BTN_AMPLIFIER_TR_G_50_100
class TransistorAmpDesign():
"""
Calculates the component values of a transistor amplifier with gain 1-10
"""
def __init__(self,fVg,fRlw,fEcmax,fTmax,fS,fDic,fE0pp,fBeta,fVcc,fMin,fIcmax,debugMode):
self.fVg= fVg
self.fRlw= fRlw
self.fEcmax= fEcmax
self.fTmax= fTmax
self.fS= fS
self.fDic= fDic
self.fE0pp=fE0pp
self.fBeta=fBeta
self.fVcc= fVcc
self.fMin= fMin
self.fIcmax= fIcmax
self.condicaoValida= True
self.valorIcOK= False
self.debugMode= debugMode
self.finalMsg= _("Circuit calculated correctly")
if self.debugMode == True:
print "Vg=", self.fVg
print "Rl=",self.fRlw
print "Ecmax=",self.fEcmax
print "Tmax=",self.fTmax
print "S=",self.fS
print "Dic=",self.fDic
print "E0pp=",self.fE0pp
print "Beta=",self.fBeta
print "Vcc=",self.fVcc
print "Fmin=",self.fMin
print "Icmax=",self.fIcmax
def calculaCircuito(self):
"""
returns a boolean indicating whether the topology can be calculated (True/False);
the component values can then be retrieved through the corresponding getter methods
"""
self.fQic= 0.0005 #initial quiescent current in amperes
#compute the minimum Icq as a function of the Re and Rlac values
while self.condicaoValida == True:
self.fEres= (retornaDeltaVbe(self.fQic)*(self.fTmax-20)*100)/self.fDic
self.fRes= self.fEres/self.fQic
self.fRlac= self.fVg * self.fRes
if self.fRlw <= self.fRlac:
self.fQic = 1.2 * self.fQic #increase Icq to meet the RLac and Re values required for the specified gain
if self.fQic > self.fIcmax/2:
self.condicaoValida= False
self.finalMsg= _("Design impossible: Try increase RLw")
else:
break
#compute the minimum Icq that yields a positive Rl
while self.condicaoValida == True:
self.fEres= (retornaDeltaVbe(self.fQic)*(self.fTmax-20)*100)/self.fDic
self.fRes= self.fEres/self.fQic
self.fRlac= self.fVg * self.fRes
self.fRl= (self.fRlw * self.fRlac)/(self.fRlw-self.fRlac)
if self.fRl < 0:
self.fQic = 1.2 * self.fQic
if self.fQic > self.fIcmax/2:
self.condicaoValida= False
self.finalMsg= _("Design impossible: Try increase RLw or lower Vg")
else:
break
#compute the minimum Icq that guarantees the desired VCE
while self.condicaoValida== True:
if self.debugMode == True:
print self.fQic
self.fEres= (retornaDeltaVbe(self.fQic)*(self.fTmax-20)*100)/self.fDic
self.fRes= self.fEres/self.fQic
self.fRlac= self.fVg * self.fRes
self.fRl= (self.fRlw * self.fRlac)/(self.fRlw-self.fRlac)
self.fErl= self.fQic * self.fRl
self.fEq= 1 + self.fE0pp
self.fEs= self.fEq + self.fErl + self.fEres
self.fFolgaVcc= self.fVcc- self.fEs
if self.fFolgaVcc < 1.0:
self.fQic= self.fQic * 1.1
if self.fQic > self.fIcmax/2:
self.condicaoValida= False
self.finalMsg= _("Design impossible: Try increase Vcc")
else:
self.valorIcOK= True
self.fEq = self.fEq + self.fFolgaVcc
break
if self.condicaoValida==True:
self.fRb2= self.fS * self.fRes
self.fBetaRes= self.fBeta * self.fRes
self.fRint= (self.fRb2 * self.fBetaRes)/(self.fRb2+self.fBetaRes)
self.fErb1= self.fEq + self.fErl - 0.6
self.fRb1= (self.fErb1 * self.fRint)/(self.fEres +0.6)
self.fiZin= 1.0/self.fRb1 + 1.0/self.fRb2 + 1.0/(self.fBeta * self.fRes)
self.fZin= 1.0/self.fiZin
self.fCcin= 1.0/(2 * 3.14 * self.fMin * self.fZin/10.0)
self.fCco= 1.0 /(2 * 3.14 * self.fMin * self.fRlw/10.0)
return self.condicaoValida
def GetRe(self):
return self.fRes
def GetRl(self):
return self.fRl
def GetRb1(self):
return self.fRb1
def GetRb2(self):
return self.fRb2
def GetCcin(self):
return self.fCcin
def GetCco(self):
return self.fCco
def GetQic(self):
return self.fQic
def GetZin(self):
return self.fZin
def GetEres(self):
return self.fEres
def GetFinalMsg(self):
return self.finalMsg
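# Minimal usage sketch (not part of the original file); the numbers mirror
# the defaults of the input dialog above and are example values only:
#
# design = TransistorAmpDesign(5.0, 10000.0, 30.0, 70.0, 10.0, 10.0,
#                              2.0, 100.0, 12.0, 20.0, 0.5, False)
# if design.calculaCircuito():
#     print "Re =", design.GetRe(), "ohms, Icq =", design.GetQic(), "A"
# else:
#     print design.GetFinalMsg()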
|
cadele/cadele
|
transistorAmp.py
|
Python
|
gpl-2.0
| 21,912
|
[
"VisIt"
] |
c4b49318fb919d5254d88fdec1527dc4cccc5534f897faa8b4a72ea0ca1c98a3
|
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
from zeroinstall.support import tasks
from zeroinstall.injector import handler, download
gobject = tasks.get_loop().gobject
glib = tasks.get_loop().glib
version = '2.4-rc1-post'
class GUIHandler(handler.Handler):
pulse = None
mainwindow = None
def _reset_counters(self):
if not self.monitored_downloads:
self.n_completed_downloads = 0
self.total_bytes_downloaded = 0
return False
def abort_all_downloads(self):
for dl in self.monitored_downloads:
dl.abort()
def downloads_changed(self):
if self.monitored_downloads and self.pulse is None:
def pulse():
self.mainwindow.update_download_status(only_update_visible = True)
return True
pulse()
self.pulse = glib.timeout_add(200, pulse)
elif len(self.monitored_downloads) == 0:
# Delay before resetting, in case we start a new download quickly
glib.timeout_add(500, self._reset_counters)
# Stop animation
if self.pulse:
glib.source_remove(self.pulse)
self.pulse = None
self.mainwindow.update_download_status()
def impl_added_to_store(self, impl):
self.mainwindow.update_download_status(only_update_visible = True)
@tasks.async
def _switch_to_main_window(self, reason):
if self.mainwindow.systray_icon:
self.mainwindow.systray_icon.set_tooltip(reason)
self.mainwindow.systray_icon.set_blinking(True)
# Wait for the user to click the icon, then continue
yield self.mainwindow.systray_icon_blocker
yield tasks.TimeoutBlocker(0.5, 'Delay')
@tasks.async
def confirm_import_feed(self, pending, valid_sigs):
yield self._switch_to_main_window(_('Need to confirm a new GPG key'))
from zeroinstall.gtkui import trust_box
box = trust_box.TrustBox(pending, valid_sigs, parent = self.mainwindow.window)
box.show()
yield box.closed
@tasks.async
def confirm_install(self, message):
yield self._switch_to_main_window(_('Need to confirm installation of distribution packages'))
from zeroinstall.injector.download import DownloadAborted
from zeroinstall.gui import dialog
import gtk
box = gtk.MessageDialog(self.mainwindow.window,
gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_QUESTION, gtk.BUTTONS_CANCEL,
str(message))
box.set_position(gtk.WIN_POS_CENTER)
install = dialog.MixedButton(_('Install'), gtk.STOCK_OK)
if gtk.pygtk_version >= (2,22,0):
install.set_can_default(True)
else:
install.set_flags(gtk.CAN_DEFAULT)
box.add_action_widget(install, gtk.RESPONSE_OK)
install.show_all()
box.set_default_response(gtk.RESPONSE_OK)
box.show()
response = dialog.DialogResponse(box)
yield response
box.destroy()
if response.response != gtk.RESPONSE_OK:
raise DownloadAborted()
def report_error(self, ex, tb = None):
if isinstance(ex, download.DownloadAborted):
return # No need to tell the user about this, since they caused it
self.mainwindow.report_exception(ex, tb = tb)
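# Wiring sketch (not part of the original file): the handler only becomes
# useful once the GUI bootstrap assigns its `mainwindow` attribute; `window`
# here stands for a hypothetical main-window object.
#
# handler = GUIHandler()
# handler.mainwindow = window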
|
linuxmidhun/0install
|
zeroinstall/gui/gui.py
|
Python
|
lgpl-2.1
| 3,008
|
[
"VisIt"
] |
8b40e8afa58a412c27ae73950e235813d894b0b16bd9853c9a323ac3c9bc7b70
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1447321436.297985
__CHEETAH_genTimestamp__ = 'Thu Nov 12 18:43:56 2015'
__CHEETAH_src__ = '/home/knuth/openpli-oe-core/build/tmp/work/fusionhd-oe-linux/enigma2-plugin-extensions-openwebif/1+gitAUTOINC+5837c87afc-r0/git/plugin/controllers/views/bouqueteditor/web/getservices.tmpl'
__CHEETAH_srcLastModified__ = 'Thu Nov 12 18:43:41 2015'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class getservices(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(getservices, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_95922938 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2servicelist>
''')
for service in VFFSL(SL,"services",True): # generated from line 4, col 2
write(u'''\t<e2service>
\t\t<e2servicereference>''')
_v = VFFSL(SL,"service.servicereference",True) # u'$service.servicereference' on line 6, col 23
if _v is not None: write(_filter(_v, rawExpr=u'$service.servicereference')) # from line 6, col 23.
write(u'''</e2servicereference>
\t\t<e2servicename>''')
_v = VFFSL(SL,"service.servicename",True) # u'$service.servicename' on line 7, col 18
if _v is not None: write(_filter(_v, rawExpr=u'$service.servicename')) # from line 7, col 18.
write(u'''</e2servicename>
\t\t<e2serviceisgroup>''')
_v = VFFSL(SL,"service.isgroup",True) # u'$service.isgroup' on line 8, col 21
if _v is not None: write(_filter(_v, rawExpr=u'$service.isgroup')) # from line 8, col 21.
write(u'''</e2serviceisgroup>
\t\t<e2serviceismarker>''')
_v = VFFSL(SL,"service.ismarker",True) # u'$service.ismarker' on line 9, col 22
if _v is not None: write(_filter(_v, rawExpr=u'$service.ismarker')) # from line 9, col 22.
write(u'''</e2serviceismarker>
\t\t<e2serviceisprotected>''')
_v = VFFSL(SL,"service.isprotected",True) # u'$service.isprotected' on line 10, col 25
if _v is not None: write(_filter(_v, rawExpr=u'$service.isprotected')) # from line 10, col 25.
write(u'''</e2serviceisprotected>
\t</e2service>
''')
write(u'''</e2servicelist>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_95922938
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_getservices= 'respond'
## END CLASS DEFINITION
if not hasattr(getservices, '_initCheetahAttributes'):
templateAPIClass = getattr(getservices, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(getservices)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=getservices()).run()
|
pli3/e2-openwbif
|
plugin/controllers/views/bouqueteditor/web/getservices.py
|
Python
|
gpl-2.0
| 6,302
|
[
"VisIt"
] |
9cb7930848b566c7652ae4c6fd5ae6388b857b16f4b37e208d9cc6de6e83f344
|
# -*- coding: utf-8 -*-
# creates: diffusion-I.png diffusion-T.png diffusion-F.png diffusion-barrier.png
#import matplotlib
#matplotlib.use('Agg')
import pylab as plt
from ase.io import read, write
from ase.neb import fit
if 1:
execfile('diffusion1.py')
execfile('diffusion2.py')
execfile('diffusion4.py')
images = read('neb.traj@-5:')
for name, a in zip('ITF', images[::2]):
cell = a.get_cell()
del a.constraints
a = a * (2, 2, 1)
a.set_cell(cell)
write('diffusion-%s.pov' % name, a, show_unit_cell=True,
transparent=False, display=False, run_povray=True)
s, E, Sfit, Efit, lines = fit(images)
assert abs(max(Efit) - 0.374) < 1e-3
plt.figure(figsize=(4.5, 3))
plt.plot(s, E, 'o')
plt.plot(Sfit, Efit, 'k-')
for x, y in lines:
plt.plot(x, y, 'g-')
plt.xlabel(u'path [Å]')
plt.ylabel(u'energy [eV]')
plt.title('Maximum: %.3f eV' % max(Efit))
plt.subplots_adjust(left=0.19, bottom=0.15)
plt.savefig('diffusion-barrier.png')
|
askhl/ase
|
doc/tutorials/neb/diffusion.py
|
Python
|
gpl-2.0
| 973
|
[
"ASE"
] |
9b59d1491af9413feb96828a9652e71b66007523dc5eb27e6dc087a94b7a1567
|
#
# Copyright (C) 2001,2002,2003 greg Landrum and Rational Discovery LLC
#
""" Functionality for ranking bits using info gains
**Definitions used in this module**
- *sequence*: an object capable of containing other objects which supports
__getitem__() and __len__(). Examples of these include lists, tuples, and
Numeric arrays.
- *IntVector*: an object containing integers which supports __getitem__() and
__len__(). Examples include lists, tuples, Numeric Arrays, and BitVects.
**NOTE**: Neither *sequences* nor *IntVectors* need to support item assignment.
It is perfectly acceptable for them to be read-only, so long as they are
random-access.
"""
import numpy
from rdkit.ML.InfoTheory import entropy
def FormCounts(bitVects,actVals,whichBit,nPossibleActs,nPossibleBitVals=2):
""" generates the counts matrix for a particular bit
**Arguments**
- bitVects: a *sequence* containing *IntVectors*
- actVals: a *sequence*
- whichBit: an integer, the bit number to use.
- nPossibleActs: the (integer) number of possible activity values.
- nPossibleBitVals: (optional) if specified, this integer provides the maximum
value attainable by the (increasingly inaccurately named) bits in _bitVects_.
**Returns**
a Numeric array with the counts
**Notes**
This is really intended for internal use.
"""
if len(bitVects) != len(actVals): raise ValueError,'var and activity lists should be the same length'
res = numpy.zeros((nPossibleBitVals,nPossibleActs),numpy.integer)
for i in xrange(len(bitVects)):
res[bitVects[i][whichBit],actVals[i]] += 1
return res
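# For example (illustrative, not in the original module): counting bit 0
# of two 2-bit vectors against two activity classes gives
#
# >>> FormCounts([[0, 1], [1, 1]], [0, 1], whichBit=0, nPossibleActs=2)
# array([[1, 0],
#        [0, 1]])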
def CalcInfoGains(bitVects,actVals,nPossibleActs,nPossibleBitVals=2):
""" Calculates the information gain for a set of points and activity values
**Arguments**
- bitVects: a *sequence* containing *IntVectors*
- actVals: a *sequence*
- nPossibleActs: the (integer) number of possible activity values.
- nPossibleBitVals: (optional) if specified, this integer provides the maximum
value attainable by the (increasingly inaccurately named) bits in _bitVects_.
**Returns**
a list of floats
"""
if len(bitVects) != len(actVals): raise ValueError,'var and activity lists should be the same length'
nBits = len(bitVects[0])
res = numpy.zeros(nBits,numpy.float64)
for bit in xrange(nBits):
counts = FormCounts(bitVects,actVals,bit,nPossibleActs,
nPossibleBitVals=nPossibleBitVals)
res[bit] = entropy.InfoGain(counts)
return res
def RankBits(bitVects,actVals,nPossibleBitVals=2,
metricFunc=CalcInfoGains):
""" Rank a set of bits according to a metric function
**Arguments**
- bitVects: a *sequence* containing *IntVectors*
- actVals: a *sequence*
- nPossibleBitVals: (optional) if specified, this integer provides the maximum
value attainable by the (increasingly inaccurately named) bits in _bitVects_.
- metricFunc: (optional) the metric function to be used. See _CalcInfoGains()_
for a description of the signature of this function.
**Returns**
A 2-tuple containing:
- the relative order of the bits (a list of ints)
- the metric calculated for each bit (a list of floats)
"""
nPossibleActs = max(actVals)+1
metrics = metricFunc(bitVects,actVals,nPossibleActs,
nPossibleBitVals=nPossibleBitVals)
bitOrder = list(numpy.argsort(metrics))
bitOrder.reverse()
return bitOrder,metrics
def AnalyzeSparseVects(bitVects,actVals):
""" #DOC
**Arguments**
- bitVects: a *sequence* containing SBVs
- actVals: a *sequence*
**Returns**
a list of floats
**Notes**
- these need to be bit vects and binary activities
"""
nPts = len(bitVects)
if nPts != len(actVals): raise ValueError,'var and activity lists should be the same length'
nBits = bitVects[0].GetSize()
actives = numpy.zeros(nBits,numpy.integer)
inactives = numpy.zeros(nBits,numpy.integer)
nActives,nInactives = 0,0
for i in range(nPts):
sig,act = bitVects[i],actVals[i]
onBitList = sig.GetOnBits()
if act:
for bit in onBitList:
actives[bit] += 1
nActives += 1
else:
for bit in onBitList:
inactives[bit] += 1
nInactives += 1
resTbl = numpy.zeros((2,2),numpy.integer)
res = []
gains = []
counts = []
for bit in xrange(nBits):
nAct,nInact = actives[bit],inactives[bit]
if nAct or nInact:
resTbl[0,0] = nAct
resTbl[1,0] = nPts - nAct
resTbl[0,1] = nInact
resTbl[1,1] = nPts - nInact
gain = entropy.InfoGain(resTbl)
gains.append(gain)
res.append((bit,gain,nAct,nInact))
return res,gains
def SparseRankBits(bitVects,actVals,metricFunc=AnalyzeSparseVects):
""" Rank a set of bits according to a metric function
**Arguments**
- bitVects: a *sequence* containing SBVs
- actVals: a *sequence*
- metricFunc: (optional) the metric function to be used. See _AnalyzeSparseVects()_
for a description of the signature of this function.
**Returns**
A 2-tuple containing:
- the relative order of the bits (a list of ints)
- the metric calculated for each bit (a list of floats)
**Notes**
- these need to be bit vects and binary activities
"""
info,metrics = metricFunc(bitVects,actVals)
bitOrder = list(numpy.argsort(metrics))
bitOrder.reverse()
return bitOrder,info
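# Illustrative usage sketch (not part of the original module), using plain
# lists as the bit vectors and binary activity values:
#
# bitVects = [[0, 1], [1, 1], [0, 0], [1, 0]]
# actVals = [0, 1, 0, 1]
# order, gains = RankBits(bitVects, actVals)
# # 'order' lists the bits from most to least informative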
|
rdkit/rdkit-orig
|
rdkit/ML/InfoTheory/BitRank.py
|
Python
|
bsd-3-clause
| 5,509
|
[
"RDKit"
] |
1c649bb28ee68f8b9ab75333f20501601b30fa5267ca502f0a1c645468ad024c
|
# Orca
#
# Copyright 2004-2009 Sun Microsystems Inc.
# Copyright 2010-2013 The Orca Team
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Messages which Orca presents in speech and/or braille. These
have been put in their own module so that we can present them in
the correct language when users change the synthesizer language
on the fly without having to reload a bunch of modules."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2004-2009 Sun Microsystems Inc." \
"Copyright (c) 2010-2013 The Orca Team"
__license__ = "LGPL"
from .orca_i18n import _, C_, ngettext
from .orca_platform import version
# Translators: Sometimes when we attempt to get the name of an accessible
# software application, we fail because the app or one of its elements is
# defunct. This is a generic name so that we can still refer to this element
# in messages.
APPLICATION_NO_NAME = C_("generic name", "application")
# Translators: This is presented when the user has navigated to an empty line.
BLANK = _("blank")
# Translators: This refers to font weight.
BOLD = _("bold")
# Translators: Orca has a feature in which users can store/save a particular
# location in an application window and return to it later by pressing a
# keystroke. These stored/saved locations are "bookmarks". This string is
# presented to the user when a new bookmark has been entered into the list
# of bookmarks.
BOOKMARK_ENTERED = _("bookmark entered")
# Translators: Orca has a feature in which users can store/save a particular
# location in an application window and return to it later by pressing a
# keystroke. These stored/saved locations are "bookmarks". This string is
# presented to the user to indicate the comparative locations of the current
# object and the bookmarked object could not be determined.
BOOKMARK_COMPARISON_UNKNOWN = _('comparison unknown')
# Translators: Orca has a feature in which users can store/save a particular
# location in an application window and return to it later by pressing a
# keystroke. These stored/saved locations are "bookmarks". This string is
# presented to the user to indicate the current object is the same object
# pointed to by a given bookmark.
BOOKMARK_IS_CURRENT_OBJECT = _("bookmark is current object")
# Translators: Orca has a feature in which users can store/save a particular
# location in an application window and return to it later by pressing a
# keystroke. These stored/saved locations are "bookmarks". This string is
# presented to the user to indicate the current object's parent and the
# bookmarked object's parent are the same.
BOOKMARK_PARENT_IS_SAME = _("bookmark and current object have same parent")
# Translators: Orca has a feature in which users can store/save a particular
# location in an application window and return to it later by pressing a
# keystroke. These stored/saved locations are "bookmarks". This string is
# presented to the user to indicate the current object and the bookmarked
# object share a common ancestor.
BOOKMARK_SHARED_ANCESTOR = _("shared ancestor %s")
# Translators: Orca has a feature in which users can store/save a particular
# location in an application window and return to it later by pressing a
# keystroke. These stored/saved locations are "bookmarks". This string is
# presented to the user when the active list of bookmarks have been saved to
# disk.
BOOKMARKS_SAVED = _("bookmarks saved")
# Translators: Orca has a feature in which users can store/save a particular
# location in an application window and return to it later by pressing a
# keystroke. These stored/saved locations are "bookmarks". This string is
# presented to the user when an error was encountered, preventing the active
# list of bookmarks being saved to disk.
BOOKMARKS_SAVED_FAILURE = _("bookmarks could not be saved")
# Translators: Orca normally intercepts all keyboard commands and only passes
# them along to the current application when they are not Orca commands. This
# command causes the next command issued to be passed along to the current
# application, bypassing Orca's interception of it.
BYPASS_MODE_ENABLED = _("Bypass mode enabled.")
# Translators: this is an indication that Orca is unable to obtain the display/
# results area of the calculator being used (e.g. gcalctool).
CALCULATOR_DISPLAY_NOT_FOUND = _("Unable to get calculator display")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter, or play a tone which Speech Dispatcher refers
# to as a sound 'icon'. This string to be translated refers to the brief/
# non-verbose output presented in response to the use of an Orca command which
# makes it possible for users to quickly cycle amongst these alternatives
# without having to get into a GUI.
CAPITALIZATION_ICON_BRIEF = C_("capitalization style", "icon")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter, or play a tone which Speech Dispatcher refers
# to as a sound 'icon'. This string to be translated refers to the full/verbose
# output presented in response to the use of an Orca command which makes it
# possible for users to quickly cycle amongst these alternatives without having
# to get into a GUI.
CAPITALIZATION_ICON_FULL = _("Capitalization style set to icon.")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter, or play a tone which Speech Dispatcher refers
# to as a sound 'icon'. This string to be translated refers to the brief/
# non-verbose output presented in response to the use of an Orca command which
# makes it possible for users to quickly cycle amongst these alternatives
# without having to get into a GUI.
CAPITALIZATION_NONE_BRIEF = C_("capitalization style", "none")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter, or play a tone which Speech Dispatcher refers
# to as a sound 'icon'. This string to be translated refers to the full/verbose
# output presented in response to the use of an Orca command which makes it
# possible for users to quickly cycle amongst these alternatives without having
# to get into a GUI.
CAPITALIZATION_NONE_FULL = _("Capitalization style set to none.")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter, or play a tone which Speech Dispatcher refers
# to as a sound 'icon'. This string to be translated refers to the brief/
# non-verbose output presented in response to the use of an Orca command which
# makes it possible for users to quickly cycle amongst these alternatives
# without having to get into a GUI.
CAPITALIZATION_SPELL_BRIEF = C_("capitalization style", "spell")
# Translators: Orca uses Speech Dispatcher to present content to users via
# text-to-speech. Speech Dispatcher has a feature to control how capital
# letters are presented: Do nothing at all, say the word 'capital' prior to
# presenting a capital letter, or play a tone which Speech Dispatcher refers
# to as a sound 'icon'. This string to be translated refers to the full/verbose
# output presented in response to the use of an Orca command which makes it
# possible for users to quickly cycle amongst these alternatives without having
# to get into a GUI.
CAPITALIZATION_SPELL_FULL = _("Capitalization style set to spell.")
# Translators: Gecko native caret navigation is where Firefox (or Thunderbird)
# itself controls how the arrow keys move the caret around HTML content. It's
# often broken, so Orca needs to provide its own support. As such, Orca offers
# the user the ability to toggle which application is controlling the caret.
CARET_CONTROL_GECKO = _("Gecko is controlling the caret.")
# Translators: Gecko native caret navigation is where Firefox (or Thunderbird)
# itself controls how the arrow keys move the caret around HTML content. It's
# often broken, so Orca needs to provide its own support. As such, Orca offers
# the user the ability to toggle which application is controlling the caret.
CARET_CONTROL_ORCA = _("The screen reader is controlling the caret.")
# Translators: this is the name of a cell in a spreadsheet.
CELL = _("Cell %s")
# Translators: This is the description of command line option '-d, --disable'
# which allows the user to specify an option to disable as Orca is started.
CLI_DISABLE_OPTION = _("Prevent use of option")
# Translators: this is the description of command line option '-e, --enable'
# which allows the user to specify an option to enable as Orca is started.
CLI_ENABLE_OPTION = _("Force use of option")
# Translators: This string indicates to the user what should be provided when
# using the '-e, --enable' or '-d, --disable' command line options.
CLI_OPTION = _("OPTION")
# Translators: This message is displayed when the user starts Orca from the
# command line and includes an invalid option or argument. After the message,
# the list of invalid items, as typed by the user, is displayed.
CLI_INVALID_OPTIONS = _("The following are not valid: ")
# Translators: This is the description of command line option '-l, --list-apps'
# which prints the names of running applications which can be seen by assistive
# technologies such as Orca and Accerciser.
CLI_LIST_APPS = _("Print the known running applications")
# Translators: This is the description of command line option '-p, --profile'
# which allows you to specify a profile to be loaded. A profile stores a group
# of Orca settings configured by the user for a particular purpose, such as a
# 'Spanish' profile which would include Spanish braille and text-to-speech.
# An Orca settings file contains one or more profiles.
CLI_LOAD_PROFILE = _("Load profile")
# Translators: This message is presented to the user when the specified profile
# could not be loaded. A profile stores a group of Orca settings configured for
# a particular purpose, such as a Spanish profile which would include Spanish
# braille and Spanish text-to-speech. The string substituted in is the user-
# provided profile name.
CLI_LOAD_PROFILE_ERROR = _("Profile could not be loaded: %s")
# Translators: This message is presented to the user who attempts to launch Orca
# from some other environment than the graphical desktop.
CLI_NO_DESKTOP_ERROR = \
_("Cannot start the screen reader because it cannot connect to the Desktop.")
# Translators: This message is presented to the user who attempts to launch Orca
# but the launch fails due to an error related to the settings manager.
CLI_SETTINGS_MANAGER_ERROR = \
_("Could not activate the settings manager. Exiting.")
# Translators: This message is presented to the user when he/she tries to launch
# Orca, but Orca is already running.
CLI_OTHER_ORCAS_ERROR = \
_('Another screen reader process is already running for this ' \
'session.\nRun "orca --replace" to replace that ' \
'process with a new one.')
# Translators: This string indicates to the user what should be provided when
# using the '-p, --profile' command line option.
CLI_PROFILE_NAME = _("NAME")
# Translators: This is the description of command line option '-u, --user-prefs'
# that allows you to specify an alternate location from which to load the user
# preferences.
CLI_LOAD_PREFS = _("Use alternate directory for user preferences")
# Translators: This string indicates to the user what should be provided when
# using the '-u, --user-prefs' command line option.
CLI_PREFS_DIR = _("DIR")
# Translators: This is the description of command line option '-r, --replace'
# which tells Orca to replace any existing Orca process that might be running.
CLI_REPLACE = _("Replace a currently running instance of this screen reader")
# Translators: This is the description of command line option '--debug' which
# causes debugging output for Orca to be sent to a file. The YYYY-MM-DD-HH:MM:SS
# portion of the string indicates the file name will be formed from the current
# date and time with 'debug' in front and '.out' at the end. The 'debug' and
# '.out' portions of this string should not be translated (i.e. it should always
# start with 'debug' and end with '.out', regardless of the locale.).
CLI_ENABLE_DEBUG = _("Send debug output to debug-YYYY-MM-DD-HH:MM:SS.out")
# Translators: This is the description of command line option '--debug-file'
# which allows the user to override the default date-based name of the debugging
# output file.
CLI_DEBUG_FILE = _("Send debug output to the specified file")
# Translators: This string indicates to the user what should be provided when
# using the '--debug-file' command line option.
CLI_DEBUG_FILE_NAME = _("FILE")
# Translators: This is the description of command line option '-t, --text-setup'
# that will initially display a list of questions in text form, that the user
# will need to answer, before Orca will startup. For this to happen properly,
# Orca will need to be run from a terminal window.
CLI_SETUP = _("Set up user preferences (text version)")
# Translators: This text is the description displayed when Orca is launched
# from the command line and the help text is displayed.
CLI_EPILOG = _("Report bugs to orca-list@gnome.org.")
# Translators: In chat applications, it is often possible to see that a "buddy"
# is typing currently (e.g. via a keyboard icon or status text). Some users like
# to have this typing status announced by Orca; others find that announcement
# unpleasant. Therefore, it is a setting in Orca. This string to be translated
# is presented when the value of the setting is toggled.
CHAT_BUDDY_TYPING_OFF = _("Do not announce when your buddies are typing.")
# Translators: In chat applications, it is often possible to see that a "buddy"
# is typing currently (e.g. via a keyboard icon or status text). Some users like
# to have this typing status announced by Orca; others find that announcement
# unpleasant. Therefore, it is a setting in Orca. This string to be translated
# is presented when the value of the setting is toggled.
CHAT_BUDDY_TYPING_ON = _("announce when your buddies are typing.")
# Translators: In chat applications, Orca automatically presents incoming
# messages in speech and braille. If a user is in multiple conversations or
# channels at the same time, it can be confusing to know what room or channel
# a given message came from just from hearing/reading it. This string to be
# translated is presented to the user to clarify where an incoming message
# came from. The name of the chat room is the string substitution.
CHAT_MESSAGE_FROM_ROOM = _("Message from chat room %s")
# Translators: This message is presented to inform the user that a new chat
# conversation has been added to the existing conversations. The "tab" here
# refers to the tab which contains the label for a GtkNotebook page. The
# label on the tab is the string substitution.
CHAT_NEW_TAB = _("New chat tab %s")
# Translators: In chat applications, Orca automatically presents incoming
# messages in speech and braille. If a user is in multiple conversations or
# channels at the same time, it can be confusing to know what room or channel
# a given message came from just from hearing/reading it. For this reason, Orca
# has an option to present the name of the room first ("#a11y <joanie> hello!"
# instead of "<joanie> hello!"). This string to be translated is presented when
# the value of the setting is toggled.
CHAT_ROOM_NAME_PREFIX_OFF = _("Do not speak chat room name.")
# Translators: In chat applications, Orca automatically presents incoming
# messages in speech and braille. If a user is in multiple conversations or
# channels at the same time, it can be confusing to know what room or channel
# a given message came from just from hearing/reading it. For this reason, Orca
# has an option to present the name of the room first ("#a11y <joanie> hello!"
# instead of "<joanie> hello!"). This string to be translated is presented when
# the value of the setting is toggled.
CHAT_ROOM_NAME_PREFIX_ON = _("Speak chat room name.")
# Translators: Orca has a command to review previous chat room messages in
# speech and braille. Some users prefer to have this message history combined
# (e.g. the last ten messages which came in, no matter what room they came
# from). Other users prefer to have specific room history (e.g. the last ten
# messages from #a11y). Therefore, this is a setting in Orca. This string to be
# translated is presented when the value of the setting is toggled.
CHAT_SEPARATE_HISTORIES_OFF = \
_("Do not provide chat room specific message histories.")
# Translators: Orca has a command to review previous chat room messages in
# speech and braille. Some users prefer to have this message history combined
# (e.g. the last ten messages which came in, no matter what room they came
# from). Other users prefer to have specific room history (e.g. the last ten
# messages from #a11y). Therefore, this is a setting in Orca. This string to be
# translated is presented when the value of the setting is toggled.
CHAT_SEPARATE_HISTORIES_ON = _("Provide chat room specific message histories.")
# Translators: this is a regular expression that is intended to match
# a positive 'yes' response from a user at the command line. The expression
# as given means - does it begin with (that's the '^' character) any of
# the characters in the '[' ']'? In this case, we've chosen 'Y', 'y', and
# '1' to mean positive answers, so any string beginning with 'Y', 'y', or
# '1' will match. For an example of translation, assume your language has
# the words 'posolutely' and 'absitively' as common words that mean the
# equivalent of 'yes'. You might make the expression match the upper and
# lower case forms: "^[aApP1]". If the 'yes' and 'no' words for your
# locale begin with the same character, the regular expression should be
# modified to use words. For example: "^(yes|Yes)" (note the change from
# using '[' and ']' to '(' and ')').
#
# Finally, this expression should match what you've chosen for the
# translation of the "Enter y or n:" strings for this file.
CONSOLE_SETUP_YESEXPR = _("^[Yy1]")
# Translators: this is a regular expression that is intended to match
# a negative 'no' response from a user at the command line. The expression
# as given means - does it begin with (that's the '^' character) any of
# the characters in the '[' ']'? In this case, we've chosen 'N', 'n', and
# '0' to mean negative answers, so any string beginning with 'N', 'n', or
# '0' will match. If the 'yes' and 'no' words for your locale begin with
# the same character, the regular expression should be modified to use
# words. For example: "^(no|No)" (note the change from using '[' and ']'
# to '(' and ')').
#
# Finally, this expression should match what you've chosen for the
# translation of the "Enter y or n:" strings for this file.
CONSOLE_SETUP_NOEXPR = _("^[Nn0]")
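# The following is a minimal illustrative sketch (not part of Orca's actual
# console-based setup code) of how the localized yes/no expressions above
# might be applied to an answer typed by the user. The helper name is
# hypothetical.
def _example_parse_yes_no(answer):
    """Return True for a localized 'yes', False for 'no', None if unclear."""
    import re  # local import; this sketch only needs the standard library
    if re.match(CONSOLE_SETUP_YESEXPR, answer):
        return True
    if re.match(CONSOLE_SETUP_NOEXPR, answer):
        return False
    return None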
# Translators: This is prompting for whether the user wants to use a refreshable
# braille display (an external hardware device) or not. It is part of Orca's
# console-based setup.
CONSOLE_SETUP_ENABLE_BRAILLE = _("Enable Braille? Enter y or n: ")
# Translators: If key echo is enabled, Orca will speak the name of a key as the
# user types on the keyboard. This message is presented during Orca's console-
# based setup. If the user wants key echo, they will then be prompted for which
# classes of keys they want echoed.
CONSOLE_SETUP_ENABLE_ECHO_KEY = _("Enable key echo? Enter y or n: ")
# Translators: This is in reference to key echo for normal text entry keys and
# is part of Orca's console-based setup.
CONSOLE_SETUP_ENABLE_ECHO_PRINTABLE_KEYS = \
_("Enable alphanumeric and punctuation keys? Enter y or n: ")
# Translators: This is in reference to key echo for keys such as CTRL, ALT,
# Shift, Insert, etc. It is part of Orca's console-based setup.
CONSOLE_SETUP_ENABLE_ECHO_MODIFIER_KEYS = \
_("Enable modifier keys? Enter y or n: ")
# Translators: This is in reference to key echo for function keys (F1-F12).
# It is part of Orca's console-based setup.
CONSOLE_SETUP_ENABLE_ECHO_FUNCTION_KEYS = \
_("Enable function keys? Enter y or n: ")
# Translators: This is in reference to key echo for keys that perform actions
# such as enter, escape, tab, backspace, delete, arrow keys, page up/down, etc.
# It is part of Orca's console-based setup.
CONSOLE_SETUP_ENABLE_ECHO_ACTION_KEYS = _("Enable action keys? Enter y or n: ")
# Translators: The word echo feature of Orca will speak the word prior to the
# caret when the user types a word delimiter. This message is presented during
# Orca's console-based setup.
CONSOLE_SETUP_ENABLE_ECHO_WORD = _("Enable echo by word? Enter y or n: ")
# Translators: This is prompting for a numerical choice to be typed at Orca's
# console-based setup.
CONSOLE_SETUP_ENTER_CHOICE = _("Enter choice: ")
# Translators: This is letting the user know they input an invalid integer
# value on the command line and is also requesting they enter a valid one.
# This message is part of Orca's console-based setup.
CONSOLE_SETUP_ENTER_VALID_NUMBER = _("Please enter a valid number.")
# Translators: This is letting the user know they input an invalid yes/no value
# on the command line and is also requesting they enter a valid one. This message
# is part of Orca's console-based setup.
CONSOLE_SETUP_ENTER_Y_OR_N = _("Please enter y or n.")
# Translators: Orca has two keyboard layouts which impact what keybindings are
# used to perform Orca commands. The two layouts are "Laptop" and "Desktop".
# This message is part of Orca's console-based setup.
CONSOLE_SETUP_SELECT_KEYBOARD_LAYOUT = _("Select desired keyboard layout.")
# Translators: Orca has two keyboard layouts which impact what keybindings are
# used to perform Orca commands. The two layouts are "Laptop" and "Desktop".
# This message is part of Orca's console-based setup.
CONSOLE_SETUP_KEYBOARD_LAYOUT_DESKTOP = _("1. Desktop")
# Translators: Orca has two keyboard layouts which impact what keybindings are
# used to perform Orca commands. The two layouts are "Laptop" and "Desktop".
# This message is part of Orca's console-based setup.
CONSOLE_SETUP_KEYBOARD_LAYOUT_LAPTOP = _("2. Laptop")
# Translators: This is prompting the user for a numerical choice from a list of
# available speech synthesis engines. It is part of Orca's console-based setup.
CONSOLE_SETUP_SELECT_SPEECH_SERVER = _("Select desired speech server.")
# Translators: The speech system represents what general speech wrapper is going
# to be used. Speech-dispatcher is an example of a speech system. It provides
# wrappers around specific speech servers (engines). This message is part of
# Orca's console-based setup.
CONSOLE_SETUP_SELECT_SPEECH_SYSTEM = _("Select desired speech system:")
# Translators: This is prompting for a numerical value from a list of choices of
# speech synthesis voices (e.g., male, female, child). This message is part of
# Orca's console-based setup.
CONSOLE_SETUP_SELECT_VOICE = _("Select desired voice:")
# Translators: This message indicates that no working speech servers (speech
# synthesis engines) can be found. It is part of Orca's console-based setup.
CONSOLE_SETUP_SERVERS_NOT_AVAILABLE = _("No servers available.\n")
# Translators: This message indicates that the speech server (speech synthesis
# engine) is not working properly and no voices (e.g., male, female, child) are
# available. This message is part of Orca's console-based setup.
CONSOLE_SETUP_VOICES_NOT_AVAILABLE = _("No voices available.\n")
# Translators: This message indicates that speech synthesis will not be used.
# It is part of Orca's console-based setup.
CONSOLE_SETUP_SPEECH_NOT_USED = _("Speech will not be used.\n")
# Translators: This message is presented at the beginning of Orca's console-
# based setup.
CONSOLE_SETUP_START = _("Screen reader setup.")
# Translators: This message is presented at the completion of Orca's console-
# based setup.
CONSOLE_SETUP_COMPLETE = _("Setup complete. Press Return to continue.")
DATE_FORMAT_LOCALE = "%x"
DATE_FORMAT_NUMBERS_DM = "%d/%m"
DATE_FORMAT_NUMBERS_MD = "%m/%d"
DATE_FORMAT_NUMBERS_DMY = "%d/%m/%Y"
DATE_FORMAT_NUMBERS_MDY = "%m/%d/%Y"
DATE_FORMAT_NUMBERS_YMD = "%Y/%m/%d"
DATE_FORMAT_FULL_DM = "%A, %-d %B"
DATE_FORMAT_FULL_MD = "%A, %B %-d"
DATE_FORMAT_FULL_DMY = "%A, %-d %B, %Y"
DATE_FORMAT_FULL_MDY = "%A, %B %-d, %Y"
DATE_FORMAT_FULL_YMD = "%Y. %B %-d, %A."
DATE_FORMAT_ABBREVIATED_DM = "%a, %-d %b"
DATE_FORMAT_ABBREVIATED_MD = "%a, %b %-d"
DATE_FORMAT_ABBREVIATED_DMY = "%a, %-d %b, %Y"
DATE_FORMAT_ABBREVIATED_MDY = "%a, %b %-d, %Y"
DATE_FORMAT_ABBREVIATED_YMD = "%Y. %b %-d, %a."
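# The following is a minimal illustrative sketch (not part of Orca's actual
# date presentation code) of how the strftime-style formats above might be
# applied. Note that '%-d' (day without a leading zero) is a glibc extension.
# The helper name is hypothetical.
def _example_format_today(date_format=DATE_FORMAT_FULL_MDY):
    """Return today's date rendered with one of the formats above."""
    import datetime  # local import; this sketch only needs the standard library
    return datetime.date.today().strftime(date_format)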
# Translators: The "default" button in a dialog box is the button that gets
# activated when Enter is pressed anywhere within that dialog box.
DEFAULT_BUTTON_IS = _("Default button is %s")
# Translators: This string is part of the presentation of an item that includes
# one or several consecutive subscripted characters. For example, 'X' followed
# by 'subscript 2' followed by 'subscript 3' should be presented to the user as
# 'X subscript 23'.
DIGITS_SUBSCRIPT = _(" subscript %s")
# Translators: This string is part of the presentation of an item that includes
# one or several consecutive superscripted characters. For example, 'X' followed
# by 'superscript 2' followed by 'superscript 3' should be presented to the user
# as 'X superscript 23'.
DIGITS_SUPERSCRIPT = _(" superscript %s")
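# The following is a minimal illustrative sketch (not part of Orca's actual
# text presentation code) of the collapsing described above: a run of
# consecutive subscripted digits is presented once. Note the leading space in
# DIGITS_SUBSCRIPT, which makes plain concatenation read naturally. The helper
# name is hypothetical.
def _example_present_subscript(base, subscript_digits):
    """e.g. _example_present_subscript('X', '23') -> 'X subscript 23'"""
    return base + (DIGITS_SUBSCRIPT % subscript_digits)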
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects the entire
# document by pressing Ctrl+A.
DOCUMENT_SELECTED_ALL = _("entire document selected")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the end of the document by pressing Ctrl+Shift+End.
DOCUMENT_SELECTED_DOWN = _("document selected from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects previously
# selected text by pressing Ctrl+Shift+End.
DOCUMENT_UNSELECTED_DOWN = _("document unselected from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the start of the document by pressing Ctrl+Shift+Home.
DOCUMENT_SELECTED_UP = _("document selected to cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects previously
# selected text by pressing Ctrl+Shift+Home.
DOCUMENT_UNSELECTED_UP = _("document unselected to cursor position")
# Translators: Orca allows you to dynamically define which row of a spreadsheet
# or table should be treated as containing column headers. This message is
# presented when the user sets the row to a particular row number.
DYNAMIC_COLUMN_HEADER_SET = _("Dynamic column header set for row %d")
# Translators: Orca allows you to dynamically define which row of a spreadsheet
# or table should be treated as containing column headers. This message is
# presented when the user unsets the row so it is no longer treated as if it
# contained column headers.
DYNAMIC_COLUMN_HEADER_CLEARED = _("Dynamic column header cleared.")
# Translators: Orca allows you to dynamically define which column of a
# spreadsheet or table should be treated as containing row headers. This
# message is presented when the user sets the column to a particular column
# number.
DYNAMIC_ROW_HEADER_SET = _("Dynamic row header set for column %s")
# Translators: Orca allows you to dynamically define which column of a
# spreadsheet or table should be treated as containing row headers. This
# message is presented when the user unsets the column so it is no longer
# treated as if it contained row headers.
DYNAMIC_ROW_HEADER_CLEARED = _("Dynamic row header cleared.")
# Translators: this is used to announce that the current input line in a
# spreadsheet is blank/empty.
EMPTY = _("empty")
# Translators: This is the size of a file in kilobytes
FILE_SIZE_KB = _("%.2f kilobytes")
# Translators: This is the size of a file in megabytes
FILE_SIZE_MB = _("%.2f megabytes")
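# The following is a minimal illustrative sketch (not part of Orca's actual
# file search code) of how a size in bytes might be rendered with the two
# strings above. The helper name and the 10**6 threshold are assumptions made
# for illustration.
def _example_format_file_size(size_in_bytes):
    """Return a localized, human-readable file size string."""
    if size_in_bytes < 10**6:
        return FILE_SIZE_KB % (size_in_bytes / 10**3)
    return FILE_SIZE_MB % (size_in_bytes / 10**6)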
# Translators: This message is presented to the user after performing a file
# search to indicate there were no matches.
FILES_NOT_FOUND = _("No files found.")
# Translators: the 'flat review' feature of Orca allows the blind user to
# explore the text in a window in a 2D fashion. That is, Orca treats all
# the text from all objects in a window (e.g., buttons, labels, etc.) as a
# sequence of words in a sequence of lines. This message is presented to
# let the user know that he/she successfully appended the contents under
# flat review onto the existing contents of the clipboard.
FLAT_REVIEW_APPENDED = _("Appended contents to clipboard.")
# Translators: the 'flat review' feature of Orca allows the blind user to
# explore the text in a window in a 2D fashion. That is, Orca treats all
# the text from all objects in a window (e.g., buttons, labels, etc.) as a
# sequence of words in a sequence of lines. This message is presented to
# let the user know that he/she successfully copied the contents under flat
# review to the clipboard.
FLAT_REVIEW_COPIED = _("Copied contents to clipboard.")
# Translators: the 'flat review' feature of Orca allows the blind user to
# explore the text in a window in a 2D fashion. That is, Orca treats all
# the text from all objects in a window (e.g., buttons, labels, etc.) as a
# sequence of words in a sequence of lines. This message is presented to
# let the user know that he/she attempted to use a flat review command when
# not using flat review.
FLAT_REVIEW_NOT_IN = _("Not using flat review.")
# Translators: the 'flat review' feature of Orca allows the blind user to
# explore the text in a window in a 2D fashion. That is, Orca treats all
# the text from all objects in a window (e.g., buttons, labels, etc.) as a
# sequence of words in a sequence of lines. This message is presented to
# let the user know he/she just entered flat review.
FLAT_REVIEW_START = _("Entering flat review.")
# Translators: the 'flat review' feature of Orca allows the blind user to
# explore the text in a window in a 2D fashion. That is, Orca treats all
# the text from all objects in a window (e.g., buttons, labels, etc.) as a
# sequence of words in a sequence of lines. This message is presented to
# let the user know he/she just exited flat review.
FLAT_REVIEW_STOP = _("Leaving flat review.")
# Translators: this means a particular cell in a spreadsheet has a formula
# (e.g., "=sum(a1:d1)")
HAS_FORMULA = _("has formula")
# Translators: The following string is spoken to let the user know that he/she
# is on a link within an image map. An image map is an image/graphic which has
# been divided into regions. Each region can be clicked on and has an associated
# link. Please see http://en.wikipedia.org/wiki/Imagemap for more information
# and examples.
IMAGE_MAP_LINK = _("image map link")
# Translators: This is a spoken and/or brailled message letting the user know
# that the key combination (e.g., Ctrl+Alt+f) they just entered has already been
# bound to another command and is thus unavailable. The string substituted in is
# the name of the command which already has the binding.
KB_ALREADY_BOUND = _("The key entered is already bound to %s")
# Translators: This is a spoken and/or brailled message letting the user know
# that Orca has recorded a new key combination (e.g. Alt+Ctrl+g) as a result of
# their input. The string substituted in is the new key combination.
KB_CAPTURED = _("Key captured: %s. Press enter to confirm.")
# Translators: This is a spoken and/or brailled message letting the user know
# that Orca has assigned a new key combination (e.g. Alt+Ctrl+g) as a result of
# their input. The string substituted in is the new key combination.
KB_CAPTURED_CONFIRMATION = _("The new key is: %s")
# Translators: This is a spoken and/or brailled message letting the user know
# Orca is about to delete an existing key combination (e.g. Alt+Ctrl+g) as a
# result of their input.
KB_DELETED = _("Key binding deleted. Press enter to confirm.")
# Translators: This is a spoken and/or brailled message letting the user know
# Orca has deleted an existing key combination (e.g. Alt+Ctrl+g) as a result of
# their input.
KB_DELETED_CONFIRMATION = _("The keybinding has been removed.")
# Translators: This is a spoken and/or brailled message asking the user to press
# a new key combination (e.g., Alt+Ctrl+g) to create a new key binding for an
# Orca command.
KB_ENTER_NEW_KEY = _("enter new key")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command. The
# following string is a brief message which will be presented to the user who
# is cycling amongst the various echo options.
KEY_ECHO_KEY_BRIEF = C_("key echo", "key")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command.
KEY_ECHO_KEY_FULL = _("Key echo set to key.")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command. The
# following string is a brief message which will be presented to the user who
# is cycling amongst the various echo options.
KEY_ECHO_NONE_BRIEF = C_("key echo", "None")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command.
KEY_ECHO_NONE_FULL = _("Key echo set to None.")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command. The
# following string is a brief message which will be presented to the user who
# is cycling amongst the various echo options.
KEY_ECHO_KEY_AND_WORD_BRIEF = C_("key echo", "key and word")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command.
KEY_ECHO_KEY_AND_WORD_FULL = _("Key echo set to key and word.")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command. The
# following string is a brief message which will be presented to the user who
# is cycling amongst the various echo options.
KEY_ECHO_SENTENCE_BRIEF = C_("key echo", "sentence")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command.
KEY_ECHO_SENTENCE_FULL = _("Key echo set to sentence.")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command. The
# following string is a brief message which will be presented to the user who
# is cycling amongst the various echo options.
KEY_ECHO_WORD_BRIEF = C_("key echo", "word")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command.
KEY_ECHO_WORD_FULL = _("Key echo set to word.")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command. The
# following string is a brief message which will be presented to the user who
# is cycling amongst the various echo options.
KEY_ECHO_WORD_AND_SENTENCE_BRIEF = C_("key echo", "word and sentence")
# Translators: Orca has an "echo" setting which allows the user to configure
# what is spoken in response to a key press. Given a user who typed "Hello
# world.":
# - key echo: "H e l l o space w o r l d period"
# - word echo: "Hello" spoken when the space is pressed;
# "world" spoken when the period is pressed.
# - sentence echo: "Hello world" spoken when the period
# is pressed.
# A user can choose to have no echo, one type of echo, or multiple types of
# echo and can cycle through the various levels quickly via a command.
KEY_ECHO_WORD_AND_SENTENCE_FULL = _("Key echo set to word and sentence.")
# Translators: Inaccessible means that the application cannot be read by Orca.
# This usually means the application is not friendly to the assistive technology
# infrastructure.
INACCESSIBLE = _("inaccessible")
# Translators: This brief message indicates that indentation and
# justification will not be spoken.
INDENTATION_JUSTIFICATION_OFF_BRIEF = \
C_("indentation and justification", "Disabled")
# Translators: This detailed message indicates that indentation and
# justification will not be spoken.
INDENTATION_JUSTIFICATION_OFF_FULL = \
_("Speaking of indentation and justification disabled.")
# Translators: This brief message indicates that indentation and
# justification will be spoken.
INDENTATION_JUSTIFICATION_ON_BRIEF = \
C_("indentation and justification", "Enabled")
# Translators: This detailed message indicates that indentation and
# justification will be spoken.
INDENTATION_JUSTIFICATION_ON_FULL = \
_("Speaking of indentation and justification enabled.")
# Translators: Orca has a "Learn Mode" that will allow the user to type any key
# on the keyboard and hear what the effects of that key would be. The effects
# might be what Orca would do if it had a handler for the particular key
# combination, or they might just be to echo the name of the key if Orca doesn't
# have a handler. This message is what is presented on the braille display when
# entering Learn Mode.
LEARN_MODE_START_BRAILLE = _("Learn mode. Press escape to exit.")
# Translators: Orca has a "Learn Mode" that will allow the user to type any key
# on the keyboard and hear what the effects of that key would be. The effects
# might be what Orca would do if it had a handler for the particular key
# combination, or they might just be to echo the name of the key if Orca doesn't
# have a handler. This message is what is spoken to the user when entering Learn
# Mode.
LEARN_MODE_START_SPEECH = \
_("Entering learn mode. Press any key to hear its function. " \
"To view the screen reader's documentation, press F1. " \
"To get a list of the screen reader's default shortcuts, press F2. " \
"To get a list of the screen reader's shortcuts for the current application, " \
"press F3. " \
"To exit learn mode, press the escape key.")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the end of the line by pressing Shift+Down.
LINE_SELECTED_DOWN = _("line selected down from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the start of the line by pressing Shift+Up.
LINE_SELECTED_UP = _("line selected up from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects previously
# selected text from the current location to the end of the line by
# pressing Shift+Down.
LINE_UNSELECTED_DOWN = _("line unselected down from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects previously
# selected text from the current location to the start of the line by
# pressing Shift+Up.
LINE_UNSELECTED_UP = _("line unselected up from cursor position")
# Translators: Orca has a "Learn Mode" that will allow the user to type any key
# on the keyboard and hear what the effects of that key would be. The effects
# might be what Orca would do if it had a handler for the particular key
# combination, or they might just be to echo the name of the key if Orca doesn't
# have a handler. This message is what is presented in speech and braille when
# exiting Learn Mode.
LEARN_MODE_STOP = _("Exiting learn mode.")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the start of the line by pressing Ctrl+Shift+Page_Up.
LINE_SELECTED_LEFT = _("line selected from start to previous cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the end of the line by pressing Ctrl+Shift+Page_Down.
LINE_SELECTED_RIGHT = _("line selected to end from previous cursor position")
# Translators: this indicates that this piece of text is a hypertext link.
LINK = _("link")
# Translators: this is an indication that a given link points to an object
# that is on the same page.
LINK_SAME_PAGE = _("same page")
# Translators: this is an indication that a given link points to an object
# that is at the same site (but not on the same page as the link).
LINK_SAME_SITE = _("same site")
# Translators: this is an indication that a given link points to an object
# that is at a different site than that of the link.
LINK_DIFFERENT_SITE = _("different site")
# Translators: this refers to a link to a file, where the first item is the
# protocol (ftp, ftps, or file) and the second item the name of the file being
# linked to.
LINK_TO_FILE = _("%(uri)s link to %(file)s")
# Translators: this message conveys the protocol of a link eg. http, mailto.
LINK_WITH_PROTOCOL = _("%s link")
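# The following is a minimal illustrative sketch (not part of Orca's actual
# link presentation code) showing the two substitution styles above: named
# %-substitution for LINK_TO_FILE and positional for LINK_WITH_PROTOCOL. The
# helper name and example values are hypothetical.
def _example_describe_link(uri, file_name=None):
    """e.g. _example_describe_link('ftp', 'notes.txt') -> 'ftp link to notes.txt'"""
    if file_name is not None:
        return LINK_TO_FILE % {"uri": uri, "file": file_name}
    return LINK_WITH_PROTOCOL % uri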
# Translators: The following string instructs the user how to navigate amongst
# the list of commands presented in learn mode, as well as how to exit the list
# when finished.
LIST_NAVIGATION = \
_("Use Up and Down Arrow to navigate the list. Press Escape to exit.")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# The "politeness" level is an indication of when the user wishes to be notified
# about a change to live region content. Examples include: never ("off"), when
# idle ("polite"), and when there is a change ("assertive"). Orca has several
# features to facilitate accessing live regions. This message is presented to
# inform the user that Orca's live region's "politeness" level has changed to
# "off" for all of the live regions.
LIVE_REGIONS_ALL_OFF = _("All live regions set to off")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# The "politeness" level is an indication of when the user wishes to be notified
# about a change to live region content. Examples include: never ("off"), when
# idle ("polite"), and when there is a change ("assertive"). Orca has several
# features to facilitate accessing live regions. This message is presented to
# inform the user that Orca's live region's "politeness" level for all live
# regions has been restored to their original values.
LIVE_REGIONS_ALL_RESTORED = _("live regions politeness levels restored")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# The "politeness" level is an indication of when the user wishes to be notified
# about a change to live region content. Examples include: never ("off"), when
# idle ("polite"), and when there is a change ("assertive"). Orca has several
# features to facilitate accessing live regions. This message is presented to
# inform the user of the "politeness" level for the current live region.
LIVE_REGIONS_LEVEL = _("politeness level %s")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# The "politeness" level is an indication of when the user wishes to be notified
# about a change to live region content. Examples include: never ("off"), when
# idle ("polite"), and when there is a change ("assertive"). Orca has several
# features to facilitate accessing live regions. This message is presented to
# inform the user that Orca's live region's "politeness" level has changed for
# the current live region.
LIVE_REGIONS_LEVEL_ASSERTIVE = _("setting live region to assertive")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# The "politeness" level is an indication of when the user wishes to be notified
# about a change to live region content. Examples include: never ("off"), when
# idle ("polite"), and when there is a change ("assertive"). Orca has several
# features to facilitate accessing live regions. This message is presented to
# inform the user that Orca's live region's "politeness" level has changed for
# the current live region.
LIVE_REGIONS_LEVEL_OFF = _("setting live region to off")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# The "politeness" level is an indication of when the user wishes to be notified
# about a change to live region content. Examples include: never ("off"), when
# idle ("polite"), and when there is a change ("assertive"). Orca has several
# features to facilitate accessing live regions. This message is presented to
# inform the user that Orca's live region's "politeness" level has changed for
# the current live region.
LIVE_REGIONS_LEVEL_POLITE = _("setting live region to polite")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# The "politeness" level is an indication of when the user wishes to be notified
# about a change to live region content. Examples include: never ("off"), when
# idle ("polite"), and when there is a change ("assertive"). Orca has several
# features to facilitate accessing live regions. This message is presented to
# inform the user that Orca's live region's "politeness" level has changed for
# the current live region.
LIVE_REGIONS_LEVEL_RUDE = _("setting live region to rude")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# Orca has several features to facilitate accessing live regions. This message
# is presented in response to a command that toggles whether or not Orca pays
# attention to changes in live regions. Note that turning off monitoring of live
# events is NOT the same as turning the politeness level to "off". The user can
# opt to have no notifications presented (politeness level of "off") and still
# manually review recent updates to live regions via Orca commands for doing so
# -- as long as the monitoring of live regions is enabled.
LIVE_REGIONS_MONITORING_OFF = _("Live regions monitoring off")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# Orca has several features to facilitate accessing live regions. This message
# is presented in response to a command that toggles whether or not Orca pays
# attention to changes in live regions. Note that turning off monitoring of live
# events is NOT the same as turning the politeness level to "off". The user can
# opt to have no notifications presented (politeness level of "off") and still
# manually review recent updates to live regions via Orca commands for doing so
# -- as long as the monitoring of live regions is enabled.
LIVE_REGIONS_MONITORING_ON = _("Live regions monitoring on")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# Orca has several features to facilitate accessing live regions. This message
# is presented to inform the user that a cached message is not available for
# the current live region.
LIVE_REGIONS_NO_MESSAGE = _("no live message saved")
# Translators: A live region is an area of a web page that is periodically
# updated, e.g. stock ticker. http://www.w3.org/TR/wai-aria/terms#def_liveregion
# Orca has several features to facilitate accessing live regions. This message
# is presented to inform the user that Orca's live region features have been
# turned off.
LIVE_REGIONS_OFF = _("Live region support is off")
# Translators: Orca has a command that allows the user to move the mouse pointer
# to the current object. This is a brief message which will be presented if for
# some reason Orca cannot identify/find the current location.
LOCATION_NOT_FOUND_BRIEF = C_("location", "Not found")
# Translators: Orca has a command that allows the user to move the mouse pointer
# to the current object. This is a detailed message which will be presented if
# for some reason Orca cannot identify/find the current location.
LOCATION_NOT_FOUND_FULL = _("Could not find current location.")
# Translators: This string is used to present the state of a locking key, such
# as Caps Lock. If Caps Lock is "off", then letters typed will appear in
# lowercase; if Caps Lock is "on", they will instead appear in uppercase. This
# string is also applied to Num Lock and potentially will be applied to similar
# keys in the future.
LOCKING_KEY_STATE_OFF = C_("locking key state", "off")
# Translators: This string is used to present the state of a locking key, such
# as Caps Lock. If Caps Lock is "off", then letters typed will appear in
# lowercase; if Caps Lock is "on", they will instead appear in uppercase. This
# string is also applied to Num Lock and potentially will be applied to similar
# keys in the future.
LOCKING_KEY_STATE_ON = C_("locking key state", "on")
# Translators: This is to inform the user of the presence of the red squiggly
# line which indicates that a given word is not spelled correctly.
MISSPELLED = _("misspelled")
# Translators: Orca tries to provide more compelling output of the spell check
# dialog in some applications. The first thing it does is let the user know
# what the misspelled word is.
MISSPELLED_WORD = _("Misspelled word: %s")
# Translators: Orca tries to provide more compelling output of the spell check
# dialog in some applications. The second thing it does is give the phrase
# containing the misspelled word in the document. This is known as the context.
MISSPELLED_WORD_CONTEXT = _("Context is %s")
# Translators: Orca has a number of commands that override the default
# behavior within an application. For instance, on a web page, "h" moves
# you to the next heading. What should happen when you press an "h" in
# an entry on a web page depends: If you want to resume reading content,
# "h" should move to the next heading; if you want to enter text, "h"
# should not move you to the next heading. Similarly, if you are
# at the bottom of an entry and press Down arrow, should you leave the
# entry? Again, it depends on if you want to resume reading content or
# if you are editing the text in the entry. Because Orca doesn't know
# what you want to do, it has two modes: In browse mode, Orca treats
# key presses as commands to read the content; in focus mode, Orca treats
# key presses as something that should be handled by the focused widget.
# This string is the message presented when Orca switches to browse mode.
MODE_BROWSE = _("Browse mode")
# Translators: Orca has a number of commands that override the default
# behavior within an application. For instance, on a web page, "h" moves
# you to the next heading. What should happen when you press an "h" in
# an entry on a web page depends: If you want to resume reading content,
# "h" should move to the next heading; if you want to enter text, "h"
# should not move you to the next heading. Similarly, if you are
# at the bottom of an entry and press Down arrow, should you leave the
# entry? Again, it depends on if you want to resume reading content or
# if you are editing the text in the entry. Because Orca doesn't know
# what you want to do, it has two modes: In browse mode, Orca treats
# key presses as commands to read the content; in focus mode, Orca treats
# key presses as something that should be handled by the focused widget.
# This string is the message presented when Orca switches to focus mode.
MODE_FOCUS = _("Focus mode")
# Translators: (Please see the previous, detailed translator notes about
# Focus mode and Browse mode.) In order to minimize the amount of work Orca
# users need to do to switch between focus mode and browse mode, Orca attempts
# to automatically switch to the mode which is appropriate to the current
# web element. Sometimes, however, this automatic mode switching is not what
# the user wants. A good example being web apps which have their own keyboard
# navigation and user interaction model. As a result, Orca has a command which
# enables setting a "sticky" focus mode which disables all automatic toggling.
# This string is the message presented when Orca switches to sticky focus mode.
MODE_FOCUS_IS_STICKY = _("Focus mode is sticky.")
# Translators: Hovering the mouse over certain objects on a web page causes a
# new object to appear such as a pop-up menu. Orca has a command which will
# move the user to the object which just appeared as a result of the user
# hovering the mouse. If this command fails, Orca will present this message.
MOUSE_OVER_NOT_FOUND = _("Mouse over object not found.")
# Translators: Orca has a command that presents a list of structural navigation
# objects in a dialog box so that users can navigate more quickly than they
# could with native keyboard navigation. This is a message that will be
# presented to the user when an error (such as the operation timing out) kept us
# from getting these objects.
NAVIGATION_DIALOG_ERROR = _("Error: Could not create list of objects.")
# Translators: This message describes a list item in a document. Nesting level
# is how "deep" the item is (e.g., a level of 2 represents a list item inside a
# list that's inside another list).
NESTING_LEVEL = _("Nesting level %d")
# Translators: Orca has a command that moves the mouse pointer to the current
# location on a web page. If moving the mouse pointer caused an item to appear
# such as a pop-up menu, we want to present that fact.
NEW_ITEM_ADDED = _("New item has been added")
# Translators: This is intended to be a short phrase to present the fact that
# no accessible component has keyboard focus.
NO_FOCUS = _("No focus")
# Translators: This message presents the fact that no accessible application
# has keyboard focus.
NO_FOCUSED_APPLICATION = _("No application has focus.")
# Translators: This is for navigating document content by moving from blockquote
# to blockquote. This is a detailed message which will be presented to the user
# if no more blockquotes can be found.
NO_MORE_BLOCKQUOTES = _("No more blockquotes.")
# Translators: This is for navigating document content by moving from button
# to button. This is a detailed message which will be presented to the user
# if no more buttons can be found.
NO_MORE_BUTTONS = _("No more buttons.")
# Translators: This is for navigating document content by moving from check
# box to check box. This is a detailed message which will be presented to the
# user if no more check boxes can be found.
NO_MORE_CHECK_BOXES = _("No more check boxes.")
# Translators: This is for navigating document content by moving from 'large
# object' to 'large object'. A 'large object' is a logical chunk of text,
# such as a paragraph, a list, a table, etc. This is a detailed message which
# will be presented to the user if no more large objects can be found.
NO_MORE_CHUNKS = _("No more large objects.")
# Translators: This is for navigating document content by moving amongst web
# elements which have an "onClick" action. This is a detailed message which
# will be presented to the user if no more clickable elements can be found.
NO_MORE_CLICKABLES = _("No more clickables.")
# Translators: This is for navigating document content by moving from combo
# box to combo box. This is a detailed message which will be presented to the
# user if no more combo boxes can be found.
NO_MORE_COMBO_BOXES = _("No more combo boxes.")
# Translators: This is for navigating document content by moving from entry
# to entry. This is a detailed message which will be presented to the user
# if no more entries can be found.
NO_MORE_ENTRIES = _("No more entries.")
# Translators: This is for navigating document content by moving from form
# field to form field. This is a detailed message which will be presented to
# the user if no more form fields can be found.
NO_MORE_FORM_FIELDS = _("No more form fields.")
# Translators: This is for navigating document content by moving from heading
# to heading. This is a detailed message which will be presented to the user
# if no more headings can be found.
NO_MORE_HEADINGS = _("No more headings.")
# Translators: This is for navigating document content by moving from heading
# to heading at a particular level (i.e. only <h1> or only <h2>, etc.). This
# is a detailed message which will be presented to the user if no more headings
# at the desired level can be found.
NO_MORE_HEADINGS_AT_LEVEL = _("No more headings at level %d.")
# Translators: This is for navigating document content by moving from image
# to image. This is a detailed message which will be presented to the user
# if no more images can be found.
NO_MORE_IMAGES = _("No more images.")
# Translators: this is for navigating to the previous ARIA role landmark.
# ARIA role landmarks are the W3C defined HTML tag attribute 'role' used to
# identify important parts of a web page like banners, main content, search,
# etc. This is an indication that one was not found.
NO_LANDMARK_FOUND = _("No landmark found.")
# Translators: This is for navigating document content by moving from link to
# link (regardless of visited state). This is a detailed message which will be
# presented to the user if no more links can be found.
NO_MORE_LINKS = _("No more links.")
# Translators: This is for navigating document content by moving from bulleted/
# numbered list to bulleted/numbered list. This is a detailed message which will
# be presented to the user if no more lists can be found.
NO_MORE_LISTS = _("No more lists.")
# Translators: This is for navigating document content by moving from bulleted/
# numbered list item to bulleted/numbered list item. This is a detailed message
# which will be presented to the user if no more list items can be found.
NO_MORE_LIST_ITEMS = _("No more list items.")
# Translators: This is for navigating document content by moving from live
# region to live region. A live region is an area of a web page that is
# periodically updated, e.g. stock ticker. This is a detailed message which
# will be presented to the user if no more live regions can be found. For
# more info, see http://www.w3.org/TR/wai-aria/terms#def_liveregion
NO_MORE_LIVE_REGIONS = _("No more live regions.")
# Translators: This is for navigating document content by moving from paragraph
# to paragraph. This is a detailed message which will be presented to the user
# if no more paragraphs can be found.
NO_MORE_PARAGRAPHS = _("No more paragraphs.")
# Translators: This is for navigating document content by moving from radio
# button to radio button. This is a detailed message which will be presented to
# the user if no more radio buttons can be found.
NO_MORE_RADIO_BUTTONS = _("No more radio buttons.")
# Translators: This is for navigating document content by moving from separator
# to separator (e.g. <hr> tags). This is a detailed message which will be
# presented to the user if no more separators can be found.
NO_MORE_SEPARATORS = _("No more separators.")
# Translators: This is for navigating document content by moving from table
# to table. This is a detailed message which will be presented to the user if
# no more tables can be found.
NO_MORE_TABLES = _("No more tables.")
# Translators: This is for navigating document content by moving from unvisited
# link to unvisited link. This is a detailed message which will be presented to
# the user if no more unvisited links can be found.
NO_MORE_UNVISITED_LINKS = _("No more unvisited links.")
# Translators: This is for navigating document content by moving from visited
# link to visited link. This is a detailed message which will be presented to
# the user if no more visited links can be found.
NO_MORE_VISITED_LINKS = _("No more visited links.")
# Translators: This message alerts the user to the fact that what will be
# presented next came from a notification.
NOTIFICATION = _("Notification")
# Translators: This is a brief message presented to the user when the bottom of
# the list of notifications is reached.
NOTIFICATION_LIST_BOTTOM = C_("notification", "Bottom")
# Translators: This message is presented to the user to confirm the list of
# notifications mode is being exited.
NOTIFICATION_LIST_EXIT = _("Exiting list notification messages mode.")
# Translators: This is a brief message presented to the user when the top of the
# list of notifications is reached.
NOTIFICATION_LIST_TOP = C_("notification", "Top")
# Translators: This is a tutorial message for the notification list mode.
NOTIFICATION_LIST_HELP = _("Press h for help.\n")
# Translators: The following string instructs the user how to navigate within
# the list notifications mode.
NOTIFICATION_LIST_TUTORIAL = \
_("Use Up, Down, Home or End to navigate in the list.\n"\
"Press Escape to exit.\n"\
"Press Space to repeat the last message read.\n"\
"Press one digit to read a specific message.\n")
# Translators: This message is presented to the user when the notifications list
# is empty.
NOTIFICATION_NO_MESSAGES = _("No notification messages")
# Translators: This brief message is presented to indicate the state of widgets
# (checkboxes, push buttons, toggle buttons) on a toolbar which are associated
# with text formatting (bold, italics, underlining, justification, etc.).
OFF = _("off")
# Translators: This brief message is presented to indicate the state of widgets
# (checkboxes, push buttons, toggle buttons) on a toolbar which are associated
# with text formatting (bold, italics, underlining, justification, etc.).
ON = _("on")
# Translators: This message is presented to the user when a web page or similar
# item has started loading.
PAGE_LOADING_START = _("Loading. Please wait.")
# Translators: This message is presented to the user when a web page or similar
# item has finished loading.
PAGE_LOADING_END = _("Finished loading.")
# Translators: This message is presented to the user when a web page or similar
# item has finished loading. The string substitution is for the name of the
# object which has just finished loading (most likely the page's title).
PAGE_LOADING_END_NAMED = _("Finished loading %s.")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the end of the page by pressing Shift+Page_Down.
PAGE_SELECTED_DOWN = _("page selected from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the start of the page by pressing Shift+Page_Up.
PAGE_SELECTED_UP = _("page selected to cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects a previously
# selected page by pressing Shift+Page_Down.
PAGE_UNSELECTED_DOWN = _("page unselected from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects a previously
# selected page by pressing Shift+Page_Up.
PAGE_UNSELECTED_UP = _("page unselected to cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the end of the paragraph by pressing Ctrl+Shift+Down.
PARAGRAPH_SELECTED_DOWN = _("paragraph selected down from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user selects from the
# current location to the start of the paragraph by pressing Ctrl+Shift+UP.
PARAGRAPH_SELECTED_UP = _("paragraph selected up from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects previously
# selected text from the current location to the end of the paragraph by
# pressing Ctrl+Shift+Down.
PARAGRAPH_UNSELECTED_DOWN = _("paragraph unselected down from cursor position")
# Translators: when the user selects (highlights) or unselects text in a
# document, Orca will speak information about what they have selected or
# unselected. This message is presented when the user unselects previously
# selected text from the current location to the start of the paragraph by
# pressing Ctrl+Shift+UP.
PARAGRAPH_UNSELECTED_UP = _("paragraph unselected up from cursor position")
# Translators: This message appears in a warning dialog when the user performs
# the command to get into Orca's preferences dialog when the preferences dialog
# is already open.
PREFERENCES_WARNING_DIALOG = \
_('You already have an instance of an Orca preferences dialog ' \
'open.\nPlease close it before opening a new one.')
# Translators: This message is an indication of the position of the focused
# slide and the total number of slides in the presentation.
PRESENTATION_SLIDE_POSITION = _("slide %(position)d of %(count)d")
# Translators: This is a detailed message which will be presented as the user
# cycles amongst his/her saved profiles. A "profile" is a collection of settings
# which apply to a given task, such as a "Spanish" profile which would use
# Spanish text-to-speech and Spanish braille and selected when reading Spanish
# content. The string representing the profile name is created by the user.
PROFILE_CHANGED = _("Profile set to %s.")
# Translators: This is an error message presented when the user attempts to
# cycle among his/her saved profiles, but no profiles can be found. A profile
# is a collection of settings which apply to a given task, such as a "Spanish"
# profile which would use Spanish text-to-speech and Spanish braille and
# selected when reading Spanish content.
PROFILE_NOT_FOUND = _("No profiles found.")
# Translators: this is an index value so that we can present value changes
# regarding a specific progress bar in environments where there are multiple
# progress bars (e.g. in the Firefox downloads dialog).
PROGRESS_BAR_NUMBER = _("Progress bar %d.")
# Translators: This brief message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_ALL_BRIEF = C_("spoken punctuation", "All")
# Translators: This detailed message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_ALL_FULL = _("Punctuation level set to all.")
# Translators: This brief message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_MOST_BRIEF = C_("spoken punctuation", "Most")
# Translators: This detailed message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_MOST_FULL = _("Punctuation level set to most.")
# Translators: This brief message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_NONE_BRIEF = C_("spoken punctuation", "None")
# Translators: This detailed message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_NONE_FULL = _("Punctuation level set to none.")
# Translators: This brief message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_SOME_BRIEF = C_("spoken punctuation", "Some")
# Translators: This detailed message will be presented as the user cycles
# through the different levels of spoken punctuation. The options are:
# All punctuation marks will be spoken, None will be spoken, Most will be
# spoken, or Some will be spoken.
PUNCTUATION_SOME_FULL = _("Punctuation level set to some.")
# Translators: This message is presented to indicate that a search has begun
# or is still taking place.
SEARCHING = _("Searching.")
# Translators: This message is presented to indicate a search executed by the
# user has been completed.
SEARCH_COMPLETE = _("Search complete.")
# Translators: This message is presented to the user when Orca's preferences
# have been reloaded.
SETTINGS_RELOADED = _("Screen reader settings reloaded.")
# Translators: This message is presented to the user when speech synthesis
# has been temporarily turned off.
SPEECH_DISABLED = _("Speech disabled.")
# Translators: This message is presented to the user when speech synthesis
# has been turned back on.
SPEECH_ENABLED = _("Speech enabled.")
# Translators: This string announces speech rate change.
SPEECH_FASTER = _("faster.")
# Translators: This string announces speech rate change.
SPEECH_SLOWER = _("slower.")
# Translators: This string announces speech pitch change.
SPEECH_HIGHER = _("higher.")
# Translators: This string announces speech pitch change.
SPEECH_LOWER = _("lower.")
# Translators: Orca's verbosity levels control how much (or how little)
# Orca will speak when presenting objects as the user navigates within
# applications and reads content. The two levels are "brief" and "verbose".
# The following string is a message spoken to the user upon toggling
# this setting via command.
SPEECH_VERBOSITY_BRIEF = C_("Speech", "Verbosity level: brief")
# Translators: Orca's verbosity levels control how much (or how little)
# Orca will speak when presenting objects as the user navigates within
# applications and reads content. The two levels are "brief" and "verbose".
# The following string is a message spoken to the user upon toggling
# this setting via command.
SPEECH_VERBOSITY_VERBOSE = C_("Speech", "Verbosity level: verbose")
# Translators: We replace the ellipses (both manual and UTF-8) with a spoken
# string. The extra space you see at the beginning is because we need the
# speech synthesis engine to speak the new string well. For example, "Open..."
# turns into "Open dot dot dot".
SPOKEN_ELLIPSIS = _(" dot dot dot")
# Translators: This message is presented to the user when Orca is launched.
START_ORCA = _("Screen reader on.")
# Translators: This message is presented to the user when Orca is quit.
STOP_ORCA = _("Screen reader off.")
# Translators: This message means speech synthesis is not installed or working.
SPEECH_UNAVAILABLE = _("Speech is unavailable.")
# Translators: the Orca "Find" dialog allows a user to search for text in a
# window and then move focus to that text. For example, they may want to find
# the "OK" button. This message lets them know a string they were searching
# for was not found.
STRING_NOT_FOUND = _("string not found")
# Translators: The structural navigation keys are designed to move the caret
# around document content by object type. H moves you to the next heading,
# Shift H to the previous heading, T to the next table, and so on. Some users
# prefer to turn this off to use Firefox's search when typing feature. This
# message is presented when the user toggles the structural navigation feature
# of Orca. It should be a brief informative message.
STRUCTURAL_NAVIGATION_KEYS_OFF = _("Structural navigation keys off.")
# Translators: The structural navigation keys are designed to move the caret
# around document content by object type. H moves you to the next heading,
# Shift H to the previous heading, T to the next table, and so on. Some users
# prefer to turn this off to use Firefox's search when typing feature. This
# message is presented when the user toggles the structural navigation feature
# of Orca. It should be a brief informative message.
STRUCTURAL_NAVIGATION_KEYS_ON = _("Structural navigation keys on.")
# Translators: Orca has a command that allows the user to move to the next
# structural navigation object. In Orca, "structural navigation" refers to
# quickly moving through a document by jumping amongst objects of a given
# type, such as from link to link, or from heading to heading, or from form
# field to form field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be found.
STRUCTURAL_NAVIGATION_NOT_FOUND = C_("structural navigation", "Not found")
# Translators: This message describes the (row, col) position of a table cell.
TABLE_CELL_COORDINATES = _("Row %(row)d, column %(column)d.")
# Translators: This message is presented to indicate the user is in the last
# cell of a table in a document.
TABLE_END = _("End of table")
# Translators: This message is presented when a user is navigating within a
# table and then navigates out of it.
TABLE_LEAVING = _("leaving table.")
# Translators: When users are navigating a table, they sometimes want the entire
# row of a table read; other times they want just the current cell presented.
# This string is a message presented to the user when this setting is toggled.
TABLE_MODE_CELL = _("Speak cell")
# Translators: When users are navigating a table, they sometimes want the entire
# row of a table read; other times they want just the current cell presented.
# This string is a message presented to the user when this setting is toggled.
TABLE_MODE_ROW = _("Speak row")
# Translators: a uniform table is one in which each table cell occupies one row
# and one column (i.e. a perfect grid). In contrast, a non-uniform table is one
# in which at least one table cell occupies more than one row and/or column.
TABLE_NON_UNIFORM = _("Non-uniform")
# Translators: This is for navigating document content by moving from table cell
# to table cell. If the user gives a table navigation command but is not in a
# table, this message is presented.
TABLE_NOT_IN_A = _("Not in a table.")
# Translators: This is a message presented to users when the columns in a table
# have been reordered.
TABLE_REORDERED_COLUMNS = _("Columns reordered")
# Translators: This is a message presented to users when the rows in a table
# have been reordered.
TABLE_REORDERED_ROWS = _("Rows reordered")
# Translators: this is in reference to a column in a table. The substitution
# is the index (e.g. the first column is "column 1").
TABLE_COLUMN = _("column %d")
# Translators: this is in reference to a column in a table. If the user is in
# the first column of a table with five columns, the position is "column 1 of 5"
TABLE_COLUMN_DETAILED = _("column %(index)d of %(total)d")
# Translators: This is for navigating document content by moving from table cell
# to table cell. This is the message presented when the user attempts to move to
# the cell below the current cell and is already in the last row.
TABLE_COLUMN_BOTTOM = _("Bottom of column.")
# Translators: This is for navigating document content by moving from table cell
# to table cell. This is the message presented when the user attempts to move to
# the cell above the current cell and is already in the first row.
TABLE_COLUMN_TOP = _("Top of column.")
# Translators: this is in reference to a row in a table. The substitution is
# the index (e.g. the first row is "row 1").
TABLE_ROW = _("row %d")
# Translators: this is in reference to a row in a table. If the user is in
# the first row of a table with five rows, the position is "row 1 of 5"
TABLE_ROW_DETAILED = _("row %(index)d of %(total)d")
# Translators: This is for navigating document content by moving from table cell
# to table cell. This is the message presented when the user attempts to move to
# the left of the current cell and is already in the first column.
TABLE_ROW_BEGINNING = _("Beginning of row.")
# Translators: This is for navigating document content by moving from table cell
# to table cell. This is the message presented when the user attempts to move to
# the right of the current cell and is already in the last column.
TABLE_ROW_END = _("End of row.")
# Translators: This message is presented to the user to confirm that he/she just
# deleted a table row.
TABLE_ROW_DELETED = _("Row deleted.")
# Translators: This message is presented to the user to confirm that he/she just
# deleted the last row of a table.
TABLE_ROW_DELETED_FROM_END = _("Last row deleted.")
# Translators: This message is presented to the user to confirm that he/she just
# inserted a table row.
TABLE_ROW_INSERTED = _("Row inserted.")
# Translators: This message is presented to the user to confirm that he/she just
# inserted a table row at the end of the table. This typically happens when the
# user presses Tab from within the last cell of the table.
TABLE_ROW_INSERTED_AT_END = _("Row inserted at the end of the table.")
# Translators: when the user selects (highlights) text in a document, Orca lets
# them know.
TEXT_SELECTED = C_("text", "selected")
# Translators: when the user unselects (un-highlights) text in a document, Orca
# lets them know.
TEXT_UNSELECTED = C_("text", "unselected")
TIME_FORMAT_LOCALE = "%X"
TIME_FORMAT_24_HMS = "%H:%M:%S"
TIME_FORMAT_24_HM = "%H:%M"
# Translators: Orca has a feature to speak the time when the user presses a
# shortcut key. This is one of the alternative formats that the user may wish
# it to be presented with.
TIME_FORMAT_24_HMS_WITH_WORDS = _("%H hours, %M minutes and %S seconds.")
# Translators: Orca has a feature to speak the time when the user presses a
# shortcut key. This is one of the alternative formats that the user may wish
# it to be presented with.
TIME_FORMAT_24_HM_WITH_WORDS = _("%H hours and %M minutes.")
# Translators: this is information about a unicode character reported to the
# user. The value is the unicode number value of this character in hex.
UNICODE = _("Unicode %s")
# Translators: This message presents the Orca version number.
VERSION = _("Screen reader version %s.") % version
# Translators: This is presented when the user has navigated to a line with only
# whitespace characters (space, tab, etc.) on it.
WHITE_SPACE = _("white space")
# Translators: when the user is attempting to locate a particular object and the
# top of a page or list is reached without that object being found, we "wrap" to
# the bottom and continue looking upwards. We need to inform the user when this
# is taking place.
WRAPPING_TO_BOTTOM = _("Wrapping to bottom.")
# Translators: when the user is attempting to locate a particular object and the
# bottom of a page or list is reached without that object being found, we "wrap"
# to the top and continue looking downwards. We need to inform the user when
# this is taking place.
WRAPPING_TO_TOP = _("Wrapping to top.")
# Translators: normally layered panes and tables have items in them. Thus it is
# noteworthy when this is not the case. This message is presented to the user to
# indicate the current layered pane or table contains zero items.
ZERO_ITEMS = _("0 items")
def cellSpan(rowspan, colspan):
spanString = ""
if (colspan > 1) and (rowspan > 1):
# Translators: The cell here refers to a cell within a table within a
# document. We need to announce when the cell occupies or "spans" more
# than a single row and/or column.
spanString = ngettext("Cell spans %d row",
"Cell spans %d rows",
rowspan) % rowspan
# Translators: this represents the number of columns in a table.
spanString += ngettext(" %d column",
" %d columns",
colspan) % colspan
elif (colspan > 1):
# Translators: The cell here refers to a cell within a table within a
# document. We need to announce when the cell occupies or "spans" more
# than a single row and/or column.
spanString = ngettext("Cell spans %d column",
"Cell spans %d columns",
colspan) % colspan
elif (rowspan > 1):
# Translators: The cell here refers to a cell within a table within a
# document. We need to announce when the cell occupies or "spans" more
# than a single row and/or column.
spanString = ngettext("Cell spans %d row",
"Cell spans %d rows",
rowspan) % rowspan
return spanString
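# Illustrative examples (added for clarity, assuming an untranslated English
# locale; the actual ngettext output depends on the active translation):
#   cellSpan(2, 3) -> "Cell spans 2 rows 3 columns"
#   cellSpan(1, 2) -> "Cell spans 2 columns"
#   cellSpan(1, 1) -> ""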
def charactersTooLong(count):
# Translators: People can enter a string of text that is too wide to be
# fully displayed in a spreadsheet cell. This message will be spoken if
# such a cell is encountered.
return ngettext("%d character too long",
"%d characters too long",
count) % count
def dialogCountBraille(count):
# Translators: This message informs the user how many unfocused alert and
# dialog windows a newly (re)focused application has. It is added at the
# end of a braille message containing the app which just claimed focus.
return ngettext("(%d dialog)", "(%d dialogs)", count) % count
def dialogCountSpeech(count):
# Translators: This message informs the user how many unfocused alert and
# dialog windows a newly (re)focused application has. It is added at the
# end of a spoken message containing the app which just claimed focus.
return ngettext("%d unfocused dialog", "%d unfocused dialogs", count) % count
def fileSizeBytes(size):
# Translators: This is the size of a file in bytes
return ngettext("%d byte", "%d bytes", size) % size
def filesFound(count):
# Translators: This message informs the user how many files were found as
# a result of a search.
return ngettext("%d file found", "%d files found", count) % count
def formCount(count):
# Translators: This message presents the number of forms in a document.
return ngettext("%d form", "%d forms", count) % count
def headingCount(count):
# Translators: This message presents the number of headings in a document.
return ngettext("%d heading", "%d headings", count) % count
def itemCount(count):
# Translators: This message presents the number of items in a layered pane
# or table.
return ngettext("%d item", "%d items", count) % count
def itemsFound(count):
# Translators: Orca has several commands that search for, and present a list
# of, objects based on one or more criteria. This is a message that will be
# presented to the user to indicate how many matching items were found.
return ngettext("%d item found", "%d items found", count) % count
def listItemCount(count):
# Translators: This message describes a bulleted or numbered list.
return ngettext("List with %d item", "List with %d items", count) % count
def messagesCount(count):
# Translators: This message is presented to inform the user of the number of
# messages in a list.
return ngettext("%d message.\n", "%d messages.\n", count) % count
def percentage(value):
# Translators: This message is presented to inform the user of the value of
# a slider, progress bar, or other such component.
return ngettext("%d percent.", "%d percent.", value) % value
def percentRead(value):
# Translators: This message announces the percentage of the document that
# has been read. The value is calculated by knowing the index of the current
# position divided by the total number of objects on the page.
return ngettext("%d percent of document read",
"%d percent of document read",
value) % value
def repeatedCharCount(repeatChar, count):
# Translators: Orca will tell you how many characters are repeated on a line
# of text. For example: "22 space characters". The %d is the number and the
# %s is the spoken word for the character.
return ngettext("%(count)d %(repeatChar)s character",
"%(count)d %(repeatChar)s characters",
count) % {"count" : count, "repeatChar": repeatChar}
def selectedItemsCount(selected, total):
# Translators: This message is presented to indicate the number of selected
# objects (e.g. icons) and the total number of those objects.
return ngettext("%(index)d of %(total)d item selected",
"%(index)d of %(total)d items selected",
total) % {"index" : selected, "total" : total}
def shortcutsFoundOrca(count):
# Translators: This message is presented when the user is in a list of
# shortcuts associated with Orca commands which are not specific to the
# current application. It appears as the title of the dialog containing
# the list.
return ngettext("%d Screen reader default shortcut found.",
"%d Screen reader default shortcuts found.",
count) % count
def shortcutsFoundApp(count, appName):
# Translators: This message is presented when the user is in a list of
# shortcuts associated with Orca commands specific to the current
# application. It appears as the title of the dialog containing the list.
return ngettext("%(count)d Screen reader shortcut for %(application)s found.",
"%(count)d Screen reader shortcuts for %(application)s found.",
count) % {"count" : count, "application" : appName}
def spacesCount(count):
# Translators: This message is presented to inform the user of the number of
# space characters in a string.
return ngettext("%d space", "%d spaces", count) % count
def tabsCount(count):
# Translators: This message is presented to inform the user of the number of
# tab characters in a string.
return ngettext("%d tab", "%d tabs", count) % count
def tableCount(count):
# Translators: This message presents the number of tables in a document.
return ngettext("%d table", "%d tables", count) % count
def tableSize(nRows, nColumns):
# Translators: this represents the number of rows in a table.
rowString = ngettext("table with %d row",
"table with %d rows",
nRows) % nRows
# Translators: this represents the number of columns in a table.
colString = ngettext("%d column",
"%d columns",
nColumns) % nColumns
return rowString + " " + colString
def unvisitedLinkCount(count):
# Translators: This message presents the number of unvisited links in a
# document.
return ngettext("%d unvisited link", "%d unvisited links", count) % count
def visitedLinkCount(count):
# Translators: This message presents the number of visited links in a
# document.
return ngettext("%d visited link", "%d visited links", count) % count
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/messages.py
|
Python
|
gpl-3.0
| 95,927
|
[
"ORCA"
] |
1d0a4a4a7d377ea46f52b4a6829d3da1238e2f567046db4c28ec57212086de83
|
'''
Plots the normalized absolute value of the bias of triple collocation after CDF matching in the linear Gaussian case
The error model and notation are explained in evaluateCDFBiasLinearGaussian
The bias is normalized by scaling it:
- with respect to the standard deviation of the noise (normalization = 'noise')
- with respect to the standard deviation of the signal = scaled anomaly (normalization = 'anomaly')
- with respect to the standard deviation of the product (normalization = 'product')
The bias is plotted as a function of the SNR of product x and the SNR of products y and z (identical), where
SNR_i = \frac{\alpha_i^2}{\sigma_i^2}
'''
import numpy as np
from evaluateCDFBiasLinearGaussian import evaluateCDFBiasLinearGaussian
import matplotlib.pyplot as plt
import matplotlib.cm as cm
globfigparams={'fontsize':8,'family':'serif','usetex':True,'preamble':r'\usepackage{times}','column_inch':229.8775/72.27,'markersize':24,'markercolour':'#AA00AA'}
plt.rc('font',**{'size':globfigparams['fontsize'],'family':globfigparams['family']})
plt.rcParams['text.usetex']=globfigparams['usetex']
plt.rcParams['text.latex.preamble']=globfigparams['preamble']
plt.rcParams['legend.fontsize']=globfigparams['fontsize']
plt.rcParams['font.size']=globfigparams['fontsize']
width=globfigparams['column_inch']
figprops = dict(facecolor='white',figsize=(width, 0.75*width))
def plotCDFBiasLinearGaussianESNR(SNRx,SNRy,normalization='noise',fnout=None):
# equal SNR for products y and z
# unless fnout is None, plot is saved as a pdf with filename fnout
assert SNRx.shape == SNRy.shape
alphax=np.ones(SNRx.shape)
sigma2x=1./SNRx
D=alphax**2 + sigma2x
M=np.zeros((SNRx.shape[0],SNRy.shape[0]))
for jx,alphaxj in enumerate(alphax):
for jy,SNRyj in enumerate(SNRy):
alphayj=np.sqrt(SNRyj*D[jx]/(SNRyj+1))
if normalization == 'anomaly':
normf=np.sqrt(alphaxj**2)
elif normalization == 'noise':
normf=np.sqrt(sigma2x[jx]/alphaxj**2)
elif normalization == 'product':
normf=np.sqrt(D[jx])
M[jy,jx]=100*abs(evaluateCDFBiasLinearGaussian(alphaxj, alphayj, alphayj))/(normf**2)
fig=plt.figure(**figprops)
ax = fig.add_axes([0.12, 0.16, 0.85, 0.8])
if normalization == 'anomaly':
cbarlabel= 'Bias relative to scaled anomaly variance [\%]'
elif normalization == 'noise':
cbarlabel='Bias relative to noise variance [\%]'
elif normalization == 'product':
cbarlabel='Bias relative to product variance $\\frac{b}{D}$ [\%]'
cax=plt.imshow(M,cmap=cm.YlOrRd,origin='lower',vmin=0,extent=[SNRx[0],SNRx[-1],SNRy[0],SNRy[-1]])
plt.xscale('log')
plt.yscale('log')
ax.set_xlabel('SNR of product $X$')
ax.set_ylabel('SNR of products $Y$ and $Z$')
cbar=fig.colorbar(cax,label=cbarlabel)
if fnout is None:
plt.show()
else:
plt.savefig(fnout,format='pdf')
def plot_all(pathout):
# plot bias with all normalizations in pathout
import os.path
SNRx=np.logspace(-1,2,50)
SNRy=SNRx
plotCDFBiasLinearGaussianESNR(SNRx,SNRy,normalization='anomaly',fnout=os.path.join(pathout,'bias_anomaly.pdf'))
plotCDFBiasLinearGaussianESNR(SNRx,SNRy,normalization='noise',fnout=os.path.join(pathout,'bias_noise.pdf'))
plotCDFBiasLinearGaussianESNR(SNRx,SNRy,normalization='product',fnout=os.path.join(pathout,'bias_product.pdf'))
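if __name__ == '__main__':
    # Minimal usage sketch (added for illustration): regenerate the three
    # normalized-bias plots in the current working directory. Assumes
    # evaluateCDFBiasLinearGaussian is importable and matplotlib can write
    # PDF files to that directory.
    plot_all('.')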
|
szwieback/nlscaling
|
CDFAnalyticalBias/plotCDFBiasLinearGaussian.py
|
Python
|
gpl-3.0
| 3,466
|
[
"Gaussian"
] |
8d64a6cf1d11be23b29c31ba3278858eadade3895178b3dc38bd0d6d4db13d5d
|
#!/usr/bin/env python
'''
Description
If using files, (call by command line or from python):
all the inputs are raw float32 vectors files that are reshaped by the number
of f0 values in ff0.
There are three safe patches that were not described in the publication[1]:
(These are not critical, they might remove a few artifacts here and there).
* The noise mask is slightly low-passed (smoothed) across frequency
(def. 9 bins freq. window), in order to avoid cliffs in frequency domain
that end up creating Gibbs phenomenon in the time domain.
* High-pass filtering (def. 0.5*f0 cut-off)
This centers each synthesized segment around zero, to avoid cutting
any DC residual component (e.g. comming from the spectral envelope).
* Short half-window (def. 1ms (yes, one ms)) on the left of the pulse,
in order to avoid any pre-echos.
Reference
[1] G. Degottex, P. Lanchantin, and M. Gales, "A Pulse Model in Log-domain
for a Uniform Synthesizer," in Proc. 9th Speech Synthesis Workshop
(SSW9), 2016.
[2] G. Degottex, P. Lanchantin and M. Gales, "A Log Domain Pulse Model for
Parametric Speech Synthesis", IEEE Transactions on Audio, Speech, and
Language Processing, 26(1):57-70, 2018.
Copyright(C) 2016 Engineering Department, University of Cambridge, UK.
License
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author
Gilles Degottex <gad27@cam.ac.uk>
'''
import argparse
import sys
import warnings
import numpy as np
np.random.seed(123) # Generate always the same "random" numbers, for debugging.
import scipy
import sigproc as sp
def getwinlen(f0, fs, nbper):
return int(np.max((0.050*fs, nbper*fs/f0))/2)*2+1 # Has to be odd
def synthesize(fs, f0s, SPEC, NM=None, wavlen=None
, ener_multT0=False
, nm_cont=False # If False, force binary state of the noise mask (by thresholding at 0.5)
, nm_lowpasswinlen=9
, hp_f0coef=0.5 # factor of f0 for the cut-off of the high-pass filter (def. 0.5*f0)
, antipreechohwindur=0.001 # [s] Use to damp the signal at the beginning of the signal AND at the end of it
# Following options are for post-processing the features, after the generation/transformation and thus before waveform synthesis
, pp_f0_rmsteps=False # Removes steps in the f0 curve
# (see sigproc.resampling.f0s_rmsteps(.) )
, pp_f0_smooth=None # Smooth the f0 curve using median and FIR filters of given window duration [s]
, pp_atten1stharminsilences=None # Typical value is -25
, verbose=1):
winnbper = 4 # Number of periods in a synthesis windows. It still contains only one single pulse, but leaves space for the VTF to decay without being cut abruptly.
# Copy the inputs to avoid modifying them
f0s = f0s.copy()
SPEC = SPEC.copy()
if not NM is None: NM = NM.copy()
else: NM = np.zeros(SPEC.shape)
NM = np.clip(NM, 0.0, 1.0) # The noise mask is supposed to be in [0,1]
# Check the size of the inputs
if f0s.shape[0]!=SPEC.shape[0]:
raise ValueError('F0 size {} and spectrogram size {} do not match'.format(f0s.shape[0], SPEC.shape[0])) # pragma: no cover
if not NM is None:
if SPEC.shape!=NM.shape:
raise ValueError('spectrogram size {} and NM size {} do not match.'.format(SPEC.shape, NM.shape)) # pragma: no cover
if wavlen is None: wavlen = int(np.round(f0s[-1,0]*fs))
dftlen = (SPEC.shape[1]-1)*2
shift = np.median(np.diff(f0s[:,0]))
if verbose>0:
print('PML Synthesis (dur={}s, fs={}Hz, f0 in [{:.0f},{:.0f}]Hz, shift={}s, dftlen={})'.format(wavlen/float(fs), fs, np.min(f0s[:,1]), np.max(f0s[:,1]), shift, dftlen))
# Prepare the features
# Enforce continuous f0
f0s[:,1] = np.interp(f0s[:,0], f0s[f0s[:,1]>0,0], f0s[f0s[:,1]>0,1])
# If asked, removes steps in the f0 curve
if pp_f0_rmsteps:
f0s = sp.f0s_rmsteps(f0s)
# If asked, smooth the f0 curve using median and FIR filters
if not pp_f0_smooth is None:
print(' Smoothing f0 curve using {}[s] window'.format(pp_f0_smooth))
import scipy.signal as sig
lf0 = np.log(f0s[:,1])
bcoefslen = int(0.5*pp_f0_smooth/shift)*2+1
lf0 = sig.medfilt(lf0, bcoefslen)
bcoefs = np.hamming(bcoefslen)
bcoefs = bcoefs/sum(bcoefs)
lf0 = sig.filtfilt(bcoefs, [1], lf0)
f0s[:,1] = np.exp(lf0)
winlenmax = getwinlen(np.min(f0s[:,1]), fs, winnbper)
if winlenmax>dftlen:
warnings.warn('\n\nWARNING: The maximum window length ({}) is bigger than the DFT length ({}). Please increase the DFT length of your spectral features (the second dimension) or check if the f0 curve has extremely low values and try to clip them to higher values (at least higher than 50Hz). The f0 curve has been clipped to {}Hz.\n\n'.format(winlenmax, dftlen, winnbper*fs/float(dftlen))) # pragma: no cover
f0s[:,1] = np.clip(f0s[:,1], winnbper*fs/float(dftlen-2), 1e6)
if not NM is None:
# Remove noise below f0, as it is supposed to be already the case
for n in range(NM.shape[0]):
NM[n,:int((float(dftlen)/fs)*2*f0s[n,1])] = 0.0
if not nm_cont:
print(' Forcing binary noise mask')
NM[NM<=0.5] = 0.0 # To be sure that voiced segments are not hoarse
NM[NM>0.5] = 1.0 # To be sure the noise segments are fully noisy
# Generate the pulse positions [1](2) (i.e. the synthesis instants, the GCIs in voiced segments)
ts = [0.0]
while ts[-1]<float(wavlen)/fs:
cf0 = np.interp(ts[-1], f0s[:,0], f0s[:,1])
if cf0<50.0: cf0 = 50
ts.append(ts[-1]+(1.0/cf0))
ts = np.array(ts)
f0s = np.vstack((ts, np.interp(ts, f0s[:,0], f0s[:,1]))).T
# Resample the features to the pulse positions
# Spectral envelope uses the nearest, to avoid over-smoothing
SPECR = np.zeros((f0s.shape[0], dftlen/2+1))
for n, t in enumerate(f0s[:,0]): # Nearest: Way better for plosives
idx = int(np.round(t/shift))
idx = np.clip(idx, 0, SPEC.shape[0]-1)
SPECR[n,:] = SPEC[idx,:]
# Keep trace of the median energy [dB] over the whole signal
ener = np.mean(SPECR, axis=1)
idxacs = np.where(sp.mag2db(ener) > sp.mag2db(np.max(ener))-30)[0] # Get approx active frames # TODO Param
enermed = sp.mag2db(np.median(ener[idxacs])) # Median energy [dB]
ener = sp.mag2db(ener)
# Resample the noise feature to the pulse positions
# Smooth the frequency response of the mask in order to avoid Gibbs
# (poor Gibbs, nobody wants to see him)
nm_lowpasswin = np.hanning(nm_lowpasswinlen)
nm_lowpasswin /= np.sum(nm_lowpasswin)
NMR = np.zeros((f0s.shape[0], dftlen/2+1))
for n, t in enumerate(f0s[:,0]):
idx = int(np.round(t/shift)) # Nearest is better for plosives
idx = np.clip(idx, 0, NM.shape[0]-1)
NMR[n,:] = NM[idx,:]
if nm_lowpasswinlen>1:
NMR[n,:] = scipy.signal.filtfilt(nm_lowpasswin, [1.0], NMR[n,:])
NMR = np.clip(NMR, 0.0, 1.0)
# The complete waveform that we will fill with the pulses
wav = np.zeros(wavlen)
# Half window on the left of the synthesized segment to avoid pre-echo
dampinhwin = np.hanning(1+2*int(np.round(antipreechohwindur*fs))) # 1 ms forced damping window
dampinhwin = dampinhwin[:(len(dampinhwin)-1)/2+1]
for n, t in enumerate(f0s[:,0]):
f0 = f0s[n,1]
if verbose>1: print "\rPM Synthesis (python) t={:4.3f}s f0={:3.3f}Hz ".format(t,f0),
# Window's length
# TODO It should be ensured that the beginning and end of the
# noise is within the window. Nothing is doing this currently!
winlen = getwinlen(f0, fs, winnbper)
# TODO We also assume that the VTF's decay is shorter
# than winnbper-1 periods (dangerous with high pitched and tense voice).
if winlen>dftlen: raise ValueError('The window length ({}) is bigger than the DFT length ({}). Please increase the dftlen of your spectral features or check if the f0 curve has extremely low values and try to clip them to higher values (at least higher than 50[Hz])'.format(winlen, dftlen)) # pragma: no cover
# Set the rough position of the pulse in the window (the closest sample)
# We keep a third of the window (1 period) on the left because the
# pulse signal is minimum phase. And 2/3rd (remaining 2 periods)
# on the right to let the VTF decay.
pulseposinwin = int((1.0/winnbper)*winlen)
# The sample indices of the current pulse wrt. the final waveform
winidx = int(round(fs*t)) + np.arange(winlen)-pulseposinwin
# Build the pulse spectrum
# Let start with a Dirac
S = np.ones(dftlen/2+1, dtype=np.complex64)
# Add the delay to place the Dirac at the "GCI": exp(-j*2*pi*t_i)
delay = -pulseposinwin - fs*(t-int(round(fs*t))/float(fs))
S *= np.exp((delay*2j*np.pi/dftlen)*np.arange(dftlen/2+1))
# Add the spectral envelope
# Both amplitude and phase
E = SPECR[n,:] # Take the amplitude from the given one
if hp_f0coef is not None:
# High-pass it to avoid any residual DC component.
fcut = hp_f0coef*f0
if not pp_atten1stharminsilences is None and ener[n]-enermed<pp_atten1stharminsilences:
fcut = 1.5*f0 # Try to cut between first and second harm
HP = sp.butter2hspec(fcut, 4, fs, dftlen, high=True)
E *= HP
# Not necessarily good as it is non-causal, so make it causal...
# ... together with the VTF response below.
# Build the phase of the envelope from the amplitude
E = sp.hspec2minphasehspec(E, replacezero=True) # We spend 2 FFT here!
S *= E # Add it to the current pulse
# Add energy correction wrt f0.
# STRAIGHT and AHOCODER vocoders do it.
# (why? to equalize the energy when changing the pulse's duration?)
if ener_multT0:
S *= np.sqrt(fs/f0)
# Generate the segment of Gaussian noise
# Use mid-points before/after pulse position
if n>0: leftbnd=int(np.round(fs*0.5*(f0s[n-1,0]+t)))
else: leftbnd=int(np.round(fs*(t-0.5/f0s[n,1]))) # int(0)
if n<f0s.shape[0]-1: rightbnd=int(np.round(fs*0.5*(t+f0s[n+1,0])))-1
else: rightbnd=int(np.round(fs*(t+0.5/f0s[n,1]))) #rightbnd=int(wavlen-1)
gausswinlen = rightbnd-leftbnd # The length of the noise segment
gaussnoise4win = np.random.normal(size=(gausswinlen)) # The noise
GN = np.fft.rfft(gaussnoise4win, dftlen) # Move the noise to freq domain
# Normalize it by its energy (@Yannis, That's your answer at SSW9!)
GN /= np.sqrt(np.mean(np.abs(GN)**2))
# Place the noise within the pulse's window
delay = (pulseposinwin-(leftbnd-winidx[0]))
GN *= np.exp((delay*2j*np.pi/dftlen)*np.arange(dftlen/2+1))
# Add it to the pulse spectrum, under the condition of the mask
S *= GN**NMR[n,:]
# That's it! the pulse spectrum is ready!
# Move it to time domain
deter = np.fft.irfft(S)[0:winlen]
# Add half window on the left of the synthesized segment
# to avoid any possible pre-echo
deter[:leftbnd-winidx[0]-len(dampinhwin)] = 0.0
deter[leftbnd-winidx[0]-len(dampinhwin):leftbnd-winidx[0]] *= dampinhwin
# Add half window on the right
# to avoid cutting the VTF response abruptly
deter[-len(dampinhwin):] *= dampinhwin[::-1]
# Write the synthesized segment in the final waveform
if winidx[0]<0 or winidx[-1]>=wavlen:
# The window is partly outside of the waveform ...
# ... thus copy only the existing part
itouse = np.logical_and(winidx>=0,winidx<wavlen)
wav[winidx[itouse]] += deter[itouse]
else:
wav[winidx] += deter
if verbose>1: print '\r \r',
if verbose>2: # pragma: no cover
import matplotlib.pyplot as plt
plt.ion()
_, axs = plt.subplots(3, 1, sharex=True, sharey=False)
times = np.arange(len(wav))/float(fs)
axs[0].plot(times, wav, 'k')
axs[0].set_ylabel('Waveform\nAmplitude')
axs[0].grid()
axs[1].plot(f0s[:,0], f0s[:,1], 'k')
axs[1].set_ylabel('F0\nFrequency [Hz]')
axs[1].grid()
axs[2].imshow(sp.mag2db(SPEC).T, origin='lower', aspect='auto', interpolation='none', extent=(f0s[0,0], f0s[-1,0], 0, 0.5*fs))
axs[2].set_ylabel('Amp. Envelope\nFrequency [Hz]')
from IPython.core.debugger import Pdb; Pdb().set_trace()
return wav
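def _demo_synthesize(fs=16000, dftlen=4096, shift=0.005, dur=1.0):
    # Hedged usage sketch (added for illustration, not part of the original
    # API): synthesize `dur` seconds of a constant 100Hz tone with a flat
    # amplitude envelope and no noise mask. The fs/dftlen/shift values are
    # arbitrary but typical for this vocoder.
    ts = shift*np.arange(int(dur/shift))
    f0s = np.vstack((ts, 100.0*np.ones(len(ts)))).T  # [time[s], f0[Hz]] pairs
    SPEC = np.ones((len(ts), dftlen//2+1))           # flat spectral envelope
    return synthesize(fs, f0s, SPEC, NM=None, verbose=0)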
def synthesizef(fs, shift=0.005, dftlen=4096, ff0=None, flf0=None, fspec=None, flspec=None, ffwlspec=None, ffwcep=None, fmcep=None, fpdd=None, fmpdd=None, fnm=None, ffwnm=None, nm_cont=False, fsyn=None, verbose=1):
'''
Call the synthesis from python using file inputs and outputs
'''
if ff0:
f0 = np.fromfile(ff0, dtype=np.float32)
if flf0:
f0 = np.fromfile(flf0, dtype=np.float32)
f0[f0>0] = np.exp(f0[f0>0])
ts = (shift)*np.arange(len(f0))
f0s = np.vstack((ts, f0)).T
if fspec:
SPEC = np.fromfile(fspec, dtype=np.float32)
SPEC = SPEC.reshape((len(f0), -1))
if flspec:
SPEC = np.fromfile(flspec, dtype=np.float32)
SPEC = np.exp(SPEC.reshape((len(f0), -1)))
if ffwlspec:
FWLSPEC = np.fromfile(ffwlspec, dtype=np.float32)
FWLSPEC = FWLSPEC.reshape((len(f0), -1))
SPEC = np.exp(sp.fwbnd2linbnd(FWLSPEC, fs, dftlen, smooth=True))
if ffwcep:
FWCEP = np.fromfile(ffwcep, dtype=np.float32)
FWCEP = FWCEP.reshape((len(f0), -1))
SPEC = np.exp(sp.fwcep2loghspec(FWCEP, fs, dftlen))
if fmcep: # pragma: no cover
# Cannot test this because it needs SPTK
MCEP = np.fromfile(fmcep, dtype=np.float32)
MCEP = MCEP.reshape((len(f0), -1))
SPEC = sp.mcep2spec(MCEP, sp.bark_alpha(fs), dftlen)
NM = None
pdd_thresh = 0.75 # For this value, see:
# G. Degottex and D. Erro, "A uniform phase representation for the harmonic model in speech synthesis applications," EURASIP, Journal on Audio, Speech, and Music Processing - Special Issue: Models of Speech - In Search of Better Representations, vol. 2014, iss. 1, p. 38, 2014.
if fpdd:
PDD = np.fromfile(fpdd, dtype=np.float32)
PDD = PDD.reshape((len(f0), -1))
NM = PDD.copy()
NM[PDD<pdd_thresh] = 0.0
NM[PDD>pdd_thresh] = 1.0
if fmpdd: # pragma: no cover
# Cannot test this because it needs SPTK
MPDD = np.fromfile(fmpdd, dtype=np.float32)
MPDD = MPDD.reshape((len(f0), -1))
PDD = sp.mcep2spec(MPDD, sp.bark_alpha(fs), dftlen)
NM = PDD.copy()
NM[PDD<pdd_thresh] = 0.0
NM[PDD>pdd_thresh] = 1.0
if fnm:
NM = np.fromfile(fnm, dtype=np.float32)
NM = NM.reshape((len(f0), -1))
if ffwnm:
FWNM = np.fromfile(ffwnm, dtype=np.float32)
FWNM = FWNM.reshape((len(f0), -1))
NM = sp.fwbnd2linbnd(FWNM, fs, dftlen)
syn = synthesize(fs, f0s, SPEC, NM=NM, nm_cont=nm_cont, verbose=verbose)
if fsyn:
sp.wavwrite(fsyn, syn, fs, norm_max_ifneeded=True, verbose=verbose)
return syn
def main(argv):
'''
Call the synthesis from the command line
'''
argpar = argparse.ArgumentParser()
argpar.add_argument("synth", help="Output synthesis file")
argpar.add_argument("--f0", default=None, help="Input f0[Hz] file")
argpar.add_argument("--logf0", default=None, help="Input f0[log Hz] file")
argpar.add_argument("--spec", default=None, help="Input amplitude spectrogram [linear values]")
argpar.add_argument("--lspec", default=None, help="Input amplitude spectrogram [log spectral values on linear frequency scale]")
argpar.add_argument("--fwlspec", default=None, help="Input amplitude spectrogram [frequency warped log spectral values]")
argpar.add_argument("--fwcep", default=None, help="Input amplitude spectrogram [frequency warped cepstrum values]")
argpar.add_argument("--mcep", default=None, help="Input amplitude spectrogram [mel-cepstrum values]")
argpar.add_argument("--pdd", default=None, help="Input Phase Distortion Deviation file [linear values]")
argpar.add_argument("--mpdd", default=None, help="Input Phase Distortion Deviation file [mel-cepstrum values]")
argpar.add_argument("--nm", default=None, help="Output Noise Mask [linear values in [0,1] ]")
argpar.add_argument("--fwnm", default=None, help="Output Noise Mask [compressed in bands with values still in [0,1] ]")
argpar.add_argument("--nm_cont", action='store_true', help="Allow continuous values for the noisemask (def. False)")
argpar.add_argument("--fs", default=16000, type=int, help="Sampling frequency[Hz]")
argpar.add_argument("--shift", default=0.005, type=float, help="Time step[s] between the frames")
#argpar.add_argument("--dftlen", dftlen=4096, type=float, help="Size of the DFT for extracting the features")
argpar.add_argument("--verbose", default=1, help="Output some information")
args = argpar.parse_args(argv)
args.dftlen = 4096
synthesizef(args.fs, shift=args.shift, dftlen=args.dftlen, ff0=args.f0, flf0=args.logf0, fspec=args.spec, flspec=args.lspec, ffwlspec=args.fwlspec, ffwcep=args.fwcep, fmcep=args.mcep, fnm=args.nm, ffwnm=args.fwnm, nm_cont=args.nm_cont, fpdd=args.pdd, fmpdd=args.mpdd, fsyn=args.synth, verbose=args.verbose)
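# Example command-line invocation (illustrative; the feature file names below
# are hypothetical raw float32 vector files, as described in the module
# docstring):
#   python synthesis.py out.wav --f0 sample.f0 --spec sample.spec --fs 16000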
if __name__ == "__main__" : # pragma: no cover
main(sys.argv[1:])
|
gillesdegottex/pulsemodel
|
synthesis.py
|
Python
|
apache-2.0
| 18,626
|
[
"DIRAC",
"Gaussian"
] |
90c9ee23b7642d22671d03d16efdbdffcf91e982ca8d5f14f3bd878910a945a9
|
from __future__ import print_function
import sys
import random
import os
from builtins import range
import time
import json
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.naive_bayes import H2ONaiveBayesEstimator
from h2o.grid.grid_search import H2OGridSearch
class Test_naivebayes_grid_search:
"""
PUBDEV-1843: Grid testing. Subtask 2.
This class is created to test the gridsearch for naivebayes algo and make sure it runs. Only one test is
performed here.
Test Descriptions:
a. grab all truly griddable parameters and randomly or manually set the parameter values.
b. Next, build H2O naivebayes models using grid search. Count and make sure models
are only built for hyper-parameters set to legal values. No model is built for bad hyper-parameter
values. We should instead get a warning/error message printed out.
c. For each model built using grid search, we will extract the parameters used in building
that model and manually build a H2O naivebayes model. Training metrics are calculated from the
gridsearch model and the manually built model. If their metrics
differ by too much, print a warning message but don't fail the test.
d. we will check and make sure the models are built within the max_runtime_secs time limit that was set
for it as well. If max_runtime_secs was exceeded, declare test failure.
Note that for hyper-parameters containing all legal parameter names and parameter value lists with legal
and illegal values, grid-models should be built for all combinations of legal parameter values. For
illegal parameter values, a warning/error message should be printed out to warn the user but the
program should not throw an exception;
We will re-use the dataset generation methods for GLM. There will be only one data set for classification.
"""
# parameters set by users, change with care
max_grid_model = 100 # maximum number of grid models generated before adding max_runtime_secs
curr_time = str(round(time.time())) # store current timestamp, used as part of filenames.
seed = int(round(time.time()))
# parameters denoting filenames of interested that store training/validation/test data sets in csv format
training1_filename = "smalldata/gridsearch/multinomial_training1_set.csv"
json_filename = "gridsearch_naivebayes_hyper_parameter_" + curr_time + ".json"
allowed_diff = 1e-2 # difference allow between grid search model and manually built model MSEs
# System parameters, do not change. Dire consequences may follow if you do
current_dir = os.path.dirname(os.path.realpath(sys.argv[0])) # directory of this test file
train_row_count = 0 # training data row count, randomly generated later
train_col_count = 0 # training data column count, randomly generated later
# following parameters are used to generate hyper-parameters
max_int_val = 10 # maximum size of random integer values
min_int_val = -2 # minimum size of random integer values
max_int_number = 5 # maximum number of integer random grid values to generate
max_real_val = 1 # maximum size of random float values
min_real_val = -0.1 # minimum size of random float values
max_real_number = 5 # maximum number of real grid values to generate
time_scale = 2 # maximum runtime scale
extra_time_fraction = 0.5 # since timing is never perfect, give some extra time on top of maximum runtime limit
min_runtime_per_tree = 0 # minimum run time found. Determined later
model_run_time = 0.0 # time taken to run a vanilla naivebayes model. Determined later.
allowed_runtime_diff = 0.1 # run time difference fraction when models are run by gridsearch and manually
laplace_scale = max_int_val # scale the laplace smoothing K to bigger range of numbers
family = 'multinomial' # classification family used by this test
training_metric = 'logloss' # metrics by which we evaluate model performance
test_name = "pyunit_naivebayes_gridsearch_over_all_params_large.py" # name of this test
sandbox_dir = "" # sandbox directory where we are going to save our failed test data sets
# store information about training/test data sets
x_indices = [] # store predictor indices in the data set
y_index = 0 # store response index in the data set
training1_data = [] # store training data sets
test_failed = 0 # count total number of tests that have failed
# give the user opportunity to pre-assign hyper parameters for fixed values
hyper_params = dict()
hyper_params["fold_assignment"] = ["AUTO", "Random", "Modulo", "Stratified"]
hyper_params["compute_metrics"] = [False, True]
# parameters to be excluded from hyper parameter list even though they may be gridable
exclude_parameter_lists = ['validation_frame', 'response_column', 'fold_column', 'offset_column',
'min_sdev', 'eps_sdev', 'seed', 'class_sampling_factors', 'balance_classes',
'max_after_balance_size']
params_zero_one = ['min_prob', 'eps_prob']
params_more_than_zero = []
params_more_than_one = []
params_zero_positive = ['max_runtime_secs', 'laplace'] # >= 0
final_hyper_params = dict() # store the final hyper-parameters that we are going to use
gridable_parameters = [] # store griddable parameter names
gridable_types = [] # store the corresponding griddable parameter types
gridable_defaults = [] # store the gridable parameter default values
possible_number_models = 0 # possible number of models built based on hyper-parameter specification
correct_model_number = 0 # count number of models built with bad hyper-parameter specification
true_correct_model_number = 0 # count number of models built with good hyper-parameter specification
nfolds = 5 # enable cross validation to test fold_assignment
def __init__(self):
self.setup_data()
self.setup_model()
def setup_data(self):
"""
This function performs all initializations necessary:
1. generates all the random parameter values for our dynamic tests like the Gaussian
noise std, column count and row count for training/test data sets.
2. with the chosen distribution family, generate the appropriate data sets
3. load the data sets and set the training set indices and response column index
"""
# create and clean out the sandbox directory first
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# preload data sets
self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filename))
# set data set indices for predictors and response
self.y_index = self.training1_data.ncol-1
self.x_indices = list(range(self.y_index))
# set response to be categorical for classification tasks
self.training1_data[self.y_index] = self.training1_data[self.y_index].round().asfactor()
# save the training data files just in case the code crashed.
pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)
def setup_model(self):
"""
This function sets up the gridsearch hyper-parameters that will be used later on:
1. It will first try to grab all the parameters that are griddable and parameters used by naivebayes.
2. It will find the intersection of parameters that are both griddable and used by naivebayes.
3. There are several extra parameters that are used by naivebayes that are denoted as griddable but actually
are not. These parameters have to be discovered manually and they are captured in
self.exclude_parameter_lists.
4. We generate the gridsearch hyper-parameter. For numerical parameters, we will generate those randomly.
For enums, we will include all of them.
:return: None
"""
# build bare bone model to get all parameters
model = H2ONaiveBayesEstimator(nfolds=self.nfolds, compute_metrics=True)
model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
self.model_run_time = pyunit_utils.find_grid_runtime([model]) # find model train time
print("Time taken to build a base barebone model is {0}".format(self.model_run_time))
# grab all gridable parameters and its type
(self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
pyunit_utils.get_gridables(model._model_json["parameters"])
# randomly generate griddable parameters including values outside legal range, like setting alpha values to
# be outside legal range of 0 and 1 and etc
(self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params,
self.exclude_parameter_lists,
self.gridable_parameters, self.gridable_types, self.gridable_defaults,
random.randint(1, self.max_int_number),
self.max_int_val, self.min_int_val,
random.randint(1, self.max_real_number),
self.max_real_val, self.min_real_val)
# scale the max_runtime_secs parameter and others as well to make sure they make sense
time_scale = self.time_scale * self.model_run_time
if "max_runtime_secs" in list(self.hyper_params):
self.hyper_params["max_runtime_secs"] = [time_scale * x for x
in self.hyper_params["max_runtime_secs"]]
# generate a new final_hyper_params which only takes a subset of all griddable parameters while
# hyper_params take all griddable parameters and generate the grid search hyper-parameters
[self.possible_number_models, self.final_hyper_params] = \
pyunit_utils.check_and_count_models(self.hyper_params, self.params_zero_one, self.params_more_than_zero,
self.params_more_than_one, self.params_zero_positive,
self.max_grid_model)
final_hyper_params_keys = list(self.final_hyper_params)
# must add max_runtime_secs to restrict unit test run time and as a promise to Arno to test for this
if ("max_runtime_secs" not in final_hyper_params_keys) and \
("max_runtime_secs" in list(self.hyper_params)):
self.final_hyper_params["max_runtime_secs"] = self.hyper_params["max_runtime_secs"]
len_good_time = len([x for x in self.hyper_params["max_runtime_secs"] if (x >= 0)])
self.possible_number_models = self.possible_number_models*len_good_time
# need to check that min_prob >= 1e-10
if "min_prob" in final_hyper_params_keys:
old_len_prob = len([x for x in self.final_hyper_params["min_prob"] if (x >= 0)])
good_len_prob = len([x for x in self.final_hyper_params["min_prob"] if (x >= 1e-10)])
if (old_len_prob > 0):
self.possible_number_models = self.possible_number_models*good_len_prob/old_len_prob
else:
self.possible_number_models = 0
if "laplace" in final_hyper_params_keys:
self.final_hyper_params["laplace"] = [self.laplace_scale * x for x
in self.hyper_params["laplace"]]
# write out the hyper-parameters used into json files.
pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
self.final_hyper_params)
def tear_down(self):
"""
This function performs teardown after the dynamic test is completed. If all tests
passed, it will delete all data sets generated since they can be quite large. It
will move the training/validation/test data sets into a Rsandbox directory so that
we can re-run the failed test.
"""
if self.test_failed: # some tests have failed. Need to save data sets for later re-runs
# create Rsandbox directory to keep data sets and weight information
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# Do not want to save all data sets. Only save data sets that are needed for failed tests
pyunit_utils.move_files(self.sandbox_dir, self.training1_data_file, self.training1_filename)
# write out the jenkins job info into log files.
json_file = os.path.join(self.sandbox_dir, self.json_filename)
with open(json_file,'wb') as test_file:
json.dump(self.hyper_params, test_file)
else: # all tests have passed. Delete sandbox if it was not wiped before
pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, False)
# remove any csv files left in test directory
pyunit_utils.remove_csv_files(self.current_dir, ".csv")
pyunit_utils.remove_csv_files(self.current_dir, ".json")
def test_naivebayes_grid_search_over_params(self):
"""
test_naivebayes_grid_search_over_params the following:
a. grab all truly griddable parameters and randomly or manually set the parameter values.
b. Next, build H2O naivebayes models using grid search. Count and make sure models
are only built for hyper-parameters set to legal values. No model is built for bad hyper-parameter
values. We should instead get a warning/error message printed out.
c. For each model built using grid search, we will extract the parameters used in building
that model and manually build a H2O naivebayes model. Training metrics are calculated from the
gridsearch model and the manually built model. If their metrics
differ by too much, print a warning message but don't fail the test.
d. we will check and make sure the models are built within the max_runtime_secs time limit that was set
for it as well. If max_runtime_secs was exceeded, declare test failure.
"""
print("*******************************************************************************************")
print("test_naivebayes_grid_search_over_params for naivebayes ")
h2o.cluster_info()
try:
print("Hyper-parameters used here is {0}".format(self.final_hyper_params))
# start grid search
grid_model = H2OGridSearch(H2ONaiveBayesEstimator(nfolds=self.nfolds),
hyper_params=self.final_hyper_params)
grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
self.correct_model_number = len(grid_model) # store number of models built
# make sure the correct number of models are built by gridsearch
if not (self.correct_model_number == self.possible_number_models): # wrong grid model number
self.test_failed += 1
print("test_naivebayes_grid_search_over_params for naivebayes failed: number of models built by "
"gridsearch {0} does not equal to all possible combinations of hyper-parameters "
"{1}".format(self.correct_model_number, self.possible_number_models))
else:
# add parameters into params_dict. Use this to manually build model
params_dict = dict()
params_dict["nfolds"] = self.nfolds
total_run_time_limits = 0.0 # calculate upper bound of max_runtime_secs
true_run_time_limits = 0.0
manual_run_runtime = 0.0
gridsearch_runtime = 0.0
# compare performance metric of model built by gridsearch with manually built model
for each_model in grid_model:
params_list = grid_model.get_hyperparams_dict(each_model._id)
params_list.update(params_dict)
model_params = dict()
# need to taken out max_runtime_secs from model parameters, it is now set in .train()
if "max_runtime_secs" in params_list:
model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
max_runtime = params_list["max_runtime_secs"]
del params_list["max_runtime_secs"]
else:
max_runtime = 0
if "validation_frame" in params_list:
model_params["validation_frame"] = params_list["validation_frame"]
del params_list["validation_frame"]
if "eps_prob" in params_list:
model_params["eps_prob"] = params_list["eps_prob"]
del params_list["eps_prob"]
if "min_prob" in params_list:
model_params["min_prob"] = params_list["min_prob"]
del params_list["min_prob"]
# make sure manual model was provided the same max_runtime_secs as the grid model
each_model_runtime = pyunit_utils.find_grid_runtime([each_model])
gridsearch_runtime += each_model_runtime
manual_model = H2ONaiveBayesEstimator(**params_list)
manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data,
**model_params)
# collect the time taken to manually built all models
model_runtime = pyunit_utils.find_grid_runtime([manual_model]) # time taken to build this model
manual_run_runtime += model_runtime
if max_runtime > 0:
# shortest possible time it takes to build this model
if (max_runtime < self.model_run_time):
total_run_time_limits += model_runtime
else:
total_run_time_limits += max_runtime
true_run_time_limits += max_runtime
# compute and compare test metrics between the two models
grid_model_metrics = \
each_model.model_performance(test_data=self.training1_data)._metric_json[self.training_metric]
manual_model_metrics = \
manual_model.model_performance(test_data=self.training1_data)._metric_json[self.training_metric]
# just compare the mse in this case within tolerance:
if not((type(grid_model_metrics) == str) or (type(manual_model_metrics) == str)):
if (abs(grid_model_metrics) > 0) \
and (abs(grid_model_metrics - manual_model_metrics)/grid_model_metrics > self.allowed_diff):
print("test_naivebayes_grid_search_over_params for naivebayes WARNING\ngrid search model "
"{0}: {1}, time taken to build (secs): {2}\n and manually built H2O model {3}: {4}, "
"time taken to build (secs): {5}\ndiffer too much!"
"".format(self.training_metric, grid_model_metrics, each_model_runtime,
self.training_metric, manual_model_metrics, model_runtime))
print("Time taken for gridsearch to build all models (sec): {0}\n Time taken to manually build all "
"models (sec): {1}, total run time limits (sec): "
"{2}".format(gridsearch_runtime, manual_run_runtime, total_run_time_limits))
total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)
# make sure the max_runtime_secs is working to restrict model built time
if not(manual_run_runtime <= total_run_time_limits):
self.test_failed += 1
print("test_naivebayes_grid_search_over_params for naivebayes failed: time taken to manually build "
"models is {0}. Maximum allowed time "
"is {1}".format(manual_run_runtime, total_run_time_limits))
if self.test_failed == 0:
print("test_naivebayes_grid_search_over_params for naivebayes has passed!")
except Exception as e:
if self.possible_number_models > 0:
print("test_naivebayes_grid_search_over_params for naivebayes failed: exception ({0}) was thrown for "
"no reason.".format(e))
self.test_failed += 1
def test_grid_search_for_naivebayes_over_all_params():
"""
Create and instantiate class and perform tests specified for naive bayes
:return: None
"""
test_naivebayes_grid = Test_naivebayes_grid_search()
test_naivebayes_grid.test_naivebayes_grid_search_over_params()
sys.stdout.flush()
if test_naivebayes_grid.test_failed: # exit with error if any tests have failed
sys.exit(1)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_grid_search_for_naivebayes_over_all_params)
else:
test_grid_search_for_naivebayes_over_all_params()
|
h2oai/h2o-3
|
h2o-py/dynamic_tests/testdir_algos/naivebayes/pyunit_naivebayes_gridsearch_over_all_params_large.py
|
Python
|
apache-2.0
| 22,200
|
[
"Gaussian"
] |
2c619655f82813059da389c095ea686022899a53d5742b51e930bc83222b7604
|
"""
Tests for serial.py.
"""
import cPickle
from cStringIO import StringIO
import gzip
import shutil
import tempfile
import unittest
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit_utils import conformers, serial
class TestMolIO(unittest.TestCase):
"""
Base test class for molecule I/O.
"""
def setUp(self):
"""
Write SDF and SMILES molecules to temporary files.
"""
self.temp_dir = tempfile.mkdtemp()
# aspirin
self.aspirin = self._get_mol_from_smiles('CC(=O)OC1=CC=CC=C1C(=O)O',
'aspirin')
self.aspirin_h = Chem.AddHs(self.aspirin)
self.aspirin_sodium = self._get_mol_from_smiles(
'CC(=O)OC1=CC=CC=C1C(=O)[O-].[Na+]', 'aspirin sodium')
# levalbuterol (chiral)
self.levalbuterol = self._get_mol_from_smiles(
'CC(C)(C)NC[C@@H](C1=CC(=C(C=C1)O)CO)O', 'levalbuterol')
self.levalbuterol_hcl = self._get_mol_from_smiles(
'CC(C)(C)NC[C@@H](C1=CC(=C(C=C1)O)CO)O.Cl',
'levalbuterol hydrochloride')
self.ref_mols = [self.aspirin, self.levalbuterol]
self.reader = serial.MolReader(compute_2d_coords=False)
def _get_mol_from_smiles(self, smiles, name=None):
"""
Construct a molecule from a SMILES string.
Molecules loaded from SMILES strings have zero conformers, but
molecules loaded from SDF blocks are treated as 3D and have one
conformer even if coordinates are not set. This method dumps the
molecule to SDF and loads it again to obtain a molecule with one
conformer.
Parameters
----------
smiles : str
SMILES string.
name : str, optional
Molecule name.
"""
mol = Chem.MolFromSmiles(smiles)
if name is not None:
mol.SetProp('_Name', name)
AllChem.Compute2DCoords(mol) # required to preserve stereo
sdf = Chem.MolToMolBlock(mol, includeStereo=True)
mol_with_conf = Chem.MolFromMolBlock(sdf)
return mol_with_conf
def tearDown(self):
"""
Clean up temporary files.
"""
shutil.rmtree(self.temp_dir)
def test_guess_mol_format(self):
"""
Test MolIO.guess_mol_format.
"""
mol_formats = {
'pkl': ['test.pkl', 'test.pkl.gz', 'test.test.pkl',
'test.test.pkl.gz'],
'sdf': ['test.sdf', 'test.sdf.gz', 'test.test.sdf',
'test.test.sdf.gz'],
'smi': ['test.smi', 'test.smi.gz', 'test.can', 'test.can.gz',
'test.ism', 'test.ism.gz', 'test.test.smi',
'test.test.smi.gz']
}
for mol_format in mol_formats.keys():
for filename in mol_formats[mol_format]:
assert self.reader.guess_mol_format(filename) == mol_format
def test_close_context(self):
"""
Make sure MolIO closes files it opened.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
self.reader.open(filename)
self.reader.close()
assert self.reader.f.closed
# also test the context manager
with self.reader.open(filename):
pass
assert self.reader.f.closed
def test_not_close_other(self):
"""
Make sure MolIO doesn't close files it didn't open.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename) as f:
reader = serial.MolReader(f, mol_format='sdf')
reader.close()
assert not f.closed
# also test the context manager
with open(filename) as g:
with serial.MolReader(g, mol_format='sdf'):
pass
assert not g.closed
class TestMolReader(TestMolIO):
"""
Test MolReader.
"""
def test_read_sdf(self):
"""
Read an SDF file.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
f.write(Chem.MolToMolBlock(self.aspirin))
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_sdf_gz(self):
"""
Read a compressed SDF file.
"""
_, filename = tempfile.mkstemp(suffix='.sdf.gz', dir=self.temp_dir)
with gzip.open(filename, 'wb') as f:
f.write(Chem.MolToMolBlock(self.aspirin))
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_smi(self):
"""
Read a SMILES file.
"""
self.aspirin.RemoveAllConformers() # SMILES are read without confs
_, filename = tempfile.mkstemp(suffix='.smi', dir=self.temp_dir)
with open(filename, 'wb') as f:
f.write(Chem.MolToSmiles(self.aspirin))
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_smi_title(self):
"""
Read a SMILES file with molecule titles.
"""
self.aspirin.RemoveAllConformers() # SMILES are read without confs
_, filename = tempfile.mkstemp(suffix='.smi', dir=self.temp_dir)
with open(filename, 'wb') as f:
f.write('{}\t{}'.format(Chem.MolToSmiles(self.aspirin), 'aspirin'))
self.reader.open(filename)
mols = self.reader.get_mols()
mol = mols.next()
assert mol.ToBinary() == self.aspirin.ToBinary()
assert mol.GetProp('_Name') == self.aspirin.GetProp('_Name')
def test_read_smi_gz(self):
"""
Read a compressed SMILES file.
"""
self.aspirin.RemoveAllConformers() # SMILES are read without confs
_, filename = tempfile.mkstemp(suffix='.smi.gz', dir=self.temp_dir)
with gzip.open(filename, 'wb') as f:
f.write(Chem.MolToSmiles(self.aspirin))
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_pickle(self):
"""
Read from a pickle.
"""
_, filename = tempfile.mkstemp(suffix='.pkl', dir=self.temp_dir)
with open(filename, 'wb') as f:
cPickle.dump([self.aspirin], f, cPickle.HIGHEST_PROTOCOL)
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_pickle_gz(self):
"""
Read from a compressed pickle.
"""
_, filename = tempfile.mkstemp(suffix='.pkl.gz', dir=self.temp_dir)
with gzip.open(filename, 'wb') as f:
cPickle.dump([self.aspirin], f, cPickle.HIGHEST_PROTOCOL)
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_file_like(self):
"""
Read from a file-like object.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
f.write(Chem.MolToMolBlock(self.aspirin))
with open(filename) as f:
reader = serial.MolReader(f, mol_format='sdf')
mols = reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_compressed_file_like(self):
"""
Read from a file-like object using gzip.
"""
_, filename = tempfile.mkstemp(suffix='.sdf.gz', dir=self.temp_dir)
with gzip.open(filename, 'wb') as f:
f.write(Chem.MolToMolBlock(self.aspirin))
with gzip.open(filename) as f:
reader = serial.MolReader(f, mol_format='sdf')
mols = reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_read_multiple_sdf(self):
"""
Read a multiple-molecule SDF file.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
for mol in self.ref_mols:
sdf = Chem.MolToMolBlock(mol)
f.write(sdf)
f.write('$$$$\n') # add molecule delimiter
self.reader.open(filename)
mols = self.reader.get_mols()
mols = list(mols)
assert len(mols) == 2
for i in xrange(len(mols)):
assert mols[i].ToBinary() == self.ref_mols[i].ToBinary()
def test_read_multiple_smiles(self):
"""
Read a multiple-molecule SMILES file.
"""
ref_mols = []
for mol in self.ref_mols:
mol = Chem.MolFromSmiles(Chem.MolToSmiles(mol))
ref_mols.append(mol)
_, filename = tempfile.mkstemp(suffix='.smi', dir=self.temp_dir)
with open(filename, 'wb') as f:
for mol in self.ref_mols:
smiles = Chem.MolToSmiles(mol)
name = mol.GetProp('_Name')
f.write('{}\t{}\n'.format(smiles, name))
self.reader.open(filename)
mols = self.reader.get_mols()
mols = list(mols)
assert len(mols) == 2
for i in xrange(len(mols)):
assert mols[i].ToBinary() == ref_mols[i].ToBinary()
def test_read_multiconformer(self):
"""
Read a multiconformer SDF file.
"""
# generate conformers
engine = conformers.ConformerGenerator(max_conformers=3,
pool_multiplier=1)
ref_mol = engine.generate_conformers(self.aspirin)
assert ref_mol.GetNumConformers() > 1
# write to disk
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
for conf in ref_mol.GetConformers():
f.write(Chem.MolToMolBlock(ref_mol, confId=conf.GetId()))
f.write('$$$$\n') # add molecule delimiter
# compare
self.reader.open(filename)
mols = self.reader.get_mols()
mols = list(mols)
assert len(mols) == 1
# FIXME get ToBinary test to work
# assert mols[0].ToBinary() == ref_mol.ToBinary()
assert Chem.MolToMolBlock(mols[0]) == Chem.MolToMolBlock(ref_mol)
def test_read_multiple_multiconformer(self):
"""
Read a multiconformer SDF file containing multiple molecules.
"""
# generate conformers
ref_mols = []
engine = conformers.ConformerGenerator(max_conformers=3,
pool_multiplier=1)
for mol in self.ref_mols:
expanded = engine.generate_conformers(mol)
assert expanded.GetNumConformers() > 1
ref_mols.append(expanded)
# write to disk
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
for mol in ref_mols:
for conf in mol.GetConformers():
f.write(Chem.MolToMolBlock(mol, includeStereo=1,
confId=conf.GetId()))
f.write('$$$$\n') # add molecule delimiter
# compare
self.reader.open(filename)
mols = self.reader.get_mols()
mols = list(mols)
assert len(mols) == 2
for mol, ref_mol in zip(mols, ref_mols):
# FIXME get ToBinary test to work
# assert mol.ToBinary() == ref_mol.ToBinary()
assert Chem.MolToMolBlock(
mol, includeStereo=1) == Chem.MolToMolBlock(ref_mol,
includeStereo=1)
def test_are_same_molecule(self):
"""
Test MolReader.are_same_molecule.
"""
assert self.reader.are_same_molecule(self.aspirin, self.aspirin)
assert not self.reader.are_same_molecule(self.aspirin,
self.levalbuterol)
def test_no_remove_hydrogens(self):
"""
Test hydrogen retention.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
f.write(Chem.MolToMolBlock(self.aspirin_h))
reader = serial.MolReader(remove_hydrogens=False, remove_salts=False)
reader.open(filename)
mols = reader.get_mols()
# FIXME get ToBinary test to work
# assert mols.next().ToBinary() == self.aspirin_h.ToBinary()
assert Chem.MolToMolBlock(mols.next()) == Chem.MolToMolBlock(
self.aspirin_h)
def test_remove_hydrogens(self):
"""
Test hydrogen removal.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
f.write(Chem.MolToMolBlock(self.aspirin_h))
reader = serial.MolReader(remove_hydrogens=True)
reader.open(filename)
mols = reader.get_mols()
assert mols.next().ToBinary() == self.aspirin.ToBinary()
def test_remove_salts(self):
"""
Test salt removal.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
for mol in [self.aspirin_sodium, self.levalbuterol_hcl]:
f.write(Chem.MolToMolBlock(mol))
f.write('$$$$\n') # molecule delimiter
ref_mols = [self.aspirin_sodium, self.levalbuterol_hcl]
self.reader = serial.MolReader(remove_salts=True)
self.reader.open(filename)
mols = self.reader.get_mols()
mols = list(mols)
assert len(mols) == 2
for mol, ref_mol in zip(mols, ref_mols):
assert mol.GetNumAtoms() < ref_mol.GetNumAtoms()
desalted = self.reader.clean_mol(ref_mol)
assert mol.ToBinary() == desalted.ToBinary()
def test_no_remove_salts(self):
"""
Test salt retention.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
for mol in [self.aspirin_sodium, self.levalbuterol_hcl]:
f.write(Chem.MolToMolBlock(mol))
f.write('$$$$\n') # molecule delimiter
ref_mols = [self.aspirin_sodium, self.levalbuterol_hcl]
self.reader = serial.MolReader(remove_salts=False)
self.reader.open(filename)
mols = self.reader.get_mols()
mols = list(mols)
assert len(mols) == 2
self.reader = serial.MolReader(remove_salts=True)
for mol, ref_mol in zip(mols, ref_mols):
assert mol.ToBinary() == ref_mol.ToBinary()
desalted = self.reader.clean_mol(ref_mol)
assert mol.GetNumAtoms() > desalted.GetNumAtoms()
def test_iterator(self):
"""
Test MolWriter.__iter__.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
for mol in self.ref_mols:
f.write(Chem.MolToMolBlock(mol))
f.write('$$$$\n') # molecule delimiter
self.reader.open(filename)
for i, mol in enumerate(self.reader):
assert mol.ToBinary() == self.ref_mols[i].ToBinary()
def test_context_manager(self):
"""
Test using 'with' statement to read molecules.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with open(filename, 'wb') as f:
for mol in self.ref_mols:
f.write(Chem.MolToMolBlock(mol))
f.write('$$$$\n') # molecule delimiter
with self.reader.open(filename) as reader:
for i, mol in enumerate(reader):
assert mol.ToBinary() == self.ref_mols[i].ToBinary()
def test_skip_failures(self):
"""
Test skip read failures.
"""
smiles = 'CO(C)C'
reader = serial.MolReader(StringIO(smiles), 'smi')
mols = list(reader.get_mols())
assert len(mols) == 0
def test_is_a_salt(self):
"""
Test that a molecule that _is_ a salt is not returned empty.
"""
smiles = 'C(=CC(=O)O)C(=O)O'
reader = serial.MolReader(StringIO(smiles), 'smi', remove_salts=True)
mols = list(reader.get_mols())
assert len(mols) == 1 and mols[0].GetNumAtoms()
def test_read_multiple_pickles(self):
"""
Test reading a file containing multiple pickles. This can occur if
MolWriter.write is called multiple times.
"""
_, filename = tempfile.mkstemp(suffix='.pkl', dir=self.temp_dir)
with serial.MolWriter().open(filename) as writer:
writer.write([self.aspirin])
writer.write([self.levalbuterol])
with self.reader.open(filename) as reader:
mols = list(reader)
assert len(mols) == 2
assert mols[0].ToBinary() == self.aspirin.ToBinary()
assert mols[1].ToBinary() == self.levalbuterol.ToBinary()
class TestMolWriter(TestMolIO):
"""
Test MolWriter.
"""
def setUp(self):
"""
Add writer to inherited setup.
"""
super(TestMolWriter, self).setUp()
self.writer = serial.MolWriter()
self.aspirin_sdf = Chem.MolToMolBlock(self.aspirin)
self.aspirin_smiles = Chem.MolToSmiles(self.aspirin) + '\taspirin'
def test_write_sdf(self):
"""
Write an SDF file.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
self.writer.open(filename)
self.writer.write([self.aspirin])
self.writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
# compare molecules
assert mols.next().ToBinary() == self.aspirin.ToBinary()
# compare files
with open(filename) as f:
data = f.read()
assert data == self.aspirin_sdf + '$$$$\n'
def test_write_sdf_gz(self):
"""
Write a compressed SDF file.
"""
_, filename = tempfile.mkstemp(suffix='.sdf.gz', dir=self.temp_dir)
self.writer.open(filename)
self.writer.write([self.aspirin])
self.writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
# compare molecules
assert mols.next().ToBinary() == self.aspirin.ToBinary()
# compare files
with gzip.open(filename) as f:
data = f.read()
assert data == self.aspirin_sdf + '$$$$\n'
def test_write_smiles(self):
"""
Write a SMILES file.
"""
_, filename = tempfile.mkstemp(suffix='.smi', dir=self.temp_dir)
self.writer.open(filename)
self.writer.write([self.aspirin])
self.writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
# compare molecules
self.aspirin.RemoveAllConformers() # SMILES are read without confs
assert mols.next().ToBinary() == self.aspirin.ToBinary()
# compare files
with open(filename) as f:
data = f.read()
assert data.strip() == self.aspirin_smiles
def test_write_smiles_gz(self):
"""
Write a compressed SMILES file.
"""
_, filename = tempfile.mkstemp(suffix='.smi.gz', dir=self.temp_dir)
self.writer.open(filename)
self.writer.write([self.aspirin])
self.writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
# compare molecules
self.aspirin.RemoveAllConformers() # SMILES are read without confs
assert mols.next().ToBinary() == self.aspirin.ToBinary()
# compare files
with gzip.open(filename) as f:
data = f.read()
assert data.strip() == self.aspirin_smiles
def test_write_pickle(self):
"""
Write a pickle.
"""
_, filename = tempfile.mkstemp(suffix='.pkl', dir=self.temp_dir)
self.writer.open(filename)
self.writer.write([self.aspirin])
self.writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
# compare molecules
assert mols.next().ToBinary() == self.aspirin.ToBinary()
# compare files
with open(filename) as f:
data = f.read()
assert data == cPickle.dumps([self.aspirin],
cPickle.HIGHEST_PROTOCOL)
def test_write_pickle_gz(self):
"""
Write a compressed pickle.
"""
_, filename = tempfile.mkstemp(suffix='.pkl.gz', dir=self.temp_dir)
self.writer.open(filename)
self.writer.write([self.aspirin])
self.writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
# compare molecules
assert mols.next().ToBinary() == self.aspirin.ToBinary()
# compare files
with gzip.open(filename) as f:
data = f.read()
assert data == cPickle.dumps([self.aspirin],
cPickle.HIGHEST_PROTOCOL)
def test_stereo_setup(self):
"""
Make sure chiral reference molecule is correct.
"""
smiles = Chem.MolToSmiles(self.levalbuterol, isomericSmiles=True)
assert '@' in smiles # check for stereochemistry flag
# check that removing stereochemistry changes the molecule
original = self.levalbuterol.ToBinary()
AllChem.RemoveStereochemistry(self.levalbuterol)
assert self.levalbuterol.ToBinary() != original
def test_stereo_sdf(self):
"""
Test stereochemistry preservation when writing to SDF.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
writer = serial.MolWriter(stereo=True)
writer.open(filename)
writer.write([self.levalbuterol])
writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == self.levalbuterol.ToBinary()
def test_stereo_smi(self):
"""
Test stereochemistry preservation when writing to SMILES.
"""
# FIXME avoid this and use self.levalbuterol.RemoveAllConformers()
ref_mol = Chem.MolFromSmiles(Chem.MolToSmiles(self.levalbuterol,
isomericSmiles=True))
_, filename = tempfile.mkstemp(suffix='.smi', dir=self.temp_dir)
writer = serial.MolWriter(stereo=True)
writer.open(filename)
writer.write([self.levalbuterol])
writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
assert mols.next().ToBinary() == ref_mol.ToBinary()
def test_no_stereo_sdf(self):
"""
Test stereochemistry removal when writing to SDF.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
writer = serial.MolWriter(stereo=False)
writer.open(filename)
writer.write([self.levalbuterol])
writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
mol = mols.next()
# make sure the written molecule differs from the reference
assert mol.ToBinary() != self.levalbuterol.ToBinary()
# check again after removing stereochemistry
AllChem.RemoveStereochemistry(self.levalbuterol)
# FIXME get ToBinary test to work
# assert mol.ToBinary() == self.levalbuterol.ToBinary()
assert Chem.MolToMolBlock(
mol, includeStereo=True) == Chem.MolToMolBlock(
self.levalbuterol, includeStereo=True)
def test_no_stereo_smiles(self):
"""
Test stereochemistry removal when writing to SMILES.
"""
_, filename = tempfile.mkstemp(suffix='.smi', dir=self.temp_dir)
writer = serial.MolWriter(stereo=False)
writer.open(filename)
writer.write([self.levalbuterol])
writer.close()
self.reader.open(filename)
mols = self.reader.get_mols()
mol = mols.next()
# make sure the written molecule differs from the reference
assert mol.ToBinary() != self.levalbuterol.ToBinary()
# check again after removing stereochemistry
AllChem.RemoveStereochemistry(self.levalbuterol)
# FIXME get ToBinary test to work
# assert mol.ToBinary() == self.levalbuterol.ToBinary()
assert Chem.MolToSmiles(mol, isomericSmiles=True) == Chem.MolToSmiles(
self.levalbuterol, isomericSmiles=True)
def test_context_manager(self):
"""
Test use of 'with' statement to write molecules.
"""
_, filename = tempfile.mkstemp(suffix='.sdf', dir=self.temp_dir)
with self.writer.open(filename) as writer:
writer.write([self.aspirin])
self.reader.open(filename)
mols = self.reader.get_mols()
# compare molecules
assert mols.next().ToBinary() == self.aspirin.ToBinary()
# compare files
with open(filename) as f:
data = f.read()
assert data == self.aspirin_sdf + '$$$$\n'
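if __name__ == '__main__':
    # A minimal runner sketch (an assumed convenience, not part of the
    # original module, which leaves discovery to an external test runner
    # such as nose): run all TestCase classes defined above.
    unittest.main()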
|
skearnes/rdkit-utils
|
rdkit_utils/tests/test_serial.py
|
Python
|
bsd-3-clause
| 25,578
|
[
"RDKit"
] |
d9599d0027567c2f64b6d4e89b2a98de1b444680bf0084f5caf546c706b4083f
|
# Orca
#
# Copyright 2014 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Customized support for spellcheck in Gedit."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2014 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import orca.spellcheck as spellcheck
class SpellCheck(spellcheck.SpellCheck):
def __init__(self, script):
super(SpellCheck, self).__init__(script)
def _isCandidateWindow(self, window):
if not (window and window.getRole() == pyatspi.ROLE_FRAME):
return False
isMenuBar = lambda x: x and x.getRole() == pyatspi.ROLE_MENU_BAR
if pyatspi.findDescendant(window, isMenuBar):
return False
return True
def _findChangeToEntry(self, root):
isEntry = lambda x: x and x.getRole() == pyatspi.ROLE_TEXT \
and x.getState().contains(pyatspi.STATE_SINGLE_LINE)
return pyatspi.findDescendant(root, isEntry)
def _findErrorWidget(self, root):
isPanel = lambda x: x and x.getRole() == pyatspi.ROLE_PANEL
panel = pyatspi.findAncestor(self._changeToEntry, isPanel)
if not panel:
return None
isError = lambda x: x and x.getRole() == pyatspi.ROLE_LABEL \
and not ":" in x.name and not x.getRelationSet()
return pyatspi.findDescendant(panel, isError)
def _findSuggestionsList(self, root):
isTable = lambda x: x and x.getRole() == pyatspi.ROLE_TABLE \
and 'Selection' in x.get_interfaces()
return pyatspi.findDescendant(root, isTable)
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/scripts/apps/gedit/spellcheck.py
|
Python
|
gpl-3.0
| 2,365
|
[
"ORCA"
] |
6ab1317994e98eca91531342f0b13ca01f960847af9f88f9ecbb981847314f2c
|
from __future__ import division
from __future__ import print_function
from past.utils import old_div
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
# In this test, we check and make sure that we can do scoring
def test_gam_model_predict():
h2o_data = h2o.import_file(
path=pyunit_utils.locate("smalldata/glm_test/gaussian_20cols_10000Rows.csv"))
h2o_data["C1"] = h2o_data["C1"].asfactor()
h2o_data["C2"] = h2o_data["C2"].asfactor()
myY = "C21"
numKnots = [8,8,8]
x = list(set(h2o_data.names) - {"response", "C11", "C12", "C13"})
h2o_model_lambdaMinRatio10 = H2OGeneralizedAdditiveEstimator(family="gaussian", gam_columns=["C11", "C12", "C13"],
scale = [0.01, 0.01, 0.01], num_knots=numKnots, standardize=True,
lambda_search=True, lambda_min_ratio = 10, solver="irlsm")
h2o_model_lambdaMinRatio10.train(x=x, y=myY, training_frame=h2o_data)
h2o_model_lambdaMinRatio01 = H2OGeneralizedAdditiveEstimator(family="gaussian", gam_columns=["C11", "C12", "C13"],
scale = [0.01, 0.01, 0.01], num_knots=numKnots,
standardize=True, lambda_search=True,
lambda_min_ratio = 0.1, solver="irlsm")
h2o_model_lambdaMinRatio01.train(x=x, y=myY, training_frame=h2o_data)
    assert h2o_model_lambdaMinRatio10.mse() >= h2o_model_lambdaMinRatio01.mse(), "Gam model with lambda_min_ratio=10 performs " \
                                                                                 "better than lambda_min_ratio=0.1. Shame!"
if __name__ == "__main__":
pyunit_utils.standalone_test(test_gam_model_predict)
else:
test_gam_model_predict()
|
h2oai/h2o-3
|
h2o-py/tests/testdir_algos/gam/pyunit_PUBDEV_7708_check_lambda_min_ratios.py
|
Python
|
apache-2.0
| 1,958
|
[
"Gaussian"
] |
c02d8a0c6bf9dafebad2070dc60375df3f1f823d95a0748f84920d4876673cf2
|
"""Generating random numbers."""
import numpy as np
def test_run():
# Generate an array full of random numbers, uniformly distributed from [0.0, 1.0)
print np.random.random((5, 4))
# Sample from a Gaussian distribution
    print np.random.normal(size = (2, 3))  # standard normal (mean = 0, s.d. = 1)
print np.random.normal(50, 10, size = (2, 3)) # mean = 50, s.d. = 10
# Random integers
print np.random.randint(10)
print np.random.randint(0, 10) # a single integer in [0, 10)
print np.random.randint(0, 10, size = 5) # 5 random integers in [0, 10)
print np.random.randint(0, 10, size = (2, 3)) # 2x3 array
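    # A small addition for illustration (not part of the original lesson):
    # seeding the generator makes the draws above reproducible across runs.
    np.random.seed(693)  # any fixed integer works
    print np.random.randint(0, 10, size = 5)  # same 5 integers on every run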
if __name__ == "__main__":
test_run()
|
bluemurder/mlfl
|
udacity course code/01-03-numpyrandomarrays.py
|
Python
|
mit
| 737
|
[
"Gaussian"
] |
775ed1eb74614c1dc7023a76720425bef6c1f5acba53e17614d240a0d89f819d
|
# Copyright (c) 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from absl import app
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
import test_util
# Detect if history deletion is enabled or disabled and print the result.
# The way to check is:
# - visit chrome://history;
# - get the first history item;
# - check the checkbox. If history deletion is disabled, then the check
# box has attribute 'disabled';
# TODO(crbug.com/986444): move those helper methods into test_util.py once
def getElementFromShadowRoot(driver, element, selector):
if element is None:
return None
else:
return driver.execute_script(
"return arguments[0].shadowRoot.querySelector(arguments[1])", element,
selector)
def main(argv):
driver = test_util.create_chrome_webdriver()
try:
driver.get('http://www.google.com')
driver.get('chrome://history')
# wait for page to be loaded
wait = WebDriverWait(driver, 10)
wait.until(
expected_conditions.visibility_of_element_located((By.TAG_NAME,
'history-app')))
history_app = driver.find_element_by_css_selector("history-app")
    history_list = getElementFromShadowRoot(driver, history_app, "history-list")
    # get the checkbox of the first history item
    history_item = getElementFromShadowRoot(driver, history_list,
                                            'history-item')
    checkbox = getElementFromShadowRoot(driver, history_item,
                                        '#main-container cr-checkbox')
disabled = checkbox.get_attribute('disabled')
if disabled == 'true':
print('DISABLED')
else:
print('ENABLED')
finally:
driver.quit()
if __name__ == '__main__':
app.run(main)
|
ric2b/Vivaldi-browser
|
chromium/chrome/test/enterprise/e2e/policy/allow_deleting_browser_history/allow_deleting_browser_history_webdriver_test.py
|
Python
|
bsd-3-clause
| 1,991
|
[
"VisIt"
] |
08f1465a129b57a9b6434aded573276e061fc0c61c4194e70090debfda95cba3
|
# test on word2vec package
import os
import sys
reload(sys)
import random
import multiprocessing
import gensim, logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
import numpy as np
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
class dir_sentences(object):
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for filename in os.listdir(self.dirname):
for line in open(os.path.join(self.dirname, filename)):
yield line.split()
class w2v_model(object):
""" word2vec, doc2vec """
def __init__(self):
self.model = Word2Vec()
def train(self, sentences):
        # min_count=10 ignores rare words, workers=8 parallel threads, size=300 vector dimensions, window=5 context
bigram_transformer = gensim.models.Phrases(sentences)
self.model = Word2Vec(bigram_transformer[sentences], min_count=10, size=300 ,window=5, workers=8)
self.model.save('models/mymodel')
#self.model.save_word2vec_format('models/vector')
#self.model.init_sims(replace=True)
#self.model.build_vocab()
def ctrain(self, sentences):
self.model = Word2Vec.load('models/mymodel')
self.model.train(sentences)
self.model.save_word2vec_format('models/vector')
def accuracy(self):
self.model.accuracy('data/questions-words.txt')
# merge wiki data with text8
wiki = dir_sentences('data/wikipedia_sci')
text8 = LineSentence('data/text8')
pre_model = w2v_model()
pre_model.train(text8)
#pre_model.accuracy()
pre_model.ctrain(wiki)
pre_model.accuracy()
#pre_model.save(output)
# load pre-train model from GloVe
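# A minimal usage sketch (hypothetical query words; assumes the
# 'models/mymodel' file saved by w2v_model.train above exists):
trained = Word2Vec.load('models/mymodel')
print trained.most_similar('physics', topn=5)     # nearest neighbours by cosine
print trained.similarity('physics', 'chemistry')  # pairwise cosine similarity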
|
sufengniu/kaggle_ai2
|
word_embedding.py
|
Python
|
mit
| 1,611
|
[
"NEURON"
] |
04bb4c2745793acb05199e906b185ea468a6d5ee819ab2facfd37a6fe7621a5b
|
import os
from os.path import splitext, basename, abspath, dirname, join
import h5py
import logging
# noinspection PyUnresolvedReferences
from rdkit import Chem
from datatypes.objects import Properties, Molecule
from datatypes.strings import Smiles
def set_createdb_parser(root):
createdb = root.add_parser ( "createdb" )
createdb.set_defaults ( action='createdb' )
createdb.add_argument ( '--file', type=str, help="An sdf file with molecules",required=True)
createdb.add_argument ( '--properties', type=str, nargs='+', help="The properties for prediction",required = True)
createdb.add_argument ( '--output', type=str,help="A path to the database created",required=True)
def create_hdf_database(**kwargs):
assert kwargs['properties']
if kwargs['output']:
_output_ = kwargs['output']
base = _output_
else:
base = splitext ( basename ( kwargs['file'] ) )[0]
rundir = dirname ( abspath ( __file__ ) )
_output_ = join ( rundir, base )
try: os.mkdir ( _output_ )
except: pass
suppl = Chem.SDMolSupplier ( kwargs['file'] )
mol_and_prop = [(mol,{prop_name:mol.GetProp ( prop_name )for prop_name in kwargs['properties']}) for mol in suppl]
total = len ( mol_and_prop )
logging.info ( "Readed: {} molecules from: {} with property {}"
.format ( total, kwargs['file'] , kwargs['properties'] ) )
for i in range ( total ):
with h5py.File ( join ( _output_, '{}_{}_zzz.hdf'.format ( base, i ) ), 'w' ) as f:
logging.info ( "Creating file: {}_{}_zzz.hdf".format ( base, i ) )
rdkit_mol, prop = mol_and_prop[i]
rdkit_mol = Chem.AddHs ( rdkit_mol )
prop = Properties(**{"properties":prop})
smiles = Smiles(Chem.MolToSmiles ( rdkit_mol, isomericSmiles=True ))
mol = Molecule(**{"smiles":smiles,"properties":prop})
mol.dump_to_hdf(f)
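if __name__ == '__main__':
    # A minimal driver sketch (assumed wiring, not part of the original
    # module): hook the 'createdb' subparser defined above into a parser
    # and forward the parsed options to create_hdf_database.
    import argparse
    parser = argparse.ArgumentParser()
    set_createdb_parser(parser.add_subparsers())
    args = parser.parse_args()
    if args.action == 'createdb':
        create_hdf_database(**vars(args))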
|
sergsb/clever
|
processing/hdf5creator.py
|
Python
|
apache-2.0
| 1,931
|
[
"RDKit"
] |
0edead7d5f9a09f7a2950402be26924dcf3e714e9edc8bb50443568ef085c70b
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Channel for asiansubita
# http://www.mimediacenter.info/foro/viewforum.php?f=36
# ------------------------------------------------------------
import re
import urlparse
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoSod
from servers import adfly
__channel__ = "asiansubita"
__category__ = "F,A"
__type__ = "generic"
__title__ = "asiansubita"
__language__ = "IT"
host = "http://asiansubita.altervista.org"
def isGeneric():
return True
def mainlist(item):
logger.info("streamondemand.asiansubita mainlist")
itemlist = [Item(channel=__channel__,
title="[COLOR azure]Home[/COLOR]",
action="peliculas",
url=host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=__channel__,
title="[COLOR azure]Genere - Nazione[/COLOR]",
action="categorias",
url=host,
thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png"),
Item(channel=__channel__,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
extra="movie",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
def search(item, texto):
logger.info("[asiansubita.py] " + item.url + " search " + texto)
item.url = host + "/?s=" + texto
try:
return peliculas(item)
    # Catch the exception so that a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def peliculas(item):
logger.info("streamondemand.asiansubita peliculas")
itemlist = []
    # Download the page
data = scrapertools.cache_page(item.url)
    # Extract the entries (folders)
patron = '<!-- Post Type 3 -->\s*'
patron += '<a.*?href="(.*?)" title="(.*?)" rel="bookmark">.*?<img src="(.*?)".*?<div class="entry-summary">\s*'
patron += '(.*?)<a class="more-link"'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot in matches:
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
title = scrapertools.decodeHtmlentities(scrapedtitle)
itemlist.append(infoSod(
Item(channel=__channel__,
action="findvideos",
title=title,
url=scrapedurl,
thumbnail=scrapedthumbnail,
fulltitle=title,
show=title,
plot=scrapedplot,
viewmode="movie_with_plot"), tipo='movie'))
    # Pagination
patron = '<div class="nav-previous"><a href="(.*?)" ><span class="meta-nav">←</span> Articoli precedenti</a></div>'
next_page = scrapertools.find_single_match(data, patron)
if next_page != "":
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR orange]Post piu' vecchi >>[/COLOR]",
url=next_page,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png"))
return itemlist
def categorias(item):
logger.info("streamondemand.asiansubita categorias")
itemlist = []
data = scrapertools.cache_page(item.url)
# The categories are the options for the combo
patron = '<li id="menu-item-[^>"]+" class="menu-item menu-item-type-taxonomy menu-item-object-category menu-item-[^>"]+"><a href="(.*?)">(.*?)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=urlparse.urljoin(host, scrapedurl)))
return itemlist
def findvideos(item):
logger.info("[asiansubita.py] findvideos")
itemlist = []
    # Download the page
data = scrapertools.cache_page(item.url)
    # Extract the data
thumbnail = scrapertools.find_single_match(data, 'src="([^"]+)"[^<]+</p>')
plot = scrapertools.find_single_match(data, '<p style="text-align: justify;">(.*?)</p>')
plot = scrapertools.decodeHtmlentities(plot)
patron = 'href="(http://adf.ly/[^"]+)" target="_blank">([^<]+)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
title = "[" + scrapedtitle + "] " + item.fulltitle
itemlist.append(
Item(channel=__channel__,
action="play",
title=title,
url=scrapedurl,
thumbnail=thumbnail,
plot=plot,
fulltitle=item.fulltitle,
show=item.show))
return itemlist
def play(item):
logger.info("[asiansubita.py] play")
data = adfly.get_long_url(item.url)
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.show
videoitem.fulltitle = item.fulltitle
videoitem.show = item.show
videoitem.thumbnail = item.thumbnail
videoitem.channel = __channel__
return itemlist
|
costadorione/purestream
|
channels/asiansubita.py
|
Python
|
gpl-3.0
| 5,847
|
[
"ADF"
] |
04732b23197e2ceb09be0a131c0d0c4c9ffc4846bab27315b5f322856c304caa
|
#!/usr/bin/python
"""
" @section DESCRIPTION
" RF model classes
"""
import os
import copy
import numpy as np
from operator import mul
from time import time, sleep
from scipy.ndimage import correlate as im_corr
import plotting.plotting_functions as plt_fun
import rf_obj_funs_and_grads as rf_obj_fun
import rf_optimization as rf_opt
from rf_field_format import Field
from rf_piecewise_lin_act_fun import ActFun
from rf_parameters import Parameters
from rf_evaluation import Evaluation
from rf_helper import add_fake_dimension, get_insignificant_basis,\
inner_product, outer_product, sta_and_stc, load_saved_models
from rf_nonlinearity import Nonlinearity
class RFModel(object):
"""Base class for different receptive field models"""
def __init__(self):
self.params = []
self.multilin = False
self.solver = None
self.init_method = None
# Receptive fields
self.n_rfs = None
self.rf_type = 'lin_rfs'
self.rf_truncated = False
self.rf_significance = []
self.qn_poly_params = np.array([])
# Context fields
self.cf_zero_origin = False
self.cf_mapping = None
self.cf_type = None
self.cf_alignment = 'edge'
self.cf_act_fun = None
self.pos_sol = None
# Regularization
self.reg_c_init = None
self.reg_type = None
# Nonlinearity
self.nonlinearity = []
self.dist_res = 20 # histogram resolution in bins per dimension
self.name = 'Base'
self.trained = []
# Evaluation
self.obj_fun_val = []
self.eval_train = []
self.eval_test = []
# Training meta data
self.data = None
self.basis_fun = None
self.labels = None
self.ticks = None
self.dt = None
def __str__(self):
return "{} model".format(self.name)
def initialize_params(self, rf_shape, cf_shape, load_path=None):
# Default initialization
params = Parameters()
if self.name[:3] == 'MID':
rf_init_values = 'randn'
cf_init_values = 'randn'
else:
rf_init_values = 'zeros'
cf_init_values = 'zeros'
rf_multilin = self.multilin
params.init_rfs(rf_shape, self.reg_c_init,
type=self.rf_type,
n_rfs=self.n_rfs,
multilin=rf_multilin,
reg_type=self.reg_type,
init_values=rf_init_values)
cf_multilin = self.multilin
params.init_cfs(cf_shape, self.cf_mapping, self.cf_type,
self.reg_c_init,
multilin=cf_multilin,
reg_type=self.reg_type,
init_values=cf_init_values,
zero_origin=self.cf_zero_origin,
alignment=self.cf_alignment)
params.init_act_funs(self.cf_act_fun, self.reg_c_init)
# RF bias not used with MID models
if self.name[:3] == 'MID':
for rf in params.rfs:
rf.bias = 0
# Try to load parameters if requested
params_loaded = False
split_idx = len(self.params)
if self.init_method is not None:
# Load appropriate models
loaded_models = load_saved_models(load_path, tag=self.init_method)
# Check if these models are trained on the same data,
# using the same basis function, and for the same split
for model in loaded_models:
# Do not load multilinear models
if not model.multilin:
same_data = model.data == self.data
same_basis_fun = model.basis_fun == self.basis_fun
same_split = split_idx < len(model.params)
if same_data and same_basis_fun and same_split:
params_tmp = copy.deepcopy(model.params[split_idx])
params_loaded = True
print "Model parameters loaded!"
# Extract loaded parameters
if params_loaded:
for rf, rf_tmp in zip(params.rfs, params_tmp.rfs):
rf.field = rf_tmp.field
rf.parts = rf_tmp.parts
if rf.multilin and len(rf.parts) == 0:
rf.make_separable(self.reg_c_init, self.reg_type)
for cf, cf_tmp in zip(params.cfs, params_tmp.cfs):
cf.field = cf_tmp.field
cf.parts = cf_tmp.parts
if cf.multilin and len(cf.parts) == 0:
cf.make_separable(self.reg_c_init, self.reg_type)
if hasattr(params_tmp, 'cf_act_funs'):
for act_fun, act_fun_tmp in zip(params.cf_act_funs,
params_tmp.cf_act_funs):
act_fun.base_peaks = act_fun_tmp.base_peaks
act_fun.alpha = act_fun_tmp.alpha
if hasattr(params_tmp, 'qn_poly'):
params.qn_poly = params_tmp.qn_poly
elif hasattr(model, 'qn_poly_params'): # backward compatibility
params.qn_poly = model.qn_poly_params[split_idx]
elif hasattr(model, 'poly_params'): # backward compatibility
params.qn_poly = model.poly_params[split_idx]
else:
if self.cf_type == 'ctx_nl' or self.cf_type == 'subunit':
raise Exception("{} type context models require a "
"loaded ctx type model as an initial "
"guess".format(self.cf_type))
self.params.append(params)
self.nonlinearity.append(Nonlinearity(self.dist_res))
self.eval_train.append(Evaluation())
self.eval_test.append(Evaluation())
self.trained.append(False)
def train(self, x, y, rf_win_size, split_idx=0, verbose=1):
"""Trains the RF model
Args:
x: input array
y: output array
rf_win_size: rf window size in time bins
split_idx: fold index when estimating models for each KFold
verbose: show output if greater than 0
Returns:
Raises:
"""
self._check_initialization(split_idx)
self._estimate_nonlinearity(x, y, split_idx)
self.trained[split_idx] = True
def predict(self, x, split_idx=0):
""" Predict response to input
IMPORTANT!
Predictions only use the first two receptive fields on the LN-model.
Args:
x: input array
split_idx: use parameters from specific KFold
Returns:
y_hat: model output
Raises:
No parameters found
Model not trained
"""
self._check_initialization(split_idx)
self._check_training(split_idx)
# Calculate the similarity score and make a prediction
nonlinearity = self.nonlinearity[split_idx]
z = self._calculate_z(x, split_idx=split_idx)
y_hat = nonlinearity.predict(z)
return y_hat
def set_meta_data(self, name, basis_fun, labels=[], ticks=[], dt=1.):
self.data = name
self.basis_fun = basis_fun
self.labels = labels
self.ticks = ticks
self.dt = dt
self.name += '_' + basis_fun[:3]
def evaluate(self, x, y, set, split_idx):
""" Evaluate a trained model on data
IMPORTANT!
Evaluation only uses the first two receptive fields on the LN-model.
:param x:
:param y:
:param set:
:return:
"""
self._check_initialization(split_idx)
self._check_training(split_idx)
y_mean = np.vstack(y.mean(axis=1))
y_sum = np.vstack(y.sum(axis=1))
z = self._calculate_z(x, split_idx=split_idx)
z_null = self._calculate_z_null(x, y_sum, split_idx=split_idx)
y_hat = self.predict(x, split_idx)
if set == 'train':
if len(self.eval_train) < split_idx + 1:
for fold_idx_tmp in range(len(self.eval_train), split_idx + 1):
self.eval_train.append(Evaluation())
self.eval_train[split_idx].evaluate_mi_stc(z, z_null, y_sum)
# self.eval_train[split_idx].evaluate_mi_qe(z, y_sum)
# self.eval_train[split_idx].evaluate_mi_raw(z, y_sum)
self.eval_train[split_idx].evaluate_r(y_hat, y)
elif set == 'test':
if len(self.eval_test) < split_idx + 1:
for fold_idx_tmp in range(len(self.eval_test), split_idx + 1):
self.eval_test.append(Evaluation())
self.eval_test[split_idx].evaluate_mi_stc(z, z_null, y_sum)
# self.eval_test[split_idx].evaluate_mi_qe(z, y_sum)
# self.eval_test[split_idx].evaluate_mi_raw(z, y_sum)
self.eval_test[split_idx].evaluate_r(y_hat, y)
def _add_obj_fun_val(self, obj_fun_val, split_idx):
if len(self.obj_fun_val) < split_idx + 1:
for fold_idx_tmp in range(len(self.obj_fun_val), split_idx + 1):
self.obj_fun_val.append(None)
self.obj_fun_val[split_idx] = obj_fun_val
def _add_rf_significancen(self, split_idx):
if len(self.rf_significance) < split_idx + 1:
for fold_idx_tmp in range(len(self.rf_significance), split_idx + 1):
self.rf_significance.append(None)
def _calculate_z(self, x, split_idx=0):
""" Calculate the similarity score vector z
:param x:
        :param split_idx:
:return z:
"""
# Get params for the selected fold
params = self.params[split_idx]
# Calculate the similarity score
if hasattr(params, 'rf_type') and params.rf_type == 'pos_inv_rfs':
z = rf_obj_fun.z_rf_pos_inv(x, params)
else:
# Get the full x-matrix (CF processed if CFs are used)
x_nd_full = rf_obj_fun.z_rf_der(x, params)
z = inner_product(x_nd_full, params.rfs)
if hasattr(params, 'rf_type') and params.rf_type == 'max_rfs':
z = z.max(axis=1).reshape(z.shape[0], 1)
elif hasattr(params, 'rf_type') and params.rf_type == 'qn_rfs':
z = z.sum(axis=1).reshape(z.shape[0], 1)
return z
def _calculate_z_null(self, x, y, split_idx=0):
""" Calculates the null similarity score vector z_null
:param x:
:param y:
:param split_idx:
:return z_null:
"""
# Get params for the selected fold
params = self.params[split_idx]
# Get the full x-matrix (CF processed if CFs are used)
x_nd_full = rf_obj_fun.z_rf_der(x, params)
# Get the null vector from STC analysis
rfs_null = []
null_fields = get_insignificant_basis(x, y, params.rfs[0].shape)
for null_field in null_fields:
rf_null = Field(params.rfs[0].shape, reg_c=None)
rf_null.field = null_field
rfs_null.append(rf_null)
# Calculate the null similarity score
z_null = inner_product(x_nd_full, rfs_null)
return z_null
def _estimate_nonlinearity(self, x, y, split_idx):
"""Estimate a general nonlinearity mapping z to spike probabilities
Args:
x: input array
y: output array
split_idx:
Returns:
Raises:
"""
# Calculate the similarity score
z = self._calculate_z(x, split_idx=split_idx)
# Non-linearity, P(spike|score)
self.nonlinearity[split_idx] = Nonlinearity(self.dist_res)
self.nonlinearity[split_idx].estimate(z, y, solver=self.solver)
def _check_initialization(self, split_idx):
if len(self.params) < split_idx + 1:
raise Exception("No parameters for split: {}".format(split_idx))
def _check_training(self, split_idx):
if not self.trained[split_idx]:
raise Exception("Model NOT trained on split: {}".format(split_idx))
def _print_model_info(self):
info = "{:25s}{:15s}{:15s}".format(
self.name, self.cf_mapping, str(self.multilin))
print info
# Only included for backwards compatibility
class LinModel(RFModel):
"""RF model class: Generalized Linear Model (GLM)"""
def __init__(self, params_dict):
"""configures the model according to provided parameters
Args:
params_dict: model parameters
Returns:
Raises:
Unknown optimization method
"""
        super(LinModel, self).__init__()
self.n_rfs = params_dict['n_rfs']
self.solver = params_dict['solver']
self.optimization = params_dict['optimization']
self.multilin = params_dict['multilin']
self.cf_mapping = params_dict['cf_mapping']
self.cf_alignment = params_dict['cf_alignment']
self.pos_sol = params_dict['pos_sol']
self.init_method = params_dict['init']
if self.solver == 'linreg':
self.name = 'LinReg'
elif self.solver == 'logreg':
self.name = 'LogReg'
elif self.solver == 'poireg':
self.name = 'PoiReg'
else:
raise Exception("Unknown solver: {}".format(self.solver))
if self.cf_mapping == 'same':
self.name += 'Ctx'
if not self.pos_sol:
self.name += 'Neg'
elif self.cf_mapping == 'dim0_split':
self.name += 'CtxDim0'
elif self.cf_mapping == 'dim1_split':
self.name += 'CtxDim1'
elif self.cf_mapping == 'dim2_split':
self.name += 'CtxDim2'
elif self.cf_mapping == 'level':
self.name += 'CtxInt'
if self.cf_alignment == 'center':
self.name += '_c'
self.reg_c_init = params_dict['reg_c_init']
self.reg_type = params_dict['reg_type']
self.name += '_' + self.reg_type
if self.multilin:
self.name += '_ml'
if self.optimization == 'gradient_descent':
self.name += '_gd'
self._print_model_info()
def train(self, x, y, rf_win_size, split_idx=0, verbose=1):
"""Trains the RF model
Args:
x: input array
y: output array
rf_win_size: rf window size in time bins
split_idx: training fold index
verbose: print progress
Returns:
Raises:
"""
# 1. Check initialization
self._check_initialization(split_idx)
# 2. Handle data with y-values from multiple trials
if self.solver == 'linreg':
y_tmp = np.vstack(y.mean(axis=1))
elif self.solver == 'logreg':
y_tmp = np.vstack(y.mean(axis=1))
elif self.solver == 'poireg':
y_tmp = np.vstack(y.sum(axis=1))
else:
raise Exception("Unknown solver: {}".format(self.solver))
# 3. Find receptive and context filters using the model dependent method
# OPTION 1. Alternating multiple RFs solver
if self.optimization == 'alternating_rfs':
opt_dict = {'solver': self.solver,
'verbose': verbose > 0}
self.params[split_idx] = rf_opt.alternating_solver_rfs(
x, y_tmp, self.params[split_idx], opt_dict)
# OPTION 2. Alternating context model solver
if self.optimization == 'alternating':
opt_dict = {'solver': self.solver,
'pos_sol': self.pos_sol,
'verbose': verbose > 0}
if self.multilin:
self.params[split_idx] = rf_opt.alternating_solver_ctx_multilin(
x, y_tmp, self.params[split_idx], opt_dict)
else:
self.params[split_idx] = rf_opt.alternating_solver_ctx(
x, y_tmp, self.params[split_idx], opt_dict)
# OPTION 3. Gradient descent
elif self.optimization == 'gradient_descent':
# Run gradient descent to find CF parameters
opt_dict = {'eta': 1e-6,
'alpha': 0.9,
'batch_frac': 1.0,
'solver': self.solver,
'verbose': verbose > 0}
self.params[split_idx] = rf_opt.gradient_descent(
x, y_tmp, self.params[split_idx], opt_dict)
        # Get the final objective function value
weights = np.ones(y_tmp.shape)
if self.solver == 'logreg':
weights[y_tmp > 1] = y_tmp[y_tmp > 1]
if self.solver == 'linreg':
obj_fun = rf_obj_fun.mean_squared_error
elif self.solver == 'logreg':
obj_fun = rf_obj_fun.neg_log_lik_bernoulli
elif self.solver == 'poireg':
obj_fun = rf_obj_fun.neg_log_lik_poisson
obj_fun_val = obj_fun(x, y_tmp, self.params[split_idx],
weights=weights,
reg_c=self.params[split_idx].reg_c)
self._add_obj_fun_val(obj_fun_val, split_idx)
# 4. Estimate the nonlinearity
self._estimate_nonlinearity(x, y_tmp, split_idx)
self.trained[split_idx] = True
def predict(self, x, split_idx=0):
""" Predict response to input
Args:
x: input array
Returns:
y_hat: model output
Raises:
Model not trained
"""
self._check_initialization(split_idx)
self._check_training(split_idx)
# Get the similarity score
z = self._calculate_z(x, split_idx=split_idx)
if self.solver == 'linreg':
y_hat = z
        elif self.solver == 'logreg':
y_hat = rf_obj_fun.inv_bernoulli_link(z)
elif self.solver == 'poireg':
y_hat = rf_obj_fun.inv_poisson_link(z)
return y_hat
class GenLinModel(RFModel):
"""RF model class: Generalized Linear Model (GLM)"""
def __init__(self, params_dict):
"""configures the model according to provided parameters
Args:
params_dict: model parameters
Returns:
Raises:
Unknown optimization method
"""
super(GenLinModel, self).__init__()
self.rf_type = 'max_rfs'
self.n_rfs = params_dict['n_rfs']
self.solver = params_dict['solver']
self.multilin = params_dict['multilin']
self.init_method = params_dict['init']
if self.solver == 'linreg':
self.name = 'LinReg'
elif self.solver == 'logreg':
self.name = 'LogReg'
elif self.solver == 'poireg':
self.name = 'PoiReg'
else:
raise Exception("Unknown solver: {}".format(self.solver))
self.reg_c_init = params_dict['reg_c_init']
self.reg_type = params_dict['reg_type']
self.name += '_' + self.reg_type
if self.multilin:
self.name += '_ml'
self._print_model_info()
def train(self, x, y, rf_win_size, split_idx=0, verbose=1):
"""Trains the RF model
Args:
x: input array
y: output array
rf_win_size: rf window size in time bins
split_idx: training fold index
verbose: print progress
Returns:
Raises:
"""
# 1. Check initialization
self._check_initialization(split_idx)
# 2. Handle data with y-values from multiple trials
if self.solver == 'linreg':
y_tmp = np.vstack(y.mean(axis=1))
elif self.solver == 'logreg':
y_tmp = np.vstack(y.mean(axis=1))
elif self.solver == 'poireg':
y_tmp = np.vstack(y.sum(axis=1))
else:
raise Exception("Unknown solver: {}".format(self.solver))
# 3. Find receptive fields using an appropriate solver
opt_dict = {'solver': self.solver,
'verbose': verbose > 0}
self.params[split_idx] = rf_opt.alternating_solver_ctx(
x, y_tmp, self.params[split_idx], opt_dict)
        # Get the final objective function value
weights = np.ones(y_tmp.shape)
if self.solver == 'logreg':
weights[y_tmp > 1] = y_tmp[y_tmp > 1]
if self.solver == 'linreg':
obj_fun = rf_obj_fun.mean_squared_error
elif self.solver == 'logreg':
obj_fun = rf_obj_fun.neg_log_lik_bernoulli
elif self.solver == 'poireg':
obj_fun = rf_obj_fun.neg_log_lik_poisson
obj_fun_val = obj_fun(x, y_tmp, self.params[split_idx], weights=weights)
self._add_obj_fun_val(obj_fun_val, split_idx)
# 4. Estimate the nonlinearity
self._estimate_nonlinearity(x, y_tmp, split_idx)
self.trained[split_idx] = True
def predict(self, x, split_idx=0):
""" Predict response to input
Args:
x: input array
split_idx:
Returns:
y_hat: model output
Raises:
Model not trained
"""
self._check_initialization(split_idx)
self._check_training(split_idx)
# Get the similarity score
z = self._calculate_z(x, split_idx=split_idx)
if self.solver == 'linreg':
y_hat = z
        elif self.solver == 'logreg':
y_hat = rf_obj_fun.inv_bernoulli_link(z)
elif self.solver == 'poireg':
y_hat = rf_obj_fun.inv_poisson_link(z)
return y_hat
class CtxModel(RFModel):
"""RF model class: Context model | subunit model)"""
def __init__(self, params_dict):
"""configures the model according to provided parameters
Args:
params_dict: model parameters
Returns:
Raises:
Unknown optimization method
"""
super(CtxModel, self).__init__()
self.rf_type = 'lin_rfs'
self.n_rfs = 1
self.rf_truncated = params_dict['rf_truncated']
self.solver = params_dict['solver']
self.multilin = params_dict['multilin']
self.cf_mapping = params_dict['cf_mapping']
self.cf_type = params_dict['cf_type']
self.cf_alignment = params_dict['cf_alignment']
self.cf_act_fun = params_dict['cf_act_fun']
self.pos_sol = params_dict['pos_sol']
self.init_method = params_dict['init']
if self.solver == 'linreg':
self.name = 'LinReg'
elif self.solver == 'logreg':
self.name = 'LogReg'
elif self.solver == 'poireg':
self.name = 'PoiReg'
else:
raise Exception("Unknown solver: {}".format(self.solver))
if self.cf_type == 'ctx':
self.name += 'Ctx'
elif self.cf_type == 'ctx_nl':
self.name += 'CtxNl'
elif self.cf_type == 'subunit':
self.name += 'Sub'
if self.rf_truncated:
self.name += 'T'
if self.cf_mapping == 'temporal':
self.name += 'Temp'
if self.cf_act_fun is not None:
self.name += self.cf_act_fun[:4].capitalize()
else:
self.cf_zero_origin = True
if self.cf_alignment == 'center':
self.name += '_c'
self.reg_c_init = params_dict['reg_c_init']
self.reg_type = params_dict['reg_type']
self.name += '_' + self.reg_type
if self.multilin:
self.name += '_ml'
self._print_model_info()
def train(self, x, y, rf_win_size, split_idx=0, verbose=1):
"""Trains the RF model
Args:
x: input array
y: output array
rf_win_size: rf window size in time bins
split_idx: training fold index
verbose: print progress
Returns:
Raises:
"""
# 1. Check initialization
self._check_initialization(split_idx)
# 2. Handle data with y-values from multiple trials
if self.solver == 'linreg':
y_tmp = np.vstack(y.mean(axis=1))
elif self.solver == 'logreg':
y_tmp = np.vstack(y.mean(axis=1))
elif self.solver == 'poireg':
y_tmp = np.vstack(y.sum(axis=1))
else:
raise Exception("Unknown solver: {}".format(self.solver))
# 3. Find receptive and context fields using the model dependent method
# OPTION 1. No CF activation function
if self.cf_act_fun is None:
opt_dict = {'solver': self.solver,
'pos_sol': self.pos_sol,
'verbose': verbose > 0}
self.params[split_idx] = rf_opt.alternating_solver_ctx(
x, y_tmp, self.params[split_idx], opt_dict)
# OPTION 2. Fixed CF activation function
if self.cf_act_fun and self.cf_act_fun != 'adaptive':
opt_dict = {'solver': self.solver,
'pos_sol': self.pos_sol,
'verbose': verbose > 0}
self.params[split_idx] = rf_opt.alternating_solver_ctx_fixed(
x, y_tmp, self.params[split_idx], opt_dict)
# OPTION 3. Adaptive CF activation function
if self.cf_act_fun == 'adaptive':
opt_dict = {'solver': self.solver,
'rf_truncated': self.rf_truncated,
'first_fold': split_idx == 0,
'pos_sol': self.pos_sol,
'verbose': verbose > 0}
self.params[split_idx] = rf_opt.alternating_solver_ctx_adaptive(
x, y_tmp, self.params[split_idx], opt_dict)
        # Get the final objective function value
weights = np.ones(y_tmp.shape)
if self.solver == 'logreg':
weights[y_tmp > 1] = y_tmp[y_tmp > 1]
if self.solver == 'linreg':
obj_fun = rf_obj_fun.mean_squared_error
elif self.solver == 'logreg':
obj_fun = rf_obj_fun.neg_log_lik_bernoulli
elif self.solver == 'poireg':
obj_fun = rf_obj_fun.neg_log_lik_poisson
obj_fun_val = obj_fun(x, y_tmp, self.params[split_idx], weights=weights)
self._add_obj_fun_val(obj_fun_val, split_idx)
# 4. Estimate the nonlinearity
self._estimate_nonlinearity(x, y_tmp, split_idx)
self.trained[split_idx] = True
def predict(self, x, split_idx=0):
""" Predict response to input
Args:
x: input array
split_idx: fold index
Returns:
y_hat: model output
Raises:
Model not trained
"""
self._check_initialization(split_idx)
self._check_training(split_idx)
# Get the similarity score
z = self._calculate_z(x, split_idx=split_idx)
if self.solver == 'linreg':
y_hat = z
        elif self.solver == 'logreg':
y_hat = rf_obj_fun.inv_bernoulli_link(z)
elif self.solver == 'poireg':
y_hat = rf_obj_fun.inv_poisson_link(z)
return y_hat
class MIDModel(RFModel):
"""RF model class: Maximally Informative Dimensions (MID)
This model class can train:
    - Multi-filter LN models (Sharpee et al. 2004 and Sharpee et al. 2006)
- Position invariant models (Eickenberg et al. 2012)
- Context models (Westo and May, 2017)
by maximizing the single spike information directly.
"""
def __init__(self, params_dict):
"""configures the model according to provided parameters
Args:
params_dict: model parameters
Returns:
Raises:
Unknown optimization method
"""
super(MIDModel, self).__init__()
self.name = 'MID'
self.init_method = params_dict['init']
self.rf_type = params_dict['rf_type']
self.n_rfs = params_dict['n_rfs']
self.multilin = params_dict['multilin']
self.cf_mapping = params_dict['cf_mapping']
self.cf_alignment = params_dict['cf_alignment']
if self.cf_mapping == 'same':
self.name += 'Ctx'
elif self.cf_mapping == 'level':
self.name += 'CtxInt'
else:
self.name += str(self.n_rfs)
if self.init_method is not None:
self.name += '_' + self.init_method
self._print_model_info()
def train(self, x, y, rf_win_size, split_idx=0, verbose=1):
"""Trains the RF model
Args:
x: input array
y: output array
            rf_win_size: rf window size in time bins
            split_idx: fold index when estimating models for each KFold
            verbose: show output if greater than 0
Returns:
Raises:
"""
# 1. Check initialization
y_sum = np.vstack(y.sum(axis=1))
self._check_initialization(split_idx)
# 2. Find field parameters through gradient ascent
opt_dict = {'n_bins': self.dist_res,
'verbose': verbose > 0}
self.params[split_idx] = rf_opt.gradient_ascent(
x, y_sum, self.params[split_idx], opt_dict)
        # Get final objective function value
mi_val = rf_obj_fun.mid_mi(
x, y_sum, self.params[split_idx], self.dist_res)
self._add_obj_fun_val(mi_val, split_idx)
# 3. Estimate the nonlinearity
self._estimate_nonlinearity(x, y_sum, split_idx)
self.trained[split_idx] = True
class LNModel(RFModel):
"""RF model class: multi-filter linear-nonlinear (LN) model
Multi-filter LN models can be estimated using STC or iSTAC analysis.
These methods both assume a Gaussian stimulus distribution
See Touryan et al. (2002) and Pillow and Simoncelli (2006) for details.
"""
def __init__(self, params_dict):
"""configures the model according to provided parameters
Args:
params_dict: model parameters
Returns:
Raises:
Unknown optimization method
"""
super(LNModel, self).__init__()
self.solver = params_dict['solver']
self.n_rfs = params_dict['n_rfs']
if self.solver == 'stc':
self.name = 'STC'
elif self.solver == 'stc_w':
self.name = 'STCw'
elif self.solver == 'istac':
self.name = 'iSTAC'
else:
raise Exception("Unknown solver: {}".format(self.solver))
self.name += str(self.n_rfs)
self._print_model_info()
def train(self, x, y, rf_win_size, split_idx=0, verbose=1):
"""Trains the RF model
Args:
x: input array
y: output array
rf_win_size: rf window size in time bins
split_idx: fold index when estimating models for each KFold
verbose: show output if greater than 0
Returns:
Raises:
"""
# 1. Check initialization
self._check_initialization(split_idx)
# 2. Find LN-filters using the model dependent solver
self._add_rf_significancen(split_idx)
opt_dict = {'n_dims': self.n_rfs,
'solver': self.solver,
'verbose': verbose > 0}
self.params[split_idx], self.rf_significance[split_idx] = \
rf_opt.ln_solver(x, y, self.params[split_idx], opt_dict)
# 3. Estimate the nonlinearity (only first two filters)
self._estimate_nonlinearity(x, y, split_idx)
self.trained[split_idx] = True
class QNModel(RFModel):
"""RF model class: quadratic-nonlinear (QN) model
    QN-models form a subset of LN models with a one-dimensional nonlinearity.
    These can hence be used together with more than two filters as they
do not suffer from the curse of dimensionality.
See Fitzgerald et al. (2011) and Sharpee (2013) for details
"""
def __init__(self, params_dict):
"""configures the model according to provided parameters
Args:
params_dict: model parameters
Returns:
Raises:
Unknown optimization method
"""
super(QNModel, self).__init__()
self.solver = params_dict['solver']
self.rf_type = 'qn_rfs'
self.n_rfs = params_dict['n_rfs']
self.init_method = params_dict['init']
self.reg_c_init = params_dict['reg_c_init']
self.reg_type = 'norm'
if self.solver == 'mne':
self.name = 'MNE'
else:
raise Exception("Unknown solver: {}".format(self.solver))
self.name += str(self.n_rfs)
self.name += '_C' + str(self.reg_c_init)
self._print_model_info()
def train(self, x, y, rf_win_size, split_idx=0, verbose=1):
"""Trains the RF model
Args:
x: input array
y: output array
rf_win_size: rf window size in time bins
split_idx: fold index when estimating models for each KFold
verbose: show output if greater than 0
Returns:
Raises:
"""
# 1. Check initialization
y_tmp = np.vstack(y.mean(axis=1))
self._check_initialization(split_idx)
# 2. Find QN polynomial parameters using the selected solver
if self.params[split_idx].qn_poly.size == 0:
# Use an initial guess if available
if self.params[0].qn_poly.size > 0:
w0 = self.params[0].qn_poly
else:
w0 = np.array([])
opt_dict = {'solver': self.solver,
'w0': w0,
'verbose': verbose > 0}
self.params[split_idx] = \
rf_opt.qn_solver(x, y_tmp, self.params[split_idx], opt_dict)
# 3. Extract filters from the polynomial's parameters
self._add_rf_significancen(split_idx)
opt_dict = {'solver': self.solver,
'n_filters': self.n_rfs}
self.params[split_idx], self.rf_significance[split_idx] = \
rf_opt.qn_extract_filters(x, y_tmp,
self.params[split_idx],
opt_dict)
# 4. Fine tune filter weights by optimizing the QN obj. fun. directly
opt_dict = {'eta': 1e-6,
'alpha': 0.95,
'batch_frac': 1.0,
'solver': 'logreg',
'verbose': verbose > 0}
self.params[split_idx] = rf_opt.gd_with_search(
x, y_tmp, self.params[split_idx], opt_dict)
        # Normalize quadratic filters to unit length
for rf in self.params[split_idx].rfs:
if rf.qn_square:
tmp = np.linalg.norm(rf.field)
rf.field /= tmp
rf.qn_lambda *= (tmp**2)
# 5. Estimate the nonlinearity
self._estimate_nonlinearity(x, y_tmp, split_idx)
self.trained[split_idx] = True
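# For orientation, a sketch assuming the standard MNE parameterization (cf.
# Fitzgerald et al. 2011; the exact conventions inside rf_opt may differ):
# a QN model scores a stimulus x with a quadratic polynomial passed through a
# one-dimensional nonlinearity,
#     z(x) = a + w'x + x'Qx,    y_hat = f(z),
# where the eigenvectors of Q act as the quadratic filters extracted in
# step 3 of train() above and the eigenvalues correspond to the qn_lambda
# weights.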
def gradient_checking(x, y, rf_win_size):
"""Gradient checking for all error_measure functions using provided data
Args:
x: input array
y: output array
rf_win_size:
Returns:
Raises:
Unknown error measure
"""
diff_lim = 1e-5
n_bins = 11
# Field shapes
x_nd = add_fake_dimension(x, rf_win_size)
rf_shape = x_nd.shape[1:]
cf_shape = [5, 5, 5] # default shape
cf_shape = _modify_cf_shape(rf_shape, cf_shape) # modification if needed
# Combinations to test
# error_measures = ['mse', 'neg_log_lik_bernoulli', 'neg_log_lik_poisson']
error_measures = ['neg_log_lik_bernoulli']
multilin_options = [True]
cf_mappings = ['same']
cf_alignments = ['edge']
    cf_types = ['ctx', 'ctx_nl', 'subunit']
reg_c_vals = [1e-3, 1e1]
istac_dims = [1, 2]
mids = []
print "Running gradient checking for MSE and log-likelihood models"
print "{:20}{:15}{:15}{:15}{:15}{:15}{:15}{:15}{:15}".format(
'Error', 'MultiLin', 'CF mapping', 'CF alignment', 'CF type',
'Reg C', 'Field', 'Diff', 'Status')
# Part 1: MSE or neg-log-likelihood error functions
for error_measure in error_measures:
weights = np.array([])
# Select proper functions
if error_measure == 'mse':
err_fun = rf_obj_fun.mean_squared_error
err_fun_der = rf_obj_fun.mean_squared_error_der
elif error_measure == 'neg_log_lik_bernoulli':
err_fun = rf_obj_fun.neg_log_lik_bernoulli
err_fun_der = rf_obj_fun.neg_log_lik_bernoulli_der
elif error_measure == 'neg_log_lik_poisson':
err_fun = rf_obj_fun.neg_log_lik_poisson
err_fun_der = rf_obj_fun.neg_log_lik_poisson_der
else:
raise Exception("Unknown error measure: {}".format(error_measure))
for multilin in multilin_options:
for cf_mapping in cf_mappings:
for cf_alignment in cf_alignments:
for cf_type in cf_types:
for reg_c in reg_c_vals:
# Initialize rf/cf models
params = Parameters()
params.init_rfs(rf_shape, reg_c,
multilin=multilin,
init_values='randn')
params.init_cfs(cf_shape, cf_mapping,
type=cf_type,
reg_c=reg_c,
multilin=multilin,
init_values='randn',
zero_origin=False,
alignment=cf_alignment)
params.init_act_funs('random', reg_c)
params.update_context_map()
rf_ders_ana, cf_ders_ana, act_fun_ders_ana = \
err_fun_der(x, y, params,
weights=weights)
rf_ders_num, cf_ders_num, act_fun_ders_num = \
_num_param_der(x, y, params,
error_fun=err_fun,
weights=weights)
# Check RF parameters
for rf_idx in range(len(params.rfs)):
rf_diff = _field_diff(rf_ders_ana[rf_idx],
rf_ders_num[rf_idx])
outcome = 'passed' \
if rf_diff < diff_lim else 'FAILED'
print "{:20.15}{:15}{:15}{:15}{:15}{:<15.1e}" \
"{:15}{:<15.1e}{:15}".format(
error_measure, str(multilin), cf_mapping,
cf_alignment, cf_type, reg_c,
'rf', rf_diff, outcome)
if outcome == 'FAILED':
plt_fun.plot_field_gradients(
rf_ders_ana[rf_idx],
rf_ders_num[rf_idx])
# Check CF parameters
for cf_idx in range(len(params.cfs)):
cf_diff = _field_diff(cf_ders_ana[cf_idx],
cf_ders_num[cf_idx])
outcome = 'passed' \
if cf_diff < diff_lim else 'FAILED'
print "{:20.15}{:15}{:15}{:15}{:15}{:<15.1e}" \
"{:15}{:<15.1e}{:15}".format(
error_measure, str(multilin), cf_mapping,
cf_alignment, cf_type, reg_c,
'cf', cf_diff, outcome)
if outcome == 'FAILED':
plt_fun.plot_field_gradients(
cf_ders_ana[cf_idx],
cf_ders_num[cf_idx])
# Check activation function parameters
for act_fun_idx in range(len(params.cf_act_funs)):
alpha_diff = _alpha_diff(
act_fun_ders_ana[act_fun_idx],
act_fun_ders_num[act_fun_idx])
outcome = 'passed' \
if alpha_diff < diff_lim else 'FAILED'
print "{:20.15}{:15}{:15}{:15}{:15}{:<15.1e}" \
"{:15}{:<15.1e}{:15}".format(
error_measure, str(multilin), cf_mapping,
cf_alignment, cf_type, reg_c,
'act_fun', alpha_diff, outcome)
if outcome == 'FAILED':
plt_fun.plot_act_fun_gradients(
                                    act_fun_ders_ana[act_fun_idx],
                                    act_fun_ders_num[act_fun_idx])
# Part 2: iSTAC
print "Running gradient checking for iSTAC"
print "{:15}{:15}{:15}".format('Dims', 'Diff', 'Status')
rf_size = reduce(mul, rf_shape)
x_nd = add_fake_dimension(x, rf_shape[0])
x_2d = x_nd.reshape(x_nd.shape[0], rf_size)
sta, stc = sta_and_stc(x_2d, y)
# Loop over different number of bases vectors
for dim in istac_dims:
# Initialize b
b = np.random.randn(sta.size*dim).reshape(sta.size, dim)
b /= np.linalg.norm(b, axis=0)
# Get the gradients
b_der_ana = rf_obj_fun.istac_mi_der(b, sta, stc)
b_der_num = _num_istack_der(b, sta, stc)
for field_id in range(dim):
ana_field_der = Field(rf_shape, reg_c=None, init_values='zeros')
ana_field_der.field = b_der_ana[:, field_id].reshape(rf_shape)
num_field_der = Field(rf_shape, reg_c=None, init_values='zeros')
num_field_der.field = b_der_num[:, field_id].reshape(rf_shape)
# Check RF parameters
rmse = _field_diff(ana_field_der, num_field_der)
outcome = 'passed' if rmse < diff_lim else 'FAILED'
print "{:<15}{:<15.1e}{:15}".format(dim, rmse, outcome)
if outcome == 'FAILED':
plt_fun.plot_field_gradients(ana_field_der, num_field_der)
# Part 3: MID
# This part is not always worth running as the partial derivatives are very
# noisy due to the probability functions being estimated with histograms.
print "Maximally Informative Dimensions"
print "{:15}{:15}{:15}{:15}{:15}".format(
'MIDs', 'CF mapping', 'Field', 'RMSE', 'Status')
# Loop over different number of mids
for n_mids in mids:
for cf_mapping in cf_mappings:
for multilin in multilin_options:
# Initialize rf/cf models
params = Parameters()
params.init_rfs(rf_shape,
reg_c=None,
n_rfs=n_mids,
multilin=multilin,
init_values='randn')
params.init_cfs(cf_shape, cf_mapping, 'ctx', None,
multilin=multilin,
init_values='randn')
# Get the gradients
rf_ders_ana, cf_ders_ana = \
rf_obj_fun.mid_mi_der(x, y, params, n_bins)
rf_ders_num, cf_ders_num = \
_num_mi_der(x, y, params, n_bins)
# Check mid derivatives
for mid_idx in range(len(rf_ders_ana)):
rf_diff = _field_diff(rf_ders_ana[mid_idx],
rf_ders_num[mid_idx])
if rf_diff < diff_lim:
status = 'passed'
else:
status = 'FAILED'
plt_fun.plot_field_gradients(rf_ders_ana[mid_idx],
rf_ders_num[mid_idx])
print "{:<15d}{:15}{:15}{:<15.1e}{:15}".format(
n_mids, cf_mapping, 'rf', rf_diff, status)
# Check mid derivatives
for cf_idx in range(len(cf_ders_ana)):
cf_diff = _field_diff(cf_ders_ana[cf_idx],
cf_ders_num[cf_idx])
if cf_diff < diff_lim:
status = 'passed'
else:
status = 'FAILED'
                    plt_fun.plot_field_gradients(cf_ders_ana[cf_idx],
                                                 cf_ders_num[cf_idx])
print "{:<15d}{:15}{:15}{:<15.1e}{:15}".format(
n_mids, cf_mapping, 'cf', cf_diff, status)
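def _toy_gradient_check_demo():
    """Illustration only (not called by the checks above): the central
    difference scheme used by the numerical helpers below,
    f'(w) ~ (f(w + kappa) - f(w - kappa)) / (2*kappa),
    applied to f(w) = 0.5*w'w, whose analytical gradient is w itself.
    Assumes numpy is imported as np at module level, as elsewhere in this file.
    """
    kappa = 1e-5
    w = np.random.randn(4)
    f = lambda v: 0.5 * np.dot(v, v)
    num_grad = np.zeros_like(w)
    for idx in range(w.size):
        w[idx] += kappa
        f_plus = f(w)
        w[idx] -= 2 * kappa
        f_minus = f(w)
        w[idx] += kappa  # restore the parameter
        num_grad[idx] = (f_plus - f_minus) / (2 * kappa)
    assert np.allclose(num_grad, w, atol=1e-6)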
def cf_der_checking(x, y, rf_win_size):
""" Calculate the CF gradient using two methods and compare the results
Args:
x: input array
        y: output array
        rf_win_size: rf window size in time bins
Returns:
Raises:
Unknown error measure
"""
# Field shapes
x_nd = add_fake_dimension(x, rf_win_size)
rf_shape = x_nd.shape[1:]
cf_shape = [5, 5, 5] # default shape
cf_shape = _modify_cf_shape(rf_shape, cf_shape) # modification if needed
params = Parameters()
params.init_rfs(rf_shape,
multilin=False,
init_values='randn')
params.init_cfs(cf_shape,
method='same',
multilin=False,
init_values='randn')
t0 = time()
rf_ders_ana, cf_ders_ana = \
rf_obj_fun.neg_log_lik_bernoulli_der(x, y, params,
cf_act_fun='linear',
reg_c=0.0)
cf_standard_time = time() - t0
print "CF standard: {:f}".format(cf_standard_time)
sleep(0.1)
t0 = time()
cfs_mat = [Field(cf_shape, None, multilin=False, init_values='zeros')]
cf_der_mats = rf_obj_fun.cf_bernoulli_der(x, y, params, 'linear')
cf_der_mat_sum = cf_der_mats[0].sum(axis=1)
cfs_mat[0].bias = cf_der_mat_sum[0]
cfs_mat[0].field = cf_der_mat_sum[1:].reshape(cf_shape)
cf_mat_time = time() - t0
print "CF matrix: {:f}".format(cf_mat_time)
plt_fun.plot_field_gradients(cf_ders_ana[0], cfs_mat[0])
# def lines_in_parameter_space(x, y, rf_win_size):
# """ Calculates the objective function for lines in the parameter space
# Args:
# x: input array
# y: output array
# rf_win_size: rf window size in time bins
# Returns:
# Raises:
# """
# n_lines = 50
# n_points = 25
# # Add a fake dimension by sliding a window over x
# x_nd = add_fake_dimension(x, rf_win_size)
# rf_dims = x_nd.shape[1:]
# # Modify cf_shape according to the input array
# cf_dims = [5, 5, 5] # default shape
# cf_dims = _modify_cf_shape(rf_dims, cf_dims)
# # Errors: ['mse', 'neg_log_lik_bernoulli', 'neg_log_lik_poisson']
# error_measure = 'mse'
# multilin = False
# # Preallocation
# delta = np.linspace(0, 1, n_points)
# lines = np.empty(n_points)
# plt.figure()
# plt.ion()
# plt.show()
# for line_idx in range(n_lines):
# # Initialize rf/cf models
# rf1 = Field(rf_dims, None, multilin=multilin, init_values='randn')
# rf2 = Field(rf_dims, None, multilin=multilin, init_values='randn')
# cf1 = Field(rf_dims, None, multilin=multilin, init_values='randn')
# cf2 = Field(rf_dims, None, multilin=multilin, init_values='randn')
# rf = Field(rf_dims, None, multilin=multilin, init_values='zeros')
# cf = Field(rf_dims, None, multilin=multilin, init_values='zeros')
# for point_idx in range(n_points):
# rf.field = rf1.field + delta[point_idx]*rf2.field
# cf.field = cf1.field + delta[point_idx]*cf2.field
# lines[point_idx] = rf_obj_fun.mean_squared_error(x, y, [rf], [cf])
# print line_idx
# plt.cla()
# plt.plot(lines.T, 'o-', lw=2)
# plt.draw()
# plt.pause(0.25)
def _field_diff(field_a, field_b):
""" Calculate the RMSE between two fields
Args:
field_a: first field
field_b: second field
Returns:
        field_diff: squared norm of the difference, normalized by the product
            of the two field norms
Raises:
"""
assert field_a.multilin == field_b.multilin
assert field_a.shape == field_b.shape
v_a = []
v_b = []
if field_a.multilin:
for part_idx in range(len(field_a.parts)):
v_a.append(field_a.parts[part_idx].ravel())
v_b.append(field_b.parts[part_idx].ravel())
else:
v_a.append(field_a.field.ravel())
v_b.append(field_b.field.ravel())
v_a.append(field_a.bias)
v_b.append(field_b.bias)
v_a = np.hstack(v_a)
v_b = np.hstack(v_b)
field_diff = np.linalg.norm(v_a - v_b)**2 / \
(np.linalg.norm(v_a)*np.linalg.norm(v_b))
return field_diff
def _alpha_diff(act_fun_a, act_fun_b):
""" Calculate the RMSE between two fields
Args:
field_a: first field
field_b: second field
Returns:
rmse:
Raises:
"""
assert act_fun_a.alpha.shape == act_fun_b.alpha.shape
v_a = act_fun_a.alpha
v_b = act_fun_b.alpha
alpha_diff = np.linalg.norm(v_a - v_b)**2 / \
(np.linalg.norm(v_a)*np.linalg.norm(v_b))
return alpha_diff
def _modify_cf_shape(rf_shape, cf_shape):
""" Modifies the shape of the context field after the input dimensionality
Rank 1 field models differ from full field models by having a list of part
vectors stored under the 'parts' key. For full field models this list is
instead empty.
Args:
rf_shape: [n_time_bins, n_frequencies, n_levels]
cf_shape: default context shape [time, frequency, level]
Returns:
cf_shape:
Raise
"""
    # Loop over all dimensions and replace too large values in cf_shape with
    # the dimensionality of x_shape.
for idx in range(len(cf_shape)):
if cf_shape[idx] > rf_shape[idx]:
cf_shape[idx] = rf_shape[idx]
if np.mod(rf_shape[idx], 2) == 0:
cf_shape[idx] += 1
return cf_shape
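# Example (illustrative): with rf_shape = (10, 6, 1) and the default
# cf_shape = [5, 5, 5], the last dimension is clamped to the input size,
# giving cf_shape = [5, 5, 1]; a clamped dimension is bumped to the next
# odd value whenever the corresponding input dimension is even.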
def _num_param_der(x, y, params, error_fun, weights):
""" Numerical partial derivatives
This function estimates the numerical partial derivatives for both rf
and cf parameters on the provided error function
Args:
x: input array
y: output array
params: field parameters
error_fun: error function
weights:
Returns:
rf_ders: list with derivatives for rf parts and rf bias
        cf_ders: list with derivatives for cf parts and cf bias
Raise
"""
kappa = 1e-5 # step length when determining derivatives numerically
rf_ders = []
cf_ders = []
act_fun_ders = []
# Partial derivative for RF parameters
for rf in params.rfs:
rf_der = _num_field_der(x, y, params, rf, error_fun,
weights, kappa)
rf_ders.append(rf_der)
    # Partial derivative for CF parameters
for cf in params.cfs:
cf_der = _num_field_der(x, y, params, cf, error_fun,
weights, kappa)
cf_ders.append(cf_der)
for act_fun in params.cf_act_funs:
act_fun_der = _num_act_fun_der(x, y, params, act_fun, error_fun,
weights, kappa)
act_fun_ders.append(act_fun_der)
return rf_ders, cf_ders, act_fun_ders
def _num_field_der(x, y, params, field, error_fun, weights, kappa):
""" Estimates numerical derivatives for RF and CF parameters
:param x:
:param y:
:param params:
:param field:
:param error_fun:
:param weights:
:param kappa:
:return:
"""
field_der = Field(field.shape, reg_c=None, multilin=field.multilin)
if field.multilin:
for part_idx in range(len(field.parts)):
for idx in range(field.parts[part_idx].size):
field.parts[part_idx].ravel()[idx] += kappa
field.field = outer_product(field.parts)
err_plus = error_fun(x, y, params, weights)
field.parts[part_idx].ravel()[idx] -= 2 * kappa
field.field = outer_product(field.parts)
err_minus = error_fun(x, y, params, weights)
field.parts[part_idx].ravel()[idx] += kappa
field.field = outer_product(field.parts)
field_der.parts[part_idx].ravel()[idx] = \
(err_plus - err_minus) / 2 / kappa
else:
for idx in range(field.field.size):
field.field.ravel()[idx] += kappa
err_plus = error_fun(x, y, params, weights)
field.field.ravel()[idx] -= 2 * kappa
err_minus = error_fun(x, y, params, weights)
field.field.ravel()[idx] += kappa
field_der.field.ravel()[idx] = (err_plus - err_minus) \
/ 2 / kappa
field.bias += kappa
err_plus = error_fun(x, y, params, weights)
field.bias -= 2 * kappa
err_minus = error_fun(x, y, params, weights)
field.bias += kappa
field_der.bias = (err_plus - err_minus) / 2 / kappa
return field_der
def _num_act_fun_der(x, y, params, act_fun, error_fun, weights, kappa):
""" Estimates numerical derivatives for the cf_act_fun's parameters
:param x:
:param y:
:param params:
:param field:
:param error_fun:
:param weights:
:param kappa:
:return act_fun_der:
"""
alpha = act_fun.alpha
act_fun_der = ActFun(reg_c=None)
for idx in range(alpha.size):
alpha[idx] += kappa
err_plus = error_fun(x, y, params, weights)
alpha[idx] -= 2 * kappa
err_minus = error_fun(x, y, params, weights)
alpha[idx] += kappa
act_fun_der.alpha[idx] = (err_plus - err_minus) / 2 / kappa
return act_fun_der
def _num_istack_der(b, sta, stc):
""" Numerical partial derivatives (I_iSTAC)
This function estimates numerical partial derivatives for the vectors
that span the relevant subspace (Multi filter LN models, iSTAC)
Args:
        b: matrix whose columns span the relevant subspace
        sta: spike-triggered average
        stc: spike-triggered covariance
    Returns:
        b_der: partial derivatives for the subspace spanning vectors
Raise
"""
kappa = 1e-5
b_der = np.zeros(b.shape)
# Partial derivative for RF parameters
for idx in range(b.size):
b.ravel()[idx] += kappa
mi_plus = rf_obj_fun.istac_mi(b, sta, stc)
b.ravel()[idx] -= 2 * kappa
mi_minus = rf_obj_fun.istac_mi(b, sta, stc)
b.ravel()[idx] += kappa
b_der.ravel()[idx] = (mi_plus - mi_minus) / 2 / kappa
return b_der
def _num_mi_der(x, y, params, n_bins):
""" Numerical partial derivatives (I_mid)
This function estimates the numerical partial derivatives for RFs and CFs
    when these are optimized using the mutual information objective function.
Args:
x: input matrix
y: spike count matrix
params: field parameters
n_bins: number of bins to use when approximating the distribution
Returns:
rf_ders: partial derivatives for mids
cf_ders:
Raise
"""
kappa = 1e-2
rf_ders = []
cf_ders = []
# Partial derivative for RF parameters
for rf in params.rfs:
rf_der = Field(rf.shape, None, multilin=rf.multilin)
if rf.multilin:
for part_idx, part in enumerate(rf.parts):
for idx in range(part.size):
part.ravel()[idx] += kappa
rf.field = outer_product(rf.parts)
mi_plus = rf_obj_fun.mid_mi(x, y, params, n_bins)[0]
part.ravel()[idx] -= 2 * kappa
rf.field = outer_product(rf.parts)
mi_minus = rf_obj_fun.mid_mi(x, y, params, n_bins)[0]
part.ravel()[idx] += kappa
rf.field = outer_product(rf.parts)
rf_der.parts[part_idx].ravel()[idx] = \
(mi_plus - mi_minus) / 2 / kappa
else:
for idx in range(rf.field.size):
rf.field.ravel()[idx] += kappa
mi_plus = rf_obj_fun.mid_mi(x, y, params, n_bins)[0]
rf.field.ravel()[idx] -= 2 * kappa
mi_minus = rf_obj_fun.mid_mi(x, y, params, n_bins)[0]
rf.field.ravel()[idx] += kappa
rf_der.field.ravel()[idx] = (mi_plus - mi_minus) / 2 / kappa
rf.bias += kappa
mi_plus = rf_obj_fun.mid_mi(x, y, params, n_bins)[0]
rf.bias -= 2 * kappa
mi_minus = rf_obj_fun.mid_mi(x, y, params, n_bins)[0]
rf.bias += kappa
rf_der.bias = (mi_plus - mi_minus) / 2 / kappa
rf_ders.append(rf_der)
    # Partial derivative for CF parameters
for cf in params.cfs:
cf_der = Field(cf.shape, None, multilin=cf.multilin)
for idx in range(cf.field.size):
cf.field.ravel()[idx] += kappa
mi_plus = rf_obj_fun.mid_mi(x, y, params, n_bins)[0]
cf.field.ravel()[idx] -= 2 * kappa
mi_minus = rf_obj_fun.mid_mi(x, y, params, n_bins)[0]
cf.field.ravel()[idx] += kappa
cf_der.field.ravel()[idx] = (mi_plus - mi_minus) / 2 / kappa
cf.bias += kappa
mi_plus = rf_obj_fun.mid_mi(x, y, params, n_bins)[0]
cf.bias -= 2 * kappa
mi_minus = rf_obj_fun.mid_mi(x, y, params, n_bins)[0]
cf.bias += kappa
cf_der.bias = (mi_plus - mi_minus) / 2 / kappa
cf_ders.append(cf_der)
return rf_ders, cf_ders
|
JohanWesto/receptive-field-models
|
rf_models/rf_models.py
|
Python
|
mit
| 58,351
|
[
"Gaussian"
] |
708dd801e4bcb182e0b98b73a3deb22d459ee55231dc115312579c2384a264c5
|
from flyingpigeon import visualisation as vs
from pywps.Process import WPSProcess
class VisualisationProcess(WPSProcess):
def __init__(self):
# definition of this process
WPSProcess.__init__(self,
identifier = "visualisation",
title="Visualisation of netcdf files",
version = "0.3",
metadata= [
{"title": "Climate Service Center", "href": "http://www.climate-service-center.de/"}
],
abstract="Just testing a nice script to visualise some variables",
statusSupported=True,
storeSupported=True
)
self.resource = self.addComplexInput(
identifier="resource",
title="NetCDF Files",
abstract="NetCDF Files",
minOccurs=1,
maxOccurs=100,
maxmegabites=5000,
formats=[{"mimeType":"application/x-netcdf"}],
)
self.variableIn = self.addLiteralInput(
identifier="variable",
title="Variable",
abstract="Variable to be expected in the input files (Variable will be detected if not set, )",
default=None,
type=type(''),
minOccurs=0,
maxOccurs=1,
)
self.plotout_spagetti = self.addComplexOutput(
identifier="plotout_spagetti",
title="Visualisation, Spagetti plot",
abstract="Visualisation of single variables as a spagetti plot",
formats=[{"mimeType":"application/html"}],
asReference=True,
)
self.plotout_uncertainty = self.addComplexOutput(
identifier="plotout_uncertainty",
title="Visualisation, Uncertainty plot",
abstract="Visualisation of single variables ensemble mean with uncertainty",
formats=[{"mimeType":"application/html"}],
asReference=True,
)
def execute(self):
ncfiles = self.getInputValues(identifier='resource')
var = self.variableIn.getValue()
self.status.set('plotting variable %s' % var, 10)
plotout_spagetti_file = vs.spaghetti(ncfiles , variable=var, title=var, dir_out=None)
        self.status.set('Spaghetti plot for %s %s files done' % (len(ncfiles), var), 50)
plotout_uncertainty_file = vs.uncertainty(ncfiles , variable=var, title=var, dir_out=None)
self.status.set('Uncertainty plot for %s %s files done' % (len(ncfiles), var), 90)
self.plotout_spagetti.setValue( plotout_spagetti_file )
self.plotout_uncertainty.setValue( plotout_uncertainty_file )
self.status.set('visualisation done', 100)
|
sradanov/flyingpigeon
|
flyingpigeon/processes/wps_visualisation.py
|
Python
|
apache-2.0
| 2,787
|
[
"NetCDF"
] |
e13b1ecae60c6898c3ca8a5158be743da26eec4a53535f3bcf82da087251b50b
|
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import mx
if mx.get_jdk(tag='default').javaCompliance < "1.9":
mx.abort('JAVA_HOME is not a JDK9: ' + mx.get_jdk(tag='default').home)
from mx_graal_9 import mx_post_parse_cmd_line, run_vm, get_vm, isJVMCIEnabled # pylint: disable=unused-import
import mx_graal_bench # pylint: disable=unused-import
|
md-5/jdk10
|
src/jdk.internal.vm.compiler/.mx.graal/mx_graal.py
|
Python
|
gpl-2.0
| 1,547
|
[
"VisIt"
] |
863ca0fd4856c040ac353e9786dccba4114c2e2b722c3cc756e4353cb0056217
|
#!/usr/bin/python
#
# Copyright (C) 2010 The ESPResSo project
# Copyright (C) 2008 Axel Arnold
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import re
maxbacktrace=5
f=open(sys.argv[1], "r")
if len(sys.argv) > 2:
n=int(sys.argv[2])
else:
n=0
# regular expressions
re_start = re.compile(r"^%d: (?P<op>[a-z]+) (?P<args>.*)" % n)
allocated = {}
linenr=0
for line in f:
linenr = linenr + 1
if linenr % 1000 == 0:
sys.stderr.write(".")
match = re_start.match(line)
if match == None: continue
op = match.group('op')
args = match.group('args').split(" ")
if op == "alloc":
size = args[0]
addr = args[2]
src = [args[4]]
allocated[addr] = (size, src)
elif op == "realloc":
old = args[0]
addr = args[2]
size = args[4]
src = [args[6]]
if old == "(nil)":
pass
        elif old in allocated:
prev = allocated[old][1][:maxbacktrace-1]
src.extend(prev)
del allocated[old]
else:
src.append("unmanaged source " + old)
allocated[addr] = (size, src)
elif op == "free":
addr = args[0]
src = args[2]
if addr == "(nil)":
pass
elif addr in allocated:
del allocated[addr]
else:
print "\n" + addr + " freed at " + src + ", but never allocated\n"
print "\n"
for (addr,info) in allocated.iteritems():
s = info[0] + " @ " + addr + " allocated at " + info[1][0]
for loc in info[1][1:]:
s += ", from " + loc
print s
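# Usage sketch (file name and node id are illustrative):
#     python trace_memory.py memory_trace.log 0
# parses the allocation trace for node 0 and reports blocks that were
# allocated (or realloc'd) but never freed, keeping up to maxbacktrace
# entries of allocation history per block.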
|
roehm/cython
|
tools/trace_memory.py
|
Python
|
gpl-3.0
| 2,219
|
[
"ESPResSo"
] |
c7e255d1ed3b25e9e8231e242dae4465b245e19d1bffc71c70fc315715229b49
|
import numpy as np
_merra2_long_res = 0.625
_merra2_lat_res = 0.5
def get_surrounding_merra2_nodes(long, lat, grid=False):
"""
    Calculates the four surrounding MERRA2 nodes for the given coordinate.
    Returns a string of longitudes and a string of latitudes for ncks.
"""
# Round to the appropriate resolution
long = round(long, 3)
lat = round(lat, 2)
    # Closest points
    leftLong = long - ((long*1000)%(_merra2_long_res*1000))/1000
    rightLong = leftLong + _merra2_long_res
    bottomLat = lat - ((lat*100)%(_merra2_lat_res*100))/100
    topLat = bottomLat + _merra2_lat_res
    # Return a single coordinate if an exact MERRA node location has been requested, surrounding points otherwise
    if (long*1000)%(_merra2_long_res*1000) == 0 and (lat*100)%(_merra2_lat_res*100) == 0:
        if grid:
            long_grid, lat_grid = np.meshgrid([long], [lat])
            return long_grid, lat_grid
        else:
            return '{}'.format(long), '{}'.format(lat)
    else:
        if grid:
            long_grid, lat_grid = np.meshgrid([leftLong, rightLong], [bottomLat, topLat])
            return long_grid, lat_grid
        else:
            return '{},{}'.format(leftLong, rightLong), '{},{}'.format(bottomLat, topLat)
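# Worked example (illustrative): the point (5.0, 52.25) sits on a longitude
# node but between latitude nodes, so the four surrounding nodes are returned
# as ncks-ready strings:
#     >>> get_surrounding_merra2_nodes(5.0, 52.25)
#     ('5.0,5.625', '52.0,52.5')
# whereas an exact node such as (5.0, 52.5) is returned as a single point.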
def download_all_merra2(windb2, long, lat, variables, dryrun=False, download_missing=False, startyear=1980):
"""Checks the inventory and downloads all MERRA2 for a given coordinate"""
from datetime import datetime, timedelta
import pytz
import subprocess
import os.path
# Get the surrounding nodes
longSurround, latSurround = get_surrounding_merra2_nodes(long, lat)
# MERRA2 data is updated around the 15th of the month
merra2_start_incl = datetime(1980, 1, 1, 0, 0, 0).replace(tzinfo=pytz.utc)
merra2_end_excl = datetime.utcnow().replace(day=1, hour=0, minute=0, second=0, microsecond=0, tzinfo=pytz.utc)
    if datetime.utcnow().day < 15:
        merra2_end_excl = merra2_end_excl - timedelta(days=15)
# If there's nothing in the database, then we need to get everything
missing_data = False
# TODO this needs to be implemented in a smart way, just downloading everything for now
missing_data = True
# for var in variables.split(','):
# sql = """SELECT min(t), max(t)
# FROM {}_{}
# WHERE domainkey=(SELECT key FROM horizgeom WHERE st_transform(geom,4326)=st_geomfromtext('POINT({} {})',4326))"""\
# .format(var, domainkey, long, lat)
# windb2.curs.execute(sql)
# existing_t_range = windb2.curs.fetchone()
# if existing_t_range[0] is None and existing_t_range[1] is None:
# missing_data = True
# break
    # Get everything if nothing is here
if missing_data:
# Download the data in chunks
start_t_incl = datetime(startyear, 1, 1, 0, 0, 0).replace(tzinfo=pytz.utc)
chunk_size_days = 200
while start_t_incl < merra2_end_excl:
# Convert start index to hours
index_start = (start_t_incl - merra2_start_incl).days * 24
if start_t_incl + timedelta(days=chunk_size_days) < merra2_end_excl:
index_stop = (start_t_incl + timedelta(days=chunk_size_days) - merra2_start_incl).days * 24 - 1
else:
index_stop = (merra2_end_excl - merra2_start_incl).days * 24 - 1
end_t_incl = merra2_start_incl + timedelta(hours=index_stop)
            # Generate the ncks command to run
url = 'http://goldsmr4.gesdisc.eosdis.nasa.gov/dods/M2T1NXSLV'
cmd = '/usr/bin/ncks'
filename = 'merra2_{}_{}_{:06}-{:06}.nc'.format(longSurround, latSurround, index_start, index_stop)
args = '-O -v {} -d time,{},{} -d lon,{} -d lat,{} {} {}' \
.format(variables, index_start, index_stop, longSurround, latSurround, url, filename)
# Only download what's missing
if download_missing:
if os.path.isfile(filename):
print('Skipping file: {}'.format(filename))
start_t_incl += timedelta(days=chunk_size_days)
continue
if dryrun:
print(cmd, ' ', args)
else:
print('Running: {} {}'.format(cmd, args))
subprocess.call(cmd + ' ' + args, shell=True)
start_t_incl += timedelta(days=chunk_size_days)
def insert_merra2_file(windb2conn, ncfile, vars, reinsert=False):
"""Inserts a MERRA2 file downloaded using ncks
ncfile: netCDF file downloaded with ncks
vars: CSV list of MERRA2 variables (e.g. u50m,v50m,ps)
"""
from netCDF4 import Dataset, num2date
import re
from windb2.struct import geovariable, insert
import pytz
# Info
print('Inserting: {}'.format(ncfile))
# Open the netCDF file
ncfile = Dataset(ncfile, 'r')
# Get the times
timevar = ncfile.variables['time']
timearr = num2date(timevar[:], units=timevar.units)
# For each variable...
for var in vars.split(','):
# Masked value to check for
missing_value = ncfile[var].missing_value
# Break up the variable name
var_re = re.match(r'([a-z]+)([0-9]*)([a-z]*)[,]*', var)
# For each lat
latcount = 0
latarr = ncfile.variables['lat'][:]
for lat in latarr:
# For each long
longcount = 0
longarr = ncfile.variables['lon'][:]
for long in longarr:
# For each time in the variable
tcount = 0
varstoinsert = []
for t in timearr:
# Clean up the seconds because every other time has a residual
t = t.replace(microsecond=0).replace(tzinfo=pytz.utc)
                    # Figure out the height
                    if var_re.group(2):
                        height = var_re.group(2)
                    else:
                        height = -9999
# Append the data to insert
if ncfile.variables[var][tcount, latcount, longcount] != missing_value:
v = geovariable.GeoVariable(var, t, height,
ncfile.variables[var][tcount, latcount, longcount])
else:
v = geovariable.GeoVariable(var, t, height, None)
varstoinsert.append(v)
# Increment t
tcount += 1
# Insert the data
insert.insertGeoVariable(windb2conn, "MERRA2", "NASA", varstoinsert,
longitude=long, latitude=lat, reinsert=reinsert)
# Increment long
longcount += 1
# Increment lat
latcount += 1
def export_to_csv(windb2conn, long, lat, variables, startyear=1980):
import numpy as np
import re
# Split the variables
variables = variables.split(',')
# Get the gridded coordinates
long_grid, lat_grid = get_surrounding_merra2_nodes(long, lat, grid=True)
# Write out a CSV file for each MERRA node, labeled A through D
labels = np.array([['C', 'D'], ['A', 'B']])
it = np.nditer(long_grid, flags=['multi_index'])
while not it.finished:
# Get the geomkey for this node
sql = "SELECT key, domainkey " \
"FROM horizgeom " \
"WHERE st_transform(geom,4326)=st_geomfromtext('POINT({} {})',4326) LIMIT 1"\
.format(long_grid[it.multi_index], lat_grid[it.multi_index])
windb2conn.curs.execute(sql)
geomkey, domainkey = windb2conn.curs.fetchone()
# Get the max time
sql = 'SELECT max(t) FROM {}_{} WHERE geomkey={}'.format(variables[0], domainkey, geomkey)
windb2conn.curs.execute(sql)
tmax = windb2conn.curs.fetchone()[0].date()
# Create the view
names_re = [re.match(r'([a-z]+)([0-9]*)([a-z]*)[,]*', var) for var in variables]
selects = '{}.t as t, '.format(names_re[0].group(0)) + \
'%s ' % str(['{varname}.value as {varname} '
.format(varname=var.group(0)) for var in names_re])\
.replace("'", '').replace('[', '').replace(',$', '').replace(']', '')
froms = '{varname}_{domainkey} as {varname}'.format(varname=variables[0], domainkey=domainkey)
leftjoins = '%s ' % str(['LEFT JOIN {varname}_{domainkey} as {varname} ON {varname1}.t={varname}.t '
'AND {varname1}.geomkey={varname}.geomkey AND {varname}.geomkey={geomkey}' \
.format(varname1=variables[0], varname=var, domainkey=domainkey, geomkey=geomkey) for var in variables[1:-1]]) \
.replace("'", '').replace('[', '').replace(',$', '').replace(']', '').replace(',', '')
leftjoins += 'LEFT JOIN {varname}_{domainkey} as {varname} ON {varname1}.t={varname}.t ' \
'AND {varname1}.geomkey={varname}.geomkey AND {varname}.geomkey={geomkey}' \
.format(varname1=variables[0], varname=variables[-1], domainkey=domainkey, geomkey=geomkey)
wheres = '%s ' % str(['{varname}.geomkey={geomkey} AND '
.format(varname=var, geomkey=geomkey) for var in variables[:-1]])\
.replace("'", '').replace('[', '').replace(',$', '').replace(']', '').replace(',', '')
wheres += '{varname}.geomkey={geomkey}'.format(varname=variables[-1], geomkey=geomkey)
# Convert u,v pairs into speed "ws" and "wd" direction fields
        # Negate the wind direction to get the meteorological wind direction
selects = re.sub(r'u([0-9]+)m\.value as u[0-9]+m , v([0-9]+)m\.value as v[0-9]+m ([,]*) ',
'speed(u\g<1>m.value, v\g<2>m.value) as ws\g<1>m, calc_dir_deg(-u\g<1>m.value, -v\g<2>m.value)::int as wd\g<1>m\g<3> ',
selects)
# sql = """CREATE TEMP VIEW csv_out_node_A AS
# SELECT u50m.t as t, u50m.value as u50m , v50m.value as v50m , t2m.value as t2m
# FROM u50m_1 as u50m
# LEFT JOIN v50m_1 as v50m ON u50m.t=v50m.t
# LEFT JOIN t2m_1 as t2m ON u50m.t=t2m.t
# WHERE u50m.geomkey=1 AND v50m.geomkey=1 AND t2m.geomkey=1
# ORDER BY t"""
# Final query to create the view
sql = """CREATE TEMP VIEW csv_out_node_{} AS
SELECT {}
FROM {}
{}
WHERE {} AND {}.t>=timestamp'{}-01-01 00:00:00+00'
ORDER BY t""".format(labels[it.multi_index], selects, froms, leftjoins, wheres, variables[0], startyear)
windb2conn.curs.execute(sql)
# Make the filename
filename='MERRA2_Node_{node}_{long:.{prec}f}_{lat:.{prec}f}_{startyear}_thru_{tmax}.csv'\
.format(node=labels[it.multi_index], long=long_grid[it.multi_index], lat=lat_grid[it.multi_index],
prec=3, startyear=startyear, tmax=tmax)
# Write out the CSV file
sql = 'COPY (SELECT * FROM csv_out_node_{}) TO STDOUT WITH CSV HEADER'.format(labels[it.multi_index])
with open(filename, 'w') as file:
print('Writing out Node {}: {}'.format(labels[it.multi_index], filename))
windb2conn.curs.copy_expert(sql, file)
it.iternext()
def _convert_long_to_index(long_surround, lat_surround):
if isinstance(long_surround, list):
long_surround = np.array(long_surround)
lat_surround = np.array(lat_surround)
    if np.any((long_surround*1000)%(_merra2_long_res*1000)) or np.any((lat_surround*100)%(_merra2_lat_res*100)):
        raise ValueError('Illegal long or lat value')
return (long_surround + 179.375)/_merra2_long_res, (lat_surround + 90)/_merra2_lat_res
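# Worked example (sketch): the western-most MERRA2 node (-179.375, -90.0)
# maps to grid index (0.0, 0.0) and (-178.75, -89.5) maps to (1.0, 1.0).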
|
sailorsenergy/windb2
|
windb2/model/merra2/util.py
|
Python
|
gpl-3.0
| 12,108
|
[
"NetCDF"
] |
db5b8b943f122a76f8ffa682f0c862a95aba196b0479a61604742b12a3ea8fea
|
# coding: utf-8
"""
===================================
Enhanced chroma and chroma variants
===================================
This notebook demonstrates a variety of techniques for enhancing chroma features and
also, introduces chroma variants implemented in librosa.
"""
###############################################################################################
#
# Enhanced chroma
# ^^^^^^^^^^^^^^^
# Beyond the default parameter settings of librosa's chroma functions, we apply the following
# enhancements:
#
# 1. Harmonic-percussive-residual source separation to eliminate transients.
# 2. Nearest-neighbor smoothing to eliminate passing tones and sparse noise. This is inspired by the
# recurrence-based smoothing technique of
# `Cho and Bello, 2011 <http://ismir2011.ismir.net/papers/OS8-4.pdf>`_.
# 3. Local median filtering to suppress remaining discontinuities.
# Code source: Brian McFee
# License: ISC
# sphinx_gallery_thumbnail_number = 5
import numpy as np
import scipy
import matplotlib.pyplot as plt
import librosa
import librosa.display
#######################################################################
# We'll use a track that has harmonic, melodic, and percussive elements
# Karissa Hobbs - Let's Go Fishin'
y, sr = librosa.load(librosa.ex('fishin'))
#######################################
# First, let's plot the original chroma
chroma_orig = librosa.feature.chroma_cqt(y=y, sr=sr)
# For display purposes, let's zoom in on a 15-second chunk from the middle of the song
idx = tuple([slice(None), slice(*list(librosa.time_to_frames([45, 60])))])
# And for comparison, we'll show the CQT matrix as well.
C = np.abs(librosa.cqt(y=y, sr=sr, bins_per_octave=12*3, n_bins=7*12*3))
fig, ax = plt.subplots(nrows=2, sharex=True)
img1 = librosa.display.specshow(librosa.amplitude_to_db(C, ref=np.max)[idx],
y_axis='cqt_note', x_axis='time', bins_per_octave=12*3,
ax=ax[0])
fig.colorbar(img1, ax=[ax[0]], format="%+2.f dB")
ax[0].label_outer()
img2 = librosa.display.specshow(chroma_orig[idx], y_axis='chroma', x_axis='time', ax=ax[1])
fig.colorbar(img2, ax=[ax[1]])
ax[1].set(ylabel='Default chroma')
########################################################
# We can do better by isolating the harmonic component of the audio signal.
# We'll use a large margin for separating harmonics from percussives:
y_harm = librosa.effects.harmonic(y=y, margin=8)
chroma_harm = librosa.feature.chroma_cqt(y=y_harm, sr=sr)
fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
librosa.display.specshow(chroma_orig[idx], y_axis='chroma', x_axis='time', ax=ax[0])
ax[0].set(ylabel='Default chroma')
ax[0].label_outer()
librosa.display.specshow(chroma_harm[idx], y_axis='chroma', x_axis='time', ax=ax[1])
ax[1].set(ylabel='Harmonic')
###########################################
# There's still some noise in there though.
# We can clean it up using non-local filtering.
# This effectively removes any sparse additive noise from the features.
chroma_filter = np.minimum(chroma_harm,
librosa.decompose.nn_filter(chroma_harm,
aggregate=np.median,
metric='cosine'))
fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
librosa.display.specshow(chroma_harm[idx], y_axis='chroma', x_axis='time', ax=ax[0])
ax[0].set(ylabel='Harmonic')
ax[0].label_outer()
librosa.display.specshow(chroma_filter[idx], y_axis='chroma', x_axis='time', ax=ax[1])
ax[1].set(ylabel='Non-local')
###########################################################
# Local discontinuities and transients can be suppressed by
# using a horizontal median filter.
chroma_smooth = scipy.ndimage.median_filter(chroma_filter, size=(1, 9))
fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
librosa.display.specshow(chroma_filter[idx], y_axis='chroma', x_axis='time', ax=ax[0])
ax[0].set(ylabel='Non-local')
ax[0].label_outer()
librosa.display.specshow(chroma_smooth[idx], y_axis='chroma', x_axis='time', ax=ax[1])
ax[1].set(ylabel='Median-filtered')
#########################################################
# A final comparison between the CQT, original chromagram
# and the result of our filtering.
fig, ax = plt.subplots(nrows=3, sharex=True)
librosa.display.specshow(librosa.amplitude_to_db(C, ref=np.max)[idx],
y_axis='cqt_note', x_axis='time',
bins_per_octave=12*3, ax=ax[0])
ax[0].set(ylabel='CQT')
ax[0].label_outer()
librosa.display.specshow(chroma_orig[idx], y_axis='chroma', x_axis='time', ax=ax[1])
ax[1].set(ylabel='Default chroma')
ax[1].label_outer()
librosa.display.specshow(chroma_smooth[idx], y_axis='chroma', x_axis='time', ax=ax[2])
ax[2].set(ylabel='Processed')
#################################################################################################
# Chroma variants
# ^^^^^^^^^^^^^^^
# There are three chroma variants implemented in librosa: `chroma_stft`, `chroma_cqt`, and `chroma_cens`.
# `chroma_stft` and `chroma_cqt` are two alternative ways of plotting chroma.
# `chroma_stft` performs a short-time Fourier transform of an audio input and maps each STFT bin to chroma, while `chroma_cqt` uses a constant-Q transform and maps each cq-bin to chroma.
#
# A comparison between the STFT and the CQT methods for chromagram.
chromagram_stft = librosa.feature.chroma_stft(y=y, sr=sr)
chromagram_cqt = librosa.feature.chroma_cqt(y=y, sr=sr)
fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
librosa.display.specshow(chromagram_stft[idx], y_axis='chroma', x_axis='time', ax=ax[0])
ax[0].set(ylabel='STFT')
ax[0].label_outer()
librosa.display.specshow(chromagram_cqt[idx], y_axis='chroma', x_axis='time', ax=ax[1])
ax[1].set(ylabel='CQT')
###################################################################################################
# CENS features (`chroma_cens`) are variants of chroma features introduced in
# `Müller and Ewart, 2011 <http://ismir2011.ismir.net/papers/PS2-8.pdf>`_, in which
# additional post processing steps are performed on the constant-Q chromagram to obtain features
# that are invariant to dynamics and timbre.
#
# Thus, the CENS features are useful for applications such as audio matching and retrieval.
#
# The following steps are additional processing done on the chromagram, and are implemented in
# `chroma_cens` (a rough sketch of the first two steps follows the comparison below):
# 1. L1-Normalization across each chroma vector
# 2. Quantization of the amplitudes based on "log-like" amplitude thresholds
# 3. Smoothing with sliding window (optional parameter)
# 4. Downsampling (not implemented)
#
# A comparison between the original constant-Q chromagram and the CENS features.
chromagram_cens = librosa.feature.chroma_cens(y=y, sr=sr)
fig, ax = plt.subplots(nrows=2, sharex=True, sharey=True)
librosa.display.specshow(chromagram_cqt[idx], y_axis='chroma', x_axis='time', ax=ax[0])
ax[0].set(ylabel='Orig')
librosa.display.specshow(chromagram_cens[idx], y_axis='chroma', x_axis='time', ax=ax[1])
ax[1].set(ylabel='CENS')
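###################################################################################################
# For intuition, a rough hand-rolled sketch of the first two CENS steps
# (illustration only: `chroma_cens` above is the reference implementation and
# additionally applies the smoothing step; the threshold values below are
# assumed for this sketch, not the ones used by librosa).
chroma_l1 = chromagram_cqt / np.maximum(chromagram_cqt.sum(axis=0, keepdims=True), 1e-10)
steps = np.array([0.05, 0.1, 0.2, 0.4])  # "log-like" amplitude thresholds (assumed)
chroma_quant = np.searchsorted(steps, chroma_l1).astype(float)
fig, ax = plt.subplots()
librosa.display.specshow(chroma_quant[idx], y_axis='chroma', x_axis='time', ax=ax)
ax.set(ylabel='Quantized (sketch)')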
|
librosa/librosa
|
docs/examples/plot_chroma.py
|
Python
|
isc
| 7,136
|
[
"Brian"
] |
e791217d16a60c4c3edcea95dc43d136f2ab135d3e551214dcc58425328621f3
|
from __future__ import print_function, division
import matplotlib
matplotlib.use('agg')
from nutils import *
import scipy
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
from scipy.io import mmwrite
from mskrylov import poly_driver, nested_driver
from mekrylov import me_driver
import time
from math import pi
import pyamg
import scipy.sparse.linalg as spla
@log.title
def makeplots( domain, geom, Lx, Lz, value, name, title, ndigits=0, index=None, clim=None, lineOn=False, imgtype=None,):
points, colors = domain.elem_eval( [ geom, value ], ischeme='bezier3', separate=True )
with plot.PyPlot( name, ndigits=ndigits, figsize=(10,10), index=index, imgtype=imgtype ) as plt:
plt.mesh( points, colors, triangulate='bezier', edgecolors='none' )
        if imgtype != 'eps':
plt.title(title, fontsize=20)
plt.xlabel('x [m]', fontsize=20)
plt.ylabel('z [m]', fontsize=20)
plt.xticks( [0, 0.5*Lx, Lx], ['0', str(int(0.5*Lx)), str(int(Lx))], fontsize=20 )
plt.yticks( [-0, -0.5*Lz, -Lz], ['0', str(int(0.5*Lz)), str(int(Lz))], fontsize=20 )
if clim is not None:
plt.clim(*clim)
        if imgtype != 'eps':
plt.colorbar()
def makevtk(domain, geom, rho, lam, mu, cp, cs, sol, freq, vec_basis, name):
Nom = len(freq)
vtk_geom, vtk_rho, vtk_lam, vtk_mu, vtk_cp, vtk_cs = domain.simplex.elem_eval( [ geom, rho, lam, mu, cp, cs ], ischeme='vtk', separate=True )
with plot.VTKFile( name ) as vtk:
vtk.unstructuredgrid( vtk_geom )
vtk.pointdataarray( 'rho', vtk_rho )
vtk.pointdataarray( 'lambda', vtk_lam )
vtk.pointdataarray( 'mu', vtk_mu )
vtk.pointdataarray( 'cp', vtk_cp )
vtk.pointdataarray( 'cs', vtk_cs )
for k in range(0,Nom):
disp = vec_basis.dot( sol[k,:] ).real
vtk_disp = domain.simplex.elem_eval( disp, ischeme='vtk', separate=True )
vtk.pointdataarray( 'disp_f'+str(freq[k]), vtk_disp )
def makespyplot( matrix, name, imgtype=None ):
if not scipy.sparse.isspmatrix( matrix ):
matrix = matrix.toscipy()
with plot.PyPlot( name, ndigits=0, imgtype=imgtype ) as plt:
plt.spy( matrix, markersize=0.8, color='black')
plt.title( name+', nnz = '+str(matrix.nnz) )
def point_eval(func, domain, geom, point):
domain = domain[tuple(slice(0, p) if p > 0 else slice(None) for p in point)]
for p in point:
domain = domain.boundary['right' if p > 0 else 'left']
return numpy.asarray(domain.integrate( func, geometry=geom, ischeme='gauss2' ).toscipy().todense())
def elast_mat(rho, cp, cs, lam, mu, ndims, nx, ny, nz, vec_basis, domain, geom, block):
# define PDE
stress = lambda u: lam*u.div(geom)[:,_,_]*function.eye(ndims) + 2.0*mu*u.symgrad(geom)
elasticity = function.outer( stress(vec_basis), vec_basis.grad(geom) ).sum([2,3])
w_mass = lambda u: rho*u
mass = function.outer( w_mass(vec_basis), vec_basis ).sum(-1)
# define BC
n = geom.normal()
t = np.eye(ndims)
t = t-(t*n[_,:]).sum(1)
B_bc = cp*n[:,_]*n[_,:]+cs*(t[:,:,_]*t[:,_,:]).sum(0)
bc_fun = lambda u: rho*(B_bc*u[:,_,:]).sum(-1)
sommerfeld = function.outer( bc_fun(vec_basis), vec_basis ).sum(-1)
if ndims == 2:
sommerfeld_boundary = 'left,right,bottom'
source_position = nx//2, nz
else:
sommerfeld_boundary = 'left,right,bottom,front,back'
source_position = nx//2, ny//2, nz
# Build matrices
K, M = domain.integrate( [elasticity, mass], geometry=geom, ischeme='gauss2' )
C = domain.boundary[sommerfeld_boundary].integrate( sommerfeld, geometry=geom, ischeme='gauss2' )
# Build RHS
if not block:
C = 0*C
source_position = int(np.round(6.0/10.0*nx)), int(np.round(4.0/10.0*nz))
rhs = point_eval(vec_basis, domain, geom, source_position)[:,-1] #+ point_eval(vec_basis, domain, geom, source_position)[:,0]
else:
rhs = point_eval(vec_basis, domain, geom, source_position)[:,-1]
return K, C, M, rhs
def test_orig_problem(K, C, M, b, om, x):
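    """Sanity check: prints the relative residual norm(b - A(om_j)*x_j)/norm(b)
    of the quadratic frequency-domain operator A(om) = K + 1j*om*C - om**2*M,
    evaluated column-wise over all frequencies."""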
Nom = len(om)
normb = np.linalg.norm(b)
relerr = np.zeros((Nom,))
for j in range(Nom):
xs = x[:,j]
r = b - (K*xs + 1j*om[j]*(C*xs) - om[j]**2*(M*xs))
relerr[j] = np.linalg.norm(r)/normb
print('Relative residual of original problem:' +str(relerr))
def main( ndims=2, # problem dimension (2,3)
dx=100.0, # grid size in x-direction
dy=100.0, # grid size in y-direction
dz=100.0, # grid size in z-direction
freq=[1.0,9.0], # frequencies in Hz
Nom=5, # number of equally-spaced freq's
degree=1, # degree of FEM splines
damping=0.7, # viscous damping param
maxit=300, # is also maxit_o in nested algo
maxit_i=10, # maxit of inner method
tol=1e-8, # is also tol_o in nested algo
tol_i=1e-1, # tol of inner method
dg_pp=0, # degree of poly preconditioner
tau_re=0.7, # real(seed), if tau.real<0: take 'optimal' tau
tau_im=-0.3, # imag(seed)
          iLU=False,          # exact or inexact shift-and-invert
          rot=True,           # spectral rotation for MatrEqn
fill_factor=1.0, # fill-in in iLU decomposition
block=True, # C=0 if False
plots=False, # plots on/off
plot_resnrm=False, # display residual norm live
solver_flag=-1): # -1(python's built-in), 0(MatrEqn), 1(poly_pre), 2(nested)
# domain size
Lx = 500.0
Ly = 500.0
Lz = 500.0
tau = tau_re+1j*tau_im
# problem parameters
freq = np.linspace(freq[0],freq[-1],Nom)
om = 2.0*np.pi*freq*(1.0-1j*damping)
Nom = len(om)
# define physical params
rho0 = 1800.0
cp0 = 2000.0
cs0 = 800.0
rho1 = 2100.0
cp1 = 3000.0
cs1 = 1600.0
# define Cartesian grid
nx = int(np.round(Lx/dx))+1
nz = int(np.round(Lz/dz))+1
verts_x = np.linspace( 0, Lx, nx )
verts_z = np.linspace( -Lz, 0, nz )
if ndims == 2:
ny = 1
dy = 0.
verts = [verts_x, verts_z]
elif ndims == 3:
ny = int(np.round(Ly/dy))+1
verts_y = np.linspace( 0, Ly, ny )
verts = [verts_x, verts_y, verts_z]
domain, geom = mesh.rectilinear(verts)
vec_basis = domain.splinefunc( degree=degree ).vector( ndims )
# define block-in-block problem
rho = function.select( [function.greater(0.35*Lx,geom[0]), function.greater(-0.85*Lx,-geom[0]),
function.greater(0.35*Lz,-geom[-1]), function.greater(-0.85*Lz,geom[-1])],
[rho0, rho0, rho0, rho0], rho1)
cp = function.select( [function.greater(0.35*Lx,geom[0]), function.greater(-0.85*Lx,-geom[0]),
function.greater(0.35*Lz,-geom[-1]), function.greater(-0.85*Lz,geom[-1])],
[cp0, cp0, cp0, cp0], cp1)
cs = function.select( [function.greater(0.35*Lx,geom[0]), function.greater(-0.85*Lx,-geom[0]),
function.greater(0.35*Lz,-geom[-1]), function.greater(-0.85*Lz,geom[-1])],
[cs0, cs0, cs0, cs0], cs1)
mu = cs**2 * rho
lam = rho * (cp**2 - 2.0*cs**2)
# problem summary
print( '---- SQUARES PROBLEM ----' )
ppw = 20.0
print( 'problem size : ' + str(nx-1+degree)+' x '+str(ny-1+degree)+' x '+str(nz-1+degree) )
print( '# dofs : ' + str(len(vec_basis)) )
print( 'max. frequency : ' + str( min(cs0,cs1,cp0,cp1)/(ppw*max(dx,dy,dz)) ) )
print( '---------------------------------\n' )
# Create discretization matrices using nutils
K, C, M, rhs = elast_mat(rho, cp, cs, lam, mu, ndims, nx, ny, nz, vec_basis, domain, geom, block)
t0 = time.time()
if solver_flag==0:
print('Use megmres')
sol, it = me_driver(K.toscipy().tocsc(), C.toscipy().tocsc(), M.toscipy().tocsc(), rhs, freq, tau, damping, tol, maxit, iLU, rot, fill_factor, plot_resnrm)
elif solver_flag==1:
print('Use poly_msgmres')
sol, it = poly_driver(K.toscipy().tocsc(), C.toscipy().tocsc(), M.toscipy().tocsc(), rhs, freq, tau, damping, tol, maxit, dg_pp, iLU, fill_factor, plot_resnrm)
elif solver_flag==2:
maxit_o = maxit
tol_o = tol
print('Use fom-fgmres')
sol, it = nested_driver(K.toscipy().tocsc(), C.toscipy().tocsc(), M.toscipy().tocsc(), rhs, freq, tau, damping,
tol_i, tol_o, maxit_i, maxit_o, iLU, fill_factor, plot_resnrm)
else:
print('Use pythons built-in solver...')
sol = np.zeros((Nom, len(vec_basis)), dtype=complex)
it = -1
for k in range(0,Nom):
matrix = K + 1j*om[k]*C - om[k]**2*M
A = matrix.toscipy().tocsc()
if ndims==2:
t0_lu = time.time()
lu = spla.splu(A)
print('LU decomposition:'+str(time.time()-t0_lu))
t0_solve = time.time()
sol[k,:] = lu.solve(rhs)
print('solve:'+str(time.time()-t0_solve))
else:
print('Use ILU+GMRES')
class gmres_counter(object):
def __init__(self, disp=True):
self._disp = disp
self.resvec=[]
self.niter = 0
def __call__(self, rk=None):
self.niter += 1
self.resvec.append(rk)
if self._disp:
print('iter %3i\trk = %s' % (self.niter, str(rk)))
t0_lu = time.time()
invA = spla.spilu( A, fill_factor=1.0)
invA_x = lambda x: invA.solve(x)
ilu = spla.LinearOperator(A.shape, invA_x)
print('ilu setup:'+str(time.time()-t0_lu))
t0_solve = time.time()
counter = gmres_counter(disp=True)
sol[k,:], info = spla.gmres(A, rhs, tol=1e-16, restart=200, maxiter=600, M=ilu, callback=counter)
it = info
print('GMRES time:'+str(time.time()-t0_solve))
print('GMRES info:'+str(counter.niter)+' -- '+str(counter.resvec[-1]))
te = time.time()
    print('Number of iterations: '+str(it)+' CPU time: '+str(te-t0))
test_orig_problem(K.toscipy(), C.toscipy(), M.toscipy(), rhs, om, sol.T)
if plots:
if(ndims ==2):
makeplots( domain, geom, Lx, Lz, rho, 'rho', 'Density distribution' )
#makeplots( domain, geom, Lx, Lz, rho, 'rho', 'Density distribution', imgtype='eps' )
#makeplots( domain, geom, Lx, Lz, cp, 'cp', 'c_p [m/s]' )
#makeplots( domain, geom, Lx, Lz, cs, 'cs', 'c_s [m/s]' )
for k in range(0,Nom):
disp = vec_basis.dot( sol[k,:] )
disp_x = disp[0].real
disp_z = disp[-1].real
makeplots( domain, geom, Lx, Lz, disp_x, 'disp_x'+str(k), 'u_x at {} Hz'.format(freq[k]), lineOn=False )
makeplots( domain, geom, Lx, Lz, disp_z, 'disp_z'+str(k), 'u_z at {} Hz'.format(freq[k]), lineOn=False )
#makeplots( domain, geom, Lx, Lz, disp_x, 'disp_x'+str(k), 'u_x at {} Hz'.format(freq[k]), lineOn=False, imgtype='eps' )
#makeplots( domain, geom, Lx, Lz, disp_z, 'disp_z'+str(k), 'u_z at {} Hz'.format(freq[k]), lineOn=False, imgtype='eps' )
makevtk(domain, geom, rho, lam, mu, cp, cs, sol, freq, vec_basis, 'wedge2d')
elif(ndims==3):
makevtk(domain, geom, rho, lam, mu, cp, cs, sol, freq, vec_basis, 'wedge3d')
util.run( main )
|
ManuelMBaumann/freqdom_compare
|
python_code/elast_squares.py
|
Python
|
mit
| 11,705
|
[
"VTK"
] |
60d795799f93dd81cfbffb8cf2eba711e32d43a297a29805aa98c03a37c7473f
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, absolute_import, unicode_literals
import json
import os
import logging
from requests.utils import guess_json_utf
logger = logging.getLogger(__name__)
class IterLinesSaver(object):
"""
Wrap HttpStream.iter_lines() and save responses.
"""
def __init__(self, path, fn):
self.path = path
self.fn = fn
self.line = 0
def iter_lines(self):
encoding = None
for line in self.fn():
path = "{f}-{n:0>3}.json".format(f=self.path, n=self.line)
logger.debug("capturing to %s", path)
if not encoding:
encoding = guess_json_utf(line)
with open(path, "w") as outf:
try:
json.dump(json.loads(line.decode(encoding)), outf,
sort_keys=True, indent=4)
except ValueError:
outf.write(line)
self.line += 1
yield line
class ResponseSaver(object):
"""
Wrap HttpSession.request() and save responses.
"""
def __init__(self, capture_dir, openshift_api_uri, k8s_api_uri, fn):
self.capture_dir = capture_dir
self.openshift_api_uri = openshift_api_uri
self.k8s_api_uri = k8s_api_uri
self.fn = fn
self.visited = {}
def request(self, url, method, *args, **kwargs):
filename = url
if filename.startswith(self.openshift_api_uri):
filename = filename[len(self.openshift_api_uri):]
if filename.startswith(self.k8s_api_uri):
filename = filename[len(self.k8s_api_uri):]
filename = filename.replace('/', '_')
path = os.path.join(self.capture_dir,
"{method}-{url}".format(method=method,
url=filename))
visit = self.visited.get(path, 0)
self.visited[path] = visit + 1
path += "-{0:0>3}".format(visit)
if kwargs.get('stream', False):
stream = self.fn(url, method, *args, **kwargs)
stream.iter_lines = IterLinesSaver(path,
stream.iter_lines).iter_lines
return stream
else:
response = self.fn(url, method, *args, **kwargs)
logger.debug("capturing to %s.json", path)
encoding = guess_json_utf(response.content)
with open(path + ".json", "w") as outf:
try:
json.dump(json.loads(response.content.decode(encoding)),
outf, sort_keys=True, indent=4)
except ValueError:
outf.write(response.content)
return response
def setup_json_capture(osbs, os_conf, capture_dir):
"""
Only used for setting up the testing framework.
"""
try:
os.mkdir(capture_dir)
except OSError:
pass
finally:
osbs.os._con.request = ResponseSaver(capture_dir,
os_conf.get_openshift_api_uri(),
os_conf.get_k8s_api_uri(),
osbs.os._con.request).request
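# A minimal usage sketch (illustrative only; `osbs` and `os_conf` are assumed
# to be pre-configured OSBS client and Configuration objects):
#
#     setup_json_capture(osbs, os_conf, "/tmp/osbs-capture")
#     osbs.os._con.request("/version", "GET")
#     # the response is saved as /tmp/osbs-capture/GET-_version-000.json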
|
bfontecc007/osbs-client
|
osbs/cli/capture.py
|
Python
|
bsd-3-clause
| 3,426
|
[
"VisIt"
] |
4f323443e919183206cd904e667bd38c639e5bf96fda3056413fa7135c51a615
|
def alignImage(static, moving, level_iters):
"""The Symmetric Normalization algorithm uses a multi-resolution approach by building a Gaussian Pyramid.
The static image is used a reference; a transformation that transforms the moving image into the
static image is found and applied to the static image. Returns overlay images, error image, and transformation matrix.
Uses CrossCorrelation metric which makes sense for aligning AFM topography.
INPUT: static and moving are both 2d arrays of the same dimension.
Level_iters is a list (typically 4 entries) defines
the number of iterations a user wants at each level of the pyramid, with the 0th entry referring to the
finest resolution.
OUTPUT: overlayimages: an image with the static, moving, and overlay images
errorimage: the difference between the warped_moving image and the static reference. THe flatter
the better.
transformmatrix: the matrix corresponding to the forward transformation of a matrix."""
    from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
    from dipy.align.metrics import SSDMetric, CCMetric, EMMetric
    from dipy.viz import regtools
    import matplotlib.pyplot as plt  # needed for the error image plotted below
dim = static.ndim
metric = CCMetric(dim)
sdr = SymmetricDiffeomorphicRegistration(metric, level_iters, inv_iter = 50)
mapping = sdr.optimize(static, moving)
warped_moving = mapping.transform(moving, 'linear')
overlayimages = regtools.overlay_images(static, warped_moving, 'Static','Overlay','Warped moving',
'direct_warp_result.png')
errorimage = plt.matshow(warped_moving-static, cmap='viridis')
transformmatrix=mapping.forward
return overlayimages, errorimage, transformmatrix
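# A minimal usage sketch (illustrative only; `static` and `moving` are assumed
# to be same-shape 2D numpy arrays, e.g. two AFM topography scans):
#
#     level_iters = [100, 50, 25, 10]  # iterations per pyramid level, finest first
#     overlay, err, T = alignImage(static, moving, level_iters)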
|
wesleybeckner/afm-miner
|
afmMiner/alignimages.py
|
Python
|
mit
| 1,747
|
[
"Gaussian"
] |
4537509e10cb79030b3bcad71e49d67f761d912fd69f769bccd0dbf82921fc8c
|
import platform, os, re
import subprocess
from mooseutils import colorText
from collections import namedtuple
import json
from tempfile import TemporaryFile
TERM_COLS = 110
LIBMESH_OPTIONS = {
'mesh_mode' : { 're_option' : r'#define\s+LIBMESH_ENABLE_PARMESH\s+(\d+)',
'default' : 'REPLICATED',
'options' :
{
'DISTRIBUTED' : '1',
'REPLICATED' : '0'
}
},
'unique_ids' : { 're_option' : r'#define\s+LIBMESH_ENABLE_UNIQUE_ID\s+(\d+)',
'default' : 'FALSE',
'options' :
{
'TRUE' : '1',
'FALSE' : '0'
}
},
'dtk' : { 're_option' : r'#define\s+LIBMESH_TRILINOS_HAVE_DTK\s+(\d+)',
'default' : 'FALSE',
'options' :
{
'TRUE' : '1',
'FALSE' : '0'
}
},
'boost' : { 're_option' : r'#define\s+LIBMESH_HAVE_EXTERNAL_BOOST\s+(\d+)',
'default' : 'FALSE',
'options' :
{
'TRUE' : '1',
'FALSE' : '0'
}
},
'vtk' : { 're_option' : r'#define\s+LIBMESH_HAVE_VTK\s+(\d+)',
'default' : 'FALSE',
'options' :
{
'TRUE' : '1',
'FALSE' : '0'
}
},
'tecplot' : { 're_option' : r'#define\s+LIBMESH_HAVE_TECPLOT_API\s+(\d+)',
'default' : 'FALSE',
'options' :
{
'TRUE' : '1',
'FALSE' : '0'
}
},
'petsc_major' : { 're_option' : r'#define\s+LIBMESH_DETECTED_PETSC_VERSION_MAJOR\s+(\d+)',
'default' : '1'
},
'petsc_minor' : { 're_option' : r'#define\s+LIBMESH_DETECTED_PETSC_VERSION_MINOR\s+(\d+)',
'default' : '1'
},
'petsc_version_release' : { 're_option' : r'#define\s+LIBMESH_DETECTED_PETSC_VERSION_RELEASE\s+(\d+)',
'default' : 'TRUE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'slepc_major' : { 're_option' : r'#define\s+LIBMESH_DETECTED_SLEPC_VERSION_MAJOR\s+(\d+)',
'default' : '1'
},
'slepc_minor' : { 're_option' : r'#define\s+LIBMESH_DETECTED_SLEPC_VERSION_MINOR\s+(\d+)',
'default' : '1'
},
'slepc_subminor' : { 're_option' : r'#define\s+LIBMESH_DETECTED_SLEPC_VERSION_SUBMINOR\s+(\d+)',
'default' : '1'
},
'dof_id_bytes' : { 're_option' : r'#define\s+LIBMESH_DOF_ID_BYTES\s+(\d+)',
'default' : '4'
},
'petsc_debug' : { 're_option' : r'#define\s+LIBMESH_PETSC_USE_DEBUG\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'curl' : { 're_option' : r'#define\s+LIBMESH_HAVE_CURL\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'tbb' : { 're_option' : r'#define\s+LIBMESH_HAVE_TBB_API\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'superlu' : { 're_option' : r'#define\s+LIBMESH_PETSC_HAVE_SUPERLU_DIST\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'slepc' : { 're_option' : r'#define\s+LIBMESH_HAVE_SLEPC\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'cxx11' : { 're_option' : r'#define\s+LIBMESH_HAVE_CXX11\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
'unique_id' : { 're_option' : r'#define\s+LIBMESH_ENABLE_UNIQUE_ID\s+(\d+)',
'default' : 'FALSE',
'options' : {'TRUE' : '1', 'FALSE' : '0'}
},
}
## Run a command and return the output, or ERROR: + output if retcode != 0
def runCommand(cmd, cwd=None):
# On Windows it is not allowed to close fds while redirecting output
should_close = platform.system() != "Windows"
p = subprocess.Popen([cmd], cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=should_close, shell=True)
output = p.communicate()[0]
if (p.returncode != 0):
output = 'ERROR: ' + output
return output
def launchTesterCommand(tester, command):
"""
Method to enter the tester directory, execute a command, and return the process
and temporary output file objects.
"""
try:
f = TemporaryFile()
# On Windows, there is an issue with path translation when the command is passed in
# as a list.
if platform.system() == "Windows":
process = subprocess.Popen(command,stdout=f,stderr=f,close_fds=False, shell=True, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP, cwd=tester.getTestDir())
else:
process = subprocess.Popen(command,stdout=f,stderr=f,close_fds=False, shell=True, preexec_fn=os.setsid, cwd=tester.getTestDir())
except:
print("Error in launching a new task", command)
raise
return (process, f)
## print an optionally colorified test result
#
# The test will not be colored if
# 1) options.colored is False,
# 2) the environment variable BITTEN_NOCOLOR is true, or
# 3) the color parameter is False.
def formatResult(tester_data, result, options, color=True):
tester = tester_data.getTester()
timing = tester_data.getTiming()
f_result = ''
caveats = ''
first_directory = tester.specs['first_directory']
test_name = tester.getTestName()
status = tester.getStatus()
cnt = (TERM_COLS-2) - len(test_name + result)
color_opts = {'code' : options.code, 'colored' : options.colored}
if color:
if options.color_first_directory:
            test_name = colorText(first_directory, 'CYAN', **color_opts) + test_name.replace(first_directory, '', 1) # Strip out first occurrence only
# Color the Caveats CYAN
m = re.search(r'(\[.*?\])', result)
if m:
caveats = m.group(1)
f_result = colorText(caveats, 'CYAN', **color_opts)
# Color test results based on status.
# Keep any caveats that may have been colored
if status:
f_result += colorText(result.replace(caveats, ''), tester.getColor(), **color_opts)
f_result = test_name + '.'*cnt + ' ' + f_result
else:
f_result = test_name + '.'*cnt + ' ' + result
# Tack on the timing if it exists
if options.timing:
f_result += ' [' + '%0.3f' % float(timing) + 's]'
if options.debug_harness:
f_result += ' Start: ' + '%0.3f' % tester_data.getStartTime() + ' End: ' + '%0.3f' % tester_data.getEndTime()
return f_result
## Color the error messages if the options permit, also do not color in bitten scripts because
# it messes up the trac output.
# Supports simple HTML-like tags for more advanced coloring schemes: \verbatim<r>,<g>,<y>,<b>\endverbatim All colors are bolded.
def getPlatforms():
# We'll use uname to figure this out. platform.uname() is available on all platforms
# while os.uname() is not (See bugs.python.org/issue8080).
# Supported platforms are LINUX, DARWIN, ML, MAVERICKS, YOSEMITE, or ALL
platforms = set(['ALL'])
raw_uname = platform.uname()
if raw_uname[0].upper() == 'DARWIN':
platforms.add('DARWIN')
if re.match("12\.", raw_uname[2]):
platforms.add('ML')
if re.match("13\.", raw_uname[2]):
platforms.add("MAVERICKS")
if re.match("14\.", raw_uname[2]):
platforms.add("YOSEMITE")
else:
platforms.add(raw_uname[0].upper())
return platforms
def runExecutable(libmesh_dir, location, bin, args):
# Installed location of libmesh executable
libmesh_installed = libmesh_dir + '/' + location + '/' + bin
# Uninstalled location of libmesh executable
libmesh_uninstalled = libmesh_dir + '/' + bin
# Uninstalled location of libmesh executable
libmesh_uninstalled2 = libmesh_dir + '/contrib/bin/' + bin
# The eventual variable we will use to refer to libmesh's executable
libmesh_exe = ''
if os.path.exists(libmesh_installed):
libmesh_exe = libmesh_installed
elif os.path.exists(libmesh_uninstalled):
libmesh_exe = libmesh_uninstalled
elif os.path.exists(libmesh_uninstalled2):
libmesh_exe = libmesh_uninstalled2
else:
print("Error! Could not find '" + bin + "' in any of the usual libmesh's locations!")
exit(1)
return runCommand(libmesh_exe + " " + args).rstrip()
def getCompilers(libmesh_dir):
# Supported compilers are GCC, INTEL or ALL
compilers = set(['ALL'])
mpicxx_cmd = runExecutable(libmesh_dir, "bin", "libmesh-config", "--cxx")
# Account for usage of distcc or ccache
if "distcc" in mpicxx_cmd or "ccache" in mpicxx_cmd:
mpicxx_cmd = mpicxx_cmd.split()[-1]
    # If mpi is in the command, run -show to get the compiler
if "mpi" in mpicxx_cmd:
raw_compiler = runCommand(mpicxx_cmd + " -show")
else:
raw_compiler = mpicxx_cmd
if re.match('icpc', raw_compiler) != None:
compilers.add("INTEL")
elif re.match('[cg]\+\+', raw_compiler) != None:
compilers.add("GCC")
elif re.match('clang\+\+', raw_compiler) != None:
compilers.add("CLANG")
return compilers
def getPetscVersion(libmesh_dir):
major_version = getLibMeshConfigOption(libmesh_dir, 'petsc_major')
minor_version = getLibMeshConfigOption(libmesh_dir, 'petsc_minor')
if len(major_version) != 1 or len(minor_version) != 1:
print "Error determining PETSC version"
exit(1)
return major_version.pop() + '.' + minor_version.pop()
def getSlepcVersion(libmesh_dir):
major_version = getLibMeshConfigOption(libmesh_dir, 'slepc_major')
minor_version = getLibMeshConfigOption(libmesh_dir, 'slepc_minor')
subminor_version = getLibMeshConfigOption(libmesh_dir, 'slepc_subminor')
    if len(major_version) != 1 or len(minor_version) != 1 or len(subminor_version) != 1:
return None
return major_version.pop() + '.' + minor_version.pop() + '.' + subminor_version.pop()
# Break down petsc version logic in a new define
# TODO: find a way to eval() logic instead
def checkPetscVersion(checks, test):
# If any version of petsc works, return true immediately
if 'ALL' in set(test['petsc_version']):
return (True, None, None)
# Iterate through petsc versions in test[PETSC_VERSION] and match it against check[PETSC_VERSION]
for petsc_version in test['petsc_version']:
logic, version = re.search(r'(.*?)(\d\S+)', petsc_version).groups()
# Exact match
if logic == '' or logic == '=':
if version == checks['petsc_version']:
return (True, None, version)
else:
return (False, '!=', version)
# Logical match
if logic == '>' and checks['petsc_version'][0:3] > version[0:3]:
return (True, None, version)
elif logic == '>=' and checks['petsc_version'][0:3] >= version[0:3]:
return (True, None, version)
elif logic == '<' and checks['petsc_version'][0:3] < version[0:3]:
return (True, None, version)
elif logic == '<=' and checks['petsc_version'][0:3] <= version[0:3]:
return (True, None, version)
return (False, logic, version)
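# Example (illustrative): with checks['petsc_version'] == '3.7', a test spec of
# test['petsc_version'] = ['>=3.6'] matches via the '>=' branch and returns
# (True, None, '3.6'), while ['3.5'] fails the exact match and returns
# (False, '!=', '3.5').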
# Break down slepc version logic in a new define
def checkSlepcVersion(checks, test):
# User does not require anything
if len(test['slepc_version']) == 0:
return (False, None, None)
# SLEPc is not installed
if checks['slepc_version'] == None:
return (False, None, None)
# If any version of SLEPc works, return true immediately
if 'ALL' in set(test['slepc_version']):
return (True, None, None)
# Iterate through SLEPc versions in test[SLEPC_VERSION] and match it against check[SLEPC_VERSION]
for slepc_version in test['slepc_version']:
logic, version = re.search(r'(.*?)(\d\S+)', slepc_version).groups()
# Exact match
if logic == '' or logic == '=':
if version == checks['slepc_version']:
return (True, None, version)
else:
return (False, '!=', version)
# Logical match
if logic == '>' and checks['slepc_version'][0:5] > version[0:5]:
return (True, None, version)
elif logic == '>=' and checks['slepc_version'][0:5] >= version[0:5]:
return (True, None, version)
elif logic == '<' and checks['slepc_version'][0:5] < version[0:5]:
return (True, None, version)
elif logic == '<=' and checks['slepc_version'][0:5] <= version[0:5]:
return (True, None, version)
return (False, logic, version)
def getIfAsioExists(moose_dir):
option_set = set(['ALL'])
if os.path.exists(moose_dir+"/framework/contrib/asio/include/asio.hpp"):
option_set.add('TRUE')
else:
option_set.add('FALSE')
return option_set
def getLibMeshConfigOption(libmesh_dir, option):
# Some tests work differently with parallel mesh enabled
# We need to detect this condition
option_set = set(['ALL'])
filenames = [
libmesh_dir + '/include/base/libmesh_config.h', # Old location
libmesh_dir + '/include/libmesh/libmesh_config.h' # New location
];
success = 0
for filename in filenames:
if success == 1:
break
try:
f = open(filename)
contents = f.read()
f.close()
info = LIBMESH_OPTIONS[option]
m = re.search(info['re_option'], contents)
if m != None:
if 'options' in info:
for value, option in info['options'].iteritems():
if m.group(1) == option:
option_set.add(value)
else:
option_set.clear()
option_set.add(m.group(1))
else:
option_set.add(info['default'])
success = 1
except IOError:
# print "Warning: I/O Error trying to read", filename, ":", e.strerror, "... Will try other locations."
pass
if success == 0:
print "Error! Could not find libmesh_config.h in any of the usual locations!"
exit(1)
return option_set
def getSharedOption(libmesh_dir):
# Some tests may only run properly with shared libraries on/off
# We need to detect this condition
shared_option = set(['ALL'])
result = runExecutable(libmesh_dir, "contrib/bin", "libtool", "--config | grep build_libtool_libs | cut -d'=' -f2")
if re.search('yes', result) != None:
shared_option.add('DYNAMIC')
elif re.search('no', result) != None:
shared_option.add('STATIC')
else:
# Neither no nor yes? Not possible!
print("Error! Could not determine whether shared libraries were built.")
exit(1)
return shared_option
def getInitializedSubmodules(root_dir):
"""
Gets a list of initialized submodules.
Input:
root_dir[str]: path to execute the git command. This should be the root
directory of the app so that the submodule names are correct
Return:
        list[str]: List of initialized submodule names or an empty list if there was an error.
"""
output = runCommand("git submodule status", cwd=root_dir)
if output.startswith("ERROR"):
return []
# This ignores submodules that have a '-' at the beginning which means they are not initialized
return re.findall(r'^[ +]\S+ (\S+)', output, flags=re.MULTILINE)
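# Example 'git submodule status' output (illustrative):
#     -a1b2c3d contrib/foo          <- '-' prefix: not initialized, ignored
#      e4f5a6b contrib/bar (v1.0)   <- initialized, so 'contrib/bar' is returned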
def addObjectsFromBlock(objs, node, block_name):
"""
Utility function that iterates over a dictionary and adds keys
to the executable object name set.
"""
data = node.get(block_name, {})
if data: # could be None so we can't just iterate over items
for name, block in data.iteritems():
objs.add(name)
addObjectNames(objs, block)
def addObjectNames(objs, node):
"""
Add object names that reside in this node.
"""
if not node:
return
addObjectsFromBlock(objs, node, "subblocks")
addObjectsFromBlock(objs, node, "subblock_types")
star = node.get("star")
if star:
addObjectNames(objs, star)
def getExeObjects(exe):
"""
Gets a set of object names that are in the executable JSON dump.
"""
output = runCommand("%s --json" % exe)
output = output.split('**START JSON DATA**\n')[1]
output = output.split('**END JSON DATA**\n')[0]
obj_names = set()
data = json.loads(output)
addObjectsFromBlock(obj_names, data, "blocks")
return obj_names
def checkOutputForPattern(output, re_pattern):
"""
Returns boolean of pattern match
"""
if re.search(re_pattern, output, re.MULTILINE | re.DOTALL) == None:
return False
else:
return True
def checkOutputForLiteral(output, literal):
"""
Returns boolean of literal match
"""
if output.find(literal) == -1:
return False
else:
return True
def deleteFilesAndFolders(test_dir, paths, delete_folders=True):
"""
Delete specified files
test_dir: The base test directory
    paths: A list containing files to delete
delete_folders: Attempt to delete any folders created
"""
for file in paths:
full_path = os.path.join(test_dir, file)
if os.path.exists(full_path):
try:
os.remove(full_path)
except:
print("Unable to remove file: " + full_path)
# Now try to delete directories that might have been created
if delete_folders:
for file in paths:
path = os.path.dirname(file)
while path != '':
(path, tail) = os.path.split(path)
try:
os.rmdir(os.path.join(test_dir, path, tail))
except:
# There could definitely be problems with removing the directory
# because it might be non-empty due to checkpoint files or other
# files being created on different operating systems. We just
# don't care for the most part and we don't want to error out.
# As long as our test boxes clean before each test, we'll notice
# the case where these files aren't being generated for a
# particular run.
#
# TL;DR; Just pass...
pass
# Check if test has any redirected output, and if its ready to be read
def checkOutputReady(tester, options):
for redirected_file in tester.getRedirectedOutputFiles(options):
file_path = os.path.join(tester.getTestDir(), redirected_file)
if not os.access(file_path, os.R_OK):
return False
return True
# return concatenated output from tests with redirected output
def getOutputFromFiles(tester, options):
file_output = ''
if checkOutputReady(tester, options):
for iteration, redirected_file in enumerate(tester.getRedirectedOutputFiles(options)):
file_path = os.path.join(tester.getTestDir(), redirected_file)
with open(file_path, 'r') as f:
file_output += "#"*80 + "\nOutput from processor " + str(iteration) + "\n" + "#"*80 + "\n" + readOutput(f, options)
return file_output
# This function reads output from the file (i.e. the test output)
# but trims it down to the specified size. It'll save the first two thirds
# of the requested size and the last third trimming from the middle
def readOutput(f, options, max_size=100000):
first_part = int(max_size*(2.0/3.0))
second_part = int(max_size*(1.0/3.0))
output = ''
f.seek(0)
if options.sep_files != True:
        output = f.read(first_part) # Read up to the first two thirds of max_size
if len(output) == first_part: # This means we didn't read the whole file yet
output += "\n" + "#"*80 + "\n\nOutput trimmed\n\n" + "#"*80 + "\n"
f.seek(-second_part, 2) # Skip the middle part of the file
if (f.tell() <= first_part): # Don't re-read some of what you've already read
f.seek(first_part+1, 0)
output += f.read() # Now read the rest
return output
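# Worked example (illustrative): with the default max_size=100000, a 1 MB file
# yields roughly the first 66666 bytes, a trim marker, and the last 33333
# bytes; the middle of the file is dropped.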
class TestStatus(object):
"""
Class for handling test statuses
"""
    ###### bucket status descriptions
## The following is a list of statuses possible in the TestHarness
##
## INITIALIZED = The default tester status when it is instanced
## PASS = Passing tests
## FAIL = Failing tests
## DIFF = Failing tests due to Exodiff, CSVDiff
## PENDING = A pending status applied by the TestHarness (RUNNING...)
## FINISHED = A status that can mean it finished in any status (like a pending queued status type)
## DELETED = A skipped test hidden from reporting. Under normal circumstances, this sort of test
## is placed in the SILENT bucket. It is only placed in the DELETED bucket (and therfor
    ##               is placed in the SILENT bucket. It is only placed in the DELETED bucket (and therefore
## running tests (-e)
## SKIP = Any test reported as skipped
## SILENT = Any test reported as skipped and should not alert the user (deleted, tests not
## matching '--re=' options, etc)
######
test_status = namedtuple('test_status', 'status color')
bucket_initialized = test_status(status='INITIALIZED', color='CYAN')
bucket_success = test_status(status='PASS', color='GREEN')
bucket_fail = test_status(status='FAIL', color='RED')
bucket_deleted = test_status(status='DELETED', color='RED')
bucket_diff = test_status(status='DIFF', color='YELLOW')
bucket_pending = test_status(status='PENDING', color='CYAN')
bucket_finished = test_status(status='FINISHED', color='CYAN')
bucket_skip = test_status(status='SKIP', color='RESET')
bucket_silent = test_status(status='SILENT', color='RESET')
# Initialize the class with a pending status
# TODO: don't do this? Initialize instead with None type? If we do
# and forget to set a status, getStatus will fail with None type errors
def __init__(self, status_message='initialized', status=bucket_initialized):
self.__status_message = status_message
self.__status = status
def setStatus(self, status_message, status_bucket):
"""
Set bucket status
setStatus("reason", TestStatus.bucket_tuple)
"""
self.__status_message = status_message
self.__status = status_bucket
def getStatus(self):
"""
Return status bucket namedtuple
"""
return self.__status
def getStatusMessage(self):
"""
Return status message string
"""
return self.__status_message
def getColor(self):
"""
Return enumerated color string
"""
return self.__status.color
def didPass(self):
"""
Return boolean passing status (True if passed)
"""
return self.getStatus() == self.bucket_success
def didFail(self):
"""
Return boolean failing status (True if failed)
"""
status = self.getStatus()
return status == self.bucket_fail or status == self.bucket_diff
def didDiff(self):
"""
Return boolean diff status (True if diff'd)
"""
status = self.getStatus()
return status == self.bucket_diff
def isInitialized(self):
"""
Return boolean initialized status
"""
status = self.getStatus()
return status == self.bucket_initialized
def isPending(self):
"""
Return boolean pending status
"""
status = self.getStatus()
return status == self.bucket_pending
def isSkipped(self):
"""
Return boolean skipped status
"""
status = self.getStatus()
return status == self.bucket_skip
def isSilent(self):
"""
Return boolean silent status
"""
status = self.getStatus()
return status == self.bucket_silent
def isDeleted(self):
"""
Return boolean deleted status
"""
status = self.getStatus()
return status == self.bucket_deleted
def isFinished(self):
"""
Return boolean finished status
"""
status = self.getStatus()
        return (status == self.bucket_finished or (status != self.bucket_pending and status != self.bucket_initialized))
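# A minimal usage sketch of TestStatus (illustrative only):
#
#     status = TestStatus()
#     status.setStatus('exodiff', TestStatus.bucket_diff)
#     assert status.didFail() and status.didDiff()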
|
liuwenf/moose
|
python/TestHarness/util.py
|
Python
|
lgpl-2.1
| 25,575
|
[
"VTK"
] |
64de2b9f40ab74d230bcd464d4d3f4d1e09f4689a19421fb545593434dcc5cb6
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import asyncio
import time
import json
import logging
import socket
import subprocess
from aiohttp import web
from foglamp.common import logger
from foglamp.services.core import server
from foglamp.services.core.api.statistics import get_statistics
from foglamp.services.core import connect
from foglamp.common.configuration_manager import ConfigurationManager
from foglamp.services.core.service_registry.service_registry import ServiceRegistry
from foglamp.common.service_record import ServiceRecord
__author__ = "Amarendra K. Sinha, Ashish Jabble"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
__start_time = time.time()
_logger = logger.setup(__name__, level=logging.INFO)
_help = """
-------------------------------------------------------------------------------
| GET | /foglamp/ping |
| PUT | /foglamp/shutdown |
| PUT | /foglamp/restart |
-------------------------------------------------------------------------------
"""
async def ping(request):
"""
Args:
request:
Returns:
basic health information json payload
:Example:
curl -X GET http://localhost:8081/foglamp/ping
"""
try:
auth_token = request.token
except AttributeError:
if request.is_auth_optional is False:
cfg_mgr = ConfigurationManager(connect.get_storage_async())
category_item = await cfg_mgr.get_category_item('rest_api', 'allowPing')
allow_ping = True if category_item['value'].lower() == 'true' else False
if allow_ping is False:
_logger.warning("Permission denied for Ping when Auth is mandatory.")
raise web.HTTPForbidden
since_started = time.time() - __start_time
stats_request = request.clone(rel_url='foglamp/statistics')
data_read, data_sent, data_purged = await get_stats(stats_request)
host_name = socket.gethostname()
# all addresses for the host
all_ip_addresses_cmd_res = subprocess.run(['hostname', '-I'], stdout=subprocess.PIPE)
ip_addresses = all_ip_addresses_cmd_res.stdout.decode('utf-8').replace("\n", "").strip().split(" ")
svc_name = server.Server._service_name
def services_health_litmus_test():
all_svc_status = [ServiceRecord.Status(int(service_record._status)).name.upper()
for service_record in ServiceRegistry.all()]
if 'FAILED' in all_svc_status:
return 'red'
elif 'UNRESPONSIVE' in all_svc_status:
return 'amber'
return 'green'
status_color = services_health_litmus_test()
safe_mode = True if server.Server.running_in_safe_mode else False
return web.json_response({'uptime': int(since_started),
'dataRead': data_read,
'dataSent': data_sent,
'dataPurged': data_purged,
'authenticationOptional': request.is_auth_optional,
'serviceName': svc_name,
'hostName': host_name,
'ipAddresses': ip_addresses,
'health': status_color,
'safeMode': safe_mode
})
async def get_stats(req):
"""
:param req: a clone of 'foglamp/statistics' endpoint request
:return: data_read, data_sent, data_purged
"""
res = await get_statistics(req)
stats = json.loads(res.body.decode())
def filter_stat(k):
"""
        There are no statistics for 'Readings Sent' when FogLAMP first starts,
        so the IndexError is caught and 0 is returned to avoid an
        'index out of range' error when calling the ping API.
"""
try:
v = [s['value'] for s in stats if s['key'] == k]
value = int(v[0])
except IndexError:
value = 0
return value
data_read = filter_stat('READINGS')
data_sent = filter_stat('Readings Sent')
data_purged = filter_stat('PURGED')
return data_read, data_sent, data_purged
async def shutdown(request):
"""
Args:
request:
Returns:
:Example:
curl -X PUT http://localhost:8081/foglamp/shutdown
"""
try:
loop = request.loop
loop.call_later(2, do_shutdown, request)
return web.json_response({'message': 'FogLAMP shutdown has been scheduled. '
                                             'Wait a few seconds for process cleanup.'})
except TimeoutError as err:
raise web.HTTPInternalServerError(reason=str(err))
except Exception as ex:
raise web.HTTPInternalServerError(reason=str(ex))
def do_shutdown(request):
_logger.info("Executing controlled shutdown")
try:
loop = request.loop
asyncio.ensure_future(server.Server.shutdown(request), loop=loop)
except RuntimeError as e:
_logger.exception("Error while stopping FogLAMP server: {}".format(str(e)))
raise
async def restart(request):
"""
:Example:
curl -X PUT http://localhost:8081/foglamp/restart
"""
try:
_logger.info("Executing controlled shutdown and start")
asyncio.ensure_future(server.Server.restart(request), loop=request.loop)
return web.json_response({'message': 'FogLAMP restart has been scheduled.'})
except TimeoutError as e:
_logger.exception("Error while stopping FogLAMP server: %s", e)
raise web.HTTPInternalServerError(reason=e)
except Exception as ex:
_logger.exception("Error while stopping FogLAMP server: %s", ex)
raise web.HTTPInternalServerError(reason=ex)
|
foglamp/FogLAMP
|
python/foglamp/services/core/api/common.py
|
Python
|
apache-2.0
| 6,018
|
[
"Amber"
] |
256da2bf55666afc135ed86235a3dedb9daced98c696abdb0124e5d0a30e25b8
|
#!/usr/bin/env python
import Command
import recalboxFiles
import zipfile
import shutil
from generators.Generator import Generator
import fsuaeControllers
from os import path
class FsuaeGenerator(Generator):
    # From one file (e.g. "x1.zip"), get the list of all existing files sharing the same prefix and extension, differing only in the trailing digit
# for example, "/path/toto0.zip" becomes ["/path/toto0.zip", "/path/toto1.zip", "/path/toto2.zip"]
def floppiesFromRom(self, rom):
floppies = []
# split path and extension
filepath, fileext = path.splitext(rom)
# if the last char is not a digit, only 1 file
if not filepath[-1:].isdigit():
floppies.append(rom)
return floppies
# path without the number
fileprefix=filepath[:-1]
        # special case for 0, since numbering may start at 1
n = 0
if path.isfile(fileprefix + str(n) + fileext):
floppies.append(fileprefix + str(n) + fileext)
# adding all other files
n = 1
while path.isfile(fileprefix + str(n) + fileext):
floppies.append(fileprefix + str(n) + fileext)
n += 1
return floppies
def filePrefix(self, rom):
filename, fileext = path.splitext(path.basename(rom))
if not filename[-1:].isdigit():
return filename
return filename[:-1]
def generate(self, system, rom, playersControllers, gameResolution):
fsuaeControllers.generateControllerConfig(system, playersControllers)
commandArray = [recalboxFiles.recalboxBins[system.config['emulator']], "--fullscreen",
"--amiga-model=" + system.config['core'],
"--base_dir=" + recalboxFiles.fsuaeConfig,
"--kickstarts_dir=" + recalboxFiles.fsuaeBios,
"--save_states_dir=" + recalboxFiles.fsuaeSaves + "/" + system.config['core'] + "/" + self.filePrefix(rom),
"--zoom=auto"
]
device_type = "floppy"
if system.config['core'] in ["CD32", "CDTV"]:
device_type = "cdrom"
# extract zip here
TEMP_DIR="/tmp/fsuae/" # with trailing slash!
diskNames = []
# read from zip
if (rom.lower().endswith("zip")):
zf = zipfile.ZipFile(rom, 'r')
for name in zf.namelist():
d = name.lower()
if (d.endswith("ipf") or d.endswith("adf") or d.endswith("dms") or d.endswith("adz")):
diskNames.append(name)
print("Amount of disks in zip " + str(len(diskNames)))
# if 2+ files, we have a multidisk ZIP (0=no zip)
if (len(diskNames) > 1):
print("extracting...")
shutil.rmtree(TEMP_DIR, ignore_errors=True) # cleanup
zf.extractall(TEMP_DIR)
n = 0
for disk in diskNames:
commandArray.append("--" + device_type + "_image_" + str(n) + "=" + TEMP_DIR + disk + "")
if (n <= 1 and device_type == "floppy") or (n == 0 and device_type == "cdrom"):
commandArray.append("--" + device_type + "_drive_" + str(n) + "=" + TEMP_DIR + disk + "")
n += 1
else:
n = 0
for img in self.floppiesFromRom(rom):
commandArray.append("--" + device_type + "_image_" + str(n) + "=" + img + "")
if (n <= 1 and device_type == "floppy") or (n == 0 and device_type == "cdrom"):
commandArray.append("--" + device_type + "_drive_" + str(n) + "=" + img + "")
n += 1
# controllers
n = 0
for pad in playersControllers:
if n <= 3:
commandArray.append("--joystick_port_" + str(n) + "=" + playersControllers[pad].realName + "")
n += 1
if 'args' in system.config and system.config['args'] is not None:
commandArray.extend(system.config['args'])
return Command.Command(array=commandArray)
|
nadenislamarre/recalbox-configgen
|
configgen/generators/fsuae/fsuaeGenerator.py
|
Python
|
mit
| 4,372
|
[
"ADF"
] |
36a81e0386193751052bb2ca787620a14d68d3c70a43f1216f65d4cc1819d18f
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`openlyricsexport` module provides the functionality for exporting
songs from the database to the OpenLyrics format.
"""
import logging
import os
from lxml import etree
from openlp.core.lib import Registry, check_directory_exists, translate
from openlp.core.utils import clean_filename
from openlp.plugins.songs.lib import OpenLyrics
log = logging.getLogger(__name__)
class OpenLyricsExport(object):
"""
This provides the Openlyrics export.
"""
def __init__(self, parent, songs, save_path):
"""
Initialise the export.
"""
log.debug(u'initialise OpenLyricsExport')
self.parent = parent
self.manager = parent.plugin.manager
self.songs = songs
self.save_path = save_path
check_directory_exists(self.save_path)
def do_export(self):
"""
Export the songs.
"""
log.debug(u'started OpenLyricsExport')
openLyrics = OpenLyrics(self.manager)
self.parent.progressBar.setMaximum(len(self.songs))
for song in self.songs:
self.application.process_events()
if self.parent.stop_export_flag:
return False
self.parent.incrementProgressBar(translate('SongsPlugin.OpenLyricsExport', 'Exporting "%s"...') %
song.title)
xml = openLyrics.song_to_xml(song)
tree = etree.ElementTree(etree.fromstring(xml))
filename = u'%s (%s)' % (song.title, u', '.join([author.display_name for author in song.authors]))
filename = clean_filename(filename)
# Ensure the filename isn't too long for some filesystems
filename = u'%s.xml' % filename[0:250 - len(self.save_path)]
# Pass a file object, because lxml does not cope with some special
# characters in the path (see lp:757673 and lp:744337).
tree.write(open(os.path.join(self.save_path, filename), u'w'),
encoding=u'utf-8', xml_declaration=True, pretty_print=True)
return True
def _get_application(self):
"""
Adds the openlp to the class dynamically
"""
if not hasattr(self, u'_application'):
self._application = Registry().get(u'application')
return self._application
application = property(_get_application)
|
marmyshev/transitions
|
openlp/plugins/songs/lib/openlyricsexport.py
|
Python
|
gpl-2.0
| 4,459
|
[
"Brian"
] |
24c6584b4e693ea37ce7c5bbe26c753340753bddba935997c0f68cc34029e09c
|
# -*- coding: utf-8 -*-
#
# test_siegert_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# This script tests the siegert_neuron in NEST.
import nest
import unittest
import numpy as np
HAVE_GSL = nest.sli_func("statusdict/have_gsl ::")
@nest.check_stack
@unittest.skipIf(not HAVE_GSL, 'GSL is not available')
class SiegertNeuronTestCase(unittest.TestCase):
"""
Test siegert_neuron
Details
-------
Compares the rate of a Poisson-driven iaf_psc_delta neuron
with the prediction from the siegert neuron.
"""
def setUp(self):
# test parameter to compare analytic solution to simulation
self.rtol = 1.0
# test parameters
self.N = 100
self.rate_ex = 1.5 * 1e4
self.J = 0.1
# simulation parameters
self.simtime = 500.
self.dt = 0.1
self.start = 200.
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
nest.SetKernelStatus(
{'resolution': self.dt, 'use_wfr': False, 'print_time': True})
# set up driven integrate-and-fire neuron
self.iaf_psc_delta = nest.Create(
'iaf_psc_delta', self.N) # , params={"C_m": 1.0})
self.poisson_generator = nest.Create(
'poisson_generator', params={'rate': self.rate_ex})
nest.Connect(self.poisson_generator, self.iaf_psc_delta,
syn_spec={'weight': self.J, 'delay': self.dt})
self.spike_detector = nest.Create(
"spike_detector", params={'start': self.start})
nest.Connect(
self.iaf_psc_delta, self.spike_detector)
# set up driven siegert neuron
neuron_status = nest.GetStatus(self.iaf_psc_delta)[0]
siegert_params = {'tau_m': neuron_status['tau_m'],
't_ref': neuron_status['t_ref'],
'theta': neuron_status['V_th'] -
neuron_status['E_L'],
'V_reset': neuron_status['V_reset'] -
neuron_status['E_L']}
self.siegert_neuron = nest.Create(
'siegert_neuron', params=siegert_params)
self.siegert_drive = nest.Create(
'siegert_neuron', 1, params={'mean': self.rate_ex})
J_mu_ex = neuron_status['tau_m'] * 1e-3 * self.J
J_sigma_ex = neuron_status['tau_m'] * 1e-3 * self.J**2
syn_dict = {'drift_factor': J_mu_ex, 'diffusion_factor':
J_sigma_ex, 'model': 'diffusion_connection'}
nest.Connect(
self.siegert_drive, self.siegert_neuron, syn_spec=syn_dict)
self.multimeter = nest.Create(
"multimeter", params={'record_from': ['rate'],
'interval': self.dt})
nest.Connect(
self.multimeter, self.siegert_neuron)
def test_RatePrediction(self):
"""Check the rate prediction of the siegert neuron"""
# simulate
nest.Simulate(self.simtime)
# get rate prediction from siegert neuron
events = nest.GetStatus(self.multimeter)[0]["events"]
senders = events['senders']
rate = events['rate'][np.where(senders == self.siegert_neuron)]
rate_prediction = rate[-1]
# get simulated rate of integrate-and-fire neuron
rate_iaf = nest.GetStatus(self.spike_detector)[0][
"n_events"] / ((self.simtime - self.start) * 1e-3) / self.N
# test rate prediction against simulated rate of
# integrate-and-fire neuron
self.assertTrue(np.isclose(rate_iaf, rate_prediction, rtol=self.rtol))
# test rate prediction against hard coded result
rate_prediction_test = 27.1095934379
self.assertTrue(np.isclose(rate_prediction_test, rate_prediction))
def suite():
# makeSuite is sort of obsolete http://bugs.python.org/issue2721
# using loadTestsFromTestCase instead.
suite1 = unittest.TestLoader().loadTestsFromTestCase(
SiegertNeuronTestCase)
return unittest.TestSuite([suite1])
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == "__main__":
run()
|
hesam-setareh/nest-simulator
|
pynest/nest/tests/test_siegert_neuron.py
|
Python
|
gpl-2.0
| 4,811
|
[
"NEURON"
] |
69e5c78c2ce4a82c3de2b2c8321ed6cf69652b42b53ec55eb3536662a97530fb
|
# Copyright (C) 2013 Matthew C. Zwier, Joshua L. Adelman and Lillian T. Chong (see note below).
#
# This file is part of WESTPA.
#
# WESTPA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WESTPA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WESTPA. If not, see <http://www.gnu.org/licenses/>.
#
# This implementation is derived from the ``concurrent.futures``
# module of Python 3.2, by Brian Quinlan, (C) 2011 the Python Software
# Foundation. See http://docs.python.org/3/license.html for more information.
__metaclass__ = type
import logging
import uuid, threading, signal
from itertools import islice
from contextlib import contextmanager
log = logging.getLogger(__name__)
class WorkManager:
'''Base class for all work managers. At a minimum, work managers must provide a
``submit()`` function and a ``n_workers`` attribute (which may be a property),
though most will also override ``startup()`` and ``shutdown()``.'''
@classmethod
def from_environ(cls, wmenv=None):
raise NotImplementedError
@classmethod
def add_wm_args(cls, parser, wmenv=None):
return
def __repr__(self):
return '<{classname} at 0x{id:x}>'.format(classname=self.__class__.__name__, id=id(self))
def __init__(self):
self._sigint_handler_installed = False
self.prior_sigint_handler = None
self.running = False
def __enter__(self):
self.startup()
return self
def __exit__(self, exc_type, exc_val, exc_traceback):
self.shutdown()
return False
def sigint_handler(self, signum, frame):
self.shutdown()
if self.prior_sigint_handler in (signal.SIG_IGN,None):
pass
elif self.prior_sigint_handler == signal.SIG_DFL:
raise KeyboardInterrupt
else:
self.prior_sigint_handler(signum, frame)
def install_sigint_handler(self):
if not self._sigint_handler_installed:
self._sigint_handler_installed = True
self.prior_sigint_handler = signal.signal(signal.SIGINT, self.sigint_handler)
def startup(self):
'''Perform any necessary startup work, such as spawning clients.'''
self.running = True
def shutdown(self):
'''Cleanly shut down any active workers.'''
self.running = False
def run(self):
'''Run the worker loop (in clients only).'''
pass
def submit(self, fn, args=None, kwargs=None):
'''Submit a task to the work manager, returning a `WMFuture` object representing the pending
result. ``fn(*args,**kwargs)`` will be executed by a worker, and the return value assigned as the
result of the returned future. The function ``fn`` and all arguments must be picklable; note
particularly that off-path modules (like the system module and any active plugins) are not
picklable unless pre-loaded in the worker process (i.e. prior to forking the master).'''
raise NotImplementedError
def submit_many(self, tasks):
'''Submit a set of tasks to the work manager, returning a list of `WMFuture` objects representing
pending results. Each entry in ``tasks`` should be a triple (fn, args, kwargs), which will result in
fn(*args, **kwargs) being executed by a worker. The function ``fn`` and all arguments must be
picklable; note particularly that off-path modules are not picklable unless pre-loaded in the worker
process.'''
return [self.submit(fn,args,kwargs) for (fn,args,kwargs) in tasks]
def as_completed(self, futures):
'''Return a generator which yields results from the given ``futures`` as they become
available.'''
pending = set(futures)
# See which futures have results, and install a watcher on those that do not
with WMFuture.all_acquired(pending):
completed = {future for future in futures if future.done}
pending -= completed
watcher = FutureWatcher(pending, threshold=1)
# Yield available results immediately
for future in completed:
yield future
del completed
# Wait on any remaining results
while pending:
watcher.wait()
completed = watcher.reset()
for future in completed:
yield future
pending.remove(future)
def submit_as_completed(self, task_generator, queue_size=None):
'''Return a generator which yields results from a set of ``futures`` as they become
available. Futures are generated by the ``task_generator``, which must return a triple of the form
expected by ``submit``. The method also accepts an int ``queue_size`` that dictates the
maximum number of Futures that should be pending at any given time. The default value of
``None`` submits all of the tasks at once.'''
futures = [self.submit(fn,args,kwargs) for (fn,args,kwargs) in islice(task_generator, queue_size)]
pending = set(futures)
with WMFuture.all_acquired(pending):
watcher = FutureWatcher(pending, threshold=1)
while pending:
watcher.wait()
completed = watcher.reset()
new_futures = [self.submit(fn,args,kwargs) for (fn,args,kwargs) in islice(task_generator, len(completed))]
pending.update(new_futures)
with WMFuture.all_acquired(new_futures):
watcher.add(new_futures)
for future in completed:
yield future
pending.remove(future)
def wait_any(self, futures):
'''Wait on any of the given ``futures`` and return the first one which has a result available.
If more than one result is or becomes available simultaneously, any completed future may be returned.'''
pending = set(futures)
with WMFuture.all_acquired(pending):
completed = {future for future in futures if future.done}
if completed:
# If any futures are complete, then we don't need to do anything else
return completed.pop()
else:
# Otherwise, we need to install a watcher
watcher = FutureWatcher(futures, threshold = 1)
watcher.wait()
completed = watcher.reset()
return completed.pop()
def wait_any_return_done(self, futures):
        '''Wait on any of the given ``futures`` and return the set of those which have results
        available, blocking until at least one future completes.'''
pending = set(futures)
with WMFuture.all_acquired(pending):
completed = {future for future in futures if future.done}
if completed:
# If any futures are complete, then we don't need to do anything else
return completed
else:
# Otherwise, we need to install a watcher
watcher = FutureWatcher(futures, threshold = 1)
watcher.wait()
completed = watcher.reset()
return completed
def wait_all(self, futures):
'''A convenience function which waits on all the given ``futures`` in order. This function returns
the same ``futures`` as submitted to the function as a list, indicating the order in which waits
occurred.'''
futures = list(futures)
results = []
for future in futures:
results.append(future.result)
return futures
@property
def is_master(self):
'''True if this is the master process for task distribution. This is necessary, e.g., for
MPI, where all processes start identically and then must branch depending on rank.'''
return True
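# A minimal serial work manager sketch built on the base class above
# (illustrative only; 'SerialWorkManager' is a hypothetical name, not part of
# this module):
#
#     class SerialWorkManager(WorkManager):
#         n_workers = 1
#         def submit(self, fn, args=None, kwargs=None):
#             future = WMFuture()
#             try:
#                 future._set_result(fn(*(args or ()), **(kwargs or {})))
#             except Exception as e:
#                 future._set_exception(e)
#             return future
#
#     with SerialWorkManager() as wm:
#         future = wm.submit(sum, ([1, 2, 3],))
#         print(future.result)   # -> 6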
class FutureWatcher:
'''A device to wait on multiple results and/or exceptions with only one lock.'''
def __init__(self, futures, threshold = 1):
self.event = threading.Event()
self.lock = threading.RLock()
self.threshold = threshold
self.completed = []
for future in futures:
future._add_watcher(self)
def signal(self, future):
'''Signal this watcher that the given future has results available. If this
brings the number of available futures above signal_threshold, this watcher's
event object will be signalled as well.'''
with self.lock:
self.completed.append(future)
if len(self.completed) >= self.threshold:
self.event.set()
def wait(self):
'''Wait on one or more futures.'''
return self.event.wait()
def reset(self):
'''Reset this watcher's list of completed futures, returning the list of completed futures
prior to resetting it.'''
with self.lock:
self.event.clear()
completed = self.completed
self.completed = []
return completed
def add(self, futures):
'''Add watchers to all futures in the iterable of futures.'''
for future in futures:
future._add_watcher(self)
class WMFuture:
'''A "future", representing work which has been dispatched for completion asynchronously.'''
@staticmethod
@contextmanager
def all_acquired(futures):
'''Context manager to acquire all locks on the given ``futures``. Primarily for internal use.'''
futures = list(futures)
for future in futures:
future._condition.acquire()
yield # to contents of "with" block
for future in futures:
future._condition.release()
def __init__(self, task_id=None):
self.task_id = task_id or uuid.uuid4()
self._condition = threading.Condition()
self._done = False
self._result = None
self._exception = None
self._traceback = None
# a set of Events representing who is waiting on results from this future
# this set will be cleared after the result is updated and watchers are notified
self._watchers = set()
# a set of functions that will be called with this future as an argument when it is updated with a
# result. This list will be cleared after the result is updated and all callbacks invoked
self._update_callbacks = []
def __repr__(self):
return '<WMFuture 0x{id:x}: {self.task_id!s}>'.format(id=id(self), self=self)
def __hash__(self):
return hash(self.task_id)
def _notify_watchers(self):
'''Notify all watchers that this future has been updated, then deletes the list of update watchers.'''
with self._condition:
assert self._done
for watcher in self._watchers:
watcher.signal(self)
self._watchers.clear()
def _invoke_callbacks(self):
'''Invoke all callbacks which have been registered on this future. Exceptions in callbacks
will be logged and ignored.'''
with self._condition:
for callback in self._update_callbacks:
try:
callback(self)
except Exception:
# This may need to be a simple print to stderr, depending on the locking
# semantics of the logger.
log.exception('ignoring exception in result callback')
del self._update_callbacks
self._update_callbacks = []
def _add_watcher(self, watcher):
'''Add the given update watcher to the internal list of watchers. If a result is available,
returns immediately without updating the list of watchers.'''
with self._condition:
if self._done:
watcher.signal(self)
return
else:
self._watchers.add(watcher)
def _add_callback(self, callback):
'''Add the given update callback to the internal list of callbacks. If a result is available,
invokes the callback immediately without updating the list of callbacks.'''
with self._condition:
if self._done:
try:
callback(self)
except Exception:
log.exception('ignoring exception in result callback')
else:
self._update_callbacks.append(callback)
def _set_result(self, result):
'''Set the result of this future to the given value, invoke on-completion callbacks, and notify
watchers.'''
with self._condition:
self._result = result
self._done = True
self._condition.notify_all()
self._invoke_callbacks()
self._notify_watchers()
def _set_exception(self, exception, traceback=None):
'''Set the exception of this future to the given value, invoke on-completion callbacks, and notify
watchers.'''
with self._condition:
self._exception = exception
self._traceback = traceback
self._done = True
self._condition.notify_all()
self._invoke_callbacks()
self._notify_watchers()
def get_result(self,discard=True):
'''Get the result associated with this future, blocking until it is available.
If ``discard`` is true, then removes the reference to the result contained
in this instance, so that a collection of futures need not turn into a cache of
all associated results.'''
with self._condition:
if self._done:
if self._exception:
if isinstance(self._traceback, basestring):
if self._traceback:
log.error('uncaught exception in remote function\n{}'.format(self._traceback))
raise self._exception
else:
raise self._exception, None, self._traceback
else:
self._condition.wait()
assert self._done
if self._exception:
                if isinstance(self._traceback, basestring):
log.error('uncaught exception in remote function\n{}'.format(self._traceback))
raise self._exception
else:
raise self._exception, None, self._traceback
result = self._result
if discard:
del self._result
return result
@property
def result(self):
return self.get_result(discard=False)
def wait(self):
'''Wait until this future has a result or exception available.'''
with self._condition:
if self._done:
return
else:
self._condition.wait()
assert self._done
return
def get_exception(self):
'''Get the exception associated with this future, blocking until it is available.'''
with self._condition:
if self._done:
return self._exception
else:
self._condition.wait()
assert self._done
return self._exception
exception = property(get_exception, None, None, get_exception.__doc__)
def get_traceback(self):
'''Get the traceback object associated with this future, if any.'''
with self._condition:
            if self._done:
return self._traceback
else:
self._condition.wait()
assert self._done
return self._traceback
traceback = property(get_traceback, None, None, get_traceback.__doc__)
def is_done(self):
'Indicates whether this future is done executing (may block if this future is being updated).'
with self._condition:
return self._done
done = property(is_done, None, None, is_done.__doc__)
# end class WMFuture
|
ajoshpratt/westpa
|
lib/wwmgr/work_managers/core.py
|
Python
|
gpl-3.0
| 16,963
|
[
"Brian"
] |
00de6cc1b47828d5354c75477fb516d9f6e596e2d833c4c99e706119324ed9e5
|
"""
State Space Model
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
from scipy.stats import norm
from .kalman_smoother import KalmanSmoother, SmootherResults
from .kalman_filter import (
KalmanFilter, FilterResults, PredictionResults, INVERT_UNIVARIATE, SOLVE_LU
)
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.wrapper as wrap
from statsmodels.tools.numdiff import (
_get_epsilon, approx_hess_cs, approx_fprime_cs
)
from statsmodels.tools.decorators import cache_readonly, resettable_cache
from statsmodels.tools.eval_measures import aic, bic, hqic
from statsmodels.tools.tools import pinv_extended
from statsmodels.tools.tools import Bunch
import statsmodels.genmod._prediction as pred
from statsmodels.genmod.families.links import identity
class MLEModel(tsbase.TimeSeriesModel):
r"""
State space model for maximum likelihood estimation
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
k_states : int
The dimension of the unobserved state process.
exog : array_like, optional
Array of exogenous regressors, shaped nobs x k. Default is no
exogenous regressors.
dates : array-like of datetime, optional
An array-like object of datetime objects. If a Pandas object is given
for endog, it is assumed to have a DateIndex.
freq : str, optional
The frequency of the time-series. A Pandas offset or 'B', 'D', 'W',
'M', 'A', or 'Q'. This is optional if dates are given.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices or for Kalman filtering options. See `Representation`, and
`KalmanFilter` for more details.
Attributes
----------
ssm : KalmanFilter
Underlying state space representation.
Notes
-----
This class wraps the state space model with Kalman filtering to add in
functionality for maximum likelihood estimation. In particular, it adds
the concept of updating the state space representation based on a defined
set of parameters, through the `update` method or `updater` attribute (see
below for more details on which to use when), and it adds a `fit` method
which uses a numerical optimizer to select the parameters that maximize
the likelihood of the model.
    The `start_params` attribute and the `update` method must be overridden in the
child class (and the `transform` and `untransform` methods, if needed).
See Also
--------
MLEResults
statsmodels.tsa.statespace.kalman_filter.KalmanFilter
statsmodels.tsa.statespace.representation.Representation
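
    Examples
    --------
    A minimal local level model sketch (illustrative only; a real subclass
    would also transform parameters to keep the variances positive)::

        class LocalLevel(MLEModel):
            def __init__(self, endog):
                super(LocalLevel, self).__init__(endog, k_states=1)
                self['design', 0, 0] = 1.0
                self['transition', 0, 0] = 1.0
                self['selection', 0, 0] = 1.0
                self.ssm.initialize_approximate_diffuse()

            @property
            def start_params(self):
                return [1.0, 1.0]

            def update(self, params, **kwargs):
                self['obs_cov', 0, 0] = params[0]
                self['state_cov', 0, 0] = params[1]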
"""
optim_hessian = 'cs'
def __init__(self, endog, k_states, exog=None, dates=None, freq=None,
**kwargs):
# Initialize the model base
super(MLEModel, self).__init__(endog=endog, exog=exog,
dates=dates, freq=freq,
missing='none')
# Store kwargs to recreate model
self._init_kwargs = kwargs
        # Prepare the endog array: C-ordered, shape=(nobs x k_endog)
self.endog, self.exog = self.prepare_data()
# Dimensions
self.nobs = self.endog.shape[0]
self.k_states = k_states
# Initialize the state-space representation
self.initialize_statespace(**kwargs)
def prepare_data(self):
"""
Prepare data for use in the state space representation
"""
endog = np.array(self.data.orig_endog, order='C')
exog = self.data.orig_exog
if exog is not None:
exog = np.array(exog)
# Base class may allow 1-dim data, whereas we need 2-dim
if endog.ndim == 1:
endog.shape = (endog.shape[0], 1) # this will be C-contiguous
return endog, exog
def initialize_statespace(self, **kwargs):
"""
Initialize the state space representation
Parameters
----------
**kwargs
Additional keyword arguments to pass to the state space class
constructor.
"""
# (Now self.endog is C-ordered and in long format (nobs x k_endog). To
# get F-ordered and in wide format just need to transpose)
endog = self.endog.T
# Instantiate the state space object
self.ssm = KalmanSmoother(endog.shape[0], self.k_states, **kwargs)
# Bind the data to the model
self.ssm.bind(endog)
# Other dimensions, now that `ssm` is available
self.k_endog = self.ssm.k_endog
def __setitem__(self, key, value):
return self.ssm.__setitem__(key, value)
def __getitem__(self, key):
return self.ssm.__getitem__(key)
def set_filter_method(self, filter_method=None, **kwargs):
"""
Set the filtering method
The filtering method controls aspects of which Kalman filtering
approach will be used.
Parameters
----------
filter_method : integer, optional
Bitmask value to set the filter method to. See notes for details.
**kwargs
Keyword arguments may be used to influence the filter method by
setting individual boolean flags. See notes for details.
Notes
-----
This method is rarely used. See the corresponding function in the
`KalmanFilter` class for details.
"""
self.ssm.set_filter_method(filter_method, **kwargs)
def set_inversion_method(self, inversion_method=None, **kwargs):
"""
Set the inversion method
The Kalman filter may contain one matrix inversion: that of the
forecast error covariance matrix. The inversion method controls how and
if that inverse is performed.
Parameters
----------
inversion_method : integer, optional
Bitmask value to set the inversion method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the inversion method by
setting individual boolean flags. See notes for details.
Notes
-----
This method is rarely used. See the corresponding function in the
`KalmanFilter` class for details.
"""
self.ssm.set_inversion_method(inversion_method, **kwargs)
def set_stability_method(self, stability_method=None, **kwargs):
"""
Set the numerical stability method
The Kalman filter is a recursive algorithm that may in some cases
suffer issues with numerical stability. The stability method controls
what, if any, measures are taken to promote stability.
Parameters
----------
stability_method : integer, optional
Bitmask value to set the stability method to. See notes for
details.
**kwargs
Keyword arguments may be used to influence the stability method by
setting individual boolean flags. See notes for details.
Notes
-----
This method is rarely used. See the corresponding function in the
`KalmanFilter` class for details.
"""
self.ssm.set_stability_method(stability_method, **kwargs)
def set_conserve_memory(self, conserve_memory=None, **kwargs):
"""
Set the memory conservation method
By default, the Kalman filter computes a number of intermediate
matrices at each iteration. The memory conservation options control
which of those matrices are stored.
Parameters
----------
conserve_memory : integer, optional
Bitmask value to set the memory conservation method to. See notes
for details.
**kwargs
Keyword arguments may be used to influence the memory conservation
method by setting individual boolean flags.
Notes
-----
This method is rarely used. See the corresponding function in the
`KalmanFilter` class for details.
"""
self.ssm.set_conserve_memory(conserve_memory, **kwargs)
def set_smoother_output(self, smoother_output=None, **kwargs):
"""
Set the smoother output
The smoother can produce several types of results. The smoother output
variable controls which are calculated and returned.
Parameters
----------
smoother_output : integer, optional
Bitmask value to set the smoother output to. See notes for details.
**kwargs
Keyword arguments may be used to influence the smoother output by
setting individual boolean flags.
Notes
-----
This method is rarely used. See the corresponding function in the
`KalmanSmoother` class for details.
"""
self.ssm.set_smoother_output(smoother_output, **kwargs)
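    # Illustrative usage sketch (editorial addition): each set_* method
    # above accepts either a full bitmask or individual boolean flags, e.g.
    # (flag names assumed from the KalmanFilter documentation):
    #
    #     mod.set_filter_method(filter_univariate=True)
    #     mod.set_conserve_memory(memory_no_predicted=True)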
def initialize_known(self, initial_state, initial_state_cov):
self.ssm.initialize_known(initial_state, initial_state_cov)
def initialize_approximate_diffuse(self, variance=None):
self.ssm.initialize_approximate_diffuse(variance)
def initialize_stationary(self):
self.ssm.initialize_stationary()
@property
def initialization(self):
return self.ssm.initialization
@property
def initial_variance(self):
return self.ssm.initial_variance
@initial_variance.setter
def initial_variance(self, value):
self.ssm.initial_variance = value
@property
def loglikelihood_burn(self):
return self.ssm.loglikelihood_burn
@loglikelihood_burn.setter
def loglikelihood_burn(self, value):
self.ssm.loglikelihood_burn = value
@property
def tolerance(self):
return self.ssm.tolerance
@tolerance.setter
def tolerance(self, value):
self.ssm.tolerance = value
def fit(self, start_params=None, transformed=True, cov_type='opg',
cov_kwds=None, method='lbfgs', maxiter=50, full_output=1,
disp=5, callback=None, return_params=False,
optim_hessian=None, **kwargs):
"""
Fits the model by maximum likelihood via Kalman filter.
Parameters
----------
start_params : array_like, optional
Initial guess of the solution for the loglikelihood maximization.
If None, the default is given by Model.start_params.
transformed : boolean, optional
Whether or not `start_params` is already transformed. Default is
True.
method : str, optional
The `method` determines which solver from `scipy.optimize`
is used, and it can be chosen from among the following strings:
- 'newton' for Newton-Raphson, 'nm' for Nelder-Mead
- 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)
- 'lbfgs' for limited-memory BFGS with optional box constraints
- 'powell' for modified Powell's method
- 'cg' for conjugate gradient
- 'ncg' for Newton-conjugate gradient
- 'basinhopping' for global basin-hopping solver
The explicit arguments in `fit` are passed to the solver,
with the exception of the basin-hopping solver. Each
solver has several optional arguments that are not the same across
solvers. See the notes section below (or scipy.optimize) for the
available arguments and for the list of explicit arguments that the
basin-hopping solver supports.
cov_type : str, optional
The `cov_type` keyword governs the method for calculating the
covariance matrix of parameter estimates. Can be one of:
- 'opg' for the outer product of gradient estimator
- 'oim' for the observed information matrix estimator, calculated
using the method of Harvey (1989)
- 'cs' for the observed information matrix estimator, calculated
using a numerical (complex step) approximation of the Hessian
matrix.
- 'delta' for the observed information matrix estimator, calculated
using a numerical (complex step) approximation of the Hessian
along with the delta method (method of propagation of errors)
applied to the parameter transformation function
`transform_params`.
- 'robust' for an approximate (quasi-maximum likelihood) covariance
              matrix that may be valid even in the presence of some
misspecifications. Intermediate calculations use the 'oim'
method.
- 'robust_cs' is the same as 'robust' except that the intermediate
calculations use the 'cs' method.
cov_kwds : dict or None, optional
            See `MLEResults.get_robustcov_results` for a description of required
keywords for alternative covariance estimators
maxiter : int, optional
The maximum number of iterations to perform.
full_output : boolean, optional
Set to True to have all available output in the Results object's
mle_retvals attribute. The output is dependent on the solver.
See LikelihoodModelResults notes section for more information.
disp : boolean, optional
Set to True to print convergence messages.
callback : callable callback(xk), optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
return_params : boolean, optional
Whether or not to return only the array of maximizing parameters.
Default is False.
optim_hessian : {'opg','oim','cs'}, optional
The method by which the Hessian is numerically approximated. 'opg'
uses outer product of gradients, 'oim' uses the information
matrix formula from Harvey (1989), and 'cs' uses second-order
complex step differentiation. This keyword is only relevant if the
optimization method uses the Hessian matrix.
**kwargs
Additional keyword arguments to pass to the optimizer.
Returns
-------
MLEResults
See also
--------
statsmodels.base.model.LikelihoodModel.fit
MLEResults
"""
if start_params is None:
start_params = self.start_params
transformed = True
# Update the hessian method
if optim_hessian is not None:
self.optim_hessian = optim_hessian
# Unconstrain the starting parameters
if transformed:
start_params = self.untransform_params(np.array(start_params))
if method == 'lbfgs' or method == 'bfgs':
# kwargs.setdefault('pgtol', 1e-8)
# kwargs.setdefault('factr', 1e2)
# kwargs.setdefault('m', 12)
kwargs.setdefault('approx_grad', True)
kwargs.setdefault('epsilon', 1e-5)
# Maximum likelihood estimation
fargs = (False,) # (sets transformed=False)
mlefit = super(MLEModel, self).fit(start_params, method=method,
fargs=fargs,
maxiter=maxiter,
full_output=full_output,
disp=disp, callback=callback,
skip_hessian=True, **kwargs)
# Just return the fitted parameters if requested
if return_params:
return self.transform_params(mlefit.params)
# Otherwise construct the results class if desired
else:
res = self.smooth(mlefit.params, transformed=False,
cov_type=cov_type, cov_kwds=cov_kwds)
res.mlefit = mlefit
res.mle_retvals = mlefit.mle_retvals
res.mle_settings = mlefit.mle_settings
return res
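    # Illustrative usage sketch (editorial addition; `LocalLevel` is the
    # hypothetical subclass sketched above):
    #
    #     mod = LocalLevel(endog)
    #     res = mod.fit(method='lbfgs', maxiter=100, disp=False)
    #     print(res.summary())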
def filter(self, params, transformed=True, cov_type=None, cov_kwds=None,
return_ssm=False, **kwargs):
"""
Kalman filtering
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
        return_ssm : boolean, optional
Whether or not to return only the state space output or a full
results object. Default is to return a full results object.
cov_type : str, optional
            See `MLEModel.fit` for a description of covariance matrix types
for results object.
cov_kwds : dict or None, optional
            See `MLEResults.get_robustcov_results` for a description of required
keywords for alternative covariance estimators
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
"""
params = np.array(params, ndmin=1)
if not transformed:
params = self.transform_params(params)
self.update(params, transformed=True)
# Save the parameter names
self.data.param_names = self.param_names
# Get the state space output
result = self.ssm.filter(**kwargs)
# Wrap in a results object
if not return_ssm:
result_kwargs = {}
if cov_type is not None:
result_kwargs['cov_type'] = cov_type
result = MLEResultsWrapper(
MLEResults(self, params, result, **result_kwargs)
)
return result
def smooth(self, params, transformed=True, cov_type=None, cov_kwds=None,
return_ssm=False, **kwargs):
"""
Kalman smoothing
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
        return_ssm : boolean, optional
Whether or not to return only the state space output or a full
results object. Default is to return a full results object.
cov_type : str, optional
            See `MLEModel.fit` for a description of covariance matrix types
for results object.
cov_kwds : dict or None, optional
            See `MLEResults.get_robustcov_results` for a description of required
keywords for alternative covariance estimators
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
"""
params = np.array(params, ndmin=1)
if not transformed:
params = self.transform_params(params)
self.update(params, transformed=True)
# Save the parameter names
self.data.param_names = self.param_names
# Get the state space output
result = self.ssm.smooth(**kwargs)
# Wrap in a results object
if not return_ssm:
result_kwargs = {}
if cov_type is not None:
result_kwargs['cov_type'] = cov_type
result = MLEResultsWrapper(
MLEResults(self, params, result, **result_kwargs)
)
return result
def loglike(self, params, transformed=True, **kwargs):
"""
Loglikelihood evaluation
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Notes
-----
[1]_ recommend maximizing the average likelihood to avoid scale issues;
this is done automatically by the base Model fit method.
References
----------
.. [1] Koopman, Siem Jan, Neil Shephard, and Jurgen A. Doornik. 1999.
Statistical Algorithms for Models in State Space Using SsfPack 2.2.
Econometrics Journal 2 (1): 107-60. doi:10.1111/1368-423X.00023.
See Also
--------
update : modifies the internal state of the state space model to
reflect new params
"""
if not transformed:
params = self.transform_params(params)
self.update(params, transformed=True)
loglike = self.ssm.loglike(**kwargs)
# Koopman, Shephard, and Doornik recommend maximizing the average
# likelihood to avoid scale issues, but the averaging is done
# automatically in the base model `fit` method
return loglike
def loglikeobs(self, params, transformed=True, **kwargs):
"""
Loglikelihood evaluation
Parameters
----------
params : array_like
Array of parameters at which to evaluate the loglikelihood
function.
transformed : boolean, optional
Whether or not `params` is already transformed. Default is True.
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Notes
-----
[1]_ recommend maximizing the average likelihood to avoid scale issues;
this is done automatically by the base Model fit method.
References
----------
.. [1] Koopman, Siem Jan, Neil Shephard, and Jurgen A. Doornik. 1999.
Statistical Algorithms for Models in State Space Using SsfPack 2.2.
Econometrics Journal 2 (1): 107-60. doi:10.1111/1368-423X.00023.
See Also
--------
update : modifies the internal state of the Model to reflect new params
"""
if not transformed:
params = self.transform_params(params)
self.update(params, transformed=True)
return self.ssm.loglikeobs(**kwargs)
def observed_information_matrix(self, params, **kwargs):
"""
Observed information matrix
Parameters
----------
params : array_like, optional
Array of parameters at which to evaluate the loglikelihood
function.
**kwargs
Additional keyword arguments to pass to the Kalman filter. See
`KalmanFilter.filter` for more details.
Notes
-----
This method is from Harvey (1989), which shows that the information
        matrix only depends on terms from the gradient. This implementation
        is therefore partially analytic and partially numerical: it uses the
        analytic formula for the information matrix together with numerically
        computed elements of the gradient.
References
----------
Harvey, Andrew C. 1990.
Forecasting, Structural Time Series Models and the Kalman Filter.
Cambridge University Press.
"""
params = np.array(params, ndmin=1)
# Setup
n = len(params)
epsilon = _get_epsilon(params, 1, None, n)
increments = np.identity(n) * 1j * epsilon
# Get values at the params themselves
self.update(params)
res = self.ssm.filter(**kwargs)
dtype = self.ssm.dtype
# Save this for inversion later
inv_forecasts_error_cov = res.forecasts_error_cov.copy()
# Compute partial derivatives
partials_forecasts_error = (
np.zeros((self.k_endog, self.nobs, n))
)
partials_forecasts_error_cov = (
np.zeros((self.k_endog, self.k_endog, self.nobs, n))
)
for i, ih in enumerate(increments):
self.update(params + ih)
res = self.ssm.filter(**kwargs)
partials_forecasts_error[:, :, i] = (
res.forecasts_error.imag / epsilon[i]
)
partials_forecasts_error_cov[:, :, :, i] = (
res.forecasts_error_cov.imag / epsilon[i]
)
# Compute the information matrix
tmp = np.zeros((self.k_endog, self.k_endog, self.nobs, n), dtype=dtype)
information_matrix = np.zeros((n, n), dtype=dtype)
for t in range(self.ssm.loglikelihood_burn, self.nobs):
inv_forecasts_error_cov[:, :, t] = (
np.linalg.inv(inv_forecasts_error_cov[:, :, t])
)
for i in range(n):
tmp[:, :, t, i] = np.dot(
inv_forecasts_error_cov[:, :, t],
partials_forecasts_error_cov[:, :, t, i]
)
for i in range(n):
for j in range(n):
information_matrix[i, j] += (
0.5 * np.trace(np.dot(tmp[:, :, t, i],
tmp[:, :, t, j]))
)
information_matrix[i, j] += np.inner(
partials_forecasts_error[:, t, i],
np.dot(inv_forecasts_error_cov[:,:,t],
partials_forecasts_error[:, t, j])
)
return information_matrix / (self.nobs - self.ssm.loglikelihood_burn)
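    # Editorial note: the loop above relies on complex-step differentiation,
    # which for analytic f gives f'(x) ~= Im(f(x + i*h)) / h with no
    # subtractive cancellation. A one-line check of the idea:
    #
    #     h = 1e-20
    #     assert abs(np.sin(1.0 + 1j * h).imag / h - np.cos(1.0)) < 1e-12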
def opg_information_matrix(self, params, **kwargs):
"""
Outer product of gradients information matrix
Parameters
----------
params : array_like, optional
Array of parameters at which to evaluate the loglikelihood
function.
**kwargs
Additional arguments to the `loglikeobs` method.
References
----------
Berndt, Ernst R., Bronwyn Hall, Robert Hall, and Jerry Hausman. 1974.
Estimation and Inference in Nonlinear Structural Models.
NBER Chapters. National Bureau of Economic Research, Inc.
"""
score_obs = self.score_obs(params, **kwargs).transpose()
return (
np.inner(score_obs, score_obs) /
(self.nobs - self.ssm.loglikelihood_burn)
)
def score(self, params, *args, **kwargs):
"""
Compute the score function at params.
Parameters
----------
params : array_like
Array of parameters at which to evaluate the score.
*args, **kwargs
Additional arguments to the `loglike` method.
Returns
        -------
score : array
Score, evaluated at `params`.
Notes
-----
This is a numerical approximation, calculated using first-order complex
step differentiation on the `loglike` method.
Both \*args and \*\*kwargs are necessary because the optimizer from
`fit` must call this function and only supports passing arguments via
\*args (for example `scipy.optimize.fmin_l_bfgs`).
"""
params = np.array(params, ndmin=1)
transformed = (
args[0] if len(args) > 0 else kwargs.get('transformed', False)
)
score = approx_fprime_cs(params, self.loglike, kwargs={
'transformed': transformed
})
return score
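    # Illustrative sketch (editorial addition): `approx_fprime_cs` applies
    # the complex-step idea elementwise; a rough equivalent is:
    #
    #     def _complex_step_grad(f, x, h=1e-20):
    #         n = len(x)
    #         return np.array([f(x + 1j * h * np.eye(n)[i]).imag / h
    #                          for i in range(n)])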
def score_obs(self, params, **kwargs):
"""
Compute the score per observation, evaluated at params
Parameters
----------
params : array_like
Array of parameters at which to evaluate the score.
        **kwargs
            Additional arguments to the `loglikeobs` method.
Returns
        -------
score : array (nobs, k_vars)
Score per observation, evaluated at `params`.
Notes
-----
This is a numerical approximation, calculated using first-order complex
step differentiation on the `loglikeobs` method.
"""
params = np.array(params, ndmin=1)
self.update(params)
return approx_fprime_cs(params, self.loglikeobs, kwargs=kwargs)
def hessian(self, params, *args, **kwargs):
"""
Hessian matrix of the likelihood function, evaluated at the given
parameters
Parameters
----------
params : array_like
Array of parameters at which to evaluate the hessian.
*args, **kwargs
Additional arguments to the `loglike` method.
Returns
-------
hessian : array
Hessian matrix evaluated at `params`
Notes
-----
This is a numerical approximation.
Both \*args and \*\*kwargs are necessary because the optimizer from
`fit` must call this function and only supports passing arguments via
\*args (for example `scipy.optimize.fmin_l_bfgs`).
"""
if self.optim_hessian == 'cs':
hessian = self._hessian_cs(params, *args, **kwargs)
elif self.optim_hessian == 'oim':
hessian = self._hessian_oim(params)
elif self.optim_hessian == 'opg':
hessian = self._hessian_opg(params)
else:
raise NotImplementedError('Invalid Hessian calculation method.')
return hessian
def _hessian_oim(self, params):
"""
Hessian matrix computed using the Harvey (1989) information matrix
"""
return -self.observed_information_matrix(params)
def _hessian_opg(self, params):
"""
Hessian matrix computed using the outer product of gradients
information matrix
"""
return -self.opg_information_matrix(params)
def _hessian_cs(self, params, *args, **kwargs):
"""
Hessian matrix computed by second-order complex-step differentiation
on the `loglike` function.
"""
transformed = (
args[0] if len(args) > 0 else kwargs.get('transformed', False)
)
f = lambda params, **kwargs: self.loglike(params, **kwargs) / self.nobs
hessian = approx_hess_cs(params, f, kwargs={
'transformed': transformed
})
return hessian
@property
def start_params(self):
"""
(array) Starting parameters for maximum likelihood estimation.
"""
if hasattr(self, '_start_params'):
return self._start_params
else:
raise NotImplementedError
@property
def param_names(self):
"""
(list of str) List of human readable parameter names (for parameters
actually included in the model).
"""
if hasattr(self, '_param_names'):
return self._param_names
else:
try:
names = ['param.%d' % i for i in range(len(self.start_params))]
except NotImplementedError:
names = []
return names
def transform_jacobian(self, unconstrained):
"""
Jacobian matrix for the parameter transformation function
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer.
Returns
-------
jacobian : array
Jacobian matrix of the transformation, evaluated at `unconstrained`
Notes
-----
This is a numerical approximation.
See Also
--------
transform_params
"""
return approx_fprime_cs(unconstrained, self.transform_params)
def transform_params(self, unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer, to be
transformed.
Returns
-------
constrained : array_like
Array of constrained parameters which may be used in likelihood
            evaluation.
Notes
-----
This is a noop in the base class, subclasses should override where
appropriate.
"""
return np.array(unconstrained, ndmin=1)
def untransform_params(self, constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array_like
            Array of constrained parameters used in likelihood evaluation, to be
transformed.
Returns
-------
unconstrained : array_like
Array of unconstrained parameters used by the optimizer.
Notes
-----
This is a noop in the base class, subclasses should override where
appropriate.
"""
return np.array(constrained, ndmin=1)
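    # Illustrative sketch (editorial addition): a subclass estimating a
    # variance might enforce positivity by squaring, e.g.:
    #
    #     def transform_params(self, unconstrained):
    #         return unconstrained ** 2
    #
    #     def untransform_params(self, constrained):
    #         return constrained ** 0.5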
def update(self, params, transformed=True):
"""
Update the parameters of the model
Parameters
----------
params : array_like
Array of new parameters.
transformed : boolean, optional
Whether or not `params` is already transformed. If set to False,
`transform_params` is called. Default is True.
Returns
-------
params : array_like
Array of parameters.
Notes
-----
Since Model is a base class, this method should be overridden by
subclasses to perform actual updating steps.
"""
params = np.array(params, ndmin=1)
if not transformed:
params = self.transform_params(params)
return params
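    # Illustrative sketch (editorial addition; `MyModel` is hypothetical):
    # a subclass override typically delegates the transformation to super()
    # and then writes the new values into the state space matrices:
    #
    #     def update(self, params, transformed=True):
    #         params = super(MyModel, self).update(params, transformed)
    #         self['transition', 0, 0] = params[0]
    #         self['state_cov', 0, 0] = params[1]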
def simulate(self, params, nsimulations, measurement_shocks=None,
state_shocks=None, initial_state=None):
"""
Simulate a new time series following the state space model
Parameters
----------
params : array_like
Array of model parameters.
nsimulations : int
The number of observations to simulate. If the model is
time-invariant this can be any number. If the model is
time-varying, then this number must be less than or equal to the
            number of observations.
measurement_shocks : array_like, optional
If specified, these are the shocks to the measurement equation,
:math:`\varepsilon_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_endog`, where `k_endog` is the
same as in the state space model.
state_shocks : array_like, optional
If specified, these are the shocks to the state equation,
:math:`\eta_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the
same as in the state space model.
initial_state : array_like, optional
If specified, this is the state vector at time zero, which should
be shaped (`k_states` x 1), where `k_states` is the same as in the
state space model. If unspecified, but the model has been
initialized, then that initialization is used. If unspecified and
the model has not been initialized, then a vector of zeros is used.
Note that this is not included in the returned `simulated_states`
array.
Returns
-------
simulated_obs : array
An (nsimulations x k_endog) array of simulated observations.
"""
self.update(params)
simulated_obs, simulated_states = self.ssm.simulate(
nsimulations, measurement_shocks, state_shocks, initial_state)
# Simulated obs is (k_endog x nobs); don't want to squeeze in
        # case of nsimulations = 1
if simulated_obs.shape[0] == 1:
simulated_obs = simulated_obs[0,:]
else:
simulated_obs = simulated_obs.T
return simulated_obs
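    # Illustrative usage sketch (editorial addition; the parameter values
    # are arbitrary):
    #
    #     sim = mod.simulate([0.5, 0.2], nsimulations=100)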
def impulse_responses(self, params, steps=1, impulse=0,
orthogonalized=False, cumulative=False, **kwargs):
"""
Impulse response function
Parameters
----------
params : array_like
Array of model parameters.
steps : int, optional
The number of steps for which impulse responses are calculated.
Default is 1. Note that the initial impulse is not counted as a
step, so if `steps=1`, the output will have 2 entries.
impulse : int or array_like
If an integer, the state innovation to pulse; must be between 0
and `k_posdef-1`. Alternatively, a custom impulse vector may be
provided; must be shaped `k_posdef x 1`.
orthogonalized : boolean, optional
Whether or not to perform impulse using orthogonalized innovations.
            Note that this will also affect custom `impulse` vectors. Default
is False.
cumulative : boolean, optional
Whether or not to return cumulative impulse responses. Default is
False.
**kwargs
If the model is time-varying and `steps` is greater than the number
of observations, any of the state space representation matrices
that are time-varying must have updated values provided for the
out-of-sample steps.
For example, if `design` is a time-varying component, `nobs` is 10,
and `steps` is 15, a (`k_endog` x `k_states` x 5) matrix must be
provided with the new design matrix values.
Returns
-------
impulse_responses : array
Responses for each endogenous variable due to the impulse
given by the `impulse` argument. A (steps + 1 x k_endog) array.
Notes
-----
Intercepts in the measurement and state equation are ignored when
calculating impulse responses.
"""
self.update(params)
return self.ssm.impulse_responses(
steps, impulse, orthogonalized, cumulative, **kwargs)
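    # Illustrative usage sketch (editorial addition):
    #
    #     irfs = mod.impulse_responses(res.params, steps=10, impulse=0,
    #                                  cumulative=True)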
@classmethod
def from_formula(cls, formula, data, subset=None):
"""
Not implemented for state space models
"""
raise NotImplementedError
class MLEResults(tsbase.TimeSeriesModelResults):
r"""
Class to hold results from fitting a state space model.
Parameters
----------
model : MLEModel instance
The fitted model instance
params : array
Fitted parameters
filter_results : KalmanFilter instance
The underlying state space model and Kalman filter output
Attributes
----------
model : Model instance
A reference to the model that was fit.
filter_results : KalmanFilter instance
The underlying state space model and Kalman filter output
nobs : float
The number of observations used to fit the model.
params : array
The parameters of the model.
scale : float
This is currently set to 1.0 and not used by the model or its results.
See Also
--------
MLEModel
statsmodels.tsa.statespace.kalman_filter.FilterResults
statsmodels.tsa.statespace.representation.FrozenRepresentation
"""
def __init__(self, model, params, results, cov_type='opg',
cov_kwds=None, **kwargs):
self.data = model.data
tsbase.TimeSeriesModelResults.__init__(self, model, params,
normalized_cov_params=None,
scale=1.)
# Save the state space representation output
self.filter_results = results
if isinstance(results, SmootherResults):
self.smoother_results = results
else:
self.smoother_results = None
# Dimensions
self.nobs = model.nobs
# Setup covariance matrix notes dictionary
if not hasattr(self, 'cov_kwds'):
self.cov_kwds = {}
self.cov_type = cov_type
# Setup the cache
self._cache = resettable_cache()
# Handle covariance matrix calculation
if cov_kwds is None:
cov_kwds = {}
try:
self._rank = None
self._get_robustcov_results(cov_type=cov_type, use_self=True,
**cov_kwds)
except np.linalg.LinAlgError:
self._rank = 0
k_params = len(self.params)
self.cov_params_default = np.zeros((k_params, k_params)) * np.nan
self.cov_kwds['cov_type'] = (
                'Covariance matrix could not be calculated: singular'
                ' information matrix.')
# References of filter and smoother output
for name in ['filtered_state', 'filtered_state_cov', 'predicted_state',
'predicted_state_cov', 'forecasts', 'forecasts_error',
'forecasts_error_cov', 'smoothed_state',
'smoothed_state_cov', 'smoothed_measurement_disturbance',
'smoothed_state_disturbance',
'smoothed_measurement_disturbance_cov',
'smoothed_state_disturbance_cov']:
setattr(self, name, getattr(self.filter_results, name, None))
def _get_robustcov_results(self, cov_type='opg', **kwargs):
"""
Create new results instance with specified covariance estimator as
default
Note: creating new results instance currently not supported.
Parameters
----------
cov_type : string
the type of covariance matrix estimator to use. See Notes below
kwargs : depends on cov_type
Required or optional arguments for covariance calculation.
See Notes below.
Returns
-------
results : results instance
This method creates a new results instance with the requested
covariance as the default covariance of the parameters.
Inferential statistics like p-values and hypothesis tests will be
based on this covariance matrix.
Notes
-----
The following covariance types and required or optional arguments are
currently available:
- 'opg' for the outer product of gradient estimator
- 'oim' for the observed information matrix estimator, calculated
using the method of Harvey (1989)
- 'cs' for the observed information matrix estimator, calculated
using a numerical (complex step) approximation of the Hessian
matrix.
- 'delta' for the observed information matrix estimator, calculated
using a numerical (complex step) approximation of the Hessian along
with the delta method (method of propagation of errors)
applied to the parameter transformation function `transform_params`.
- 'robust' for an approximate (quasi-maximum likelihood) covariance
          matrix that may be valid even in the presence of some
misspecifications. Intermediate calculations use the 'oim'
method.
- 'robust_cs' is the same as 'robust' except that the intermediate
calculations use the 'cs' method.
"""
import statsmodels.stats.sandwich_covariance as sw
use_self = kwargs.pop('use_self', False)
if use_self:
res = self
else:
raise NotImplementedError
res = self.__class__(
self.model, self.params,
normalized_cov_params=self.normalized_cov_params,
scale=self.scale)
# Set the new covariance type
res.cov_type = cov_type
res.cov_kwds = {}
# Calculate the new covariance matrix
if len(self.params) == 0:
res.cov_params_default = np.zeros((0,0))
res._rank = 0
res.cov_kwds['cov_type'] = (
'No parameters estimated.')
elif self.cov_type == 'cs':
res.cov_params_default = res.cov_params_cs
res.cov_kwds['description'] = (
'Covariance matrix calculated using numerical (complex-step)'
' differentiation.')
elif self.cov_type == 'delta':
res.cov_params_default = res.cov_params_delta
res.cov_kwds['description'] = (
'Covariance matrix calculated using numerical differentiation'
' and the delta method (method of propagation of errors)'
' applied to the parameter transformation function.')
elif self.cov_type == 'oim':
res.cov_params_default = res.cov_params_oim
res.cov_kwds['description'] = (
'Covariance matrix calculated using the observed information'
' matrix described in Harvey (1989).')
elif self.cov_type == 'opg':
res.cov_params_default = res.cov_params_opg
res.cov_kwds['description'] = (
'Covariance matrix calculated using the outer product of'
' gradients.'
)
elif self.cov_type == 'robust' or self.cov_type == 'robust_oim':
res.cov_params_default = res.cov_params_robust_oim
res.cov_kwds['description'] = (
'Quasi-maximum likelihood covariance matrix used for'
' robustness to some misspecifications; calculated using the'
' observed information matrix described in Harvey (1989).')
elif self.cov_type == 'robust_cs':
res.cov_params_default = res.cov_params_robust_cs
res.cov_kwds['description'] = (
'Quasi-maximum likelihood covariance matrix used for'
' robustness to some misspecifications; calculated using'
' numerical (complex-step) differentiation.')
else:
raise NotImplementedError('Invalid covariance matrix type.')
return res
@cache_readonly
def aic(self):
"""
(float) Akaike Information Criterion
"""
# return -2*self.llf + 2*self.params.shape[0]
return aic(self.llf, self.nobs, self.params.shape[0])
@cache_readonly
def bic(self):
"""
(float) Bayes Information Criterion
"""
# return -2*self.llf + self.params.shape[0]*np.log(self.nobs)
return bic(self.llf, self.nobs, self.params.shape[0])
@cache_readonly
def cov_params_cs(self):
"""
(array) The variance / covariance matrix. Computed using the numerical
Hessian computed without using parameter transformations.
"""
nobs = (self.model.nobs - self.filter_results.loglikelihood_burn)
# When using complex-step methods, cannot rely on Cholesky inversion
        # because variance parameters will then have a complex component,
        # which implies non-positive-definiteness.
inversion_method = INVERT_UNIVARIATE | SOLVE_LU
evaluated_hessian = self.model._hessian_cs(
self.params, transformed=True, inversion_method=inversion_method
)
self.model.update(self.params)
neg_cov, singular_values = pinv_extended(nobs * evaluated_hessian)
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return -neg_cov
@cache_readonly
def cov_params_delta(self):
"""
(array) The variance / covariance matrix. Computed using the numerical
Hessian computed using parameter transformations and the Delta method
(method of propagation of errors).
"""
nobs = (self.model.nobs - self.filter_results.loglikelihood_burn)
unconstrained = self.model.untransform_params(self.params)
jacobian = self.model.transform_jacobian(unconstrained)
neg_cov, singular_values = pinv_extended(
nobs * self.model._hessian_cs(unconstrained, transformed=False)
)
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return np.dot(np.dot(jacobian, -neg_cov), jacobian.transpose())
@cache_readonly
def cov_params_oim(self):
"""
(array) The variance / covariance matrix. Computed using the method
from Harvey (1989).
"""
nobs = (self.model.nobs - self.filter_results.loglikelihood_burn)
cov_params, singular_values = pinv_extended(
nobs * self.model.observed_information_matrix(self.params)
)
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return cov_params
@cache_readonly
def cov_params_opg(self):
"""
(array) The variance / covariance matrix. Computed using the outer
product of gradients method.
"""
nobs = (self.model.nobs - self.filter_results.loglikelihood_burn)
# When using complex-step methods, cannot rely on Cholesky inversion
        # because variance parameters will then have a complex component,
        # which implies non-positive-definiteness.
inversion_method = INVERT_UNIVARIATE | SOLVE_LU
cov_params, singular_values = pinv_extended(
nobs *
self.model.opg_information_matrix(
self.params, inversion_method=inversion_method
)
)
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return cov_params
@cache_readonly
def cov_params_robust(self):
"""
(array) The QMLE variance / covariance matrix. Alias for
`cov_params_robust_oim`
"""
return self.cov_params_robust_oim
@cache_readonly
def cov_params_robust_oim(self):
"""
(array) The QMLE variance / covariance matrix. Computed using the
method from Harvey (1989) as the evaluated hessian.
"""
nobs = (self.model.nobs - self.filter_results.loglikelihood_burn)
cov_opg = self.cov_params_opg
evaluated_hessian = (
nobs * self.model.observed_information_matrix(self.params)
)
cov_params, singular_values = pinv_extended(
np.dot(np.dot(evaluated_hessian, cov_opg), evaluated_hessian)
)
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return cov_params
@cache_readonly
def cov_params_robust_cs(self):
"""
(array) The QMLE variance / covariance matrix. Computed using the
numerical Hessian computed without using parameter transformations as
the evaluated hessian.
"""
nobs = (self.model.nobs - self.filter_results.loglikelihood_burn)
cov_opg = self.cov_params_opg
# When using complex-step methods, cannot rely on Cholesky inversion
        # because variance parameters will then have a complex component,
        # which implies non-positive-definiteness.
inversion_method = INVERT_UNIVARIATE | SOLVE_LU
evaluated_hessian = (
nobs *
self.model._hessian_cs(self.params, transformed=True,
inversion_method=inversion_method)
)
cov_params, singular_values = pinv_extended(
np.dot(np.dot(evaluated_hessian, cov_opg), evaluated_hessian)
)
if self._rank is None:
self._rank = np.linalg.matrix_rank(np.diag(singular_values))
return cov_params
@cache_readonly
def fittedvalues(self):
"""
(array) The predicted values of the model. An (nobs x k_endog) array.
"""
        # This is a (k_endog x nobs) array; don't want to squeeze in case of
# the corner case where nobs = 1 (mostly a concern in the predict or
# forecast functions, but here also to maintain consistency)
fittedvalues = self.filter_results.forecasts
if fittedvalues.shape[0] == 1:
fittedvalues = fittedvalues[0,:]
else:
fittedvalues = fittedvalues.T
return fittedvalues
@cache_readonly
def hqic(self):
"""
(float) Hannan-Quinn Information Criterion
"""
# return -2*self.llf + 2*np.log(np.log(self.nobs))*self.params.shape[0]
return hqic(self.llf, self.nobs, self.params.shape[0])
@cache_readonly
def llf_obs(self):
"""
        (array) The values of the loglikelihood function, evaluated at
        `params`, for each observation.
"""
return self.model.loglikeobs(self.params)
@cache_readonly
def llf(self):
"""
(float) The value of the log-likelihood function evaluated at `params`.
"""
return self.llf_obs[self.filter_results.loglikelihood_burn:].sum()
@cache_readonly
def loglikelihood_burn(self):
"""
        (int) The number of observations during which the likelihood is not
evaluated.
"""
return self.filter_results.loglikelihood_burn
@cache_readonly
def pvalues(self):
"""
(array) The p-values associated with the z-statistics of the
coefficients. Note that the coefficients are assumed to have a Normal
distribution.
"""
return norm.sf(np.abs(self.zvalues)) * 2
@cache_readonly
def resid(self):
"""
(array) The model residuals. An (nobs x k_endog) array.
"""
        # This is a (k_endog x nobs) array; don't want to squeeze in case of
# the corner case where nobs = 1 (mostly a concern in the predict or
# forecast functions, but here also to maintain consistency)
resid = self.filter_results.forecasts_error
if resid.shape[0] == 1:
resid = resid[0,:]
else:
resid = resid.T
return resid
@cache_readonly
def zvalues(self):
"""
(array) The z-statistics for the coefficients.
"""
return self.params / self.bse
def test_normality(self, method):
"""
Test for normality of standardized residuals.
Null hypothesis is normality.
Parameters
----------
method : string {'jarquebera'} or None
The statistical test for normality. Must be 'jarquebera' for
Jarque-Bera normality test. If None, an attempt is made to select
an appropriate test.
Notes
-----
If the first `d` loglikelihood values were burned (i.e. in the
specified model, `loglikelihood_burn=d`), then this test is calculated
ignoring the first `d` residuals.
See Also
--------
statsmodels.stats.stattools.jarque_bera
"""
if method is None:
method = 'jarquebera'
if method == 'jarquebera':
from statsmodels.stats.stattools import jarque_bera
d = self.loglikelihood_burn
output = np.array(jarque_bera(
self.filter_results.standardized_forecasts_error[:, d:],
axis=1
)).transpose()
else:
raise NotImplementedError('Invalid normality test method.')
return output
def test_heteroskedasticity(self, method, alternative='two-sided',
use_f=True):
"""
Test for heteroskedasticity of standardized residuals
Tests whether the sum-of-squares in the first third of the sample is
significantly different than the sum-of-squares in the last third
of the sample. Analogous to a Goldfeld-Quandt test.
Parameters
----------
method : string {'breakvar'} or None
The statistical test for heteroskedasticity. Must be 'breakvar'
for test of a break in the variance. If None, an attempt is
made to select an appropriate test.
alternative : string, 'increasing', 'decreasing' or 'two-sided'
This specifies the alternative for the p-value calculation. Default
is two-sided.
use_f : boolean, optional
Whether or not to compare against the asymptotic distribution
(chi-squared) or the approximate small-sample distribution (F).
Default is True (i.e. default is to compare against an F
distribution).
Notes
-----
The null hypothesis is of no heteroskedasticity. That means different
things depending on which alternative is selected:
- Increasing: Null hypothesis is that the variance is not increasing
throughout the sample; that the sum-of-squares in the later
subsample is *not* greater than the sum-of-squares in the earlier
subsample.
- Decreasing: Null hypothesis is that the variance is not decreasing
throughout the sample; that the sum-of-squares in the earlier
subsample is *not* greater than the sum-of-squares in the later
subsample.
- Two-sided: Null hypothesis is that the variance is not changing
throughout the sample. Both that the sum-of-squares in the earlier
subsample is not greater than the sum-of-squares in the later
subsample *and* that the sum-of-squares in the later subsample is
not greater than the sum-of-squares in the earlier subsample.
For :math:`h = [T/3]`, the test statistic is:
.. math::
H(h) = \sum_{t=T-h+1}^T \tilde v_t^2
\left / \sum_{t=d+1}^{d+1+h} \tilde v_t^2 \right .
where :math:`d` is the number of periods in which the loglikelihood was
burned in the parent model (usually corresponding to diffuse
initialization).
This statistic can be tested against an :math:`F(h,h)` distribution.
Alternatively, :math:`h H(h)` is asymptotically distributed according
        to :math:`\chi_h^2`; this second test can be applied by passing
        `use_f=False` as an argument.
See section 5.4 of [1]_ for the above formula and discussion, as well
as additional details.
TODO
- Allow specification of :math:`h`
Returns
-------
output : array
An array with `(test_statistic, pvalue)` for each endogenous
variable. The array is then sized `(k_endog, 2)`. If the method is
called as `het = res.test_heteroskedasticity()`, then `het[0]` is
an array of size 2 corresponding to the first endogenous variable,
where `het[0][0]` is the test statistic, and `het[0][1]` is the
p-value.
References
----------
.. [1] Harvey, Andrew C. 1990.
Forecasting, Structural Time Series Models and the Kalman Filter.
Cambridge University Press.
"""
if method is None:
method = 'breakvar'
if method == 'breakvar':
# Store some values
squared_resid = self.filter_results.standardized_forecasts_error**2
d = self.loglikelihood_burn
h = int(np.round((self.nobs - d) / 3))
# Calculate the test statistics for each endogenous variable
test_statistics = np.array([
np.sum(squared_resid[i][-h:]) / np.sum(squared_resid[i][d:d+h])
for i in range(self.model.k_endog)
])
# Setup functions to calculate the p-values
if use_f:
from scipy.stats import f
pval_lower = lambda test_statistics: f.cdf(test_statistics, h, h)
pval_upper = lambda test_statistics: f.sf(test_statistics, h, h)
else:
from scipy.stats import chi2
pval_lower = lambda test_statistics: chi2.cdf(h*test_statistics, h)
pval_upper = lambda test_statistics: chi2.sf(h*test_statistics, h)
# Calculate the one- or two-sided p-values
alternative = alternative.lower()
if alternative in ['i', 'inc', 'increasing']:
p_values = pval_upper(test_statistics)
elif alternative in ['d', 'dec', 'decreasing']:
test_statistics = 1. / test_statistics
p_values = pval_upper(test_statistics)
elif alternative in ['2', '2-sided', 'two-sided']:
p_values = 2 * np.minimum(
pval_lower(test_statistics),
pval_upper(test_statistics)
)
else:
raise ValueError('Invalid alternative.')
output = np.c_[test_statistics, p_values]
else:
raise NotImplementedError('Invalid heteroskedasticity test'
' method.')
return output
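    # Illustrative sketch (editorial addition): the 'breakvar' statistic
    # above can be reproduced by hand for the first endogenous variable:
    #
    #     e = res.filter_results.standardized_forecasts_error[0]
    #     d = res.loglikelihood_burn
    #     h = int(np.round((len(e) - d) / 3))
    #     H = np.sum(e[-h:]**2) / np.sum(e[d:d + h]**2)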
def test_serial_correlation(self, method, lags=None):
"""
Ljung-box test for no serial correlation of standardized residuals
Null hypothesis is no serial correlation.
Parameters
----------
        method : string {'ljungbox','boxpierce'} or None
The statistical test for serial correlation. If None, an attempt is
made to select an appropriate test.
lags : None, int or array_like
If lags is an integer then this is taken to be the largest lag
            that is included; the test result is reported for all smaller lag
            lengths.
If lags is a list or array, then all lags are included up to the
largest lag in the list, however only the tests for the lags in the
list are reported.
If lags is None, then the default maxlag is 12*(nobs/100)^{1/4}
Returns
-------
output : array
An array with `(test_statistic, pvalue)` for each endogenous
variable and each lag. The array is then sized
`(k_endog, 2, lags)`. If the method is called as
`ljungbox = res.test_serial_correlation()`, then `ljungbox[i]`
holds the results of the Ljung-Box test (as would be returned by
`statsmodels.stats.diagnostic.acorr_ljungbox`) for the `i`th
endogenous variable.
Notes
-----
If the first `d` loglikelihood values were burned (i.e. in the
specified model, `loglikelihood_burn=d`), then this test is calculated
ignoring the first `d` residuals.
See Also
--------
statsmodels.stats.diagnostic.acorr_ljungbox
"""
if method is None:
method = 'ljungbox'
if method == 'ljungbox' or method == 'boxpierce':
from statsmodels.stats.diagnostic import acorr_ljungbox
d = self.loglikelihood_burn
output = []
# Default lags for acorr_ljungbox is 40, but may not always have
# that many observations
if lags is None:
lags = min(40, self.nobs - d - 1)
for i in range(self.model.k_endog):
results = acorr_ljungbox(
self.filter_results.standardized_forecasts_error[i][d:],
lags=lags, boxpierce=(method == 'boxpierce'))
if method == 'ljungbox':
output.append(results[0:2])
else:
output.append(results[2:])
output = np.c_[output]
else:
raise NotImplementedError('Invalid serial correlation test'
' method.')
return output
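    # Illustrative usage sketch (editorial addition; indexing follows the
    # documented (k_endog, 2, lags) output shape):
    #
    #     lb = res.test_serial_correlation(method='ljungbox', lags=10)
    #     stat, pvalue = lb[0, 0, -1], lb[0, 1, -1]  # first variable, last lag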
def get_prediction(self, start=None, end=None, dynamic=False, **kwargs):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
dynamic : boolean, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
            Array of in-sample predictions and / or out-of-sample
forecasts. An (npredict x k_endog) array.
"""
if start is None:
start = 0
# Handle start and end (e.g. dates)
start = self.model._get_predict_start(start)
end, out_of_sample = self.model._get_predict_end(end)
# Handle string dynamic
dates = self.data.dates
if isinstance(dynamic, str):
if dates is None:
raise ValueError("Got a string for dynamic and dates is None")
dtdynamic = self.model._str_to_date(dynamic)
try:
dynamic_start = self.model._get_dates_loc(dates, dtdynamic)
dynamic = dynamic_start - start
except KeyError:
raise ValueError("Dynamic must be in dates. Got %s | %s" %
(str(dynamic), str(dtdynamic)))
# Perform the prediction
# This is a (k_endog x npredictions) array; don't want to squeeze in
# case of npredictions = 1
prediction_results = self.filter_results.predict(
start, end+out_of_sample+1, dynamic, **kwargs
)
# Return a new mlemodel.PredictionResults object
if self.data.dates is None:
row_labels = self.data.row_labels
else:
row_labels = self.data.predict_dates
return PredictionResultsWrapper(
PredictionResults(self, prediction_results, row_labels=row_labels))
def get_forecast(self, steps=1, **kwargs):
"""
Out-of-sample forecasts
Parameters
----------
steps : int, str, or datetime, optional
If an integer, the number of steps to forecast from the end of the
sample. Can also be a date string to parse or a datetime type.
However, if the dates index does not have a fixed frequency, steps
            must be an integer. Default is 1.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts. A (steps x k_endog) array.
"""
if isinstance(steps, int):
end = self.nobs+steps-1
else:
end = steps
return self.get_prediction(start=self.nobs, end=end, **kwargs)
def predict(self, start=None, end=None, dynamic=False, **kwargs):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
Zero-indexed observation number at which to start forecasting,
i.e., the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
end : int, str, or datetime, optional
Zero-indexed observation number at which to end forecasting, i.e.,
the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
dynamic : boolean, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
            Array of in-sample predictions and / or out-of-sample
forecasts. An (npredict x k_endog) array.
"""
if start is None:
start = 0
# Handle start and end (e.g. dates)
start = self.model._get_predict_start(start)
end, out_of_sample = self.model._get_predict_end(end)
# Handle string dynamic
dates = self.data.dates
if isinstance(dynamic, str):
if dates is None:
raise ValueError("Got a string for dynamic and dates is None")
dtdynamic = self.model._str_to_date(dynamic)
try:
dynamic_start = self.model._get_dates_loc(dates, dtdynamic)
dynamic = dynamic_start - start
except KeyError:
raise ValueError("Dynamic must be in dates. Got %s | %s" %
(str(dynamic), str(dtdynamic)))
# Perform the prediction
# This is a (k_endog x npredictions) array; don't want to squeeze in
# case of npredictions = 1
prediction_results = self.filter_results.predict(
start, end+out_of_sample+1, dynamic, **kwargs
)
predicted_mean = prediction_results.forecasts
if predicted_mean.shape[0] == 1:
predicted_mean = predicted_mean[0,:]
else:
predicted_mean = predicted_mean.T
return predicted_mean
def forecast(self, steps=1, **kwargs):
"""
Out-of-sample forecasts
Parameters
----------
steps : int, str, or datetime, optional
If an integer, the number of steps to forecast from the end of the
sample. Can also be a date string to parse or a datetime type.
However, if the dates index does not have a fixed frequency, steps
            must be an integer. Default is 1.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts. A (steps x k_endog) array.
"""
if isinstance(steps, int):
end = self.nobs+steps-1
else:
end = steps
return self.predict(start=self.nobs, end=end, **kwargs)
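    # Illustrative usage sketch (editorial addition):
    #
    #     in_sample = res.predict()              # one-step-ahead predictions
    #     out_of_sample = res.forecast(steps=5)  # five steps past the sample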
def simulate(self, nsimulations, measurement_shocks=None,
state_shocks=None, initial_state=None):
"""
Simulate a new time series following the state space model
Parameters
----------
nsimulations : int
The number of observations to simulate. If the model is
time-invariant this can be any number. If the model is
time-varying, then this number must be less than or equal to the
            number of observations.
measurement_shocks : array_like, optional
If specified, these are the shocks to the measurement equation,
:math:`\varepsilon_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_endog`, where `k_endog` is the
same as in the state space model.
state_shocks : array_like, optional
If specified, these are the shocks to the state equation,
:math:`\eta_t`. If unspecified, these are automatically
generated using a pseudo-random number generator. If specified,
must be shaped `nsimulations` x `k_posdef` where `k_posdef` is the
same as in the state space model.
initial_state : array_like, optional
If specified, this is the state vector at time zero, which should
be shaped (`k_states` x 1), where `k_states` is the same as in the
state space model. If unspecified, but the model has been
initialized, then that initialization is used. If unspecified and
the model has not been initialized, then a vector of zeros is used.
Note that this is not included in the returned `simulated_states`
array.
Returns
-------
simulated_obs : array
An (nsimulations x k_endog) array of simulated observations.
"""
return self.model.simulate(self.params, nsimulations,
measurement_shocks, state_shocks, initial_state)
def impulse_responses(self, steps=1, impulse=0, orthogonalized=False,
cumulative=False, **kwargs):
"""
Impulse response function
Parameters
----------
steps : int, optional
The number of steps for which impulse responses are calculated.
Default is 1. Note that the initial impulse is not counted as a
step, so if `steps=1`, the output will have 2 entries.
impulse : int or array_like
If an integer, the state innovation to pulse; must be between 0
and `k_posdef-1`. Alternatively, a custom impulse vector may be
provided; must be shaped `k_posdef x 1`.
orthogonalized : boolean, optional
Whether or not to perform impulse using orthogonalized innovations.
            Note that this will also affect custom `impulse` vectors. Default
is False.
cumulative : boolean, optional
Whether or not to return cumulative impulse responses. Default is
False.
**kwargs
If the model is time-varying and `steps` is greater than the number
of observations, any of the state space representation matrices
that are time-varying must have updated values provided for the
out-of-sample steps.
For example, if `design` is a time-varying component, `nobs` is 10,
and `steps` is 15, a (`k_endog` x `k_states` x 5) matrix must be
provided with the new design matrix values.
Returns
-------
impulse_responses : array
Responses for each endogenous variable due to the impulse
given by the `impulse` argument. A (steps + 1 x k_endog) array.
Notes
-----
Intercepts in the measurement and state equation are ignored when
calculating impulse responses.
"""
return self.model.impulse_responses(self.params, steps, impulse,
orthogonalized, cumulative, **kwargs)
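# A usage sketch (hypothetical `new_design` array): per the docstring above,
# with `nobs` = 10 and `steps` = 15 a time-varying design matrix needs 5
# extra periods of values, passed through `**kwargs`:
#   irf = res.impulse_responses(steps=15, design=new_design)
# where `new_design` has shape (k_endog, k_states, 5).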
def plot_diagnostics(self, variable=0, lags=10, fig=None, figsize=None):
"""
Diagnostic plots for standardized residuals of one endogenous variable
Parameters
----------
variable : integer, optional
Index of the endogenous variable for which the diagnostic plots
should be created. Default is 0.
lags : integer, optional
Number of lags to include in the correlogram. Default is 10.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the 2x2 grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
Produces a 2x2 plot grid with the following plots (ordered left-to-right,
top-to-bottom):
1. Standardized residuals over time
2. Histogram plus estimated density of standardized residuals, along
with a Normal(0,1) density plotted for reference.
3. Normal Q-Q plot, with Normal reference line.
4. Correlogram
See Also
--------
statsmodels.graphics.gofplots.qqplot
statsmodels.graphics.tsaplots.plot_acf
"""
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
_import_mpl()
fig = create_mpl_fig(fig, figsize)
# Eliminate residuals associated with burned likelihoods
d = self.loglikelihood_burn
resid = self.filter_results.standardized_forecasts_error[variable, d:]
# Top-left: residuals vs time
ax = fig.add_subplot(221)
if hasattr(self.data, 'dates') and self.data.dates is not None:
x = self.data.dates[self.loglikelihood_burn:]._mpl_repr()
else:
x = np.arange(len(resid))
ax.plot(x, resid)
ax.hlines(0, x[0], x[-1], alpha=0.5)
ax.set_xlim(x[0], x[-1])
ax.set_title('Standardized residual')
# Top-right: histogram, Gaussian kernel density, Normal density
# Can only do histogram and Gaussian kernel density on the non-null
# elements
resid_nonmissing = resid[~(np.isnan(resid))]
ax = fig.add_subplot(222)
# 'normed' was removed from matplotlib's hist() in 3.x; 'density' is
# the modern equivalent.
ax.hist(resid_nonmissing, density=True, label='Hist')
from scipy.stats import gaussian_kde, norm
kde = gaussian_kde(resid_nonmissing)
xlim = (-1.96*2, 1.96*2)
x = np.linspace(xlim[0], xlim[1])
ax.plot(x, kde(x), label='KDE')
ax.plot(x, norm.pdf(x), label='N(0,1)')
ax.set_xlim(xlim)
ax.legend()
ax.set_title('Histogram plus estimated density')
# Bottom-left: QQ plot
ax = fig.add_subplot(223)
from statsmodels.graphics.gofplots import qqplot
qqplot(resid, line='s', ax=ax)
ax.set_title('Normal Q-Q')
# Bottom-right: Correlogram
ax = fig.add_subplot(224)
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(resid, ax=ax, lags=lags)
ax.set_title('Correlogram')
ax.set_ylim(-1,1)
return fig
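# A usage sketch (illustrative): render the 2x2 grid for the first series,
# with a longer correlogram, into a new figure:
#   fig = res.plot_diagnostics(variable=0, lags=24, figsize=(10, 8))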
def summary(self, alpha=.05, start=None, title=None, model_name=None,
display_params=True):
"""
Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals. Default is 0.05.
start : int, optional
Integer of the start observation. Default is 0.
model_name : string
The name of the model used. Default is to use model class name.
Returns
-------
summary : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
from statsmodels.iolib.summary import Summary
# Model specification results
model = self.model
if title is None:
title = 'Statespace Model Results'
if start is None:
start = 0
if self.data.dates is not None:
dates = self.data.dates
d = dates[start]
sample = ['%02d-%02d-%02d' % (d.month, d.day, d.year)]
d = dates[-1]
sample += ['- ' + '%02d-%02d-%02d' % (d.month, d.day, d.year)]
else:
sample = [str(start), ' - ' + str(self.model.nobs)]
# Standardize the model name as a list of str
if model_name is None:
model_name = model.__class__.__name__
# Diagnostic tests results
het = self.test_heteroskedasticity(method='breakvar')
lb = self.test_serial_correlation(method='ljungbox')
jb = self.test_normality(method='jarquebera')
# Create the tables
if not isinstance(model_name, list):
model_name = [model_name]
top_left = [('Dep. Variable:', None)]
top_left.append(('Model:', [model_name[0]]))
for i in range(1, len(model_name)):
top_left.append(('', ['+ ' + model_name[i]]))
top_left += [
('Date:', None),
('Time:', None),
('Sample:', [sample[0]]),
('', [sample[1]])
]
top_right = [
('No. Observations:', [self.model.nobs]),
('Log Likelihood', ["%#5.3f" % self.llf]),
('AIC', ["%#5.3f" % self.aic]),
('BIC', ["%#5.3f" % self.bic]),
('HQIC', ["%#5.3f" % self.hqic])
]
if hasattr(self, 'cov_type'):
top_left.append(('Covariance Type:', [self.cov_type]))
format_str = lambda array: [
', '.join(['{0:.2f}'.format(i) for i in array])
]
diagn_left = [('Ljung-Box (Q):', format_str(lb[:,0,-1])),
('Prob(Q):', format_str(lb[:,1,-1])),
('Heteroskedasticity (H):', format_str(het[:,0])),
('Prob(H) (two-sided):', format_str(het[:,1]))
]
diagn_right = [('Jarque-Bera (JB):', format_str(jb[:,0])),
('Prob(JB):', format_str(jb[:,1])),
('Skew:', format_str(jb[:,2])),
('Kurtosis:', format_str(jb[:,3]))
]
summary = Summary()
summary.add_table_2cols(self, gleft=top_left, gright=top_right,
title=title)
if len(self.params) > 0 and display_params:
summary.add_table_params(self, alpha=alpha,
xname=self.data.param_names, use_t=False)
summary.add_table_2cols(self, gleft=diagn_left, gright=diagn_right,
title="")
# Add warnings/notes, added to text format only
etext = []
if hasattr(self, 'cov_type') and 'description' in self.cov_kwds:
etext.append(self.cov_kwds['description'])
if self._rank < len(self.params):
etext.append("Covariance matrix is singular or near-singular,"
" with condition number %6.3g. Standard errors may be"
" unstable." % np.linalg.cond(self.cov_params()))
if etext:
etext = ["[{0}] {1}".format(i + 1, text)
for i, text in enumerate(etext)]
etext.insert(0, "Warnings:")
summary.add_extra_txt(etext)
return summary
class MLEResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'zvalues': 'columns',
'cov_params_cs': 'cov',
'cov_params_default': 'cov',
'cov_params_delta': 'cov',
'cov_params_oim': 'cov',
'cov_params_opg': 'cov',
'cov_params_robust': 'cov',
'cov_params_robust_cs': 'cov',
'cov_params_robust_oim': 'cov',
}
_wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
_attrs)
_methods = {
'forecast': 'dates',
'simulate': 'ynames',
'impulse_responses': 'ynames'
}
_wrap_methods = wrap.union_dicts(
tsbase.TimeSeriesResultsWrapper._wrap_methods, _methods)
wrap.populate_wrapper(MLEResultsWrapper, MLEResults)
class PredictionResults(pred.PredictionResults):
"""
Parameters
----------
prediction_results : kalman_filter.PredictionResults instance
Results object from prediction after fitting or filtering a state space
model.
row_labels : iterable
Row labels for the predicted data.
Attributes
----------
"""
def __init__(self, model, prediction_results, row_labels=None):
self.model = Bunch(data=model.data.__class__(
endog=prediction_results.endog.T,
predict_dates=getattr(model.data, 'predict_dates', None))
)
self.prediction_results = prediction_results
# Get required values
predicted_mean = self.prediction_results.forecasts
if predicted_mean.shape[0] == 1:
predicted_mean = predicted_mean[0, :]
else:
predicted_mean = predicted_mean.transpose()
var_pred_mean = self.prediction_results.forecasts_error_cov
if var_pred_mean.shape[0] == 1:
var_pred_mean = var_pred_mean[0, 0, :]
else:
var_pred_mean = var_pred_mean.transpose()
# Initialize
super(PredictionResults, self).__init__(predicted_mean, var_pred_mean,
dist='norm',
row_labels=row_labels,
link=identity())
def conf_int(self, method='endpoint', alpha=0.05, **kwds):
# TODO: this performs metadata wrapping, and that should be handled
# by attach_* methods. However, they don't currently support
# this use case.
conf_int = super(PredictionResults, self).conf_int(
method, alpha, **kwds)
if self.model.data.predict_dates is not None:
conf_int = pd.DataFrame(conf_int,
index=self.model.data.predict_dates)
return conf_int
def summary_frame(self, endog=0, what='all', alpha=0.05):
# TODO: finish and cleanup
# import pandas as pd
from statsmodels.compat.collections import OrderedDict
#ci_obs = self.conf_int(alpha=alpha, obs=True) # need to split
ci_mean = self.conf_int(alpha=alpha)
to_include = OrderedDict()
if self.predicted_mean.ndim == 1:
yname = self.model.data.ynames
to_include['mean'] = self.predicted_mean
to_include['mean_se'] = self.se_mean
k_endog = 1
else:
yname = self.model.data.ynames[endog]
to_include['mean'] = self.predicted_mean[:, endog]
to_include['mean_se'] = self.se_mean[:, endog]
k_endog = self.predicted_mean.shape[1]
to_include['mean_ci_lower'] = ci_mean[:, endog]
to_include['mean_ci_upper'] = ci_mean[:, k_endog + endog]
self.table = to_include
#OrderedDict doesn't work to preserve sequence
# pandas dict doesn't handle 2d_array
#data = np.column_stack(list(to_include.values()))
#names = ....
res = pd.DataFrame(to_include, index=self.row_labels,
columns=to_include.keys())
res.columns.name = yname
return res
class PredictionResultsWrapper(wrap.ResultsWrapper):
_attrs = {
'predicted_mean': 'dates',
'se_mean': 'dates',
't_values': 'dates',
}
_wrap_attrs = wrap.union_dicts(_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(_methods)
wrap.populate_wrapper(PredictionResultsWrapper, PredictionResults)
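# ---------------------------------------------------------------------------
# Minimal end-to-end sketch (not part of the original module; purely
# illustrative, assuming SARIMAX -- a concrete MLEModel subclass -- is
# importable and that synthetic random-walk data is acceptable).
if __name__ == '__main__':
    from statsmodels.tsa.statespace.sarimax import SARIMAX
    endog = np.random.randn(100).cumsum()
    res = SARIMAX(endog, order=(1, 0, 0)).fit(disp=False)
    print(res.forecast(steps=5))           # out-of-sample point forecasts
    print(res.impulse_responses(steps=5))  # IRF of the single state shock
    print(res.summary())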
|
kiyoto/statsmodels
|
statsmodels/tsa/statespace/mlemodel.py
|
Python
|
bsd-3-clause
| 88,741
|
[
"Gaussian"
] |
b2f969aa6d137b362b0617dee1501f51baff8f1576c003516eda974fe8ba2f42
|
"""
The GA4GH data model. Defines all the methods required to translate
data in existing formats into GA4GH protocol types.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import base64
import collections
import glob
import json
import os
import ga4gh.server.exceptions as exceptions
import ga4gh.schemas.protocol as protocol
class PysamFileHandleCache(object):
"""
Cache for opened file handles. We use a deque, which gives O(1)
push/pop operations. We always add elements on the left of the deque
and pop elements from the right. When a file is accessed via
getFileHandle, its priority gets updated: it is put at the "top" of
the deque.
"""
def __init__(self):
self._cache = collections.deque()
self._memoTable = dict()
# Initialize the value even if it will be set up by the config
self._maxCacheSize = 50
def setMaxCacheSize(self, size):
"""
Sets the maximum size of the cache
"""
if size <= 0:
raise ValueError(
"The size of the cache must be a strictly positive value")
self._maxCacheSize = size
def _add(self, dataFile, handle):
"""
Add a file handle to the left of the deque
"""
self._cache.appendleft((dataFile, handle))
def _update(self, dataFile, handle):
"""
Update the priority of the file handle. The element is first
removed and then added to the left of the deque.
"""
self._cache.remove((dataFile, handle))
self._add(dataFile, handle)
def _removeLru(self):
"""
Remove the least recently used file handle from the cache.
The pop method removes an element from the right of the deque.
Returns the name of the file that has been removed.
"""
(dataFile, handle) = self._cache.pop()
handle.close()
return dataFile
def getFileHandle(self, dataFile, openMethod):
"""
Returns the handle associated with the filename. If the file is
already opened, update its priority in the cache and return
its handle. Otherwise, open the file using openMethod, store
it in the cache and return the corresponding handle.
"""
if dataFile in self._memoTable:
handle = self._memoTable[dataFile]
self._update(dataFile, handle)
return handle
else:
try:
handle = openMethod(dataFile)
except ValueError:
raise exceptions.FileOpenFailedException(dataFile)
self._memoTable[dataFile] = handle
self._add(dataFile, handle)
if len(self._memoTable) > self._maxCacheSize:
dataFile = self._removeLru()
del self._memoTable[dataFile]
return handle
# LRU cache of open file handles
fileHandleCache = PysamFileHandleCache()
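# A usage sketch (hypothetical file name; pysam is assumed available, as it
# is elsewhere in this package):
#   import pysam
#   bam = fileHandleCache.getFileHandle("reads.bam", pysam.AlignmentFile)
# A second call with the same path returns the cached handle and refreshes
# its LRU priority instead of reopening the file.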
class CompoundId(object):
"""
Base class for an id composed of several different parts. Each
compound ID consists of a set of fields, each of which corresponds to a
local ID in the data hierarchy. For example, we might have fields like
["dataset", "variantSet"] for a variantSet. These are available as
cid.dataset, and cid.variantSet. The actual IDs of the containing
objects can be obtained using the corresponding attributes, e.g.
cid.datasetId and cid.variantSetId.
"""
fields = []
"""
The fields that the compound ID is composed of. These are parsed and
made available as attributes on the object.
"""
containerIds = []
"""
The fields of the ID form a breadcrumb trail through the data
hierarchy, and successive prefixes provide the IDs for objects
further up the tree. This list is a set of tuples giving the
name and length of a given prefix forming an identifier.
"""
differentiator = None
"""
A string used to guarantee unique ids for objects. A value of None
indicates no string is used. Otherwise, this string will be spliced
into the object's id.
"""
differentiatorFieldName = 'differentiator'
"""
The name of the differentiator field in the fields array for CompoundId
subclasses.
"""
def __init__(self, parentCompoundId, *localIds):
"""
Allocates a new CompoundId for the specified parentCompoundId and
local identifiers. This compoundId inherits all of the fields and
values from the parent compound ID, and must have localIds
corresponding to its fields. If no parent id is present,
parentCompoundId should be set to None.
"""
index = 0
if parentCompoundId is not None:
for field in parentCompoundId.fields:
setattr(self, field, getattr(parentCompoundId, field))
index += 1
if (self.differentiator is not None and
self.differentiatorFieldName in self.fields[index:]):
# insert a differentiator into the localIds if appropriate
# for this class and we haven't advanced beyond it already
differentiatorIndex = self.fields[index:].index(
self.differentiatorFieldName)
localIds = localIds[:differentiatorIndex] + tuple([
self.differentiator]) + localIds[differentiatorIndex:]
for field, localId in zip(self.fields[index:], localIds):
if not isinstance(localId, basestring):
raise exceptions.BadIdentifierNotStringException(localId)
encodedLocalId = self.encode(localId)
setattr(self, field, encodedLocalId)
if len(localIds) != len(self.fields) - index:
raise ValueError(
"Incorrect number of fields provided to instantiate ID")
for idFieldName, prefix in self.containerIds:
values = [getattr(self, f) for f in self.fields[:prefix + 1]]
containerId = self.join(values)
obfuscated = self.obfuscate(containerId)
setattr(self, idFieldName, obfuscated)
def __str__(self):
values = [getattr(self, f) for f in self.fields]
compoundIdStr = self.join(values)
return self.obfuscate(compoundIdStr)
@classmethod
def join(cls, splits):
"""
Join an array of ids into a compound id string
"""
segments = []
for split in splits:
segments.append('"{}",'.format(split))
if len(segments) > 0:
segments[-1] = segments[-1][:-1]
jsonString = '[{}]'.format(''.join(segments))
return jsonString
@classmethod
def split(cls, jsonString):
"""
Split a compound id string into an array of ids
"""
splits = json.loads(jsonString)
return splits
@classmethod
def encode(cls, idString):
"""
Encode a string by escaping problematic characters
"""
return idString.replace('"', '\\"')
@classmethod
def decode(cls, encodedString):
"""
Decode an encoded string
"""
return encodedString.replace('\\"', '"')
@classmethod
def parse(cls, compoundIdStr):
"""
Parses the specified compoundId string and returns an instance
of this CompoundId class.
:raises: An ObjectWithIdNotFoundException if parsing fails. This is
because this method is a client-facing method, and if a malformed
identifier (under our internal rules) is provided, the response should
be that the identifier does not exist.
"""
if not isinstance(compoundIdStr, basestring):
raise exceptions.BadIdentifierException(compoundIdStr)
try:
deobfuscated = cls.deobfuscate(compoundIdStr)
except TypeError:
# When a string that cannot be converted to base64 is passed
# as an argument, b64decode raises a TypeError. We must treat
# this as an ID not found error.
raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
try:
encodedSplits = cls.split(deobfuscated)
splits = [cls.decode(split) for split in encodedSplits]
except (UnicodeDecodeError, ValueError):
# Sometimes base64 decoding succeeds but we're left with
# unicode gibberish. This is also an IdNotFound.
raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
# pull the differentiator out of the splits before instantiating
# the class, if the differentiator exists
fieldsLength = len(cls.fields)
if cls.differentiator is not None:
differentiatorIndex = cls.fields.index(
cls.differentiatorFieldName)
if differentiatorIndex < len(splits):
del splits[differentiatorIndex]
else:
raise exceptions.ObjectWithIdNotFoundException(
compoundIdStr)
fieldsLength -= 1
if len(splits) != fieldsLength:
raise exceptions.ObjectWithIdNotFoundException(compoundIdStr)
return cls(None, *splits)
@classmethod
def obfuscate(cls, idStr):
"""
Mildly obfuscates the specified ID string in an easily reversible
fashion. This is not intended for security purposes, but rather to
dissuade users from depending on our internal ID structures.
"""
return unicode(base64.urlsafe_b64encode(
idStr.encode('utf-8')).replace(b'=', b''))
@classmethod
def deobfuscate(cls, data):
"""
Reverses the obfuscation done by the :meth:`obfuscate` method.
If an identifier arrives without correct base64 padding this
function will append it to the end.
"""
# the str() call is necessary to convert the unicode string
# to an ascii string since the urlsafe_b64decode method
# sometimes chokes on unicode strings
return base64.urlsafe_b64decode(str((
data + b'A=='[(len(data) - 1) % 4:])))
@classmethod
def getInvalidIdString(cls):
"""
Return an id string that is well-formed but probably does not
correspond to any existing object; used mostly in testing
"""
return cls.join(['notValid'] * len(cls.fields))
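# A round-trip sketch (illustrative local ids, using a subclass defined
# below): the string form is URL-safe base64 and parse() recovers the
# fields:
#   cid = VariantSetCompoundId(None, "dataset1", "vs1")
#   VariantSetCompoundId.parse(str(cid)).variant_set   # -> "vs1"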
class ReferenceSetCompoundId(CompoundId):
"""
The compound ID for reference sets.
"""
fields = ['reference_set']
containerIds = [('reference_set_id', 0)]
class ReferenceCompoundId(ReferenceSetCompoundId):
"""
The compound id for a reference
"""
fields = ReferenceSetCompoundId.fields + ['reference']
class DatasetCompoundId(CompoundId):
"""
The compound id for a data set
"""
fields = ['dataset']
containerIds = [('dataset_id', 0)]
class PhenotypeAssociationSetCompoundId(CompoundId):
"""
The compound id for a phenotype association set
"""
fields = DatasetCompoundId.fields + ['phenotypeAssociationSet']
containerIds = DatasetCompoundId.containerIds + [
('phenotypeAssociationSetId', 1)]
class VariantSetCompoundId(DatasetCompoundId):
"""
The compound id for a variant set
"""
fields = DatasetCompoundId.fields + [
CompoundId.differentiatorFieldName, 'variant_set']
containerIds = DatasetCompoundId.containerIds + [('variant_set_id', 2)]
differentiator = 'vs'
class IndividualCompoundId(DatasetCompoundId):
"""
The compound id for an individual
"""
fields = DatasetCompoundId.fields + [
CompoundId.differentiatorFieldName, 'individual']
containerIds = DatasetCompoundId.containerIds + [('individual_id', 2)]
differentiator = 'i'
class BiosampleCompoundId(DatasetCompoundId):
"""
The compound id for a biosample
"""
fields = DatasetCompoundId.fields + [
CompoundId.differentiatorFieldName, 'biosample']
containerIds = DatasetCompoundId.containerIds + [('biosample_id', 2)]
differentiator = 'b'
class VariantAnnotationSetCompoundId(VariantSetCompoundId):
"""
The compound id for a variant annotation set
"""
fields = VariantSetCompoundId.fields + ['variant_annotation_set']
containerIds = VariantSetCompoundId.containerIds + [
('variant_annotation_set_id', 3)]
class VariantSetMetadataCompoundId(VariantSetCompoundId):
"""
The compound id for variant set metadata
"""
fields = VariantSetCompoundId.fields + ['key']
containerIds = VariantSetCompoundId.containerIds + [
('variant_set_metadata_id', 2)]
class VariantCompoundId(VariantSetCompoundId):
"""
The compound id for a variant
"""
fields = VariantSetCompoundId.fields + ['reference_name', 'start', 'md5']
class VariantAnnotationCompoundId(VariantAnnotationSetCompoundId):
"""
The compound id for a variant annotation
"""
fields = VariantAnnotationSetCompoundId.fields + [
'reference_name', 'start', 'md5']
class VariantAnnotationSetAnalysisCompoundId(VariantAnnotationSetCompoundId):
"""
The compound id for a variant annotation set's Analysis
"""
fields = VariantAnnotationSetCompoundId.fields + ['analysis']
class CallSetCompoundId(VariantSetCompoundId):
"""
The compound id for a callset
"""
fields = VariantSetCompoundId.fields + ['name']
class FeatureSetCompoundId(DatasetCompoundId):
"""
The compound id for a feature set
"""
fields = DatasetCompoundId.fields + ['feature_set']
containerIds = DatasetCompoundId.containerIds + [('feature_set_id', 1)]
class FeatureCompoundId(FeatureSetCompoundId):
"""
The compound id class for a feature
"""
fields = FeatureSetCompoundId.fields + ['featureId']
class ContinuousSetCompoundId(DatasetCompoundId):
"""
The compound id for a continuous set
"""
fields = DatasetCompoundId.fields + ['continuous_set']
containerIds = DatasetCompoundId.containerIds + [('continuous_set_id', 1)]
class ReadGroupSetCompoundId(DatasetCompoundId):
"""
The compound id for a read group set
"""
fields = DatasetCompoundId.fields + [
CompoundId.differentiatorFieldName, 'read_group_set']
containerIds = DatasetCompoundId.containerIds + [('read_group_set_id', 2)]
differentiator = 'rgs'
class ReadGroupCompoundId(ReadGroupSetCompoundId):
"""
The compound id for a read group
"""
fields = ReadGroupSetCompoundId.fields + ['read_group']
containerIds = ReadGroupSetCompoundId.containerIds + [('read_group_id', 3)]
class ExperimentCompoundId(ReadGroupCompoundId):
"""
The compound id for an experiment
"""
fields = ReadGroupCompoundId.fields + ['experiment']
containerIds = ReadGroupCompoundId.containerIds + [('experiment_id', 3)]
class ReadAlignmentCompoundId(ReadGroupSetCompoundId):
"""
The compound id for a read alignment
"""
fields = ReadGroupSetCompoundId.fields + ['read_alignment']
containerIds = ReadGroupSetCompoundId.containerIds + \
[('read_alignment_id', 2)]
class RnaQuantificationSetCompoundId(DatasetCompoundId):
"""
The compound id for an RNA quantification set
"""
fields = DatasetCompoundId.fields + ['rna_quantification_set']
container = [('rna_quantification_set_id', 1)]
containerIds = DatasetCompoundId.containerIds + container
class RnaQuantificationCompoundId(RnaQuantificationSetCompoundId):
"""
The compound id for an RNA quantification
"""
fields = RnaQuantificationSetCompoundId.fields + ['rna_quantification']
container = [('rna_quantification_id', 2)]
containerIds = RnaQuantificationSetCompoundId.containerIds + container
class ExpressionLevelCompoundId(RnaQuantificationCompoundId):
"""
The compound id for an expression level
"""
fields = RnaQuantificationCompoundId.fields + ['expression_level_id']
class DatamodelObject(object):
"""
Superclass of all datamodel types. A datamodel object is a concrete
representation of some data, either a single observation (such as a
read) or an aggregated set of related observations (such as a dataset).
Every datamodel object has an ID and a localId. The ID is an identifier
which uniquely identifies the object within a server instance. The
localId is a name that identifies the object within its parent
container.
"""
compoundIdClass = None
""" The class for compoundIds. Must be set in concrete subclasses. """
def __init__(self, parentContainer, localId):
self._parentContainer = parentContainer
self._localId = localId
parentId = None
if parentContainer is not None:
parentId = parentContainer.getCompoundId()
self._compoundId = self.compoundIdClass(parentId, localId)
self._attributes = {}
def getId(self):
"""
Returns the string identifying this DatamodelObject within the
server.
"""
return str(self._compoundId)
def getCompoundId(self):
"""
Returns the CompoundId instance that identifies this object
within the server.
"""
return self._compoundId
def getLocalId(self):
"""
Returns the localId of this DatamodelObject. The localId of a
DatamodelObject is a name that identifies it within its parent
container.
"""
return self._localId
def getParentContainer(self):
"""
Returns the parent container for this DatamodelObject. This is the
object that is one level above this object in the data hierarchy.
For example, for a Variant this is the VariantSet that it belongs
to.
"""
return self._parentContainer
def setAttributes(self, attributes):
"""
Sets the attributes message to the provided value.
"""
self._attributes = attributes
def setAttributesJson(self, attributesJson):
"""
Sets the attributes dictionary from a JSON string.
"""
self._attributes = json.loads(attributesJson)
def serializeAttributes(self, msg):
"""
Sets the attributes of a message during serialization.
"""
attributes = self.getAttributes()
for key in attributes:
protocol.setAttribute(
msg.attributes.attr[key].values, attributes[key])
return msg
def getAttributes(self):
"""
Returns the attributes for the DatamodelObject.
"""
return self._attributes
def _scanDataFiles(self, dataDir, patterns):
"""
Scans the specified directory for files matching the specified globbing
patterns and calls self._addDataFile for each. Raises an
EmptyDirException if no data files are found.
"""
numDataFiles = 0
for pattern in patterns:
scanPath = os.path.join(dataDir, pattern)
for filename in glob.glob(scanPath):
self._addDataFile(filename)
numDataFiles += 1
if numDataFiles == 0:
raise exceptions.EmptyDirException(dataDir, patterns)
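# A minimal subclass sketch (hypothetical class name): a concrete datamodel
# type only needs to choose its compoundIdClass:
#   class Dataset(DatamodelObject):
#       compoundIdClass = DatasetCompoundId
#   ds = Dataset(None, "1kgenomes")
#   ds.getLocalId()   # -> "1kgenomes"
#   ds.getId()        # -> obfuscated compound id string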
class PysamDatamodelMixin(object):
"""
A mixin class to simplify working with DatamodelObjects based on
directories of files interpreted using pysam. This mixin is designed
to work within the DatamodelObject hierarchy.
"""
samMin = 0
samMaxStart = 2**30 - 1
samMaxEnd = 2**30
vcfMin = -2**31
vcfMax = 2**31 - 1
maxStringLength = 2**10 # arbitrary
@classmethod
def sanitizeVariantFileFetch(cls, contig=None, start=None, stop=None):
if contig is not None:
contig = cls.sanitizeString(contig, 'contig')
if start is not None:
start = cls.sanitizeInt(start, cls.vcfMin, cls.vcfMax, 'start')
if stop is not None:
stop = cls.sanitizeInt(stop, cls.vcfMin, cls.vcfMax, 'stop')
if start is not None and stop is not None:
cls.assertValidRange(start, stop, 'start', 'stop')
return contig, start, stop
@classmethod
def sanitizeAlignmentFileFetch(cls, start=None, end=None):
if start is not None:
start = cls.sanitizeInt(
start, cls.samMin, cls.samMaxStart, 'start')
if end is not None:
end = cls.sanitizeInt(end, cls.samMin, cls.samMaxEnd, 'end')
if start is not None and end is not None:
cls.assertValidRange(start, end, 'start', 'end')
return start, end
@classmethod
def assertValidRange(cls, start, end, startName, endName):
if start > end:
message = "invalid coordinates: {} ({}) " \
"greater than {} ({})".format(startName, start, endName, end)
raise exceptions.DatamodelValidationException(message)
@classmethod
def assertInRange(cls, attr, minVal, maxVal, attrName):
message = "invalid {} '{}' outside of range [{}, {}]"
if attr < minVal:
raise exceptions.DatamodelValidationException(message.format(
attrName, attr, minVal, maxVal))
if attr > maxVal:
raise exceptions.DatamodelValidationException(message.format(
attrName, attr, minVal, maxVal))
@classmethod
def assertInt(cls, attr, attrName):
if not isinstance(attr, (int, long)):
message = "invalid {} '{}' not an int".format(attrName, attr)
raise exceptions.DatamodelValidationException(message)
@classmethod
def sanitizeInt(cls, attr, minVal, maxVal, attrName):
cls.assertInt(attr, attrName)
if attr < minVal:
attr = minVal
if attr > maxVal:
attr = maxVal
return attr
@classmethod
def sanitizeString(cls, attr, attrName):
if not isinstance(attr, basestring):
message = "invalid {} '{}' not a string".format(
attrName, attr)
raise exceptions.DatamodelValidationException(message)
if isinstance(attr, unicode):
attr = attr.encode('utf8')
if len(attr) > cls.maxStringLength:
attr = attr[:cls.maxStringLength]
return attr
def getFileHandle(self, dataFile):
return fileHandleCache.getFileHandle(dataFile, self.openFile)
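# A sanitisation sketch (illustrative values): out-of-range fetch bounds are
# clamped rather than rejected:
#   PysamDatamodelMixin.sanitizeAlignmentFileFetch(start=-5, end=2**40)
#   # -> (0, 2**30)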
|
saupchurch/server
|
ga4gh/server/datamodel/__init__.py
|
Python
|
apache-2.0
| 22,507
|
[
"pysam"
] |
22634f66058b9b49d2d38c84ab8e9ad9699957d8b85af06bcc444c227d43029d
|
# Orca
#
# Copyright (C) 2010 Joanmarie Diggs
# Copyright (C) 2011-2012 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs." \
"Copyright (c) 2011-2012 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import re
import orca.script_utilities as script_utilities
import orca.keybindings as keybindings
import orca.orca as orca
import orca.orca_state as orca_state
#############################################################################
# #
# Utilities #
# #
#############################################################################
class Utilities(script_utilities.Utilities):
def __init__(self, script):
"""Creates an instance of the Utilities class.
Arguments:
- script: the script with which this instance is associated.
"""
super().__init__(script)
def isWebKitGtk(self, obj):
"""Returns True if this object is a WebKitGtk object."""
if not obj:
return False
attrs = self.objectAttributes(obj)
return attrs.get('toolkit', '') == 'WebKitGtk'
def getCaretContext(self):
# TODO - JD: This is private, but it's only here temporarily until we
# have the shared web content support.
obj, offset = self._script._lastCaretContext
if not obj and self.isWebKitGtk(orca_state.locusOfFocus):
obj, offset = super().getCaretContext()
return obj, offset
def setCaretContext(self, obj, offset):
# TODO - JD: This is private, but it's only here temporarily until we
# have the shared web content support.
self._script._lastCaretContext = obj, offset
orca.setLocusOfFocus(None, obj, notifyScript=False)
def setCaretPosition(self, obj, offset):
self.setCaretContext(obj, offset)
self.setCaretOffset(obj, offset)
def isReadOnlyTextArea(self, obj):
"""Returns True if obj is a text entry area that is read only."""
if not obj.getRole() == pyatspi.ROLE_ENTRY:
return False
state = obj.getState()
readOnly = state.contains(pyatspi.STATE_FOCUSABLE) \
and not state.contains(pyatspi.STATE_EDITABLE)
return readOnly
def displayedText(self, obj):
"""Returns the text being displayed for an object.
Arguments:
- obj: the object
Returns the text being displayed for an object or None if there isn't
any text being shown.
"""
text = script_utilities.Utilities.displayedText(self, obj)
if text and text != self.EMBEDDED_OBJECT_CHARACTER:
return text
if obj.getRole() in [pyatspi.ROLE_LINK, pyatspi.ROLE_LIST_ITEM]:
text = ' '.join(map(self.displayedText, (x for x in obj)))
if not text:
text = self.linkBasename(obj)
return text
def getLineContentsAtOffset(self, obj, offset, layoutMode=True, useCache=True):
return self.getObjectsFromEOCs(
obj, offset, pyatspi.TEXT_BOUNDARY_LINE_START)
def getObjectContentsAtOffset(self, obj, offset=0, useCache=True):
return self.getObjectsFromEOCs(obj, offset)
def getObjectsFromEOCs(self, obj, offset=None, boundary=None):
"""Breaks the string containing a mixture of text and embedded object
characters into a list of (obj, startOffset, endOffset, string) tuples.
Arguments
- obj: the object whose EOCs we need to expand into tuples
- offset: the character offset. If None, use the current offset.
- boundary: the pyatspi text boundary type. If None, get all text.
Returns a list of (obj, startOffset, endOffset, string) tuples.
"""
try:
text = obj.queryText()
htext = obj.queryHypertext()
except (AttributeError, NotImplementedError):
return [(obj, 0, 1, '')]
string = text.getText(0, -1)
if not string:
return [(obj, 0, 1, '')]
if offset is None:
offset = text.caretOffset
if boundary is None:
start = 0
end = text.characterCount
else:
if boundary == pyatspi.TEXT_BOUNDARY_CHAR:
key, mods = self.lastKeyAndModifiers()
if (mods & keybindings.SHIFT_MODIFIER_MASK) and key == 'Right':
offset -= 1
segment, start, end = text.getTextAtOffset(offset, boundary)
pattern = re.compile(self.EMBEDDED_OBJECT_CHARACTER)
offsets = [m.start(0) for m in re.finditer(pattern, string)]
offsets = [x for x in offsets if start <= x < end]
objects = []
try:
objs = [obj[htext.getLinkIndex(offset)] for offset in offsets]
except:
objs = []
ranges = [self.getHyperlinkRange(x) for x in objs]
for i, (first, last) in enumerate(ranges):
objects.append((obj, start, first, string[start:first]))
objects.append((objs[i], first, last, ''))
start = last
objects.append((obj, start, end, string[start:end]))
objects = [x for x in objects if x[1] < x[2]]
return objects
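# A shape sketch (illustrative offsets): for an object whose text is
# "see \ufffcmore", with the embedded-object character standing in for one
# link child, this yields roughly
#   [(obj, 0, 4, 'see '), (link, 4, 5, ''), (obj, 5, 9, 'more')]
# i.e. plain-text runs interleaved with the embedded objects.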
def findPreviousObject(self, obj):
"""Finds the object before this one."""
if not obj:
return None
if obj.getRole() == pyatspi.ROLE_LINK:
obj = obj.parent
index = obj.getIndexInParent() - 1
if not (0 <= index < obj.parent.childCount - 1):
obj = obj.parent
index = obj.getIndexInParent() - 1
try:
prevObj = obj.parent[index]
except:
prevObj = obj
else:
if prevObj.getRole() == pyatspi.ROLE_LIST and prevObj.childCount:
if self.isTextListItem(prevObj[0]):
prevObj = prevObj[-1]
return prevObj
def findNextObject(self, obj):
"""Finds the object after this one."""
if not obj:
return None
if obj.getRole() == pyatspi.ROLE_LINK:
obj = obj.parent
index = obj.getIndexInParent() + 1
if not (0 < index < obj.parent.childCount):
obj = obj.parent
index = obj.getIndexInParent() + 1
try:
nextObj = obj.parent[index]
except:
nextObj = None
else:
if nextObj.getRole() == pyatspi.ROLE_LIST and nextObj.childCount:
if self.isTextListItem(nextObj[0]):
nextObj = nextObj[0]
return nextObj
def isTextListItem(self, obj):
"""Returns True if obj is an item in a non-selectable list."""
if obj.getRole() != pyatspi.ROLE_LIST_ITEM:
return False
return not obj.parent.getState().contains(pyatspi.STATE_FOCUSABLE)
def isInlineContainer(self, obj):
"""Returns True if obj is an inline/non-wrapped container."""
if obj.getRole() == pyatspi.ROLE_SECTION:
if obj.childCount > 1:
# An inline section renders its first two children on the same line.
return self.onSameLine(obj[0], obj[1])
return False
if obj.getRole() == pyatspi.ROLE_LIST:
if obj.getState().contains(pyatspi.STATE_FOCUSABLE):
return False
if not obj.childCount:
return 'Text' in pyatspi.utils.listInterfaces(obj)
if obj.childCount == 1:
return False
return self.onSameLine(obj[0], obj[1])
return False
def isEmbeddedDocument(self, obj):
if not self.isWebKitGtk(obj):
return False
docRoles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]
if not (obj and obj.getRole() in docRoles):
return False
parent = obj.parent
if not (parent and self.isWebKitGtk(parent)):
return False
parent = parent.parent
if not (parent and not self.isWebKitGtk(parent)):
return False
return True
def setCaretAtStart(self, obj):
def implementsText(obj):
if obj.getRole() == pyatspi.ROLE_LIST:
return False
return 'Text' in pyatspi.utils.listInterfaces(obj)
child = obj
if not implementsText(obj):
child = pyatspi.utils.findDescendant(obj, implementsText)
if not child:
return None, -1
index = -1
text = child.queryText()
for i in range(text.characterCount):
if text.setCaretOffset(i):
index = i
break
return child, index
def treatAsBrowser(self, obj):
return self.isEmbeddedDocument(obj)
def inDocumentContent(self, obj=None):
obj = obj or orca_state.locusOfFocus
return self.isWebKitGtk(obj)
|
GNOME/orca
|
src/orca/scripts/toolkits/WebKitGtk/script_utilities.py
|
Python
|
lgpl-2.1
| 9,888
|
[
"ORCA"
] |
2a116e1bfbd324765ec3c7c946aa02db92beb1395947f05859d62876f7ded99c
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011, 2012, 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Batch Uploader core functions. Uploading metadata and documents.
"""
import os
import pwd
import grp
import sys
import time
import tempfile
import re
from invenio.legacy.dbquery import run_sql, Error
from invenio.modules.access.engine import acc_authorize_action
from invenio.legacy.webuser import collect_user_info, page_not_authorized
from invenio.config import CFG_BINDIR, CFG_TMPSHAREDDIR, CFG_LOGDIR, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_OAI_ID_FIELD, CFG_BATCHUPLOADER_DAEMON_DIR, \
CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS, \
CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS, \
CFG_PREFIX, CFG_SITE_LANG
from invenio.utils.text import encode_for_xml
from invenio.legacy.bibsched.bibtask import task_low_level_submission
from invenio.base.i18n import gettext_set_language
from invenio.legacy.bibrecord.textmarc2xmlmarc import transform_file
from invenio.utils.shell import run_shell_command
from invenio.legacy.bibupload.engine import xml_marc_to_records, bibupload
from invenio.modules.access.firerole import _ip_matcher_builder, _ipmatch
from invenio.utils.apache import HTTP_BAD_REQUEST, HTTP_FORBIDDEN
import invenio.legacy.bibupload.engine as bibupload_module
from invenio.legacy.bibrecord import create_records, \
record_strip_empty_volatile_subfields, \
record_strip_empty_fields
try:
from six import StringIO
except ImportError:
from StringIO import StringIO
PERMITTED_MODES = ['-i', '-r', '-c', '-a', '-ir',
'--insert', '--replace', '--correct', '--append']
_CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS_RE = re.compile(CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS)
_CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS = []
for _network, _collection in CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS.items():
if '/' not in _network:
_network += '/32'
_CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS.append((_ip_matcher_builder(_network), _collection))
del _network
del _collection
def cli_allocate_record(req):
req.content_type = "text/plain"
req.send_http_header()
# check IP and useragent:
if not _get_client_authorized_collections(_get_client_ip(req)):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
return _write(req, msg)
if not _check_client_useragent(req):
msg = '[ERROR] Sorry, the "%s" useragent cannot use the service.' % _get_useragent(req)
_log(msg)
return _write(req, msg)
recid = run_sql("insert into bibrec (creation_date,modification_date) values(NOW(),NOW())")
return recid
def cli_upload(req, file_content=None, mode=None, callback_url=None, nonce=None, special_treatment=None):
""" Robot interface for uploading MARC files
"""
req.content_type = "text/plain"
# check IP and useragent:
if not _get_client_authorized_collections(_get_client_ip(req)):
msg = "[ERROR] Sorry, client IP %s cannot use the service." % _get_client_ip(req)
_log(msg)
req.status = HTTP_FORBIDDEN
return _write(req, msg)
if not _check_client_useragent(req):
msg = "[ERROR] Sorry, the %s useragent cannot use the service." % _get_useragent(req)
_log(msg)
req.status = HTTP_FORBIDDEN
return _write(req, msg)
arg_mode = mode
if not arg_mode:
msg = "[ERROR] Please specify upload mode to use."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
if arg_mode == '--insertorreplace':
arg_mode = '-ir'
if not arg_mode in PERMITTED_MODES:
msg = "[ERROR] Invalid upload mode."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
arg_file = file_content
if hasattr(arg_file, 'read'):
## We've been passed a readable file, e.g. req
arg_file = arg_file.read()
if not arg_file:
msg = "[ERROR] Please provide a body to your request."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
else:
if not arg_file:
msg = "[ERROR] Please specify file body to input."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
if hasattr(arg_file, "filename"):
arg_file = arg_file.value
else:
msg = "[ERROR] 'file' parameter must be a (single) file"
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
# write temporary file:
(fd, filename) = tempfile.mkstemp(prefix="batchupload_" + \
time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_",
dir=CFG_TMPSHAREDDIR)
filedesc = os.fdopen(fd, 'w')
filedesc.write(arg_file)
filedesc.close()
# check if this client can run this file:
client_ip = _get_client_ip(req)
permitted_dbcollids = _get_client_authorized_collections(client_ip)
if '*' not in permitted_dbcollids: # wildcard
allow = _check_client_can_submit_file(client_ip, filename, req, 0)
if not allow:
msg = "[ERROR] Cannot submit such a file from this IP. (Wrong collection.)"
_log(msg)
req.status = HTTP_FORBIDDEN
return _write(req, msg)
# check validity of marcxml
xmlmarclint_path = CFG_BINDIR + '/xmlmarclint'
xmlmarclint_output, dummy1, dummy2 = run_shell_command('%s %s' % (xmlmarclint_path, filename))
if xmlmarclint_output != 0:
msg = "[ERROR] MARCXML is not valid."
_log(msg)
req.status = HTTP_BAD_REQUEST
return _write(req, msg)
args = ['bibupload', "batchupload", arg_mode, filename]
# run upload command
if callback_url:
args += ["--callback-url", callback_url]
if nonce:
args += ["--nonce", nonce]
if special_treatment:
args += ["--special-treatment", special_treatment]
task_low_level_submission(*args)
msg = "[INFO] %s" % ' '.join(args)
_log(msg)
return _write(req, msg)
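# A hypothetical robot-client sketch (endpoint path and field names assumed
# from the batchuploader web interface, not defined in this module):
#   curl -F 'file=@records.xml' -F 'mode=-ir' \
#        https://repository.example.org/batchuploader/robotupload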
def metadata_upload(req, metafile=None, filetype=None, mode=None, exec_date=None,
exec_time=None, metafilename=None, ln=CFG_SITE_LANG,
priority="1", email_logs_to=None):
"""
Metadata web upload service. Get upload parameters and exec bibupload for the given file.
Finally, write upload history.
@return: tuple (error code, message)
error code: code that indicates if an error occurred
message: message describing the error
"""
# start output:
req.content_type = "text/html"
req.send_http_header()
error_codes = {'not_authorized': 1}
user_info = collect_user_info(req)
(fd, filename) = tempfile.mkstemp(prefix="batchupload_" + \
user_info['nickname'] + "_" + time.strftime("%Y%m%d%H%M%S",
time.localtime()) + "_", dir=CFG_TMPSHAREDDIR)
filedesc = os.fdopen(fd, 'w')
filedesc.write(metafile)
filedesc.close()
# check if this client can run this file:
if req is not None:
allow = _check_client_can_submit_file(req=req, metafile=metafile, webupload=1, ln=ln)
if allow[0] != 0:
return (error_codes['not_authorized'], allow[1])
# run upload command:
task_arguments = ('bibupload', user_info['nickname'], mode,
"--priority=" + priority, "-N", "batchupload")
if exec_date:
date = exec_date
if exec_time:
date += ' ' + exec_time
task_arguments += ("-t", date)
if email_logs_to:
task_arguments += ('--email-logs-to', email_logs_to)
task_arguments += (filename, )
jobid = task_low_level_submission(*task_arguments)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "metadata")""",
(user_info['nickname'], metafilename,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid), ))
return (0, "Task %s queued" % str(jobid))
def document_upload(req=None, folder="", matching="", mode="", exec_date="", exec_time="", ln=CFG_SITE_LANG, priority="1", email_logs_to=None):
""" Take files from the given directory and upload them with the appropiate mode.
@parameters:
+ folder: Folder where the files to upload are stored
+ matching: How to match file names with record fields (report number, barcode,...)
+ mode: Upload mode (append, revise, replace)
@return: tuple (file, error code)
file: file name causing the error to notify the user
error code:
1 - More than one possible recID, ambiguous behaviour
2 - No records match that file name
3 - File already exists
"""
import sys
from invenio.legacy.bibdocfile.api import BibRecDocs, file_strip_ext
from invenio.utils.hash import md5
import shutil
from invenio.legacy.search_engine import perform_request_search, \
search_pattern, \
guess_collection_of_a_record
_ = gettext_set_language(ln)
errors = []
info = [0, []] # Number of files read, name of the files
try:
files = os.listdir(folder)
except OSError as error:
errors.append(("", error))
return errors, info
err_desc = {1: _("More than one possible recID, ambiguous behaviour"), 2: _("No records match that file name"),
3: _("File already exists"), 4: _("A file with the same name and format already exists")}
# Create directory DONE/ if doesn't exist
folder = (folder[-1] == "/") and folder or (folder + "/")
files_done_dir = folder + "DONE/"
try:
os.mkdir(files_done_dir)
except OSError:
# Directory exists or no write permission
pass
for docfile in files:
if os.path.isfile(os.path.join(folder, docfile)):
info[0] += 1
identifier = file_strip_ext(docfile)
extension = docfile[len(identifier):]
rec_id = None
if identifier:
rec_id = search_pattern(p=identifier, f=matching, m='e')
if not rec_id:
errors.append((docfile, err_desc[2]))
continue
elif len(rec_id) > 1:
errors.append((docfile, err_desc[1]))
continue
else:
rec_id = str(list(rec_id)[0])
rec_info = BibRecDocs(rec_id)
if rec_info.bibdocs:
for bibdoc in rec_info.bibdocs:
attached_files = bibdoc.list_all_files()
file_md5 = md5(open(os.path.join(folder, docfile), "rb").read()).hexdigest()
num_errors = len(errors)
for attached_file in attached_files:
if attached_file.checksum == file_md5:
errors.append((docfile, err_desc[3]))
break
elif attached_file.get_full_name() == docfile:
errors.append((docfile, err_desc[4]))
break
if len(errors) > num_errors:
continue
# Check if user has rights to upload file
if req is not None:
file_collection = guess_collection_of_a_record(int(rec_id))
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=file_collection)
if auth_code != 0:
error_msg = _("No rights to upload to collection '%(x_name)s'", x_name=file_collection)
errors.append((docfile, error_msg))
continue
# Move document to be uploaded to temporary folder
(fd, tmp_file) = tempfile.mkstemp(prefix=identifier + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_", suffix=extension, dir=CFG_TMPSHAREDDIR)
shutil.copy(os.path.join(folder, docfile), tmp_file)
# Create MARC temporary file with FFT tag and call bibupload
(fd, filename) = tempfile.mkstemp(prefix=identifier + '_', dir=CFG_TMPSHAREDDIR)
filedesc = os.fdopen(fd, 'w')
marc_content = """ <record>
<controlfield tag="001">%(rec_id)s</controlfield>
<datafield tag="FFT" ind1=" " ind2=" ">
<subfield code="n">%(name)s</subfield>
<subfield code="a">%(path)s</subfield>
</datafield>
</record> """ % {'rec_id': rec_id,
'name': encode_for_xml(identifier),
'path': encode_for_xml(tmp_file),
}
filedesc.write(marc_content)
filedesc.close()
info[1].append(docfile)
user = ""
if req is not None:
user_info = collect_user_info(req)
user = user_info['nickname']
if not user:
user = "batchupload"
# Execute bibupload with the appropriate mode
task_arguments = ('bibupload', user, "--" + mode,
"--priority=" + priority, "-N", "batchupload")
if exec_date:
date = '--runtime=' + "\'" + exec_date + ' ' + exec_time + "\'"
task_arguments += (date, )
if email_logs_to:
task_arguments += ("--email-logs-to", email_logs_to)
task_arguments += (filename, )
jobid = task_low_level_submission(*task_arguments)
# write batch upload history
run_sql("""INSERT INTO hstBATCHUPLOAD (user, submitdate,
filename, execdate, id_schTASK, batch_mode)
VALUES (%s, NOW(), %s, %s, %s, "document")""",
(user_info['nickname'], docfile,
exec_date != "" and (exec_date + ' ' + exec_time)
or time.strftime("%Y-%m-%d %H:%M:%S"), str(jobid)))
# Move file to DONE folder
done_filename = docfile + "_" + time.strftime("%Y%m%d%H%M%S", time.localtime()) + "_" + str(jobid)
try:
os.rename(os.path.join(folder, docfile), os.path.join(files_done_dir, done_filename))
except OSError:
errors.append('MoveError')
return errors, info
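# A usage sketch (illustrative folder and matching field): upload every file
# in a directory, matching file names against report numbers:
#   errors, info = document_upload(req, folder="/data/incoming",
#                                  matching="reportnumber", mode="append")
# `errors` pairs each failed file with a reason; `info` holds the number of
# files read and the names of those queued.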
def get_user_metadata_uploads(req):
"""Retrieve all metadata upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="metadata"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_user_document_uploads(req):
"""Retrieve all document upload history information for a given user"""
user_info = collect_user_info(req)
upload_list = run_sql("""SELECT DATE_FORMAT(h.submitdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
h.filename, DATE_FORMAT(h.execdate, '%%Y-%%m-%%d %%H:%%i:%%S'), \
s.status \
FROM hstBATCHUPLOAD h INNER JOIN schTASK s \
ON h.id_schTASK = s.id \
WHERE h.user=%s and h.batch_mode="document"
ORDER BY h.submitdate DESC""", (user_info['nickname'],))
return upload_list
def get_daemon_doc_files():
""" Return all files found in batchuploader document folders """
files = {}
for folder in ['/revise', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/documents' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def get_daemon_meta_files():
""" Return all files found in batchuploader metadata folders """
files = {}
for folder in ['/correct', '/replace', '/insert', '/append']:
try:
daemon_dir = CFG_BATCHUPLOADER_DAEMON_DIR[0] == '/' and CFG_BATCHUPLOADER_DAEMON_DIR \
or CFG_PREFIX + '/' + CFG_BATCHUPLOADER_DAEMON_DIR
directory = daemon_dir + '/metadata' + folder
files[directory] = [(filename, []) for filename in os.listdir(directory) if os.path.isfile(os.path.join(directory, filename))]
for file_instance, info in files[directory]:
stat_info = os.lstat(os.path.join(directory, file_instance))
info.append("%s" % pwd.getpwuid(stat_info.st_uid)[0]) # Owner
info.append("%s" % grp.getgrgid(stat_info.st_gid)[0]) # Group
info.append("%d" % stat_info.st_size) # Size
time_stat = stat_info.st_mtime
time_fmt = "%Y-%m-%d %R"
info.append(time.strftime(time_fmt, time.gmtime(time_stat))) # Last modified
except OSError:
pass
return files
def user_authorization(req, ln):
""" Check user authorization to visit page """
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader')
if auth_code != 0:
referer = '/batchuploader/'
return page_not_authorized(req=req, referer=referer,
text=auth_message, navmenuid="batchuploader")
else:
return None
def perform_basic_upload_checks(xml_record):
""" Performs tests that would provoke the bibupload task to fail with
an exit status 1, to prevent batchupload from crashing while alarming
the user wabout the issue
"""
from invenio.legacy.bibupload.engine import writing_rights_p
errors = []
if not writing_rights_p():
errors.append("Error: BibUpload does not have rights to write fulltext files.")
recs = create_records(xml_record, 1, 1)
if recs == []:
errors.append("Error: Cannot parse MARCXML file.")
elif recs[0][0] is None:
errors.append("Error: MARCXML file has wrong format: %s" % recs)
return errors
def perform_upload_check(xml_record, mode):
""" Performs a upload simulation with the given record and mode
@return: string describing errors
@rtype: string
"""
error_cache = []
def my_writer(msg, stream=sys.stdout, verbose=1):
if verbose == 1:
if 'DONE' not in msg:
error_cache.append(msg.strip())
orig_writer = bibupload_module.write_message
bibupload_module.write_message = my_writer
error_cache.extend(perform_basic_upload_checks(xml_record))
if error_cache:
# There has been some critical error
return '\n'.join(error_cache)
recs = xml_marc_to_records(xml_record)
try:
upload_mode = mode[2:]
# Adapt input data for bibupload function
if upload_mode == "r insert-or-replace":
upload_mode = "replace_or_insert"
for record in recs:
if record:
record_strip_empty_volatile_subfields(record)
record_strip_empty_fields(record)
bibupload(record, opt_mode=upload_mode, pretend=True)
finally:
bibupload_module.write_message = orig_writer
return '\n'.join(error_cache)
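# A dry-run sketch (illustrative MARCXML string): simulate an insert without
# touching the database and collect any bibupload complaints as text:
#   problems = perform_upload_check(marcxml_string, '--insert')
#   if problems:
#       raise ValueError(problems)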
def _get_useragent(req):
"""Return client user agent from req object."""
user_info = collect_user_info(req)
return user_info['agent']
def _get_client_ip(req):
"""Return client IP address from req object."""
return str(req.remote_ip)
def _get_client_authorized_collections(client_ip):
"""
Is this client permitted to use the service?
Return list of collections for which the client is authorized
"""
ret = []
for network, collection in _CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS:
if _ipmatch(client_ip, network):
if '*' in collection:
return ['*']
ret += collection
return ret
def _check_client_useragent(req):
"""
Is this user agent permitted to use the service?
"""
client_useragent = _get_useragent(req)
if _CFG_BATCHUPLOADER_WEB_ROBOT_AGENTS_RE.match(client_useragent):
return True
return False
def _check_client_can_submit_file(client_ip="", metafile="", req=None, webupload=0, ln=CFG_SITE_LANG):
"""
Is this client able to upload such a FILENAME?
Check 980 $a values and collection tags in the file to see if they are among the
permitted ones as specified by CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS and ACC_AUTHORIZE_ACTION.
Useful to make sure that the client does not overwrite other records by
mistake.
"""
_ = gettext_set_language(ln)
recs = create_records(metafile, 0, 0)
user_info = collect_user_info(req)
permitted_dbcollids = _get_client_authorized_collections(client_ip)
if '*' in permitted_dbcollids:
if not webupload:
return True
else:
return (0, " ")
filename_tag980_values = _detect_980_values_from_marcxml_file(recs)
for filename_tag980_value in filename_tag980_values:
if not filename_tag980_value:
if not webupload:
return False
else:
return(1, "Invalid collection in tag 980")
if not webupload:
if not filename_tag980_value in permitted_dbcollids:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_tag980_value)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_tag980_value}
return (auth_code, error_msg)
filename_rec_id_collections = _detect_collections_from_marcxml_file(recs)
for filename_rec_id_collection in filename_rec_id_collections:
if not webupload:
if not filename_rec_id_collection in permitted_dbcollids:
return False
else:
auth_code, auth_message = acc_authorize_action(req, 'runbatchuploader', collection=filename_rec_id_collection)
if auth_code != 0:
error_msg = _("The user '%(x_user)s' is not authorized to modify collection '%(x_coll)s'") % \
{'x_user': user_info['nickname'], 'x_coll': filename_rec_id_collection}
return (auth_code, error_msg)
if not webupload:
return True
else:
return (0, " ")
def _detect_980_values_from_marcxml_file(recs):
"""
Read MARCXML file and return list of 980 $a values found in that file.
Useful for checking rights.
"""
from invenio.legacy.bibrecord import record_get_field_values
collection_tag = run_sql("SELECT value FROM tag, field_tag, field \
WHERE tag.id=field_tag.id_tag AND \
field_tag.id_field=field.id AND \
field.code='collection'")
collection_tag = collection_tag[0][0]
dbcollids = {}
for rec, dummy1, dummy2 in recs:
if rec:
for tag980 in record_get_field_values(rec,
tag=collection_tag[:3],
ind1=collection_tag[3],
ind2=collection_tag[4],
code=collection_tag[5]):
dbcollids[tag980] = 1
return dbcollids.keys()
def _detect_collections_from_marcxml_file(recs):
"""
Extract all possible recIDs from MARCXML file and guess collections
for these recIDs.
"""
from invenio.legacy.bibrecord import record_get_field_values
from invenio.legacy.search_engine import guess_collection_of_a_record
from invenio.legacy.bibupload.engine import find_record_from_sysno, \
find_records_from_extoaiid, \
find_record_from_oaiid
dbcollids = {}
sysno_tag = CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG
oaiid_tag = CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG
oai_tag = CFG_OAI_ID_FIELD
for rec, dummy1, dummy2 in recs:
if rec:
for tag001 in record_get_field_values(rec, '001'):
collection = guess_collection_of_a_record(int(tag001))
dbcollids[collection] = 1
for tag_sysno in record_get_field_values(rec, tag=sysno_tag[:3],
ind1=sysno_tag[3],
ind2=sysno_tag[4],
code=sysno_tag[5]):
record = find_record_from_sysno(tag_sysno)
if record:
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oaiid in record_get_field_values(rec, tag=oaiid_tag[:3],
ind1=oaiid_tag[3],
ind2=oaiid_tag[4],
code=oaiid_tag[5]):
try:
records = find_records_from_extoaiid(tag_oaiid)
except Error:
records = []
if records:
record = records.pop()
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
for tag_oai in record_get_field_values(rec, tag=oai_tag[0:3],
ind1=oai_tag[3],
ind2=oai_tag[4],
code=oai_tag[5]):
record = find_record_from_oaiid(tag_oai)
if record:
collection = guess_collection_of_a_record(int(record))
dbcollids[collection] = 1
return dbcollids.keys()
def _transform_input_to_marcxml(file_input=""):
"""
Takes text-marc as input and transforms it
to MARCXML.
"""
# Create temporary file to read from
tmp_fd, filename = tempfile.mkstemp(dir=CFG_TMPSHAREDDIR)
os.write(tmp_fd, file_input)
os.close(tmp_fd)
try:
# Redirect output, transform, restore old references
old_stdout = sys.stdout
new_stdout = StringIO()
sys.stdout = new_stdout
transform_file(filename)
finally:
sys.stdout = old_stdout
return new_stdout.getvalue()
def _log(msg, logfile="webupload.log"):
"""
Log MSG into LOGFILE with timestamp.
"""
filedesc = open(CFG_LOGDIR + "/" + logfile, "a")
filedesc.write(time.strftime("%Y-%m-%d %H:%M:%S") + " --> " + msg + "\n")
filedesc.close()
return
def _write(req, msg):
"""
Write MSG to the output stream for the end user.
"""
req.write(msg + "\n")
return
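def _demo_wildcard_rights():
    """Illustrative sketch only (not part of the Invenio API): shows how a
    '*' entry in a rights table like _CFG_BATCHUPLOADER_WEB_ROBOT_RIGHTS
    short-circuits the collection check in _get_client_authorized_collections.
    The table and the matcher below are hypothetical stand-ins."""
    demo_rights = [('127.0.0.0/24', ['Books']), ('10.0.0.0/8', ['*'])]
    def ipmatch(ip, network):
        # naive first-octet comparison, for illustration only
        return ip.split('.')[0] == network.split('.')[0]
    authorized = []
    for network, collections in demo_rights:
        if ipmatch('10.1.2.3', network):
            if '*' in collections:
                return ['*']  # wildcard: authorized for every collection
            authorized += collections
    return authorized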
|
zenodo/invenio
|
invenio/legacy/batchuploader/engine.py
|
Python
|
gpl-2.0
| 29,447
|
[
"VisIt"
] |
b5e5c843326ccee4e7d7f2f6f2335d43102081cc27b82b18614f3cdd0414f1ed
|
#!/usr/bin/env python
"""[License: GNU General Public License v3 (GPLv3)]
This file is part of FuMa.
FuMa is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FuMa is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Documentation as defined by:
<http://epydoc.sourceforge.net/manual-fields.html#fields-synonyms>
"""
import fuma
from setuptools import setup
setup(name='fuma',
version=fuma.__version__,
description='Fusion Matcher',
long_description="FuMa (Fusion Matcher) matches predicted fusion events (both genomic and transcriptomic) according to chromosomal location and corresponding annotated genes. It is the organisation of the transcriptome (provided by the user) that forms the basis for FuMa to consider fusion genes to be identical or not. The provided gene annotation can be adjusted to define the biological question. For example, if it is desired to only consider fusion events that occur within exons, FuMa can be provided a list of such regions instead of entire genes.",
author=fuma.__author__,
maintainer=fuma.__author__,
license=fuma.__license__,
url=fuma.__homepage__,
scripts=["bin/fuma","bin/defuse-clusters-to-CG",'bin/chimerascan-exclude-transcriptome-events',"bin/fusioncatcher-to-CG","bin/chimerascan-relative-bedpe-to-CG","bin/fuma-list-to-boolean-list","bin/fuma-gencode-gtf-to-bed"],
packages=['fuma'],
test_suite="tests",
platforms=['any'],
setup_requires=['numpy'],
install_requires=['numpy','HTSeq >= 0.6.1','nose'],
classifiers=[
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics'
],
)
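# Typical usage of this script (standard setuptools entry points, shown for
# reference only):
#
#     python setup.py test   # runs the 'tests' suite declared above
#     pip install .          # installs the 'fuma' package and the bin/ scripts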
|
yhoogstrate/fuma
|
setup.py
|
Python
|
gpl-3.0
| 2,283
|
[
"HTSeq"
] |
e8fa9a58a2a5d7899047a1dae239fff6a12f56db32b5fe723ef344a604a7f79e
|
#! /usr/bin/env python
#
# Copyright (C) 2015-2016 Rich Lewis <rl403@cam.ac.uk>
# License: 3-clause BSD
"""
# skchem.io.sdf
Defining input and output operations for sdf files.
"""
from functools import wraps
import warnings
from rdkit import Chem
import pandas as pd
from ..core import Mol
from ..utils import Suppressor, squeeze
def _drop_props(row):
for prop in row.structure.props.keys():
row.structure.ClearProp(prop)
def _set_props(row, cols):
for i in cols:
row.structure.SetProp(str(i), str(row[i]))
def _set_name(row):
row.structure.name = str(row.name) # rdkit props can only be strs
def read_sdf(sdf, error_bad_mol=False, warn_bad_mol=True, nmols=None,
skipmols=None, skipfooter=None, read_props=True, mol_props=False,
*args, **kwargs):
"""Read an sdf file into a `pd.DataFrame`.
The function wraps the RDKit `ForwardSDMolSupplier` object.
Args:
sdf (str or file-like):
The location of data to load as a file path, or a file-like object.
error_bad_mol (bool):
Whether an error should be raised if a molecule fails to parse.
Default is False.
warn_bad_mol (bool):
Whether a warning should be output if a molecule fails to parse.
Default is True.
nmols (int):
The number of molecules to read. If `None`, read all molecules.
Default is `None`.
skipmols (int):
The number of molecules to skip at start.
Default is `0`.
skipfooter (int):
The number of molecules to skip from the end.
Default is `0`.
read_props (bool):
Whether to read the properties into the data frame.
Default is `True`.
mol_props (bool):
Whether to keep properties in the molecule dictionary after they
are extracted to the DataFrame.
Default is `False`.
args, kwargs:
Arguments will be passed to RDKit ForwardSDMolSupplier.
Returns:
pandas.DataFrame:
The loaded data frame, with Mols supplied in the `structure` field.
See also:
rdkit.Chem.SDForwardMolSupplier
skchem.read_smiles
"""
    # nmols is actually the index to cut off at. If we skip some molecules at
    # the start, we need to shift the cutoff by that number
    if skipmols and nmols is not None:
        nmols += skipmols
if isinstance(sdf, str):
sdf = open(sdf, 'rb') # use read bytes for python 3 compatibility
# use the suppression context manager to not pollute our stdout with rdkit
# errors and warnings.
# perhaps this should be captured better by Mol etc.
with Suppressor():
mol_supp = Chem.ForwardSDMolSupplier(sdf, *args, **kwargs)
mols = []
# single loop through sdf
for i, mol in enumerate(mol_supp):
if skipmols and i < skipmols:
continue
if nmols and i >= nmols:
break
if mol is None:
msg = 'Molecule {} could not be decoded.'.format(i + 1)
if error_bad_mol:
raise ValueError(msg)
elif warn_bad_mol:
warnings.warn(msg)
continue
mols.append(Mol(mol))
if skipfooter:
mols = mols[:-skipfooter]
idx = pd.Index((m.name for m in mols), name='batch')
data = pd.DataFrame(mols, columns=['structure'])
if read_props:
props = pd.DataFrame([{k: v for (k, v) in mol.props.items()}
for mol in mols])
data = pd.concat([data, props], axis=1)
# now we have extracted the props, we can delete if required
if not mol_props:
data.apply(_drop_props, axis=1)
data.index = idx
return squeeze(data, axis=1)
def write_sdf(data, sdf, write_cols=True, index_as_name=True, mol_props=False,
*args, **kwargs):
""" Write an sdf file from a dataframe.
Args:
data (pandas.Series or pandas.DataFrame):
Pandas data structure with a `structure` column containing
compounds to serialize.
sdf (str or file-like):
A file path or file-like object specifying where to write the
compound data.
write_cols (bool):
Whether columns should be written as props. Default `True`.
index_as_name (bool):
Whether to use index as the header, or the molecule's name.
Default is `True`.
mol_props (bool):
Whether to write properties in the Mol dictionary in addition to
fields in the frame.
Warn:
This function will change the names of the compounds if the
`index_as_name` argument is `True`, and will delete all properties in
the molecule dictionary if `mol_props` is `False`.
"""
if isinstance(data, pd.Series):
data = data.to_frame(name='structure')
names = [m.name for m in data.structure]
writer = Chem.SDWriter(sdf, *args, **kwargs)
cols = list(data.columns.drop('structure'))
if not mol_props:
data.apply(_drop_props, axis=1)
if write_cols:
data.apply(_set_props, cols=cols, axis=1)
if index_as_name:
data.apply(_set_name, axis=1)
data.structure.apply(writer.write)
# rdkit writer changes names sometimes
for mol, name in zip(data.structure, names):
mol.name = name
@wraps(write_sdf)
def _to_sdf_series(self, *args, **kwargs):
return write_sdf(self, write_cols=False, *args, **kwargs)
@wraps(write_sdf)
def _to_sdf_df(self, *args, **kwargs):
return write_sdf(self, *args, **kwargs)
pd.Series.to_sdf = _to_sdf_series
pd.DataFrame.to_sdf = _to_sdf_df
@classmethod
@wraps(read_sdf)
def _from_sdf_df(_, *args, **kwargs):
return read_sdf(*args, **kwargs)
pd.DataFrame.from_sdf = _from_sdf_df
@classmethod
@wraps(read_sdf)
def _from_sdf_series(_, *args, **kwargs):
return read_sdf(*args, **kwargs).structure
pd.Series.from_sdf = _from_sdf_series
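# Hedged round-trip sketch ('demo.sdf' and 'demo_out.sdf' are hypothetical
# paths; any SDF file with property fields would do). Guarded so importing
# this module stays side-effect free.
if __name__ == '__main__':
    frame = read_sdf('demo.sdf', nmols=5)
    print(frame.columns)  # 'structure' plus one column per SDF property field
    write_sdf(frame, 'demo_out.sdf', index_as_name=True)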
|
richlewis42/scikit-chem
|
skchem/io/sdf.py
|
Python
|
bsd-3-clause
| 6,101
|
[
"RDKit"
] |
e4a650d962fa887da97087cd8533d6f9cfdf0fcaebd76aa82f04158b1a75a79f
|
# Copyright 2015-2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computation of split penalties before/between tokens."""
from lib2to3 import pytree
from yapf.yapflib import format_token
from yapf.yapflib import py3compat
from yapf.yapflib import pytree_utils
from yapf.yapflib import pytree_visitor
from yapf.yapflib import style
# TODO(morbo): Document the annotations in a centralized place. E.g., the
# README file.
UNBREAKABLE = 1000 * 1000
DOTTED_NAME = 2500
STRONGLY_CONNECTED = 2000
CONTIGUOUS_LIST = 500
NOT_TEST = 242
COMPARISON_EXPRESSION = 842
ARITHMETIC_EXPRESSION = 942
def ComputeSplitPenalties(tree):
"""Compute split penalties on tokens in the given parse tree.
Arguments:
tree: the top-level pytree node to annotate with penalties.
"""
_TreePenaltyAssigner().Visit(tree)
class _TreePenaltyAssigner(pytree_visitor.PyTreeVisitor):
"""Assigns split penalties to tokens, based on parse tree structure.
Split penalties are attached as annotations to tokens.
"""
def Visit_import_as_names(self, node): # pylint: disable=invalid-name
# import_as_names ::= import_as_name (',' import_as_name)* [',']
self.DefaultNodeVisit(node)
prev_child = None
for child in node.children:
if (prev_child and isinstance(prev_child, pytree.Leaf) and
prev_child.value == ','):
pytree_utils.SetNodeAnnotation(child,
pytree_utils.Annotation.SPLIT_PENALTY,
style.Get('SPLIT_PENALTY_IMPORT_NAMES'))
prev_child = child
def Visit_classdef(self, node): # pylint: disable=invalid-name
# classdef ::= 'class' NAME ['(' [arglist] ')'] ':' suite
#
# NAME
self._SetUnbreakable(node.children[1])
if len(node.children) > 4:
# opening '('
self._SetUnbreakable(node.children[2])
# ':'
self._SetUnbreakable(node.children[-2])
self.DefaultNodeVisit(node)
def Visit_funcdef(self, node): # pylint: disable=invalid-name
# funcdef ::= 'def' NAME parameters ['->' test] ':' suite
#
# Can't break before the function name and before the colon. The parameters
# are handled by child iteration.
colon_idx = 1
while pytree_utils.NodeName(node.children[colon_idx]) == 'simple_stmt':
colon_idx += 1
self._SetUnbreakable(node.children[colon_idx])
while colon_idx < len(node.children):
if (isinstance(node.children[colon_idx], pytree.Leaf) and
node.children[colon_idx].value == ':'):
break
colon_idx += 1
self._SetUnbreakable(node.children[colon_idx])
self.DefaultNodeVisit(node)
def Visit_lambdef(self, node): # pylint: disable=invalid-name
# lambdef ::= 'lambda' [varargslist] ':' test
# Loop over the lambda up to and including the colon.
if style.Get('ALLOW_MULTILINE_LAMBDAS'):
self._SetStronglyConnected(node)
else:
self._SetUnbreakableOnChildren(node)
def Visit_parameters(self, node): # pylint: disable=invalid-name
# parameters ::= '(' [typedargslist] ')'
self.DefaultNodeVisit(node)
# Can't break before the opening paren of a parameter list.
self._SetUnbreakable(node.children[0])
if not style.Get('DEDENT_CLOSING_BRACKETS'):
self._SetStronglyConnected(node.children[-1])
def Visit_argument(self, node): # pylint: disable=invalid-name
# argument ::= test [comp_for] | test '=' test # Really [keyword '='] test
self.DefaultNodeVisit(node)
index = 0
while index < len(node.children) - 1:
next_child = node.children[index + 1]
if isinstance(next_child, pytree.Leaf) and next_child.value == '=':
self._SetUnbreakable(_FirstChildNode(node.children[index + 1]))
self._SetUnbreakable(_FirstChildNode(node.children[index + 2]))
index += 1
def Visit_dotted_name(self, node): # pylint: disable=invalid-name
# dotted_name ::= NAME ('.' NAME)*
self._SetUnbreakableOnChildren(node)
def Visit_dictsetmaker(self, node): # pylint: disable=invalid-name
# dictsetmaker ::= ( (test ':' test
# (comp_for | (',' test ':' test)* [','])) |
# (test (comp_for | (',' test)* [','])) )
for child in node.children:
self.Visit(child)
if pytree_utils.NodeName(child) == 'COLON':
# This is a key to a dictionary. We don't want to split the key if at
# all possible.
self._SetStronglyConnected(child)
def Visit_trailer(self, node): # pylint: disable=invalid-name
# trailer ::= '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
self.DefaultNodeVisit(node)
if node.children[0].value == '.':
self._SetUnbreakableOnChildren(node)
pytree_utils.SetNodeAnnotation(node.children[1],
pytree_utils.Annotation.SPLIT_PENALTY,
DOTTED_NAME)
elif len(node.children) == 2:
# Don't split an empty argument list if at all possible.
self._SetStronglyConnected(node.children[1])
elif len(node.children) == 3:
if pytree_utils.NodeName(node.children[1]) not in {
'arglist', 'argument', 'term', 'or_test', 'and_test', 'comparison',
'atom'
}:
# Don't split an argument list with one element if at all possible.
self._SetStronglyConnected(node.children[1], node.children[2])
if pytree_utils.NodeName(node.children[-1]) == 'RSQB':
# Don't split the ending bracket of a subscript list.
self._SetStronglyConnected(node.children[-1])
def Visit_power(self, node): # pylint: disable=invalid-name,missing-docstring
# power ::= atom trailer* ['**' factor]
self.DefaultNodeVisit(node)
# When atom is followed by a trailer, we can not break between them.
# E.g. arr[idx] - no break allowed between 'arr' and '['.
if (len(node.children) > 1 and
pytree_utils.NodeName(node.children[1]) == 'trailer'):
# children[1] itself is a whole trailer: we don't want to
# mark all of it as unbreakable, only its first token: (, [ or .
self._SetUnbreakable(node.children[1].children[0])
# A special case when there are more trailers in the sequence. Given:
# atom tr1 tr2
# The last token of tr1 and the first token of tr2 comprise an unbreakable
# region. For example: foo.bar.baz(1)
# We can't put breaks between either of the '.', '(', or '[' and the names
# *preceding* them.
prev_trailer_idx = 1
while prev_trailer_idx < len(node.children) - 1:
cur_trailer_idx = prev_trailer_idx + 1
cur_trailer = node.children[cur_trailer_idx]
if pytree_utils.NodeName(cur_trailer) == 'trailer':
# Now we know we have two trailers one after the other
prev_trailer = node.children[prev_trailer_idx]
if prev_trailer.children[-1].value != ')':
# Set the previous node unbreakable if it's not a function call:
# atom tr1() tr2
# It may be necessary (though undesirable) to split up a previous
# function call's parentheses to the next line.
self._SetUnbreakable(prev_trailer.children[-1])
self._SetUnbreakable(cur_trailer.children[0])
prev_trailer_idx = cur_trailer_idx
else:
break
# We don't want to split before the last ')' of a function call. This also
# takes care of the special case of:
# atom tr1 tr2 ... trn
# where the 'tr#' are trailers that may end in a ')'.
for trailer in node.children[1:]:
if pytree_utils.NodeName(trailer) != 'trailer':
break
if trailer.children[0].value in '([':
if len(trailer.children) > 2:
subtypes = pytree_utils.GetNodeAnnotation(
trailer.children[0], pytree_utils.Annotation.SUBTYPE)
if subtypes and format_token.Subtype.SUBSCRIPT_BRACKET in subtypes:
self._SetStronglyConnected(_FirstChildNode(trailer.children[1]))
last_child_node = _LastChildNode(trailer)
if last_child_node.value.strip().startswith('#'):
last_child_node = last_child_node.prev_sibling
if not style.Get('DEDENT_CLOSING_BRACKETS'):
self._SetUnbreakable(last_child_node)
if _FirstChildNode(trailer).lineno == last_child_node.lineno:
# If the trailer was originally on one line, then try to keep it
# like that.
self._SetExpressionPenalty(trailer, CONTIGUOUS_LIST)
else:
# If the trailer's children are '()', then make it a strongly
# connected region. It's sometimes necessary, though undesirable, to
# split the two.
self._SetStronglyConnected(trailer.children[-1])
# If the original source has a "builder" style calls, then we should allow
# the reformatter to retain that.
_AllowBuilderStyleCalls(node)
def Visit_subscript(self, node): # pylint: disable=invalid-name
# subscript ::= test | [test] ':' [test] [sliceop]
self._SetStronglyConnected(*node.children)
self.DefaultNodeVisit(node)
def Visit_comp_for(self, node): # pylint: disable=invalid-name
# comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter]
pytree_utils.SetNodeAnnotation(
_FirstChildNode(node), pytree_utils.Annotation.SPLIT_PENALTY, 0)
self._SetStronglyConnected(*node.children[1:])
self.DefaultNodeVisit(node)
def Visit_comp_if(self, node): # pylint: disable=invalid-name
# comp_if ::= 'if' old_test [comp_iter]
pytree_utils.SetNodeAnnotation(node.children[0],
pytree_utils.Annotation.SPLIT_PENALTY,
style.Get('SPLIT_PENALTY_BEFORE_IF_EXPR'))
self._SetStronglyConnected(*node.children[1:])
self.DefaultNodeVisit(node)
def Visit_not_test(self, node): # pylint: disable=invalid-name
# not_test ::= 'not' not_test | comparison
self.DefaultNodeVisit(node)
self._SetExpressionPenalty(node, NOT_TEST)
def Visit_comparison(self, node): # pylint: disable=invalid-name
# comparison ::= expr (comp_op expr)*
self.DefaultNodeVisit(node)
self._SetExpressionPenalty(node, COMPARISON_EXPRESSION)
def Visit_arith_expr(self, node): # pylint: disable=invalid-name
# arith_expr ::= term (('+'|'-') term)*
self.DefaultNodeVisit(node)
self._SetExpressionPenalty(node, ARITHMETIC_EXPRESSION)
def Visit_atom(self, node): # pylint: disable=invalid-name
# atom ::= ('(' [yield_expr|testlist_gexp] ')'
# '[' [listmaker] ']' |
# '{' [dictsetmaker] '}')
self.DefaultNodeVisit(node)
if node.children[0].value == '(':
if node.children[0].lineno == node.children[-1].lineno:
self._SetExpressionPenalty(node, CONTIGUOUS_LIST)
if node.children[-1].value == ')':
if pytree_utils.NodeName(node.parent) == 'if_stmt':
pytree_utils.SetNodeAnnotation(node.children[-1],
pytree_utils.Annotation.SPLIT_PENALTY,
UNBREAKABLE)
else:
pytree_utils.SetNodeAnnotation(node.children[-1],
pytree_utils.Annotation.SPLIT_PENALTY,
COMPARISON_EXPRESSION)
elif node.children[0].value in '[{':
# Keep empty containers together if we can.
lbracket = node.children[0]
rbracket = node.children[-1]
if len(node.children) == 2:
self._SetUnbreakable(node.children[-1])
elif (rbracket.value in ']}' and
lbracket.get_lineno() == rbracket.get_lineno() and
rbracket.column - lbracket.column < style.Get('COLUMN_LIMIT')):
self._SetStronglyConnected(*node.children[1:])
############################################################################
# Helper methods that set the annotations.
def _SetUnbreakable(self, node):
"""Set an UNBREAKABLE penalty annotation for the given node."""
_RecAnnotate(node, pytree_utils.Annotation.SPLIT_PENALTY, UNBREAKABLE)
def _SetStronglyConnected(self, *nodes):
"""Set a STRONGLY_CONNECTED penalty annotation for the given nodes."""
for node in nodes:
_RecAnnotate(node, pytree_utils.Annotation.SPLIT_PENALTY,
STRONGLY_CONNECTED)
def _SetUnbreakableOnChildren(self, node):
"""Set an UNBREAKABLE penalty annotation on children of node."""
for child in node.children:
self.Visit(child)
start = 2 if hasattr(node.children[0], 'is_pseudo') else 1
for i in py3compat.range(start, len(node.children)):
self._SetUnbreakable(node.children[i])
def _SetExpressionPenalty(self, node, penalty):
"""Set an ARITHMETIC_EXPRESSION penalty annotation children nodes."""
def RecArithmeticExpression(node, first_child_leaf):
if node is first_child_leaf:
return
if isinstance(node, pytree.Leaf):
if node.value in {'(', 'for', 'if'}:
return
penalty_annotation = pytree_utils.GetNodeAnnotation(
node, pytree_utils.Annotation.SPLIT_PENALTY, default=0)
if penalty_annotation < penalty:
pytree_utils.SetNodeAnnotation(node,
pytree_utils.Annotation.SPLIT_PENALTY,
penalty)
else:
for child in node.children:
RecArithmeticExpression(child, first_child_leaf)
RecArithmeticExpression(node, _FirstChildNode(node))
def _RecAnnotate(tree, annotate_name, annotate_value):
"""Recursively set the given annotation on all leafs of the subtree.
Takes care to only increase the penalty. If the node already has a higher
or equal penalty associated with it, this is a no-op.
Args:
tree: subtree to annotate
annotate_name: name of the annotation to set
annotate_value: value of the annotation to set
"""
for child in tree.children:
_RecAnnotate(child, annotate_name, annotate_value)
if isinstance(tree, pytree.Leaf):
cur_annotate = pytree_utils.GetNodeAnnotation(
tree, annotate_name, default=0)
if cur_annotate < annotate_value:
pytree_utils.SetNodeAnnotation(tree, annotate_name, annotate_value)
def _AllowBuilderStyleCalls(node):
"""Allow splitting before '.' if it's a builder style function call."""
def RecGetLeaves(node):
if isinstance(node, pytree.Leaf):
return [node]
children = []
for child in node.children:
children += RecGetLeaves(child)
return children
list_of_children = RecGetLeaves(node)
prev_child = None
for child in list_of_children:
if child.value == '.':
if prev_child.lineno != child.lineno:
pytree_utils.SetNodeAnnotation(child,
pytree_utils.Annotation.SPLIT_PENALTY, 0)
prev_child = child
def _FirstChildNode(node):
if isinstance(node, pytree.Leaf):
return node
return _FirstChildNode(node.children[0])
def _LastChildNode(node):
if isinstance(node, pytree.Leaf):
return node
return _LastChildNode(node.children[-1])
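# Hedged driver sketch, guarded so imports stay side-effect free. It assumes
# this codebase's pytree_utils.ParseCodeToTree and style.CreatePEP8Style
# helpers; both names are used here on that assumption.
if __name__ == '__main__':
  style.SetGlobalStyle(style.CreatePEP8Style())
  demo_tree = pytree_utils.ParseCodeToTree('foo.bar.baz(1)\n')
  ComputeSplitPenalties(demo_tree)  # leaves now carry SPLIT_PENALTY annotations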
|
wklken/yapf
|
yapf/yapflib/split_penalty.py
|
Python
|
apache-2.0
| 15,677
|
[
"VisIt"
] |
7e2e3c979d1e76cfce4e5337a573ceaad8edfc4b8daa5a48e9732ec6b9036bc9
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter construction support.
This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.
The class hierarchy is as follows:
<your converter>
[extends] converter.Base
[extends] transformer.Base
[extends] gast.NodeTransformer
[uses] transformer.SourceInfo
[uses] converter.EntityContext
[uses] converter.ProgramContext
[uses] transformer.SourceInfo
converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the corresponding
EntityContext object (see below). Note that converters are not reusable, and
`visit` will raise an error if called more than once.
converter.EntityContext contains mutable state associated with an entity that
the converter processes.
converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.
Below is the overall flow at conversion:
program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
while <program_ctx has more entities to convert>:
entity, source_info = <get next entity from program_ctx>
entity_ctx = EntityContext(program_ctx, source_info)
for <each ConverterClass>:
converter = ConverterClass(entity_ctx)
# May update entity_ctx and program_ctx
entity = converter.visit(entity)
<add entity's dependencies to program_ctx>
Note that pyct contains a small number of transformers used for static analysis.
These implement transformer.Base, rather than converter.Base, to avoid a
dependency on AutoGraph.
"""
import enum
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.util.tf_export import tf_export
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
# TODO(mdan): Add a test specific to this converter.
@tf_export('autograph.experimental.Feature')
class Feature(enum.Enum):
"""This enumeration represents optional conversion options.
These conversion options are experimental. They are subject to change without
notice and offer no guarantees.
_Example Usage_
```python
optionals = tf.autograph.experimental.Feature.EQUALITY_OPERATORS
@tf.function(experimental_autograph_options=optionals)
def f(i):
if i == 0: # EQUALITY_OPERATORS allows the use of == here.
tf.print('i is zero')
```
Attributes:
ALL: Enable all features.
AUTO_CONTROL_DEPS: Insertion of control dependencies in the generated code.
ASSERT_STATEMENTS: Convert Tensor-dependent assert statements to tf.Assert.
BUILTIN_FUNCTIONS: Convert builtin functions applied to Tensors to
their TF counterparts.
EQUALITY_OPERATORS: Whether to convert the comparison operators, like
equality. This is soon to be deprecated as support is being added to the
Tensor class.
LISTS: Convert list idioms, like initializers, slices, append, etc.
NAME_SCOPES: Insert name scopes that name ops according to context, like the
function they were defined in.
"""
ALL = 'ALL'
AUTO_CONTROL_DEPS = 'AUTO_CONTROL_DEPS'
ASSERT_STATEMENTS = 'ASSERT_STATEMENTS'
BUILTIN_FUNCTIONS = 'BUILTIN_FUNCTIONS'
EQUALITY_OPERATORS = 'EQUALITY_OPERATORS'
LISTS = 'LISTS'
NAME_SCOPES = 'NAME_SCOPES'
@classmethod
def all(cls):
"""Returns a tuple that enables all options."""
return tuple(cls.__members__.values())
@classmethod
def all_but(cls, exclude):
"""Returns a tuple that enables all but the excluded options."""
if not isinstance(exclude, (list, tuple, set)):
exclude = (exclude,)
return tuple(set(cls.all()) - set(exclude) - {cls.ALL})
STANDARD_OPTIONS = None # Forward definition.
class ConversionOptions(object):
"""Immutable container for global conversion flags.
Attributes:
recursive: bool, whether to recursively convert any user functions or
classes that the converted function may use.
user_requested: bool, whether the conversion was explicitly requested by
the user, as opposed to being performed as a result of other logic. This
value always auto-resets to False in child conversions.
optional_features: Union[Feature, Set[Feature]], controls the use of
optional features in the conversion process. See Feature for available
options.
"""
def __init__(self,
recursive=False,
user_requested=False,
internal_convert_user_code=True,
optional_features=Feature.ALL):
self.recursive = recursive
self.user_requested = user_requested
# TODO(mdan): Rename to conversion_recursion_depth?
self.internal_convert_user_code = internal_convert_user_code
if optional_features is None:
optional_features = ()
elif isinstance(optional_features, Feature):
optional_features = (optional_features,)
optional_features = frozenset(optional_features)
self.optional_features = optional_features
def as_tuple(self):
return (self.recursive, self.user_requested,
self.internal_convert_user_code, self.optional_features)
def __hash__(self):
return hash(self.as_tuple())
def __eq__(self, other):
assert isinstance(other, ConversionOptions)
return self.as_tuple() == other.as_tuple()
def __str__(self):
return 'ConversionOptions[{}]'.format(self.as_tuple())
def uses(self, feature):
return (Feature.ALL in self.optional_features or
feature in self.optional_features)
def call_options(self):
"""Returns the corresponding options to be used for recursive conversion."""
return ConversionOptions(
recursive=self.recursive,
user_requested=False,
internal_convert_user_code=self.recursive,
optional_features=self.optional_features)
def to_ast(self):
"""Returns a representation of this object as an AST node.
The AST node encodes a constructor that would create an object with the
same contents.
Returns:
ast.Node
"""
if self == STANDARD_OPTIONS:
return parser.parse_expression('ag__.STD')
template = """
ag__.ConversionOptions(
recursive=recursive_val,
user_requested=user_requested_val,
optional_features=optional_features_val,
internal_convert_user_code=internal_convert_user_code_val)
"""
def list_of_features(values):
return parser.parse_expression('({})'.format(', '.join(
'ag__.{}'.format(str(v)) for v in values)))
expr_ast = templates.replace(
template,
recursive_val=parser.parse_expression(str(self.recursive)),
user_requested_val=parser.parse_expression(str(self.user_requested)),
internal_convert_user_code_val=parser.parse_expression(
str(self.internal_convert_user_code)),
optional_features_val=list_of_features(self.optional_features))
return expr_ast[0].value
STANDARD_OPTIONS = ConversionOptions(
recursive=True,
user_requested=False,
internal_convert_user_code=True,
optional_features=None)
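# Hedged demo of the Feature filtering above: an options object built with
# only LISTS enabled reports other features as unused. Guarded so it does
# not execute on import.
if __name__ == '__main__':
  demo_opts = ConversionOptions(optional_features=Feature.LISTS)
  assert demo_opts.uses(Feature.LISTS)
  assert not demo_opts.uses(Feature.EQUALITY_OPERATORS)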
class ProgramContext(object):
"""ProgramContext keeps track of converting function hierarchies.
Attributes:
options: ConversionOptions
autograph_module: Deprecated. Do not use.
"""
def __init__(self, options, autograph_module=None):
self.options = options
self.autograph_module = autograph_module
class Base(transformer.Base):
"""All converters should inherit from this class.
Attributes:
ctx: EntityContext
"""
def __init__(self, ctx):
super(Base, self).__init__(ctx)
self._used = False
self._ast_depth = 0
def get_definition_directive(self, node, directive, arg, default):
"""Returns the unique directive argument for a symbol.
See lang/directives.py for details on directives.
Example:
# Given a directive in the code:
ag.foo_directive(bar, baz=1)
# One can write for an AST node Name(id='bar'):
get_definition_directive(node, ag.foo_directive, 'baz')
Args:
node: ast.AST, the node representing the symbol for which the directive
argument is needed.
directive: Callable[..., Any], the directive to search.
arg: str, the directive argument to return.
default: Any
Raises:
ValueError: if conflicting annotations have been found
"""
defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
if not defs:
return default
arg_values_found = []
for def_ in defs:
if (directive in def_.directives and arg in def_.directives[directive]):
arg_values_found.append(def_.directives[directive][arg])
if not arg_values_found:
return default
if len(arg_values_found) == 1:
return arg_values_found[0]
# If multiple annotations reach the symbol, they must all match. If they do,
# return any of them.
first_value = arg_values_found[0]
for other_value in arg_values_found[1:]:
if not ast_util.matches(first_value, other_value):
qn = anno.getanno(node, anno.Basic.QN)
raise ValueError(
'%s has ambiguous annotations for %s(%s): %s, %s' %
(qn, directive.__name__, arg, parser.unparse(other_value).strip(),
parser.unparse(first_value).strip()))
return first_value
def visit(self, node):
if not self._ast_depth:
if self._used:
raise ValueError('converter objects cannot be reused')
self._used = True
self._ast_depth += 1
try:
return super(Base, self).visit(node)
finally:
self._ast_depth -= 1
|
tensorflow/tensorflow
|
tensorflow/python/autograph/core/converter.py
|
Python
|
apache-2.0
| 10,788
|
[
"VisIt"
] |
46d570684fe6b79297229aec1e5c726d385816877a147619042a33f41f38a7d7
|
import os, sys
import numpy as np
import pysam
DATAPATH="/cs/taatto/group/mlb/synergy/data/2012-03_RNA"
TRANSCRIPTS = ["ENST00000561552", "ENST00000361711", "ENST00000343939", "ENST00000393306", "ENST00000565123", "ENST00000563543", "ENST00000567707", "ENSG00000140961_unspliced"]
UNSPLICED = ["ENSG00000140961_unspliced"]
def find_files():
f = os.listdir(DATAPATH)
return sorted([DATAPATH + '/' + x for x in f if x.startswith('MCF7_t')])
def gettids(f, seqs):
return [f.gettid(x) for x in seqs]
def count_alignments(f, mytids, logf=sys.stdout):
curqname = "invalid"
hasother = np.zeros(len(mytids), np.bool)
hascorrect = np.zeros(len(mytids), np.bool)
count = np.zeros(len(mytids), np.int64)
lines_done = 0
for a in f:
lines_done += 1
if lines_done % 100000 == 0:
logf.write(str(lines_done) + " lines done, count: " + str(count) + "\n")
logf.flush()
# New alignment for the same read
if a.qname == curqname:
match = np.array([a.tid in x for x in mytids], np.bool)
hascorrect = hascorrect | match
hasother = hasother | (~match)
# new read
else:
count += (hascorrect & ~hasother)
match = np.array([a.tid in x for x in mytids], np.bool)
hascorrect = match
hasother = ~match
curqname = a.qname
# flush the flags for the final read, which never triggers a qname change
count += (hascorrect & ~hasother)
return count
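def _demo_count_logic():
    """Illustrative only: mimic count_alignments on synthetic alignments.
    Each fake alignment carries (qname, tid); a read is counted for a tid
    set only when every one of its alignments falls inside that set."""
    class FakeAlignment(object):
        def __init__(self, qname, tid):
            self.qname, self.tid = qname, tid
    alignments = [FakeAlignment('r1', 1), FakeAlignment('r1', 1),
                  FakeAlignment('r2', 1), FakeAlignment('r2', 9)]
    # 'r1' maps only inside {1} and is counted; 'r2' also hits tid 9, so it
    # is dropped as a multi-target read -> expected result: array([1])
    return count_alignments(iter(alignments), [set([1])])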
if __name__ == "__main__":
myind = int(sys.argv[1])
if len(sys.argv) > 2:
logf = open(sys.argv[2], 'w')
else:
logf = sys.stdout
fn = find_files()
logf.write("Processing file " + fn[myind] + "\n")
f = pysam.Samfile(fn[myind], "rb" )
alltids = set(gettids(f, TRANSCRIPTS))
premrnatid = set(gettids(f, UNSPLICED))
mycount = count_alignments(f, [alltids, premrnatid], logf)
f.close()
f = open(DATAPATH + '/OSGIN1_counts_%d.txt' % myind, 'w')
f.write(str(mycount) + '\n')
f.close()
if len(sys.argv) > 2:
logf.close()
|
ahonkela/pol2rna
|
python/count_osgin1_reads.py
|
Python
|
bsd-3-clause
| 2,013
|
[
"pysam"
] |
d03c4bf27bd6bb12c395df75dc55b92cda4b07820fdd00514f4d962664ebe7cb
|
'''
VAE that associates latent representations for different modalities of sensory data
'''
import os
import time
import datetime
import numpy as np
from itertools import combinations
import tensorflow as tf
def xavier_init(fan_in, fan_out, constant=1):
""" Xavier initialization of network weights"""
# https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
low = -constant*np.sqrt(6.0/(fan_in + fan_out))
high = constant*np.sqrt(6.0/(fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval=low, maxval=high,
dtype=tf.float32)
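def _xavier_bound_demo():
    """Illustrative only: the uniform sampling bound used by xavier_init for
    fan_in=784, fan_out=256; weights are drawn from [-bound, bound]."""
    bound = np.sqrt(6.0 / (784 + 256))
    return bound  # ~0.0759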
class AssocVariationalAutoEncoder(object):
'''
Extends the Variational AutoEncoder to associate latent representations across two modalities of sensory data.
Generally we force the latent variables of the two sensory modalities to agree by penalizing the divergence between their Gaussian distributions.
'''
def __init__(self, network_architectures, binary=True, transfer_fct=tf.nn.softplus, weights = 1.0, assoc_lambda = 1.0,
learning_rate=0.001, batch_size=100):
self.network_architectures = network_architectures
self.assoc_lambda = assoc_lambda
#check if binary data
if type(binary) is list:
assert len(binary) == len(network_architectures)
self.binary = binary
else:
self.binary = [binary] * len(network_architectures)
if type(weights) is list:
assert len(weights) == len(network_architectures)
self.weights = weights
else:
self.weights = [weights] * len(network_architectures)
# if type(transfer_fct) is list:
# assert len(transfer_fct) == len(network_architectures)
# self.transfer_fct = transfer_fct
# else:
# self.transfer_fct = [transfer_fct] * len(network_architectures)
self.transfer_fct = transfer_fct
self.learning_rate = learning_rate
self.batch_size = batch_size
self.x = [tf.placeholder(tf.float32, [None, na["n_input"]]) for na in self.network_architectures]
# Create autoencoder network
self._create_network(batch_size=self.batch_size)
# Define loss function based variational upper-bound and
# corresponding optimizer
self._create_loss_optimizer()
# Initializing the tensor flow variables
init = tf.initialize_all_variables()
# Launch the session
self.sess = tf.InteractiveSession()
self.sess.run(init)
#create a saver for future saving/restoring the model
self.saver = tf.train.Saver()
return
def _unpack_network_arguments(self, scope, hidden_conv, n_hidden_recog_1, n_hidden_recog_2,
n_hidden_gener_1, n_hidden_gener_2,
n_input, n_z):
return scope, hidden_conv, n_hidden_recog_1, n_hidden_recog_2, n_hidden_gener_1, n_hidden_gener_2, n_input, n_z
def _create_network(self, batch_size):
# Initialize autoencode network weights and biases
# self.network_weights = [self._initialize_weights(**na) for na in self.network_architectures]
self.z_means = []
self.z_log_sigma_sqs = []
self.z_array = []
self.x_reconstr_means = []
# Draw one sample z from Gaussian distribution
# this must be uniform for all networks
self.n_z = self.network_architectures[0]["n_z"]
tmp_eps = tf.random_normal((batch_size, self.n_z), 0, 1,
dtype=tf.float32)
for sens_idx, na in enumerate(self.network_architectures):
# Use recognition network to determine mean and
# (log) variance of Gaussian distribution in latent
# space
tmp_z_mean, tmp_z_log_sigma_sq = \
self._recognition_network(self.x[sens_idx],
na)
# z = mu + sigma*epsilon
tmp_z = tf.add(tmp_z_mean,
tf.multiply(tf.sqrt(tf.exp(tmp_z_log_sigma_sq)), tmp_eps))
# Use generator to determine mean of
# Bernoulli distribution of reconstructed input
tmp_x_reconstr_mean = \
self._generator_network(tmp_z,
na,
self.binary[sens_idx],
conv_enable=True)
#store related tf variables
self.z_means.append(tmp_z_mean)
self.z_log_sigma_sqs.append(tmp_z_log_sigma_sq)
self.z_array.append(tmp_z)
self.x_reconstr_means.append(tmp_x_reconstr_mean)
return
def _initialize_weights(self, scope, n_hidden_recog_1, n_hidden_recog_2,
n_hidden_gener_1, n_hidden_gener_2,
n_input, n_z):
#use scope to differentiate variabels for different sensory information
with tf.variable_scope(scope):
all_weights = dict()
all_weights['weights_recog'] = {
'h1': tf.Variable(xavier_init(n_input, n_hidden_recog_1)),
'h2': tf.Variable(xavier_init(n_hidden_recog_1, n_hidden_recog_2)),
'out_mean': tf.Variable(xavier_init(n_hidden_recog_2, n_z)),
'out_log_sigma': tf.Variable(xavier_init(n_hidden_recog_2, n_z))}
all_weights['biases_recog'] = {
'b1': tf.Variable(tf.zeros([n_hidden_recog_1], dtype=tf.float32)),
'b2': tf.Variable(tf.zeros([n_hidden_recog_2], dtype=tf.float32)),
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32)),
'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32))}
all_weights['weights_gener'] = {
'h1': tf.Variable(xavier_init(n_z, n_hidden_gener_1)),
'h2': tf.Variable(xavier_init(n_hidden_gener_1, n_hidden_gener_2)),
'out_mean': tf.Variable(xavier_init(n_hidden_gener_2, n_input)),
'out_log_sigma': tf.Variable(xavier_init(n_hidden_gener_2, n_input))}
all_weights['biases_gener'] = {
'b1': tf.Variable(tf.zeros([n_hidden_gener_1], dtype=tf.float32)),
'b2': tf.Variable(tf.zeros([n_hidden_gener_2], dtype=tf.float32)),
'out_mean': tf.Variable(tf.zeros([n_input], dtype=tf.float32)),
'out_log_sigma': tf.Variable(tf.zeros([n_input], dtype=tf.float32))}
return all_weights
# def _recognition_network(self, x, weights, biases):
# # Generate probabilistic encoder (recognition network), which
# # maps inputs onto a normal distribution in latent space.
# # The transformation is parametrized and can be learned.
# layer_1 = self.transfer_fct(tf.add(tf.matmul(x, weights['h1']),
# biases['b1']))
# layer_2 = self.transfer_fct(tf.add(tf.matmul(layer_1, weights['h2']),
# biases['b2']))
# z_mean = tf.add(tf.matmul(layer_2, weights['out_mean']),
# biases['out_mean'])
# z_log_sigma_sq = \
# tf.add(tf.matmul(layer_2, weights['out_log_sigma']),
# biases['out_log_sigma'])
# return (z_mean, z_log_sigma_sq)
def _recognition_network(self, x, network_architecture):
# Generate probabilistic encoder (recognition network), which
# maps inputs onto a normal distribution in latent space.
# The transformation is parametrized and can be learned.
scope, hidden_conv, n_hidden_recog_1, n_hidden_recog_2, n_hidden_gener_1, n_hidden_gener_2, n_input, n_z = self._unpack_network_arguments(**network_architecture)
with tf.variable_scope(scope):
if hidden_conv:
#convolution layer filtering over 2d 28x28 images
input_size = int(np.sqrt(n_input))
x_2d = tf.reshape(x, [-1, input_size, input_size, 1])
#convolution layers use their own initialization of weight variables
layer_0_5 = conv_2d( x_2d,
filter_set=[5, 1, n_hidden_recog_1],
stride=2,
padding='SAME',
transfer_fct=None) #no nonlinear transformation here
layer_1 = conv_2d( layer_0_5,
filter_set=[5, n_hidden_recog_1, n_hidden_recog_1*2],
stride=2,
padding='SAME',
transfer_fct=None)
else:
weights_hidden_1 = tf.Variable(xavier_init(n_input, n_hidden_recog_1))
biases_hidden_1 = tf.Variable(tf.zeros([n_hidden_recog_1], dtype=tf.float32))
layer_1 = self.transfer_fct(tf.add(tf.matmul(x, weights_hidden_1),
biases_hidden_1))
if hidden_conv:
# layer_1_size = (input_size - 5) / 1
layer_1_size = input_size / 2 / 2
#convolution layers use their own initialization of weight variables
layer_2_2d = conv_2d( layer_1,
filter_set=[5, n_hidden_recog_1*2, n_hidden_recog_2],
stride=1,
padding='VALID') #no nonlinear transformation here
layer_2_size = (layer_1_size - 5) / 1 + 1
layer_2 = tf.reshape(layer_2_2d, (-1, layer_2_size*layer_2_size*n_hidden_recog_2))
else:
weights_hidden_2 = tf.Variable(xavier_init(n_hidden_recog_1, n_hidden_recog_2))
biases_hidden_2 = tf.Variable(tf.zeros([n_hidden_recog_2], dtype=tf.float32))
layer_2 = self.transfer_fct(tf.add(tf.matmul(layer_1, weights_hidden_2),
biases_hidden_2))
if hidden_conv:
weights_out_mean = tf.Variable(xavier_init(layer_2_size * layer_2_size * n_hidden_recog_2, n_z))
biases_out_mean = tf.Variable(tf.zeros([n_z], dtype=tf.float32))
weights_out_log_sigma = tf.Variable(xavier_init(layer_2_size * layer_2_size * n_hidden_recog_2, n_z))
biases_out_log_sigma = tf.Variable(tf.zeros([n_z], dtype=tf.float32))
else:
weights_out_mean = tf.Variable(xavier_init(n_hidden_recog_2, n_z))
biases_out_mean = tf.Variable(tf.zeros([n_z], dtype=tf.float32))
weights_out_log_sigma = tf.Variable(xavier_init(n_hidden_recog_2, n_z))
biases_out_log_sigma = tf.Variable(tf.zeros([n_z], dtype=tf.float32))
z_mean = tf.add(tf.matmul(layer_2, weights_out_mean),
biases_out_mean)
z_log_sigma_sq = \
tf.add(tf.matmul(layer_2, weights_out_log_sigma),
biases_out_log_sigma)
return (z_mean, z_log_sigma_sq)
# def _generator_network(self, z, weights, biases, binary):
# # Generate probabilistic decoder (decoder network), which
# # maps points in latent space onto a Bernoulli distribution in data space.
# # The transformation is parametrized and can be learned.
# layer_1 = self.transfer_fct(tf.add(tf.matmul(z, weights['h1']),
# biases['b1']))
# layer_2 = self.transfer_fct(tf.add(tf.matmul(layer_1, weights['h2']),
# biases['b2']))
#
# if binary:
# x_reconstr_mean = \
# tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['out_mean']),
# biases['out_mean']))
# else:
# x_reconstr_mean = \
# tf.add(tf.matmul(layer_2, weights['out_mean']),
# biases['out_mean'])
# return x_reconstr_mean
def _generator_network(self, z, network_architecture, binary, conv_enable=True):
# Generate probabilistic decoder (decoder network), which
# maps points in latent space onto a Bernoulli distribution in data space.
# The transformation is parametrized and can be learned.
scope, hidden_conv, n_hidden_recog_1, n_hidden_recog_2, n_hidden_gener_1, n_hidden_gener_2, n_input, n_z = self._unpack_network_arguments(**network_architecture)
with tf.variable_scope(scope):
if hidden_conv and conv_enable:
z_2d = tf.reshape(z, (-1, 1, 1, n_z))
layer_1 = deconv_2d( z_2d,
filter_set=[3, n_z, n_hidden_gener_1],
stride=None,
padding='VALID'
) # no nonlinear transformation
else:
weights_hidden_1 = tf.Variable(xavier_init(n_z, n_hidden_recog_1))
biases_hidden_1 = tf.Variable(tf.zeros([n_hidden_recog_1], dtype=tf.float32))
layer_1 = self.transfer_fct(tf.add(tf.matmul(z, weights_hidden_1),
biases_hidden_1))
if hidden_conv and conv_enable:
layer_1_25 = deconv_2d( layer_1,
filter_set=[5, n_hidden_gener_1, n_hidden_gener_1/2],
stride=1,
padding='VALID'
) # no nonlinear transformation
layer_1_50 = deconv_2d( layer_1_25,
filter_set=[5, n_hidden_gener_1/2, n_hidden_gener_2],
stride=2,
padding='SAME'
) # no nonlinear transformation
layer_2_2d = deconv_2d( layer_1_50,
filter_set=[5, n_hidden_gener_2, 1],
stride=2,
padding='SAME'
) # no nonlinear transformation
layer_2 = tf.reshape(layer_2_2d, (-1, n_input))
else:
weights_hidden_2 = tf.Variable(xavier_init(n_hidden_recog_1, n_hidden_recog_2))
biases_hidden_2 = tf.Variable(tf.zeros([n_hidden_recog_2], dtype=tf.float32))
layer_2 = self.transfer_fct(tf.add(tf.matmul(layer_1, weights_hidden_2),
biases_hidden_2))
if binary:
if hidden_conv and conv_enable:
weights_genr_mean = tf.Variable(xavier_init(n_input, n_input))
biases_genr_mean = tf.Variable(tf.zeros([n_input]))
x_reconstr_mean = \
tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights_genr_mean),
biases_genr_mean))
else:
weights_genr_mean = tf.Variable(xavier_init(n_hidden_recog_2, n_input))
biases_genr_mean = tf.Variable(tf.zeros([n_input]))
x_reconstr_mean = \
tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights_genr_mean),
biases_genr_mean))
else:
weights_genr_mean = tf.Variable(xavier_init(n_hidden_recog_2, n_input))
biases_genr_mean = tf.Variable(tf.zeros([n_input]))
x_reconstr_mean = \
tf.add(tf.matmul(layer_2, weights_genr_mean),
biases_genr_mean)
return x_reconstr_mean
def _create_loss_optimizer(self):
# The loss is composed of two terms:
# 1.) The reconstruction loss (the negative log probability
# of the input under the reconstructed Bernoulli distribution
# induced by the decoder in the data space).
# This can be interpreted as the number of "nats" required
# for reconstructing the input when the activation in latent
# is given.
# Adding a small constant to avoid evaluation of log(0.0)
# <hyin/Apr-1st-2016> 1e-10 seems not enough to regularize over more epochs,
# hence the 1e-3 used below
self.vae_costs = []
self.vae_reconstr_losses = []
self.vae_latent_losses = []
for binary, x, x_reconstr_mean, z_mean, z_log_sigma_sq, weight in zip(self.binary, self.x, self.x_reconstr_means, self.z_means, self.z_log_sigma_sqs, self.weights):
if binary:
reconstr_loss = \
-tf.reduce_sum(x * tf.log(1e-3 + x_reconstr_mean)
+ (1-x) * tf.log(1e-3 + 1 - x_reconstr_mean),
1)
else:
# <hyin/Apr-4th-2016> square cost for gaussian output, need average?
reconstr_loss = \
tf.reduce_sum(tf.nn.l2_loss(x - x_reconstr_mean))
# 2.) The latent loss, which is defined as the Kullback Leibler divergence
# between the distribution in latent space induced by the encoder on
# the data and some prior. This acts as a kind of regularizer.
# This can be interpreted as the number of "nats" required
# for transmitting the latent space distribution given
# the prior.
latent_loss = -0.5 * tf.reduce_sum(1 + z_log_sigma_sq
- tf.square(z_mean)
- tf.exp(z_log_sigma_sq), 1)
self.vae_reconstr_losses.append(reconstr_loss)
self.vae_latent_losses.append(latent_loss)
self.vae_costs.append(tf.reduce_mean(reconstr_loss + latent_loss) * weight)
#<hyin/Apr-8th-2016> now we have a third term to associate latent representations
# associativity regularization on the latent representations
self.assoc_costs = []
#combine each pair of sensory latent representations
for i, j in list(combinations(range(len(self.network_architectures)), 2)):
self.assoc_costs.append(
# <hyin/Apr-8th-2016> it is wrong to simply expand the univariate case to multi-variate one
# tf.reduce_sum(
# tf.div(tf.square(self.z_means[i] -self.z_means[j]) + tf.exp(self.z_log_sigma_sqs[i]) - tf.exp(self.z_log_sigma_sqs[j]),
# 2 * tf.exp(self.z_log_sigma_sqs[i]) + 1e-3)
# + 0.5 * (self.z_log_sigma_sqs[i] - self.z_log_sigma_sqs[j])
# )
# see http://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians for the derivation
tf.reduce_sum(
0.5 * (tf.reduce_sum(self.z_log_sigma_sqs[j], 1) - tf.reduce_sum(self.z_log_sigma_sqs[i], 1) - self.n_z
+ tf.reduce_sum(tf.exp(self.z_log_sigma_sqs[i] - self.z_log_sigma_sqs[j]), 1)
+ tf.reduce_sum(tf.multiply(tf.square(self.z_means[j] - self.z_means[i]), tf.exp(-self.z_log_sigma_sqs[j])), 1))
)
# <hyin/May-19th-2016> try a symmetrized distance (sum of both KL directions)
+ tf.reduce_sum(
0.5 * (tf.reduce_sum(self.z_log_sigma_sqs[i], 1) - tf.reduce_sum(self.z_log_sigma_sqs[j], 1) - self.n_z
+ tf.reduce_sum(tf.exp(self.z_log_sigma_sqs[j] - self.z_log_sigma_sqs[i]), 1)
+ tf.reduce_sum(tf.multiply(tf.square(self.z_means[i] - self.z_means[j]), tf.exp(-self.z_log_sigma_sqs[i])), 1))
)
)
if self.assoc_costs:
self.cost = tf.add_n(self.vae_costs) + self.assoc_lambda * tf.add_n(self.assoc_costs)
else:
self.cost = tf.add_n(self.vae_costs)
# Use ADAM optimizer
self.optimizer = \
tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.cost)
return
def partial_fit(self, X):
"""Train model based on mini-batch of input data.
Return cost of mini-batch.
"""
opt, cost = self.sess.run((self.optimizer, self.cost),
feed_dict={x_sens:X_sens for x_sens, X_sens in zip(self.x, X)})
return cost
def evaluate_cost(self, X):
cost = self.sess.run(self.cost,
feed_dict={x_sens:X_sens for x_sens, X_sens in zip(self.x, X)})
return cost
def transform(self, X, sens_idx=None):
"""Transform data by mapping it into the latent space."""
# Note: This maps to mean of distribution, we could alternatively
# sample from Gaussian distribution
# sens_idx is either None or an integer, indicating the desired data modality
if sens_idx is None:
latent_reps = [self.sess.run(z_mean, feed_dict={x_sens: X_sens}) for z_mean, x_sens, X_sens in zip(self.z_means, self.x, X)]
else:
assert sens_idx < len(self.z_means)
latent_reps = self.sess.run(self.z_means[sens_idx], feed_dict={self.x[sens_idx]: X})
return latent_reps
def generate(self, z_mu=None):
""" Generate data by sampling from latent space.
If z_mu is not None, data for this point in latent space is
generated. Otherwise, z_mu is drawn from prior in latent
space.
"""
if z_mu is None:
# <hyin/Apr-8th-2016> the size of the random sample has to match a batch...
z_mu = np.random.normal(size=(self.batch_size, self.n_z))
# Note: This maps to mean of distribution, we could alternatively
# sample from Gaussian distribution
x_reconstr_means = [self.sess.run(x_reconstr_mean,
feed_dict={z: z_mu}) for x_reconstr_mean, z in zip(self.x_reconstr_means, self.z_array)]
return x_reconstr_means
def reconstruct(self, X):
""" Use VAE to reconstruct given data. """
x_reconstr_means = [self.sess.run(x_reconstr_mean,
feed_dict={x_sens: X_sens}) for x_reconstr_mean, x_sens, X_sens in zip(self.x_reconstr_means, self.x, X)]
return x_reconstr_means
def save_model(self, fname=None):
if fname is None:
ts = time.time()
ckpt_fname = 'vae_assoc_' + datetime.datetime.fromtimestamp(ts).strftime('%Y_%m_%d_%H_%M_%S') + '_batchsize_{}.ckpt'.format(self.batch_size)
else:
ckpt_fname = fname
print 'Saving model to {}...'.format(ckpt_fname)
save_path = self.saver.save(self.sess, ckpt_fname)
return
def restore_model(self, folder=None, fname=None):
if folder is None:
model_folder = 'output'
else:
model_folder = folder
if os.path.isdir(model_folder) and os.path.exists(model_folder):
if fname is None:
#search all checkpoint files in the folder
files = [f for f in os.listdir(model_folder) if f.endswith('.ckpt')]
if not files:
print 'No valid model file.'
return
else:
model_file = files[-1]
else:
model_file = fname
if os.path.exists(os.path.join(model_folder, model_file)):
#using saver to load it
print 'Loading {}...'.format(os.path.join(model_folder, model_file))
self.saver.restore(self.sess, os.path.join(model_folder, model_file))
else:
print 'Invalid or non-exist model file.'
else:
print 'Invalid or non-exist model folder.'
return
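def _kl_diag_gaussians(mu_p, logvar_p, mu_q, logvar_q):
    """Reference sketch (illustrative only) of the associative penalty used
    in _create_loss_optimizer: per-sample KL(p || q) for diagonal Gaussians
    parameterized by means and log-variances,
    KL = 0.5 * sum(logvar_q - logvar_p - 1
                   + exp(logvar_p - logvar_q)
                   + (mu_p - mu_q)**2 * exp(-logvar_q)).
    The symmetrized cost above adds the reverse direction as well."""
    return 0.5 * np.sum(logvar_q - logvar_p - 1.0
                        + np.exp(logvar_p - logvar_q)
                        + np.square(mu_p - mu_q) * np.exp(-logvar_q))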
'''
Convolution helper layers for 2D grayscale images
'''
import prettytensor as pt
from deconv import deconv2d
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# helper functions for convolution and deconvolution operation for 2D mono-channel images
def conv_2d(x_2d, filter_set=[5, 1, 32], stride=2, padding='VALID', transfer_fct=None):
#hidden layer with a convolution operation
W_conv = weight_variable([filter_set[0], filter_set[0], filter_set[1], filter_set[2]])
if transfer_fct is not None:
h_conv = transfer_fct(tf.nn.conv2d(x_2d, W_conv, strides=(1, stride, stride, 1), padding=padding))
else:
h_conv = tf.nn.conv2d(x_2d, W_conv, strides=(1, stride, stride, 1), padding=padding)
#no pooling for now
return h_conv
def deconv_2d(x_2d, filter_set=[5, 32, 1], stride=None, padding='VALID', transfer_fct=tf.nn.sigmoid):
return (pt.wrap(x_2d).
# reshape([-1, 1, 1, filter_set[1]]).
deconv2d(kernel=filter_set[0], depth=filter_set[2], stride=stride, edges=padding, activation_fn=transfer_fct)
).tensor
def train(data_sets, network_architectures, binary=True, weights=1.0, assoc_lambda=1e-5, learning_rate=0.001,
batch_size=100, training_epochs=10, display_step=5, early_stop=False):
vae_assoc = AssocVariationalAutoEncoder(network_architectures,
binary,
transfer_fct=tf.nn.relu,
weights=weights,
assoc_lambda=assoc_lambda,
learning_rate=learning_rate,
batch_size=batch_size)
n_samples = data_sets.train._data.shape[0]
sens_indices = np.concatenate([[0], np.cumsum([na["n_input"] for na in network_architectures])])
avg_cost_hist = []
valid_cost = None
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
if early_stop:
#if it is the time to check early stop condition
if epoch % early_stop == 0:
#calculate current validation cost
curr_valid_cost = 0
n_valid_batches = int(data_sets.validation._data.shape[0] / batch_size)
for i in range(n_valid_batches):
batch_xs, _ = data_sets.validation.next_batch(batch_size)
# construct batch data in the desired format: segment the different sensory modalities
batch_xs_seg = [batch_xs[:, sens_indices[i]:sens_indices[i+1]] for i in range(len(network_architectures))]
curr_valid_cost += vae_assoc.evaluate_cost(batch_xs_seg) / n_valid_batches
print "Validation cost=", "{:.9f}".format(curr_valid_cost)
if valid_cost is not None:
if curr_valid_cost > valid_cost:
#stop the iteration
print 'Validation error increases. Early stop at epoch {} to prevent overfitting...'.format(epoch+1)
break
valid_cost = curr_valid_cost
# Loop over all batches
for i in range(total_batch):
batch_xs, _ = data_sets.train.next_batch(batch_size)
# construct batch data in the desired format: segment the different sensory modalities
batch_xs_seg = [batch_xs[:, sens_indices[i]:sens_indices[i+1]] for i in range(len(network_architectures))]
# print batch_xs_seg[0][-1, :]
# print vae_assoc.network_weights[0]["weights_recog"]['h1'].eval()
# print vae_assoc.network_weights[0]["weights_recog"]['h2'].eval()
# vae_costs = [vae_assoc.sess.run(cost,
# feed_dict={x_sens:X_sens}) for cost, x_sens, X_sens in zip(vae_assoc.vae_costs, vae_assoc.x, batch_xs_seg)]
# print vae_costs
# reconstr_costs = [vae_assoc.sess.run(reconstr_cost,
# feed_dict={x_sens:X_sens}) for reconstr_cost, x_sens, X_sens in zip(vae_assoc.vae_reconstr_losses, vae_assoc.x, batch_xs_seg)]
# latent_costs = [vae_assoc.sess.run(latent_cost,
# feed_dict={x_sens:X_sens}) for latent_cost, x_sens, X_sens in zip(vae_assoc.vae_latent_losses, vae_assoc.x, batch_xs_seg)]
#
# print reconstr_costs
# print latent_costs
# z_means = [vae_assoc.sess.run(z_mean,
# feed_dict={x_sens:X_sens}) for z_mean, x_sens, X_sens in zip(vae_assoc.z_means, vae_assoc.x, batch_xs_seg)]
# print z_means
#
# z_log_sigma_sqs = [vae_assoc.sess.run(z_log_sigma_sq,
# feed_dict={x_sens:X_sens}) for z_log_sigma_sq, x_sens, X_sens in zip(vae_assoc.z_log_sigma_sqs, vae_assoc.x, batch_xs_seg)]
# print z_log_sigma_sqs
# x_reconstr_means = [vae_assoc.sess.run(x_reconstr_mean,
# feed_dict={x_sens:X_sens}) for x_reconstr_mean, x_sens, X_sens in zip(vae_assoc.x_reconstr_means, vae_assoc.x, batch_xs_seg)]
# print x_reconstr_means
# raw_input()
# Fit training using batch data
cost = vae_assoc.partial_fit(batch_xs_seg)
# Compute average loss
avg_cost += cost / n_samples * batch_size
avg_cost_hist.append(avg_cost)
# Display logs per epoch step
if epoch % display_step == 0:
print "Epoch:", '%04d' % (epoch+1), \
"cost=", "{:.9f}".format(avg_cost)
return vae_assoc, avg_cost_hist
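# A hedged usage sketch of train() (the dataset object and the architecture
# dicts below are assumptions; only the key actually read above, "n_input",
# is known to be real):
#
#   archs = [dict(n_input=784), dict(n_input=21)]
#   model, cost_hist = train(data_sets, archs, batch_size=100,
#                            training_epochs=50, early_stop=5)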
|
navigator8972/vae_hwmotion
|
vae_assoc.py
|
Python
|
gpl-3.0
| 30,163
|
[
"Gaussian"
] |
206a9644ce7caba6295051ee2851527a5db22226e071d4b09b6de64aabfffeaf
|
# Authors : Denis A. Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License : BSD 3-clause
from copy import deepcopy
import math
import numpy as np
from scipy import fftpack
# XXX explore cuda optimization at some point.
from ..io.pick import _pick_data_channels, pick_info
from ..utils import verbose, warn
from ..parallel import parallel_func, check_n_jobs
from .tfr import AverageTFR, _get_data
def _check_input_st(x_in, n_fft):
"""Aux function."""
# determine the FFT length and any zero-padding needed
n_times = x_in.shape[-1]
def _is_power_of_two(n):
return not (n > 0 and ((n & (n - 1))))
if n_fft is None or (not _is_power_of_two(n_fft) and n_times > n_fft):
# Compute next power of 2
n_fft = 2 ** int(math.ceil(math.log(n_times, 2)))
elif n_fft < n_times:
raise ValueError("n_fft cannot be smaller than signal size. "
"Got %s < %s." % (n_fft, n_times))
if n_times < n_fft:
warn('The input signal is shorter ({}) than "n_fft" ({}). '
'Applying zero padding.'.format(x_in.shape[-1], n_fft))
zero_pad = n_fft - n_times
pad_array = np.zeros(x_in.shape[:-1] + (zero_pad,), x_in.dtype)
x_in = np.concatenate((x_in, pad_array), axis=-1)
else:
zero_pad = 0
return x_in, n_fft, zero_pad
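# Example (illustrative): for a 300-sample signal and n_fft=None, n_fft is
# rounded up to the next power of two, 512, so zero_pad == 212.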
def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width):
"""Precompute stockwell Gaussian windows (in the freq domain)."""
tw = fftpack.fftfreq(n_samp, 1. / sfreq) / n_samp
tw = np.r_[tw[:1], tw[1:][::-1]]
k = width  # 1 for the classical Stockwell transform
f_range = np.arange(start_f, stop_f, 1)
windows = np.empty((len(f_range), len(tw)), dtype=np.complex)
for i_f, f in enumerate(f_range):
if f == 0.:
window = np.ones(len(tw))
else:
window = ((f / (np.sqrt(2. * np.pi) * k)) *
np.exp(-0.5 * (1. / k ** 2.) * (f ** 2.) * tw ** 2.))
window /= window.sum() # normalisation
windows[i_f] = fftpack.fft(window)
return windows
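# For reference, each precomputed row above is the FFT of the Gaussian
# g_f(t) = f / (sqrt(2 * pi) * k) * exp(-f**2 * t**2 / (2 * k**2)),
# normalised to unit sum; k == 1 recovers the classical S-transform window.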
def _st(x, start_f, windows):
"""Compute ST based on Ali Moukadem MATLAB code (used in tests)."""
n_samp = x.shape[-1]
ST = np.empty(x.shape[:-1] + (len(windows), n_samp), dtype=np.complex)
# do the work
Fx = fftpack.fft(x)
XF = np.concatenate([Fx, Fx], axis=-1)
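    # Concatenating the spectrum with itself emulates circular indexing, so
    # XF[..., f:f + n_samp] below is the spectrum rotated by f bins.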
for i_f, window in enumerate(windows):
f = start_f + i_f
ST[..., i_f, :] = fftpack.ifft(XF[..., f:f + n_samp] * window)
return ST
def _st_power_itc(x, start_f, compute_itc, zero_pad, decim, W):
"""Aux function."""
n_samp = x.shape[-1]
n_out = (n_samp - zero_pad)
n_out = n_out // decim + bool(n_out % decim)
psd = np.empty((len(W), n_out))
itc = np.empty_like(psd) if compute_itc else None
X = fftpack.fft(x)
XX = np.concatenate([X, X], axis=-1)
for i_f, window in enumerate(W):
f = start_f + i_f
ST = fftpack.ifft(XX[:, f:f + n_samp] * window)
if zero_pad > 0:
TFR = ST[:, :-zero_pad:decim]
else:
TFR = ST[:, ::decim]
TFR_abs = np.abs(TFR)
TFR_abs[TFR_abs == 0] = 1.
if compute_itc:
TFR /= TFR_abs
itc[i_f] = np.abs(np.mean(TFR, axis=0))
TFR_abs *= TFR_abs
psd[i_f] = np.mean(TFR_abs, axis=0)
return psd, itc
def tfr_array_stockwell(data, sfreq, fmin=None, fmax=None, n_fft=None,
width=1.0, decim=1, return_itc=False, n_jobs=1):
"""Compute power and intertrial coherence using Stockwell (S) transform.
See [1]_, [2]_, [3]_, [4]_ for more information.
Parameters
----------
data : ndarray
The signal to transform. Any dimensionality supported as long
as the last dimension is time.
sfreq : float
The sampling frequency.
fmin : None, float
The minimum frequency to include. If None defaults to the minimum fft
frequency greater than zero.
fmax : None, float
The maximum frequency to include. If None defaults to the maximum fft.
n_fft : int | None
The length of the windows used for FFT. If None, it defaults to the
next power of 2 larger than the signal length.
width : float
The width of the Gaussian window. If < 1, increased temporal
resolution, if > 1, increased frequency resolution. Defaults to 1.
(classical S-Transform).
decim : int
The decimation factor on the time axis. To reduce memory usage.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
n_jobs : int
Number of parallel jobs to use.
Returns
-------
st_power : ndarray
The multitaper power of the Stockwell transformed data.
The last two dimensions are frequency and time.
itc : ndarray
The intertrial coherence. Only returned if return_itc is True.
freqs : ndarray
The frequencies.
References
----------
.. [1] Stockwell, R. G. "Why use the S-transform." AMS Pseudo-differential
operators: Partial differential equations and time-frequency
analysis 52 (2007): 279-309.
.. [2] Moukadem, A., Bouguila, Z., Abdeslam, D. O, and Dieterlen, A.
Stockwell transform optimization applied on the detection of split in
heart sounds (2014). Signal Processing Conference (EUSIPCO), 2013
Proceedings of the 22nd European, pages 2015--2019.
.. [3] Wheat, K., Cornelissen, P. L., Frost, S.J, and Peter C. Hansen
(2010). During Visual Word Recognition, Phonology Is Accessed
within 100 ms and May Be Mediated by a Speech Production
Code: Evidence from Magnetoencephalography. The Journal of
Neuroscience, 30 (15), 5229-5233.
.. [4] K. A. Jones and B. Porjesz and D. Chorlian and M. Rangaswamy and C.
Kamarajan and A. Padmanabhapillai and A. Stimus and H. Begleiter
(2006). S-transform time-frequency analysis of P300 reveals deficits in
individuals diagnosed with alcoholism.
Clinical Neurophysiology 117 2128--2143
See Also
--------
mne.time_frequency.tfr_stockwell
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_array_morlet
"""
n_epochs, n_channels = data.shape[:2]
n_out = data.shape[2] // decim + bool(data.shape[2] % decim)
data, n_fft_, zero_pad = _check_input_st(data, n_fft)
freqs = fftpack.fftfreq(n_fft_, 1. / sfreq)
if fmin is None:
fmin = freqs[freqs > 0][0]
if fmax is None:
fmax = freqs.max()
start_f = np.abs(freqs - fmin).argmin()
stop_f = np.abs(freqs - fmax).argmin()
freqs = freqs[start_f:stop_f]
W = _precompute_st_windows(data.shape[-1], start_f, stop_f, sfreq, width)
n_freq = stop_f - start_f
psd = np.empty((n_channels, n_freq, n_out))
itc = np.empty((n_channels, n_freq, n_out)) if return_itc else None
parallel, my_st, _ = parallel_func(_st_power_itc, n_jobs)
tfrs = parallel(my_st(data[:, c, :], start_f, return_itc, zero_pad,
decim, W)
for c in range(n_channels))
for c, (this_psd, this_itc) in enumerate(iter(tfrs)):
psd[c] = this_psd
if this_itc is not None:
itc[c] = this_itc
return psd, itc, freqs
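# A minimal usage sketch for tfr_array_stockwell (the values below are
# illustrative, not taken from the MNE documentation):
#
#   rng = np.random.RandomState(0)
#   data = rng.randn(5, 2, 256)  # (n_epochs, n_channels, n_times)
#   power, itc, freqs = tfr_array_stockwell(data, sfreq=128., fmin=1.,
#                                           fmax=40., return_itc=True)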
@verbose
def tfr_stockwell(inst, fmin=None, fmax=None, n_fft=None,
width=1.0, decim=1, return_itc=False, n_jobs=1,
verbose=None):
"""Time-Frequency Representation (TFR) using Stockwell Transform.
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
fmin : None, float
The minimum frequency to include. If None defaults to the minimum fft
frequency greater than zero.
fmax : None, float
The maximum frequency to include. If None defaults to the maximum fft.
n_fft : int | None
The length of the windows used for FFT. If None, it defaults to the
next power of 2 larger than the signal length.
width : float
The width of the Gaussian window. If < 1, increased temporal
resolution, if > 1, increased frequency resolution. Defaults to 1.
(classical S-Transform).
decim : int
The decimation factor on the time axis. To reduce memory usage.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
n_jobs : int
The number of jobs to run in parallel (over channels).
%(verbose)s
Returns
-------
power : AverageTFR
The averaged power.
itc : AverageTFR
The intertrial coherence. Only returned if return_itc is True.
See Also
--------
mne.time_frequency.tfr_array_stockwell
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_morlet
mne.time_frequency.tfr_array_morlet
Notes
-----
.. versionadded:: 0.9.0
"""
# verbose dec is used b/c subfunctions are verbose
data = _get_data(inst, return_itc)
picks = _pick_data_channels(inst.info)
info = pick_info(inst.info, picks)
data = data[:, picks, :]
n_jobs = check_n_jobs(n_jobs)
power, itc, freqs = tfr_array_stockwell(data, sfreq=info['sfreq'],
fmin=fmin, fmax=fmax, n_fft=n_fft,
width=width, decim=decim,
return_itc=return_itc,
n_jobs=n_jobs)
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave, method='stockwell-power')
if return_itc:
out = (out, AverageTFR(deepcopy(info), itc, times.copy(),
freqs.copy(), nave, method='stockwell-itc'))
return out
|
adykstra/mne-python
|
mne/time_frequency/_stockwell.py
|
Python
|
bsd-3-clause
| 10,013
|
[
"Gaussian"
] |
b94fa9ddae26a7dad822985b02a7f499db5abe117de7451f5a17a9ac81bc052f
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 30 21:01:59 2018
@author: mhangaard
"""
import numpy as np
from ase.build import bulk
from catkit.surface import SlabGenerator
from ase.build import add_adsorbate
from ase.data import covalent_radii
from atoml.fingerprint.adsorbate_prep import get_radius, autogen_info
from atoml.fingerprint.setup import FeatureGenerator
from atoml.preprocess.clean_data import clean_infinite
# Define an adsorbate.
ads = 'C'
# Create a bulk.
atoms = bulk('Pd', 'fcc', a=4, cubic=True)
atoms[3].symbol = 'Cu'
# Create some surfaces.
Gen = SlabGenerator(
atoms,
miller_index=[1, 1, 1],
layers=3,
fixed=1,
vacuum=8)
terminations = Gen.get_unique_terminations()
# Loop over unique surface terminations.
images = []
for i, t in enumerate(terminations):
# Get the slab.
slab = Gen.get_slab(iterm=i)
slab.center(axis=2, vacuum=5)
# Add an adsorbate to each slab.
radii = np.average([get_radius(a.number) for a in slab])
height = radii + covalent_radii[6]
add_adsorbate(slab, ads, height, position=slab.get_positions()[-1, :2])
# Usually, the chemical formula (Hill) is available for the adsorbate.
# It will be used by AtoML if attached like so:
slab.info['key_value_pairs'] = {}
slab.info['key_value_pairs']['species'] = ads
# If the adsorbate consists of the same elements as the slab,
# it is preferable to specify the atomic indices belonging to the adsorbate.
slab.info['ads_atoms'] = [12]
# Append them to a list.
images.append(slab)
# If you import from an ase.db, it is recommended to make use of
# atoml.api.ase_atoms_api.database_to_list
# Some information is expected to be attached to atoms objects.
# There are various ways of doing this, but the easiest is to call
AtoML_atoms = autogen_info(images)
# This is where sanity checks on the attached information should be made.
# Instantiate the fingerprint generator for adsorbate structures.
fingerprinter = FeatureGenerator()
# All user methods under the fingerprinter accept an atoms object and
# return a vector.
functions = [fingerprinter.mean_chemisorbed_atoms,
fingerprinter.count_chemisorbed_fragment,
fingerprinter.count_ads_atoms,
fingerprinter.count_ads_bonds,
fingerprinter.ads_av,
fingerprinter.ads_sum,
fingerprinter.bulk,
fingerprinter.term,
fingerprinter.strain,
fingerprinter.mean_surf_ligands,
fingerprinter.mean_site,
fingerprinter.sum_site,
fingerprinter.dbid,
fingerprinter.delta_energy]
# This list is passed on to the following setup functions,
# along with a list of atoms.
# Get and print the names of features.
features_labels = fingerprinter.return_names(functions)
for i, label in enumerate(features_labels):
    print(i, label)
# Get a matrix containing the fingerprints.
unlabeled_data_matrix = fingerprinter.return_vec(AtoML_atoms, functions)
print(np.shape(unlabeled_data_matrix), 'data matrix created.')
# Cleanup in case some of the functions are returning NaNs or Infs
print("Cleaning data.")
clean_data_matrix = clean_infinite(unlabeled_data_matrix)['train']
# Ready for Machine learning.
print(np.shape(clean_data_matrix), 'data matrix returned.')
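# A common optional next step before regression is feature standardization.
# This short sketch (an addition, using numpy only) assumes the downstream
# model benefits from zero-mean, unit-variance inputs:
feature_mean = np.mean(clean_data_matrix, axis=0)
feature_std = np.std(clean_data_matrix, axis=0)
feature_std[feature_std == 0.] = 1.  # guard against constant columns
standardized_matrix = (clean_data_matrix - feature_mean) / feature_std
print(np.shape(standardized_matrix), 'standardized data matrix.')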
|
jboes/CatKit
|
docs/tutorials/9_adsorbate_fingerprints.py
|
Python
|
gpl-3.0
| 3,329
|
[
"ASE"
] |
61d8e50986964575c5c6c930acb244c190baa4df9f1358077d56fd9d52cef1ef
|
import os
import glob
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
version = 'x.y.z'
if os.path.exists('VERSION'):
version = open('VERSION').read().strip()
setup(
name='saffrontree',
version=version,
description='SaffronTree: Reference free rapid phylogenetic tree construction from raw read data',
long_description=read('README.md'),
packages = find_packages(),
package_data={'saffrontree': ['example_data/fastas/*', 'example_data/fastqs/*']},
author='Andrew J. Page',
author_email='path-help@sanger.ac.uk',
url='https://github.com/sanger-pathogens/saffrontree',
scripts=glob.glob('scripts/*'),
test_suite='nose.collector',
tests_require=['nose >= 1.3'],
install_requires=[
'dendropy >= 4.1.0',
'biopython >= 1.68',
'pyfastaq >= 3.12.0',
],
license='GPLv3',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Programming Language :: Python :: 3 :: Only',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)'
],
)
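# Typical commands against this file (standard setuptools usage, not
# project-specific): ``pip install .`` to install from a source checkout,
# or ``python setup.py test`` to run the nose test suite declared above.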
|
andrewjpage/saffrontree
|
setup.py
|
Python
|
gpl-3.0
| 1,270
|
[
"Biopython"
] |
e72c9a8bf04646f48d76af4e57de803b0cd8b6488a3bec223dd523494689bd59
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import warnings
import numpy as np
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import DummySpecies, Element, Species
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.electronic_structure.core import Magmom
from pymatgen.symmetry.structure import SymmetrizedStructure
from pymatgen.io.cif import CifBlock, CifParser, CifWriter
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.util.testing import PymatgenTest
try:
import pybtex
except ImportError:
pybtex = None
class CifBlockTest(PymatgenTest):
def test_to_string(self):
with open(self.TEST_FILES_DIR / "Graphite.cif") as f:
s = f.read()
c = CifBlock.from_string(s)
cif_str_2 = str(CifBlock.from_string(str(c)))
cif_str = """data_53781-ICSD
_database_code_ICSD 53781
_audit_creation_date 2003-04-01
_audit_update_record 2013-02-01
_chemical_name_systematic Carbon
_chemical_formula_structural C
_chemical_formula_sum C1
_chemical_name_structure_type Graphite(2H)
_chemical_name_mineral 'Graphite 2H'
_exptl_crystal_density_diffrn 2.22
_publ_section_title 'Structure of graphite'
loop_
_citation_id
_citation_journal_full
_citation_year
_citation_journal_volume
_citation_page_first
_citation_page_last
_citation_journal_id_ASTM
primary 'Physical Review (1,1893-132,1963/141,1966-188,1969)'
1917 10 661 696 PHRVAO
loop_
_publ_author_name
'Hull, A.W.'
_cell_length_a 2.47
_cell_length_b 2.47
_cell_length_c 6.8
_cell_angle_alpha 90.
_cell_angle_beta 90.
_cell_angle_gamma 120.
_cell_volume 35.93
_cell_formula_units_Z 4
_symmetry_space_group_name_H-M 'P 63/m m c'
_symmetry_Int_Tables_number 194
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, x-y, -z+1/2'
2 '-x+y, y, -z+1/2'
3 '-y, -x, -z+1/2'
4 '-x+y, -x, -z+1/2'
5 '-y, x-y, -z+1/2'
6 'x, y, -z+1/2'
7 '-x, -x+y, z+1/2'
8 'x-y, -y, z+1/2'
9 'y, x, z+1/2'
10 'x-y, x, z+1/2'
11 'y, -x+y, z+1/2'
12 '-x, -y, z+1/2'
13 '-x, -x+y, -z'
14 'x-y, -y, -z'
15 'y, x, -z'
16 'x-y, x, -z'
17 'y, -x+y, -z'
18 '-x, -y, -z'
19 'x, x-y, z'
20 '-x+y, y, z'
21 '-y, -x, z'
22 '-x+y, -x, z'
23 '-y, x-y, z'
24 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
C0+ 0
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_symmetry_multiplicity
_atom_site_Wyckoff_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_B_iso_or_equiv
_atom_site_occupancy
_atom_site_attached_hydrogens
C1 C0+ 2 b 0 0 0.25 . 1. 0
C2 C0+ 2 c 0.3333 0.6667 0.25 . 1. 0"""
for l1, l2, l3 in zip(str(c).split("\n"), cif_str.split("\n"), cif_str_2.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
self.assertEqual(l2.strip(), l3.strip())
def test_double_quotes_and_underscore_data(self):
cif_str = """data_test
_symmetry_space_group_name_H-M "P -3 m 1"
_thing '_annoying_data'"""
cb = CifBlock.from_string(cif_str)
self.assertEqual(cb["_symmetry_space_group_name_H-M"], "P -3 m 1")
self.assertEqual(cb["_thing"], "_annoying_data")
self.assertEqual(str(cb), cif_str.replace('"', "'"))
def test_double_quoted_data(self):
cif_str = """data_test
_thing ' '_annoying_data''
_other " "_more_annoying_data""
_more ' "even more" ' """
cb = CifBlock.from_string(cif_str)
self.assertEqual(cb["_thing"], " '_annoying_data'")
self.assertEqual(cb["_other"], ' "_more_annoying_data"')
self.assertEqual(cb["_more"], ' "even more" ')
def test_nested_fake_multiline_quotes(self):
cif_str = """data_test
_thing
;
long quotes
;
still in the quote
;
actually going to end now
;"""
cb = CifBlock.from_string(cif_str)
self.assertEqual(
cb["_thing"],
" long quotes ; still in the quote" " ; actually going to end now",
)
def test_long_loop(self):
data = {
"_stuff1": ["A" * 30] * 2,
"_stuff2": ["B" * 30] * 2,
"_stuff3": ["C" * 30] * 2,
}
loops = [["_stuff1", "_stuff2", "_stuff3"]]
cif_str = """data_test
loop_
_stuff1
_stuff2
_stuff3
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"""
self.assertEqual(str(CifBlock(data, loops, "test")), cif_str)
class CifIOTest(PymatgenTest):
def test_CifParser(self):
parser = CifParser(self.TEST_FILES_DIR / "LiFePO4.cif")
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Li4 Fe4 P4 O16", "Incorrectly parsed cif.")
parser = CifParser(self.TEST_FILES_DIR / "V2O3.cif")
for s in parser.get_structures(True):
self.assertEqual(s.formula, "V4 O6")
bibtex_str = """
@article{cifref0,
author = "Andersson, G.",
title = "Studies on vanadium oxides. I. Phase analysis",
journal = "Acta Chemica Scandinavica (1-27,1973-42,1988)",
volume = "8",
year = "1954",
pages = "1599--1606"
}
"""
self.assertEqual(parser.get_bibtex_string().strip(), bibtex_str.strip())
parser = CifParser(self.TEST_FILES_DIR / "Li2O.cif")
prim = parser.get_structures(True)[0]
self.assertEqual(prim.formula, "Li2 O1")
conv = parser.get_structures(False)[0]
self.assertEqual(conv.formula, "Li8 O4")
# test for disordered structures
parser = CifParser(self.TEST_FILES_DIR / "Li10GeP2S12.cif")
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Li20.2 Ge2.06 P3.94 S24", "Incorrectly parsed cif.")
cif_str = r"""#\#CIF1.1
##########################################################################
# Crystallographic Information Format file
# Produced by PyCifRW module
#
# This is a CIF file. CIF has been adopted by the International
# Union of Crystallography as the standard for data archiving and
# transmission.
#
# For information on this file format, follow the CIF links at
# http://www.iucr.org
##########################################################################
data_FePO4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 10.4117668699
_cell_length_b 6.06717187997
_cell_length_c 4.75948953998
loop_ # sometimes this is in a loop (incorrectly)
_cell_angle_alpha
91.0
_cell_angle_beta 92.0
_cell_angle_gamma 93.0
_chemical_name_systematic 'Generated by pymatgen'
_symmetry_Int_Tables_number 1
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_attached_hydrogens
_atom_site_B_iso_or_equiv
_atom_site_occupancy
Fe Fe1 1 0.218728 0.750000 0.474867 0 . 1
Fe JJ2 1 0.281272 0.250000 0.974867 0 . 1
# there's a typo here, parser should read the symbol from the
# _atom_site_type_symbol
Fe Fe3 1 0.718728 0.750000 0.025133 0 . 1
Fe Fe4 1 0.781272 0.250000 0.525133 0 . 1
P P5 1 0.094613 0.250000 0.418243 0 . 1
P P6 1 0.405387 0.750000 0.918243 0 . 1
P P7 1 0.594613 0.250000 0.081757 0 . 1
P P8 1 0.905387 0.750000 0.581757 0 . 1
O O9 1 0.043372 0.750000 0.707138 0 . 1
O O10 1 0.096642 0.250000 0.741320 0 . 1
O O11 1 0.165710 0.046072 0.285384 0 . 1
O O12 1 0.165710 0.453928 0.285384 0 . 1
O O13 1 0.334290 0.546072 0.785384 0 . 1
O O14 1 0.334290 0.953928 0.785384 0 . 1
O O15 1 0.403358 0.750000 0.241320 0 . 1
O O16 1 0.456628 0.250000 0.207138 0 . 1
O O17 1 0.543372 0.750000 0.792862 0 . 1
O O18 1 0.596642 0.250000 0.758680 0 . 1
O O19 1 0.665710 0.046072 0.214616 0 . 1
O O20 1 0.665710 0.453928 0.214616 0 . 1
O O21 1 0.834290 0.546072 0.714616 0 . 1
O O22 1 0.834290 0.953928 0.714616 0 . 1
O O23 1 0.903358 0.750000 0.258680 0 . 1
O O24 1 0.956628 0.250000 0.292862 0 . 1
"""
parser = CifParser.from_string(cif_str)
struct = parser.get_structures(primitive=False)[0]
self.assertEqual(struct.formula, "Fe4 P4 O16")
self.assertAlmostEqual(struct.lattice.a, 10.4117668699)
self.assertAlmostEqual(struct.lattice.b, 6.06717187997)
self.assertAlmostEqual(struct.lattice.c, 4.75948953998)
self.assertAlmostEqual(struct.lattice.alpha, 91)
self.assertAlmostEqual(struct.lattice.beta, 92)
self.assertAlmostEqual(struct.lattice.gamma, 93)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(self.TEST_FILES_DIR / "srycoo.cif")
self.assertEqual(parser.get_structures()[0].formula, "Sr5.6 Y2.4 Co8 O21")
# Test with a CIF whose symmetry operation uses decimal x, y, z values.
# This should parse as two atoms in the conventional cell if correct, one if not.
parser = CifParser(self.TEST_FILES_DIR / "Fe.cif")
self.assertEqual(len(parser.get_structures(primitive=False)[0]), 2)
self.assertFalse(parser.has_errors)
def test_get_symmetrized_structure(self):
parser = CifParser(self.TEST_FILES_DIR / "Li2O.cif")
sym_structure = parser.get_structures(primitive=False, symmetrized=True)[0]
structure = parser.get_structures(primitive=False, symmetrized=False)[0]
self.assertIsInstance(sym_structure, SymmetrizedStructure)
self.assertEqual(structure, sym_structure)
self.assertEqual(sym_structure.equivalent_indices, [[0, 1, 2, 3], [4, 5, 6, 7, 8, 9, 10, 11]])
def test_site_symbol_preference(self):
parser = CifParser(self.TEST_FILES_DIR / "site_type_symbol_test.cif")
self.assertEqual(parser.get_structures()[0].formula, "Ge0.4 Sb0.4 Te1")
def test_implicit_hydrogen(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(self.TEST_FILES_DIR / "Senegalite_implicit_hydrogen.cif")
for s in parser.get_structures():
self.assertEqual(s.formula, "Al8 P4 O32")
self.assertEqual(sum(s.site_properties["implicit_hydrogens"]), 20)
self.assertIn(
"Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added.",
parser.warnings,
)
parser = CifParser(self.TEST_FILES_DIR / "cif_implicit_hydrogens_cod_1011130.cif")
s = parser.get_structures()[0]
self.assertIn(
"Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added.",
parser.warnings,
)
def test_CifParserSpringerPauling(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Below are 10 tests for CIFs from the Springer Materials/Pauling file DBs.
# Partial occupancy on sites, incorrect label, previously unparsable
parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1928405.cif")
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Er1 Mn3.888 Fe2.112 Sn6")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, previously parsed as an ordered structure
parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1011081.cif")
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Zr0.2 Nb0.8")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, incorrect label, previously unparsable
parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1615854.cif")
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Na2 Al2 Si6 O16")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, incorrect label, previously unparsable
parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1622133.cif")
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Ca0.184 Mg13.016 Fe2.8 Si16 O48")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, previously parsed as an ordered structure
parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1908491.cif")
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Mn0.48 Zn0.52 Ga2 Se4")
self.assertTrue(parser.has_errors)
# Partial occupancy on sites, incorrect label, previously unparsable
parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1811457.cif")
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Ba2 Mg0.6 Zr0.2 Ta1.2 O6")
self.assertTrue(parser.has_errors)
# Incomplete powder diffraction data, previously unparsable
# This CIF file contains the molecular species "NH3", which would normally
# be parsed as "N" because the label is "N{x}" (x = 1, 2, ...) and the
# corresponding symbol is "NH3". Since the label and symbol are switched
# in CIFs from Springer Materials/Pauling file DBs, CifParser parses the
# element as "Nh" (Nihonium).
parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1002871.cif")
self.assertEqual(parser.get_structures(True)[0].formula, "Cu1 Br2 Nh6")
self.assertEqual(parser.get_structures(True)[1].formula, "Cu1 Br4 Nh6")
self.assertTrue(parser.has_errors)
# Incomplete powder diffraction data, previously unparsable
parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1704003.cif")
for s in parser.get_structures():
self.assertEqual(s.formula, "Rb4 Mn2 F12")
self.assertTrue(parser.has_errors)
# Unparsable species 'OH/OH2', previously parsed as "O"
parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1500382.cif")
for s in parser.get_structures():
self.assertEqual(s.formula, "Mg6 B2 O6 F1.764")
self.assertTrue(parser.has_errors)
# Unparsable species 'OH/OH2', previously parsed as "O"
parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1601634.cif")
for s in parser.get_structures():
self.assertEqual(s.formula, "Zn1.29 Fe0.69 As2 Pb1.02 O8")
def test_CifParserCod(self):
"""
Parsing problematic cif files from the COD database
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Symbol in capital letters
parser = CifParser(self.TEST_FILES_DIR / "Cod_2100513.cif")
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Ca4 Nb2.0 Al2 O12")
# Label in capital letters
parser = CifParser(self.TEST_FILES_DIR / "Cod_4115344.cif")
for s in parser.get_structures(True):
self.assertEqual(s.formula, "Mo4 P2 H60 C60 I4 O4")
def test_parse_symbol(self):
"""
Test the _parse_symbol function with several potentially
problematic examples of symbols and labels.
"""
test_cases = {
"MgT": "Mg",
"MgT1": "Mg",
"H(46A)": "H",
"O(M)": "O",
"N(Am)": "N",
"H1N2a": "H",
"CO(1)": "Co",
"Wat1": "O",
"MgM2A": "Mg",
"CaX": "Ca",
"X1": "X",
"X": "X",
"OA1": "O",
"NaA2": "Na",
"O-H2": "O",
"OD2": "O",
"OW": "O",
"SiT": "Si",
"SiTet": "Si",
"Na-Int": "Na",
"CaD1": "Ca",
"KAm": "K",
"D+1": "D",
"D": "D",
"D1-": "D",
"D4": "D",
"D0": "D",
"NH": "Nh",
"NH2": "Nh",
"NH3": "Nh",
"SH": "S",
}
for e in Element:
name = e.name
test_cases[name] = name
if len(name) == 2:
test_cases[name.upper()] = name
test_cases[name.upper() + str(1)] = name
test_cases[name.upper() + "A"] = name
test_cases[name + str(1)] = name
test_cases[name + str(2)] = name
test_cases[name + str(3)] = name
test_cases[name + str(1) + "A"] = name
special = {"Hw": "H", "Ow": "O", "Wat": "O", "wat": "O", "OH": "", "OH2": ""}
test_cases.update(special)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(self.TEST_FILES_DIR / "LiFePO4.cif")
for sym, expected_symbol in test_cases.items():
self.assertEqual(parser._parse_symbol(sym), expected_symbol)
def test_CifWriter(self):
filepath = self.TEST_FILES_DIR / "POSCAR"
poscar = Poscar.from_file(filepath)
writer = CifWriter(poscar.structure, symprec=0.01)
ans = """# generated using pymatgen
data_FePO4
_symmetry_space_group_name_H-M Pnma
_cell_length_a 10.41176687
_cell_length_b 6.06717188
_cell_length_c 4.75948954
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 62
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
2 '-x, -y, -z'
3 '-x+1/2, -y, z+1/2'
4 'x+1/2, y, -z+1/2'
5 'x+1/2, -y+1/2, -z+1/2'
6 '-x+1/2, y+1/2, z+1/2'
7 '-x, y+1/2, -z'
8 'x, -y+1/2, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Fe Fe0 4 0.21872822 0.75000000 0.47486711 1
P P1 4 0.09461309 0.25000000 0.41824327 1
O O2 8 0.16570974 0.04607233 0.28538394 1
O O3 4 0.04337231 0.75000000 0.70713767 1
O O4 4 0.09664244 0.25000000 0.74132035 1"""
for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
def test_symmetrized(self):
filepath = self.TEST_FILES_DIR / "POSCAR"
poscar = Poscar.from_file(filepath, check_for_POTCAR=False)
writer = CifWriter(poscar.structure, symprec=0.1)
ans = """# generated using pymatgen
data_FePO4
_symmetry_space_group_name_H-M Pnma
_cell_length_a 10.41176687
_cell_length_b 6.06717188
_cell_length_c 4.75948954
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 62
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
2 '-x, -y, -z'
3 '-x+1/2, -y, z+1/2'
4 'x+1/2, y, -z+1/2'
5 'x+1/2, -y+1/2, -z+1/2'
6 '-x+1/2, y+1/2, z+1/2'
7 '-x, y+1/2, -z'
8 'x, -y+1/2, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Fe Fe1 4 0.218728 0.250000 0.525133 1
P P2 4 0.094613 0.750000 0.581757 1
O O3 8 0.165710 0.546072 0.714616 1
O O4 4 0.043372 0.250000 0.292862 1
O O5 4 0.096642 0.750000 0.258680 1"""
cif = CifParser.from_string(str(writer))
m = StructureMatcher()
self.assertTrue(m.fit(cif.get_structures()[0], poscar.structure))
# for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
# self.assertEqual(l1.strip(), l2.strip())
ans = """# generated using pymatgen
data_LiFePO4
_symmetry_space_group_name_H-M Pnma
_cell_length_a 10.41037000
_cell_length_b 6.06577000
_cell_length_c 4.74480000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 62
_chemical_formula_structural LiFePO4
_chemical_formula_sum 'Li4 Fe4 P4 O16'
_cell_volume 299.619458734
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
2 '-x, -y, -z'
3 '-x+1/2, -y, z+1/2'
4 'x+1/2, y, -z+1/2'
5 'x+1/2, -y+1/2, -z+1/2'
6 '-x+1/2, y+1/2, z+1/2'
7 '-x, y+1/2, -z'
8 'x, -y+1/2, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Li Li1 4 0.000000 0.000000 0.000000 1.0
Fe Fe2 4 0.218845 0.750000 0.474910 1.0
P P3 4 0.094445 0.250000 0.417920 1.0
O O4 8 0.165815 0.044060 0.286540 1.0
O O5 4 0.043155 0.750000 0.708460 1.0
O O6 4 0.096215 0.250000 0.741480 1.0
"""
s = Structure.from_file(self.TEST_FILES_DIR / "LiFePO4.cif")
writer = CifWriter(s, symprec=0.1)
s2 = CifParser.from_string(str(writer)).get_structures()[0]
self.assertTrue(m.fit(s, s2))
s = self.get_structure("Li2O")
writer = CifWriter(s, symprec=0.1)
s2 = CifParser.from_string(str(writer)).get_structures()[0]
self.assertTrue(m.fit(s, s2))
# test angle tolerance.
s = Structure.from_file(self.TEST_FILES_DIR / "LiFePO4.cif")
writer = CifWriter(s, symprec=0.1, angle_tolerance=0)
d = list(writer.ciffile.data.values())[0]
self.assertEqual(d["_symmetry_Int_Tables_number"], 14)
s = Structure.from_file(self.TEST_FILES_DIR / "LiFePO4.cif")
writer = CifWriter(s, symprec=0.1, angle_tolerance=2)
d = list(writer.ciffile.data.values())[0]
self.assertEqual(d["_symmetry_Int_Tables_number"], 62)
def test_disordered(self):
si = Element("Si")
n = Element("N")
coords = list()
coords.append(np.array([0, 0, 0]))
coords.append(np.array([0.75, 0.5, 0.75]))
lattice = Lattice(
np.array(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
)
struct = Structure(lattice, [si, {si: 0.5, n: 0.5}], coords)
writer = CifWriter(struct)
ans = """# generated using pymatgen
data_Si1.5N0.5
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 3.84019793
_cell_length_b 3.84019899
_cell_length_c 3.84019793
_cell_angle_alpha 119.99999086
_cell_angle_beta 90.00000000
_cell_angle_gamma 60.00000914
_symmetry_Int_Tables_number 1
_chemical_formula_structural Si1.5N0.5
_chemical_formula_sum 'Si1.5 N0.5'
_cell_volume 40.04479464
_cell_formula_units_Z 1
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Si Si0 1 0.00000000 0.00000000 0.00000000 1
Si Si1 1 0.75000000 0.50000000 0.75000000 0.5
N N2 1 0.75000000 0.50000000 0.75000000 0.5"""
for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
def test_cifwrite_without_refinement(self):
si2 = Structure.from_file(self.TEST_FILES_DIR / "abinit" / "si.cif")
writer = CifWriter(si2, symprec=1e-3, significant_figures=10, refine_struct=False)
s = str(writer)
assert "Fd-3m" in s
same_si2 = CifParser.from_string(s).get_structures()[0]
assert len(si2) == len(same_si2)
def test_specie_cifwriter(self):
si4 = Species("Si", 4)
si3 = Species("Si", 3)
n = DummySpecies("X", -3)
coords = list()
coords.append(np.array([0.5, 0.5, 0.5]))
coords.append(np.array([0.75, 0.5, 0.75]))
coords.append(np.array([0, 0, 0]))
lattice = Lattice(
np.array(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
)
struct = Structure(lattice, [n, {si3: 0.5, n: 0.5}, si4], coords)
writer = CifWriter(struct)
ans = """# generated using pymatgen
data_X1.5Si1.5
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 3.84019793
_cell_length_b 3.84019899
_cell_length_c 3.84019793
_cell_angle_alpha 119.99999086
_cell_angle_beta 90.00000000
_cell_angle_gamma 60.00000914
_symmetry_Int_Tables_number 1
_chemical_formula_structural X1.5Si1.5
_chemical_formula_sum 'X1.5 Si1.5'
_cell_volume 40.04479464
_cell_formula_units_Z 1
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
X3- -3.0
Si3+ 3.0
Si4+ 4.0
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
X3- X0 1 0.50000000 0.50000000 0.50000000 1
X3- X1 1 0.75000000 0.50000000 0.75000000 0.5
Si3+ Si2 1 0.75000000 0.50000000 0.75000000 0.5
Si4+ Si3 1 0.00000000 0.00000000 0.00000000 1
"""
for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
self.assertEqual(l1.strip(), l2.strip())
# test that mixed valence works properly
s2 = Structure.from_str(ans, "cif")
self.assertEqual(struct.composition, s2.composition)
def test_primes(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(self.TEST_FILES_DIR / "C26H16BeN2O2S2.cif")
for s in parser.get_structures(False):
self.assertEqual(s.composition, 8 * Composition("C26H16BeN2O2S2"))
def test_missing_atom_site_type_with_oxistates(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
parser = CifParser(self.TEST_FILES_DIR / "P24Ru4H252C296S24N16.cif")
c = Composition({"S0+": 24, "Ru0+": 4, "H0+": 252, "C0+": 296, "N0+": 16, "P0+": 24})
for s in parser.get_structures(False):
self.assertEqual(s.composition, c)
def test_no_coords_or_species(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
string = """#generated using pymatgen
data_Si1.5N1.5
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 3.84019793
_cell_length_b 3.84019899
_cell_length_c 3.84019793
_cell_angle_alpha 119.99999086
_cell_angle_beta 90.00000000
_cell_angle_gamma 60.00000914
_symmetry_Int_Tables_number 1
_chemical_formula_structural Si1.5N1.5
_chemical_formula_sum 'Si1.5 N1.5'
_cell_volume 40.0447946443
_cell_formula_units_Z 0
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
Si3+ 3.0
Si4+ 4.0
N3- -3.0
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
? ? ? ? ? ? ?
"""
parser = CifParser.from_string(string)
self.assertRaises(ValueError, parser.get_structures)
def test_get_lattice_from_lattice_type(self):
cif_structure = """#generated using pymatgen
data_FePO4
_symmetry_space_group_name_H-M Pnma
_cell_length_a 10.41176687
_cell_length_b 6.06717188
_cell_length_c 4.75948954
_chemical_formula_structural FePO4
_chemical_formula_sum 'Fe4 P4 O16'
_cell_volume 300.65685512
_cell_formula_units_Z 4
_symmetry_cell_setting Orthorhombic
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Fe Fe1 1 0.218728 0.750000 0.474867 1
Fe Fe2 1 0.281272 0.250000 0.974867 1
Fe Fe3 1 0.718728 0.750000 0.025133 1
Fe Fe4 1 0.781272 0.250000 0.525133 1
P P5 1 0.094613 0.250000 0.418243 1
P P6 1 0.405387 0.750000 0.918243 1
P P7 1 0.594613 0.250000 0.081757 1
P P8 1 0.905387 0.750000 0.581757 1
O O9 1 0.043372 0.750000 0.707138 1
O O10 1 0.096642 0.250000 0.741320 1
O O11 1 0.165710 0.046072 0.285384 1
O O12 1 0.165710 0.453928 0.285384 1
O O13 1 0.334290 0.546072 0.785384 1
O O14 1 0.334290 0.953928 0.785384 1
O O15 1 0.403358 0.750000 0.241320 1
O O16 1 0.456628 0.250000 0.207138 1
O O17 1 0.543372 0.750000 0.792862 1
O O18 1 0.596642 0.250000 0.758680 1
O O19 1 0.665710 0.046072 0.214616 1
O O20 1 0.665710 0.453928 0.214616 1
O O21 1 0.834290 0.546072 0.714616 1
O O22 1 0.834290 0.953928 0.714616 1
O O23 1 0.903358 0.750000 0.258680 1
O O24 1 0.956628 0.250000 0.292862 1
"""
cp = CifParser.from_string(cif_structure)
s_test = cp.get_structures(False)[0]
filepath = self.TEST_FILES_DIR / "POSCAR"
poscar = Poscar.from_file(filepath)
s_ref = poscar.structure
sm = StructureMatcher(stol=0.05, ltol=0.01, angle_tol=0.1)
self.assertTrue(sm.fit(s_ref, s_test))
def test_empty(self):
# single line
cb = CifBlock.from_string("data_mwe\nloop_\n_tag\n ''")
self.assertEqual(cb.data["_tag"][0], "")
# multi line
cb = CifBlock.from_string("data_mwe\nloop_\n_tag\n;\n;")
self.assertEqual(cb.data["_tag"][0], "")
cb2 = CifBlock.from_string(str(cb))
self.assertEqual(cb, cb2)
def test_bad_cif(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
f = self.TEST_FILES_DIR / "bad_occu.cif"
p = CifParser(f)
self.assertRaises(ValueError, p.get_structures)
p = CifParser(f, occupancy_tolerance=2)
s = p.get_structures()[0]
self.assertAlmostEqual(s[0].species["Al3+"], 0.5)
def test_one_line_symm(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
f = self.TEST_FILES_DIR / "OneLineSymmP1.cif"
p = CifParser(f)
s = p.get_structures()[0]
self.assertEqual(s.formula, "Ga4 Pb2 O8")
def test_no_symmops(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
f = self.TEST_FILES_DIR / "nosymm.cif"
p = CifParser(f)
s = p.get_structures()[0]
self.assertEqual(s.formula, "H96 C60 O8")
def test_dot_positions(self):
f = self.TEST_FILES_DIR / "ICSD59959.cif"
p = CifParser(f)
s = p.get_structures()[0]
self.assertEqual(s.formula, "K1 Mn1 F3")
def test_replacing_finite_precision_frac_coords(self):
f = self.TEST_FILES_DIR / "cif_finite_precision_frac_coord_error.cif"
with warnings.catch_warnings():
p = CifParser(f)
s = p.get_structures()[0]
self.assertEqual(str(s.composition), "N5+24")
self.assertIn(
"Some fractional co-ordinates rounded to ideal " "values to avoid issues with finite precision.",
p.warnings,
)
def test_empty_deque(self):
s = """data_1526655
_journal_name_full
_space_group_IT_number 227
_symmetry_space_group_name_Hall 'F 4d 2 3 -1d'
_symmetry_space_group_name_H-M 'F d -3 m :1'
_cell_angle_alpha 90
_cell_angle_beta 90
_cell_angle_gamma 90
_cell_formula_units_Z 8
_cell_length_a 5.381
_cell_length_b 5.381
_cell_length_c 5.381
_cell_volume 155.808
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
_atom_site_U_iso_or_equiv
Si1 Si 0 0 0 1 0.0
_iucr_refine_fcf_details
;
data_symmetries
loop_
_space_group_symop_id
_space_group_symop_operation_xyz
1 x,y,z
2 -x+1/2,y+1/2,-z+1/2
3 -x,-y,-z
4 x-1/2,-y-1/2,z-1/2
;"""
p = CifParser.from_string(s)
self.assertEqual(p.get_structures()[0].formula, "Si1")
cif = """
data_1526655
_journal_name_full
_space_group_IT_number 227
_symmetry_space_group_name_Hall 'F 4d 2 3 -1d'
_symmetry_space_group_name_H-M 'F d -3 m :1'
_cell_angle_alpha 90
_cell_angle_beta 90
_cell_angle_gamma 90
_cell_formula_units_Z 8
_cell_length_a 5.381
_cell_length_b 5.381
_cell_length_c 5.381
_cell_volume 155.808
_iucr_refine_fcf_details
;
data_symmetries
Some arbitrary multiline string
;
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
_atom_site_U_iso_or_equiv
Si1 Si 0 0 0 1 0.0
"""
p = CifParser.from_string(cif)
self.assertRaises(ValueError, p.get_structures)
class MagCifTest(PymatgenTest):
def setUp(self):
warnings.filterwarnings("ignore")
self.mcif = CifParser(self.TEST_FILES_DIR / "magnetic.example.NiO.mcif")
self.mcif_ncl = CifParser(self.TEST_FILES_DIR / "magnetic.ncl.example.GdB4.mcif")
self.mcif_incom = CifParser(self.TEST_FILES_DIR / "magnetic.incommensurate.example.Cr.mcif")
self.mcif_disord = CifParser(self.TEST_FILES_DIR / "magnetic.disordered.example.CuMnO2.mcif")
self.mcif_ncl2 = CifParser(self.TEST_FILES_DIR / "Mn3Ge_IR2.mcif")
def tearDown(self):
warnings.simplefilter("default")
def test_mcif_detection(self):
self.assertTrue(self.mcif.feature_flags["magcif"])
self.assertTrue(self.mcif_ncl.feature_flags["magcif"])
self.assertTrue(self.mcif_incom.feature_flags["magcif"])
self.assertTrue(self.mcif_disord.feature_flags["magcif"])
self.assertFalse(self.mcif.feature_flags["magcif_incommensurate"])
self.assertFalse(self.mcif_ncl.feature_flags["magcif_incommensurate"])
self.assertTrue(self.mcif_incom.feature_flags["magcif_incommensurate"])
self.assertFalse(self.mcif_disord.feature_flags["magcif_incommensurate"])
def test_get_structures(self):
# incommensurate structures not currently supported
self.assertRaises(NotImplementedError, self.mcif_incom.get_structures)
# disordered magnetic structures not currently supported
self.assertRaises(NotImplementedError, self.mcif_disord.get_structures)
# taken from self.mcif_ncl, removing explicit magnetic symmops
# so that MagneticSymmetryGroup() has to be invoked
magcifstr = """
data_5yOhtAoR
_space_group.magn_name_BNS "P 4/m' b' m' "
_cell_length_a 7.1316
_cell_length_b 7.1316
_cell_length_c 4.0505
_cell_angle_alpha 90.00
_cell_angle_beta 90.00
_cell_angle_gamma 90.00
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd1 Gd 0.31746 0.81746 0.00000 1
B1 B 0.00000 0.00000 0.20290 1
B2 B 0.17590 0.03800 0.50000 1
B3 B 0.08670 0.58670 0.50000 1
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd1 5.05 5.05 0.0"""
s = self.mcif.get_structures(primitive=False)[0]
self.assertEqual(s.formula, "Ni32 O32")
self.assertTrue(Magmom.are_collinear(s.site_properties["magmom"]))
# example with non-collinear spin
s_ncl = self.mcif_ncl.get_structures(primitive=False)[0]
s_ncl_from_msg = CifParser.from_string(magcifstr).get_structures(primitive=False)[0]
self.assertEqual(s_ncl.formula, "Gd4 B16")
self.assertFalse(Magmom.are_collinear(s_ncl.site_properties["magmom"]))
self.assertTrue(s_ncl.matches(s_ncl_from_msg))
def test_write(self):
cw_ref_string = """# generated using pymatgen
data_GdB4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 7.13160000
_cell_length_b 7.13160000
_cell_length_c 4.05050000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 1
_chemical_formula_structural GdB4
_chemical_formula_sum 'Gd4 B16'
_cell_volume 206.00729003
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd Gd0 1 0.31746000 0.81746000 0.00000000 1.0
Gd Gd1 1 0.18254000 0.31746000 0.00000000 1.0
Gd Gd2 1 0.81746000 0.68254000 0.00000000 1.0
Gd Gd3 1 0.68254000 0.18254000 0.00000000 1.0
B B4 1 0.00000000 0.00000000 0.20290000 1.0
B B5 1 0.50000000 0.50000000 0.79710000 1.0
B B6 1 0.00000000 0.00000000 0.79710000 1.0
B B7 1 0.50000000 0.50000000 0.20290000 1.0
B B8 1 0.17590000 0.03800000 0.50000000 1.0
B B9 1 0.96200000 0.17590000 0.50000000 1.0
B B10 1 0.03800000 0.82410000 0.50000000 1.0
B B11 1 0.67590000 0.46200000 0.50000000 1.0
B B12 1 0.32410000 0.53800000 0.50000000 1.0
B B13 1 0.82410000 0.96200000 0.50000000 1.0
B B14 1 0.53800000 0.67590000 0.50000000 1.0
B B15 1 0.46200000 0.32410000 0.50000000 1.0
B B16 1 0.08670000 0.58670000 0.50000000 1.0
B B17 1 0.41330000 0.08670000 0.50000000 1.0
B B18 1 0.58670000 0.91330000 0.50000000 1.0
B B19 1 0.91330000 0.41330000 0.50000000 1.0
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd0 5.05000000 5.05000000 0.00000000
Gd1 -5.05000000 5.05000000 0.00000000
Gd2 5.05000000 -5.05000000 0.00000000
Gd3 -5.05000000 -5.05000000 0.00000000
"""
s_ncl = self.mcif_ncl.get_structures(primitive=False)[0]
cw = CifWriter(s_ncl, write_magmoms=True)
self.assertEqual(cw.__str__(), cw_ref_string)
# from list-type magmoms
list_magmoms = [list(m) for m in s_ncl.site_properties["magmom"]]
# float magmoms (magnitude only)
float_magmoms = [float(m) for m in s_ncl.site_properties["magmom"]]
s_ncl.add_site_property("magmom", list_magmoms)
cw = CifWriter(s_ncl, write_magmoms=True)
self.assertEqual(cw.__str__(), cw_ref_string)
s_ncl.add_site_property("magmom", float_magmoms)
cw = CifWriter(s_ncl, write_magmoms=True)
cw_ref_string_magnitudes = """# generated using pymatgen
data_GdB4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 7.13160000
_cell_length_b 7.13160000
_cell_length_c 4.05050000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 1
_chemical_formula_structural GdB4
_chemical_formula_sum 'Gd4 B16'
_cell_volume 206.00729003
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd Gd0 1 0.31746000 0.81746000 0.00000000 1.0
Gd Gd1 1 0.18254000 0.31746000 0.00000000 1.0
Gd Gd2 1 0.81746000 0.68254000 0.00000000 1.0
Gd Gd3 1 0.68254000 0.18254000 0.00000000 1.0
B B4 1 0.00000000 0.00000000 0.20290000 1.0
B B5 1 0.50000000 0.50000000 0.79710000 1.0
B B6 1 0.00000000 0.00000000 0.79710000 1.0
B B7 1 0.50000000 0.50000000 0.20290000 1.0
B B8 1 0.17590000 0.03800000 0.50000000 1.0
B B9 1 0.96200000 0.17590000 0.50000000 1.0
B B10 1 0.03800000 0.82410000 0.50000000 1.0
B B11 1 0.67590000 0.46200000 0.50000000 1.0
B B12 1 0.32410000 0.53800000 0.50000000 1.0
B B13 1 0.82410000 0.96200000 0.50000000 1.0
B B14 1 0.53800000 0.67590000 0.50000000 1.0
B B15 1 0.46200000 0.32410000 0.50000000 1.0
B B16 1 0.08670000 0.58670000 0.50000000 1.0
B B17 1 0.41330000 0.08670000 0.50000000 1.0
B B18 1 0.58670000 0.91330000 0.50000000 1.0
B B19 1 0.91330000 0.41330000 0.50000000 1.0
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd0 0.00000000 0.00000000 7.14177849
Gd1 0.00000000 0.00000000 7.14177849
Gd2 0.00000000 0.00000000 -7.14177849
Gd3 0.00000000 0.00000000 -7.14177849
"""
self.assertEqual(cw.__str__().strip(), cw_ref_string_magnitudes.strip())
# test we're getting correct magmoms in ncl case
s_ncl2 = self.mcif_ncl2.get_structures()[0]
list_magmoms = [list(m) for m in s_ncl2.site_properties["magmom"]]
self.assertEqual(list_magmoms[0][0], 0.0)
self.assertAlmostEqual(list_magmoms[0][1], 5.9160793408726366)
self.assertAlmostEqual(list_magmoms[1][0], -5.1234749999999991)
self.assertAlmostEqual(list_magmoms[1][1], 2.9580396704363183)
# test that creating a structure without oxidation states doesn't raise errors
s_manual = Structure(Lattice.cubic(4.2), ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
s_manual.add_spin_by_site([1, -1])
cw = CifWriter(s_manual, write_magmoms=True)
# check oxidation state
cw_manual_oxi_string = """# generated using pymatgen
data_CsCl
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 4.20000000
_cell_length_b 4.20000000
_cell_length_c 4.20000000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 1
_chemical_formula_structural CsCl
_chemical_formula_sum 'Cs1 Cl1'
_cell_volume 74.08800000
_cell_formula_units_Z 1
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
Cs+ 1.0
Cl+ 1.0
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Cs+ Cs0 1 0.00000000 0.00000000 0.00000000 1
Cl+ Cl1 1 0.50000000 0.50000000 0.50000000 1
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
"""
s_manual.add_oxidation_state_by_site([1, 1])
cw = CifWriter(s_manual, write_magmoms=True)
self.assertEqual(cw.__str__(), cw_manual_oxi_string)
@unittest.skipIf(pybtex is None, "pybtex not present")
def test_bibtex(self):
ref_bibtex_string = """@article{cifref0,
author = "Blanco, J.A.",
journal = "PHYSICAL REVIEW B",
volume = "73",
year = "2006",
pages = "?--?"
}
"""
self.assertEqual(self.mcif_ncl.get_bibtex_string(), ref_bibtex_string)
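# A minimal round-trip sketch mirroring the pattern the tests above rely on
# (the file name is an assumption; any CIF readable by pymatgen would do):
#
#   s = Structure.from_file("LiFePO4.cif")
#   s2 = CifParser.from_string(str(CifWriter(s))).get_structures()[0]
#   assert StructureMatcher().fit(s, s2)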
if __name__ == "__main__":
unittest.main()
|
gmatteo/pymatgen
|
pymatgen/io/tests/test_cif.py
|
Python
|
mit
| 44,924
|
[
"ABINIT",
"VASP",
"pymatgen"
] |
90cbf9f9c818c409e47a57e36e641b43b5b64fd431a38a69728e2732317d8918
|
# -*- coding: utf-8 -*-
"""
====================
1D optimal transport
====================
This example illustrates the computation of EMD and Sinkhorn transport plans
and their visualization.
"""
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
from ot.datasets import make_1D_gauss as gauss
##############################################################################
# Generate data
# -------------
#%% parameters
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# Gaussian distributions
a = gauss(n, m=20, s=5) # m= mean, s= std
b = gauss(n, m=60, s=10)
# loss matrix
M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)))
M /= M.max()
##############################################################################
# Plot distributions and loss matrix
# ----------------------------------
#%% plot the distributions
pl.figure(1, figsize=(6.4, 3))
pl.plot(x, a, 'b', label='Source distribution')
pl.plot(x, b, 'r', label='Target distribution')
pl.legend()
#%% plot distributions and loss matrix
pl.figure(2, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, M, 'Cost matrix M')
##############################################################################
# Solve EMD
# ---------
#%% EMD
G0 = ot.emd(a, b, M)
pl.figure(3, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, G0, 'OT matrix G0')
##############################################################################
# Solve Sinkhorn
# --------------
#%% Sinkhorn
lambd = 1e-3
Gs = ot.sinkhorn(a, b, M, lambd, verbose=True)
pl.figure(4, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Gs, 'OT matrix Sinkhorn')
pl.show()
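##############################################################################
# Compare transport costs
# -----------------------
# A short added check: ot.emd2 is POT's cost-returning variant of ot.emd,
# and the Sinkhorn transport cost is the Frobenius inner product <Gs, M>.
cost_emd = ot.emd2(a, b, M)
cost_sinkhorn = np.sum(Gs * M)
print('EMD cost: {:.6f}, Sinkhorn transport cost: {:.6f}'.format(
    cost_emd, cost_sinkhorn))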
|
rflamary/POT
|
examples/plot_OT_1D.py
|
Python
|
mit
| 1,698
|
[
"Gaussian"
] |
f59f6ecafb992db0c9a1ab80eae1643cd3e7d4205cb9b8bc3581ea42739e2cca
|
from __future__ import print_function
import numpy as np
import mdtraj as md
from mdtraj.testing import get_fn, eq, skipif
from mdtraj.nmr.shift_wrappers import find_executable, SPARTA_PLUS, PPM, SHIFTX2
from mdtraj.utils import six
@skipif(not find_executable(SPARTA_PLUS), 'SPARTA+ binary not found')
def test_spartaplus():
t = md.load(get_fn('2EQQ.pdb'))
result = md.chemical_shifts_spartaplus(t)
print(result)
eq(result.shape[1], 20) # 2EQQ is NMR structure with 20 frames
eq(float(result.ix[(1, "HA")][0]), 4.378, decimal=4)
# 4.378 taken from first entry in pred.tab, which looks like the following:
# 1 E HA 0.000 4.378 4.350 0.047 0.000 0.291
@skipif(not find_executable(PPM), 'PPM binary not found')
def test_ppm():
t = md.load(get_fn('2EQQ.pdb'))
result = md.chemical_shifts_ppm(t)
print(result)
eq(result.shape[1], 20) # 2EQQ is NMR structure with 20 frames
eq(float(result.ix[(2, "CA")][0]), 53.004, decimal=4)
# taken from first entry in bb_details.dat, which looks like the following:
# 2 ASN CA 999.000 53.004 51.168 51.802 53.081 54.098 52.820 52.379 51.856 53.034 52.754 54.134 54.222 51.210 52.207 50.824 54.459 53.605 54.211 53.688 52.344 53.004 51.168 51.802 53.081 54.098 52.820 52.379 51.856 53.034 52.754 54.134 54.222 51.210 52.207 50.824 54.459 53.605 54.211 53.688 52.344
@skipif(not (find_executable(SHIFTX2) and six.PY2), 'SHIFTX2 binary not found or python2.X not found.')
def test_shiftx2():
t = md.load(get_fn('2EQQ.pdb'))
result = md.chemical_shifts_shiftx2(t)
print(result)
eq(result.shape[1], 20) # 2EQQ is NMR structure with 20 frames
eq(float(result.ix[(1, "C")][0]), 175.2570, decimal=4)
# taken from first entry in trj.pdb.cs, which looks like the following:
# NUM,RES,ATOMNAME,SHIFT
# 1,E,C,175.2570
def test_2_scalar_couplings():
t = md.load(get_fn('frame0.h5')) # This is Alanine dipeptide
for model in ["Ruterjans1999", "Bax2007", "Bax1997"]:
indices, J = md.compute_J3_HN_HA(t, model=model)
eq(indices.shape, (1, 4))
eq(J.shape, (501, 1))
J = J.mean()
assert abs(J - 6.06) <= 2.0, "Value is far from experimental value."
# 6.06 [Hz] is the value from Baldwin PNAS 2006 Table 1.
# We expect the models to give something comparable to this
# If it doesn't, something is fishy.
# Typical ranges are between 1 and 9.
# Obviously this isn't a perfect test, but it's still a useful sanity check.
def test_3_scalar_couplings():
t = md.load(get_fn('1bpi.pdb'))
for model in ["Bax2007"]:
indices_HA, J_HA = md.compute_J3_HN_HA(t, model=model)
indices_C, J_C = md.compute_J3_HN_C(t, model=model)
indices_CB, J_CB = md.compute_J3_HN_CB(t, model=model)
eq(indices_HA.shape, (57, 4))
eq(indices_C.shape, (57, 4))
eq(indices_CB.shape, (57, 4))
eq(J_HA.shape, (1, 57))
eq(J_C.shape, (1, 57))
eq(J_CB.shape, (1, 57))
np.testing.assert_almost_equal(J_HA[0,0], 0.48885268)
np.testing.assert_almost_equal(J_C[0,0], 3.840529)
np.testing.assert_almost_equal(J_CB[0,0], 2.5702963)
|
daviddesancho/mdtraj
|
mdtraj/tests/test_nmr.py
|
Python
|
lgpl-2.1
| 3,237
|
[
"MDTraj"
] |
504b5148ea8fa9d033a9aad93591fffd111534f369cd5e12cb9652ff291549c5
|
from copy import copy
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
class AdminExcludeFieldsMixin(object):
visit_model = None
visit_attr = None
visit_codes = {}
def __init__(self, *args, **kwargs):
super(AdminExcludeFieldsMixin, self).__init__(*args, **kwargs)
self.original_fields = list(copy(self.fields))
try:
self.original_instructions = copy(self.instructions)
except AttributeError:
self.original_instructions = None
try:
exclude = self.custom_exclude.values()
for fields in exclude:
unknown_fields = [f for f in fields if f not in self.original_fields]
if unknown_fields:
raise ImproperlyConfigured(
                    'Custom exclude field(s) {} not found in the ModelAdmin.fields list. '
                    'See \'{}\''.format(unknown_fields, self.__class__.__name__))
except AttributeError:
pass
if not self.visit_model:
raise ImproperlyConfigured(
'Attribute visit_model may not be None. Declare as a class attribute on the ModelAdmin')
if not self.visit_attr:
raise ImproperlyConfigured(
'Attribute visit_attr may not be None. Declare as a class attribute on the ModelAdmin')
if not self.visit_codes:
raise ImproperlyConfigured(
'Attribute visit_codes may not be None. Declare as a class attribute on the ModelAdmin')
def contribute_to_extra_context(self, extra_context, request=None, object_id=None):
extra_context = super(AdminExcludeFieldsMixin, self).contribute_to_extra_context(
extra_context, request=request, object_id=object_id)
visit_code = self.get_visit_code(request, object_id=object_id)
instructions = self.get_custom_instructions(self.get_key(visit_code))
extra_context.update(instructions=instructions)
extra_context.update(admin_exclude_code=visit_code)
return extra_context
def get_form(self, request, obj=None, **kwargs):
"""Returns a form but customizes the fields list first."""
visit_code = self.get_visit_code(request, obj)
self.fields = self.get_custom_fields(request, obj, visit_code=visit_code)
kwargs.update({'fields': self.fields, 'exclude': self.get_custom_exclude(request, obj)})
return super(AdminExcludeFieldsMixin, self).get_form(request, obj, **kwargs)
def get_fields(self, request, obj=None):
"""Returns fields but sets the fields attr before calling super."""
self.fields = self.get_custom_fields(request, obj)
return super(AdminExcludeFieldsMixin, self).get_fields(request, obj)
def get_custom_fields(self, request, obj=None, visit_code=None):
"""Returns a list of field names less those to be excluded for the \'visit\' or None."""
if self.original_fields:
visit_code = visit_code or self.get_visit_code(request, obj)
return list([f for f in self.original_fields if f not in self.get_custom_exclude(
request, obj, visit_code)])
return None
def get_custom_exclude(self, request, obj=None, visit_code=None):
"""Returns a list of fields to be excluded."""
exclude = []
visit_code = visit_code or self.get_visit_code(request, obj)
if visit_code:
try:
exclude = self.custom_exclude.get(self.get_key(visit_code), [])
except AttributeError:
pass
return list(exclude)
def get_custom_instructions(self, key):
"""Returns instructions for this \'key\' as a list."""
try:
instructions = self.original_instructions.get(key)
except KeyError:
instructions = None
except AttributeError:
instructions = self.original_instructions
return instructions
def get_visit_code(self, request, obj=None, object_id=None):
"""Returns the visit code from the instance or settings attr or returns None."""
try:
visit_code = self.get_visit(request, obj=obj, object_id=object_id).get_visit_code()
except AttributeError:
try:
visit_code = settings.ADMIN_EXCLUDE_DEFAULT_CODE
except AttributeError:
visit_code = None
return visit_code
def get_visit(self, request, obj=None, object_id=None):
"""Returns a visit instance using the model instance or the request object or None."""
visit_instance = None
try:
if obj:
visit_instance = getattr(obj, self.visit_attr)
elif object_id:
visit_instance = self.model.objects.get(pk=object_id)
else:
pk = request.GET.get(self.visit_attr)
if pk:
visit_instance = self.visit_model.objects.get(pk=pk)
except AttributeError:
pass
return visit_instance
def get_key(self, visit_code):
"""Returns the dictionary 'key' if the visit_code is in the dictionary 'value'."""
if visit_code:
for key, visit_code_list in self.visit_codes.items():
if visit_code in visit_code_list:
return key
return None
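# Hedged usage sketch (hypothetical model and codes, not shipped with this
# package): a ModelAdmin that hides a field at follow-up visits might look
# like the following.
#
#     class MyCrfAdmin(AdminExcludeFieldsMixin, admin.ModelAdmin):
#         fields = ['weight', 'height', 'comment']
#         visit_model = SubjectVisit        # visit model used by this app
#         visit_attr = 'subject_visit'      # FK attribute on the CRF model
#         visit_codes = {'baseline': ['T0'], 'followup': ['T1', 'T2']}
#         custom_exclude = {'followup': ['height']}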
|
botswana-harvard/edc-admin-exclude
|
edc_admin_exclude/admin.py
|
Python
|
gpl-2.0
| 5,423
|
[
"VisIt"
] |
784388e45e6ad43cae1a9ed103ec20b4da58c697c9dff5162b21eb7dc848a0b2
|
# -*- coding: utf-8 -*-
""" Incident Reporting System - Model
@author: Sahana Taiwan Team
"""
module = "irs"
if deployment_settings.has_module(module):
# ---------------------------------------------------------------------
# List of Incident Categories
    # NB It is important that the meaning of these entries is not changed, as otherwise this hurts our ability to do synchronisation
    # The keys are based on the Canadian ems.incident hierarchy, with a few extra general versions added
    # The values are the labels meant for end-users
# Entries can be hidden from user view in the controller.
# Additional sets of 'translations' can be added to the tuples.
irs_incident_type_opts = {
"animalHealth.animalDieOff" : T("Animal Die Off"),
"animalHealth.animalFeed" : T("Animal Feed"),
"aviation.aircraftCrash" : T("Aircraft Crash"),
"aviation.aircraftHijacking" : T("Aircraft Hijacking"),
"aviation.airportClosure" : T("Airport Closure"),
"aviation.airspaceClosure" : T("Airspace Closure"),
"aviation.noticeToAirmen" : T("Notice to Airmen"),
"aviation.spaceDebris" : T("Space Debris"),
"civil.demonstrations" : T("Demonstrations"),
"civil.dignitaryVisit" : T("Dignitary Visit"),
"civil.displacedPopulations" : T("Displaced Populations"),
"civil.emergency" : T("Civil Emergency"),
"civil.looting" : T("Looting"),
"civil.publicEvent" : T("Public Event"),
"civil.riot" : T("Riot"),
"civil.volunteerRequest" : T("Volunteer Request"),
"crime" : T("Crime"),
"crime.bomb" : T("Bomb"),
"crime.bombExplosion" : T("Bomb Explosion"),
"crime.bombThreat" : T("Bomb Threat"),
"crime.dangerousPerson" : T("Dangerous Person"),
"crime.drugs" : T("Drugs"),
"crime.homeCrime" : T("Home Crime"),
"crime.illegalImmigrant" : T("Illegal Immigrant"),
"crime.industrialCrime" : T("Industrial Crime"),
"crime.poisoning" : T("Poisoning"),
"crime.retailCrime" : T("Retail Crime"),
"crime.shooting" : T("Shooting"),
"crime.stowaway" : T("Stowaway"),
"crime.terrorism" : T("Terrorism"),
"crime.vehicleCrime" : T("Vehicle Crime"),
"fire" : T("Fire"),
"fire.forestFire" : T("Forest Fire"),
"fire.hotSpot" : T("Hot Spot"),
"fire.industryFire" : T("Industry Fire"),
"fire.smoke" : T("Smoke"),
"fire.urbanFire" : T("Urban Fire"),
"fire.wildFire" : T("Wild Fire"),
"flood" : T("Flood"),
"flood.damOverflow" : T("Dam Overflow"),
"flood.flashFlood" : T("Flash Flood"),
"flood.highWater" : T("High Water"),
"flood.overlandFlowFlood" : T("Overland Flow Flood"),
"flood.tsunami" : T("Tsunami"),
"geophysical.avalanche" : T("Avalanche"),
"geophysical.earthquake" : T("Earthquake"),
"geophysical.lahar" : T("Lahar"),
"geophysical.landslide" : T("Landslide"),
"geophysical.magneticStorm" : T("Magnetic Storm"),
"geophysical.meteorite" : T("Meteorite"),
"geophysical.pyroclasticFlow" : T("Pyroclastic Flow"),
"geophysical.pyroclasticSurge" : T("Pyroclastic Surge"),
"geophysical.volcanicAshCloud" : T("Volcanic Ash Cloud"),
"geophysical.volcanicEvent" : T("Volcanic Event"),
"hazardousMaterial" : T("Hazardous Material"),
"hazardousMaterial.biologicalHazard" : T("Biological Hazard"),
"hazardousMaterial.chemicalHazard" : T("Chemical Hazard"),
"hazardousMaterial.explosiveHazard" : T("Explosive Hazard"),
"hazardousMaterial.fallingObjectHazard" : T("Falling Object Hazard"),
"hazardousMaterial.infectiousDisease" : T("Infectious Disease"),
"hazardousMaterial.poisonousGas" : T("Poisonous Gas"),
"hazardousMaterial.radiologicalHazard" : T("Radiological Hazard"),
"health.infectiousDisease" : T("Infectious Disease"),
"health.infestation" : T("Infestation"),
"ice.iceberg" : T("Iceberg"),
"ice.icePressure" : T("Ice Pressure"),
"ice.rapidCloseLead" : T("Rapid Close Lead"),
"ice.specialIce" : T("Special Ice"),
"marine.marineSecurity" : T("Marine Security"),
"marine.nauticalAccident" : T("Nautical Accident"),
"marine.nauticalHijacking" : T("Nautical Hijacking"),
"marine.portClosure" : T("Port Closure"),
"marine.specialMarine" : T("Special Marine"),
"meteorological.blizzard" : T("Blizzard"),
"meteorological.blowingSnow" : T("Blowing Snow"),
"meteorological.drought" : T("Drought"),
"meteorological.dustStorm" : T("Dust Storm"),
"meteorological.fog" : T("Fog"),
"meteorological.freezingDrizzle" : T("Freezing Drizzle"),
"meteorological.freezingRain" : T("Freezing Rain"),
"meteorological.freezingSpray" : T("Freezing Spray"),
"meteorological.hail" : T("Hail"),
"meteorological.hurricane" : T("Hurricane"),
"meteorological.rainFall" : T("Rain Fall"),
"meteorological.snowFall" : T("Snow Fall"),
"meteorological.snowSquall" : T("Snow Squall"),
"meteorological.squall" : T("Squall"),
"meteorological.stormSurge" : T("Storm Surge"),
"meteorological.thunderstorm" : T("Thunderstorm"),
"meteorological.tornado" : T("Tornado"),
"meteorological.tropicalStorm" : T("Tropical Storm"),
"meteorological.waterspout" : T("Waterspout"),
"meteorological.winterStorm" : T("Winter Storm"),
"missingPerson" : T("Missing Person"),
"missingPerson.amberAlert" : T("Child Abduction Emergency"), # http://en.wikipedia.org/wiki/Amber_Alert
"missingPerson.missingVulnerablePerson" : T("Missing Vulnerable Person"),
"missingPerson.silver" : T("Missing Senior Citizen"), # http://en.wikipedia.org/wiki/Silver_Alert
"publicService.emergencySupportFacility" : T("Emergency Support Facility"),
"publicService.emergencySupportService" : T("Emergency Support Service"),
"publicService.schoolClosure" : T("School Closure"),
"publicService.schoolLockdown" : T("School Lockdown"),
"publicService.serviceOrFacility" : T("Service or Facility"),
"publicService.transit" : T("Transit"),
"railway.railwayAccident" : T("Railway Accident"),
"railway.railwayHijacking" : T("Railway Hijacking"),
"roadway.bridgeClosure" : T("Bridge Closed"),
"roadway.hazardousRoadConditions" : T("Hazardous Road Conditions"),
"roadway.roadwayAccident" : T("Road Accident"),
"roadway.roadwayClosure" : T("Road Closed"),
"roadway.roadwayDelay" : T("Road Delay"),
"roadway.roadwayHijacking" : T("Road Hijacking"),
"roadway.roadwayUsageCondition" : T("Road Usage Condition"),
"roadway.trafficReport" : T("Traffic Report"),
"temperature.arcticOutflow" : T("Arctic Outflow"),
"temperature.coldWave" : T("Cold Wave"),
"temperature.flashFreeze" : T("Flash Freeze"),
"temperature.frost" : T("Frost"),
"temperature.heatAndHumidity" : T("Heat and Humidity"),
"temperature.heatWave" : T("Heat Wave"),
"temperature.windChill" : T("Wind Chill"),
"wind.galeWind" : T("Gale Wind"),
"wind.hurricaneForceWind" : T("Hurricane Force Wind"),
"wind.stormForceWind" : T("Storm Force Wind"),
"wind.strongWind" : T("Strong Wind"),
"other.buildingCollapsed" : T("Building Collapsed"),
"other.peopleTrapped" : T("People Trapped"),
"other.powerFailure" : T("Power Failure"),
}
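    # Hedged helper sketch (not in the original model; the name is
    # hypothetical): the keys above form a dot-separated hierarchy, so the
    # top-level group of any code can be recovered with a simple split,
    # e.g. "fire.forestFire" -> "fire".
    def irs_top_level_category(code):
        return code.split(".")[0]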
# This Table defines which Categories are visible to end-users
resourcename = "icategory"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("code"))
table.code.label = T("Category")
table.code.requires = IS_IN_SET(irs_incident_type_opts)
table.code.represent = lambda opt: irs_incident_type_opts.get(opt, opt)
# ---------------------------------------------------------------------
# Incidents
# This is the current status of an Incident
# @ToDo Change this so that there is a 'lead' ireport updated in the case of duplicates
resourcename = "incident"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
super_link(db.sit_situation),
Field("name"),
Field("category"),
Field("contact"),
location_id(),
Field("datetime", "datetime"),
#Field("persons_affected", "integer"),
#Field("persons_injured", "integer"),
#Field("persons_deceased", "integer"),
comments(),
migrate=migrate, *s3_meta_fields())
table.name.requires = IS_NOT_EMPTY()
table.datetime.label = T("Date/Time")
table.datetime.requires = [IS_NOT_EMPTY(),
IS_UTC_DATETIME(utc_offset=shn_user_utc_offset(), allow_future=False)]
# The full set available to Admins & Imports/Exports
# (users use the subset by over-riding this in the Controller)
table.category.requires = IS_NULL_OR(IS_IN_SET(irs_incident_type_opts))
table.category.represent = lambda opt: irs_incident_type_opts.get(opt, opt)
# CRUD strings
ADD_INCIDENT = T("Add Incident")
LIST_INCIDENTS = T("List Incidents")
s3.crud_strings[tablename] = Storage(
title_create = ADD_INCIDENT,
title_display = T("Incident Details"),
title_list = LIST_INCIDENTS,
title_update = T("Edit Incident"),
title_search = T("Search Incidents"),
subtitle_create = T("Add New Incident"),
subtitle_list = T("Incidents"),
label_list_button = LIST_INCIDENTS,
label_create_button = ADD_INCIDENT,
label_delete_button = T("Delete Incident"),
msg_record_created = T("Incident added"),
msg_record_modified = T("Incident updated"),
msg_record_deleted = T("Incident deleted"),
msg_list_empty = T("No Incidents currently registered"))
incident_id = S3ReusableField("incident_id", table,
requires = IS_NULL_OR(IS_ONE_OF(db, "irs_incident.id", "%(id)s")),
represent = lambda id: id,
label = T("Incident"),
ondelete = "RESTRICT")
s3xrc.model.configure(table,
super_entity = db.sit_situation,
list_fields = [
"id",
"category",
"datetime",
"location_id"
])
# -----------------------------------------------------------------------------
# Reports
# This is a report of an Incident
# (A single incident may generate many reports)
#def shn_assess_represent(assessments):
# """ Represent assessments in the Incidents List """
# add_assessment = A(T("Add Assessment"), _href=URL(r=request, c="assess", f="assess.html", args="create"), _class="action-btn")
# output = add_assessment
# if assessments:
# _assessments = assessments.split("|")
# for assessment in _assessments:
# output.append(A(T("Open Assessment"), _href=URL(r=request, c="assess", f="assess", args=assessment), _class="action-btn"))
# return output
# else:
# return output
resourcename = "ireport"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
incident_id(), # ToDo: Remove
Field("name"),
Field("message", "text"),
Field("category"),
person_id(),
Field("contact"),
organisation_id(),
Field("datetime", "datetime"),
location_id(),
# To be replaced by flexible Impacts as per Assessments?
#Field("persons_affected", "integer"),
#Field("persons_injured", "integer"),
#Field("persons_deceased", "integer"),
document_id(),
Field("verified", "boolean"),
#Field("assess_id", label=T("Assessments"),
# represent = shn_assess_represent
#),
comments(),
migrate=migrate, *s3_meta_fields())
table.category.label = T("Category")
# The full set available to Admins & Imports/Exports
# (users use the subset by over-riding this in the Controller)
table.category.requires = IS_NULL_OR(IS_IN_SET(irs_incident_type_opts))
table.category.represent = lambda opt: irs_incident_type_opts.get(opt, opt)
table.name.label = T("Short Description")
table.name.requires = IS_NOT_EMPTY()
table.message.label = T("Message")
table.message.represent = lambda message: shn_abbreviate(message)
table.person_id.label = T("Reporter Name")
table.person_id.comment = (T("At/Visited Location (not virtual)"),
shn_person_comment(T("Reporter Name"), T("The person at the location who is reporting this incident (optional)")))
table.contact.label = T("Contact Details")
table.datetime.label = T("Date/Time")
table.datetime.requires = [IS_NOT_EMPTY(),
IS_UTC_DATETIME(utc_offset=shn_user_utc_offset(), allow_future=False)]
    table.organisation_id.label = T("Assign to Org.")
#table.persons_affected.label = T("# of People Affected")
#table.persons_injured.label = T("# of People Injured")
#table.persons_deceased.label = T("# of People Deceased")
table.verified.label = T("Verified?")
table.verified.represent = lambda verified: (T("No"), T("Yes"))[verified == True]
# CRUD strings
ADD_INC_REPORT = T("Add Incident Report")
LIST_INC_REPORTS = T("List Incident Reports")
s3.crud_strings[tablename] = Storage(
title_create = ADD_INC_REPORT,
title_display = T("Incident Report Details"),
title_list = LIST_INC_REPORTS,
title_update = T("Edit Incident Report"),
title_search = T("Search Incident Reports"),
subtitle_create = T("Add New Incident Report"),
subtitle_list = T("Incident Reports"),
label_list_button = LIST_INC_REPORTS,
label_create_button = ADD_INC_REPORT,
label_delete_button = T("Delete Incident Report"),
msg_record_created = T("Incident Report added"),
msg_record_modified = T("Incident Report updated"),
msg_record_deleted = T("Incident Report deleted"),
msg_list_empty = T("No Incident Reports currently registered"))
#def ireport_onaccept(form):
# """ Nasty Hack for Resource Linking """
# if "assessments" in form.vars and form.vars.assessments:
# pass
# else:
# # Default it to the record ID so that the represent can create an assessment for this Incident
# form.vars.assessments = form.vars.id
# We don't want these visible in Create forms
# (we override in Update forms in controller)
table.verified.writable = table.verified.readable = False
#table.assess_id.writable = table.assess_id.readable = False
s3xrc.model.configure(table,
#onaccept = lambda form: ireport_onaccept(form),
#onvalidation = ireport_onvalidation,
list_fields = ["id", "category", "location_id", "organisation_id", "verified", "name", "message"]
)
# irs_ireport as component of doc_documents
s3xrc.model.add_component(module, resourcename,
multiple=True,
joinby=dict(doc_document="document_id"),
deletable=True,
editable=True)
    ireport_id = S3ReusableField("ireport_id", table,
                                 requires = IS_NULL_OR(IS_ONE_OF(db, "irs_ireport.id", "%(name)s")),
                                 represent = lambda id: id,
                                 label = T("Incident Report"),
                                 ondelete = "RESTRICT")
# -----------------------------------------------------------------------------
irs_assessment_type_opts = {
1:T("initial assessment"),
2:T("follow-up assessment"),
3:T("final report"),
99:T("other")
}
irs_event_type_opts = {
1:T("primary incident"),
2:T("secondary effect"),
3:T("collateral event"),
99:T("other")
}
irs_cause_type_opts = {
1:T("natural hazard"),
2:T("technical failure"),
3:T("human error"),
4:T("criminal intent"),
5:T("operational intent"),
99:T("other")
}
# Assessments
# This is a follow-up assessment of an Incident
# Deprecated by Assessments module?
resourcename = "iassessment"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
incident_id(),
Field("datetime", "datetime"),
Field("itype", "integer",
requires = IS_IN_SET(irs_assessment_type_opts, zero=None),
default = 1,
label = T("Report Type"),
represent = lambda opt: irs_assessment_type_opts.get(opt, UNKNOWN_OPT)),
Field("event_type", "integer",
requires = IS_IN_SET(irs_event_type_opts, zero=None),
default = 1,
label = T("Event type"),
represent = lambda opt: irs_event_type_opts.get(opt, UNKNOWN_OPT)),
Field("cause_type", "integer",
requires = IS_IN_SET(irs_cause_type_opts, zero=None),
default = 1,
label = T("Type of cause"),
represent = lambda opt: irs_cause_type_opts.get(opt, UNKNOWN_OPT)),
Field("report", "text"),
Field("persons_affected", "integer"),
Field("persons_injured", "integer"),
Field("persons_deceased", "integer"),
migrate=migrate, *s3_meta_fields())
table.modified_by.label = T("Reporter")
table.modified_by.readable = True
table.datetime.label = T("Date/Time")
# CRUD strings
ADD_ASSESSMENT = T("Add Assessment")
LIST_ASSESSMENTS = T("List Assessments")
s3.crud_strings[tablename] = Storage(
title_create = ADD_ASSESSMENT,
title_display = T("Assessment Details"),
title_list = LIST_ASSESSMENTS,
title_update = T("Edit Assessment"),
title_search = T("Search Assessments"),
subtitle_create = T("Add New Assessment"),
subtitle_list = T("Assessments"),
label_list_button = LIST_ASSESSMENTS,
label_create_button = ADD_ASSESSMENT,
msg_record_created = T("Assessment added"),
msg_record_modified = T("Assessment updated"),
msg_record_deleted = T("Assessment deleted"),
msg_list_empty = T("No Assessments currently registered"))
s3xrc.model.configure(table,
list_fields = [
"id",
"datetime",
"itype",
"modified_by"
])
# Disabling until we figure out how to link to Assessments module
#s3xrc.model.add_component(module, resourcename,
# multiple = True,
# joinby = dict(irs_incident="incident_id"),
# deletable = True,
# editable = True)
# -----------------------------------------------------------------------------
irs_image_type_opts = {
1:T("Photograph"),
2:T("Map"),
3:T("Document Scan"),
99:T("other")
}
# Replace by image_id
resourcename = "iimage"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("report_id", db.irs_ireport),
incident_id(),
Field("assessment_id", db.irs_iassessment),
Field("type", "integer",
requires = IS_IN_SET(irs_image_type_opts, zero=None),
default = 1,
label = T("Image Type"),
represent = lambda opt: irs_image_type_opts.get(opt, UNKNOWN_OPT)),
Field("image", "upload", autodelete=True),
#Field("url"),
Field("description"),
#Field("tags"),
migrate=migrate, *s3_meta_fields())
# CRUD strings
ADD_IMAGE = T("Add Image")
LIST_IMAGES = T("List Images")
s3.crud_strings[tablename] = Storage(
title_create = ADD_IMAGE,
title_display = T("Image Details"),
title_list = LIST_IMAGES,
title_update = T("Edit Image"),
title_search = T("Search Images"),
subtitle_create = T("Add New Image"),
subtitle_list = T("Images"),
label_list_button = LIST_IMAGES,
label_create_button = ADD_IMAGE,
msg_record_created = T("Image added"),
msg_record_modified = T("Image updated"),
msg_record_deleted = T("Image deleted"),
msg_list_empty = T("No Images currently registered"))
s3xrc.model.add_component(module, resourcename,
multiple = True,
joinby = dict(irs_incident="incident_id",
irs_ireport="report_id",
irs_iassessment="assessment_id"),
deletable = True,
editable = True)
# -----------------------------------------------------------------------------
irs_response_type_opts = {
1:T("Alert"),
2:T("Intervention"),
3:T("Closure"),
99:T("other")
}
resourcename = "iresponse"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
incident_id(),
Field("datetime", "datetime"),
Field("itype", "integer",
requires = IS_IN_SET(irs_response_type_opts, zero=None),
default = 1,
label = T("Type"),
represent = lambda opt: irs_response_type_opts.get(opt, UNKNOWN_OPT)),
Field("report", "text"),
migrate=migrate, *s3_meta_fields())
# CRUD strings
ADD_RESPONSE = T("Add Response")
LIST_RESPONSES = T("List Responses")
s3.crud_strings[tablename] = Storage(
title_create = ADD_RESPONSE,
title_display = T("Response Details"),
title_list = LIST_RESPONSES,
title_update = T("Edit Response"),
title_search = T("Search Responses"),
subtitle_create = T("Add New Response"),
subtitle_list = T("Responses"),
label_list_button = LIST_RESPONSES,
label_create_button = ADD_RESPONSE,
msg_record_created = T("Response added"),
msg_record_modified = T("Response updated"),
msg_record_deleted = T("Response deleted"),
msg_list_empty = T("No Responses currently registered"))
s3xrc.model.add_component(module, resourcename,
multiple = True,
joinby = dict(irs_incident="incident_id"),
deletable = True,
editable = True)
# -----------------------------------------------------------------------------
@auth.shn_requires_membership(1) # must be Administrator
def shn_irs_ushahidi_import(r, **attr):
if r.representation == "html" and \
r.name == "ireport" and not r.component and not r.id:
url = r.request.get_vars.get("url", "http://")
title = T("Incident Reports")
subtitle = T("Import from Ushahidi Instance")
form = FORM(TABLE(TR(
TH("URL: "),
INPUT(_type="text", _name="url", _size="100", _value=url,
requires=[IS_URL(), IS_NOT_EMPTY()]),
TH(DIV(SPAN("*", _class="req", _style="padding-right: 5px;")))),
TR(TD("Ignore Errors?: "),
TD(INPUT(_type="checkbox", _name="ignore_errors", _id="ignore_errors"))),
TR("", INPUT(_type="submit", _value=T("Import")))))
label_list_btn = shn_get_crud_string(r.tablename, "title_list")
list_btn = A(label_list_btn,
_href=r.other(method="", vars=None),
_class="action-btn")
rheader = DIV(P(T("API is documented here") + ": http://wiki.ushahidi.com/doku.php?id=ushahidi_api"), P(T("Example") + " URL: http://ushahidi.my.domain/api?task=incidents&by=all&resp=xml&limit=1000"))
output = dict(title=title, form=form, subtitle=subtitle, list_btn=list_btn, rheader=rheader)
if form.accepts(request.vars, session):
import_count = [0]
def sync(vector, import_count = import_count):
if vector.tablename == "irs_ireport":
import_count[0] += 1
s3xrc.sync_resolve = sync
ireports = r.resource
ushahidi = form.vars.url
ignore_errors = form.vars.get("ignore_errors", None)
template = os.path.join(request.folder, "static", "xslt", "import", "ushahidi.xsl")
if os.path.exists(template) and ushahidi:
try:
success = ireports.import_xml(ushahidi, template=template, ignore_errors=ignore_errors)
                except Exception:
import sys
e = sys.exc_info()[1]
response.error = e
else:
if success:
count = import_count[0]
if count:
response.flash = "%s %s" % (import_count[0], T("reports successfully imported."))
else:
response.flash = T("No reports available.")
else:
response.error = s3xrc.error
response.view = "create.html"
return output
else:
raise HTTP(501, BADMETHOD)
s3xrc.model.set_method(module, "ireport",
method="ushahidi",
action=shn_irs_ushahidi_import)
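    # Hedged note (illustrative only; the URL is a placeholder): outside the
    # web form above, the same import could be driven directly against a
    # resource, e.g.:
    #   ireports.import_xml("http://ushahidi.example/api?task=incidents&resp=xml",
    #                       template=template, ignore_errors=True)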
# -----------------------------------------------------------------------------
|
ksetyadi/Sahana-Eden
|
models/06_irs.py
|
Python
|
mit
| 28,097
|
[
"VisIt"
] |
29a3a9e3481481db5876718f3cfe5f324a89142ec2260b0479288c94b7dba669
|
#!/usr/bin/python
import os, sys
import argparse
# Locate the moose/python directory
sys.path.append(os.path.join(os.path.abspath('..'), 'python'))
# Load the required moose/python packages
from FactorySystem import ParseGetPot
from PresentationBuilder import base
if __name__ == '__main__':
# Create the argument parser
parser = argparse.ArgumentParser(description='A wiki presentation builder')
parser.add_argument('input', type=str, help='Input file name')
parser.add_argument('--format', '-f', type=str, default='remark', help='Select the presentation output format (remark | reveal)')
args = parser.parse_args()
# Build the presentation
builder = base.PresentationBuilder(args.input, format=args.format)
builder.write()
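    # Hedged usage note (assumed invocation, matching the argparse setup above):
    #   ./presentation_builder.py my_slides.i --format reveal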
|
danielru/moose
|
scripts/presentation_builder.py
|
Python
|
lgpl-2.1
| 746
|
[
"MOOSE"
] |
d49b7888c1afded8a733b68499e89f38676ad722c6c74cf5fe854b0874774775
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import subprocess
import sys
# Version check prior to non-stdlib imports
if sys.version_info < (3, 5):
sys.stderr.write(
"ERROR: mapDamage requires Python v3.5 or above, but "
"Python v%i.%i was used\n" % sys.version_info[:2]
)
sys.exit(1)
from setuptools import setup, Extension
from setuptools.command.build_py import build_py as SetuptoolsBuildPy
class GitVersionBuild(SetuptoolsBuildPy):
def run(self):
if os.path.exists(".git"):
try:
version = subprocess.check_output(
("git", "describe", "--always", "--tags", "--dirty")
)
except (subprocess.CalledProcessError, OSError) as error:
raise SystemExit("Could not determine mapDamage version: %s" % (error,))
with open(os.path.join("mapdamage", "_version.py"), "w") as handle:
handle.write("#!/usr/bin/env python\n")
handle.write("__version__ = %r\n" % (version.decode("utf-8").strip(),))
super().run()
setup(
cmdclass={"build_py": GitVersionBuild},
name="mapdamage",
version="2.2.1",
author="Aurélien Ginolhac, Mikkel Schubert, Hákon Jónsson",
packages=["mapdamage"],
package_data={"mapdamage": ["r/*.r", "r/stats/*.r", "tests/*"]},
entry_points={"console_scripts": ["mapDamage=mapdamage.main:entry_point"]},
url="https://github.com/ginolhac/mapDamage",
license="LICENSE.txt",
description="mapDamage tracks and quantifies DNA damage patterns in ancient DNA sequencing reads generated by Next-Generation Sequencing platforms",
long_description=open("README.md").read(),
long_description_content_type='text/markdown',
python_requires=">=3.5",
install_requires=["coloredlogs", "pysam"],
ext_modules=[
Extension(
"mapdamage.seqtk", sources=["mapdamage/seqtk/seqtk.c"], libraries=["z"],
)
],
)
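# Hedged sketch (not part of setup.py): how the generated mapdamage/_version.py
# might be consumed at runtime, falling back to the static version above when
# the package was built without git metadata.
#
#     try:
#         from mapdamage._version import __version__
#     except ImportError:
#         __version__ = "2.2.1"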
|
ginolhac/mapDamage
|
setup.py
|
Python
|
mit
| 1,986
|
[
"pysam"
] |
de2ffa0d386b85193fddd8ca7806b8f4d765302cf0d36aeea998889259242b68
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from espresso.esutil import pmiimport
pmiimport('espresso.storage')
from espresso.storage.Storage import *
from espresso.storage.DomainDecomposition import *
from espresso.storage.DomainDecompositionNonBlocking import *
from espresso.storage.DomainDecompositionAdress import *
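# Hedged usage sketch (illustrative; the exact constructor signature varies
# between ESPResSo++ versions, so treat the argument list as an assumption):
#
#     system.storage = espresso.storage.DomainDecomposition(system, nodeGrid, cellGrid)
#
# where nodeGrid partitions the box across MPI ranks and cellGrid subdivides
# each rank's domain into linked cells.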
|
BackupTheBerlios/espressopp
|
src/storage/__init__.py
|
Python
|
gpl-3.0
| 1,149
|
[
"ESPResSo"
] |
9a4f53036baf0191927e9d22cee4aa3327ab91a7d09eb4ff2713ecf95d9418bb
|
# -*- coding: utf-8 -*-
import logging
import sys
import numpy as np
from itertools import tee, izip
import llvm.core as lc
import llvm.passes as lp
import llvm.ee as le
from core_translator import CoreTranslator
from type_inference import TypeInfer, UnderDeteremined
from type_system import TVar, TFun, int32, int64, double64, float32, array  # 'array' assumed exported here; arg_pytype below needs it
from pretty_printer import dump
from constrain_solver import ConstrainSolver
from llvm_codegen import determined, LLVMEmitter
from type_mapping import mangler, wrap_module
logging.basicConfig(level=logging.WARN)
import ast
import inspect
module = lc.Module.new('fastpy.module')
engine = None
function_cache = {}
tm = le.TargetMachine.new(features='', cm=le.CM_JITDEFAULT)
eb = le.EngineBuilder.new(module)
engine = eb.create(tm)
def fast(fn):
"""
    Decorator which maps the function through the core translator, runs type
    inference, and then creates a closure which, when called, automatically
    specializes the function to the given argument types and compiles a new
    version if needed.
    We cache based on the argument types (which entirely define the
    specialization), so whenever a similarly typed argument set is passed we
    just look up the pre-JIT'd function and invoke it without recompiling.
Args:
fn (function): Function which we want to decorate
"""
# debug(dump(ast.parse(inspect.getsource(fn))))
core_ast = CoreTranslator().translate(fn)
debug(dump(core_ast))
ty, mgu = typeinfer(core_ast)
debug(dump(core_ast))
return specialize(core_ast, ty, mgu)
def typeinfer(core_ast):
"""Infer types
Args:
core_ast (ast): Untyped abstract syntax tree transfromed by the core translator
Returns:
TYPE: Description
"""
infer = TypeInfer()
ty = infer.visit(core_ast)
mgu = ConstrainSolver().solve(infer.constraints)
infer_ty = ConstrainSolver().apply(mgu, ty)
    debug('inferred types: ' + str(infer_ty))
debug(mgu)
debug(infer.constraints)
return (infer_ty, mgu)
def arg_pytype(arg):
if isinstance(arg, np.ndarray):
if arg.dtype == np.dtype('int32'):
return array(int32)
elif arg.dtype == np.dtype('int64'):
return array(int64)
elif arg.dtype == np.dtype('double'):
return array(double64)
elif arg.dtype == np.dtype('float'):
return array(float32)
    elif isinstance(arg, int) and arg < sys.maxint:
return int64
elif isinstance(arg, float):
return double64
else:
raise Exception("Type not supported: %s" % type(arg))
def specialize(ast, infer_ty, mgu):
def _wrapper(*args):
types = map(arg_pytype, list(args))
spec_ty = TFun(argtys=types, retty=TVar("$retty"))
unifier = ConstrainSolver().unify(infer_ty, spec_ty)
specializer = ConstrainSolver().compose(unifier, mgu)
retty = ConstrainSolver().apply(specializer, TVar("$retty"))
argtys = [ConstrainSolver().apply(specializer, ty) for ty in types]
debug('Specialized Function:' + str(TFun(argtys, retty)))
if determined(retty) and all(map(determined, argtys)):
key = mangler(ast.fname, argtys)
# Don't recompile after we've specialized.
if key in function_cache:
return function_cache[key](*args)
else:
llfunc = codegen(ast, specializer, retty, argtys)
pyfunc = wrap_module(argtys, llfunc, engine)
function_cache[key] = pyfunc
return pyfunc(*args)
else:
raise UnderDeteremined()
return _wrapper
def codegen(ast, specializer, retty, argtys):
cgen = LLVMEmitter(module, specializer, retty, argtys)
mod = cgen.visit(ast)
cgen.function.verify()
tm = le.TargetMachine.new(opt=3, cm=le.CM_JITDEFAULT, features='')
pms = lp.build_pass_managers(tm=tm,
fpm=False,
mod=module,
opt=3,
vectorize=False,
loop_vectorize=True)
pms.pm.run(module)
debug(cgen.function)
debug(module.to_native_assembly())
return cgen.function
def debug(fmt, *args):
logging.debug('=' * 80)
logging.debug(fmt, *args)
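if __name__ == '__main__':
    # Hedged usage sketch (assumes the decorator behaves as documented above):
    @fast
    def add(a, b):
        return a + b
    print(add(1, 2))  # first call: infer, specialize and JIT for (int64, int64)
    print(add(3, 4))  # same type signature: served straight from function_cache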
|
tartavull/fastpy
|
fastpy/fastpy.py
|
Python
|
mit
| 4,343
|
[
"VisIt"
] |
a618fe99c2891e2c7eb2075cd4223ddf186fdcb65640c465b6ac436ad825994f
|
# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests ImportVisitor and related classes."""
from __future__ import unicode_literals
import copy
import os
import shutil
import tempfile
import textwrap
import unittest
from grumpy_tools.compiler import imputil
from grumpy_tools.compiler import util
from grumpy_tools.vendor import pythonparser
class ImportVisitorTest(unittest.TestCase):
_PATH_SPEC = {
'foo.py': None,
'qux.py': None,
'bar/': {
'fred/': {
'__init__.py': None,
'quux.py': None,
},
'__init__.py': None,
'baz.py': None,
'foo.py': None,
},
'baz.py': None,
}
def setUp(self):
self.rootdir = tempfile.mkdtemp()
self.pydir = os.path.join(self.rootdir, 'src', '__python__')
self._materialize_tree(
self.rootdir, {'src/': {'__python__/': self._PATH_SPEC}})
foo_script = os.path.join(self.rootdir, 'foo.py')
self.importer = imputil.Importer(self.rootdir, 'foo', foo_script, False)
bar_script = os.path.join(self.pydir, 'bar', '__init__.py')
self.bar_importer = imputil.Importer(
self.rootdir, 'bar', bar_script, False)
fred_script = os.path.join(self.pydir, 'bar', 'fred', '__init__.py')
self.fred_importer = imputil.Importer(
self.rootdir, 'bar.fred', fred_script, False)
self.foo_import = imputil.Import(
'foo', os.path.join(self.pydir, 'foo.py'))
self.qux_import = imputil.Import(
'qux', os.path.join(self.pydir, 'qux.py'))
self.bar_import = imputil.Import(
'bar', os.path.join(self.pydir, 'bar/__init__.py'))
self.fred_import = imputil.Import(
'bar.fred', os.path.join(self.pydir, 'bar/fred/__init__.py'))
self.quux_import = imputil.Import(
'bar.fred.quux', os.path.join(self.pydir, 'bar/fred/quux.py'))
self.baz2_import = imputil.Import(
'bar.baz', os.path.join(self.pydir, 'bar/baz.py'))
self.foo2_import = imputil.Import(
'bar.foo', os.path.join(self.pydir, 'bar/foo.py'))
self.baz_import = imputil.Import(
'baz', os.path.join(self.pydir, 'baz.py'))
def tearDown(self):
shutil.rmtree(self.rootdir)
def testImportEmptyPath(self):
importer = imputil.Importer(None, 'foo', 'foo.py', False)
self.assertRaises(util.ImportError, importer.visit,
pythonparser.parse('import bar').body[0])
def testImportTopLevelModule(self):
imp = copy.deepcopy(self.qux_import)
imp.add_binding(imputil.Import.MODULE, 'qux', 0)
self._check_imports('import qux', [imp])
def testImportTopLevelPackage(self):
imp = copy.deepcopy(self.bar_import)
imp.add_binding(imputil.Import.MODULE, 'bar', 0)
self._check_imports('import bar', [imp])
def testImportPackageModuleAbsolute(self):
imp = copy.deepcopy(self.baz2_import)
imp.add_binding(imputil.Import.MODULE, 'bar', 0)
self._check_imports('import bar.baz', [imp])
def testImportFromSubModule(self):
imp = copy.deepcopy(self.baz2_import)
imp.add_binding(imputil.Import.MODULE, 'baz', 1)
self._check_imports('from bar import baz', [imp])
def testImportPackageModuleRelative(self):
imp = copy.deepcopy(self.baz2_import)
imp.add_binding(imputil.Import.MODULE, 'baz', 1)
got = self.bar_importer.visit(pythonparser.parse('import baz').body[0])
self._assert_imports_equal([imp], got)
def testImportPackageModuleRelativeFromSubModule(self):
imp = copy.deepcopy(self.baz2_import)
imp.add_binding(imputil.Import.MODULE, 'baz', 1)
foo_script = os.path.join(self.pydir, 'bar', 'foo.py')
importer = imputil.Importer(self.rootdir, 'bar.foo', foo_script, False)
got = importer.visit(pythonparser.parse('import baz').body[0])
self._assert_imports_equal([imp], got)
def testImportPackageModuleAbsoluteImport(self):
imp = copy.deepcopy(self.baz_import)
imp.add_binding(imputil.Import.MODULE, 'baz', 0)
bar_script = os.path.join(self.pydir, 'bar', '__init__.py')
importer = imputil.Importer(self.rootdir, 'bar', bar_script, True)
got = importer.visit(pythonparser.parse('import baz').body[0])
self._assert_imports_equal([imp], got)
def testImportMultiple(self):
imp1 = copy.deepcopy(self.foo_import)
imp1.add_binding(imputil.Import.MODULE, 'foo', 0)
imp2 = copy.deepcopy(self.baz2_import)
imp2.add_binding(imputil.Import.MODULE, 'bar', 0)
self._check_imports('import foo, bar.baz', [imp1, imp2])
def testImportAs(self):
imp = copy.deepcopy(self.foo_import)
imp.add_binding(imputil.Import.MODULE, 'bar', 0)
self._check_imports('import foo as bar', [imp])
def testImportFrom(self):
imp = copy.deepcopy(self.baz2_import)
imp.add_binding(imputil.Import.MODULE, 'baz', 1)
self._check_imports('from bar import baz', [imp])
def testImportFromMember(self):
imp = copy.deepcopy(self.foo_import)
imp.add_binding(imputil.Import.MEMBER, 'bar', 'bar')
self._check_imports('from foo import bar', [imp])
def testImportFromMultiple(self):
imp1 = copy.deepcopy(self.baz2_import)
imp1.add_binding(imputil.Import.MODULE, 'baz', 1)
imp2 = copy.deepcopy(self.foo2_import)
imp2.add_binding(imputil.Import.MODULE, 'foo', 1)
self._check_imports('from bar import baz, foo', [imp1, imp2])
def testImportFromMixedMembers(self):
imp1 = copy.deepcopy(self.bar_import)
imp1.add_binding(imputil.Import.MEMBER, 'qux', 'qux')
imp2 = copy.deepcopy(self.baz2_import)
imp2.add_binding(imputil.Import.MODULE, 'baz', 1)
self._check_imports('from bar import qux, baz', [imp1, imp2])
def testImportFromAs(self):
imp = copy.deepcopy(self.baz2_import)
imp.add_binding(imputil.Import.MODULE, 'qux', 1)
self._check_imports('from bar import baz as qux', [imp])
def testImportFromAsMembers(self):
imp = copy.deepcopy(self.foo_import)
imp.add_binding(imputil.Import.MEMBER, 'baz', 'bar')
self._check_imports('from foo import bar as baz', [imp])
def testImportFromWildcardRaises(self):
self.assertRaises(util.ImportError, self.importer.visit,
pythonparser.parse('from foo import *').body[0])
def testImportFromFuture(self):
self._check_imports('from __future__ import print_function', [])
def testImportFromNative(self):
imp = imputil.Import('__go__/fmt', is_native=True)
imp.add_binding(imputil.Import.MEMBER, 'Printf', 'Printf')
self._check_imports('from "__go__/fmt" import Printf', [imp])
def testImportFromNativeMultiple(self):
imp = imputil.Import('__go__/fmt', is_native=True)
imp.add_binding(imputil.Import.MEMBER, 'Printf', 'Printf')
imp.add_binding(imputil.Import.MEMBER, 'Println', 'Println')
self._check_imports('from "__go__/fmt" import Printf, Println', [imp])
def testImportFromNativeAs(self):
imp = imputil.Import('__go__/fmt', is_native=True)
imp.add_binding(imputil.Import.MEMBER, 'foo', 'Printf')
self._check_imports('from "__go__/fmt" import Printf as foo', [imp])
def testRelativeImportNonPackage(self):
self.assertRaises(util.ImportError, self.importer.visit,
pythonparser.parse('from . import bar').body[0])
def testRelativeImportBeyondTopLevel(self):
self.assertRaises(util.ImportError, self.bar_importer.visit,
pythonparser.parse('from .. import qux').body[0])
def testRelativeModuleNoExist(self):
imp = copy.deepcopy(self.bar_import)
imp.add_binding(imputil.Import.MEMBER, 'qux', 'qux')
node = pythonparser.parse('from . import qux').body[0]
self._assert_imports_equal([imp], self.bar_importer.visit(node))
def testRelativeModule(self):
imp = copy.deepcopy(self.foo2_import)
imp.add_binding(imputil.Import.MODULE, 'foo', 1)
node = pythonparser.parse('from . import foo').body[0]
self._assert_imports_equal([imp], self.bar_importer.visit(node))
def testRelativeModuleFromSubModule(self):
imp = copy.deepcopy(self.foo2_import)
imp.add_binding(imputil.Import.MODULE, 'foo', 1)
baz_script = os.path.join(self.pydir, 'bar', 'baz.py')
importer = imputil.Importer(self.rootdir, 'bar.baz', baz_script, False)
node = pythonparser.parse('from . import foo').body[0]
self._assert_imports_equal([imp], importer.visit(node))
def testRelativeModuleMember(self):
imp = copy.deepcopy(self.foo2_import)
imp.add_binding(imputil.Import.MEMBER, 'qux', 'qux')
node = pythonparser.parse('from .foo import qux').body[0]
self._assert_imports_equal([imp], self.bar_importer.visit(node))
def testRelativeModuleMemberMixed(self):
imp1 = copy.deepcopy(self.fred_import)
imp1.add_binding(imputil.Import.MEMBER, 'qux', 'qux')
imp2 = copy.deepcopy(self.quux_import)
imp2.add_binding(imputil.Import.MODULE, 'quux', 2)
node = pythonparser.parse('from .fred import qux, quux').body[0]
self._assert_imports_equal([imp1, imp2], self.bar_importer.visit(node))
def testRelativeUpLevel(self):
imp = copy.deepcopy(self.foo2_import)
imp.add_binding(imputil.Import.MODULE, 'foo', 1)
node = pythonparser.parse('from .. import foo').body[0]
self._assert_imports_equal([imp], self.fred_importer.visit(node))
def testRelativeUpLevelMember(self):
imp = copy.deepcopy(self.foo2_import)
imp.add_binding(imputil.Import.MEMBER, 'qux', 'qux')
node = pythonparser.parse('from ..foo import qux').body[0]
self._assert_imports_equal([imp], self.fred_importer.visit(node))
def _check_imports(self, stmt, want):
got = self.importer.visit(pythonparser.parse(stmt).body[0])
self._assert_imports_equal(want, got)
def _assert_imports_equal(self, want, got):
self.assertEqual([imp.__dict__ for imp in want],
[imp.__dict__ for imp in got])
def _materialize_tree(self, dirname, spec):
for name, sub_spec in spec.iteritems():
if name.endswith('/'):
subdir = os.path.join(dirname, name[:-1])
os.mkdir(subdir)
self._materialize_tree(subdir, sub_spec)
else:
with open(os.path.join(dirname, name), 'w'):
pass
class MakeFutureFeaturesTest(unittest.TestCase):
def testImportFromFuture(self):
testcases = [
('from __future__ import print_function',
imputil.FutureFeatures(print_function=True)),
('from __future__ import generators', imputil.FutureFeatures()),
('from __future__ import generators, print_function',
imputil.FutureFeatures(print_function=True)),
]
for tc in testcases:
source, want = tc
mod = pythonparser.parse(textwrap.dedent(source))
node = mod.body[0]
got = imputil._make_future_features(node) # pylint: disable=protected-access
self.assertEqual(want.__dict__, got.__dict__)
def testImportFromFutureParseError(self):
testcases = [
        # NOTE: move this group to testImportFromFuture once these features are
        # implemented by grumpy
('from __future__ import division',
r'future feature \w+ not yet implemented'),
('from __future__ import braces', 'not a chance'),
('from __future__ import nonexistant_feature',
r'future feature \w+ is not defined'),
]
for tc in testcases:
source, want_regexp = tc
mod = pythonparser.parse(source)
node = mod.body[0]
self.assertRaisesRegexp(util.ParseError, want_regexp,
imputil._make_future_features, node) # pylint: disable=protected-access
class ParseFutureFeaturesTest(unittest.TestCase):
def testFutureFeatures(self):
testcases = [
('from __future__ import print_function',
imputil.FutureFeatures(print_function=True)),
("""\
"module docstring"
from __future__ import print_function
""", imputil.FutureFeatures(print_function=True)),
("""\
"module docstring"
from __future__ import print_function, with_statement
from __future__ import nested_scopes
""", imputil.FutureFeatures(print_function=True)),
('from __future__ import absolute_import',
imputil.FutureFeatures(absolute_import=True)),
('from __future__ import absolute_import, print_function',
imputil.FutureFeatures(absolute_import=True, print_function=True)),
('foo = 123\nfrom __future__ import print_function',
imputil.FutureFeatures()),
('import os\nfrom __future__ import print_function',
imputil.FutureFeatures()),
]
for tc in testcases:
source, want = tc
mod = pythonparser.parse(textwrap.dedent(source))
_, got = imputil.parse_future_features(mod)
self.assertEqual(want.__dict__, got.__dict__)
def testUnimplementedFutureRaises(self):
mod = pythonparser.parse('from __future__ import division')
msg = 'future feature division not yet implemented by grumpy'
self.assertRaisesRegexp(util.ParseError, msg,
imputil.parse_future_features, mod)
def testUndefinedFutureRaises(self):
mod = pythonparser.parse('from __future__ import foo')
self.assertRaisesRegexp(
util.ParseError, 'future feature foo is not defined',
imputil.parse_future_features, mod)
if __name__ == '__main__':
unittest.main()
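# Hedged usage sketch (mirrors ParseFutureFeaturesTest above):
#
#     mod = pythonparser.parse('from __future__ import print_function')
#     _, features = imputil.parse_future_features(mod)
#     assert features.print_function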
|
corona10/grumpy
|
grumpy-tools-src/grumpy_tools/compiler/imputil_test.py
|
Python
|
apache-2.0
| 13,859
|
[
"VisIt"
] |
9ab8b390aa5b91a9e86670527d70ab4f860dd97c7f38e559821726f164027bc8
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=================================================
Parsing Network Information Tables in DVB streams
=================================================
ParseNetworkInformationTable parses a reconstructed PSI table from a DVB MPEG
Transport Stream, and outputs a dictionary containing the data in the table.
The purpose of the NIT and details of the fields within in are defined in the
DVB SI specification:
- ETSI EN 300 468
"Digital Video Broadcasting (DVB); Specification for Service Information (SI)
in DVB systems"
ETSI / EBU (DVB group)
See Kamaelia.Support.DVB.Descriptors for information on how they are parsed.
Example Usage
-------------
A simple pipeline to receive, parse and display the Network Information Table in
a multiplex::
FREQUENCY = 505.833330
feparams = {
"inversion" : dvb3.frontend.INVERSION_AUTO,
"constellation" : dvb3.frontend.QAM_16,
"code_rate_HP" : dvb3.frontend.FEC_3_4,
"code_rate_LP" : dvb3.frontend.FEC_3_4,
}
NIT_PID = 0x10
Pipeline( DVB_Multiplex(FREQUENCY, [NIT_PID], feparams),
DVB_Demuxer({ NIT_PID:["outbox"]}),
ReassemblePSITables(),
ParseNetworkInformationTable_ActualNetwork(),
PrettifyNetworkInformationTable(),
ConsoleEchoer(),
).run()
Behaviour
---------
At initialisation, specify whether you want ParseNetworkInformationTable to
parse 'actual' or 'other' tables (or both). 'Actual' tables describe transport
streams/multiplexes within the same network that this table has been
broadcast in. 'Other' tables describe transport streams/multiplexes being
broadcast for other networks. For example: 'freeview' is a network in the UK
that broadcasts several multiplexes.
For example::
ParseNetworkInformationTable(acceptTables = {0x40:"ACTUAL",0x41:"OTHER"})
There are shorthands available for the various combinations::
ParseNetworkInformationTable_ActualNetwork()
ParseNetworkInformationTable_OtherNetwork()
ParseNetworkInformationTable_ActualAndOtherNetwork():
Send reconstructed PSI table 'sections' to the "inbox" inbox. When all sections
of the table have arrived, ParseNetworkInformationTable will parse the table
and send it out of its "outbox" outbox.
If the table is unchanged since last time it was parsed, then it will not be
sent out. Parsed tables are only sent out when they are new or have just
changed.
The parsed table is sent out as a dictionary data structure, like this (list of
transport streams abridged for marginally better brevity)::
{
'table_id' : 64,
'table_type' : 'NIT',
'current' : 1,
'actual_other': 'ACTUAL',
'network_id' : 12293,
'descriptors': [ (64, {'type': 'network_name', 'network_name': 'Crystal Palace'} ) ]
'transport_streams': [
{ 'transport_stream_id': 4100,
'descriptors': [
( 65, { 'type': 'service_list'
'services': [
{'service_type': ('digital television service',), 'service_id': 4164},
{'service_type': ('digital television service',), 'service_id': 4228},
{'service_type': ('digital television service',), 'service_id': 4351},
{'service_type': ('digital television service',), 'service_id': 4415},
{'service_type': ('digital television service',), 'service_id': 4479},
{'service_type': ('digital television service',), 'service_id': 4671}
],
} ),
( 90, { 'other_frequencies': 1,
'params': { 'inversion': 2,
'transmission_mode': 0,
'hierarchy_information': 0,
'code_rate_LP': 3,
'guard_interval': 0,
'bandwidth': 0,
'frequency': 505833330,
'constellation': 1,
'code_rate_HP': 3
},
'type': 'terrestrial_delivery_system'
} ),
( 98, { 'type': 'frequency_list',
'frequencies': [697833330, 690166670, 554000000]
} ),
( 95, { 'type': 'private_data_specifier',
'private_data_specifier': 9018
} ),
(131, { 'type': 'logical_channel',
'mappings': { 4228: 2, 4351: 7, 4479: 105, 4164: 1, 4415: 80, 4671: 70 }
} )
],
'original_network_id': 9018
},
.....
{ 'transport_stream_id': 24576,
'descriptors': [
( 65, { 'services': [
{'service_type': ('digital television service',), 'service_id': 25664},
{'service_type': ('digital television service',), 'service_id': 25728},
{'service_type': ('digital television service',), 'service_id': 25792},
{'service_type': ('digital television service',), 'service_id': 25856},
{'service_type': ('digital television service',), 'service_id': 25920},
{'service_type': ('digital radio sound service',), 'service_id': 26176},
{'service_type': ('digital radio sound service',), 'service_id': 26240},
{'service_type': ('digital radio sound service',), 'service_id': 26304},
{'service_type': ('digital radio sound service',), 'service_id': 26368},
{'service_type': ('digital radio sound service',), 'service_id': 26432},
{'service_type': ('digital radio sound service',), 'service_id': 26496},
{'service_type': ('digital radio sound service',), 'service_id': 26560},
{'service_type': ('digital radio sound service',), 'service_id': 26624},
{'service_type': ('digital radio sound service',), 'service_id': 26688},
{'service_type': ('data broadcast service',), 'service_id': 27008},
{'service_type': ('digital television service',), 'service_id': 27072},
{'service_type': ('digital television service',), 'service_id': 27136}
],
'type': 'service_list'
} ),
( 90, { 'other_frequencies': 1,
'params': { 'inversion': 2,
'transmission_mode': 0,
'hierarchy_information': 0,
'code_rate_LP': 3,
'guard_interval': 0,
'bandwidth': 0,
'frequency': 537833330,
'constellation': 1,
'code_rate_HP': 3
},
'type': 'terrestrial_delivery_system'
} ),
( 98, { 'frequencies': [738000000, 826000000, 834000000],
'type': 'frequency_list'
} ),
( 95, { 'type': 'private_data_specifier',
'private_data_specifier': 9018
} ),
(131, { 'type': 'logical_channel',
'mappings': { 25664: 18, 25728: 21, 26496: 710, 26432: 717,
26560: 711, 26624: 715, 26688: 716, 25792: 19,
25856: 20, 25920: 22, 27008: 300, 27072: 31,
27136: 29, 26176: 713, 26240: 712, 26304: 722,
26368: 718
}
} )
],
'original_network_id': 9018
}
],
}
This is an instantaneous snapshot of the NIT for Crystal Palace MUX 1
transmission (505.8MHz) in the UK on 21st Dec 2006. It describes each of
the transport streams being broadcast, including how to tune to them (frequency
and other parameters) and an overview of the services in each. It also describes
the mapping of channel numbers on the user's remote control to service ids.
If this data is sent on through a PrettifyNetworkInformationTable component,
then the equivalent output is a string containing this (again, abridged for
brevity)::
NIT received:
Table ID : 64
Table is valid for : CURRENT (valid)
Actual or Other n/w: ACTUAL
Network ID : 12293
Network descriptors:
Network Descriptors:
Descriptor 0x40 : network_name
network_name : 'Crystal Palace'
Transport Stream:
transport stream id : 4100
original network id : 9018
Transport Stream Descriptors:
Descriptor 0x41 : service_list
services : [ {'service_type': ('digital television service',), 'service_id': 4164},
{'service_type': ('digital television service',), 'service_id': 4228},
{'service_type': ('digital television service',), 'service_id': 4351},
{'service_type': ('digital television service',), 'service_id': 4415},
{'service_type': ('digital television service',), 'service_id': 4479},
{'service_type': ('digital television service',), 'service_id': 4671}]
Descriptor 0x5a : terrestrial_delivery_system
other_frequencies : 1
params : { 'bandwidth': 0,
'code_rate_HP': 3,
'code_rate_LP': 3,
'constellation': 1,
'frequency': 505833330,
'guard_interval': 0,
'hierarchy_information': 0,
'inversion': 2,
'transmission_mode': 0}
Descriptor 0x62 : frequency_list
frequencies : [697833330, 690166670, 554000000]
Descriptor 0x5f : private_data_specifier
private_data_specifier : 9018
Descriptor 0x83 : logical_channel
mappings : {4228: 2, 4351: 7, 4479: 105, 4164: 1, 4415: 80, 4671: 70}
.....
Transport Stream:
transport stream id : 24576
original network id : 9018
Transport Stream Descriptors:
Descriptor 0x41 : service_list
services : [ {'service_type': ('digital television service',), 'service_id': 25664},
{'service_type': ('digital television service',), 'service_id': 25728},
{'service_type': ('digital television service',), 'service_id': 25792},
{'service_type': ('digital television service',), 'service_id': 25856},
{'service_type': ('digital television service',), 'service_id': 25920},
{'service_type': ('digital radio sound service',), 'service_id': 26176},
{'service_type': ('digital radio sound service',), 'service_id': 26240},
{'service_type': ('digital radio sound service',), 'service_id': 26304},
{'service_type': ('digital radio sound service',), 'service_id': 26368},
{'service_type': ('digital radio sound service',), 'service_id': 26432},
{'service_type': ('digital radio sound service',), 'service_id': 26496},
{'service_type': ('digital radio sound service',), 'service_id': 26560},
{'service_type': ('digital radio sound service',), 'service_id': 26624},
{'service_type': ('digital radio sound service',), 'service_id': 26688},
{'service_type': ('data broadcast service',), 'service_id': 27008},
{'service_type': ('digital television service',), 'service_id': 27072},
{'service_type': ('digital television service',), 'service_id': 27136}]
Descriptor 0x5a : terrestrial_delivery_system
other_frequencies : 1
params : { 'bandwidth': 0,
'code_rate_HP': 3,
'code_rate_LP': 3,
'constellation': 1,
'frequency': 537833330,
'guard_interval': 0,
'hierarchy_information': 0,
'inversion': 2,
'transmission_mode': 0}
Descriptor 0x62 : frequency_list
frequencies : [738000000, 826000000, 834000000]
Descriptor 0x5f : private_data_specifier
private_data_specifier : 9018
Descriptor 0x83 : logical_channel
mappings : { 25664: 18,
25728: 21,
25792: 19,
25856: 20,
25920: 22,
26176: 713,
26240: 712,
26304: 722,
26368: 718,
26432: 717,
26496: 710,
26560: 711,
26624: 715,
26688: 716,
27008: 300,
27072: 31,
27136: 29}
ParseNetworkInformationTable can collect the sections of, and then parse, both
'current' and 'next' tables simultaneously.
If a shutdownMicroprocess or producerFinished message is received on the
"control" inbox, then it will immediately be sent on out of the "signal" outbox
and the component will then immediately terminate.
How does it work?
-----------------
ParseNetworkInformationTable logs all the table sections it receives, until it
determines it has the complete set; then it parses them.
If the version number field in any table section changes, then the log is
cleared, and the component starts collecting the sections again from scratch.
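In simplified form, the bookkeeping works roughly like this (an illustrative
sketch only; the real implementation below also CRC-checks each section and
tracks several tables at once)::

    if version != latest_versions.get(index, -1):         # version changed?
        latest_versions[index] = version
        sections[index] = [None] * (last_section_number + 1)

    if sections[index][section_number] is None:           # a new section?
        sections[index][section_number] = section_data
        if None not in sections[index]:                   # table complete?
            send(parseTable(index, sections[index]))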
"""
from Axon.Component import component
from Axon.Ipc import producerFinished,shutdownMicroprocess
from Kamaelia.Support.DVB.Descriptors import parseDescriptor
from Kamaelia.Support.DVB.CRC import dvbcrc
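# DVB allocates PID 0x0010 to carry Network Information Table sections
# (see ETSI EN 300 468)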
NIT_PID = 0x10
def ParseNetworkInformationTable_ActualNetwork():
"""\
ParseNetworkInformationTable_ActualNetwork() -> new ParseNetworkInformationTable component.
Instantiates a ParseNetworkInformationTable component configured to parse
'ACTUAL' Network tables only (table id 0x40)
"""
return ParseNetworkInformationTable(acceptTables = {0x40:"ACTUAL"})
def ParseNetworkInformationTable_OtherNetwork():
"""\
ParseNetworkInformationTable_OtherNetwork() -> new ParseNetworkInformationTable component.
Instantiates a ParseNetworkInformationTable component configured to parse
    'OTHER' Network tables only (table id 0x41)
"""
return ParseNetworkInformationTable(acceptTables = {0x41:"OTHER"})
def ParseNetworkInformationTable_ActualAndOtherNetwork():
"""\
ParseNetworkInformationTable_ActualAndOtherNetwork() -> new ParseNetworkInformationTable component.
Instantiates a ParseNetworkInformationTable component configured to parse
both 'ACTUAL' and 'OTHER' Network tables (table ids 0x40 and 0x41)
"""
return ParseNetworkInformationTable(acceptTables = {0x40:"ACTUAL",0x41:"OTHER"})
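# Note: the three prefabs above are convenience shorthands; passing your own
# acceptTables dict to ParseNetworkInformationTable() achieves the same effect.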
class ParseNetworkInformationTable(component):
"""\
ParseNetworkInformationTable([acceptTables]) -> new ParseNetworkInformationTable component.
Send reconstructed PSI table sections to the "inbox" inbox. When a complete
table is assembled and parsed, the result is sent out of the "outbox" outbox
as a dictionary.
Doesn't emit anything again until the version number of the table changes.
Keyword arguments::
- acceptTables - dict of (table_id,string_description) mappings for tables to be accepted (default={0x40:"ACTUAL",0x41:"OTHER"})
"""
Inboxes = { "inbox" : "DVB PSI Packets from a single PID containing NIT table sections",
"control" : "Shutdown signalling",
}
Outboxes = { "outbox" : "Parsed NIT (only when it changes)",
"signal" : "Shutdown signalling",
}
def __init__(self, acceptTables = {0x40:"ACTUAL",0x41:"OTHER"}):
super(ParseNetworkInformationTable,self).__init__()
self.acceptTables = acceptTables
def parseTable(self, index, sections):
(table_id, current_next, network_id) = index
msg = { "table_type" : "NIT",
"table_id" : table_id,
"actual_other" : self.acceptTables[table_id],
"current" : current_next,
"network_id" : network_id,
}
tss = []
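        # one dict per transport stream, accumulated across all the table's sections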
for (data,section_length) in sections:
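            # bytes 8..9 of each section carry a 12-bit network_descriptors_length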
network_descriptors_length = ((ord(data[8])<<8) + ord(data[9])) & 0x0fff
i=10
network_descriptors_end = i+network_descriptors_length
msg['descriptors'] = []
while i < network_descriptors_end:
descriptor, i = parseDescriptor(i,data)
msg['descriptors'].append(descriptor)
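            # a second 12-bit length field introduces the transport stream loop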
ts_loop_length = ((ord(data[i])<<8) + ord(data[i+1])) & 0x0fff
i=i+2
ts_loop_end = i+ts_loop_length
while i < ts_loop_end:
ts = {}
ts['transport_stream_id'] = (ord(data[i])<<8) + ord(data[i+1])
ts['original_network_id'] = (ord(data[i+2])<<8) + ord(data[i+3])
transport_descriptors_length = ((ord(data[i+4])<<8) + ord(data[i+5])) & 0x0fff
i=i+6
transport_descriptors_end = i+transport_descriptors_length
ts['descriptors'] = []
while i < transport_descriptors_end:
descriptor,i = parseDescriptor(i,data)
ts['descriptors'].append(descriptor)
tss.append(ts)
msg['transport_streams'] = tss
return msg
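    # The dict returned above looks roughly like this (abridged):
    #   { "table_type" : "NIT", "table_id" : 0x40, "actual_other" : "ACTUAL",
    #     "current" : 1, "network_id" : 12293,
    #     "descriptors" : [...], "transport_streams" : [ {...}, ... ] }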
def shutdown(self):
while self.dataReady("control"):
msg = self.recv("control")
self.send(msg,"signal")
if isinstance(msg, (shutdownMicroprocess, producerFinished)):
return True
return False
def main(self):
# initialise buffers
# ...for holding table sections (until we get complete table)
# indexed by (table_id, current_next, transport_stream_id, original_network_id)
sections = {}
latest_versions = {}
last_section_numbers = {}
missing_sections_count = {}
while not self.shutdown():
while self.dataReady("inbox"):
data = self.recv("inbox")
# extract basic info from this PSI packet - enough to work
# out what table it is; what section, and the version
e = [ord(data[i]) for i in (0,1,2) ]
table_id = e[0]
                if table_id not in self.acceptTables:
continue
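                # section_syntax_indicator (top bit of byte 1) must be set
                # for a long-form section such as the NIT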
syntax = e[1] & 0x80
if not syntax:
continue
section_length = ((e[1]<<8) + e[2]) & 0x0fff
                # now we're reasonably certain we've got a correct packet
# we'll convert the rest of the packet
e = [ord(data[i]) for i in (0,1,2,3,4,5,6,7) ]
network_id = (e[3]<<8) + e[4]
                version = (e[5] & 0x3e)   # kept shifted left 1 bit; comparisons below are consistent, so no need to >> 1
current_next = e[5] & 0x01
section_number = e[6]
last_section_number = e[7]
index = (table_id, current_next, network_id)
# if version number has changed, flush out all previously fetched tables
crcpass = False
if version != latest_versions.get(index,-1):
if not dvbcrc(data[:3+section_length]):
continue
else:
crcpass = True
latest_versions[index] = version
sections[index] = [None]*(last_section_number+1)
missing_sections_count[index] = last_section_number+1
                if sections[index][section_number] is None:
if crcpass or dvbcrc(data[:3+section_length]):
sections[index][section_number] = (data, section_length)
missing_sections_count[index] -= 1
# see if we have all sections of the table
# if we do, send the whole bundle onwards
if missing_sections_count[index] == 0:
table = self.parseTable(index, sections[index])
self.send( table, "outbox")
self.pause()
yield 1
__kamaelia_components__ = ( ParseNetworkInformationTable, )
__kamaelia_prefabs__ = ( ParseNetworkInformationTable_ActualNetwork,
ParseNetworkInformationTable_OtherNetwork,
ParseNetworkInformationTable_ActualAndOtherNetwork, )
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Util.Console import ConsoleEchoer
from Kamaelia.Device.DVB.Core import DVB_Multiplex, DVB_Demuxer
from Kamaelia.Device.DVB.Parse.ReassemblePSITables import ReassemblePSITables
from Kamaelia.Device.DVB.Parse.PrettifyTables import PrettifyNetworkInformationTable
import dvb3.frontend
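    # example tuning parameters for a UK DVB-T multiplex; adjust these and
    # the frequency below to match your local transmitter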
feparams = {
"inversion" : dvb3.frontend.INVERSION_AUTO,
"constellation" : dvb3.frontend.QAM_16,
"code_rate_HP" : dvb3.frontend.FEC_3_4,
"code_rate_LP" : dvb3.frontend.FEC_3_4,
}
Pipeline( DVB_Multiplex(505833330.0/1000000.0, [NIT_PID], feparams),
DVB_Demuxer({ NIT_PID:["outbox"]}),
ReassemblePSITables(),
ParseNetworkInformationTable_ActualNetwork(),
ConsoleEchoer(forwarder=True),
PrettifyNetworkInformationTable(),
ConsoleEchoer(),
).run()
|
sparkslabs/kamaelia
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Device/DVB/Parse/ParseNetworkInformationTable.py
|
Python
|
apache-2.0
| 25,045
|
[
"CRYSTAL"
] |
edbd4519a0f86693300ed533f81c5d99b2514a26b1478fa55f740a66297d88ee
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.