text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
from __future__ import absolute_import
import pytest
import re
import responses
import six
from symbolic import SourceMapTokenMatch
from mock import patch
from requests.exceptions import RequestException
from sentry import http
from sentry.lang.javascript.processor import (
JavaScriptStacktraceProcessor,
discover_sourcemap,
fetch_sourcemap,
fetch_file,
generate_module,
trim_line,
fetch_release_file,
UnparseableSourcemap,
get_max_age,
CACHE_CONTROL_MAX,
CACHE_CONTROL_MIN,
)
from sentry.lang.javascript.errormapping import (rewrite_exception, REACT_MAPPING_URL)
from sentry.models import File, Release, ReleaseFile, EventError
from sentry.testutils import TestCase
from sentry.utils.strings import truncatechars
base64_sourcemap = 'data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiZ2VuZXJhdGVkLmpzIiwic291cmNlcyI6WyIvdGVzdC5qcyJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiO0FBQUEiLCJzb3VyY2VzQ29udGVudCI6WyJjb25zb2xlLmxvZyhcImhlbGxvLCBXb3JsZCFcIikiXX0='
unicode_body = b"""function add(a, b) {
"use strict";
return a + b; // f\xc3\xb4o
}""".decode('utf-8')
class JavaScriptStacktraceProcessorTest(TestCase):
    """Tests for configuration inference in JavaScriptStacktraceProcessor."""

    def test_infers_allow_scraping(self):
        """allow_scraping defaults on and honors project and org options."""
        project = self.create_project()

        def make_processor():
            return JavaScriptStacktraceProcessor({}, None, project)

        # Scraping is allowed when nothing is configured.
        assert make_processor().allow_scraping

        # Disabling the project-level option turns scraping off.
        project.update_option('sentry:scrape_javascript', False)
        assert not make_processor().allow_scraping

        # With no project option, the organization-level option is consulted.
        project.delete_option('sentry:scrape_javascript')
        project.organization.update_option('sentry:scrape_javascript', False)
        assert not make_processor().allow_scraping
class FetchReleaseFileTest(TestCase):
    # Tests for fetch_release_file(), which resolves artifacts uploaded to a
    # Release (optionally scoped to a Dist) instead of fetching over HTTP.

    def test_unicode(self):
        """A stored file with a non-ASCII body comes back as bytes plus its charset."""
        project = self.project
        release = Release.objects.create(
            organization_id=project.organization_id,
            version='abc',
        )
        release.add_project(project)

        file = File.objects.create(
            name='file.min.js',
            type='release.file',
            headers={'Content-Type': 'application/json; charset=utf-8'},
        )

        binary_body = unicode_body.encode('utf-8')
        file.putfile(six.BytesIO(binary_body))

        ReleaseFile.objects.create(
            name='file.min.js',
            release=release,
            organization_id=project.organization_id,
            file=file,
        )

        result = fetch_release_file('file.min.js', release)

        # Body must be raw bytes; the charset from the stored Content-Type
        # header is surfaced as the UrlResult encoding.
        assert isinstance(result.body, six.binary_type)
        assert result == http.UrlResult(
            'file.min.js',
            {'content-type': 'application/json; charset=utf-8'},
            binary_body,
            200,
            'utf-8',
        )

        # test with cache hit, which should be compressed
        new_result = fetch_release_file('file.min.js', release)
        assert result == new_result

    def test_distribution(self):
        """When a dist is given, the dist-scoped artifact wins over the plain one."""
        project = self.project
        release = Release.objects.create(
            organization_id=project.organization_id,
            version='abc',
        )
        release.add_project(project)

        # Decoy artifact with the same name but no dist and an empty body.
        other_file = File.objects.create(
            name='file.min.js',
            type='release.file',
            headers={'Content-Type': 'application/json; charset=utf-8'},
        )
        file = File.objects.create(
            name='file.min.js',
            type='release.file',
            headers={'Content-Type': 'application/json; charset=utf-8'},
        )

        binary_body = unicode_body.encode('utf-8')
        other_file.putfile(six.BytesIO(b''))
        file.putfile(six.BytesIO(binary_body))

        dist = release.add_dist('foo')

        ReleaseFile.objects.create(
            name='file.min.js',
            release=release,
            organization_id=project.organization_id,
            file=other_file,
        )
        ReleaseFile.objects.create(
            name='file.min.js',
            release=release,
            dist=dist,
            organization_id=project.organization_id,
            file=file,
        )

        result = fetch_release_file('file.min.js', release, dist)

        assert isinstance(result.body, six.binary_type)
        assert result == http.UrlResult(
            'file.min.js',
            {'content-type': 'application/json; charset=utf-8'},
            binary_body,
            200,
            'utf-8',
        )

        # test with cache hit, which should be compressed
        new_result = fetch_release_file('file.min.js', release, dist)
        assert result == new_result

    def test_fallbacks(self):
        """A full url lookup falls back to the '~/path' form, ignoring the query."""
        project = self.project
        release = Release.objects.create(
            organization_id=project.organization_id,
            version='abc',
        )
        release.add_project(project)

        file = File.objects.create(
            name='~/file.min.js',
            type='release.file',
            headers={'Content-Type': 'application/json; charset=utf-8'},
        )

        binary_body = unicode_body.encode('utf-8')
        file.putfile(six.BytesIO(binary_body))

        ReleaseFile.objects.create(
            name='~/file.min.js',
            release=release,
            organization_id=project.organization_id,
            file=file,
        )

        result = fetch_release_file('http://example.com/file.min.js?lol', release)

        assert isinstance(result.body, six.binary_type)
        assert result == http.UrlResult(
            'http://example.com/file.min.js?lol',
            {'content-type': 'application/json; charset=utf-8'},
            binary_body,
            200,
            'utf-8',
        )
class FetchFileTest(TestCase):
    # Tests for fetch_file(), which retrieves a url over HTTP (with caching)
    # or resolves bare paths through release artifacts.

    @responses.activate
    def test_simple(self):
        """A fetched url is returned and served from cache on the second call."""
        responses.add(
            responses.GET, 'http://example.com', body='foo bar', content_type='application/json'
        )

        result = fetch_file('http://example.com')
        assert len(responses.calls) == 1
        assert result.url == 'http://example.com'
        assert result.body == 'foo bar'
        assert result.headers == {'content-type': 'application/json'}

        # ensure we use the cached result
        result2 = fetch_file('http://example.com')
        assert len(responses.calls) == 1
        assert result == result2

    @responses.activate
    def test_with_token(self):
        """The project token is sent in the configured (or default) header."""
        responses.add(
            responses.GET,
            re.compile(r'http://example.com/\d+/'),
            body='foo bar',
            content_type='application/json'
        )

        self.project.update_option('sentry:token', 'foobar')
        self.project.update_option('sentry:origins', ['*'])

        default_header_name = 'X-Sentry-Token'
        # (configured header-name option, header expected on the request);
        # None and '' both fall back to the default header name.
        header_pairs = [
            (None, default_header_name),
            ('', default_header_name),
            ('X-Custom-Token-Header', 'X-Custom-Token-Header'),
        ]

        for i, (header_name_option_value, expected_request_header_name) in enumerate(header_pairs):
            self.project.update_option('sentry:token_header', header_name_option_value)

            # Distinct url per iteration — presumably to avoid the fetch
            # cache hit seen in test_simple; confirm against fetch_file.
            url = 'http://example.com/{}/'.format(i)
            result = fetch_file(url, project=self.project)

            assert result.url == url
            assert result.body == 'foo bar'
            assert result.headers == {'content-type': 'application/json'}
            # Exactly one new HTTP call per iteration, carrying the token.
            assert len(responses.calls) == i + 1
            assert responses.calls[i].request.headers[expected_request_header_name] == 'foobar'

    @responses.activate
    def test_connection_failure(self):
        """A failed request raises BadSource and the failure itself is cached."""
        responses.add(responses.GET, 'http://example.com', body=RequestException())

        with pytest.raises(http.BadSource):
            fetch_file('http://example.com')

        assert len(responses.calls) == 1

        # ensure we use the cached domain-wide failure for the second call
        with pytest.raises(http.BadSource):
            fetch_file('http://example.com/foo/bar')

        assert len(responses.calls) == 1

    @responses.activate
    def test_non_url_without_release(self):
        """A bare path cannot be fetched when no release is given."""
        with pytest.raises(http.BadSource):
            fetch_file('/example.js')

    @responses.activate
    @patch('sentry.lang.javascript.processor.fetch_release_file')
    def test_non_url_with_release(self, mock_fetch_release_file):
        """A bare path is resolved through fetch_release_file for a release."""
        mock_fetch_release_file.return_value = http.UrlResult(
            '/example.js',
            {'content-type': 'application/json'},
            'foo',
            200,
            None,
        )

        release = Release.objects.create(version='1', organization_id=self.project.organization_id)
        release.add_project(self.project)

        result = fetch_file('/example.js', release=release)
        assert result.url == '/example.js'
        assert result.body == 'foo'
        assert isinstance(result.body, six.binary_type)
        assert result.headers == {'content-type': 'application/json'}
        assert result.encoding is None

    @responses.activate
    def test_unicode_body(self):
        """A utf-8 response body round-trips and the encoding is recorded."""
        responses.add(
            responses.GET,
            'http://example.com',
            body=b'"f\xc3\xb4o bar"'.decode('utf-8'),
            content_type='application/json; charset=utf-8'
        )

        result = fetch_file('http://example.com')
        assert len(responses.calls) == 1
        assert result.url == 'http://example.com'
        assert result.body == '"f\xc3\xb4o bar"'
        assert result.headers == {'content-type': 'application/json; charset=utf-8'}
        assert result.encoding == 'utf-8'

        # ensure we use the cached result
        result2 = fetch_file('http://example.com')
        assert len(responses.calls) == 1
        assert result == result2

    @responses.activate
    def test_truncated(self):
        """A truncated url is reported as a missing-source fetch error."""
        url = truncatechars('http://example.com', 3)
        with pytest.raises(http.CannotFetch) as exc:
            fetch_file(url)
        assert exc.value.data['type'] == EventError.JS_MISSING_SOURCE
        assert exc.value.data['url'] == url
class CacheControlTest(TestCase):
    """Tests for get_max_age() parsing of Cache-Control response headers."""

    @staticmethod
    def _headers(cache_control=None):
        # Build a minimal response-header dict, optionally with Cache-Control.
        headers = {'content-type': 'application/json'}
        if cache_control is not None:
            headers['cache-control'] = cache_control
        return headers

    def test_simple(self):
        assert get_max_age(self._headers('max-age=120')) == 120

    def test_max_and_min(self):
        # The boundary values themselves are accepted unchanged.
        assert get_max_age(self._headers('max-age=%s' % CACHE_CONTROL_MAX)) == CACHE_CONTROL_MAX
        assert get_max_age(self._headers('max-age=%s' % CACHE_CONTROL_MIN)) == CACHE_CONTROL_MIN

    def test_out_of_bounds(self):
        # Out-of-range values are clamped into [CACHE_CONTROL_MIN, CACHE_CONTROL_MAX].
        greater_than_max = CACHE_CONTROL_MAX + 1
        assert get_max_age(self._headers('max-age=%s' % greater_than_max)) == CACHE_CONTROL_MAX
        less_than_min = CACHE_CONTROL_MIN - 1
        assert get_max_age(self._headers('max-age=%s' % less_than_min)) == CACHE_CONTROL_MIN

    def test_no_cache_control(self):
        # No Cache-Control header at all falls back to the minimum.
        assert get_max_age(self._headers()) == CACHE_CONTROL_MIN

    def test_additional_cache_control_values(self):
        # max-age is extracted even when other directives are present.
        assert get_max_age(self._headers('private, s-maxage=60, max-age=120')) == 120

    def test_valid_input(self):
        # Unparseable max-age values fall back to the minimum.
        assert get_max_age(self._headers('max-age=12df0sdgfjhdgf')) == CACHE_CONTROL_MIN
        assert get_max_age(self._headers('max-age=df0sdgfjhdgf')) == CACHE_CONTROL_MIN
class DiscoverSourcemapTest(TestCase):
    """Tests for discover_sourcemap(result)."""

    def test_simple(self):
        base_url = 'http://example.com'
        map_url = 'http://example.com/source.map.js'

        # (headers, body, expected sourcemap url) triples.
        cases = [
            # Nothing advertised anywhere.
            ({}, '', None),
            # Sourcemap advertised via response headers.
            ({'x-sourcemap': map_url}, '', map_url),
            ({'sourcemap': map_url}, '', map_url),
            # sourceMappingURL comment before the code (old '@' and new '#' syntax).
            ({}, '//@ sourceMappingURL=http://example.com/source.map.js\nconsole.log(true)', map_url),
            ({}, '//# sourceMappingURL=http://example.com/source.map.js\nconsole.log(true)', map_url),
            # sourceMappingURL comment after the code.
            ({}, 'console.log(true)\n//@ sourceMappingURL=http://example.com/source.map.js', map_url),
            ({}, 'console.log(true)\n//# sourceMappingURL=http://example.com/source.map.js', map_url),
            # When several comments are present, the last one wins.
            ({},
             'console.log(true)\n//# sourceMappingURL=http://example.com/source.map.js\n//# sourceMappingURL=http://example.com/source2.map.js',
             'http://example.com/source2.map.js'),
            # sourceMappingURL found directly after code w/o newline.
            ({}, 'console.log(true);//# sourceMappingURL=http://example.com/source.map.js', map_url),
            # Relative url resolved against the result url; trailing comment stripped.
            ({}, '//# sourceMappingURL=app.map.js/*ascii:lol*/', 'http://example.com/app.map.js'),
        ]

        for headers, body, expected in cases:
            result = http.UrlResult(base_url, headers, body, 200, None)
            assert discover_sourcemap(result) == expected

        # A comment-only "url" is rejected outright.
        result = http.UrlResult(base_url, {}, '//# sourceMappingURL=/*lol*/', 200, None)
        with self.assertRaises(AssertionError):
            discover_sourcemap(result)
class GenerateModuleTest(TestCase):
    """Tests for generate_module(), which derives a module name from a path/url."""

    def test_simple(self):
        # A missing path yields the sentinel module name.
        assert generate_module(None) == '<unknown module>'

        # (input url/path, expected module name) pairs.
        cases = [
            ('http://example.com/foo.js', 'foo'),
            ('http://example.com/foo/bar.js', 'foo/bar'),
            # Leading framework/version/hash path segments are stripped.
            ('http://example.com/js/foo/bar.js', 'foo/bar'),
            ('http://example.com/javascript/foo/bar.js', 'foo/bar'),
            ('http://example.com/1.0/foo/bar.js', 'foo/bar'),
            ('http://example.com/v1/foo/bar.js', 'foo/bar'),
            ('http://example.com/v1.0.0/foo/bar.js', 'foo/bar'),
            ('http://example.com/_baz/foo/bar.js', 'foo/bar'),
            ('http://example.com/1/2/3/foo/bar.js', 'foo/bar'),
            ('http://example.com/abcdef0/foo/bar.js', 'foo/bar'),
            ('http://example.com/92cd589eca8235e7b373bf5ae94ebf898e3b949c/foo/bar.js', 'foo/bar'),
            ('http://example.com/7d6d00eae0ceccdc7ee689659585d95f/foo/bar.js', 'foo/bar'),
            # Extensions and query strings are dropped.
            ('http://example.com/foo/bar.coffee', 'foo/bar'),
            ('http://example.com/foo/bar.js?v=1234', 'foo/bar'),
            ('/foo/bar.js', 'foo/bar'),
            ('/foo/bar.ts', 'foo/bar'),
            ('../../foo/bar.js', 'foo/bar'),
            ('../../foo/bar.ts', 'foo/bar'),
            ('../../foo/bar.awesome', 'foo/bar'),
            ('../../foo/bar', 'foo/bar'),
            # Content hashes embedded in the file name are removed.
            ('/foo/bar-7d6d00eae0ceccdc7ee689659585d95f.js', 'foo/bar'),
            ('/bower_components/foo/bar.js', 'foo/bar'),
            ('/node_modules/foo/bar.js', 'foo/bar'),
            ('http://example.com/vendor.92cd589eca8235e7b373bf5ae94ebf898e3b949c.js', 'vendor'),
            ('/a/javascripts/application-bundle-149360d3414c26adac3febdf6832e25c.min.js',
             'a/javascripts/application-bundle'),
            ('https://example.com/libs/libs-20150417171659.min.js', 'libs/libs'),
            # webpack:// and app:// schemes (duplicate kept from the original).
            ('webpack:///92cd589eca8235e7b373bf5ae94ebf898e3b949c/vendor.js', 'vendor'),
            ('webpack:///92cd589eca8235e7b373bf5ae94ebf898e3b949c/vendor.js', 'vendor'),
            ('app:///92cd589eca8235e7b373bf5ae94ebf898e3b949c/vendor.js', 'vendor'),
            ('app:///example/92cd589eca8235e7b373bf5ae94ebf898e3b949c/vendor.js', 'vendor'),
            ('~/app/components/projectHeader/projectSelector.jsx',
             'app/components/projectHeader/projectSelector'),
        ]

        for url, expected in cases:
            assert generate_module(url) == expected, url
class FetchSourcemapTest(TestCase):
    """Tests for fetch_sourcemap() with data: URIs and remote urls."""

    def _assert_decoded_view(self, smap_view):
        # The decoded base64_sourcemap maps one token back to /test.js.
        expected_tokens = [SourceMapTokenMatch(0, 0, 1, 0, src='/test.js', src_id=0)]
        assert list(smap_view) == expected_tokens

        source_view = smap_view.get_sourceview(0)
        assert source_view.get_source() == u'console.log("hello, World!")'
        assert smap_view.get_source_name(0) == u'/test.js'

    def test_simple_base64(self):
        self._assert_decoded_view(fetch_sourcemap(base64_sourcemap))

    def test_base64_without_padding(self):
        # The decoder must tolerate missing '=' padding.
        self._assert_decoded_view(fetch_sourcemap(base64_sourcemap.rstrip('=')))

    def test_broken_base64(self):
        with pytest.raises(UnparseableSourcemap):
            fetch_sourcemap('data:application/json;base64,xxx')

    @responses.activate
    def test_garbage_json(self):
        responses.add(
            responses.GET, 'http://example.com', body='xxxx', content_type='application/json'
        )

        with pytest.raises(UnparseableSourcemap):
            fetch_sourcemap('http://example.com')
class TrimLineTest(TestCase):
    """Tests for trim_line(), which windows long source lines around a column."""

    long_line = (
        'The public is more familiar with bad design than good design. It is, in '
        'effect, conditioned to prefer bad design, because that is what it lives '
        'with. The new becomes threatening, the old reassuring.'
    )

    def test_simple(self):
        # Short lines pass through untouched.
        assert trim_line('foo') == 'foo'

        # (column hint or None, expected windowed text) pairs.
        cases = [
            (None, 'The public is more familiar with bad design than good design. It is, in effect, conditioned to prefer bad design, because that is what it li {snip}'),
            (10, 'The public is more familiar with bad design than good design. It is, in effect, conditioned to prefer bad design, because that is what it li {snip}'),
            (66, '{snip} blic is more familiar with bad design than good design. It is, in effect, conditioned to prefer bad design, because that is what it lives wi {snip}'),
            (190, '{snip} gn. It is, in effect, conditioned to prefer bad design, because that is what it lives with. The new becomes threatening, the old reassuring.'),
            # Columns past the end behave like a column near the end.
            (9999, '{snip} gn. It is, in effect, conditioned to prefer bad design, because that is what it lives with. The new becomes threatening, the old reassuring.'),
        ]

        for column, expected in cases:
            if column is None:
                assert trim_line(self.long_line) == expected
            else:
                assert trim_line(self.long_line, column=column) == expected
def test_get_culprit_is_patched():
    """fix_culprit() derives the culprit from the last frame's module/function."""
    from sentry.lang.javascript.plugin import fix_culprit, generate_modules

    def frame(abs_path, filename, lineno, function):
        # Minimal stacktrace frame payload.
        return {
            'abs_path': abs_path,
            'filename': filename,
            'lineno': lineno,
            'colno': 0,
            'function': function,
        }

    data = {
        'message': 'hello',
        'platform': 'javascript',
        'sentry.interfaces.Exception': {
            'values': [
                {
                    'type': 'Error',
                    'stacktrace': {
                        'frames': [
                            frame('http://example.com/foo.js', 'foo.js', 4, 'thing'),
                            frame('http://example.com/bar.js', 'bar.js', 1, 'oops'),
                        ],
                    },
                }
            ],
        }
    }

    generate_modules(data)
    fix_culprit(data)

    # Culprit comes from the last frame: module "bar", function "oops".
    assert data['culprit'] == 'bar in oops'
def test_ensure_module_names():
    """generate_modules() adds a module name to frames that carry an abs_path."""
    from sentry.lang.javascript.plugin import generate_modules

    frames = [
        # First frame has no abs_path, so no module can be derived for it.
        {
            'filename': 'foo.js',
            'lineno': 4,
            'colno': 0,
            'function': 'thing',
        },
        {
            'abs_path': 'http://example.com/foo/bar.js',
            'filename': 'bar.js',
            'lineno': 1,
            'colno': 0,
            'function': 'oops',
        },
    ]
    data = {
        'message': 'hello',
        'platform': 'javascript',
        'sentry.interfaces.Exception': {
            'values': [
                {
                    'type': 'Error',
                    'stacktrace': {'frames': frames},
                }
            ],
        }
    }

    generate_modules(data)

    exc = data['sentry.interfaces.Exception']['values'][0]
    assert exc['stacktrace']['frames'][1]['module'] == 'foo/bar'
class ErrorMappingTest(TestCase):
    # Tests for rewrite_exception(), which expands minified React error
    # messages using the error mapping fetched from REACT_MAPPING_URL.

    @responses.activate
    def test_react_error_mapping_resolving(self):
        """A minified React error url is rewritten using its decoded args."""
        responses.add(
            responses.GET,
            REACT_MAPPING_URL,
            body=r'''
            {
              "108": "%s.getChildContext(): key \"%s\" is not defined in childContextTypes.",
              "109": "%s.render(): A valid React element (or null) must be returned. You may have returned undefined, an array or some other invalid object.",
              "110": "Stateless function components cannot have refs."
            }
            ''',
            content_type='application/json'
        )

        # Repeated three times — presumably to exercise the cached mapping on
        # subsequent calls; confirm against rewrite_exception's caching.
        for x in range(3):
            data = {
                'platform': 'javascript',
                'sentry.interfaces.Exception': {
                    'values': [
                        {
                            'type':
                            'InvariantViolation',
                            'value': (
                                'Minified React error #109; visit http://facebook'
                                '.github.io/react/docs/error-decoder.html?invariant='
                                '109&args[]=Component for the full message or use '
                                'the non-minified dev environment for full errors '
                                'and additional helpful warnings.'
                            ),
                            'stacktrace': {
                                'frames': [
                                    {
                                        'abs_path': 'http://example.com/foo.js',
                                        'filename': 'foo.js',
                                        'lineno': 4,
                                        'colno': 0,
                                    },
                                    {
                                        'abs_path': 'http://example.com/foo.js',
                                        'filename': 'foo.js',
                                        'lineno': 1,
                                        'colno': 0,
                                    },
                                ],
                            },
                        }
                    ],
                }
            }

            assert rewrite_exception(data)

            # Mapping 109 with the single decoded arg "Component" substituted.
            assert data['sentry.interfaces.Exception']['values'][0]['value'] == (
                'Component.render(): A valid React element (or null) must be '
                'returned. You may have returned undefined, an array or '
                'some other invalid object.'
            )

    @responses.activate
    def test_react_error_mapping_empty_args(self):
        """Empty args[] entries substitute as empty strings."""
        responses.add(
            responses.GET,
            REACT_MAPPING_URL,
            body=r'''
            {
              "108": "%s.getChildContext(): key \"%s\" is not defined in childContextTypes."
            }
            ''',
            content_type='application/json'
        )

        data = {
            'platform': 'javascript',
            'sentry.interfaces.Exception': {
                'values': [
                    {
                        'type':
                        'InvariantViolation',
                        'value': (
                            'Minified React error #108; visit http://facebook'
                            '.github.io/react/docs/error-decoder.html?invariant='
                            '108&args[]=Component&args[]= for the full message '
                            'or use the non-minified dev environment for full '
                            'errors and additional helpful warnings.'
                        ),
                        'stacktrace': {
                            'frames': [
                                {
                                    'abs_path': 'http://example.com/foo.js',
                                    'filename': 'foo.js',
                                    'lineno': 4,
                                    'colno': 0,
                                },
                            ],
                        },
                    }
                ],
            }
        }

        assert rewrite_exception(data)

        assert data['sentry.interfaces.Exception']['values'][0]['value'] == (
            'Component.getChildContext(): key "" is not defined in '
            'childContextTypes.'
        )

    @responses.activate
    def test_react_error_mapping_truncated(self):
        """A truncated url (ellipsis, no args) yields <redacted> placeholders."""
        responses.add(
            responses.GET,
            REACT_MAPPING_URL,
            body=r'''
            {
              "108": "%s.getChildContext(): key \"%s\" is not defined in childContextTypes."
            }
            ''',
            content_type='application/json'
        )

        data = {
            'platform': 'javascript',
            'sentry.interfaces.Exception': {
                'values': [
                    {
                        'type':
                        'InvariantViolation',
                        'value': (
                            u'Minified React error #108; visit http://facebook'
                            u'.github.io/react/docs/error-decoder.html?\u2026'
                        ),
                        'stacktrace': {
                            'frames': [
                                {
                                    'abs_path': 'http://example.com/foo.js',
                                    'filename': 'foo.js',
                                    'lineno': 4,
                                    'colno': 0,
                                },
                            ],
                        },
                    }
                ],
            }
        }

        assert rewrite_exception(data)

        assert data['sentry.interfaces.Exception']['values'][0]['value'] == (
            '<redacted>.getChildContext(): key "<redacted>" is not defined in '
            'childContextTypes.'
        )
| looker/sentry | tests/sentry/lang/javascript/test_processor.py | Python | bsd-3-clause | 28,824 | [
"VisIt"
] | ee1519e6f1eaa028261cf2a2ef2ddc5502b88d0e56c78dcf5e1f75129203ddc6 |
########################################################################
# File : GlobusComputingElement.py
# Author : A.S.
########################################################################
""" Globus Computing Element
Allows direct submission to Globus Computing Elements with a SiteDirector Agent
Needs the globus grid middleware. On needs open ports GLOBUS_TCP_PORT_RANGE
to be set or open ports 20000 to 25000 (needs to be confirmed)
"""
__RCSID__ = "$Id$"
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.Core.Utilities.Grid import executeGridCommand
from DIRAC import S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getGroupOption
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB
from DIRAC.WorkloadManagementSystem.Agent.SiteDirector import WAITING_PILOT_STATUS
from DIRAC.Core.Utilities.File import makeGuid
import os
# Computing-element type name this class is registered under.
CE_NAME = 'Globus'
# CE configuration parameters that must be present.
MANDATORY_PARAMETERS = [ 'Queue' ]
class GlobusComputingElement( ComputingElement ):
  """Globus computing element class.

     Implements direct pilot submission to Globus gatekeepers through the
     globus-job-* command line tools: submitJob, killJob, getCEStatus,
     getJobStatus and getJobOutput.
  """

  #############################################################################
  def __init__( self, ceUniqueID ):
    """ Standard constructor.

        :param str ceUniqueID: unique name of this CE instance
    """
    ComputingElement.__init__( self, ceUniqueID )

    self.ceType = CE_NAME
    self.submittedJobs = 0
    self.mandatoryParameters = MANDATORY_PARAMETERS
    self.pilotProxy = ''
    self.queue = ''
    self.outputURL = 'gsiftp://localhost'
    self.gridEnv = ''
    self.proxyRenewal = 0

  def _reset( self ):
    """ Refresh the locally cached CE parameters after (re)configuration """
    self.queue = self.ceParameters['Queue']
    self.outputURL = self.ceParameters.get( 'OutputURL', 'gsiftp://localhost' )
    self.gridEnv = self.ceParameters['GridEnv']

  #############################################################################
  def submitJob( self, executableFile, proxy, numberOfJobs = 1 ):
    """ Submit pilot jobs with globus-job-submit.

        :param str executableFile: path of the pilot wrapper script
        :param proxy: pilot proxy (unused here, self.proxy is used instead)
        :param int numberOfJobs: number of pilots to submit
        :return: S_OK( list of pilot references ) carrying a 'PilotStampDict'
                 entry that maps reference -> DIRAC stamp, or S_ERROR
    """
    self.log.verbose( "Executable file path: %s" % executableFile )
    if not os.access( executableFile, 5 ):
      # Make the pilot wrapper executable for the submission command
      os.chmod( executableFile, 0o755 )

    batchIDList = []
    stampDict = {}
    for _i in xrange(numberOfJobs):
      diracStamp = makeGuid()[:8]
      queueName = '%s/%s' % ( self.ceName, self.queue )
      cmd = ['globus-job-submit', queueName, "-s", executableFile ]
      result = executeGridCommand( self.proxy, cmd, self.gridEnv )
      self.log.verbose(result)
      # NOTE(review): if executeGridCommand itself fails (result['OK'] False)
      # this pilot is silently skipped - confirm best-effort is intended here.
      if result['OK']:
        if result['Value'][0]:
          # We have got a non-zero status code from globus-job-submit
          errorString = result['Value'][2] if result['Value'][2] else result['Value'][1]
          return S_ERROR( 'Pilot submission failed with error: %s ' % errorString.strip() )
        pilotJobReference = result['Value'][1].strip()
        if not pilotJobReference:
          return S_ERROR( 'No pilot reference returned from the globus job submission command' )
        if not pilotJobReference.startswith( 'https' ):
          # Globus contact strings for submitted jobs start with https
          return S_ERROR( 'Invalid pilot reference %s' % pilotJobReference )
        batchIDList.append( pilotJobReference )
        stampDict[pilotJobReference] = diracStamp
    if batchIDList:
      result = S_OK( batchIDList )
      result['PilotStampDict'] = stampDict
    else:
      result = S_ERROR( 'No pilot references obtained from the globus job submission' )
    return result

  def killJob( self, jobIDList ):
    """ Kill the specified jobs with globus-job-clean.

        :param jobIDList: a single job reference or a list of references
        :return: S_OK, or S_ERROR on the first failure
    """
    jobList = list( jobIDList )
    if isinstance(jobIDList, basestring):
      jobList = [ jobIDList ]
    for jobID in jobList:
      cmd = ['globus-job-clean', '-f', jobID]
      result = executeGridCommand( self.proxy, cmd, self.gridEnv )
      if not result['OK']:
        return result
      if result['Value'][0] != 0:
        return S_ERROR( 'Failed to kill job: %s' % result['Value'][1].strip() )

    return S_OK()

  #############################################################################
  def getCEStatus( self ):
    """ Method to return information on running and pending jobs.

        Only 'WaitingJobs' is actually filled in, from the PilotAgentsDB
        counts; the CE itself is not interrogated.
    """
    result = S_OK()
    result['SubmittedJobs'] = 0
    result['RunningJobs'] = 0
    result['WaitingJobs'] = 0

    # Count pilots registered as waiting for this CE in the DB
    condDict = { 'DestinationSite': self.ceName,
                 'Status': WAITING_PILOT_STATUS }
    res = PilotAgentsDB().countPilots( condDict )
    if res['OK']:
      result[ 'WaitingJobs' ] = int( res['Value'] )
    else:
      self.log.warn( "Failure getting pilot count for %s: %s " % ( self.ceName, res['Message'] ) )
    return result

  def getJobStatus( self, jobIDList ):
    """ Get the status information for the given list of jobs.

        :param jobIDList: list of 'reference:::stamp' strings
        :return: S_OK( dict of reference -> DIRAC status ) or S_ERROR
    """
    resultDict = {}
    self.log.verbose("JobIDList: %s" % jobIDList)
    for jobInfo in jobIDList:
      jobID = jobInfo.split(":::")[0]
      cmd = ['globus-job-status', jobID ]
      result = executeGridCommand( self.proxy, cmd, self.gridEnv )
      self.log.info("Result from globus-job-status %s " % str(result) )
      if not result['OK']:
        self.log.error( 'Failed to get job status for jobID', jobID )
        continue
      if result['Value'][0]:
        if result['Value'][2]:
          return S_ERROR( result['Value'][2] )
        else:
          return S_ERROR( 'Error while interrogating job statuses' )
      if result['Value'][1]:
        resultDict[jobID] = self.__parseJobStatus( result['Value'][1] )

    if not resultDict:
      return S_ERROR( 'No job statuses returned' )

    # If CE does not know about a job, set the status to Unknown
    # NOTE(review): lookups use the bare jobID while the Unknown entry is
    # keyed by the full jobInfo string - confirm this asymmetry is intended.
    for jobInfo in jobIDList:
      jobID = jobInfo.split(":::")[0]
      if jobID not in resultDict:
        resultDict[jobInfo] = 'Unknown'

    return S_OK( resultDict )

  def __parseJobStatus( self, output ):
    """ Map the output of globus-job-status to a DIRAC pilot status.

        :param str output: raw globus-job-status output
        :return: 'Done', 'Failed', 'Scheduled', 'Killed', 'Running' or 'Unknown'
    """
    self.log.verbose("Output %s " % output)
    output = output.strip()
    self.log.verbose("Output Stripped %s " % output)
    if output in ['DONE']:
      return 'Done'
    elif output in ['FAILED', 'SUSPENDED']:
      return 'Failed'
    elif output in ['PENDING', 'UNSUBMITTED']:
      return 'Scheduled'
    elif output in ['CANCELLED']:
      return 'Killed'
    elif output in ['RUNNING', 'ACTIVE', 'STAGE_IN', 'STAGE_OUT']:
      return 'Running'
    elif output == 'N/A':
      return 'Unknown'
    return 'Unknown'

  def getJobOutput( self, jobID, _localDir = None ):
    """ Get the specified job standard output and error files. The output is returned
        as strings.

        :param str jobID: 'reference:::stamp' string identifying the pilot
        :param _localDir: unused, kept for interface compatibility
        :return: S_OK( ( stdout, stderr ) ) or S_ERROR
    """
    if jobID.find( ':::' ) != -1:
      pilotRef, stamp = jobID.split( ':::' )
    else:
      pilotRef = jobID
      stamp = ''
    if not stamp:
      return S_ERROR( 'Pilot stamp not defined for %s' % pilotRef )

    ## somehow when this is called from the WMSAdministrator we don't
    ## get the right proxy, so we do all this stuff here now. Probably
    ## should be fixed in the WMSAdministrator?
    ## Because this function is called from the WMSAdminsitrator, the
    ## gridEnv that is picked up is not the one from the SiteDirector
    ## Definition, but from Computing/CEDefaults
    result = PilotAgentsDB().getPilotInfo(pilotRef)
    if not result['OK'] or not result[ 'Value' ]:
      return S_ERROR('Failed to determine owner for pilot ' + pilotRef)
    pilotDict = result['Value'][pilotRef]
    owner = pilotDict['OwnerDN']
    group = getGroupOption(pilotDict['OwnerGroup'],'VOMSRole',pilotDict['OwnerGroup'])
    ret = gProxyManager.getPilotProxyFromVOMSGroup( owner, group )
    if not ret['OK']:
      self.log.error( ret['Message'] )
      self.log.error( 'Could not get proxy:', 'User "%s", Group "%s"' % ( owner, group ) )
      return S_ERROR("Failed to get the pilot's owner proxy")
    self.proxy = ret['Value']

    self.log.verbose("Getting output for: %s " % pilotRef)
    cmd = ['globus-job-get-output', '-out', pilotRef ]
    result = executeGridCommand( self.proxy, cmd, self.gridEnv )
    output = ''
    if result['OK']:
      if not result['Value'][0]:
        output = result['Value'][1]
      elif result['Value'][0] == 1 and "No such file or directory" in result['Value'][2]:
        # Absent file on the gatekeeper is not fatal for stdout
        output = "Standard Output is not available on the Globus service"
      else:
        error = '\n'.join( result['Value'][1:] )
        return S_ERROR( error )
    else:
      return S_ERROR( 'Failed to retrieve output for %s' % jobID )

    cmd = ['globus-job-get-output', '-err', pilotRef ]
    result = executeGridCommand( self.proxy, cmd, self.gridEnv )
    error = ''
    if result['OK']:
      if not result['Value'][0]:
        error = result['Value'][1]
      elif result['Value'][0] == 1 and "No such file or directory" in result['Value'][2]:
        error = "Standard Error is not available on the Globus service"
      else:
        error = '\n'.join( result['Value'][1:] )
        return S_ERROR( error )
    else:
      return S_ERROR( 'Failed to retrieve error for %s' % jobID )

    return S_OK( ( output, error ) )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| Andrew-McNab-UK/DIRAC | Resources/Computing/GlobusComputingElement.py | Python | gpl-3.0 | 9,326 | [
"DIRAC"
] | c27414e720c035b812c202751c8888388cc18622e0bff9df65f85c22d0dddb93 |
"""Testing for kernels for Gaussian processes."""
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import pytest
import numpy as np
from inspect import signature
from sklearn.gaussian_process.kernels import _approx_fprime
from sklearn.metrics.pairwise \
import PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels
from sklearn.gaussian_process.kernels \
import (RBF, Matern, RationalQuadratic, ExpSineSquared, DotProduct,
ConstantKernel, WhiteKernel, PairwiseKernel, KernelOperator,
Exponentiation)
from sklearn.base import clone
from sklearn.utils.testing import (assert_almost_equal,
assert_array_equal,
assert_array_almost_equal)
# Fixed random inputs shared by all kernel tests (5 resp. 6 samples, 2 features).
X = np.random.RandomState(0).normal(0, 1, (5, 2))
Y = np.random.RandomState(0).normal(0, 1, (6, 2))

kernel_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0)
# A representative zoo of kernels: basic, scaled, anisotropic, fixed-bounds,
# composed and exponentiated variants — iterated by the parametrized tests.
kernels = [RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)),
           ConstantKernel(constant_value=10.0),
           2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"),
           2.0 * RBF(length_scale=0.5), kernel_white,
           2.0 * RBF(length_scale=[0.5, 2.0]),
           2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"),
           2.0 * Matern(length_scale=0.5, nu=0.5),
           2.0 * Matern(length_scale=1.5, nu=1.5),
           2.0 * Matern(length_scale=2.5, nu=2.5),
           2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5),
           3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5),
           4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5),
           RationalQuadratic(length_scale=0.5, alpha=1.5),
           ExpSineSquared(length_scale=0.5, periodicity=1.5),
           DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2,
           RBF(length_scale=[2.0]), Matern(length_scale=[2.0])]
# Also cover the pairwise-kernel wrappers, except the chi2 family
# (skipped here — they require non-negative input).
for metric in PAIRWISE_KERNEL_FUNCTIONS:
    if metric in ["additive_chi2", "chi2"]:
        continue
    kernels.append(PairwiseKernel(gamma=1.0, metric=metric))
@pytest.mark.parametrize('kernel', kernels)
def test_kernel_gradient(kernel):
    """Analytic kernel gradients must agree with finite-difference estimates."""
    K, K_gradient = kernel(X, eval_gradient=True)

    n_samples = X.shape[0]
    assert K_gradient.shape[0] == n_samples
    assert K_gradient.shape[1] == n_samples
    assert K_gradient.shape[2] == kernel.theta.shape[0]

    def eval_kernel_for_theta(theta):
        # Re-evaluate the kernel matrix at a perturbed hyperparameter vector.
        return kernel.clone_with_theta(theta)(X, eval_gradient=False)

    K_gradient_approx = \
        _approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10)

    assert_almost_equal(K_gradient, K_gradient_approx, 4)
@pytest.mark.parametrize(
    'kernel',
    [kernel for kernel in kernels
     # skip non-basic kernels
     if not (isinstance(kernel, KernelOperator)
             or isinstance(kernel, Exponentiation))])
def test_kernel_theta(kernel):
    """theta must expose the log of every non-fixed hyperparameter, and
    fixing one hyperparameter must drop exactly its entry from theta and
    from the gradient."""
    # Check that parameter vector theta of kernel is set correctly.
    theta = kernel.theta
    _, K_gradient = kernel(X, eval_gradient=True)

    # Determine kernel parameters that contribute to theta
    init_sign = signature(kernel.__class__.__init__).parameters.values()
    args = [p.name for p in init_sign if p.name != 'self']
    # Constructor args ending in "_bounds" mark tunable hyperparameters;
    # strip the suffix to recover the hyperparameter names.
    theta_vars = map(lambda s: s[0:-len("_bounds")],
                     filter(lambda s: s.endswith("_bounds"), args))
    assert (
        set(hyperparameter.name
            for hyperparameter in kernel.hyperparameters) ==
        set(theta_vars))

    # Check that values returned in theta are consistent with
    # hyperparameter values (being their logarithms)
    for i, hyperparameter in enumerate(kernel.hyperparameters):
        assert (theta[i] ==
                np.log(getattr(kernel, hyperparameter.name)))

    # Fixed kernel parameters must be excluded from theta and gradient.
    for i, hyperparameter in enumerate(kernel.hyperparameters):
        # create copy with certain hyperparameter fixed
        params = kernel.get_params()
        params[hyperparameter.name + "_bounds"] = "fixed"
        kernel_class = kernel.__class__
        new_kernel = kernel_class(**params)
        # Check that theta and K_gradient are identical with the fixed
        # dimension left out
        _, K_gradient_new = new_kernel(X, eval_gradient=True)
        assert theta.shape[0] == new_kernel.theta.shape[0] + 1
        assert K_gradient.shape[2] == K_gradient_new.shape[2] + 1
        # Entries before and after the fixed index must be untouched.
        if i > 0:
            assert theta[:i] == new_kernel.theta[:i]
            assert_array_equal(K_gradient[..., :i],
                               K_gradient_new[..., :i])
        if i + 1 < len(kernel.hyperparameters):
            assert theta[i + 1:] == new_kernel.theta[i:]
            assert_array_equal(K_gradient[..., i + 1:],
                               K_gradient_new[..., i:])

    # Check that values of theta are modified correctly
    for i, hyperparameter in enumerate(kernel.hyperparameters):
        # Writing through theta must update the attribute (theta is log-space)
        theta[i] = np.log(42)
        kernel.theta = theta
        assert_almost_equal(getattr(kernel, hyperparameter.name), 42)

        # ...and writing the attribute must update theta.
        setattr(kernel, hyperparameter.name, 43)
        assert_almost_equal(kernel.theta[i], np.log(43))
@pytest.mark.parametrize('kernel',
                         [kernel for kernel in kernels
                          # Identity is not satisfied on diagonal
                          if kernel != kernel_white])
def test_auto_vs_cross(kernel):
    """kernel(X) must equal kernel(X, X) for every non-white kernel."""
    auto_corr = kernel(X)
    cross_corr = kernel(X, X)
    assert_almost_equal(auto_corr, cross_corr, 5)
@pytest.mark.parametrize('kernel', kernels)
def test_kernel_diag(kernel):
    """kernel.diag(X) must match the diagonal of the full kernel matrix."""
    full_matrix = kernel(X)
    assert_almost_equal(np.diag(full_matrix), kernel.diag(X), 5)
def test_kernel_operator_commutative():
    """Kernel addition and multiplication must be commutative."""
    # Addition: constant on either side of the operator.
    left_sum = (RBF(2.0) + 1.0)(X)
    right_sum = (1.0 + RBF(2.0))(X)
    assert_almost_equal(left_sum, right_sum)

    # Multiplication: scalar factor on either side.
    left_prod = (3.0 * RBF(2.0))(X)
    right_prod = (RBF(2.0) * 3.0)(X)
    assert_almost_equal(left_prod, right_prod)
def test_kernel_anisotropic():
    """An anisotropic RBF must match isotropic RBFs on rescaled inputs."""
    kernel = 3.0 * RBF([0.5, 2.0])
    K = kernel(X)

    # Stretching feature 0 by 4 makes length_scale 0.5 act like 2.0.
    X_stretched = np.array(X)
    X_stretched[:, 0] *= 4
    assert_almost_equal(K, 3.0 * RBF(2.0)(X_stretched))

    # Shrinking feature 1 by 4 makes length_scale 2.0 act like 0.5.
    X_shrunk = np.array(X)
    X_shrunk[:, 1] /= 4
    assert_almost_equal(K, 3.0 * RBF(0.5)(X_shrunk))

    # Getting/setting via theta (log-space) must reach both length scales.
    kernel.theta = kernel.theta + np.log(2)
    assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0]))
    assert_array_equal(kernel.k2.length_scale, [1.0, 4.0])
@pytest.mark.parametrize('kernel',
                         [kernel for kernel in kernels
                          if kernel.is_stationary()])
def test_kernel_stationary(kernel):
    """A stationary kernel depends only on X - Y, so k(X, X + 1) is constant
    along the diagonal."""
    shifted = kernel(X, X + 1)
    assert_almost_equal(shifted[0, 0], np.diag(shifted))
def check_hyperparameters_equal(kernel1, kernel2):
    """Assert every ``hyperparameter_*`` attribute matches between kernels."""
    all_attrs = set(dir(kernel1) + dir(kernel2))
    for attr in all_attrs:
        if not attr.startswith("hyperparameter_"):
            continue
        assert getattr(kernel1, attr) == getattr(kernel2, attr)
@pytest.mark.parametrize("kernel", kernels)
def test_kernel_clone(kernel):
# Test that sklearn's clone works correctly on kernels.
kernel_cloned = clone(kernel)
# XXX: Should this be fixed?
# This differs from the sklearn's estimators equality check.
assert kernel == kernel_cloned
assert id(kernel) != id(kernel_cloned)
# Check that all constructor parameters are equal.
assert kernel.get_params() == kernel_cloned.get_params()
# Check that all hyperparameters are equal.
check_hyperparameters_equal(kernel, kernel_cloned)
@pytest.mark.parametrize('kernel', kernels)
def test_kernel_clone_after_set_params(kernel):
    """clone() must still work after set_params() has altered a kernel."""
    # This test is to verify that using set_params does not
    # break clone on kernels.
    # This used to break because in kernels such as the RBF, non-trivial
    # logic that modified the length scale used to be in the constructor
    # See https://github.com/scikit-learn/scikit-learn/issues/6961
    # for more details.
    bounds = (1e-5, 1e5)
    kernel_cloned = clone(kernel)
    params = kernel.get_params()
    # RationalQuadratic kernel is isotropic.
    isotropic_kernels = (ExpSineSquared, RationalQuadratic)
    if 'length_scale' in params and not isinstance(kernel,
                                                   isotropic_kernels):
        # Flip the anisotropy of the length scale: scalar -> 2-vector and
        # vector -> scalar, then push it through set_params.
        length_scale = params['length_scale']
        if np.iterable(length_scale):
            params['length_scale'] = length_scale[0]
            params['length_scale_bounds'] = bounds
        else:
            params['length_scale'] = [length_scale] * 2
            params['length_scale_bounds'] = bounds * 2
        kernel_cloned.set_params(**params)
        kernel_cloned_clone = clone(kernel_cloned)
        # The clone of the modified kernel must reflect the new params,
        # be a distinct object, and keep hyperparameters consistent.
        assert (kernel_cloned_clone.get_params() ==
                kernel_cloned.get_params())
        assert id(kernel_cloned_clone) != id(kernel_cloned)
        check_hyperparameters_equal(kernel_cloned, kernel_cloned_clone)
def test_matern_kernel():
    """Matern kernel special cases (nu in {0.5, 1.5, 2.5}) must be consistent."""
    # Diagonal entries of a Matern kernel are 1.
    K = Matern(nu=1.5, length_scale=1.0)(X)
    assert_array_almost_equal(np.diag(K), np.ones(X.shape[0]))

    # nu=0.5 reduces to the absolute-exponential kernel.
    K_absexp = np.exp(-euclidean_distances(X, X, squared=False))
    assert_array_almost_equal(Matern(nu=0.5, length_scale=1.0)(X), K_absexp)

    # Each closed-form special case must nearly match the general formula
    # evaluated at nu just above the special value.
    tiny = 1e-10
    for nu in [0.5, 1.5, 2.5]:
        K_special = Matern(nu=nu, length_scale=1.0)(X)
        K_general = Matern(nu=nu + tiny, length_scale=1.0)(X)
        assert_array_almost_equal(K_special, K_general)
@pytest.mark.parametrize("kernel", kernels)
def test_kernel_versus_pairwise(kernel):
# Check that GP kernels can also be used as pairwise kernels.
# Test auto-kernel
if kernel != kernel_white:
# For WhiteKernel: k(X) != k(X,X). This is assumed by
# pairwise_kernels
K1 = kernel(X)
K2 = pairwise_kernels(X, metric=kernel)
assert_array_almost_equal(K1, K2)
# Test cross-kernel
K1 = kernel(X, Y)
K2 = pairwise_kernels(X, Y, metric=kernel)
assert_array_almost_equal(K1, K2)
@pytest.mark.parametrize("kernel", kernels)
def test_set_get_params(kernel):
# Check that set_params()/get_params() is consistent with kernel.theta.
# Test get_params()
index = 0
params = kernel.get_params()
for hyperparameter in kernel.hyperparameters:
if isinstance("string", type(hyperparameter.bounds)):
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
params[hyperparameter.name])
index += size
else:
assert_almost_equal(np.exp(kernel.theta[index]),
params[hyperparameter.name])
index += 1
# Test set_params()
index = 0
value = 10 # arbitrary value
for hyperparameter in kernel.hyperparameters:
if isinstance("string", type(hyperparameter.bounds)):
if hyperparameter.bounds == "fixed":
continue
size = hyperparameter.n_elements
if size > 1: # anisotropic kernels
kernel.set_params(**{hyperparameter.name: [value] * size})
assert_almost_equal(np.exp(kernel.theta[index:index + size]),
[value] * size)
index += size
else:
kernel.set_params(**{hyperparameter.name: value})
assert_almost_equal(np.exp(kernel.theta[index]), value)
index += 1
@pytest.mark.parametrize("kernel", kernels)
def test_repr_kernels(kernel):
# Smoke-test for repr in kernels.
repr(kernel)
| chrsrds/scikit-learn | sklearn/gaussian_process/tests/test_kernels.py | Python | bsd-3-clause | 12,489 | [
"Gaussian"
] | 02c5639df7641906cda12a77bbc27cd8abb8023a2dba183b4f416df26c8cdace |
#!/usr/bin/env python
import argparse
import binascii
import copy
import datetime
import hashlib
import json
import logging
import os
import shutil
import struct
import subprocess
import tempfile
import xml.etree.ElementTree as ET
from collections import defaultdict
from Bio.Data import CodonTable
# Module-wide logger for this tool.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('jbrowse')
# Current date; used to substitute __pd__date__pd__ in track categories.
TODAY = datetime.datetime.now().strftime("%Y-%m-%d")
# Base URL of the Galaxy server; set at runtime (None until then).
GALAXY_INFRASTRUCTURE_URL = None
class ColorScaling(object):
    """Builds JBrowse per-feature colour callbacks.

    Generates small JavaScript functions (from the templates below) that
    JBrowse evaluates per feature to compute an rgba() colour, and cycles
    through a Brewer palette for automatic colour assignment.
    """

    # JS callback: fixed RGB with opacity derived from the feature score.
    COLOR_FUNCTION_TEMPLATE = """
    function(feature, variableName, glyphObject, track) {{
        var score = {score};
        {opacity}
        return 'rgba({red}, {green}, {blue}, ' + opacity + ')';
    }}
    """

    # JS callback: colour resolved from feature attributes (searching up to
    # parents and down to children), opacity from the feature score.
    COLOR_FUNCTION_TEMPLATE_QUAL = r"""
    function(feature, variableName, glyphObject, track) {{
        var search_up = function self(sf, attr){{
            if(sf.get(attr) !== undefined){{
                return sf.get(attr);
            }}
            if(sf.parent() === undefined) {{
                return;
            }}else{{
                return self(sf.parent(), attr);
            }}
        }};

        var search_down = function self(sf, attr){{
            if(sf.get(attr) !== undefined){{
                return sf.get(attr);
            }}
            if(sf.children() === undefined) {{
                return;
            }}else{{
                var kids = sf.children();
                for(var child_idx in kids){{
                    var x = self(kids[child_idx], attr);
                    if(x !== undefined){{
                        return x;
                    }}
                }}
                return;
            }}
        }};

        var color = ({user_spec_color} || search_up(feature, 'color') || search_down(feature, 'color') || {auto_gen_color});
        var score = (search_up(feature, 'score') || search_down(feature, 'score'));
        {opacity}
        if(score === undefined){{ opacity = 1; }}
        var result = /^#?([a-f\d]{{2}})([a-f\d]{{2}})([a-f\d]{{2}})$/i.exec(color);
        var red = parseInt(result[1], 16);
        var green = parseInt(result[2], 16);
        var blue = parseInt(result[3], 16);
        if(isNaN(opacity) || opacity < 0){{ opacity = 0; }}
        return 'rgba(' + red + ',' + green + ',' + blue + ',' + opacity + ')';
    }}
    """

    # JS snippets mapping a score to an opacity, keyed by scaling algorithm.
    OPACITY_MATH = {
        'linear': """
            var opacity = (score - ({min})) / (({max}) - ({min}));
        """,
        'logarithmic': """
            var opacity = (score - ({min})) / (({max}) - ({min}));
            opacity = Math.log10(opacity) + Math.log10({max});
        """,
        'blast': """
            var opacity = 0;
            if(score == 0.0) {{
                opacity = 1;
            }} else {{
                opacity = (20 - Math.log10(score)) / 180;
            }}
        """
    }

    BREWER_COLOUR_IDX = 0
    # Qualitative palette cycled through by _get_colours().
    BREWER_COLOUR_SCHEMES = [
        (166, 206, 227),
        (31, 120, 180),
        (178, 223, 138),
        (51, 160, 44),
        (251, 154, 153),
        (227, 26, 28),
        (253, 191, 111),
        (255, 127, 0),
        (202, 178, 214),
        (106, 61, 154),
        (255, 255, 153),
        (177, 89, 40),
        (228, 26, 28),
        (55, 126, 184),
        (77, 175, 74),
        (152, 78, 163),
        (255, 127, 0),
    ]

    # Diverging palettes: (low end, high end) hex pairs.
    BREWER_DIVERGING_PALLETES = {
        'BrBg': ("#543005", "#003c30"),
        'PiYg': ("#8e0152", "#276419"),
        'PRGn': ("#40004b", "#00441b"),
        'PuOr': ("#7f3b08", "#2d004b"),
        'RdBu': ("#67001f", "#053061"),
        'RdGy': ("#67001f", "#1a1a1a"),
        'RdYlBu': ("#a50026", "#313695"),
        'RdYlGn': ("#a50026", "#006837"),
        'Spectral': ("#9e0142", "#5e4fa2"),
    }

    def __init__(self):
        # Per-instance cursor into BREWER_COLOUR_SCHEMES.
        self.brewer_colour_idx = 0

    def rgb_from_hex(self, hexstr):
        """Return an (r, g, b) tuple for a 6-digit hex string (no '#')."""
        # http://stackoverflow.com/questions/4296249/how-do-i-convert-a-hex-triplet-to-an-rgb-tuple-and-back
        return struct.unpack('BBB', binascii.unhexlify(hexstr))

    def min_max_gff(self, gff_file):
        """Return (min, max) of column 6 (score) across a GFF file.

        Lines without a parseable numeric score are skipped; returns
        (None, None) when no line has one.
        """
        min_val = None
        max_val = None
        with open(gff_file, 'r') as handle:
            for line in handle:
                try:
                    value = float(line.split('\t')[5])
                except (ValueError, IndexError):
                    # Header/comment/malformed lines have no usable score.
                    continue
                # Explicit None checks: the previous `min_val or value`
                # idiom silently discarded a legitimate score of 0.0.
                min_val = value if min_val is None else min(min_val, value)
                max_val = value if max_val is None else max(max_val, value)
        return min_val, max_val

    def hex_from_rgb(self, r, g, b):
        """Return '#rrggbb' for integer RGB components."""
        return '#%02x%02x%02x' % (r, g, b)

    def _get_colours(self):
        """Return the next (r, g, b) from the Brewer palette, advancing it."""
        r, g, b = self.BREWER_COLOUR_SCHEMES[self.brewer_colour_idx % len(self.BREWER_COLOUR_SCHEMES)]
        self.brewer_colour_idx += 1
        return r, g, b

    def parse_menus(self, track):
        """Build the menuTemplate track config from a track's <menus> options."""
        trackConfig = {'menuTemplate': [{}, {}, {}, {}]}

        if 'menu' in track['menus']:
            # A single <menu> arrives as a dict, several as a list; normalise.
            menu_list = [track['menus']['menu']]
            if isinstance(track['menus']['menu'], list):
                menu_list = track['menus']['menu']

            for m in menu_list:
                tpl = {
                    'action': m['action'],
                    'label': m.get('label', '{name}'),
                    'iconClass': m.get('iconClass', 'dijitIconBookmark'),
                }
                if 'url' in m:
                    tpl['url'] = m['url']
                if 'content' in m:
                    tpl['content'] = m['content']
                if 'title' in m:
                    tpl['title'] = m['title']

                trackConfig['menuTemplate'].append(tpl)

        return trackConfig

    def parse_colours(self, track, trackFormat, gff3=None):
        """Build the colour-related style config for a track.

        ``trackFormat`` selects between wiggle (bicolor), plain colour
        ('ignore' scaling) and score-driven colour callbacks.
        """
        # Wiggle tracks have a bicolor pallete
        trackConfig = {'style': {}}
        if trackFormat == 'wiggle':

            trackConfig['style']['pos_color'] = track['wiggle']['color_pos']
            trackConfig['style']['neg_color'] = track['wiggle']['color_neg']

            if trackConfig['style']['pos_color'] == '__auto__':
                trackConfig['style']['neg_color'] = self.hex_from_rgb(*self._get_colours())
                trackConfig['style']['pos_color'] = self.hex_from_rgb(*self._get_colours())

            # Wiggle tracks can change colour at a specified place
            bc_pivot = track['wiggle']['bicolor_pivot']
            if bc_pivot not in ('mean', 'zero'):
                # The values are either one of those two strings
                # or a number
                bc_pivot = float(bc_pivot)
            trackConfig['bicolor_pivot'] = bc_pivot
        elif 'scaling' in track:
            if track['scaling']['method'] == 'ignore':
                if track['scaling']['scheme']['color'] != '__auto__':
                    trackConfig['style']['color'] = track['scaling']['scheme']['color']
                else:
                    trackConfig['style']['color'] = self.hex_from_rgb(*self._get_colours())
            else:
                # Scored method
                algo = track['scaling']['algo']
                # linear, logarithmic, blast
                scales = track['scaling']['scales']
                # type __auto__, manual (min, max)
                scheme = track['scaling']['scheme']
                # scheme -> (type (opacity), color)
                # ==================================
                # GENE CALLS OR BLAST
                # ==================================
                if trackFormat == 'blast':
                    red, green, blue = self._get_colours()
                    color_function = self.COLOR_FUNCTION_TEMPLATE.format(**{
                        'score': "feature._parent.get('score')",
                        'opacity': self.OPACITY_MATH['blast'],
                        'red': red,
                        'green': green,
                        'blue': blue,
                    })
                    trackConfig['style']['color'] = color_function.replace('\n', '')
                elif trackFormat == 'gene_calls':
                    # Default values, based on GFF3 spec
                    min_val = 0
                    max_val = 1000
                    # Get min/max and build a scoring function since JBrowse doesn't
                    if scales['type'] == 'automatic' or scales['type'] == '__auto__':
                        min_val, max_val = self.min_max_gff(gff3)
                    else:
                        min_val = scales.get('min', 0)
                        max_val = scales.get('max', 1000)

                    if scheme['color'] == '__auto__':
                        user_color = 'undefined'
                        auto_color = "'%s'" % self.hex_from_rgb(*self._get_colours())
                    elif scheme['color'].startswith('#'):
                        user_color = "'%s'" % self.hex_from_rgb(*self.rgb_from_hex(scheme['color'][1:]))
                        auto_color = 'undefined'
                    else:
                        user_color = 'undefined'
                        auto_color = "'%s'" % self.hex_from_rgb(*self._get_colours())

                    color_function = self.COLOR_FUNCTION_TEMPLATE_QUAL.format(**{
                        'opacity': self.OPACITY_MATH[algo].format(**{'max': max_val, 'min': min_val}),
                        'user_spec_color': user_color,
                        'auto_gen_color': auto_color,
                    })

                    trackConfig['style']['color'] = color_function.replace('\n', '')

        return trackConfig
def etree_to_dict(t):
    """Recursively convert an ElementTree element into nested dicts.

    Attributes become '@'-prefixed keys, repeated child tags collapse into
    lists, and text becomes either the value itself (leaf) or a '#text' key.
    """
    node = {t.tag: {} if t.attrib else None}

    children = list(t)
    if children:
        # Group converted children by tag; a single occurrence stays a
        # scalar, repeats become a list.
        grouped = defaultdict(list)
        for child_dict in map(etree_to_dict, children):
            for tag, value in child_dict.items():
                grouped[tag].append(value)
        node = {t.tag: {tag: values[0] if len(values) == 1 else values
                        for tag, values in grouped.items()}}

    if t.attrib:
        node[t.tag].update(('@' + k, v) for k, v in t.attrib.items())

    if t.text:
        text = t.text.strip()
        if children or t.attrib:
            if text:
                node[t.tag]['#text'] = text
        else:
            node[t.tag] = text
    return node
# score comes from feature._parent.get('score') or feature.get('score')
# Directory containing this script; used to locate sibling helper scripts.
INSTALLED_TO = os.path.dirname(os.path.realpath(__file__))
def metadata_from_node(node):
    """Flatten a track's Galaxy metadata XML node into a dict.

    Attributes of the dataset/history/metadata/tool children become
    '<section>_<attr>' keys; a few values are then rewritten into HTML
    links back to the Galaxy server.
    """
    metadata = {}
    try:
        if len(node.findall('dataset')) != 1:
            # exit early
            return metadata
    except Exception:
        return {}

    # Prefix each section's attributes with its tag name.
    for section in ('dataset', 'history', 'metadata', 'tool'):
        for (key, value) in node.findall(section)[0].attrib.items():
            metadata['%s_%s' % (section, key)] = value

    # Additional Mappings applied:
    metadata['dataset_edam_format'] = '<a target="_blank" href="http://edamontology.org/{0}">{1}</a>'.format(metadata['dataset_edam_format'], metadata['dataset_file_ext'])
    metadata['history_user_email'] = '<a href="mailto:{0}">{0}</a>'.format(metadata['history_user_email'])
    metadata['history_display_name'] = '<a target="_blank" href="{galaxy}/history/view/{encoded_hist_id}">{hist_name}</a>'.format(
        galaxy=GALAXY_INFRASTRUCTURE_URL,
        encoded_hist_id=metadata['history_id'],
        hist_name=metadata['history_display_name']
    )
    metadata['tool_tool'] = '<a target="_blank" href="{galaxy}/datasets/{encoded_id}/show_params">{tool_id}</a>'.format(
        galaxy=GALAXY_INFRASTRUCTURE_URL,
        encoded_id=metadata['dataset_id'],
        tool_id=metadata['tool_tool_id'],
        tool_version=metadata['tool_tool_version'],
    )
    return metadata
class JbrowseConnector(object):
def __init__(self, jbrowse, outdir, genomes, standalone=False, gencode=1):
self.TN_TABLE = {
'gff3': '--gff',
'gff': '--gff',
'bed': '--bed',
'genbank': '--gbk',
}
self.cs = ColorScaling()
self.jbrowse = jbrowse
self.outdir = outdir
self.genome_paths = genomes
self.standalone = standalone
self.gencode = gencode
self.tracksToIndex = []
if standalone:
self.clone_jbrowse(self.jbrowse, self.outdir)
else:
try:
os.makedirs(self.outdir)
except OSError:
# Ignore if the folder exists
pass
try:
os.makedirs(os.path.join(self.outdir, 'data', 'raw'))
except OSError:
# Ignore if the folder exists
pass
self.process_genomes()
self.update_gencode()
def update_gencode(self):
table = CodonTable.unambiguous_dna_by_id[int(self.gencode)]
trackList = os.path.join(self.outdir, 'data', 'trackList.json')
with open(trackList, 'r') as handle:
trackListData = json.load(handle)
trackListData['tracks'][0].update({
'codonStarts': table.start_codons,
'codonStops': table.stop_codons,
'codonTable': table.forward_table,
})
with open(trackList, 'w') as handle:
json.dump(trackListData, handle, indent=2)
def subprocess_check_call(self, command):
log.debug('cd %s && %s', self.outdir, ' '.join(command))
subprocess.check_call(command, cwd=self.outdir)
def _jbrowse_bin(self, command):
return os.path.realpath(os.path.join(self.jbrowse, 'bin', command))
def process_genomes(self):
for genome_node in self.genome_paths:
# TODO: Waiting on https://github.com/GMOD/jbrowse/pull/884
self.subprocess_check_call([
'perl', self._jbrowse_bin('prepare-refseqs.pl'),
'--fasta', genome_node['path']])
def generate_names(self):
# Generate names
args = [
'perl', self._jbrowse_bin('generate-names.pl'),
'--hashBits', '16'
]
tracks = ','.join(self.tracksToIndex)
if tracks:
args += ['--tracks', tracks]
else:
# No tracks to index, index only the refseq
args += ['--tracks', 'DNA']
self.subprocess_check_call(args)
def _add_json(self, json_data):
cmd = [
'perl', self._jbrowse_bin('add-json.pl'),
json.dumps(json_data),
os.path.join('data', 'trackList.json')
]
self.subprocess_check_call(cmd)
def _add_track_json(self, json_data):
if len(json_data) == 0:
return
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(json.dumps(json_data))
tmp.close()
cmd = ['perl', self._jbrowse_bin('add-track-json.pl'), tmp.name,
os.path.join('data', 'trackList.json')]
self.subprocess_check_call(cmd)
os.unlink(tmp.name)
def _blastxml_to_gff3(self, xml, min_gap=10):
gff3_unrebased = tempfile.NamedTemporaryFile(delete=False)
cmd = ['python', os.path.join(INSTALLED_TO, 'blastxml_to_gapped_gff3.py'),
'--trim', '--trim_end', '--min_gap', str(min_gap), xml]
log.debug('cd %s && %s > %s', self.outdir, ' '.join(cmd), gff3_unrebased.name)
subprocess.check_call(cmd, cwd=self.outdir, stdout=gff3_unrebased)
gff3_unrebased.close()
return gff3_unrebased.name
def add_blastxml(self, data, trackData, blastOpts, **kwargs):
gff3 = self._blastxml_to_gff3(data, min_gap=blastOpts['min_gap'])
if 'parent' in blastOpts and blastOpts['parent'] != 'None':
gff3_rebased = tempfile.NamedTemporaryFile(delete=False)
cmd = ['python', os.path.join(INSTALLED_TO, 'gff3_rebase.py')]
if blastOpts.get('protein', 'false') == 'true':
cmd.append('--protein2dna')
cmd.extend([os.path.realpath(blastOpts['parent']), gff3])
log.debug('cd %s && %s > %s', self.outdir, ' '.join(cmd), gff3_rebased.name)
subprocess.check_call(cmd, cwd=self.outdir, stdout=gff3_rebased)
gff3_rebased.close()
# Replace original gff3 file
shutil.copy(gff3_rebased.name, gff3)
os.unlink(gff3_rebased.name)
config = {
'glyph': 'JBrowse/View/FeatureGlyph/Segments',
"category": trackData['category'],
}
clientConfig = trackData['style']
cmd = ['perl', self._jbrowse_bin('flatfile-to-json.pl'),
'--gff', gff3,
'--trackLabel', trackData['label'],
'--key', trackData['key'],
'--clientConfig', json.dumps(clientConfig),
'--config', json.dumps(config),
'--trackType', 'BlastView/View/Track/CanvasFeatures'
]
# className in --clientConfig is ignored, it needs to be set with --className
if 'className' in trackData['style']:
cmd += ['--className', trackData['style']['className']]
self.subprocess_check_call(cmd)
os.unlink(gff3)
if blastOpts.get('index', 'false') == 'true':
self.tracksToIndex.append("%s" % trackData['label'])
def add_bigwig(self, data, trackData, wiggleOpts, **kwargs):
dest = os.path.join('data', 'raw', trackData['label'] + '.bw')
cmd = ['ln', '-s', data, dest]
self.subprocess_check_call(cmd)
url = os.path.join('raw', trackData['label'] + '.bw')
trackData.update({
"urlTemplate": url,
"storeClass": "JBrowse/Store/SeqFeature/BigWig",
"type": "JBrowse/View/Track/Wiggle/Density",
})
trackData['type'] = wiggleOpts['type']
trackData['variance_band'] = True if wiggleOpts['variance_band'] == 'true' else False
if 'min' in wiggleOpts and 'max' in wiggleOpts:
trackData['min_score'] = wiggleOpts['min']
trackData['max_score'] = wiggleOpts['max']
else:
trackData['autoscale'] = wiggleOpts.get('autoscale', 'local')
trackData['scale'] = wiggleOpts['scale']
self._add_track_json(trackData)
def add_bam(self, data, trackData, bamOpts, bam_index=None, **kwargs):
dest = os.path.join('data', 'raw', trackData['label'] + '.bam')
cmd = ['ln', '-s', os.path.realpath(data), dest]
self.subprocess_check_call(cmd)
cmd = ['ln', '-s', os.path.realpath(bam_index), dest + '.bai']
self.subprocess_check_call(cmd)
url = os.path.join('raw', trackData['label'] + '.bam')
trackData.update({
"urlTemplate": url,
"type": "JBrowse/View/Track/Alignments2",
"storeClass": "JBrowse/Store/SeqFeature/BAM",
})
# Apollo will only switch to the (prettier) 'bam-read' className if it's not set explicitly in the track config
# So remove the default 'feature' value for these bam tracks
if 'className' in trackData['style'] and trackData['style']['className'] == 'feature':
del trackData['style']['className']
self._add_track_json(trackData)
if bamOpts.get('auto_snp', 'false') == 'true':
trackData2 = copy.copy(trackData)
trackData2.update({
"type": "JBrowse/View/Track/SNPCoverage",
"key": trackData['key'] + " - SNPs/Coverage",
"label": trackData['label'] + "_autosnp",
})
self._add_track_json(trackData2)
def add_vcf(self, data, trackData, vcfOpts={}, **kwargs):
dest = os.path.join('data', 'raw', trackData['label'] + '.vcf')
# ln?
cmd = ['ln', '-s', data, dest]
self.subprocess_check_call(cmd)
cmd = ['bgzip', dest]
self.subprocess_check_call(cmd)
cmd = ['tabix', '-p', 'vcf', dest + '.gz']
self.subprocess_check_call(cmd)
url = os.path.join('raw', trackData['label'] + '.vcf')
trackData.update({
"urlTemplate": url,
"type": "JBrowse/View/Track/HTMLVariants",
"storeClass": "JBrowse/Store/SeqFeature/VCFTabix",
})
self._add_track_json(trackData)
def add_features(self, data, format, trackData, gffOpts, metadata=None, **kwargs):
cmd = [
'perl', self._jbrowse_bin('flatfile-to-json.pl'),
self.TN_TABLE.get(format, 'gff'),
data,
'--trackLabel', trackData['label'],
'--key', trackData['key']
]
# className in --clientConfig is ignored, it needs to be set with --className
if 'className' in trackData['style']:
cmd += ['--className', trackData['style']['className']]
config = copy.copy(trackData)
clientConfig = trackData['style']
del config['style']
if 'match' in gffOpts:
config['glyph'] = 'JBrowse/View/FeatureGlyph/Segments'
if bool(gffOpts['match']):
# Can be empty for CanvasFeatures = will take all by default
cmd += ['--type', gffOpts['match']]
cmd += ['--clientConfig', json.dumps(clientConfig),
]
trackType = 'JBrowse/View/Track/CanvasFeatures'
if 'trackType' in gffOpts:
trackType = gffOpts['trackType']
if trackType == 'JBrowse/View/Track/CanvasFeatures':
if 'transcriptType' in gffOpts and gffOpts['transcriptType']:
config['transcriptType'] = gffOpts['transcriptType']
if 'subParts' in gffOpts and gffOpts['subParts']:
config['subParts'] = gffOpts['subParts']
if 'impliedUTRs' in gffOpts and gffOpts['impliedUTRs']:
config['impliedUTRs'] = gffOpts['impliedUTRs']
elif trackType == 'JBrowse/View/Track/HTMLFeatures':
if 'transcriptType' in gffOpts and gffOpts['transcriptType']:
cmd += ['--type', gffOpts['transcriptType']]
cmd += [
'--trackType', gffOpts['trackType']
]
if metadata:
config.update({'metadata': metadata})
cmd.extend(['--config', json.dumps(config)])
self.subprocess_check_call(cmd)
if gffOpts.get('index', 'false') == 'true':
self.tracksToIndex.append("%s" % trackData['label'])
def add_rest(self, url, trackData):
data = {
"label": trackData['label'],
"key": trackData['key'],
"category": trackData['category'],
"type": "JBrowse/View/Track/HTMLFeatures",
"storeClass": "JBrowse/Store/SeqFeature/REST",
"baseUrl": url,
"query": {
"organism": "tyrannosaurus"
}
}
self._add_track_json(data)
def process_annotations(self, track):
category = track['category'].replace('__pd__date__pd__', TODAY)
outputTrackConfig = {
'style': {
'label': track['style'].get('label', 'description'),
'className': track['style'].get('className', 'feature'),
'description': track['style'].get('description', ''),
},
'overridePlugins': track['style'].get('overridePlugins', False) == 'True',
'overrideDraggable': track['style'].get('overrideDraggable', False) == 'True',
'maxHeight': track['style'].get('maxHeight', '600'),
'category': category,
}
mapped_chars = {
'>': '__gt__',
'<': '__lt__',
"'": '__sq__',
'"': '__dq__',
'[': '__ob__',
']': '__cb__',
'{': '__oc__',
'}': '__cc__',
'@': '__at__',
'#': '__pd__'
}
for i, (dataset_path, dataset_ext, track_human_label, extra_metadata) in enumerate(track['trackfiles']):
# Unsanitize labels (element_identifiers are always sanitized by Galaxy)
for key, value in mapped_chars.items():
track_human_label = track_human_label.replace(value, key)
log.info('Processing %s / %s', category, track_human_label)
outputTrackConfig['key'] = track_human_label
# We add extra data to hash for the case of REST + SPARQL.
try:
rest_url = track['conf']['options']['url']
except KeyError:
rest_url = ''
# I chose to use track['category'] instead of 'category' here. This
# is intentional. This way re-running the tool on a different date
# will not generate different hashes and make comparison of outputs
# much simpler.
hashData = [dataset_path, track_human_label, track['category'], rest_url]
hashData = '|'.join(hashData).encode('utf-8')
outputTrackConfig['label'] = hashlib.md5(hashData).hexdigest() + '_%s' % i
# Colour parsing is complex due to different track types having
# different colour options.
colourOptions = self.cs.parse_colours(track['conf']['options'], track['format'], gff3=dataset_path)
# This used to be done with a dict.update() call, however that wiped out any previous style settings...
for key in colourOptions:
if key == 'style':
for subkey in colourOptions['style']:
outputTrackConfig['style'][subkey] = colourOptions['style'][subkey]
else:
outputTrackConfig[key] = colourOptions[key]
if 'menus' in track['conf']['options']:
menus = self.cs.parse_menus(track['conf']['options'])
outputTrackConfig.update(menus)
# import pprint; pprint.pprint(track)
# import sys; sys.exit()
if dataset_ext in ('gff', 'gff3', 'bed'):
self.add_features(dataset_path, dataset_ext, outputTrackConfig,
track['conf']['options']['gff'], metadata=extra_metadata)
elif dataset_ext == 'bigwig':
self.add_bigwig(dataset_path, outputTrackConfig,
track['conf']['options']['wiggle'], metadata=extra_metadata)
elif dataset_ext == 'bam':
real_indexes = track['conf']['options']['pileup']['bam_indices']['bam_index']
if not isinstance(real_indexes, list):
# <bam_indices>
# <bam_index>/path/to/a.bam.bai</bam_index>
# </bam_indices>
#
# The above will result in the 'bam_index' key containing a
# string. If there are two or more indices, the container
# becomes a list. Fun!
real_indexes = [real_indexes]
self.add_bam(dataset_path, outputTrackConfig,
track['conf']['options']['pileup'],
bam_index=real_indexes[i], metadata=extra_metadata)
elif dataset_ext == 'blastxml':
self.add_blastxml(dataset_path, outputTrackConfig, track['conf']['options']['blast'], metadata=extra_metadata)
elif dataset_ext == 'vcf':
self.add_vcf(dataset_path, outputTrackConfig, metadata=extra_metadata)
elif dataset_ext == 'rest':
self.add_rest(track['conf']['options']['url'], outputTrackConfig, metadata=extra_metadata)
else:
log.warn('Do not know how to handle %s', dataset_ext)
# Return non-human label for use in other fields
yield outputTrackConfig['label']
def add_final_data(self, data):
viz_data = {}
if len(data['visibility']['default_on']) > 0:
viz_data['defaultTracks'] = ','.join(data['visibility']['default_on'])
if len(data['visibility']['always']) > 0:
viz_data['alwaysOnTracks'] = ','.join(data['visibility']['always'])
if len(data['visibility']['force']) > 0:
viz_data['forceTracks'] = ','.join(data['visibility']['force'])
generalData = {}
if data['general']['aboutDescription'] is not None:
generalData['aboutThisBrowser'] = {'description': data['general']['aboutDescription'].strip()}
generalData['view'] = {
'trackPadding': data['general']['trackPadding']
}
generalData['shareLink'] = (data['general']['shareLink'] == 'true')
generalData['show_tracklist'] = (data['general']['show_tracklist'] == 'true')
generalData['show_nav'] = (data['general']['show_nav'] == 'true')
generalData['show_overview'] = (data['general']['show_overview'] == 'true')
generalData['show_menu'] = (data['general']['show_menu'] == 'true')
generalData['hideGenomeOptions'] = (data['general']['hideGenomeOptions'] == 'true')
generalData['plugins'] = data['plugins']
viz_data.update(generalData)
self._add_json(viz_data)
if 'GCContent' in data['plugins_python']:
self._add_track_json({
"storeClass": "JBrowse/Store/SeqFeature/SequenceChunks",
"type": "GCContent/View/Track/GCContentXY",
"label": "GCContentXY",
"urlTemplate": "seq/{refseq_dirpath}/{refseq}-",
"bicolor_pivot": 0.5
# TODO: Expose params for everyone.
})
if 'ComboTrackSelector' in data['plugins_python']:
with open(os.path.join(self.outdir, 'data', 'trackList.json'), 'r') as handle:
trackListJson = json.load(handle)
trackListJson.update({
"trackSelector": {
"renameFacets": {
"tool_tool": "Tool ID",
"tool_tool_id": "Tool ID",
"tool_tool_version": "Tool Version",
"dataset_edam_format": "EDAM",
"dataset_size": "Size",
"history_display_name": "History Name",
"history_user_email": "Owner",
"metadata_dbkey": "Dbkey",
},
"displayColumns": [
"key",
"tool_tool",
"tool_tool_version",
"dataset_edam_format",
"dataset_size",
"history_display_name",
"history_user_email",
"metadata_dbkey",
],
"type": "Faceted",
"title": ["Galaxy Metadata"],
"escapeHTMLInData": False
},
"trackMetadata": {
"indexFacets": [
"category",
"key",
"tool_tool_id",
"tool_tool_version",
"dataset_edam_format",
"history_user_email",
"history_display_name"
]
}
})
with open(os.path.join(self.outdir, 'data', 'trackList2.json'), 'w') as handle:
json.dump(trackListJson, handle)
def clone_jbrowse(self, jbrowse_dir, destination):
    """Clone a JBrowse directory into a destination directory.

    Uses ``cp -r`` rather than ``shutil.copytree`` because JBrowse
    releases ship broken symlinks, which ``cp`` ignores but
    ``copytree`` chokes on. Broken symlinks that do get copied are
    removed afterwards.
    """
    # JBrowse seems to have included some bad symlinks, cp ignores bad symlinks
    # unlike copytree
    cmd = ['cp', '-r', os.path.join(jbrowse_dir, '.'), destination]
    log.debug(' '.join(cmd))
    subprocess.check_call(cmd)
    cmd = ['mkdir', '-p', os.path.join(destination, 'data', 'raw')]
    log.debug(' '.join(cmd))
    subprocess.check_call(cmd)
    # http://unix.stackexchange.com/a/38691/22785
    # JBrowse releases come with some broken symlinks
    cmd = ['find', destination, '-type', 'l', '-xtype', 'l']
    log.debug(' '.join(cmd))
    symlinks = subprocess.check_output(cmd)
    # BUG FIX: check_output returns one bytes blob; iterating it directly
    # yields individual bytes, not paths, so os.unlink() never removed
    # anything useful. Split into one path per line first (os.unlink
    # accepts bytes paths on POSIX).
    for link in symlinks.splitlines():
        if not link:
            continue
        try:
            os.unlink(link)
        except OSError:
            pass
if __name__ == '__main__':
    # Command-line entry point: parse the Galaxy-generated XML track
    # configuration and drive a JbrowseConnector to build the instance.
    parser = argparse.ArgumentParser(description="", epilog="")
    parser.add_argument('xml', type=argparse.FileType('r'), help='Track Configuration')
    parser.add_argument('--jbrowse', help='Folder containing a jbrowse release')
    parser.add_argument('--outdir', help='Output directory', default='out')
    parser.add_argument('--standalone', help='Standalone mode includes a copy of JBrowse', action='store_true')
    parser.add_argument('--version', '-V', action='version', version="%(prog)s 0.7.0")
    args = parser.parse_args()

    tree = ET.parse(args.xml.name)
    root = tree.getroot()

    jc = JbrowseConnector(
        jbrowse=args.jbrowse,
        outdir=args.outdir,
        genomes=[
            {
                'path': os.path.realpath(x.attrib['path']),
                'meta': metadata_from_node(x.find('metadata'))
            }
            for x in root.findall('metadata/genomes/genome')
        ],
        standalone=args.standalone,
        gencode=root.find('metadata/gencode').text
    )

    # Non-track configuration forwarded to add_final_data(): per-track
    # visibility buckets, general display options, and the plugin list.
    extra_data = {
        'visibility': {
            'default_on': [],
            'default_off': [],
            'force': [],
            'always': [],
        },
        'general': {
            'defaultLocation': root.find('metadata/general/defaultLocation').text,
            'trackPadding': int(root.find('metadata/general/trackPadding').text),
            'shareLink': root.find('metadata/general/shareLink').text,
            'aboutDescription': root.find('metadata/general/aboutDescription').text,
            'show_tracklist': root.find('metadata/general/show_tracklist').text,
            'show_nav': root.find('metadata/general/show_nav').text,
            'show_overview': root.find('metadata/general/show_overview').text,
            'show_menu': root.find('metadata/general/show_menu').text,
            'hideGenomeOptions': root.find('metadata/general/hideGenomeOptions').text,
        },
        'plugins': [{
            'location': 'https://cdn.rawgit.com/TAMU-CPT/blastview/97572a21b7f011c2b4d9a0b5af40e292d694cbef/',
            'name': 'BlastView'
        }],
        'plugins_python': ['BlastView'],
    }

    # Optional plugins requested in the XML; each adds a CDN-pinned JBrowse
    # plugin and, where needed, a python-side marker in plugins_python.
    plugins = root.find('plugins').attrib
    if plugins['GCContent'] == 'True':
        extra_data['plugins_python'].append('GCContent')
        extra_data['plugins'].append({
            'location': 'https://cdn.rawgit.com/elsiklab/gccontent/5c8b0582ecebf9edf684c76af8075fb3d30ec3fa/',
            'name': 'GCContent'
        })

    if plugins['Bookmarks'] == 'True':
        extra_data['plugins'].append({
            'location': 'https://cdn.rawgit.com/TAMU-CPT/bookmarks-jbrowse/5242694120274c86e1ccd5cb0e5e943e78f82393/',
            'name': 'Bookmarks'
        })

    if plugins['ComboTrackSelector'] == 'True':
        extra_data['plugins_python'].append('ComboTrackSelector')
        extra_data['plugins'].append({
            'location': 'https://cdn.rawgit.com/Arabidopsis-Information-Portal/ComboTrackSelector/52403928d5ccbe2e3a86b0fa5eb8e61c0f2e2f57',
            'icon': 'https://galaxyproject.org/images/logos/galaxy-icon-square.png',
            'name': 'ComboTrackSelector'
        })

    if plugins['theme'] == 'Minimalist':
        extra_data['plugins'].append({
            'location': 'https://cdn.rawgit.com/erasche/jbrowse-minimalist-theme/d698718442da306cf87f033c72ddb745f3077775/',
            'name': 'MinimalistTheme'
        })
    elif plugins['theme'] == 'Dark':
        extra_data['plugins'].append({
            'location': 'https://cdn.rawgit.com/erasche/jbrowse-dark-theme/689eceb7e33bbc1b9b15518d45a5a79b2e5d0a26/',
            'name': 'DarkTheme'
        })

    GALAXY_INFRASTRUCTURE_URL = root.find('metadata/galaxyUrl').text
    # Sometimes this comes as `localhost` without a protocol
    if not GALAXY_INFRASTRUCTURE_URL.startswith('http'):
        # so we'll prepend `http://` and hope for the best. Requests *should*
        # be GET and not POST so it should redirect OK
        GALAXY_INFRASTRUCTURE_URL = 'http://' + GALAXY_INFRASTRUCTURE_URL

    # One pass per <track>: collect its files + metadata, then let the
    # connector process it and record which keys it produced so their
    # visibility bucket can be filled in.
    for track in root.findall('tracks/track'):
        track_conf = {}
        track_conf['trackfiles'] = []

        for x in track.findall('files/trackFile'):
            metadata = metadata_from_node(x.find('metadata'))
            track_conf['trackfiles'].append((
                os.path.realpath(x.attrib['path']),
                x.attrib['ext'],
                x.attrib['label'],
                metadata
            ))

        track_conf['category'] = track.attrib['cat']
        track_conf['format'] = track.attrib['format']
        try:
            # Only pertains to gff3 + blastxml. TODO?
            track_conf['style'] = {t.tag: t.text for t in track.find('options/style')}
        except TypeError:
            # track.find() returned None (no <style> element); use defaults.
            # FIX: dropped the unused `as te` binding and the dead `pass`.
            track_conf['style'] = {}
        track_conf['conf'] = etree_to_dict(track.find('options'))
        keys = jc.process_annotations(track_conf)

        for key in keys:
            extra_data['visibility'][track.attrib.get('visibility', 'default_off')].append(key)

    jc.add_final_data(extra_data)
    jc.generate_names()
| dpryan79/tools-iuc | tools/jbrowse/jbrowse.py | Python | mit | 37,817 | [
"BLAST",
"Galaxy"
] | da55fdc77a325975ac459348a0addd527ff86ef11e10ac766bdede0909a44cb5 |
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array, check_consistent_length
from .utils.extmath import safe_sparse_dot
from .utils.fixes import logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB', 'ComplementNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
    """Abstract base class for naive Bayes estimators.

    Subclasses supply ``_joint_log_likelihood``; prediction and the
    probability accessors are derived from it here.
    """

    @abstractmethod
    def _joint_log_likelihood(self, X):
        """Compute the unnormalized posterior log probability of X.

        That is, ``log P(c) + log P(x|c)`` for all rows x of X, as an
        array-like of shape [n_classes, n_samples].

        Input is passed to _joint_log_likelihood as-is by predict,
        predict_proba and predict_log_proba.
        """

    def predict(self, X):
        """
        Perform classification on an array of test vectors X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
            Predicted target values for X
        """
        scores = self._joint_log_likelihood(X)
        # For each sample, pick the class with the largest joint
        # log-likelihood.
        winners = scores.argmax(axis=1)
        return self.classes_[winners]

    def predict_log_proba(self, X):
        """
        Return log-probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        scores = self._joint_log_likelihood(X)
        # Normalize by the marginal: log P(x) = logsumexp_c of the joint.
        marginal = logsumexp(scores, axis=1)
        return scores - marginal[:, np.newaxis]

    def predict_proba(self, X):
        """
        Return probability estimates for the test vector X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array-like, shape = [n_samples, n_classes]
            Returns the probability of the samples for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute `classes_`.
        """
        log_probabilities = self.predict_log_proba(X)
        return np.exp(log_probabilities)
class GaussianNB(BaseNB):
    """
    Gaussian Naive Bayes (GaussianNB)

    Can perform online updates to model parameters via `partial_fit` method.
    For details on algorithm used to update feature means and variance online,
    see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
    http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf

    Read more in the :ref:`User Guide <gaussian_naive_bayes>`.

    Parameters
    ----------
    priors : array-like, shape (n_classes,)
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    var_smoothing : float, optional (default=1e-9)
        Portion of the largest variance of all features that is added to
        variances for calculation stability.

    Attributes
    ----------
    class_prior_ : array, shape (n_classes,)
        probability of each class.

    class_count_ : array, shape (n_classes,)
        number of training samples observed in each class.

    theta_ : array, shape (n_classes, n_features)
        mean of each feature per class

    sigma_ : array, shape (n_classes, n_features)
        variance of each feature per class

    epsilon_ : float
        absolute additive value to variances

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> Y = np.array([1, 1, 1, 2, 2, 2])
    >>> from sklearn.naive_bayes import GaussianNB
    >>> clf = GaussianNB()
    >>> clf.fit(X, Y)
    GaussianNB(priors=None, var_smoothing=1e-09)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]
    >>> clf_pf = GaussianNB()
    >>> clf_pf.partial_fit(X, Y, np.unique(Y))
    GaussianNB(priors=None, var_smoothing=1e-09)
    >>> print(clf_pf.predict([[-0.8, -1]]))
    [1]
    """

    def __init__(self, priors=None, var_smoothing=1e-9):
        self.priors = priors
        self.var_smoothing = var_smoothing

    def fit(self, X, y, sample_weight=None):
        """Fit Gaussian Naive Bayes according to X, y

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target values.

        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights applied to individual samples (1. for unweighted).

            .. versionadded:: 0.17
               Gaussian Naive Bayes supports fitting with *sample_weight*.

        Returns
        -------
        self : object
        """
        X, y = check_X_y(X, y)
        # fit() is a full refit: discard any previously accumulated state
        # and process this data as the first (and only) batch.
        return self._partial_fit(X, y, np.unique(y), _refit=True,
                                 sample_weight=sample_weight)

    @staticmethod
    def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
        """Compute online update of Gaussian mean and variance.

        Given starting sample count, mean, and variance, a new set of
        points X, and optionally sample weights, return the updated mean and
        variance. (NB - each dimension (column) in X is treated as independent
        -- you get variance, not covariance).

        Can take scalar mean and variance, or vector mean and variance to
        simultaneously update a number of independent Gaussians.

        See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
        http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf

        Parameters
        ----------
        n_past : int
            Number of samples represented in old mean and variance. If sample
            weights were given, this should contain the sum of sample
            weights represented in old mean and variance.

        mu : array-like, shape (number of Gaussians,)
            Means for Gaussians in original set.

        var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.

        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        total_mu : array-like, shape (number of Gaussians,)
            Updated mean for each Gaussian over the combined set.

        total_var : array-like, shape (number of Gaussians,)
            Updated variance for each Gaussian over the combined set.
        """
        # No new points for this class in the batch: nothing to update.
        if X.shape[0] == 0:
            return mu, var

        # Compute (potentially weighted) mean and variance of new datapoints
        if sample_weight is not None:
            n_new = float(sample_weight.sum())
            new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
            new_var = np.average((X - new_mu) ** 2, axis=0,
                                 weights=sample_weight / n_new)
        else:
            n_new = X.shape[0]
            new_var = np.var(X, axis=0)
            new_mu = np.mean(X, axis=0)

        # First batch ever seen: the batch statistics ARE the statistics.
        if n_past == 0:
            return new_mu, new_var

        n_total = float(n_past + n_new)

        # Combine mean of old and new data, taking into consideration
        # (weighted) number of observations
        total_mu = (n_new * new_mu + n_past * mu) / n_total

        # Combine variance of old and new data, taking into consideration
        # (weighted) number of observations. This is achieved by combining
        # the sum-of-squared-differences (ssd)
        old_ssd = n_past * var
        new_ssd = n_new * new_var
        # Cross term from Chan/Golub/LeVeque's pairwise update: accounts for
        # the shift between the old and new batch means.
        total_ssd = (old_ssd + new_ssd +
                     (n_past / float(n_new * n_total)) *
                     (n_new * mu - n_new * new_mu) ** 2)
        total_var = total_ssd / n_total

        return total_mu, total_var

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.

        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.

        This is especially useful when the whole dataset is too big to fit in
        memory at once.

        This method has some performance and numerical stability overhead,
        hence it is better to call partial_fit on chunks of data that are
        as large as possible (as long as fitting in the memory budget) to
        hide the overhead.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target values.

        classes : array-like, shape (n_classes,), optional (default=None)
            List of all the classes that can possibly appear in the y vector.

            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.

        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights applied to individual samples (1. for unweighted).

            .. versionadded:: 0.17

        Returns
        -------
        self : object
        """
        return self._partial_fit(X, y, classes, _refit=False,
                                 sample_weight=sample_weight)

    def _partial_fit(self, X, y, classes=None, _refit=False,
                     sample_weight=None):
        """Actual implementation of Gaussian NB fitting.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape (n_samples,)
            Target values.

        classes : array-like, shape (n_classes,), optional (default=None)
            List of all the classes that can possibly appear in the y vector.

            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.

        _refit : bool, optional (default=False)
            If true, act as though this were the first time we called
            _partial_fit (ie, throw away any past fitting and start over).

        sample_weight : array-like, shape (n_samples,), optional (default=None)
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
        """
        X, y = check_X_y(X, y)
        if sample_weight is not None:
            sample_weight = check_array(sample_weight, ensure_2d=False)
            check_consistent_length(y, sample_weight)

        # If the ratio of data variance between dimensions is too small, it
        # will cause numerical errors. To address this, we artificially
        # boost the variance by epsilon, a small fraction of the standard
        # deviation of the largest dimension.
        self.epsilon_ = self.var_smoothing * np.var(X, axis=0).max()

        if _refit:
            # Forget previously seen classes so _check_partial_fit_first_call
            # treats this as the first call and re-initializes all counters.
            self.classes_ = None

        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_features = X.shape[1]
            n_classes = len(self.classes_)
            self.theta_ = np.zeros((n_classes, n_features))
            self.sigma_ = np.zeros((n_classes, n_features))

            self.class_count_ = np.zeros(n_classes, dtype=np.float64)

            # Initialise the class prior
            # Take into account the priors
            if self.priors is not None:
                priors = np.asarray(self.priors)
                # Check that the provide prior match the number of classes
                if len(priors) != n_classes:
                    raise ValueError('Number of priors must match number of'
                                     ' classes.')
                # Check that the sum is 1
                if not np.isclose(priors.sum(), 1.0):
                    raise ValueError('The sum of the priors should be 1.')
                # Check that the prior are non-negative
                if (priors < 0).any():
                    raise ValueError('Priors must be non-negative.')
                self.class_prior_ = priors
            else:
                # Initialize the priors to zeros for each class
                self.class_prior_ = np.zeros(len(self.classes_),
                                             dtype=np.float64)
        else:
            if X.shape[1] != self.theta_.shape[1]:
                msg = "Number of features %d does not match previous data %d."
                raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
            # Remove the epsilon boost applied at the end of the previous
            # call so sigma_ again holds the raw accumulated variances.
            self.sigma_[:, :] -= self.epsilon_

        classes = self.classes_

        unique_y = np.unique(y)
        unique_y_in_classes = np.in1d(unique_y, classes)

        if not np.all(unique_y_in_classes):
            raise ValueError("The target label(s) %s in y do not exist in the "
                             "initial classes %s" %
                             (unique_y[~unique_y_in_classes], classes))

        # Update the per-class running mean/variance with this batch,
        # one class at a time.
        for y_i in unique_y:
            i = classes.searchsorted(y_i)
            X_i = X[y == y_i, :]

            if sample_weight is not None:
                sw_i = sample_weight[y == y_i]
                N_i = sw_i.sum()
            else:
                sw_i = None
                N_i = X_i.shape[0]

            new_theta, new_sigma = self._update_mean_variance(
                self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
                X_i, sw_i)

            self.theta_[i, :] = new_theta
            self.sigma_[i, :] = new_sigma
            self.class_count_[i] += N_i

        # Re-apply the variance boost for numerically stable prediction.
        self.sigma_[:, :] += self.epsilon_

        # Update if only no priors is provided
        if self.priors is None:
            # Empirical prior, with sample_weight taken into account
            self.class_prior_ = self.class_count_ / self.class_count_.sum()

        return self

    def _joint_log_likelihood(self, X):
        check_is_fitted(self, "classes_")

        X = check_array(X)
        joint_log_likelihood = []
        for i in range(np.size(self.classes_)):
            # log P(c): log prior of class i.
            jointi = np.log(self.class_prior_[i])
            # log P(x|c): sum of independent per-feature Gaussian log pdfs.
            n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
            n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
                                 (self.sigma_[i, :]), 1)
            joint_log_likelihood.append(jointi + n_ij)

        # Transpose to the (n_samples, n_classes) layout BaseNB expects.
        joint_log_likelihood = np.array(joint_log_likelihood).T
        return joint_log_likelihood
# Smallest admissible additive-smoothing value for the discrete estimators;
# _check_alpha() clips smaller alphas up to this to avoid log(0) / numeric
# errors in the feature log-probability computations.
_ALPHA_MIN = 1e-10
class BaseDiscreteNB(BaseNB):
    """Abstract base class for naive Bayes on discrete/categorical data

    Any estimator based on this class should provide:

    __init__
    _joint_log_likelihood(X) as per BaseNB

    Concrete subclasses are additionally expected to implement ``_count``
    (accumulate raw counts from a batch) and ``_update_feature_log_prob``
    (turn the accumulated counts into smoothed log probabilities).
    """

    def _update_class_log_prior(self, class_prior=None):
        """Recompute ``class_log_prior_`` from an explicit prior, the
        empirical class counts, or a uniform distribution."""
        n_classes = len(self.classes_)
        if class_prior is not None:
            if len(class_prior) != n_classes:
                raise ValueError("Number of priors must match number of"
                                 " classes.")
            self.class_log_prior_ = np.log(class_prior)
        elif self.fit_prior:
            # empirical prior, with sample_weight taken into account
            self.class_log_prior_ = (np.log(self.class_count_) -
                                     np.log(self.class_count_.sum()))
        else:
            # Uniform prior: log(1 / n_classes) for every class.
            self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))

    def _check_alpha(self):
        """Validate ``self.alpha`` and return it clipped to ``_ALPHA_MIN``."""
        if np.min(self.alpha) < 0:
            raise ValueError('Smoothing parameter alpha = %.1e. '
                             'alpha should be > 0.' % np.min(self.alpha))
        if isinstance(self.alpha, np.ndarray):
            # A vector alpha must supply one value per feature.
            if not self.alpha.shape[0] == self.feature_count_.shape[1]:
                raise ValueError("alpha should be a scalar or a numpy array "
                                 "with shape [n_features]")
        if np.min(self.alpha) < _ALPHA_MIN:
            warnings.warn('alpha too small will result in numeric errors, '
                          'setting alpha = %.1e' % _ALPHA_MIN)
            return np.maximum(self.alpha, _ALPHA_MIN)
        return self.alpha

    def partial_fit(self, X, y, classes=None, sample_weight=None):
        """Incremental fit on a batch of samples.

        This method is expected to be called several times consecutively
        on different chunks of a dataset so as to implement out-of-core
        or online learning.

        This is especially useful when the whole dataset is too big to fit in
        memory at once.

        This method has some performance overhead hence it is better to call
        partial_fit on chunks of data that are as large as possible
        (as long as fitting in the memory budget) to hide the overhead.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        classes : array-like, shape = [n_classes] (default=None)
            List of all the classes that can possibly appear in the y vector.

            Must be provided at the first call to partial_fit, can be omitted
            in subsequent calls.

        sample_weight : array-like, shape = [n_samples] (default=None)
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        _, n_features = X.shape

        if _check_partial_fit_first_call(self, classes):
            # This is the first call to partial_fit:
            # initialize various cumulative counters
            n_effective_classes = len(classes) if len(classes) > 1 else 2
            self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
            self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                           dtype=np.float64)
        elif n_features != self.coef_.shape[1]:
            msg = "Number of features %d does not match previous data %d."
            raise ValueError(msg % (n_features, self.coef_.shape[-1]))

        # One-hot encode y; a binary problem yields a single column, which
        # is expanded to the two-column [1 - Y, Y] representation.
        Y = label_binarize(y, classes=self.classes_)
        if Y.shape[1] == 1:
            Y = np.concatenate((1 - Y, Y), axis=1)

        n_samples, n_classes = Y.shape

        if X.shape[0] != Y.shape[0]:
            msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
            raise ValueError(msg % (X.shape[0], y.shape[0]))

        # label_binarize() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            # Scaling each one-hot row by its weight makes the subsequent
            # count accumulation sample-weight aware.
            sample_weight = np.atleast_2d(sample_weight)
            Y *= check_array(sample_weight).T

        class_prior = self.class_prior

        # Count raw events from data before updating the class log prior
        # and feature log probas
        self._count(X, Y)

        # XXX: OPTIM: we could introduce a public finalization method to
        # be called by the user explicitly just once after several consecutive
        # calls to partial_fit and prior any call to predict[_[log_]proba]
        # to avoid computing the smooth log probas at each call to partial fit
        alpha = self._check_alpha()
        self._update_feature_log_prob(alpha)
        self._update_class_log_prior(class_prior=class_prior)
        return self

    def fit(self, X, y, sample_weight=None):
        """Fit Naive Bayes classifier according to X, y

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        sample_weight : array-like, shape = [n_samples], (default=None)
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : object
        """
        X, y = check_X_y(X, y, 'csr')
        _, n_features = X.shape

        labelbin = LabelBinarizer()
        Y = labelbin.fit_transform(y)
        self.classes_ = labelbin.classes_
        if Y.shape[1] == 1:
            # Binary problem: expand the single column to [1 - Y, Y].
            Y = np.concatenate((1 - Y, Y), axis=1)

        # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
        # We convert it to np.float64 to support sample_weight consistently;
        # this means we also don't have to cast X to floating point
        Y = Y.astype(np.float64)
        if sample_weight is not None:
            sample_weight = np.atleast_2d(sample_weight)
            Y *= check_array(sample_weight).T

        class_prior = self.class_prior

        # Count raw events from data before updating the class log prior
        # and feature log probas
        n_effective_classes = Y.shape[1]
        self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
        self.feature_count_ = np.zeros((n_effective_classes, n_features),
                                       dtype=np.float64)
        self._count(X, Y)
        alpha = self._check_alpha()
        self._update_feature_log_prob(alpha)
        self._update_class_log_prior(class_prior=class_prior)
        return self

    # XXX The following is a stopgap measure; we need to set the dimensions
    # of class_log_prior_ and feature_log_prob_ correctly.
    def _get_coef(self):
        # In the binary case expose only the positive class's row so the
        # linear-model view has shape (1, n_features).
        return (self.feature_log_prob_[1:]
                if len(self.classes_) == 2 else self.feature_log_prob_)

    def _get_intercept(self):
        return (self.class_log_prior_[1:]
                if len(self.classes_) == 2 else self.class_log_prior_)

    # Read-only linear-model aliases (see the note on _get_coef above).
    coef_ = property(_get_coef)
    intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
    """Naive Bayes classifier for multinomial models.

    Suitable for classification with discrete features (e.g. word counts
    for text classification). The multinomial distribution normally
    requires integer feature counts, but fractional counts such as tf-idf
    may also work in practice.

    Read more in the :ref:`User Guide <multinomial_naive_bayes>`.

    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).

    fit_prior : boolean, optional (default=True)
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.

    class_prior : array-like, size (n_classes,), optional (default=None)
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    Attributes
    ----------
    class_log_prior_ : array, shape (n_classes, )
        Smoothed empirical log probability for each class.

    intercept_ : array, shape (n_classes, )
        Mirrors ``class_log_prior_`` for interpreting MultinomialNB
        as a linear model.

    feature_log_prob_ : array, shape (n_classes, n_features)
        Empirical log probability of features given a class, ``P(x_i|y)``.

    coef_ : array, shape (n_classes, n_features)
        Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
        as a linear model.

    class_count_ : array, shape (n_classes,)
        Number of samples encountered for each class during fitting,
        weighted by the sample weight when provided.

    feature_count_ : array, shape (n_classes, n_features)
        Number of samples encountered for each (class, feature) during
        fitting, weighted by the sample weight when provided.

    Notes
    -----
    For the rationale behind the names `coef_` and `intercept_`, i.e.
    naive Bayes as a linear classifier, see J. Rennie et al. (2003),
    Tackling the poor assumptions of naive Bayes text classifiers, ICML.

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
    """

    def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
        self.alpha = alpha
        self.fit_prior = fit_prior
        self.class_prior = class_prior

    def _count(self, X, Y):
        """Count and smooth feature occurrences."""
        values = X.data if issparse(X) else X
        if np.any(values < 0):
            raise ValueError("Input X must be non-negative")
        # Y.T @ X accumulates, per class, the sum of feature values.
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)

    def _update_feature_log_prob(self, alpha):
        """Apply smoothing to raw counts and recompute log probabilities"""
        smoothed = self.feature_count_ + alpha
        per_class_totals = smoothed.sum(axis=1).reshape(-1, 1)
        # log P(x_i | y) = log(count + alpha) - log(total per class).
        self.feature_log_prob_ = np.log(smoothed) - np.log(per_class_totals)

    def _joint_log_likelihood(self, X):
        """Calculate the posterior log probability of the samples X"""
        check_is_fitted(self, "classes_")

        X = check_array(X, accept_sparse='csr')
        scores = safe_sparse_dot(X, self.feature_log_prob_.T)
        return scores + self.class_log_prior_
class ComplementNB(BaseDiscreteNB):
    """The Complement Naive Bayes classifier described in Rennie et al. (2003).

    Designed to correct the "severe assumptions" made by the standard
    Multinomial Naive Bayes classifier; particularly suited for
    imbalanced data sets.

    Read more in the :ref:`User Guide <complement_naive_bayes>`.

    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).

    fit_prior : boolean, optional (default=True)
        Only used in edge case with a single class in the training set.

    class_prior : array-like, size (n_classes,), optional (default=None)
        Prior probabilities of the classes. Not used.

    norm : boolean, optional (default=False)
        Whether or not a second normalization of the weights is performed.
        The default behavior mirrors the implementations found in Mahout and
        Weka, which do not follow the full algorithm described in Table 9 of
        the paper.

    Attributes
    ----------
    class_log_prior_ : array, shape (n_classes, )
        Smoothed empirical log probability for each class. Only used in edge
        case with a single class in the training set.

    feature_log_prob_ : array, shape (n_classes, n_features)
        Empirical weights for class complements.

    class_count_ : array, shape (n_classes,)
        Number of samples encountered for each class during fitting,
        weighted by the sample weight when provided.

    feature_count_ : array, shape (n_classes, n_features)
        Number of samples encountered for each (class, feature) during
        fitting, weighted by the sample weight when provided.

    feature_all_ : array, shape (n_features,)
        Number of samples encountered for each feature during fitting,
        weighted by the sample weight when provided.

    References
    ----------
    Rennie, J. D., Shih, L., Teevan, J., & Karger, D. R. (2003).
    Tackling the poor assumptions of naive bayes text classifiers. In ICML
    (Vol. 3, pp. 616-623).
    http://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf
    """

    def __init__(self, alpha=1.0, fit_prior=True, class_prior=None,
                 norm=False):
        self.alpha = alpha
        self.fit_prior = fit_prior
        self.class_prior = class_prior
        self.norm = norm

    def _count(self, X, Y):
        """Count feature occurrences."""
        values = X.data if issparse(X) else X
        if np.any(values < 0):
            raise ValueError("Input X must be non-negative")
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)
        # Totals over all classes, needed to form the complement counts.
        self.feature_all_ = self.feature_count_.sum(axis=0)

    def _update_feature_log_prob(self, alpha):
        """Apply smoothing to raw counts and compute the weights."""
        # Complement count: occurrences of each feature in every class
        # OTHER than the row's class, plus smoothing.
        complement = self.feature_all_ + alpha - self.feature_count_
        logged = np.log(complement / complement.sum(axis=1, keepdims=True))
        if self.norm:
            # Second weight normalization from Table 9 of the paper.
            row_sums = logged.sum(axis=1, keepdims=True)
            self.feature_log_prob_ = logged / row_sums
        else:
            # BaseNB.predict uses argmax, but ComplementNB operates with
            # argmin, so negate the weights.
            self.feature_log_prob_ = -logged

    def _joint_log_likelihood(self, X):
        """Calculate the class scores for the samples in X."""
        check_is_fitted(self, "classes_")

        X = check_array(X, accept_sparse="csr")
        scores = safe_sparse_dot(X, self.feature_log_prob_.T)
        if len(self.classes_) == 1:
            # Degenerate single-class case: fall back to the class prior.
            scores += self.class_log_prior_
        return scores
class BernoulliNB(BaseDiscreteNB):
    """Naive Bayes classifier for multivariate Bernoulli models.

    Like MultinomialNB, this classifier is suitable for discrete data. The
    difference is that while MultinomialNB works with occurrence counts,
    BernoulliNB is designed for binary/boolean features.

    Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.

    Parameters
    ----------
    alpha : float, optional (default=1.0)
        Additive (Laplace/Lidstone) smoothing parameter
        (0 for no smoothing).
    binarize : float or None, optional (default=0.0)
        Threshold for binarizing (mapping to booleans) of sample features.
        If None, input is presumed to already consist of binary vectors.
    fit_prior : boolean, optional (default=True)
        Whether to learn class prior probabilities or not.
        If false, a uniform prior will be used.
    class_prior : array-like, size=[n_classes,], optional (default=None)
        Prior probabilities of the classes. If specified the priors are not
        adjusted according to the data.

    Attributes
    ----------
    class_log_prior_ : array, shape = [n_classes]
        Log probability of each class (smoothed).
    feature_log_prob_ : array, shape = [n_classes, n_features]
        Empirical log probability of features given a class, P(x_i|y).
    class_count_ : array, shape = [n_classes]
        Number of samples encountered for each class during fitting. This
        value is weighted by the sample weight when provided.
    feature_count_ : array, shape = [n_classes, n_features]
        Number of samples encountered for each (class, feature)
        during fitting. This value is weighted by the sample weight when
        provided.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.random.randint(2, size=(6, 100))
    >>> Y = np.array([1, 2, 3, 4, 4, 5])
    >>> from sklearn.naive_bayes import BernoulliNB
    >>> clf = BernoulliNB()
    >>> clf.fit(X, Y)
    BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
    >>> print(clf.predict(X[2:3]))
    [3]

    References
    ----------
    C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
    Information Retrieval. Cambridge University Press, pp. 234-265.
    http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html

    A. McCallum and K. Nigam (1998). A comparison of event models for naive
    Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
    Text Categorization, pp. 41-48.

    V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
    naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
    """

    def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
                 class_prior=None):
        # Store hyper-parameters only; fitting happens in BaseDiscreteNB.
        self.alpha = alpha
        self.binarize = binarize
        self.fit_prior = fit_prior
        self.class_prior = class_prior

    def _count(self, X, Y):
        """Count and smooth feature occurrences."""
        # Map features to {0, 1} first so each count means "number of
        # samples in which the feature was present".
        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)
        self.feature_count_ += safe_sparse_dot(Y.T, X)
        self.class_count_ += Y.sum(axis=0)

    def _update_feature_log_prob(self, alpha):
        """Apply smoothing to raw counts and recompute log probabilities"""
        smoothed_fc = self.feature_count_ + alpha
        # Bernoulli features have two outcomes, hence alpha * 2 in the
        # denominator (one alpha per outcome).
        smoothed_cc = self.class_count_ + alpha * 2
        self.feature_log_prob_ = (np.log(smoothed_fc) -
                                  np.log(smoothed_cc.reshape(-1, 1)))

    def _joint_log_likelihood(self, X):
        """Calculate the posterior log probability of the samples X"""
        check_is_fitted(self, "classes_")

        X = check_array(X, accept_sparse='csr')

        # Apply the same binarization used at fit time.
        if self.binarize is not None:
            X = binarize(X, threshold=self.binarize)

        n_classes, n_features = self.feature_log_prob_.shape
        n_samples, n_features_X = X.shape

        if n_features_X != n_features:
            raise ValueError("Expected input with %d features, got %d instead"
                             % (n_features, n_features_X))

        # Log probability of each feature being *absent*, per class.
        neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
        # Compute  neg_prob . (1 - X).T  as  sum(neg_prob) - X . neg_prob,
        # which keeps X sparse (no dense 1 - X materialization).
        jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
        jll += self.class_log_prior_ + neg_prob.sum(axis=1)

        return jll
| vortex-ape/scikit-learn | sklearn/naive_bayes.py | Python | bsd-3-clause | 36,009 | [
"Gaussian"
] | da720a272c64043902eb9a36d575f94fc89d9e7ca72a026053e847f5de17e8bb |
# Copyright 2013 Gokcen Eraslan. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit test for Bio.PDB.NACCESS which needs NACCESS tool.
See also test_PDB.py for dependency free NACCESS tests.
"""
import subprocess
import unittest
from Bio import MissingExternalDependencyError
from Bio.PDB import PDBParser
from Bio.PDB.NACCESS import NACCESS
# Check if NACCESS is installed.  The "-q" invocation only has to start
# successfully; OSError means the executable is missing from PATH, in
# which case we abort the module import with an actionable message.
try:
    subprocess.check_call(["naccess", "-q"],
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
except OSError:
    raise MissingExternalDependencyError(
        "Install naccess if you want to use it from Biopython.")
class NACCESS_test(unittest.TestCase):
"""Test NACCESS module"""
def test_NACCESS(self):
"""Test NACCESS generation from PDB"""
p = PDBParser()
pdbfile = "PDB/1A8O.pdb"
model = p.get_structure("1A8O", pdbfile)[0]
naccess = NACCESS(model, pdbfile)
self.assertEqual(len(naccess), 66)
if __name__ == '__main__':
    # Verbose runner so each test name is printed as it executes.
    unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| updownlife/multipleK | dependencies/biopython-1.65/Tests/test_NACCESS_tool.py | Python | gpl-2.0 | 1,259 | [
"Biopython"
] | f1dc1dec6e0777b54fe823646f2056957bc48f09c84cbfb5dba121d24ee02966 |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Discogs album search support to the autotagger. Requires the
discogs-client library.
"""
from __future__ import division, absolute_import, print_function
import beets.ui
from beets import config
from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance
from beets.plugins import BeetsPlugin
from beets.util import confit
from discogs_client import Release, Client
from discogs_client.exceptions import DiscogsAPIError
from requests.exceptions import ConnectionError
from six.moves import http_client
import beets
import re
import time
import json
import socket
import os
import traceback
from string import ascii_lowercase
# Identification string handed to every discogs_client.Client instance;
# embeds the running beets version.
USER_AGENT = u'beets/{0} +http://beets.io/'.format(beets.__version__)

# Exceptions that discogs_client should really handle but does not.
CONNECTION_ERRORS = (ConnectionError, socket.error, http_client.HTTPException,
                     ValueError,  # JSON decoding raises a ValueError.
                     DiscogsAPIError)
class DiscogsPlugin(BeetsPlugin):
    """Autotagger metadata source backed by the Discogs database.

    Talks to Discogs through the ``discogs_client`` library, using OAuth
    credentials persisted in a JSON token file inside the beets
    configuration directory.
    """

    def __init__(self):
        super(DiscogsPlugin, self).__init__()
        self.config.add({
            'apikey': 'rAzVUQYRaoFjeBjyWuWZ',
            'apisecret': 'plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy',
            'tokenfile': 'discogs_token.json',
            'source_weight': 0.5,
        })
        # Keep the OAuth app credentials out of dumped configuration.
        self.config['apikey'].redact = True
        self.config['apisecret'].redact = True
        # Created lazily in setup() (on import_begin) so that merely
        # loading the plugin stays cheap and network-free.
        self.discogs_client = None
        self.register_listener('import_begin', self.setup)

    def setup(self, session=None):
        """Create the `discogs_client` field. Authenticate if necessary.
        """
        c_key = self.config['apikey'].as_str()
        c_secret = self.config['apisecret'].as_str()

        # Get the OAuth token from a file or log in.
        try:
            with open(self._tokenfile()) as f:
                tokendata = json.load(f)
        except IOError:
            # No token yet. Generate one.
            token, secret = self.authenticate(c_key, c_secret)
        else:
            token = tokendata['token']
            secret = tokendata['secret']

        self.discogs_client = Client(USER_AGENT, c_key, c_secret,
                                     token, secret)

    def reset_auth(self):
        """Delete token file & redo the auth steps.
        """
        os.remove(self._tokenfile())
        self.setup()

    def _tokenfile(self):
        """Get the path to the JSON file for storing the OAuth token.
        """
        return self.config['tokenfile'].get(confit.Filename(in_app_dir=True))

    def authenticate(self, c_key, c_secret):
        """Interactively obtain an OAuth token/secret pair and save it.

        Raises ``beets.ui.UserError`` on any communication or
        authorization failure.
        """
        # Get the link for the OAuth page.
        auth_client = Client(USER_AGENT, c_key, c_secret)
        try:
            _, _, url = auth_client.get_authorize_url()
        except CONNECTION_ERRORS as e:
            self._log.debug(u'connection error: {0}', e)
            raise beets.ui.UserError(u'communication with Discogs failed')

        beets.ui.print_(u"To authenticate with Discogs, visit:")
        beets.ui.print_(url)

        # Ask for the code and validate it.
        code = beets.ui.input_(u"Enter the code:")
        try:
            token, secret = auth_client.get_access_token(code)
        except DiscogsAPIError:
            raise beets.ui.UserError(u'Discogs authorization failed')
        except CONNECTION_ERRORS as e:
            self._log.debug(u'connection error: {0}', e)
            raise beets.ui.UserError(u'Discogs token request failed')

        # Save the token for later use.
        self._log.debug(u'Discogs token {0}, secret {1}', token, secret)
        with open(self._tokenfile(), 'w') as f:
            json.dump({'token': token, 'secret': secret}, f)

        return token, secret

    def album_distance(self, items, album_info, mapping):
        """Returns the album distance.
        """
        dist = Distance()
        if album_info.data_source == 'Discogs':
            dist.add('source', self.config['source_weight'].as_number())
        return dist

    def candidates(self, items, artist, album, va_likely):
        """Returns a list of AlbumInfo objects for discogs search results
        matching an album and artist (if not various).
        """
        if not self.discogs_client:
            return

        if va_likely:
            query = album
        else:
            query = '%s %s' % (artist, album)
        try:
            return self.get_albums(query)
        except DiscogsAPIError as e:
            self._log.debug(u'API Error: {0} (query: {1})', e, query)
            if e.status_code == 401:
                # Token expired/revoked: re-authenticate once and retry.
                self.reset_auth()
                return self.candidates(items, artist, album, va_likely)
            else:
                return []
        except CONNECTION_ERRORS:
            self._log.debug(u'Connection error in album search', exc_info=True)
            return []

    def album_for_id(self, album_id):
        """Fetches an album by its Discogs ID and returns an AlbumInfo object
        or None if the album is not found.
        """
        if not self.discogs_client:
            return

        self._log.debug(u'Searching for release {0}', album_id)
        # Discogs-IDs are simple integers. We only look for those at the end
        # of an input string as to avoid confusion with other metadata plugins.
        # An optional bracket can follow the integer, as this is how discogs
        # displays the release ID on its webpage.
        match = re.search(r'(^|\[*r|discogs\.com/.+/release/)(\d+)($|\])',
                          album_id)
        if not match:
            return None
        result = Release(self.discogs_client, {'id': int(match.group(2))})
        # Try to obtain title to verify that we indeed have a valid Release
        try:
            getattr(result, 'title')
        except DiscogsAPIError as e:
            if e.status_code != 404:
                self._log.debug(u'API Error: {0} (query: {1})', e, result._uri)
                if e.status_code == 401:
                    self.reset_auth()
                    return self.album_for_id(album_id)
            return None
        except CONNECTION_ERRORS:
            self._log.debug(u'Connection error in album lookup', exc_info=True)
            return None
        return self.get_album_info(result)

    def get_albums(self, query):
        """Returns a list of AlbumInfo objects for a discogs search query.
        """
        # Strip non-word characters from query. Things like "!" and "-" can
        # cause a query to return no results, even if they match the artist or
        # album title. Use `re.UNICODE` flag to avoid stripping non-english
        # word characters.
        # FIXME: Encode as ASCII to work around a bug:
        # https://github.com/beetbox/beets/issues/1051
        # When the library is fixed, we should encode as UTF-8.
        query = re.sub(r'(?u)\W+', ' ', query).encode('ascii', "replace")
        # Strip medium information from query, Things like "CD1" and "disk 1"
        # can also negate an otherwise positive result.
        query = re.sub(br'(?i)\b(CD|disc)\s*\d+', b'', query)
        try:
            releases = self.discogs_client.search(query,
                                                  type='release').page(1)
        except CONNECTION_ERRORS:
            self._log.debug(u"Communication error while searching for {0!r}",
                            query, exc_info=True)
            return []
        # Only the first five hits are considered; get_album_info may
        # return None for malformed releases, which we filter out.
        return [album for album in map(self.get_album_info, releases[:5])
                if album]

    def get_album_info(self, result):
        """Returns an AlbumInfo object for a discogs Release object.
        """
        # Explicitly reload the `Release` fields, as they might not be yet
        # present if the result is from a `discogs_client.search()`.
        if not result.data.get('artists'):
            result.refresh()

        # Sanity check for required fields. The list of required fields is
        # defined at Guideline 1.3.1.a, but in practice some releases might be
        # lacking some of these fields. This function expects at least:
        # `artists` (>0), `title`, `id`, `tracklist` (>0)
        # https://www.discogs.com/help/doc/submission-guidelines-general-rules
        if not all([result.data.get(k) for k in ['artists', 'title', 'id',
                                                 'tracklist']]):
            self._log.warn(u"Release does not contain the required fields")
            return None

        artist, artist_id = self.get_artist([a.data for a in result.artists])
        # Collapse runs of spaces in the title.
        album = re.sub(r' +', ' ', result.title)
        album_id = result.data['id']
        # Use `.data` to access the tracklist directly instead of the
        # convenient `.tracklist` property, which will strip out useful artist
        # information and leave us with skeleton `Artist` objects that will
        # each make an API call just to get the same data back.
        tracks = self.get_tracks(result.data['tracklist'])

        # Extract information for the optional AlbumInfo fields, if possible.
        va = result.data['artists'][0].get('name', '').lower() == 'various'
        year = result.data.get('year')
        mediums = len(set(t.medium for t in tracks))
        country = result.data.get('country')
        data_url = result.data.get('uri')

        # Extract information for the optional AlbumInfo fields that are
        # contained on nested discogs fields.
        albumtype = media = label = catalogno = None
        if result.data.get('formats'):
            albumtype = ', '.join(
                result.data['formats'][0].get('descriptions', [])) or None
            media = result.data['formats'][0]['name']
        if result.data.get('labels'):
            label = result.data['labels'][0].get('name')
            catalogno = result.data['labels'][0].get('catno')

        # Additional cleanups (various artists name, catalog number, media).
        if va:
            artist = config['va_name'].as_str()
        if catalogno == 'none':
            catalogno = None
        # Explicitly set the `media` for the tracks, since it is expected by
        # `autotag.apply_metadata`, and set `medium_total`.
        for track in tracks:
            track.media = media
            track.medium_total = mediums
        return AlbumInfo(album, album_id, artist, artist_id, tracks, asin=None,
                         albumtype=albumtype, va=va, year=year, month=None,
                         day=None, label=label, mediums=mediums,
                         artist_sort=None, releasegroup_id=None,
                         catalognum=catalogno, script=None, language=None,
                         country=country, albumstatus=None, media=media,
                         albumdisambig=None, artist_credit=None,
                         original_year=None, original_month=None,
                         original_day=None, data_source='Discogs',
                         data_url=data_url)

    def get_artist(self, artists):
        """Returns an artist string (all artists) and an artist_id (the main
        artist) for a list of discogs album or track artists.
        """
        artist_id = None
        bits = []
        for i, artist in enumerate(artists):
            # The first artist in the list is treated as the main artist.
            if not artist_id:
                artist_id = artist['id']
            name = artist['name']
            # Strip disambiguation number.
            name = re.sub(r' \(\d+\)$', '', name)
            # Move articles to the front.
            name = re.sub(r'(?i)^(.*?), (a|an|the)$', r'\2 \1', name)
            bits.append(name)
            # Discogs supplies a join phrase ("feat.", "&", ...) between
            # consecutive artists; skip it after the last one.
            if artist['join'] and i < len(artists) - 1:
                bits.append(artist['join'])
        artist = ' '.join(bits).replace(' ,', ',') or None
        return artist, artist_id

    def get_tracks(self, tracklist):
        """Returns a list of TrackInfo objects for a discogs tracklist.
        """
        try:
            clean_tracklist = self.coalesce_tracks(tracklist)
        except Exception as exc:
            # FIXME: this is an extra precaution for making sure there are no
            # side effects after #2222. It should be removed after further
            # testing.
            self._log.debug(u'{}', traceback.format_exc())
            self._log.error(u'uncaught exception in coalesce_tracks: {}', exc)
            clean_tracklist = tracklist
        tracks = []
        index_tracks = {}
        index = 0
        for track in clean_tracklist:
            # Only real tracks have `position`. Otherwise, it's an index track.
            if track['position']:
                index += 1
                track_info = self.get_track_info(track, index)
                track_info.track_alt = track['position']
                tracks.append(track_info)
            else:
                # Remember the title of the index track preceding the
                # *next* real track; used for disctitle below.
                index_tracks[index + 1] = track['title']

        # Fix up medium and medium_index for each track. Discogs position is
        # unreliable, but tracks are in order.
        medium = None
        medium_count, index_count, side_count = 0, 0, 0
        sides_per_medium = 1

        # If a medium has two sides (ie. vinyl or cassette), each pair of
        # consecutive sides should belong to the same medium.
        if all([track.medium is not None for track in tracks]):
            m = sorted(set([track.medium.lower() for track in tracks]))
            # If all track.medium are single consecutive letters, assume it is
            # a 2-sided medium.
            if ''.join(m) in ascii_lowercase:
                sides_per_medium = 2
                side_count = 1  # Force for first item, where medium == None

        for track in tracks:
            # Handle special case where a different medium does not indicate a
            # new disc, when there is no medium_index and the ordinal of medium
            # is not sequential. For example, I, II, III, IV, V. Assume these
            # are the track index, not the medium.
            medium_is_index = track.medium and not track.medium_index and (
                len(track.medium) != 1 or
                ord(track.medium) - 64 != medium_count + 1
            )

            if not medium_is_index and medium != track.medium:
                if side_count < (sides_per_medium - 1):
                    # Increment side count: side changed, but not medium.
                    side_count += 1
                    medium = track.medium
                else:
                    # Increment medium_count and reset index_count and side
                    # count when medium changes.
                    medium = track.medium
                    medium_count += 1
                    index_count = 0
                    side_count = 0

            index_count += 1
            medium_count = 1 if medium_count == 0 else medium_count
            track.medium, track.medium_index = medium_count, index_count

        # Get `disctitle` from Discogs index tracks. Assume that an index track
        # before the first track of each medium is a disc title.
        for track in tracks:
            if track.medium_index == 1:
                if track.index in index_tracks:
                    disctitle = index_tracks[track.index]
                else:
                    disctitle = None
            track.disctitle = disctitle

        return tracks

    def coalesce_tracks(self, raw_tracklist):
        """Pre-process a tracklist, merging subtracks into a single track. The
        title for the merged track is the one from the previous index track,
        if present; otherwise it is a combination of the subtracks titles.
        """
        def add_merged_subtracks(tracklist, subtracks):
            """Modify `tracklist` in place, merging a list of `subtracks` into
            a single track into `tracklist`."""
            # Calculate position based on first subtrack, without subindex.
            idx, medium_idx, sub_idx = \
                self.get_track_index(subtracks[0]['position'])
            position = '%s%s' % (idx or '', medium_idx or '')

            if tracklist and not tracklist[-1]['position']:
                # Assume the previous index track contains the track title.
                if sub_idx:
                    # "Convert" the track title to a real track, discarding the
                    # subtracks assuming they are logical divisions of a
                    # physical track (12.2.9 Subtracks).
                    tracklist[-1]['position'] = position
                else:
                    # Promote the subtracks to real tracks, discarding the
                    # index track, assuming the subtracks are physical tracks.
                    index_track = tracklist.pop()
                    # Fix artists when they are specified on the index track.
                    if index_track.get('artists'):
                        for subtrack in subtracks:
                            if not subtrack.get('artists'):
                                subtrack['artists'] = index_track['artists']
                    tracklist.extend(subtracks)
            else:
                # Merge the subtracks, pick a title, and append the new track.
                track = subtracks[0].copy()
                track['title'] = ' / '.join([t['title'] for t in subtracks])
                tracklist.append(track)

        # Pre-process the tracklist, trying to identify subtracks.
        subtracks = []
        tracklist = []
        prev_subindex = ''
        for track in raw_tracklist:
            # Regular subtrack (track with subindex).
            if track['position']:
                _, _, subindex = self.get_track_index(track['position'])
                if subindex:
                    # rjust pads for a lexicographic comparison that
                    # behaves like a numeric one ('2' < '10').
                    if subindex.rjust(len(raw_tracklist)) > prev_subindex:
                        # Subtrack still part of the current main track.
                        subtracks.append(track)
                    else:
                        # Subtrack part of a new group (..., 1.3, *2.1*, ...).
                        add_merged_subtracks(tracklist, subtracks)
                        subtracks = [track]
                    prev_subindex = subindex.rjust(len(raw_tracklist))
                    continue

            # Index track with nested sub_tracks.
            if not track['position'] and 'sub_tracks' in track:
                # Append the index track, assuming it contains the track title.
                tracklist.append(track)
                add_merged_subtracks(tracklist, track['sub_tracks'])
                continue

            # Regular track or index track without nested sub_tracks.
            if subtracks:
                add_merged_subtracks(tracklist, subtracks)
                subtracks = []
                prev_subindex = ''
            tracklist.append(track)

        # Merge and add the remaining subtracks, if any.
        if subtracks:
            add_merged_subtracks(tracklist, subtracks)

        return tracklist

    def get_track_info(self, track, index):
        """Returns a TrackInfo object for a discogs track.
        """
        title = track['title']
        # Discogs does not expose a stable per-track identifier here.
        track_id = None
        medium, medium_index, _ = self.get_track_index(track['position'])
        artist, artist_id = self.get_artist(track.get('artists', []))
        length = self.get_track_length(track['duration'])
        return TrackInfo(title, track_id, artist, artist_id, length, index,
                         medium, medium_index, artist_sort=None,
                         disctitle=None, artist_credit=None)

    def get_track_index(self, position):
        """Returns the medium, medium index and subtrack index for a discogs
        track position."""
        # Match the standard Discogs positions (12.2.9), which can have several
        # forms (1, 1-1, A1, A1.1, A1a, ...).
        match = re.match(
            r'^(.*?)'           # medium: everything before medium_index.
            r'(\d*?)'           # medium_index: a number at the end of
                                # `position`, except if followed by a subtrack
                                # index.
                                # subtrack_index: can only be matched if medium
                                # or medium_index have been matched, and can be
            r'((?<=\w)\.[\w]+'  # - a dot followed by a string (A.1, 2.A)
            r'|(?<=\d)[A-Z]+'   # - a string that follows a number (1A, B2a)
            r')?'
            r'$',
            position.upper()
        )
        if match:
            medium, index, subindex = match.groups()

            if subindex and subindex.startswith('.'):
                subindex = subindex[1:]
        else:
            self._log.debug(u'Invalid position: {0}', position)
            medium = index = subindex = None
        # Empty capture groups are normalized to None.
        return medium or None, index or None, subindex or None

    def get_track_length(self, duration):
        """Returns the track length in seconds for a discogs duration.
        """
        try:
            length = time.strptime(duration, '%M:%S')
        except ValueError:
            # Empty or malformed duration string.
            return None
        return length.tm_min * 60 + length.tm_sec
| lengtche/beets | beetsplug/discogs.py | Python | mit | 21,763 | [
"VisIt"
] | ef4190f001bdc2a7ca338993f920156b78826fda16acf588bc4c0e9ba7a11e8d |
# coding: utf-8
#
# Copyright 2014 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
import base64
from StringIO import StringIO
from PIL import Image
from pypln.backend.workers import WordCloud
from utils import TaskTest
class TestFreqDistWorker(TaskTest):
name = "WordCloud"
def test_wordcloud_should_return_a_base64_encoded_png(self):
doc = {'freqdist': [('is', 2), ('the', 2), ('blue', 1), ('sun', 1),
('sky', 1), (',', 1), ('yellow', 1), ('.', 1)], 'language': 'en'}
doc_id = self.collection.insert(doc, w=1)
WordCloud().delay(doc_id)
refreshed_document = self.collection.find_one({'_id': doc_id})
raw_png_data = base64.b64decode(refreshed_document['wordcloud'])
fake_file = StringIO(raw_png_data)
img = Image.open(fake_file)
img.verify()
self.assertEqual(img.format, 'PNG')
| NAMD/pypln.backend | tests/test_worker_wordcloud.py | Python | gpl-3.0 | 1,543 | [
"NAMD"
] | b2fa2546921500a1b81d9a45a9708acc7cb5267146fd31effd38c9514f5f9bca |
# -*- coding: utf-8 -*-
def check_unresolved(locstr, mapping=None):
    """Return the country that *locstr* maps to, or None when unknown.

    ``locstr`` is tested for exact membership in each frozenset key of
    *mapping* (this is set membership, not substring matching).

    :param locstr: raw location string to resolve (lowercase expected).
    :param mapping: optional ``{frozenset_of_aliases: country}`` dict;
        defaults to the module-level ``fix_unresolved`` table.
    :return: country name, or ``None`` when no alias set contains it.
    """
    if mapping is None:
        mapping = fix_unresolved
    for aliases in mapping:
        if locstr in aliases:
            return mapping[aliases]
    # Explicit None instead of falling off the end of the function.
    return None
def tests():
    """Smoke-test check_unresolved against the default alias table."""
    assert check_unresolved('nyc') == 'United States'
    assert check_unresolved('perú') == 'Peru'
    assert check_unresolved('###############') is None
    # Function-call form of print keeps this file runnable under both
    # Python 2 (single-argument call) and Python 3.
    print('tests pass')
# Manual overrides for location strings that automatic resolution could
# not handle.  Each key is a frozenset of equivalent raw spellings and
# the value is the country they all map to; check_unresolved() performs
# membership lookups against these keys.
fix_unresolved = {
    frozenset(['nyc', 'silicon valley', 'sf', 'uiuc', 'new england', 'philly', 'ucla', 'west coast', 'potrero, sf', 'cape may court house', 'u.s.a.', 'rit', 'northwest', 'trabuco canyon', 'west philly', 'mit', 'unlv', 'chicagoland', 'sdut', 'ucsc', 'pdx', 'u s a']): 'United States',
    frozenset(['montreal', 'montreal, qc', 'montréal, québec', 'québec', 'montréal', 'quebec', 'prévost', 'montréal, qc', 'tiohtiake', 'montreal, quebec']): 'Canada',
    frozenset(['england', 'scotland', 'oxfordshire', 'oxfordshire, england', 'u.k.', 'snowdonia, north wales', 'broughty ferry, scotland', 'cambrdige', 'u k', 'devon, england', 'bonnie scotland', 'bonsall, derbyshire']): 'United Kingdom',
    frozenset(['munich', 'cologne', 'münchen', 'deutschland', 'düsseldorf', 'nürnberg', 'köln', 'göttingen', 'osnabrück', 'isny', 'duesseldorf', 'montabaur', 'münster']): 'Germany',
    frozenset(['são paulo, brasil', 'brasil', 'são paulo', 'são paulo - sp', 'sao paulo', 'são paulo - brasil', 'são paulo, sp, brasil', 'são paulo, sp', 'goiânia', 'joão pessoa', 'três rios - rj', 'florianópolis', 'santo andre - sp - brasil', 'são paulo - sp - brasil', 'são paulo sp', 'são paulo brasil', 'três rios rj', 'são paulo sp brasil', 'santo andre sp brasil', 'brasilia']): 'Brazil',
    frozenset(['russian federation', 'ekaterinburg', 'ulyanovsk', 'Москва', 'stary oskol', 'nizhny novgorod', 'moskow', 'ptz', 'russian', 'rnd', 'blagoveschensk, amur region, russian federation', 'Набережные Челны', 'Россия, Москва, Ногинск']): 'Russia',
    frozenset(['chambéry', 'meyrargues', 'auvergne', 'rueil malmaison', 'evry', 'aix en provence', 'gex']): 'France',
    frozenset(['korea']): 'South Korea',
    frozenset(['istanbul', 'eskisehir', 'izmir']): 'Turkey',
    frozenset(['hyderabad', 'varanasi', 'bengaluru', 'ahmedabad', 'kerala', 'gandhinagar, gujarat', 'kolkata']): 'India',
    frozenset(['zurich', 'zürich', 'neuchatel', 'lucerne', 'paudex', 'stäfa', 'geneve']): 'Switzerland',
    frozenset(['méxico', 'méxico city', 'uruapan michoacán méxico']): 'Mexico',
    frozenset(['sapporo', 'osaka', 'yokohama', 'japanese', 'kagurazaka', 'kanagawa', 'nagoya', 'chiba', 'ja', '東京', 'kagurazaka']): 'Japan',
    frozenset(['göteborg', 'linköping', 'gothenburg', 'swedish igloo']): 'Sweden',
    frozenset(['europe->poland->gorzow', 'kraków', 'wrocław', 'cracow', 'warszawa', 'dąbrowa górnicza, polska', 'europe >poland >gorzow']): 'Poland',
    frozenset(['bucuresti', 'bucurești, românia', 'iaşi, românia', 'cluj-napoca', 'iasi', 'cluj napoca']): 'Romania',
    frozenset(['ghent', 'bruxelles', 'liège']): 'Belgium',
    frozenset(['ukreine', 'lviv', 'kyiv', 'kharkov', 'cherkassy']): 'Ukraine',
    frozenset(['patagonya']): 'Argentina',
    frozenset(['valparaíso']): 'Chile',
    frozenset(['gansu', "xi'an", 'beijng', 'hang zhou', '杭州', '福建厦门', '上海', '江苏苏州', '中国天津', '北京', '广州', '中国', '河南焦作', '家', '杭州西湖区', '广东 深圳', '中国深圳', 'beijingchina', '昆明']): 'China',
    frozenset(['medellin', 'envigado! :']): 'Colombia',
    frozenset(['czech rebublic']): 'Czech Republic',
    frozenset(['itämerenkatu 13', 'itämerenkatu']): 'Finland',
    frozenset(['soerabaja, east java']): 'Indonesia',
    frozenset(['milano', 'turin', 'mozzanella', 'italia']): 'Italy',
    frozenset(['almere', 'antwerp', 'flanders, eu', 'uddel']): 'Netherlands',
    frozenset(['perú']): 'Peru',
    frozenset(['phillipines']): 'Philippines',
    frozenset(['catalunya', 'canary islands', 'seville', 'santa Úrsula', 'piera']): 'Spain',
    frozenset(['taoyuan']): 'Taiwan',
    frozenset(['viet nam', 'hanoi', 'ha noi, viet nam', 'ha noi - viet nam', 'ha noi', 'ha noi viet nam', 'hà nội, việt nam']): 'Vietnam',
    frozenset(['tehran']): 'Iran',
    frozenset(['joburg', "jo'burg"]): 'South Africa',
    frozenset(['luxemburg']): 'Luxembourg',
    frozenset(['københavn']): 'Denmark'
    # frozenset([]): 'Australia',
    # frozenset([]): 'Austria',
    # frozenset([]): 'Ireland',
}
# Run the self-checks only when executed directly, so importing this
# module stays side-effect free.
if __name__ == '__main__':
    tests()
#, 'irc perl org'
#, 'beyond your wildest dreams'
#, 'pacific northwest'
#, 'northern england'
#, 'cmpt'
#, 'stornoway'
#, 'ahoka@rizon'
#, 'hampshire, england'
#, 'cern'
#, '?'
#, 'yay area'
#, 'the internets'
#, 'world wide web'
#, ', hoya'
#, 'samstagern'
#, 'belém/pa brasil'
#, 'burgum'
#, 'zz plural z alpha'
#, 'changes a lot'
#, 'yokohma'
#, 'uzb'
#, 'united kingdon'
#, 'wien'
#, 'ciracas'
#, 'luik'
#, 'bolzano'
#, 'jogjakarta'
#, 'l'
#, 'tübingen'
#, 'varick street'
#, '中国上海'
#, 'n e'
#, 'gijón'
#, 'matlock, derbyshire'
#, '::'
#, 'u s'
#, 'north east england'
#, 'interwebs'
#, 'cal poly slo'
#, 'the yay'
#, 'troon, scotland'
#, 'root'
#, '° ' "n ° ' "w m'
#, 'online'
#, 'cardrona'
#, 'pluto'
#, 'aarhus'
#, 'muc'
#, 'north wales'
#, 's'
#, 'pici'
#, 'ffm'
#, 'altach, Österreich'
#, 'f:\project\monitorproject'
#, 'argetnina'
#, 'lietuva lithuania, gmt+'
#, 'america'
#, 'stanford'
#, 'reykjavik'
#, 'espirito santo, brasil'
#, 'neo sitama'
#, 'atl'
#, 'homalg project'
#, '/dev/console'
#, 'berkshire, england'
#, 'odd pod, troon, scotland'
#, 'atx'
#, 'bahia blanca'
#, 'nurnberg'
#, 'belgique'
#, 'lisboa'
#, '/home/askn'
#, ''merica'
#, '浙江宁波'
#, 'lostwithiel'
#, 'nizhny novgorod, russian federation'
#, 'jorvas'
#, 'vilafranca del penedès, catalunya'
#, 'sfbay area'
#, 'gandhinagar'
#, 'nova scotia'
#, 'n/a'
#, '中国浙江省杭州市'
#, 'praha'
#, 'a coruña'
#, 'córdoba'
#, 'são paulo sp, brasil'
#, 'nova'
#, 'tver'
#, 'swizerland'
#, 'hacking from the moon'
#, 'the nederlands'
#, 'uc santa barbara'
#, 'tejas'
#, 'niterói/rj'
#, 'latvija, rīga'
#, '渋谷'
#, 'belém pará'
#, 'rupnagar'
#, 'chn'
#, 'méxico, d f'
#, 'upstate new yawk'
#, 'manipal'
#, 'whistler, bc'
#, 'russian, ryazan'
#, 'block island'
#, 'karkala'
#, 'tunbridge wells'
#, 'krakow'
#, '上海市浦东新区居里路 号'
#, '風見学園'
#, 'frankfurt, gemany'
#, 'lith'
#, 'leningrad'
#, 'hradec králové'
#, 'saarbrücken'
#, 'strängnäs'
#, 'köln, deutschland'
#, 'são carlos/sp'
#, 'idf'
#, 'slavičín'
#, 'hampshire'
#, 'newfoundland'
#, 'nomadic'
#, 'são paulo / brasil'
#, 'brasil/goiás/goiânia'
#, 'rychnov nad kneznou / praha'
#, 'suwon'
#, 'the mountains'
#, 'perm'
#, 'cần thơ'
#, 'czecho'
#, 'south wales'
#, 'fukuoka'
#, 'over there'
#, 'var location = { country: 'germany', city: 'saarbrücken' };'
#, 'ryazan'
#, 'sea / sfo / nyc / bos'
#, 'deajeon'
#, 'jocala com'
#, 'port coquitlam, bc'
#, 'Россия, Московская область, Долгопрудный'
#, 'fennectar, fennectus state, planet cyrusian'
#, 'saigon'
#, 'wales'
#, 'urumqi'
#, 'méxico df'
#, 'lancashire'
#, 'hà nội việt nam'
#, 'eivissa, españa'
#, 'españa'
#, 'méxico d f'
#, 'Сибирь, Кузбасс'
#, '浙江省杭州市文二路 号'
#, 'Казань'
#, 'arkania'
#, 'hell on earth'
#, 'fotaleza'
#, '/home/necronet'
#, 'leon, gto'
#, 'none'
#, 'kobe'
#, 'barnet'
#, 'tyumen'
#, 'north america'
#, '@philingrey'
#, 'catalonia'
#, 'elbląg'
#, 'málaga'
#, 'galactic sector zz plural z alpha'
#, 'quebec city'
#, 'oświęcim'
#, '武汉'
#, 'záhony'
#, 'stony brook'
#, 'oaxaca, méxico'
#, 'jyväskylä'
#, 'l a'
#, 'málaga, españa'
#, 'waseco building'
#, 'pacific palisades'
#, 'sun/sol'
#, 'poznan'
#, 'joão pessoa, paraíba'
#, 'brasília, brasil'
#, 'hellsinki / funland'
#, 'shang hai'
#, 'cascadia'
#, 'gdansk'
#, 'gdańsk'
#, 'alresford'
#, 'dollar, clackmannanshire'
#, 'the d'
#, 'Харьков'
#, 'scenic hinxton'
#, 'timisoara'
#, 'kyiv, ukrane'
#, 'rendsburg@germany'
#, 'gotland'
#, 'wow freakz'
#, 'idolgo com'
#, 'wonderland'
#, 'behind you'
#, 'the cloud'
#, 'bronx'
#, 'vernouillet'
#, 'space'
#, '˚∆˚'
#, 'cornell'
#, 'chania crete'
#, 'effretikon'
#, 'bavaria'
#, 'llanfrothen, north wales'
#, 'oisy le verger'
#, 'woclaw'
#, 'windows'
#, 'bundeshauptstadt'
#, 'vorarlberg'
#, 'bogota'
#, 'durandalingrad'
#, 'cheonan, korea'
#, 'mass, u s'
#, 'românia'
#, 'prg'
#, 'right behind you!!!'
#, 'brasília'
#, 'internets'
#, 'united stated'
#, 'milky way'
#, 'kocaeli, turkiye'
#, 'galway'
#, 'american midwest'
#, 'shizuoka'
#, 'hanoi, viet nam'
#, 'peking'
#, 'ciudad obregon sonora , méxico'
#, 'livry gargan'
#, 'brasília, df, brasil'
#, 'south england'
#, 'frankfurt'
#, 'suburbia'
#, 'sfca'
#, 'llanarmon dyffryn ceiriog'
#, 'milky way galaxy'
#, 'here, now'
#, 'nit kozhikode, kerala'
#, 'janelia farm research campus'
#, 'softslayer'
#, 'thrissur'
#, 'joão pessoa pb'
#, '中山大学'
#, 'usofa'
#, 'bahía blanca'
#, 'hz'
#, 'jamaca'
#, 'whatloo'
#, 'montermorelos'
#, 'le bourget du lac'
#, 'brasil, brasília'
#, 'iit kanpur'
#, 'the pleiades'
#, 'location independent'
#, 'nagpur'
#, 'ederbringhausen'
#, 'irc datnode net'
#, 'kotamobagu'
#, 'hyrule'
#, 'valles marineris, mars'
#, 'location'
#, 'vironezh'
#, 'singularity'
#, 'campina grande pb'
#, '/home'
#, 'sausalito'
#, 'right behind you!'
#, 'turkiye'
#, 'yayyyy'
#, 'gandhinagar/ahmedabad'
#, 'solaro'
#, 'schweiz'
#, 'nanking'
#, 'münchen, deutschland'
#, 'earth, solar system'
#, 'phx'
#, 'Österreich'
#, 'bhubaneswar'
#, '/home/wintervenom'
#, 'shambala'
#, 'epfl'
#, 'italia, milano'
#, 'việt nam'
#, 'engineer'
#, 'banglore'
#, 'são carlos, sp'
#, 'on a boat'
#, 'balneário camboriú'
#, 'north east, england'
#, 'kaslo, bc'
#, 'the rose city'
#, 'goiânia go'
#, 'tampon'
#, 'България, Свищов'
#, 'medellín'
#, 'help! polar bears are attacking me!'
#, 'down under'
#, 'slovenija'
#, 'yorkshire'
#, 'noe'
#, 'långshyttan'
#, '中国 浙江 衢州'
#, 'glyfada'
#, 'cologne ger'
#, 'moskau'
#, 'bcn'
#, 'brasília/df, brasil'
#, 'görlitz'
#, 'sussex, england'
#, 'west yorkshire'
#, 'florianopolis, brasil'
#, 'mtl'
#, 'sf baby!'
#, 'réunion'
#, 'Санкт Петербург, Россия'
#, 'lidköping'
#, 'ukrain'
#, '秦皇岛'
#, 'maryland's eastern shore'
#, 'curitba/pr brasil'
#, 'kumamoto'
#, 'Минеральные Воды'
#, 'campo grade ms, brasil'
#, 'narnia'
#, 'phl'
#, 'south pole'
#, 'donostia'
#, 'gmt+'
#, 'hertfordshire'
#, 'sp brasil'
#, 'sao paulo, brasil'
#, 'rensselaer polytechnic institute'
#, 'ufsc'
#, '萧山'
#, 'são josé dos campos, são paulo, brasil'
#, 'goiania, goiás, brasil'
#, 'great britain'
#, 'remote developer'
#, 'los angeles area'
#, 'zurich, swtizerland'
#, 'ilhéus/bahia/brasil'
#, 'rmit uni'
#, 'da nang, viet nam'
#, 'vellerat'
#, 'brasil, rj'
#, 'aubergenville'
#, 'sevastopol'
#, 'roaming'
#, 'herzliya'
#, 'frozen reindeer country'
#, 'melverley, shropshire'
#, 'alnilam'
#, 'inland norcal'
#, 'the hague'
#, 'wollishofen, zürich'
#, 'dans le'
#, 'eastern seaboard'
#, 'cis, eecs, peking univ'
#, 'hangzhou,zhejiang,china'
#, 'floduh'
#, 'los países bajos'
#, '' above ground floating on a balloon'
#, 'definitely!'
#, 'galicia, españa'
#, 'irc://irc datnode net: /#hacking'
#, 'münster, deutschland'
#, 'interwebz'
#, 'bunnik'
#, 'zhodino'
#, 'nullptr'
#, '° ' n, ° ''
#, 'who knows where'
#, 'takoyaki'
#, 'blowmage'
#, 'ku'
#, 'arkhangelsk'
#, 'sthlm'
#, 'bengalooru'
#, 'cesário lange/itapetininga sp'
#, 'compiègne'
#, 'web'
#, 'chisinau'
#, 'südwestdeutschland'
#, 'münster, deustchland'
#, 'rio piedras'
#, 'são carlos'
#, 'way up north, scotland'
#, 'cny'
#, 'schwyz'
#, 'likely indoors'
#, 'very small small town'
#, 'leidschendam'
#, 'phillynyc'
#, 'qro'
#, 'goiânia, brasil'
#, 'Łaziska górne'
#, 'montreal,qc'
#, 'space the final frontier'
#, 'pek'
#, 'colbert nation'
#, 'nippon'
#, '北京,中国'
#, 'a few places'
#, 'yokohama city'
#, 'ranchi'
#, 'sophia antipolis'
#, 'reken'
#, 'firenze'
#, 'blighty'
#, '河南,中国'
#, '@malsup on twitter'
#, 'united states est'
#, '日本'
#, '中国南昌'
#, 'mönchengladbach'
#, 'wroclaw'
#, 'isla'
#, 'pécs'
#, 'le kremlin bicêtre'
#, 'socal'
#, 'everywhere but nowhere'
#, 'ussel'
#, '浙江,杭州'
#, '/dev/hell'
#, 'bedum'
#, 'brasil, são paulo sp'
#, 'rus, spb'
#, 'louisianna'
#, 'lasalle, quebec'
#, 'teldrassil'
#, 'default city'
#, 'dnepropetrovsk'
#, 'xi'an,shaanxi,p r c'
#, 'freenode'
#, 'home'
#, 'würzburg'
#, 'east coast'
#, 'somewhere around the world'
#, 'alwernia'
#, 'level'
#, 'Ústí nad orlicí megapolis'
#, 'hcm'
#, 'worcestershire, england'
#, 'williams lake, bc'
#, 'pakkret, nonthaburi'
#, 'prag'
#, 'xàtiva'
#, 'sao paulo sp'
#, 'custom real estate websites'
#, 'earth, spiral arm, milky way'
#, 'middle earth'
#, 'rio'
#, 'kosice'
#, 'manningtree, england'
#, 'moon base alpha'
#, 'someplace near you'
#, 'nehterlands'
#, 'loltah'
#, 'lsju'
#, 'cloverdale, b c'
#, 'with the finger on the keyboard ;'
#, 'peachtree corners'
#, 'warszawa, polska'
#, 'krakow/kyiv'
#, 'bei jing'
#, 'cúcuta'
#, 'nasa goddard'
#, 'kharkov, urkaine'
#, 'oaklandia'
#, 'são mateus espirito santo'
#, 'cordoba'
#, 'mallorca, españa'
#, 'shuswap, bc'
#, 'upton'
#, 'Долгопрудный'
#, 'gloucseter'
#, 'nagano'
#, 'western hemisphere'
#, 'Украина, г Винница'
#, '在地球上'
#, 'chicoutimi, qc'
#, 'milano, italia'
#, 'newyork'
#, 'silicon slopes'
#, 'sachsen'
#, 'the desert'
#, 'petsamo, lapland'
#, 'siberia'
#, 'shell'
#, 'the czech republic'
#, 'londinium'
#, 'oaxaca'
#, 'bluelovers'
#, 'rueti zh'
#, 'bellatrix'
#, 'donostia gasteiz'
#, 'trivandrum kerala'
#, 'rajkot'
#, 'nederlands'
#, 'kansas! city!'
#, 'ladkrabang'
#, 'maarn'
#, 'orléans'
#, 'world wide wiretap'
#, '@squarism'
#, 'norrköping'
#, 'rajshahi'
#, 'sonoma'
#, 'russian federation, spb'
#, 'anime and mango'
#, 'wakayama pref'
#, 'on the interwebz'
#, 'euskadi'
#, 'Россия, Москва'
#, 'roudnice nad labem'
#, 'goiânia, goiás, brasil'
#, 'nwa'
#, 'joao pessoa, pb brasil'
#, 'slovak republic'
#, 'teh internets'
#, 'earht planet'
#, 'bistrița, românia'
#, 'czech'
#, 'd c'
#, 'stavropol'
#, 'sassenheim'
#, 'chicagoland+wi'
#, 'hongkou'
#, '中国>北京'
#, 'polska'
#, 'são paulo / sp'
#, 'helm's deep'
#, 'sai gon'
#, 'hà nội'
#, 'davidwright@gmail com'
#, 'dfw'
#, 'ringerike'
#, 'asuncion'
#, 'el ceñidor, múgica, michoacán, méxico'
#, 'bruges'
#, 'black books'
#, 'rzeszów/kraków'
#, 'a town near you'
#, 'avesnes sur helpe'
#, 'guwahati'
#, 'aus'
#, 'bellarine peninsula'
#, '#dtla'
#, 'derbyshire'
#, 'puebla, puebla'
#, '☠☠☠ nyc ☠☠☠'
#, 'lost between bits'
#, 'Россия'
#, 'dell inc'
#, 'skype: fljot_'
#, 'campina'
#, 'otaniemi'
#, 'lidzbark warmiński / gdańsk'
#, 'self confidence comes from being sure that your predictions are accurate'
#, '横浜市, 日本'
#, 'frankfurt oder'
#, 'hokkaido'
#, '太倉,蘇州'
#, 'fürth'
#, 'borlänge'
#, 'arbucies'
#, 'nizhnyi novgorod'
#, 'löhne'
#, '$home'
#, 'i came i saw i refactored'
#, 'vitebsk'
#, 'köthen anhalt'
#, 'michoacán, méxico'
#, 'eastern u s a'
#, 'hier'
#, 'brasília df brasil'
#, 'hyères les palmiers'
#, 'Россия, г Рубцовск'
#, 'breizh'
#, 'transnistria'
#, 'europe, earth, the universe'
#, 'hcmc'
#, 'boulogne'
#, 'tubarão/sc, brasil'
#, 'devon'
#, 'right here'
#, 'lincolnshire, england'
#, 'intertubes'
#, 'somewhere between here and there'
#, 'devon/bournemouth home/uni'
#, 'oahu'
#, '黑龙江省哈尔滨市南岗区西大直街 号哈尔滨工业大学一校区a 公寓 寝室'
#, 'santo andré sp'
#, 'worldwide baby!'
#, 'toxicity'
#, 'l'viv'
#, 'nor cal'
#, 'knivsta'
#, 'kanpur'
#, 'são paulo/sp brasil'
#, 'kozloduy'
#, 'xintend'
#, 'mpls'
#, 'batam'
#, 'rhone alpes'
#, 'minnesotta'
#, 'ja_jp'
#, 'méxico, df'
#, 'Калуга'
#, 'chain,bei jing'
#, 'uae'
#, 'left handed coordinate'
#, 'n novgorog'
#, '四机房'
#, '東京都杉並区'
#, 'triniad and tobago'
#, 'orel, rf'
#, 'georgsmarienhütte'
#, 'world citizen'
#, 'tasmania'
#, 'cenote'
#, 'skagit county'
#, 'xi`an'
#, 'toyko'
#, 'trivandrum'
#, 'bielsko biała'
#, 'tenerife'
#, 'kreuzberg'
#, 'pego p p c c'
#, 'worcestershire'
#, 'royal leamingtin spa'
#, 'chicoutimi, québec'
#, 'brittany'
#, 'midwest'
#, '中国 杭州'
#, 'reggio emilia'
#, 'somewhere else'
#, 'kowale'
#, 'on the run'
#, 'cyberjaya'
#, 'saint nazaire'
#, 'hải phòng, hồ chí minh'
#, 'oosterwolde'
#, 'são leopoldo'
#, 'danmark'
#, 'scott afb'
#, 'andreazevedo'
#, 'são paulo/brasil'
#, 'secret volcano lair'
#, 'a five line poem'
#, 'desconocida'
#, 'montreal qc'
#, 'lenzburg'
#, 'coffee bean & tea leaf'
#, 'planet java'
#, 'ilfracombe, devon'
#, 'nagoya univ'
#, '合肥,安徽'
#, 'saint germain en laye'
#, 'whu'
#, 'würselen'
#, 'undisclosed'
#, 'kraków / jaworznia'
#, 'herson'
#, 'the north east'
#, 'vinnitsa'
#, 'liège, belgique'
#, 'yeah right'
#, 'saint petes'
#, 'fringecity'
#, 'akihabara'
#, 'köln hürth'
#, '沖縄県名護市'
#, 'saltaire, west yorkshire'
#, 'cracov'
#, 'Казахстан'
#, 'osório'
#, 'jazzgumpy'
#, 'ondres'
#, 'kochi, kerala'
#, 'haboobland'
#, 'big o'
#, 'traveling'
#, 'cloud'
#, 'lübeck'
#, 'trissur'
#, 'kongens lyngby'
#, 'where pigs fly'
#, 'neither here not there'
#, 'mydlovary'
#, 'sector zz plural z alpha'
#, 'on a minecraft server, somewhere'
#, 'hinxton'
#, 'garia, west bengal, kolkata'
#, 'villeneuve d'ascq'
#, 'yaroslavl'
#, 'ntnu'
#, 'flekkerøy'
#, 'sao paulo / brasil'
#, 'japon'
#, 'são paulo sp / brasil'
#, 'бобруйск'
#, 'polska kraków'
#, 'planet mars, cydonia'
#, 'prc'
#, 'moor row, cumbria'
#, 'sulthan bathery'
#, 'always moving'
#, 'bolzano, südtirol'
#, '#occupyatlanta'
#, 'brasília df, brasil'
#, '@denmark'
#, 'zju'
#, 'joão pessoa, pb brasil'
#, 'tarashcha'
#, 'berkel'
#, 'cebu'
#, 'mvdc'
#, 'shibuya,toshima'
#, 'caloocan city'
#, '成都'
#, 'some where, i think'
#, 'the universe'
#, 'ilheus'
#, 'centallo'
#, 'kasaragod'
#, 'mérida'
#, 'hongkong'
#, 'sài gòn'
#, 'uhh, i'm here, i think'
#, 'chandler's ford'
#, 'qualicum beach, bc'
#, 'sei pana, batam'
#, 'nuermberg'
#, 'third planet from the sun'
#, 'sutz'
#, 'पुणे, महाराष्ट्र, भारत'
#, 'hajom, mark, sverige'
#, 'uberlândia brasil'
#, 'petrópolis, rj'
#, 'lax'
#, 'xidian'
#, 'long island'
#, 'earth, solar system, milky way galaxy, universe'
#, 'the north'
#, 'right near the submodule'
#, 'thüringen, deutschland'
#, 'squamish, bc'
#, 'valparaíso, são paulo, brasil'
#, 'brooklyn!'
#, 'alsace'
#, 'sendenhorst'
#, '@tc'
#, 'niteroi, rj'
#, '北京海淀'
#, 'southwest'
#, 'ekb'
#, 'beijing,china'
#, 'heavenly dynasty(和谐的天朝)'
#, 'sjamgjao'
#, 'ville d'emery'
#, 'besançon'
#, 'mry'
#, 'РФ, г Екатеринбург'
#, 'banagalore'
#, 'poa'
#, 'whitby, on'
#, 'Минск'
#, 'western europe'
#, 'santarém'
#, 'ain taya'
#, 'floating about the globe'
#, 'scl'
#, 'Ростов на Дону'
#, 'sverige'
#, 'itay'
#, 'joão pessoa, brasil'
#, 'lazistan'
#, 'suwałki'
#, 'outer space'
#, 'thrissur, kerala'
#, 'brasília df'
#, 'oporto'
#, '/bin'
#, 'banˈduŋ ˌɪndoʊˈniːziə'
#, 'terrier'
#, 'bp czech'
#, 'korean'
#, 'sfo'
#, 'bedroom'
#, 'sysu'
#, 'Украина, Житомир'
#, 'kazakstan, karaganda'
#, 'wiltshire, england'
#, 'canterlot'
#, 'varaždin'
#, 'far planet'
#, 'acapulco guerrero'
#, 'cristais paulista, sp, brasil'
#, 'the interwebs'
#, 'that place between dreaming and awake'
#, 'melitopol'
#, 'eire'
#, '大连'
#, 'mattgoldman'
#, 'jonestown'
#, 'marilia/sp brasil'
#, '中华人民共和国福建省厦门市'
#, 'df'
#, '日本国 広島県広島市'
#, 'standing behind you, stalking'
#, 'brasilia df, brasil'
#, 'puerto real cádiz'
#, 'onion land'
#, 'Благовещенск'
#, 'lake tahoe'
#, 'nova kahovka'
#, '中国西安'
#, 'typically the office'
#, 'planète terre, voie lactée'
#, 'knurów'
#, 'saint mandé'
#, 'wild wild web'
#, 'montréal québec'
#, 'iit punjab'
#, 'cancún, méxico'
#, 'almería, españa'
#, 'rottedam'
#, 'chendu'
#, 'alfaro city'
#, 'osaka && munich'
#, '嘉義'
#, 'g rap'
#, '"china wuan"'
#, 'Гомель, Беларусь'
#, 'nowhere important'
#, 'ciudad juárez, méxico'
#, 'itlay'
#, '山楂树树枝'
#, 'cluj napoca, cluj, românia'
#, 'kingwood'
#, 'docoka'
#, 'sępólno krajeńskie'
#, 'bc'
#, 'bazil'
#, 'utd'
#, 'big d'
#, 'the matrix'
#, 'moscu, rusia'
#, 'git oovyaonge@oovyaonge cafe app com:oovyaonge_oovyaonge'
#, 'ural'
#, 'renteria'
#, 'somewhere random on planet earth'
#, 'santa clara del mar'
#, '/var/www'
#, 'beijing&chengdu'
#, 'Березники'
#, 'debatable'
#, 'krk'
#, 'dnepr'
#, 'bue'
#, 'nyc / ucsc / polar'
#, 'philly burbs'
#, 'xi'an, shaanxi'
#, 'cze'
#, '福井'
#, 'patra'
#, 'nuzild'
#, 'ludhiana'
#, '中国山东省青岛市'
#, 'munich, bavaria'
#, 'asturias'
#, 'great russell street'
#, 'deepest, darkest wiltshire'
#, 'cpan, rop'
#, 'maranhão, brasil'
#, 'some random top secret base'
#, 'sussex'
#, 'ussr'
#, 'earth, sol, western spiral arm, milky way, omniverse'
#, 'chiavenna, lombardia, italia'
#, 'the intergalactic interwebs'
#, 'Пятигорск'
#, 'fasdf'
#, 'unnc'
#, 'lyngby'
#, 'yoyogi'
#, 'french'
#, 'フランス'
#, 'brussel, belgië'
#, 'seattle< >boston'
#, 'goiânia, goiás'
#, 'ulan ude'
#, 'pnw'
#, 'zhejiang province'
#, 'paulínia, são paulo'
#, 'ベルリン'
#, 'finalnd'
#, 'ururu'
#, 'zuerich'
#, 'rpi'
#, 'nikolaev'
#, 'kakinada'
#, 'middlebury'
#, 'ugvydggsief c ryywxpyq=='
#, 'bopohe}|{'
#, 'brasília, df'
#, 'Съемная норка кролика'
#, 'padua'
#, 'xi'an, shanxi'
#, 'visakapatnam'
#, 'são paulo/sp'
#, 'yurrup'
#, 'starvropol', russian federation'
#, 'stack'
#, 'sampa'
#, 'brasil, sp, são paulo, centro'
#, 'bruz'
#, 'kazahstan'
#, 'skehanagh park, watergrasshill'
#, 'córdoba españa'
#, ''the' earth'
#, 'planet pumpkin'
#, 'iraquis'
#, 'a city within a state'
#, 'north west england'
#, 'appsterdam'
#, 'homel'
#, 'méxico, mérida'
#, 'spain!'
#, 'córdoba, españa'
#, 'montreal, qc, can'
#, 'the earth'
#, 'shenzheng'
#, 'mukachevo'
#, 'miyazaki'
#, 'lomagna'
#, 'ratnagiri'
#, 'chişinău'
#, 'webernets'
#, 'southern africa'
#, 'uestc'
#, 'ghaziabad'
#, 'www gzur org'
#, 'makati'
#, 'belém pará brasil'
#, 'deklein'
#, 'hong kong s a r'
#, 'cylon occupied caprica'
#, 'arces'
#, 'kyiev'
#, 'russland'
#, 'bolsward'
#, 'brasov'
#, 'lancashire, england'
#, 'charleville mezieres'
#, 'consulting'
#, 'can tho, viet nam'
#, 'aichi, jpan'
#, 'bhimavaram'
#, 'goiânia goiás brasil'
#, 'brasilia, df'
#, 'trollhättan'
#, 'didu'
#, 'kraków, polska'
#, 'cape breton, ns'
#, 'zanè'
#, 'levallois perret'
#, 'apeiron'
#, 'bewdley, worcestershire'
#, 'it's complicated'
#, 'celestial empire'
#, 'greater nyc'
#, 'lietuva'
#, 'chinese'
#, 'europe/czech rep'
#, 'north yorkshire, england'
#, 'solar system'
#, 'ciechanki'
#, 'earth, sol'
#, 'hambizzle'
#, 'here :d'
#, 'anglesey, north wales'
#, 'mérida,yucatán,méxico'
#, 'belém'
#, 'aichi'
#, 'almería'
#, 'stl'
#, 'inner space'
#, 'struck oil'
#, 'niteroi rj brasil'
#, 'yorkshire, england'
#, 'ldn'
#, 'Россия, Казань'
#, 'cape town south africa'
#, 'mar del plata'
#, 'amsterdamn'
#, 'lonodn'
#, 'macs ln'
#, 'nieuwdorp'
#, 'Симферополь'
#, 'germany; osgrid volksland'
#, 'wellington; new zealand'
#, '中国,杭州'
#, 'klagenfurt'
#, 'hessle, england'
#, 'dehradun'
#, 'problemania org'
#, '中国辽宁沈阳'
#, 'various'
#, 'santa brigida'
#, 'maui'
#, 'jinju'
#, 'gothenburg swedgen'
#, '/users/rzm'
#, 'Украина, Запорожье'
#, 'silicon valey'
#, 'iitm'
#, 'kyiv, ukarine'
#, '+'
#, 'chiapas'
#, 'where you live'
#, 'grande pointe, mb'
#, 'hyper island'
#, 'rf'
#, 'ballina'
#, 'neumarkt'
#, 'existing between keyboard and chair'
#, 'ifoc'
#, 'slo'
#, 'district , hcmc'
#, 'venera planet'
#, 'eivissa'
#, '河南'
#, 'sol'
#, 'santa rosalia'
#, 'variable'
#, 'df, brasil'
#, 'bardowick'
#, 'rua timbira, teresina/piauí'
#, 'laaaaandon'
#, 'cluj'
#, 'gunma'
#, 'brussel'
#, 'denbigh, north wales'
#, 'neumünster, deutschland'
#, 'dniepropetrovsk'
#, 'bla'
#, 'anus'
#, 'leganés'
#, 'underhat'
#, 'panopticon'
#, 'oau, ile ife'
#, '天津市南开区华苑产业园区'
#, 'the nether'
#, 'bilbo/bizkaia'
#, 'montreal, québec'
#, 'west sussex'
#, 'wired'
#, 'Екатеринбург'
#, 'iruñea nafarroa'
#, 'Украина'
#, 'building'
#, 'gz'
#, 'penedès catalunya'
#, 'angus'
#, 'abq'
#, 'séoul'
#, 'hell'
#, 'wilrijk'
#, 'sector , reality sigma, cow'
#, 'osaka,kobe'
#, 'verges, catalonia'
#, 'sp/brasil'
#, 'lévis'
#, 'uxbridge'
#, '@non_'
#, 'illnoise'
#, 'orduña, bizkaia'
#, 'ciudad juarez'
#, 'magrathea'
#, 'sant cugat del vallés'
#, '深圳'
#, 'n l l v'
#, 'somewhere on the west coast'
#, 'ukraina'
#, 'usc'
#, 'РФ'
#, 'hertfordshire, england'
#, 'crépy en valois'
#, 'zz : plural zα'
#, 'www emlprime com'
#, 'saxony_germany'
#, 'são carlos, sp brasil'
#, 'taegu, southkorea'
#, 'twitter com/leemallabone'
#, 'Санкт Петербург'
#, 'ribeirão preto sp'
#, '横浜'
#, 'the semantic web'
#, '/home/foozzi'
#, 'morroco'
#, 'mhm'
#, 'schoonhoven'
#, 'miyagi'
#, 'vilafranca del penedès'
#, 'aus moskau'
#, 'niteroi rj'
#, 'di dalam hati wanita'
#, 'groenlo'
#, 'west brabant'
#, 'the grid'
#, 'palamos'
#, 'la?'
#, '中国广州'
#, 'osca, pyrenees'
#, 'mcveytown'
#, 'leghorn'
#, '埼玉県'
#, '@edebill'
#, 'České budějovice'
#, 'sydneylondon'
#, '上海,中国'
#, 'são gonçalo / rj'
#, 'galicia'
#, 'mobo'
#, 'b town'
#, 'srinagar, kashmir'
#, 'joao pessoa pb'
#, 'shen zhen,guang dong'
#, 'madeira'
#, 'rüsselsheim'
#, 'osaka@japan'
#, 'en fuga'
#, 'são josé dos campos, sp brasil'
#, 'culiacán, sinaloa, méxico'
#, 'internettsburg'
#, 'reggio emilia,italia'
#, 'www'
#, 'Česká republika, praha'
#, 'nipppon'
#, 'yangon'
#, 'parts unknown'
#, 'scotland, u k'
#, 'northeastern united states'
#, 'alger'
#, 'tty'
#, 'são josé dos campos/sp brasil'
#, 'dungeons'
#, 'wybcz'
#, 'neverland'
#, 'hamburgo'
#, 'pernambuco, brasil'
#, 'korat'
#, 'parkstein'
#, 'durgapur'
#, 'patras'
#, 'prueba'
#, 'somewhere beyond the sea'
#, 'software engineer'
#, 'sfo / sjc'
#, 'internetz'
#, 'mallorca'
#, 'rj, brasil'
#, 'sao paulo brasil'
#, 'tirol'
#, 'kratumban, samutsakorn'
#, 'the netherlanths'
#, 'massonnens, suisse'
#, 'www fraudpointer com'
#, 'schwanau'
#, 'serbien'
#, 'rva'
#, 'ap'
#, '$texas'
#, 'corea'
#, 'a cuu long, f , tan binh, tp hcm'
#, ''
#, 'goettingen'
#, 'brasil sp'
#, 'reykjavík'
#, '四川 绵阳'
#, 'trichy'
#, 'world wide'
#, 'mrthe name'
#, 'yoshkar ola'
#, 'dorset'
#, 'limanowa/kraków'
#, 'viêt nam'
#, 'djursholm'
#, 'władysławowo'
#, 'x'
#, 'gasteiz, basque country'
#, 'the netherlands, europe'
#, 'algerie'
#, 'greater dfw'
#, 'en_us'
#, '衡阳'
#, 'queretaro'
#, 'virtual space'
#, 'utc'
#, 'altanta'
#, 'ustc'
#, 'vimim'
#, 'love field'
#, 'www bulk inc com'
#, 'hkd'
#, 'campo grande ms, brasil'
#, 'camaragibe'
#, 'anywhere'
#, 'kyev'
#, 'rondônia brasil'
#, 'fukushima koriyama'
#, 'yamaguchi'
#, 'markt berolzheim'
#, 'moose creek, on'
#, 'enfield, ns'
#, '日の本'
#, 'brasilia brasil'
#, 'chitrakoot'
#, 'vzla'
#, 'РФ, Владимир'
#, 'tunisie'
#, 'earth mostly'
#, 'wirral, england'
#, 'underground'
#, 'tucuman'
#, 'malaga'
#, 'if city'
#, 'hours from anywhere'
#, 'eastern united states'
#, 'são félix, bahia, brasil'
#, 'one the move'
#, 'the_darkside'
#, 'mines'
#, 'béziers'
#, 'wales, britain'
#, 'northamptonshire'
#, '神奈川県藤沢市'
#, 'beijinchaina'
#, 'Россия, Оренбург'
#, 'shang'
#, 'frankfurt/hanau'
#, 'Ústí nad labem'
#, 'pilani'
#, 'são carlos são paulo brasil'
#, 'shiga'
#, 'schauernheimerstr , d dannstadt schauernheim'
#, 'milky way, universe, expanding mass'
#, 'akb'
#, 'dehiwala'
#, 'dimap'
#, '济南'
#, 'middle england'
#, 'sask, can'
#, 'kashipur'
#, 'jhb'
#, 'zulte'
#, 'yew nork'
#, 'schipluiden'
#, 'behind your firewall'
#, 'mdn'
#, 'cancun, méxico'
#, 'maginus'
#, 'montélimar'
#, 'forest row, england'
#, 'dumfries'
#, 'sockerbruket , , gÖteborg'
#, 'east & west'
#, 'amherst, nova scotia'
#, 'Томия, Япония'
#, 'lalaland'
#, 'normandie'
#, 'creating your github account'
#, 'hinxton united kingdom'
#, 'teh internetz'
#, 'suwon, korea'
#, '中国黑龙江大庆'
#, 'targu mures, romainia'
#, 'chicagoland area'
#, '/c/emu/trinitycore/'
#, 'makeevka'
#, '地獄'
#, 'uzhgorod'
#, 'vinaros, españa'
#, 'beauharnois, qc'
#, 'russian,rh,sayanogrosk'
#, 'guidlford'
#, 'delluf'
#, 'espírito santo brasil'
#, '首都相模大野'
#, 'east midlands'
#, 'cgn'
#, 'são josé dos campos/sp'
#, '中国福建福州'
#, 'nyu'
#, 'yucatán'
#, 'cluj napoca, românia'
#, 'the great white north'
#, 'são paulo, sp brasil'
#, 'são josé'
#, 'stari zednik'
#, 'flanders'
#, 'dracena, são paulo, brasil'
#, 'city'
#, 'córdoba/veracruz méxico'
#, 'somewhere, over the rainbow'
#, 'avranches'
#, 'ponyland'
#, 'gotham'
#, 'berne'
#, 'trivandrum, kerala'
#, 'iamfredng@gmail com'
#, 'bra[sz]il'
#, 'sart lez spa'
#, 'sapporo, hokkaido'
#, 'mensk'
#, 'hammah'
#, 'lower haight'
#, 'são josé dos campos, sp'
#, 'vsetín'
#, 'uiwang, korea'
#, 'nyc/sf'
#, 'guaruja'
#, 'holland!'
#, 'liphook'
#, 'the united states'
#, 'hel'
#, 'irc freenode net #python veloutin'
#, 'caltech'
#, 'bruyères le châtel'
#, 'phily'
#, 'concórdia santa catarina'
#, 'glorious nippon'
#, 'earth, sol system'
#, 'osaka city'
#, 'undecided'
#, 'far, far away!'
#, 'africa'
#, 'méxico d,f'
#, 'skoczów'
#, 'blackstone, qld'
#, 'chong qing'
#, 'praha, Česká republika'
#, 'cozumel, méxico'
#, 'p s d r e r f'
#, 'tahiti, french polynedia'
#, 'czechrepublic/orlová lutyně'
#, 'traveling salesman'
#, 'kc'
#, 'glenavy'
#, '/users/fabi'
#, 'chambery'
#, 'niterói, rj'
#, 'french riviera'
#, 'mikew'
#, 'fedora'
#, 'shaanxi xi'an'
#, 'vitoria gasteiz'
#, 'trentino'
#, 'vodafone, internet'
#, 'tufts'
#, 'nyc / ber'
#, 'stavropol, russian federation'
#, 'camerino'
#, 'macae rj, brasil'
#, 'berchem'
#, 'cymru south wales uk, tywi valley'
#, 'rus'
#, ':'
#, 'shandong'
#, 'forlimpopoli'
#, 'berkshire'
#, 'kiyv'
#, 'czestochowa'
#, 'ruhrgebiet'
#, 'vidin;bulgaria'
#, 'laaaarndon'
#, 'Украина, Донецк'
#, 'grid'
#, 'villefranche sur saône'
#, 'rva + nyc'
#, 'uviéu asturies'
#, 'hồ chí minh city'
#, 'neotropic'
#, 'София, България'
#, 'improving izariam'
#, '法国,tours'
#, '中国湖北武汉'
#, 'sardinia'
#, 'the island'
#, 'massassachusetts'
#, 'guerrero, méxico'
#, 'norcal'
#, 'florianópolis brasil'
#, 'hangchow'
#, 'sab com'
#, 'goring on thames'
#, 'pgh'
#, 'the haque'
#, 'hopefully somewhere warm and sunny'
#, 'rossendale, lancashire'
#, 'earth planet'
#, 'western siberia'
#, 'bedfordshire'
#, 'cancun, qroo'
#, 'www tellago com'
#, 'malnova, latgola'
#, 'layer'
#, 'planet earth, with firmly both feet'
#, 'somewhere cloudy'
#, 'pondicherry'
#, 'türkiye'
#, 'bigbuzzylocation'
#, 'vagabonding'
#, 'czech rep'
#, '+ ° ' ", + ° ' "'
#, 'castlemaine, australila'
#, 'wordwide'
#, 'mid atlantic'
#, 'denver!'
#, 'jkl/fin'
#, 'chorzów'
#, 'hauj khas'
#, 'mozhajsk'
#, 'net'
#, 'moray'
#, 'florianopolis'
#, 'härnösand'
#, '次元'
#, 'doap com'
#, 'russian/tyumen'
#, '珠海'
#, 'bretagne'
#, 'dumbo'
#, '@fredrocious'
#, 'Великий Новгород'
#, 'demark'
#, 'opus'
#, 'burwood east'
#, 'brasília brasil'
#, 'www daum net'
#, 'classified'
#, 'genève'
#, 'alexanderie, Égypte'
#, '+ :'
#, 'yorkshire england'
#, 'weiqing building , thu'
#, 'venezia'
#, 'tacos'r'us'
#, 'gxnn'
#, 'são josé dos campos, brasil'
#, 'linodia'
#, 'jehay'
#, 'mikocheni'
#, 'twisted disneyland'
#, 'perros guirec'
#, 'apt'
#, 'jerez'
#, 'ilhéus bahia'
#, 'uk_uk, lviv'
#, 'northeast'
#, 'the planet'
#, 'greater philly area'
#, 'xanth'
#, 'earth!'
#, 'Кривой Рог'
#, 'iwate'
#, 'behind the phosphor'
#, 'dark alley'
#, 'katwijk zh'
#, 'kazaǹ'
#, 'são carlos sp'
#, 'easy'
#, '/root/'
#, 'veliko tarnovo'
#, 'the localhost'
#, 'xjf'
#, 'karlruhe'
#, 'goiânia go / brasil'
#, 'a happy place'
#, 'sankt peterburg'
#, 'everywhere yet nowhere'
#, 'uberlândia'
#, 'occupied palestinian territory'
#, 'lutsk'
#, 'west coast, the states'
#, 'calfornia'
#, 'Ísland'
#, 'naters'
#, 'little brington, england'
#, 'münchen/munich/mnichov/Мюнхен'
#, 'the 'ham'
#, 'test'
#, 'brignoud'
#, 'bitbucket'
#, 'a desert island'
#, 'bruxelles belgique'
#, 'bahia'
#, 'transcontinental'
#, 'rua turi'
#, 'higashi nihonbashi'
#, 'd f méxico'
#, 'lago di garda'
#, '天津市河东区'
#, 'raffles institution lane'
#, 'msp'
#, 'somewhere out there'
#, 'Томск, Россия'
#, '/dev/null;'
#, 'epicmorgia'
#, 'ub,mgl'
#, '@sfrdmn'
#, 'orhem'
#, '東京都渋谷区神宮前'
#, 'qz'
#, 'cancun'
#, 'nürtingen'
#, 'sebastopol'
#, 'boardtown , bitstreet'
#, 'earth?'
#, 'perm''
#, 'bnagalore'
#, 'germay'
#, 'goiania'
#, 'center valley'
#, 'moscow\'
#, 'allegheny college'
#, 'singaproe'
#, 'nagoya,aichi / mt関連の物など色々作っています。'
#, 'tarnowskie góry'
#, 'rover'
#, 'ici'
#, 'boulogne billancourt'
#, 'argyll, scotland'
#, 'uchicago'
#, '@home'
#, '_'
#, 'kalix'
#, 'Россия, Санкт Петербург'
#, 'park city'
#, 'utc+'
#, 'muree'
#, 'viña del mar'
#, 'queensland'
#, 'ldn — nyc'
#, 'marília/sp brasil'
#, 'inotherworld'
#, 'darlinghurst'
#, 'traveling the world'
#, 'phase space'
#, 'jaén'
#, 'しんじく'
#, 'grudziądz'
#, 'joão pessoa / pb / brasil'
#, 'aizu'
#, 'far far away land'
#, 'nazi moonbase on the moon'
#, '東京都世田谷区用賀'
#, 'catskills'
#, '경남 창원시 마산회원구'
#, 'republica dominicana'
#, 'eets'
#, 'mazangé'
#, 'wenen, oostenrijk'
#, 'cipherspace'
#, 'estudiante'
#, 'seilles'
#, 'korea, south'
#, 'nigeira'
#, '台灣(taiwan)'
#, 'chang sha'
#, 'virgo supercluster, milky way galaxy, sol system, earth'
#, 'jönköping'
#, 'pratteln, schweiz'
#, 'lanzarote, canary islands'
#, 'zanjan'
#, 'phila'
#, 'miensk litowski, biełaruthenia'
#, 'andheri'
#, 'cucuta'
#, '/home/dilibau/'
#, 'auvelais'
#, 'locating'
#, 'turin, italiy'
#, 'kanhangad'
#, 'roncade'
#, 'santo andré sp brasil'
#, 'java'
#, 'north wilkesboro'
#, 'catamarca'
#, 'marieville, qc'
#, 'bosnia & herzegovina'
#, 'kochi'
#, 'republica'
#, 'shropshire, england'
#, 'sillycone valley'
#, 'd'
#, 'here and there'
#, 'marília sp brasil'
#, 'timișoara'
#, 'rio das ostras'
#, '中国浙江杭州'
#, 'montes claros brasil'
#, 'jutphaas'
#, 'the wild, wild west'
#, 'the multiverse'
#, 'ailleurs'
#, 'here, ithink'
#, 'geek between the keyboard and the chair'
#, 'osaka shi osaka pref'
#, 'a coruña, galicia, españa'
#, '東京都'
#, 'kathmandy'
#, 'Йошкар Ола'
#, 'private'
#, 'somewhere over the rainbow'
#, 'kalymnos'
#, 'mogi das cruzes, sp'
#, 'dominican rapublic'
#, 'twin cities, minn'
#, 'outah space'
#, 'frankfurt / main'
#, 'comodo'
#, '<radar offline>'
#, 'manipal/bokaro'
#, 'funky town'
#, 'nimes'
#, 'missal'
#, 'cafelândia sp'
#, 'thessaloniki'
#, 'labège'
#, 'skyrim'
#, 'pitesti'
#, 'azeroth'
#, 'сеть'
#, 'i'm around'
#, 'brasília, df brasil'
#, 'miensk litowski, kryŭja'
#, 'merry ol' england'
#, 'brébeuf, qc'
#, 'ksa'
#, 'besançon / aix en provence'
#, 'leicestershire, england'
#, 'far east'
#, 'daiict,gandhinagar'
#, 'bègles'
#, 'on the planet'
#, 'hinterlands'
#, 'Украина, Винница; ukraine, vinnitsya'
#, 'ac'
#, 'transylvania'
#, 'bengaluru/marudhur'
#, 'vtech'
#, 'nyc, sf'
#, 'liestal, schweiz'
#, 'bmnville'
#, 'Россия, Екатеринбург'
#, 'universe'
#, 'whistler'
#, 'hampshire, united kingdon'
#, 'the hidden fortress'
#, 'luxmebourg'
#, 'wrocław, polska'
#, 'dogpatch'
#, 'gelderland'
#, 'emeryville'
#, 'https://gitorious org/~elf pavlik'
| yaph/gh-commit-locations | loclists.py | Python | mit | 35,900 | [
"Galaxy",
"MOOSE"
] | 4eb46a03154f4927ee1070b7698e75b8b0ec04bbaf55fbfe27e51781d162fa80 |
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_ipaddrgroup
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of IpAddrGroup Avi RESTful Object
description:
- This module is used to configure IpAddrGroup object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
addrs:
description:
- Configure ip address(es).
apic_epg_name:
description:
- Populate ip addresses from members of this cisco apic epg.
country_codes:
description:
- Populate the ip address ranges from the geo database for this country.
description:
description:
- User defined description for the object.
ip_ports:
description:
- Configure (ip address, port) tuple(s).
marathon_app_name:
description:
- Populate ip addresses from tasks of this marathon app.
marathon_service_port:
description:
- Task port associated with marathon service port.
- If marathon app has multiple service ports, this is required.
- Else, the first task port is used.
name:
description:
- Name of the ip address group.
required: true
prefixes:
description:
- Configure ip address prefix(es).
ranges:
description:
- Configure ip address range(s).
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the ip address group.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create an IP Address Group configuration
avi_ipaddrgroup:
controller: ''
username: ''
password: ''
name: Client-Source-Block
prefixes:
- ip_addr:
addr: 10.0.0.0
type: V4
mask: 8
- ip_addr:
addr: 172.16.0.0
type: V4
mask: 12
- ip_addr:
addr: 192.168.0.0
type: V4
mask: 16
'''
RETURN = '''
obj:
description: IpAddrGroup (api/ipaddrgroup) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Ansible entry point: create, update or delete an Avi IpAddrGroup object."""
    # Argument spec mirroring the fields of the api/ipaddrgroup REST object.
    specs = dict(
        state=dict(default='present', choices=['absent', 'present']),
        addrs=dict(type='list',),
        apic_epg_name=dict(type='str',),
        country_codes=dict(type='list',),
        description=dict(type='str',),
        ip_ports=dict(type='list',),
        marathon_app_name=dict(type='str',),
        marathon_service_port=dict(type='int',),
        name=dict(type='str', required=True),
        prefixes=dict(type='list',),
        ranges=dict(type='list',),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # Fold in the options common to every Avi module (controller, credentials, ...).
    specs.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=specs, supports_check_mode=True)
    # Bail out with an installation hint when the Avi SDK import failed.
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # Delegate the actual CRUD work to the shared Avi API helper.
    return avi_ansible_api(module, 'ipaddrgroup', set([]))
if __name__ == '__main__':
main()
| HuaweiSwitch/ansible | lib/ansible/modules/network/avi/avi_ipaddrgroup.py | Python | gpl-3.0 | 4,785 | [
"VisIt"
] | a727d03911e1e51a54d8c68aee40b54d12e1b90222aff874a814eb76d6ab9c42 |
# -*- coding: utf-8 -*-
r"""A collection of commonly used one- and two-dimensional functions in neutron scattering.

=============== ==========================================================
gaussian        One-dimensional Gaussian profile(s)
gaussian2d      Two-dimensional Gaussian profile(s)
lorentzian      One-dimensional Lorentzian profile(s)
voigt           One-dimensional Voigt profile(s)
resolution      Resolution-function profile
gaussian_ring   Two-dimensional Gaussian ring profile
=============== ==========================================================
"""
import numpy as np
from scipy import special
from scipy.special import erf
def gaussian(p, q):
    r"""Evaluate a linear background plus an arbitrary number of Gaussian peaks.

    Parameters
    ----------
    p : ndarray
        Parameter vector laid out as::

            p[0]          constant background
            p[1]          linear background slope
            p[3*i + 2]    integrated area of peak i
            p[3*i + 3]    center position of peak i
            p[3*i + 4]    FWHM of peak i

        i.e. two background terms followed by three entries per peak.
    q : ndarray
        One-dimensional array of points at which to evaluate the profile.

    Returns
    -------
    out : ndarray
        The summed profile evaluated at ``q``.

    Notes
    -----
    Each peak is a normalized Gaussian scaled by its area:

    .. math:: f(q) = \frac{a}{\sigma \sqrt{2\pi}} e^{-\frac{(q-q_0)^2}{2\sigma^2}},

    with :math:`fwhm = 2 \sqrt{2 \ln{2}} \sigma`, so that the integral of a
    single peak over all :math:`q` equals its area parameter *a*.

    Examples
    --------
    Plot a single gaussian with an integrated intensity of 1, centered at zero, and fwhm of 0.3:

    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> p = np.array([0., 0., 1., 0., 0.3])
    >>> x = np.linspace(-1, 1, 101)
    >>> y = gaussian(p, x)
    >>> plt.plot(x, y)
    >>> plt.show()

    Plot two gaussians, equidistant from the origin with the same intensity and fwhm as above:

    >>> p = np.array([0., 0., 1., -0.3, 0.3, 1., 0.3, 0.3])
    >>> x = np.linspace(-1, 1, 101)
    >>> y = gaussian(p, x)
    >>> plt.plot(x, y)
    >>> plt.show()
    """
    # Conversion factor between FWHM and the Gaussian standard deviation.
    fwhm_to_sigma = 2. * np.sqrt(2. * np.log(2.))
    # Start from the linear background, then accumulate one term per peak.
    result = p[0] + p[1] * q
    n_peaks = int(len(p[2:]) / 3)
    for i in range(n_peaks):
        area, center, fwhm = p[3 * i + 2], p[3 * i + 3], p[3 * i + 4]
        sigma = fwhm / fwhm_to_sigma
        result = result + area / (sigma * np.sqrt(2. * np.pi)) * np.exp(-(q - center) ** 2 / (2. * sigma ** 2))
    return result
def gaussian2d(p, q):
    r"""Returns an arbitrary number of two-dimensional Gaussian profiles.
    Parameters
    ----------
    p : ndarray
        Parameters for the Gaussian, in the following format:
        +-------+------------------------------+
        | p[0]  | Constant background          |
        +-------+------------------------------+
        | p[1]  | Linear background slope      |
        +-------+------------------------------+
        | p[2]  | Volume under the first peak  |
        +-------+------------------------------+
        | p[3]  | X position of the first peak |
        +-------+------------------------------+
        | p[4]  | Y position of the first peak |
        +-------+------------------------------+
        | p[5]  | FWHM_x of the first peak     |
        +-------+------------------------------+
        | p[6]  | FWHM_y of the first peak     |
        +-------+------------------------------+
        | p[7]  | Volume under the second peak |
        +-------+------------------------------+
        | p[...]| etc.                         |
        +-------+------------------------------+
    q : tuple
        Tuple of two one-dimensional input arrays.
    Returns
    -------
    out : ndarray
        Two dimensional Gaussian profile.
    Notes
    -----
    A Gaussian profile is defined as:
    .. math:: f(q) = \frac{a}{\sigma \sqrt{2\pi}} e^{-\left(\frac{(q_x-q_x0)^2}{2\sigma_x^2} + \frac{(q_y-q_y0)^2}{2\sigma_y^2}\right)},
    where the integral over the whole function is *a*, and
    .. math:: fwhm = 2 \sqrt{2 \ln{2}} \sigma.
    Examples
    --------
    Plot a single gaussian with an integrated intensity of 1, centered at (0, 0), and fwhm of 0.3:
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> p = np.array([0., 0., 1., 0., 0., 0.3, 0.3])
    >>> x, y = np.meshgrid(np.linspace(-1, 1, 101), np.linspace(-1, 1, 101))
    >>> z = gaussian2d(p, (x, y))
    >>> plt.pcolormesh(x, y, z)
    >>> plt.show()
    Plot two gaussians, equidistant from the origin with the same intensity and fwhm as above:
    >>> p = np.array([0., 0., 1., -0.3, -0.3, 0.3, 0.3, 1., 0.3, 0.3, 0.3, 0.3])
    >>> x, y = np.meshgrid(np.linspace(-1, 1, 101), np.linspace(-1, 1, 101))
    >>> z = gaussian2d(p, (x, y))
    >>> plt.pcolormesh(x, y, z)
    >>> plt.show()
    """
    x, y = q
    # Linear background evaluated over both axes
    funct = p[0] + p[1] * (x + y)
    # Five parameters per peak: volume, x0, y0, fwhm_x, fwhm_y
    for i in range(int(len(p[2:]) // 5)):
        # Convert each FWHM to a standard deviation
        sigma_x = p[5 * i + 5] / (2. * np.sqrt(2. * np.log(2.)))
        sigma_y = p[5 * i + 6] / (2. * np.sqrt(2. * np.log(2.)))
        funct += p[5 * i + 2] / (sigma_x * sigma_y * 2. * np.pi) * np.exp(
            -((x - p[5 * i + 3]) ** 2 / (2 * sigma_x ** 2) + (y - p[5 * i + 4]) ** 2 / (2 * sigma_y ** 2)))
    return funct
def lorentzian(p, q):
    r"""Evaluate a sum of normalized Lorentzian peaks on a linear background.

    Parameters
    ----------
    p : ndarray
        Parameter vector ``[bg, slope, a1, q1, fwhm1, a2, q2, fwhm2, ...]``:
        a constant background ``bg``, a linear background slope ``slope``,
        then three values per peak -- integrated area, center position and
        full width at half maximum :math:`\Gamma`.
    q : ndarray
        One dimensional array of positions at which to evaluate the profile.

    Returns
    -------
    out : ndarray
        One dimensional Lorentzian profile, same shape as `q`.

    Notes
    -----
    Each peak is

    .. math:: f(q) = \frac{a}{\pi}
              \frac{\frac{1}{2}\Gamma}{(q-q_0)^2 + (\frac{1}{2}\Gamma)^2},

    normalized so its integral equals *a*.

    Examples
    --------
    >>> import numpy as np
    >>> p = np.array([0., 0., 1., 0., 0.3])
    >>> x = np.linspace(-1, 1, 101)
    >>> y = lorentzian(p, x)
    """
    profile = p[0] + p[1] * q
    n_peaks = len(p[2:]) // 3
    for k in range(n_peaks):
        area = p[3 * k + 2]
        center = p[3 * k + 3]
        hwhm = 0.5 * p[3 * k + 4]  # half width at half maximum
        profile = profile + (area / np.pi) * hwhm / ((q - center) ** 2 +
                                                     hwhm ** 2)
    return profile
def voigt(p, q):
    r"""Evaluate a sum of Voigt profiles on a linear background.

    A Voigt profile is a Lorentzian convoluted with a Gaussian; it is
    evaluated via the real part of the Faddeeva function
    :func:`scipy.special.wofz`.

    Parameters
    ----------
    p : ndarray
        Parameter vector ``[bg, slope, a1, q1, fwhm_L1, fwhm_G1, ...]``:
        a constant background ``bg``, a linear background slope ``slope``,
        then four values per peak -- integrated area, center position,
        Lorentzian FWHM and Gaussian FWHM.
    q : ndarray
        One dimensional array of positions at which to evaluate the profile.

    Returns
    -------
    out : ndarray
        One dimensional Voigt profile, same shape as `q`.

    Notes
    -----
    .. math:: V(x;\sigma,\gamma) = \int_{-\infty}^\infty
              G(x';\sigma)\,L(x-x';\gamma)\, dx'.

    Examples
    --------
    >>> import numpy as np
    >>> p = np.array([0., 0., 1., 0., 0.2, 0.3])
    >>> x = np.linspace(-1, 1, 101)
    >>> y = voigt(p, x)
    """
    profile = p[0] + p[1] * q
    n_peaks = len(p[2:]) // 4
    for k in range(n_peaks):
        area = p[4 * k + 2]
        center = p[4 * k + 3]
        gamma = 0.5 * p[4 * k + 4]  # Lorentzian half width
        sigma = p[4 * k + 5] / (2. * np.sqrt(2. * np.log(2.)))
        # Complex argument of the Faddeeva function
        z = ((q - center) + 1j * gamma) / (sigma * np.sqrt(2.))
        # Normalization pre-factor of the Gaussian component
        norm = sigma * np.sqrt(2. * np.pi)
        profile = profile + area * np.real(special.wofz(z)) / norm
    return profile
def resolution(p, q, mode='gaussian'):
    r"""Returns a gaussian profile using a resolution matrix generated for a Triple Axis Spectrometer.
    Parameters
    ----------
    p : ndarray
        Parameters for the resolution function, in the following format:
        +-------+------------------------------+
        | p[0]  | Constant background          |
        +-------+------------------------------+
        | p[1]  | Linear background slope      |
        +-------+------------------------------+
        | p[2]  | Volume under the first peak  |
        +-------+------------------------------+
        | p[3]  | X position of the first peak |
        +-------+------------------------------+
        | p[4]  | Y position of the first peak |
        +-------+------------------------------+
        | p[5]  | R\ :sub:`0`                  |
        +-------+------------------------------+
        | p[6]  | RM\ :sub:`xx`                |
        +-------+------------------------------+
        | p[7]  | RM\ :sub:`yy`                |
        +-------+------------------------------+
        | p[8]  | RM\ :sub:`xy`                |
        +-------+------------------------------+
        | p[9]  | Area under the second peak   |
        +-------+------------------------------+
        | p[...]| etc.                         |
        +-------+------------------------------+
    q : tuple of ndarray
        Two input arrays of equivalent size and shape.
    mode : str, optional
        Lineshape of the resolution ellipse. Only ``'gaussian'`` (the
        default) is implemented; for any other value the peak terms are
        skipped and only the linear background is returned.
    Returns
    -------
    out : ndarray
        Two dimensional resolution profile with shape of input arrays.
    Notes
    -----
    A resolution profile is defined as a two dimensional gaussian that is comprised of elements of a
    resolution matrix for a triple axis spectrometer, as produced by :py:meth:`.Instrument.calc_resolution`
    .. math:: f(q) = R_0 e^{-\frac{1}{2}(RM_{xx}^2 (x-x_0)^2 + RM_{yy}^2 (y-y_0)^2 + 2RM_{xy}(x-x_0)(y-y_0))},
    where RM is the resolution matrix.
    """
    # Linear background evaluated over both axes
    funct = p[0] + p[1] * (q[0] + q[1])
    # NOTE(review): an unrecognized `mode` silently falls through and returns
    # only the background -- confirm this is intended rather than raising.
    if mode == 'gaussian':
        # Seven parameters per peak: volume, x0, y0, R0, RMxx, RMyy, RMxy
        for i in range(int(len(p[2:]) / 7)):
            # Normalization pre-factor
            N = (np.sqrt(p[7 * i + 6]) * np.sqrt(p[7 * i + 7] - p[7 * i + 8] ** 2 / p[7 * i + 6])) / (
                2. * np.pi * p[7 * i + 5])
            funct += p[7 * i + 2] * p[7 * i + 5] * N * np.exp(-1. / 2. * (p[7 * i + 6] * (q[0] - p[7 * i + 3]) ** 2 +
                                                                          p[7 * i + 7] * (q[1] - p[7 * i + 4]) ** 2 +
                                                                          2. * p[7 * i + 8] * (q[0] - p[7 * i + 3]) * (
                                                                              q[1] - p[7 * i + 4])))
    return funct
def gaussian_ring(p, q):
    r"""Evaluate a sum of two-dimensional gaussian-ellipse (ring) profiles.

    Parameters
    ----------
    p : ndarray
        Parameter vector ``[bg, slope, v1, x1, y1, r1, alpha1, fwhm1, ...]``:
        a constant background ``bg``, a linear background slope ``slope``,
        then six values per ring -- volume, x position, y position, radius,
        eccentricity and FWHM of the ring.
    q : tuple of ndarray
        Two input arrays of equivalent size and shape, e.g. formed with
        :py:func:`numpy.meshgrid`.

    Returns
    -------
    out : ndarray
        Two dimensional gaussian ellipse profile, same shape as the inputs.

    Notes
    -----
    The profile is

    .. math:: f(x,y) = \frac{1}{N} e^{-\frac{1}{2}
              \frac{(\sqrt{(x-x_0)^2 + \alpha^2(y-y_0)^2}-r_0)^2}{2\sigma}},

    where :math:`FWHM = 2\sqrt{2\ln(2)}\sigma` and N is the normalization

    .. math:: N = \frac{2\pi}{\alpha} \left(\sigma^2
              e^{-\frac{r_0^2}{2\sigma^2}} + \sqrt{\frac{\pi}{2}} r_0 \sigma
              \left(1 + \mathrm{Erf}\left(\frac{r_0}{\sqrt{2}\sigma}\right)
              \right)\right).
    """
    x, y = q
    profile = p[0] + p[1] * (x + y)
    ln2 = np.log(2.)
    n_rings = len(p[2:]) // 6
    for k in range(n_rings):
        volume = p[6 * k + 2]
        x0 = p[6 * k + 3]
        y0 = p[6 * k + 4]
        radius = p[6 * k + 5]
        alpha = p[6 * k + 6]  # eccentricity
        fwhm = p[6 * k + 7]
        sigma = fwhm / (2. * np.sqrt(2. * ln2))
        # Normalization pre-factor (integral of the ring over the plane)
        norm = 2. * np.pi * (sigma ** 2 * np.exp(-radius ** 2 /
                                                 (2. * sigma ** 2)) +
                             np.sqrt(np.pi / 2) * radius * sigma *
                             (1. + erf(radius / (np.sqrt(2) * sigma)))) / alpha
        # Radial distance from the elliptical ridge of the ring
        rad = np.sqrt((x - x0) ** 2 + alpha ** 2 * (y - y0) ** 2) - radius
        profile = profile + volume / norm * np.exp(-4. * ln2 * rad ** 2 /
                                                   fwhm ** 2)
    return profile
| granrothge/neutronpy | neutronpy/functions.py | Python | mit | 16,146 | [
"Gaussian"
] | caf0f55501b4690e597003e21e2478a194cd55db83552dfc19c785275a15c898 |
""" Affine image registration module consisting of the following classes:
AffineMap: encapsulates the necessary information to perform affine
transforms between two domains, defined by a `static` and a `moving`
image. The `domain` of the transform is the set of points in the
`static` image's grid, and the `codomain` is the set of points in
the `moving` image. When we call the `transform` method, `AffineMap`
maps each point `x` of the domain (`static` grid) to the codomain
(`moving` grid) and interpolates the `moving` image at that point
to obtain the intensity value to be placed at `x` in the resulting
grid. The `transform_inverse` method performs the opposite operation
mapping points in the codomain to points in the domain.
ParzenJointHistogram: computes the marginal and joint distributions of
intensities of a pair of images, using Parzen windows [Parzen62]
with a cubic spline kernel, as proposed by Mattes et al. [Mattes03].
It also computes the gradient of the joint histogram w.r.t. the
parameters of a given transform.
MutualInformationMetric: computes the value and gradient of the mutual
information metric the way `Optimizer` needs them. That is, given
a set of transform parameters, it will use `ParzenJointHistogram`
to compute the value and gradient of the joint intensity histogram
    evaluated at the given parameters, and evaluate the value and
gradient of the histogram's mutual information.
AffineRegistration: it runs the multi-resolution registration, putting
all the pieces together. It needs to create the scale space of the
images and run the multi-resolution registration by using the Metric
and the Optimizer at each level of the Gaussian pyramid. At each
level, it will setup the metric to compute value and gradient of the
metric with the input images with different levels of smoothing.
References
----------
[Parzen62] E. Parzen. On the estimation of a probability density
function and the mode. Annals of Mathematical Statistics,
33(3), 1065-1076, 1962.
[Mattes03] Mattes, D., Haynor, D. R., Vesselle, H., Lewellen, T. K.,
& Eubank, W. PET-CT image registration in the chest using
free-form deformations. IEEE Transactions on Medical
Imaging, 22(1), 120-8, 2003.
"""
import numpy as np
import numpy.linalg as npl
import scipy.ndimage as ndimage
from ..core.optimize import Optimizer
from ..core.optimize import SCIPY_LESS_0_12
from . import vector_fields as vf
from . import VerbosityLevels
from .parzenhist import (ParzenJointHistogram,
sample_domain_regular,
compute_parzen_mi)
from .imwarp import (get_direction_and_spacings, ScaleSpace)
from .scalespace import IsotropicScaleSpace
# Interpolation modes accepted by `AffineMap` transforms.
_interp_options = ['nearest', 'linear']
# Dispatch table mapping (image dimension, interpolation mode) to the
# corresponding low-level affine warping routine.
_transform_method = {
    (2, 'nearest'): vf.transform_2d_affine_nn,
    (3, 'nearest'): vf.transform_3d_affine_nn,
    (2, 'linear'): vf.transform_2d_affine,
    (3, 'linear'): vf.transform_3d_affine,
}
class AffineInversionError(Exception):
    """Raised when an affine matrix contains invalid (NaN) entries or is
    singular and therefore cannot be inverted."""
    pass
class AffineMap(object):
    """Affine transform between a `static` (domain) grid and a `moving`
    (codomain) grid.

    The `affine` matrix operates in world (physical) coordinates; the
    grid-to-world matrices map voxel indices of each grid into that common
    space.  See `__init__` for the meaning of the sampling information.
    """

    def __init__(self, affine, domain_grid_shape=None, domain_grid2world=None,
                 codomain_grid_shape=None, codomain_grid2world=None):
        """ AffineMap
        Implements an affine transformation whose domain is given by
        `domain_grid` and `domain_grid2world`, and whose co-domain is
        given by `codomain_grid` and `codomain_grid2world`.
        The actual transform is represented by the `affine` matrix, which
        operate in world coordinates. Therefore, to transform a moving image
        towards a static image, we first map each voxel (i,j,k) of the static
        image to world coordinates (x,y,z) by applying `domain_grid2world`.
        Then we apply the `affine` transform to (x,y,z) obtaining (x', y', z')
        in moving image's world coordinates. Finally, (x', y', z') is mapped
        to voxel coordinates (i', j', k') in the moving image by multiplying
        (x', y', z') by the inverse of `codomain_grid2world`. The
        `codomain_grid_shape` is used analogously to transform the static
        image towards the moving image when calling `transform_inverse`.
        If the domain/co-domain information is not provided (None) then the
        sampling information needs to be specified each time the `transform`
        or `transform_inverse` is called to transform images. Note that such
        sampling information is not necessary to transform points defined in
        physical space, such as stream lines.
        Parameters
        ----------
        affine : array, shape (dim + 1, dim + 1)
            the matrix defining the affine transform, where `dim` is the
            dimension of the space this map operates in (2 for 2D images,
            3 for 3D images). If None, then `self` represents the identity
            transformation.
        domain_grid_shape : sequence, shape (dim,), optional
            the shape of the default domain sampling grid. When `transform`
            is called to transform an image, the resulting image will have
            this shape, unless a different sampling information is provided.
            If None, then the sampling grid shape must be specified each time
            the `transform` method is called.
        domain_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with the domain grid.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        codomain_grid_shape : sequence of integers, shape (dim,)
            the shape of the default co-domain sampling grid. When
            `transform_inverse` is called to transform an image, the resulting
            image will have this shape, unless a different sampling
            information is provided. If None (the default), then the sampling
            grid shape must be specified each time the `transform_inverse`
            method is called.
        codomain_grid2world : array, shape (dim + 1, dim + 1)
            the grid-to-world transform associated with the co-domain grid.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        """
        # Validates `affine` and caches its inverse (see `set_affine`)
        self.set_affine(affine)
        # Default sampling information; any of these may be overridden on a
        # per-call basis in `transform` / `transform_inverse`.
        self.domain_shape = domain_grid_shape
        self.domain_grid2world = domain_grid2world
        self.codomain_shape = codomain_grid_shape
        self.codomain_grid2world = codomain_grid2world

    def set_affine(self, affine):
        """ Sets the affine transform (operating in physical space)

        Also caches the inverse of `affine` in `self.affine_inv`.

        Parameters
        ----------
        affine : array, shape (dim + 1, dim + 1)
            the matrix representing the affine transform operating in
            physical space. The domain and co-domain information
            remains unchanged. If None, then `self` represents the identity
            transformation.

        Raises
        ------
        AffineInversionError
            if `affine` contains NaN entries or is singular.
        """
        self.affine = affine
        if self.affine is None:
            # None means identity: no inverse needs to be computed
            self.affine_inv = None
            return
        if np.any(np.isnan(affine)):
            raise AffineInversionError('Affine contains invalid elements')
        try:
            self.affine_inv = npl.inv(affine)
        except npl.LinAlgError:
            raise AffineInversionError('Affine cannot be inverted')

    def _apply_transform(self, image, interp='linear', image_grid2world=None,
                         sampling_grid_shape=None, sampling_grid2world=None,
                         resample_only=False, apply_inverse=False):
        """ Transforms the input image applying this affine transform
        This is a generic function to transform images using either this
        (direct) transform or its inverse.
        If applying the direct transform (`apply_inverse=False`):
        by default, the transformed image is sampled at a grid defined by
        `self.domain_shape` and `self.domain_grid2world`.
        If applying the inverse transform (`apply_inverse=True`):
        by default, the transformed image is sampled at a grid defined by
        `self.codomain_shape` and `self.codomain_grid2world`.
        If the sampling information was not provided at initialization of this
        transform then `sampling_grid_shape` is mandatory.
        Parameters
        ----------
        image : array, shape (X, Y) or (X, Y, Z)
            the image to be transformed
        interp : string, either 'linear' or 'nearest'
            the type of interpolation to be used, either 'linear'
            (for k-linear interpolation) or 'nearest' for nearest neighbor
        image_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with `image`.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        sampling_grid_shape : sequence, shape (dim,), optional
            the shape of the grid where the transformed image must be sampled.
            If None (the default), then `self.domain_shape` is used instead
            (which must have been set at initialization, otherwise an exception
            will be raised).
        sampling_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with the sampling grid
            (specified by `sampling_grid_shape`, or by default
            `self.domain_shape`). If None (the default), then the
            grid-to-world transform is assumed to be the identity.
        resample_only : Boolean, optional
            If False (the default) the affine transform is applied normally.
            If True, then the affine transform is not applied, and the input
            image is just re-sampled on the domain grid of this transform.
        apply_inverse : Boolean, optional
            If False (the default) the image is transformed from the codomain
            of this transform to its domain using the (direct) affine
            transform. Otherwise, the image is transformed from the domain
            of this transform to its codomain using the (inverse) affine
            transform.
        Returns
        -------
        transformed : array, shape `sampling_grid_shape` or `self.domain_shape`
            the transformed image, sampled at the requested grid
        """
        # Verify valid interpolation requested
        if interp not in _interp_options:
            raise ValueError('Unknown interpolation method: %s' % (interp,))
        # Obtain sampling grid: explicit argument wins, otherwise fall back
        # to the default grid of the direction we are mapping towards
        if sampling_grid_shape is None:
            if apply_inverse:
                sampling_grid_shape = self.codomain_shape
            else:
                sampling_grid_shape = self.domain_shape
        if sampling_grid_shape is None:
            msg = 'Unknown sampling info. Provide a valid sampling_grid_shape'
            raise ValueError(msg)
        dim = len(sampling_grid_shape)
        shape = np.array(sampling_grid_shape, dtype=np.int32)
        # Verify valid image dimension
        if dim < 2 or dim > 3:
            raise ValueError('Undefined transform for dimension: %d' % (dim,))
        # Obtain grid-to-world transform for sampling grid
        if sampling_grid2world is None:
            if apply_inverse:
                sampling_grid2world = self.codomain_grid2world
            else:
                sampling_grid2world = self.domain_grid2world
        if sampling_grid2world is None:
            sampling_grid2world = np.eye(dim + 1)
        # Obtain world-to-grid transform for input image
        if image_grid2world is None:
            if apply_inverse:
                image_grid2world = self.domain_grid2world
            else:
                image_grid2world = self.codomain_grid2world
            if image_grid2world is None:
                image_grid2world = np.eye(dim + 1)
        image_world2grid = npl.inv(image_grid2world)
        # Compute the transform from sampling grid to input image grid:
        # sampling voxel -> world -> (affine) -> world -> image voxel
        if apply_inverse:
            aff = self.affine_inv
        else:
            aff = self.affine
        if (aff is None) or resample_only:
            # Identity affine (or resampling requested): skip the affine step
            comp = image_world2grid.dot(sampling_grid2world)
        else:
            comp = image_world2grid.dot(aff.dot(sampling_grid2world))
        # Transform the input image
        if interp == 'linear':
            # linear interpolation requires a floating-point input buffer
            image = image.astype(np.float64)
        transformed = _transform_method[(dim, interp)](image, shape, comp)
        return transformed

    def transform(self, image, interp='linear', image_grid2world=None,
                  sampling_grid_shape=None, sampling_grid2world=None,
                  resample_only=False):
        """ Transforms the input image from co-domain to domain space
        By default, the transformed image is sampled at a grid defined by
        `self.domain_shape` and `self.domain_grid2world`. If such
        information was not provided then `sampling_grid_shape` is mandatory.
        Parameters
        ----------
        image : array, shape (X, Y) or (X, Y, Z)
            the image to be transformed
        interp : string, either 'linear' or 'nearest'
            the type of interpolation to be used, either 'linear'
            (for k-linear interpolation) or 'nearest' for nearest neighbor
        image_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with `image`.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        sampling_grid_shape : sequence, shape (dim,), optional
            the shape of the grid where the transformed image must be sampled.
            If None (the default), then `self.codomain_shape` is used instead
            (which must have been set at initialization, otherwise an exception
            will be raised).
        sampling_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with the sampling grid
            (specified by `sampling_grid_shape`, or by default
            `self.codomain_shape`). If None (the default), then the
            grid-to-world transform is assumed to be the identity.
        resample_only : Boolean, optional
            If False (the default) the affine transform is applied normally.
            If True, then the affine transform is not applied, and the input
            image is just re-sampled on the domain grid of this transform.
        Returns
        -------
        transformed : array, shape `sampling_grid_shape` or
        `self.codomain_shape`
            the transformed image, sampled at the requested grid
        """
        # Delegate to the generic implementation with the direct affine
        transformed = self._apply_transform(image, interp, image_grid2world,
                                            sampling_grid_shape,
                                            sampling_grid2world,
                                            resample_only,
                                            apply_inverse=False)
        return np.array(transformed)

    def transform_inverse(self, image, interp='linear', image_grid2world=None,
                          sampling_grid_shape=None, sampling_grid2world=None,
                          resample_only=False):
        """ Transforms the input image from domain to co-domain space
        By default, the transformed image is sampled at a grid defined by
        `self.codomain_shape` and `self.codomain_grid2world`. If such
        information was not provided then `sampling_grid_shape` is mandatory.
        Parameters
        ----------
        image : array, shape (X, Y) or (X, Y, Z)
            the image to be transformed
        interp : string, either 'linear' or 'nearest'
            the type of interpolation to be used, either 'linear'
            (for k-linear interpolation) or 'nearest' for nearest neighbor
        image_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with `image`.
            If None (the default), then the grid-to-world transform is assumed
            to be the identity.
        sampling_grid_shape : sequence, shape (dim,), optional
            the shape of the grid where the transformed image must be sampled.
            If None (the default), then `self.codomain_shape` is used instead
            (which must have been set at initialization, otherwise an exception
            will be raised).
        sampling_grid2world : array, shape (dim + 1, dim + 1), optional
            the grid-to-world transform associated with the sampling grid
            (specified by `sampling_grid_shape`, or by default
            `self.codomain_shape`). If None (the default), then the
            grid-to-world transform is assumed to be the identity.
        resample_only : Boolean, optional
            If False (the default) the affine transform is applied normally.
            If True, then the affine transform is not applied, and the input
            image is just re-sampled on the domain grid of this transform.
        Returns
        -------
        transformed : array, shape `sampling_grid_shape` or
        `self.codomain_shape`
            the transformed image, sampled at the requested grid
        """
        # Delegate to the generic implementation with the inverse affine
        transformed = self._apply_transform(image, interp, image_grid2world,
                                            sampling_grid_shape,
                                            sampling_grid2world,
                                            resample_only,
                                            apply_inverse=True)
        return np.array(transformed)
class MutualInformationMetric(object):
    def __init__(self, nbins=32, sampling_proportion=None):
        r""" Initializes an instance of the Mutual Information metric
        This class implements the methods required by Optimizer to drive the
        registration process.
        Parameters
        ----------
        nbins : int, optional
            the number of bins to be used for computing the intensity
            histograms. The default is 32.
        sampling_proportion : None or float in interval (0, 1], optional
            There are two types of sampling: dense and sparse. Dense sampling
            uses all voxels for estimating the (joint and marginal) intensity
            histograms, while sparse sampling uses a subset of them. If
            `sampling_proportion` is None, then dense sampling is
            used. If `sampling_proportion` is a floating point value in (0,1]
            then sparse sampling is used, where `sampling_proportion`
            specifies the proportion of voxels to be used. The default is
            None.
        Notes
        -----
        Since we use linear interpolation, images are not, in general,
        differentiable at exact voxel coordinates, but they are differentiable
        between voxel coordinates. When using sparse sampling, selected voxels
        are slightly moved by adding a small random displacement within one
        voxel to prevent sampling points from being located exactly at voxel
        coordinates. When using dense sampling, this random displacement is
        not applied.
        """
        # Parzen-window estimator of the joint intensity histogram
        self.histogram = ParzenJointHistogram(nbins)
        self.sampling_proportion = sampling_proportion
        # Cached metric value and gradient buffer; the gradient buffer is
        # (re)allocated lazily when the number of parameters is known
        self.metric_val = None
        self.metric_grad = None
    def setup(self, transform, static, moving, static_grid2world=None,
              moving_grid2world=None, starting_affine=None):
        r""" Prepares the metric to compute intensity densities and gradients
        The histograms will be setup to compute probability densities of
        intensities within the minimum and maximum values of `static` and
        `moving`
        Parameters
        ----------
        transform: instance of Transform
            the transformation with respect to whose parameters the gradient
            must be computed
        static : array, shape (S, R, C) or (R, C)
            static image
        moving : array, shape (S', R', C') or (R', C')
            moving image. The dimensions of the static (S, R, C) and moving
            (S', R', C') images do not need to be the same.
        static_grid2world : array (dim+1, dim+1), optional
            the grid-to-space transform of the static image. The default is
            None, implying the transform is the identity.
        moving_grid2world : array (dim+1, dim+1)
            the grid-to-space transform of the moving image. The default is
            None, implying the spacing along all axes is 1.
        starting_affine : array, shape (dim+1, dim+1), optional
            the pre-aligning matrix (an affine transform) that roughly aligns
            the moving image towards the static image. If None, no
            pre-alignment is performed. If a pre-alignment matrix is available,
            it is recommended to provide this matrix as `starting_affine`
            instead of manually transforming the moving image to reduce
            interpolation artifacts. The default is None, implying no
            pre-alignment is performed.
        """
        self.dim = len(static.shape)
        # Missing grid-to-world matrices default to the identity
        if moving_grid2world is None:
            moving_grid2world = np.eye(self.dim + 1)
        if static_grid2world is None:
            static_grid2world = np.eye(self.dim + 1)
        self.transform = transform
        self.static = np.array(static).astype(np.float64)
        self.moving = np.array(moving).astype(np.float64)
        # Cache both directions of each grid/world mapping
        self.static_grid2world = static_grid2world
        self.static_world2grid = npl.inv(static_grid2world)
        self.moving_grid2world = moving_grid2world
        self.moving_world2grid = npl.inv(moving_grid2world)
        self.static_direction, self.static_spacing = \
            get_direction_and_spacings(static_grid2world, self.dim)
        self.moving_direction, self.moving_spacing = \
            get_direction_and_spacings(moving_grid2world, self.dim)
        self.starting_affine = starting_affine
        # Initial affine: the pre-alignment if given, else the identity
        P = np.eye(self.dim + 1)
        if self.starting_affine is not None:
            P = self.starting_affine
        self.affine_map = AffineMap(P, static.shape, static_grid2world,
                                    moving.shape, moving_grid2world)
        # Pick the interpolation routine matching the image dimension
        if self.dim == 2:
            self.interp_method = vf.interpolate_scalar_2d
        else:
            self.interp_method = vf.interpolate_scalar_3d
        if self.sampling_proportion is None:
            # Dense sampling: no explicit sample points needed
            self.samples = None
            self.ns = 0
        else:
            # Sparse sampling: take roughly 1 of every k voxels of the
            # static grid (k is the inverse of the sampling proportion)
            k = int(np.ceil(1.0 / self.sampling_proportion))
            shape = np.array(static.shape, dtype=np.int32)
            self.samples = sample_domain_regular(k, shape, static_grid2world)
            self.samples = np.array(self.samples)
            self.ns = self.samples.shape[0]
            # Add a column of ones (homogeneous coordinates)
            self.samples = np.hstack((self.samples, np.ones(self.ns)[:, None]))
            if self.starting_affine is None:
                self.samples_prealigned = self.samples
            else:
                self.samples_prealigned =\
                    self.starting_affine.dot(self.samples.T).T
            # Sample the static image
            static_p = self.static_world2grid.dot(self.samples.T).T
            static_p = static_p[..., :self.dim]
            self.static_vals, inside = self.interp_method(static, static_p)
            self.static_vals = np.array(self.static_vals, dtype=np.float64)
        self.histogram.setup(self.static, self.moving)
    def _update_histogram(self):
        r""" Updates the histogram according to the current affine transform
        The current affine transform is given by `self.affine_map`, which
        must be set before calling this method.
        Returns
        -------
        static_values: array, shape(n,) if sparse sampling is being used,
                       array, shape(S, R, C) or (R, C) if dense sampling
            the intensity values corresponding to the static image used to
            update the histogram. If sparse sampling is being used, then
            it is simply a sequence of scalars, obtained by sampling the static
            image at the `n` sampling points. If dense sampling is being used,
            then the intensities are given directly by the static image,
            whose shape is (S, R, C) in the 3D case or (R, C) in the 2D case.
        moving_values: array, shape(n,) if sparse sampling is being used,
                       array, shape(S, R, C) or (R, C) if dense sampling
            the intensity values corresponding to the moving image used to
            update the histogram. If sparse sampling is being used, then
            it is simply a sequence of scalars, obtained by sampling the moving
            image at the `n` sampling points (mapped to the moving space by the
            current affine transform). If dense sampling is being used,
            then the intensities are given by the moving imaged linearly
            transformed towards the static image by the current affine, which
            results in an image of the same shape as the static image.
        """
        static_values = None
        moving_values = None
        if self.sampling_proportion is None:  # Dense case
            static_values = self.static
            # Warp the whole moving image onto the static grid
            moving_values = self.affine_map.transform(self.moving)
            self.histogram.update_pdfs_dense(static_values, moving_values)
        else:  # Sparse case
            # Compose world-to-moving-grid with the current affine so the
            # world-space sample points land on the moving image's grid
            sp_to_moving = self.moving_world2grid.dot(self.affine_map.affine)
            pts = sp_to_moving.dot(self.samples.T).T  # Points on moving grid
            pts = pts[..., :self.dim]
            # NOTE(review): `inside` (flags for samples outside the moving
            # grid) is discarded here -- confirm that is intended
            self.moving_vals, inside = self.interp_method(self.moving, pts)
            self.moving_vals = np.array(self.moving_vals)
            static_values = self.static_vals
            moving_values = self.moving_vals
            self.histogram.update_pdfs_sparse(static_values, moving_values)
        return static_values, moving_values
def _update_mutual_information(self, params, update_gradient=True):
    r""" Updates marginal and joint distributions and the joint gradient

    The distributions are updated according to the static and transformed
    images. The transformed image is precisely the moving image after
    transforming it by the transform defined by the `params` parameters.
    The gradient of the joint PDF is computed only if update_gradient
    is True.

    Parameters
    ----------
    params : array, shape (n,)
        the parameter vector of the transform currently used by the metric
        (the transform name is provided when self.setup is called), n is
        the number of parameters of the transform
    update_gradient : Boolean, optional
        if True, the gradient of the joint PDF will also be computed,
        otherwise, only the marginal and joint PDFs will be computed.
        The default is True.
    """
    # Get the matrix associated with the `params` parameter vector
    current_affine = self.transform.param_to_matrix(params)
    # Get the static-to-prealigned matrix (only needed for the MI gradient)
    static2prealigned = self.static_grid2world
    if self.starting_affine is not None:
        # Compose with the pre-alignment so that `current_affine` maps all
        # the way from static space to moving space
        current_affine = current_affine.dot(self.starting_affine)
        static2prealigned = self.starting_affine.dot(static2prealigned)
    self.affine_map.set_affine(current_affine)

    # Update the histogram with the current joint intensities
    static_values, moving_values = self._update_histogram()

    H = self.histogram  # Shortcut to `self.histogram`
    grad = None  # Buffer to write the MI gradient into (if needed)
    if update_gradient:
        # Re-allocate buffer for the gradient, if needed
        n = params.shape[0]  # Number of parameters
        if (self.metric_grad is None) or (self.metric_grad.shape[0] != n):
            self.metric_grad = np.empty(n)
        grad = self.metric_grad

        # Compute the gradient of the joint PDF w.r.t. parameters
        if self.sampling_proportion is None:  # Dense case
            # Compute the gradient of moving img. at physical points
            # associated with the >>static image's grid<< cells
            # The image gradient must be eval. at current moved points
            grid_to_world = current_affine.dot(self.static_grid2world)
            mgrad, inside = vf.gradient(self.moving,
                                        self.moving_world2grid,
                                        self.moving_spacing,
                                        self.static.shape,
                                        grid_to_world)
            # The Jacobian must be evaluated at the pre-aligned points
            H.update_gradient_dense(params, self.transform, static_values,
                                    moving_values, static2prealigned, mgrad)
        else:  # Sparse case
            # Compute the gradient of moving at the sampling points
            # which are already given in physical space coordinates
            pts = current_affine.dot(self.samples.T).T  # Moved points
            mgrad, inside = vf.sparse_gradient(self.moving,
                                               self.moving_world2grid,
                                               self.moving_spacing,
                                               pts)
            # The Jacobian must be evaluated at the pre-aligned points
            pts = self.samples_prealigned[..., :self.dim]
            H.update_gradient_sparse(params, self.transform, static_values,
                                     moving_values, pts, mgrad)

    # Call the cythonized MI computation with self.histogram fields
    # (writes the gradient into `grad` in place when it is not None)
    self.metric_val = compute_parzen_mi(H.joint, H.joint_grad,
                                        H.smarginal, H.mmarginal,
                                        grad)
def distance(self, params):
    r""" Negative Mutual Information for the given transform parameters

    Standard optimizers minimize, so the Mutual Information is returned
    with its sign flipped.

    Parameters
    ----------
    params : array, shape (n,)
        the parameter vector of the transform currently used by the metric
        (the transform name is provided when self.setup is called); n is
        the number of parameters of the transform

    Returns
    -------
    neg_mi : float
        the negative mutual information of the input images after
        transforming the moving image by the currently set transform
        with `params` parameters
    """
    try:
        # Distributions only; the gradient is not needed here
        self._update_mutual_information(params, False)
    except AffineInversionError:
        # Non-invertible affine: report the worst possible value
        return np.inf
    neg_mi = -1 * self.metric_val
    return neg_mi
def gradient(self, params):
    r""" Gradient of the negative Mutual Information at the given parameters

    Parameters
    ----------
    params : array, shape (n,)
        the parameter vector of the transform currently used by the metric
        (the transform name is provided when self.setup is called); n is
        the number of parameters of the transform

    Returns
    -------
    grad : array, shape (n,)
        the gradient of the negative Mutual Information
    """
    try:
        # Update the distributions and the joint-PDF gradient
        self._update_mutual_information(params, True)
    except AffineInversionError:
        # Non-invertible affine: a zero gradient of the right shape
        return 0 * self.metric_grad
    grad = -1 * self.metric_grad
    return grad
def distance_and_gradient(self, params):
    r""" Negative Mutual Information and its gradient at the given parameters

    Parameters
    ----------
    params : array, shape (n,)
        the parameter vector of the transform currently used by the metric
        (the transform name is provided when self.setup is called); n is
        the number of parameters of the transform

    Returns
    -------
    neg_mi : float
        the negative mutual information of the input images after
        transforming the moving image by the currently set transform
        with `params` parameters
    neg_mi_grad : array, shape (n,)
        the gradient of the negative Mutual Information
    """
    try:
        self._update_mutual_information(params, True)
    except AffineInversionError:
        # Non-invertible affine: worst value plus a zero gradient
        return np.inf, 0 * self.metric_grad
    neg_mi = -1 * self.metric_val
    neg_mi_grad = -1 * self.metric_grad
    return neg_mi, neg_mi_grad
class AffineRegistration(object):
    r"""Multi-resolution affine registration driver.

    Optimizes the parameters of an affine transform to register a moving
    image towards a static image, iterating from the coarsest to the
    finest level of a Gaussian scale space and using the configured
    similarity metric (Mutual Information by default).
    """

    def __init__(self,
                 metric=None,
                 level_iters=None,
                 sigmas=None,
                 factors=None,
                 method='L-BFGS-B',
                 ss_sigma_factor=None,
                 options=None):
        r""" Initializes an instance of the AffineRegistration class

        Parameters
        ----------
        metric : None or object, optional
            an instance of a metric. The default is None, implying
            the Mutual Information metric with default settings.
        level_iters : sequence, optional
            the number of iterations at each scale of the scale space.
            `level_iters[0]` corresponds to the coarsest scale,
            `level_iters[-1]` to the finest. By default, a 3-level scale
            space with iterations
            sequence equal to [10000, 1000, 100] will be used.
        sigmas : sequence of floats, optional
            custom smoothing parameter to build the scale space (one parameter
            for each scale). By default, the sequence of sigmas will be
            [3, 1, 0].
        factors : sequence of floats, optional
            custom scale factors to build the scale space (one factor for each
            scale). By default, the sequence of factors will be [4, 2, 1].
        method : string, optional
            optimization method to be used. If Scipy version < 0.12, then
            only L-BFGS-B is available. Otherwise, `method` can be any
            gradient-based method available in `dipy.core.Optimize`: CG, BFGS,
            Newton-CG, dogleg or trust-ncg.
            The default is 'L-BFGS-B'.
        ss_sigma_factor : float, optional
            If None, this parameter is not used and an isotropic scale
            space with the given `factors` and `sigmas` will be built.
            If not None, an anisotropic scale space will be used by
            automatically selecting the smoothing sigmas along each axis
            according to the voxel dimensions of the given image.
            The `ss_sigma_factor` is used to scale the automatically computed
            sigmas. For example, in the isotropic case, the sigma of the
            kernel will be $factor * (2 ^ i)$ where
            $i = 1, 2, ..., n_scales - 1$ is the scale (the finest resolution
            image $i=0$ is never smoothed). The default is None.
        options : dict, optional
            extra optimization options. The default is None, implying
            no extra options are passed to the optimizer.
        """
        self.metric = metric
        if self.metric is None:
            # Default similarity metric: Mutual Information
            self.metric = MutualInformationMetric()

        if level_iters is None:
            level_iters = [10000, 1000, 100]
        self.level_iters = level_iters
        self.levels = len(level_iters)
        if self.levels == 0:
            raise ValueError('The iterations sequence cannot be empty')

        self.options = options
        self.method = method
        if ss_sigma_factor is not None:
            # Anisotropic scale space: sigmas derived from voxel dimensions
            self.use_isotropic = False
            self.ss_sigma_factor = ss_sigma_factor
        else:
            # Isotropic scale space built from explicit factors and sigmas
            self.use_isotropic = True
            if factors is None:
                factors = [4, 2, 1]
            if sigmas is None:
                sigmas = [3, 1, 0]
            self.factors = factors
            self.sigmas = sigmas
        self.verbosity = VerbosityLevels.STATUS

    def _init_optimizer(self, static, moving, transform, params0,
                        static_grid2world, moving_grid2world,
                        starting_affine):
        r"""Initializes the registration optimizer

        Initializes the optimizer by computing the scale space of the input
        images

        Parameters
        ----------
        static : array, shape (S, R, C) or (R, C)
            the image to be used as reference during optimization.
        moving : array, shape (S', R', C') or (R', C')
            the image to be used as "moving" during optimization. The
            dimensions of the static (S, R, C) and moving (S', R', C') images
            do not need to be the same.
        transform : instance of Transform
            the transformation with respect to whose parameters the gradient
            must be computed
        params0 : array, shape (n,)
            parameters from which to start the optimization. If None, the
            optimization will start at the identity transform. n is the
            number of parameters of the specified transformation.
        static_grid2world : array, shape (dim+1, dim+1)
            the voxel-to-space transformation associated with the static image
        moving_grid2world : array, shape (dim+1, dim+1)
            the voxel-to-space transformation associated with the moving image
        starting_affine : string, or matrix, or None
            If string:
                'mass': align centers of gravity
                'voxel-origin': align physical coordinates of voxel (0,0,0)
                'centers': align physical coordinates of central voxels
            If matrix:
                array, shape (dim+1, dim+1)
            If None:
                Start from identity
        """
        self.dim = len(static.shape)
        self.transform = transform
        n = transform.get_number_of_parameters()
        self.nparams = n

        if params0 is None:
            params0 = self.transform.get_identity_parameters()
        self.params0 = params0
        # Resolve the initial affine: identity, a named alignment
        # heuristic, or a user-supplied matrix
        if starting_affine is None:
            self.starting_affine = np.eye(self.dim + 1)
        elif starting_affine == 'mass':
            affine_map = align_centers_of_mass(static,
                                               static_grid2world,
                                               moving,
                                               moving_grid2world)
            self.starting_affine = affine_map.affine
        elif starting_affine == 'voxel-origin':
            affine_map = align_origins(static, static_grid2world,
                                       moving, moving_grid2world)
            self.starting_affine = affine_map.affine
        elif starting_affine == 'centers':
            affine_map = align_geometric_centers(static,
                                                 static_grid2world,
                                                 moving,
                                                 moving_grid2world)
            self.starting_affine = affine_map.affine
        elif (isinstance(starting_affine, np.ndarray) and
              starting_affine.shape >= (self.dim, self.dim + 1)):
            self.starting_affine = starting_affine
        else:
            raise ValueError('Invalid starting_affine matrix')

        # Extract information from affine matrices to create the scale space
        static_direction, static_spacing = \
            get_direction_and_spacings(static_grid2world, self.dim)
        moving_direction, moving_spacing = \
            get_direction_and_spacings(moving_grid2world, self.dim)

        # Normalize both intensity ranges to [0, 1] before smoothing
        static = ((static.astype(np.float64) - static.min()) /
                  (static.max() - static.min()))
        moving = ((moving.astype(np.float64) - moving.min()) /
                  (moving.max() - moving.min()))

        # Build the scale space of the input images
        if self.use_isotropic:
            self.moving_ss = IsotropicScaleSpace(moving, self.factors,
                                                 self.sigmas,
                                                 moving_grid2world,
                                                 moving_spacing, False)
            self.static_ss = IsotropicScaleSpace(static, self.factors,
                                                 self.sigmas,
                                                 static_grid2world,
                                                 static_spacing, False)
        else:
            self.moving_ss = ScaleSpace(moving, self.levels, moving_grid2world,
                                        moving_spacing, self.ss_sigma_factor,
                                        False)
            self.static_ss = ScaleSpace(static, self.levels, static_grid2world,
                                        static_spacing, self.ss_sigma_factor,
                                        False)

    def optimize(self, static, moving, transform, params0,
                 static_grid2world=None, moving_grid2world=None,
                 starting_affine=None):
        r''' Starts the optimization process

        Parameters
        ----------
        static : array, shape (S, R, C) or (R, C)
            the image to be used as reference during optimization.
        moving : array, shape (S', R', C') or (R', C')
            the image to be used as "moving" during optimization. It is
            necessary to pre-align the moving image to ensure its domain
            lies inside the domain of the deformation fields. This is assumed
            to be accomplished by "pre-aligning" the moving image towards the
            static using an affine transformation given by the
            'starting_affine' matrix
        transform : instance of Transform
            the transformation with respect to whose parameters the gradient
            must be computed
        params0 : array, shape (n,)
            parameters from which to start the optimization. If None, the
            optimization will start at the identity transform. n is the
            number of parameters of the specified transformation.
        static_grid2world : array, shape (dim+1, dim+1), optional
            the voxel-to-space transformation associated with the static
            image. The default is None, implying the transform is the
            identity.
        moving_grid2world : array, shape (dim+1, dim+1), optional
            the voxel-to-space transformation associated with the moving
            image. The default is None, implying the transform is the
            identity.
        starting_affine : string, or matrix, or None, optional
            If string:
                'mass': align centers of gravity
                'voxel-origin': align physical coordinates of voxel (0,0,0)
                'centers': align physical coordinates of central voxels
            If matrix:
                array, shape (dim+1, dim+1).
            If None:
                Start from identity.
            The default is None.

        Returns
        -------
        affine_map : instance of AffineMap
            the resulting affine transformation
        '''
        self._init_optimizer(static, moving, transform, params0,
                             static_grid2world, moving_grid2world,
                             starting_affine)
        del starting_affine  # Now we must refer to self.starting_affine

        # Multi-resolution iterations
        original_static_shape = self.static_ss.get_image(0).shape
        original_static_grid2world = self.static_ss.get_affine(0)
        original_moving_shape = self.moving_ss.get_image(0).shape
        original_moving_grid2world = self.moving_ss.get_affine(0)
        affine_map = AffineMap(None,
                               original_static_shape,
                               original_static_grid2world,
                               original_moving_shape,
                               original_moving_grid2world)

        # Iterate from the coarsest level (levels-1) down to the finest (0)
        for level in range(self.levels - 1, -1, -1):
            self.current_level = level
            # level_iters[0] is the coarsest, hence the reversed indexing
            max_iter = self.level_iters[-1 - level]
            if self.verbosity >= VerbosityLevels.STATUS:
                print('Optimizing level %d [max iter: %d]' % (level, max_iter))

            # Resample the smooth static image to the shape of this level
            smooth_static = self.static_ss.get_image(level)
            current_static_shape = self.static_ss.get_domain_shape(level)
            current_static_grid2world = self.static_ss.get_affine(level)
            current_affine_map = AffineMap(None,
                                           current_static_shape,
                                           current_static_grid2world,
                                           original_static_shape,
                                           original_static_grid2world)
            current_static = current_affine_map.transform(smooth_static)

            # The moving image is full resolution
            current_moving_grid2world = original_moving_grid2world
            current_moving = self.moving_ss.get_image(level)

            # Prepare the metric for iterations at this resolution
            self.metric.setup(transform, current_static, current_moving,
                              current_static_grid2world,
                              current_moving_grid2world, self.starting_affine)

            # Optimize this level
            if self.options is None:
                self.options = {'gtol': 1e-4,
                                'disp': False}

            # NOTE(review): this mutates the shared self.options dict, so
            # the maxfun/maxiter setting persists across calls to optimize
            if self.method == 'L-BFGS-B':
                self.options['maxfun'] = max_iter
            else:
                self.options['maxiter'] = max_iter

            if SCIPY_LESS_0_12:
                # Older versions don't expect value and gradient from
                # the same function
                opt = Optimizer(self.metric.distance, self.params0,
                                method=self.method, jac=self.metric.gradient,
                                options=self.options)
            else:
                opt = Optimizer(self.metric.distance_and_gradient, self.params0,
                                method=self.method, jac=True,
                                options=self.options)
            params = opt.xopt

            # Update starting_affine matrix with optimal parameters
            T = self.transform.param_to_matrix(params)
            self.starting_affine = T.dot(self.starting_affine)

            # Start next iteration at identity
            self.params0 = self.transform.get_identity_parameters()

        affine_map.set_affine(self.starting_affine)
        return affine_map
def align_centers_of_mass(static, static_grid2world,
                          moving, moving_grid2world):
    r""" Transformation to align the center of mass of the input images

    Parameters
    ----------
    static : array, shape (S, R, C)
        static image
    static_grid2world : array, shape (dim+1, dim+1)
        the voxel-to-space transformation of the static image
    moving : array, shape (S, R, C)
        moving image
    moving_grid2world : array, shape (dim+1, dim+1)
        the voxel-to-space transformation of the moving image

    Returns
    -------
    affine_map : instance of AffineMap
        the affine transformation (translation only, in this case) aligning
        the center of mass of the moving image towards the one of the static
        image
    """
    dim = len(static.shape)
    if static_grid2world is None:
        static_grid2world = np.eye(dim + 1)
    if moving_grid2world is None:
        moving_grid2world = np.eye(dim + 1)
    # Centers of mass mapped to physical (world) coordinates
    com_static = ndimage.measurements.center_of_mass(np.array(static))
    com_static = static_grid2world.dot(com_static + (1,))
    com_moving = ndimage.measurements.center_of_mass(np.array(moving))
    com_moving = moving_grid2world.dot(com_moving + (1,))
    # Translation-only affine bringing the moving center onto the static one
    transform = np.eye(dim + 1)
    transform[:dim, dim] = com_moving[:dim] - com_static[:dim]
    return AffineMap(transform,
                     static.shape, static_grid2world,
                     moving.shape, moving_grid2world)
def align_geometric_centers(static, static_grid2world,
                            moving, moving_grid2world):
    r""" Transformation to align the geometric center of the input images

    With "geometric center" of a volume we mean the physical coordinates of
    its central voxel

    Parameters
    ----------
    static : array, shape (S, R, C)
        static image
    static_grid2world : array, shape (dim+1, dim+1)
        the voxel-to-space transformation of the static image
    moving : array, shape (S, R, C)
        moving image
    moving_grid2world : array, shape (dim+1, dim+1)
        the voxel-to-space transformation of the moving image

    Returns
    -------
    affine_map : instance of AffineMap
        the affine transformation (translation only, in this case) aligning
        the geometric center of the moving image towards the one of the static
        image
    """
    dim = len(static.shape)
    if static_grid2world is None:
        static_grid2world = np.eye(dim + 1)
    if moving_grid2world is None:
        moving_grid2world = np.eye(dim + 1)
    # Physical coordinates of the central voxel of each volume
    center_static = tuple(np.array(static.shape, dtype=np.float64) * 0.5)
    center_static = static_grid2world.dot(center_static + (1,))
    center_moving = tuple(np.array(moving.shape, dtype=np.float64) * 0.5)
    center_moving = moving_grid2world.dot(center_moving + (1,))
    # Translation-only affine bringing one center onto the other
    transform = np.eye(dim + 1)
    transform[:dim, dim] = center_moving[:dim] - center_static[:dim]
    return AffineMap(transform,
                     static.shape, static_grid2world,
                     moving.shape, moving_grid2world)
def align_origins(static, static_grid2world,
                  moving, moving_grid2world):
    r""" Transformation to align the origins of the input images

    With "origin" of a volume we mean the physical coordinates of
    voxel (0,0,0)

    Parameters
    ----------
    static : array, shape (S, R, C)
        static image
    static_grid2world : array, shape (dim+1, dim+1)
        the voxel-to-space transformation of the static image
    moving : array, shape (S, R, C)
        moving image
    moving_grid2world : array, shape (dim+1, dim+1)
        the voxel-to-space transformation of the moving image

    Returns
    -------
    affine_map : instance of AffineMap
        the affine transformation (translation only, in this case) aligning
        the origin of the moving image towards the one of the static
        image
    """
    dim = len(static.shape)
    if static_grid2world is None:
        static_grid2world = np.eye(dim + 1)
    if moving_grid2world is None:
        moving_grid2world = np.eye(dim + 1)
    # The world-space origin is the translation column of each matrix
    origin_static = static_grid2world[:dim, dim]
    origin_moving = moving_grid2world[:dim, dim]
    transform = np.eye(dim + 1)
    transform[:dim, dim] = (origin_moving - origin_static)[:dim]
    return AffineMap(transform,
                     static.shape, static_grid2world,
                     moving.shape, moving_grid2world)
| jyeatman/dipy | dipy/align/imaffine.py | Python | bsd-3-clause | 51,745 | [
"Gaussian"
] | 2715603cff1caf09edffa330cde53cada230f0d38d01715a5a78424ec3c82a79 |
# Version: 0.21
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/python-versioneer/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible with: Python 3.6, 3.7, 3.8, 3.9 and pypy3
* [![Latest Version][pypi-image]][pypi-url]
* [![Build Status][travis-image]][travis-url]
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see [Install](INSTALL.md))
* run `versioneer install` in your source tree, commit the results
* Verify version information with `python setup.py version`
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/python-versioneer/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## Similar projects
* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time
dependency
* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of
versioneer
* [versioningit](https://github.com/jwodder/versioningit) - a PEP 518-based setuptools
plugin
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg
[pypi-url]: https://pypi.python.org/pypi/versioneer/
[travis-image]:
https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg
[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer
"""
# pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring
# pylint:disable=missing-class-docstring,too-many-branches,too-many-statements
# pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error
# pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with
# pylint:disable=attribute-defined-outside-init,too-many-arguments
import configparser
import errno
import json
import os
import re
import subprocess
import sys
from typing import Callable, Dict
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    This is a bare attribute holder: no attributes are declared here;
    presumably they (VCS, style, tag prefix, etc.) are assigned by the
    configuration-reading code elsewhere in this module -- confirm against
    `get_config_from_root`.
    """
def get_root():
    """Get the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .

    Returns:
        The absolute, symlink-resolved path of the project root.

    Raises:
        VersioneerBadRootError: if neither the current working directory nor
            the directory containing sys.argv[0] holds a setup.py or
            versioneer.py.
    """
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # Message fixed: previously read "unable to run the project root
        # directory", which was ungrammatical and obscured the actual
        # problem (the root could not be *found*).
        err = (
            "Versioneer was unable to find the project root directory. "
            "Versioneer requires setup.py to be executed from "
            "its immediate directory (like 'python setup.py COMMAND'), "
            "or in a way that lets it use sys.argv[0] to find the root "
            "(like 'python path/to/setup.py COMMAND')."
        )
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        my_path = os.path.realpath(os.path.abspath(__file__))
        me_dir = os.path.normcase(os.path.splitext(my_path)[0])
        vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
        if me_dir != vsr_dir:
            print(
                "Warning: build in %s is using versioneer.py from %s"
                % (os.path.dirname(my_path), versioneer_py)
            )
    except NameError:
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config."""
    # May raise OSError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    parser = configparser.ConfigParser()
    with open(os.path.join(root, "setup.cfg"), "r") as cfg_file:
        parser.read_file(cfg_file)
    vcs = parser.get("versioneer", "VCS")  # mandatory
    # Everything else is optional; read it through the section's
    # dict-like interface so missing keys become None (or a default).
    section = parser["versioneer"]
    cfg = VersioneerConfig()
    cfg.VCS = vcs
    cfg.style = section.get("style", "")
    cfg.versionfile_source = section.get("versionfile_source")
    cfg.versionfile_build = section.get("versionfile_build")
    tag_prefix = section.get("tag_prefix")
    # An explicitly quoted empty prefix ('' or "") means "no prefix".
    cfg.tag_prefix = "" if tag_prefix in ("''", '""') else tag_prefix
    cfg.parentdir_prefix = section.get("parentdir_prefix")
    cfg.verbose = section.get("verbose")
    return cfg
class NotThisMethod(Exception):
    """Raised when a particular version-discovery method does not apply.

    Callers try each discovery strategy in turn and treat this exception
    as "move on to the next one".
    """
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}


def register_vcs_handler(vcs, method):  # decorator
    """Return a decorator that records a function as HANDLERS[vcs][method]."""

    def decorate(f):
        """File f under the (vcs, method) slot and hand it back unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f

    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Run the first launchable name in *commands* with *args*.

    Returns a (stdout, returncode) pair.  (None, None) means no candidate
    command could be started at all; (None, returncode) means the command
    ran but exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        argv = [candidate] + args
        display = str(argv)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                argv,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE if hide_stderr else None,
            )
        except OSError:
            exc = sys.exc_info()[1]
            if exc.errno == errno.ENOENT:
                # This particular name isn't installed; try the next one.
                continue
            if verbose:
                print("unable to run %s" % display)
                print(exc)
            return None, None
        break
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % display)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
LONG_VERSION_PY[
"git"
] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.21 (https://github.com/python-versioneer/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}
def register_vcs_handler(vcs, method): # decorator
"""Create decorator to mark a method as the handler of a VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
process = None
for command in commands:
try:
dispcmd = str([command] + args)
# remember shell=False, so use git.cmd on windows, not just git
process = subprocess.Popen([command] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = process.communicate()[0].strip().decode()
if process.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, process.returncode
return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r'\d', r):
continue
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
TAG_PREFIX_REGEX = "*"
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
TAG_PREFIX_REGEX = r"\*"
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match",
"%%s%%s" %% (tag_prefix, TAG_PREFIX_REGEX)],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def pep440_split_post(ver):
"""Split pep440 version string at the post-release segment.
Returns the release segments before the post-release and the
post-release version number (or -1 if no post-release segment is present).
"""
vc = str.split(ver, ".post")
return vc[0], int(vc[1] or 0) if len(vc) == 2 else None
def render_pep440_pre(pieces):
"""TAG[.postN.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post0.devDISTANCE
"""
if pieces["closest-tag"]:
if pieces["distance"]:
# update the post release segment
tag_version, post_version = pep440_split_post(pieces["closest-tag"])
rendered = tag_version
if post_version is not None:
rendered += ".post%%d.dev%%d" %% (post_version+1, pieces["distance"])
else:
rendered += ".post0.dev%%d" %% (pieces["distance"])
else:
# no commits, use the tag as the version
rendered = pieces["closest-tag"]
else:
# exception #1
rendered = "0.post0.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_post_branch(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .
The ".dev0" means not master branch.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-branch":
rendered = render_pep440_branch(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-post-branch":
rendered = render_pep440_post_branch(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for _ in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as fobj:
for line in fobj:
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except OSError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if "refnames" not in keywords:
raise NotThisMethod("Short version file found")
date = keywords.get("date")
if date is not None:
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
# Filter out refs that exactly match prefix or that don't start
# with a number once the prefix is stripped (mostly a concern
# when prefix is '')
if not re.match(r"\d", r):
continue
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
TAG_PREFIX_REGEX = "*"
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
TAG_PREFIX_REGEX = r"\*"
_, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = runner(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s%s" % (tag_prefix, TAG_PREFIX_REGEX),
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], cwd=root)
# --abbrev-ref was added in git-1.6.3
if rc != 0 or branch_name is None:
raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
branch_name = branch_name.strip()
if branch_name == "HEAD":
# If we aren't exactly on a branch, pick a branch which represents
# the current commit. If all else fails, we are on a branchless
# commit.
branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
# --contains was added in git-1.5.4
if rc != 0 or branches is None:
raise NotThisMethod("'git branch --contains' returned error")
branches = branches.split("\n")
# Remove the first line if we're running detached
if "(" in branches[0]:
branches.pop(0)
# Strip off the leading "* " from the list of branches.
branches = [branch[2:] for branch in branches]
if "master" in branches:
branch_name = "master"
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces["branch"] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparsable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = runner(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
# Use only the last line. Previous lines may contain GPG signature
# information.
date = date.splitlines()[-1]
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.

    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-subst keyword substitution.
    """
    GITS = ["git.cmd", "git.exe"] if sys.platform == "win32" else ["git"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        me = __file__
        # If we're running from a compiled file, point at the .py source.
        if me.endswith(".pyc") or me.endswith(".pyo"):
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    # Only touch .gitattributes when the export-subst marker is missing.
    present = False
    try:
        with open(".gitattributes", "r") as fobj:
            for line in fobj:
                stripped = line.strip()
                if stripped.startswith(versionfile_source):
                    if "export-subst" in stripped.split()[1:]:
                        present = True
                        break
    except OSError:
        # No .gitattributes yet; we'll create one below.
        pass
    if not present:
        with open(".gitattributes", "a+") as fobj:
            fobj.write(f"{versionfile_source} export-subst\n")
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string. We also support searching
    up two directory levels for an appropriately named parent directory.
    """
    tried = []
    for _level in range(3):
        leaf = os.path.basename(root)
        if leaf.startswith(parentdir_prefix):
            # Everything after the prefix is taken as the version string.
            return {
                "version": leaf[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # climb one level and retry
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.21) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
    """Try to determine the version from _version.py if present."""
    try:
        with open(filename) as fobj:
            text = fobj.read()
    except OSError:
        raise NotThisMethod("unable to read _version.py")
    # The file may have been written with Unix or Windows line endings;
    # try the LF form first, then CRLF.
    for newline in ("\n", "\r\n"):
        match = re.search(
            r"version_json = '''" + newline + r"(.*)''' # END VERSION_JSON",
            text, re.M | re.S
        )
        if match:
            return json.loads(match.group(1))
    raise NotThisMethod("no version_json in _version.py")
def write_to_version_file(filename, versions):
    """Write the given version number to the given _version.py file."""
    # Remove the old file first so we always start from a clean slate.
    os.unlink(filename)
    payload = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": "))
    with open(filename, "w") as fobj:
        fobj.write(SHORT_VERSION_PY % payload)
    print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
    """Return a "+" if we don't already have one, else return a "." .

    PEP 440 local version identifiers start with a single "+"; subsequent
    segments are joined with ".".  Guards against "closest-tag" being
    present with a None value: dict.get's default only applies when the
    key is *missing*, so the original `pieces.get("closest-tag", "")`
    could hand None to the `in` test and raise TypeError.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] .  Note that if you get a tagged
    build and then dirty it, you'll get TAG+0.gHEX.dirty.

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: synthesize a local version from scratch
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_branch(pieces):
    """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .

    The ".dev0" means not master branch.  Note that .dev0 sorts backwards
    (a feature branch will appear "older" than the master branch).

    Exceptions:
    1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    off_master = pieces["branch"] != "master"
    if not tag:
        # exception #1
        version = "0"
        if off_master:
            version += ".dev0"
        version += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        if off_master:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def pep440_split_post(ver):
    """Split a pep440 version string at its post-release segment.

    Returns (release, post): the release segments before ".post" and the
    integer post-release number, or None when no post-release segment is
    present.  (The previous docstring claimed -1 for the missing case,
    which contradicted the actual behavior.)  A bare trailing ".post"
    yields 0.
    """
    chunks = ver.split(".post")
    if len(chunks) != 2:
        # No post segment (or a malformed multi-.post string): no number.
        return chunks[0], None
    return chunks[0], int(chunks[1] or 0)
def render_pep440_pre(pieces):
    """TAG[.postN.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post0.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        # Sitting exactly on the tag: the tag itself is the version.
        return tag
    # Bump (or start) the post-release segment and append a .dev counter.
    base, post = pep440_split_post(tag)
    if post is None:
        return base + ".post0.dev%d" % pieces["distance"]
    return base + ".post%d.dev%d" % (post + 1, pieces["distance"])
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty.  Note that .dev0 sorts backwards (a dirty
    tree will appear "older" than the corresponding clean one), but you
    shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
    return version
def render_pep440_post_branch(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .

    The ".dev0" means not master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    off_master = pieces["branch"] != "master"
    if not tag:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if off_master:
            version += ".dev0"
        version += "+g%s" % pieces["short"]
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if off_master:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag and not (pieces["distance"] or pieces["dirty"]):
        # Clean build exactly on a tag: the tag is the version.
        return tag
    version = tag if tag else "0"
    version += ".post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: fall back to the bare short hash (no 'g' prefix)
        version = pieces["short"]
    return version + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # Distance and hash are always appended in the long form.
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # Rendering is meaningless when extraction failed; report the error.
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-branch": render_pep440_branch,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-post-branch": render_pep440_post_branch,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    try:
        renderer = renderers[style]
    except KeyError:
        raise ValueError("unknown style '%s'" % style)
    return {
        "version": renderer(pieces),
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
class VersioneerBadRootError(Exception):
    """The project root directory is unknown or missing key files.

    NOTE(review): presumably raised by get_root() (defined elsewhere in
    this file) when setup.py/versioneer.py cannot be located — confirm.
    """
def get_versions(verbose=False):
    """Get the project version from whatever source is available.

    Tries, in order: expanded VCS keywords, a previously written
    _version.py, the VCS itself, and finally the parent directory name.
    Returns dict with two keys: 'version' and 'full'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]
    root = get_root()
    cfg = get_config_from_root(root)
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert (
        cfg.versionfile_source is not None
    ), "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    versionfile_abs = os.path.join(root, cfg.versionfile_source)
    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    # Stage 1: expanded VCS keywords (covers 'git archive' tarballs).
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass
    # Stage 2: a previously written _version.py (sdist tarballs).
    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass
    # Stage 3: ask the VCS directly (e.g. 'git describe' in a checkout).
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass
    # Stage 4: parse the version out of the parent directory name.
    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass
    if verbose:
        print("unable to compute version")
    # All strategies failed: report an explicit "unknown" version dict.
    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
def get_version():
    """Get the short version string for this project."""
    versions = get_versions()
    return versions["version"]
def get_cmdclass(cmdclass=None):
    """Get the custom setuptools/distutils subclasses used by Versioneer.

    If the package uses a different cmdclass (e.g. one from numpy), it
    should be provided as an argument.
    """
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to it's pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/python-versioneer/python-versioneer/issues/52
    cmds = {} if cmdclass is None else cmdclass.copy()
    # we add "version" to both distutils and setuptools
    from distutils.core import Command
    class cmd_version(Command):
        description = "report generated version string"
        user_options = []
        boolean_options = []
        def initialize_options(self):
            pass
        def finalize_options(self):
            pass
        def run(self):
            # 'setup.py version' prints the computed version and any error.
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            print(" date: %s" % vers.get("date"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version
    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    # distutils/build -> build_py
    # distutils/install -> distutils/build ->..
    # setuptools/bdist_wheel -> distutils/install ->..
    # setuptools/bdist_egg -> distutils/install_lib -> build_py
    # setuptools/install -> bdist_egg ->..
    # setuptools/develop -> ?
    # pip install:
    # copies source tree to a tempdir before running egg_info/etc
    # if .git isn't copied too, 'git describe' will fail
    # then does setup.py bdist_wheel, or sometimes setup.py install
    # setup.py egg_info -> ?
    # we override different "build_py" commands for both environments
    if "build_py" in cmds:
        _build_py = cmds["build_py"]
    elif "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py
    class cmd_build_py(_build_py):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py
    if "build_ext" in cmds:
        _build_ext = cmds["build_ext"]
    elif "setuptools" in sys.modules:
        from setuptools.command.build_ext import build_ext as _build_ext
    else:
        from distutils.command.build_ext import build_ext as _build_ext
    class cmd_build_ext(_build_ext):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_ext.run(self)
            if self.inplace:
                # build_ext --inplace will only build extensions in
                # build/lib<..> dir with no _version.py to write to.
                # As in place builds will already have a _version.py
                # in the module dir, we do not need to write one.
                return
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile, versions)
    cmds["build_ext"] = cmd_build_ext
    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe
        # nczeczulin reports that py2exe won't like the pep440-style string
        # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
        # setup(console=[{
        # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
        # "product_version": versioneer.get_version(),
        # ...
        class cmd_build_exe(_build_exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                # Restore the template form of _version.py in the source
                # tree after the frozen build finishes.
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(
                        LONG
                        % {
                            "DOLLAR": "$",
                            "STYLE": cfg.style,
                            "TAG_PREFIX": cfg.tag_prefix,
                            "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                            "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        }
                    )
        cmds["build_exe"] = cmd_build_exe
        # NOTE(review): build_exe rewrites _version.py itself, so the
        # build_py override is dropped here — presumably to avoid
        # processing the version file twice; confirm against upstream.
        del cmds["build_py"]
    if "py2exe" in sys.modules:  # py2exe enabled?
        from py2exe.distutils_buildexe import py2exe as _py2exe
        class cmd_py2exe(_py2exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _py2exe.run(self)
                # Same restore-the-template dance as cmd_build_exe above.
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(
                        LONG
                        % {
                            "DOLLAR": "$",
                            "STYLE": cfg.style,
                            "TAG_PREFIX": cfg.tag_prefix,
                            "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                            "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        }
                    )
        cmds["py2exe"] = cmd_py2exe
    # we override different "sdist" commands for both environments
    if "sdist" in cmds:
        _sdist = cmds["sdist"]
    elif "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist
    class cmd_sdist(_sdist):
        def run(self):
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)
        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(
                target_versionfile, self._versioneer_generated_versions
            )
    cmds["sdist"] = cmd_sdist
    return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
OLD_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
INIT_PY_SNIPPET = """
from . import {0}
__version__ = {0}.get_versions()['version']
"""
def do_setup():
    """Do main VCS-independent setup function for installing Versioneer.

    Writes _version.py, updates the package __init__.py boilerplate,
    ensures MANIFEST.in covers the needed files, and applies VCS-specific
    changes.  Returns 0 on success, 1 on configuration error.
    """
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e:
        if isinstance(e, (OSError, configparser.NoSectionError)):
            # No config at all: append a commented-out sample section so the
            # user has a starting point.
            print("Adding sample versioneer config to setup.cfg", file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(
            LONG
            % {
                "DOLLAR": "$",
                "STYLE": cfg.style,
                "TAG_PREFIX": cfg.tag_prefix,
                "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                "VERSIONFILE_SOURCE": cfg.versionfile_source,
            }
        )
    # Keep the package __init__.py's version boilerplate up to date.
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except OSError:
            old = ""
        module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0]
        snippet = INIT_PY_SNIPPET.format(module)
        if OLD_SNIPPET in old:
            print(" replacing boilerplate in %s" % ipy)
            with open(ipy, "w") as f:
                f.write(old.replace(OLD_SNIPPET, snippet))
        elif snippet not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(snippet)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None
    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except OSError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(
            " appending versionfile_source ('%s') to MANIFEST.in"
            % cfg.versionfile_source
        )
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")
    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-subst keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Returns the number of problems found (0 means setup.py looks fine).
    """
    markers = {
        "import versioneer": "import",
        "versioneer.get_cmdclass()": "cmdclass",
        "versioneer.get_version()": "get_version",
    }
    found = set()
    setters = False
    errors = 0
    with open("setup.py", "r") as handle:
        for line in handle.readlines():
            for needle, label in markers.items():
                if needle in line:
                    found.add(label)
            # Old-style module-attribute configuration is no longer valid.
            if "versioneer.VCS" in line:
                setters = True
            if "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
if __name__ == "__main__":
    # CLI entry point: 'python versioneer.py setup' installs versioneer
    # into the project in the current directory and sanity-checks setup.py.
    cmd = sys.argv[1]
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
            sys.exit(1)
| geopandas/geopandas | versioneer.py | Python | bsd-3-clause | 80,049 | [
"Brian"
] | af355ac4100f79018973351a91141dbf00931ef07e29e633404ec033d8f5c89b |
#@+leo-ver=5-thin
#@+node:lee.20141224110313.46: * @file wsgi.py
#@@language python
#@@tabwidth -4
#@+<<decorations>>
#@+node:lee.20141215164031.47: ** <<decorations>>
import cherrypy
import os
from symbol import *
import random
from mako.lookup import TemplateLookup
#@-<<decorations>>
#@+others
#@+node:lee.20141215164031.48: ** folder setting
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # Running on OpenShift: use the platform-provided data/repo directories.
    data_dir = os.environ['OPENSHIFT_DATA_DIR']
    tmp_dir = data_dir + 'tmp'
    templates_dir = os.environ['OPENSHIFT_REPO_DIR'] + 'templates'
    static_dir = os.environ['OPENSHIFT_REPO_DIR'] + 'static'
    std_dir = os.environ['OPENSHIFT_REPO_DIR'] + 'std/'
else:
    # Running locally: keep every folder next to this file.
    data_dir = _curdir + "/local_data/"
    templates_dir = _curdir + "/templates"
    tmp_dir = data_dir + '/tmp'
    static_dir = _curdir + '/static'
    std_dir = _curdir + '/std/'
env = TemplateLookup(directories=[templates_dir], input_encoding = 'utf-8', output_encoding = 'utf-8', )
if not os.path.exists(data_dir):
    os.makedirs(data_dir)
if not os.path.exists(tmp_dir):
    os.makedirs(tmp_dir)
#@+node:lee.20141221203113.57: ** student setting
# NOTE(review): this section previously appeared twice in a row; the first
# copy (which set ta_mode = False) was dead code because the second copy
# immediately reassigned every name.  The duplicate was removed; effective
# values are unchanged (ta_mode stays True).
std_class = 'a'
std_list = None
ta_mode = True
ta_list = None
if std_class == 'a':
    std_list = [["403231{0:02d}".format(s), "active"] for s in range(1, 58)]
else:
    std_list = [['40031226', 'active'], ['40223216', 'active']] + [["403232{0:02d}".format(s), "active"] for s in range(1, 57)]
if ta_mode:
    # Entries are [name, status] *lists* (previously tuples): the module
    # loader below mutates entry[1] to 'inactive' when an import fails,
    # which would raise TypeError on tuples.
    ta_list = [['example', 'active'], ['example1', 'active'], ['example2', 'active']]
#@+node:lee.20141215164031.50: ** class Final
class Final(object):
    """CherryPy root application.

    Serves the index page listing all student/TA sub-applications that get
    mounted onto this object (via setattr) further down in this module.
    """
    #@+others
    #@+node:lee.20141215164031.51: *3* _cp_config
    # Per-app CherryPy config: UTF-8 responses plus file-backed sessions
    # stored under tmp_dir with a 60-minute timeout.
    _cp_config = {
        'tools.encode.encoding': 'utf-8',
        'tools.sessions.on' : True,
        'tools.sessions.storage_type' : 'file',
        'tools.sessions.locking' : 'early',
        'tools.sessions.storage_path' : tmp_dir,
        'tools.sessions.timeout' : 60,
    }
    #@+node:lee.20141215164031.52: *3* def index
    @cherrypy.expose
    def index(self):
        """Render the landing page with the list of student (and TA) links."""
        # get template
        tmpl = env.get_template('index.html')
        # student list 40323101 - 40323157
        # use 40323100 to demonstrate example
        if ta_mode:
            content_list = std_list + ta_list
        else:
            content_list = std_list
        return tmpl.render(title='index', students=content_list)
    #@-others
#@+node:lee.20141215164031.86: ** def error_page_404
# handle page 404
def error_page_404(status, message, traceback, version):
    """Custom CherryPy 404 handler: render the 404 template instead of the
    default error page (the handler arguments are required by CherryPy but
    unused here)."""
    tmpl = env.get_template('404.html')
    return tmpl.render(title='404')
cherrypy.config.update({'error_page.404': error_page_404})
#@+node:lee.20141221203113.43: ** import std module to root
root = Final()
import imp  # NOTE(review): imp is deprecated; importlib is the modern replacement
# Import every student module; any module that fails to import (or whose
# Application() constructor raises) is marked 'inactive' instead of
# aborting startup.
# The student number becomes the sub path, e.g. 127.0.0.1/40323100/
# Visiting a page that does not exist raises 404.
# 40323100 - 57; 40323100 is an example page.
for n, (std, status) in enumerate(std_list):
    try:
        mod = imp.load_source(std, std_dir + std_class + '%s.py' % std)
        setattr(root, std, mod.Application())
    except Exception:
        # Was a bare 'except:'; narrowed so SystemExit/KeyboardInterrupt
        # still propagate during startup.
        std_list[n][1] = 'inactive'
# import ta modules the same way
if ta_mode:
    for n, (std, status) in enumerate(ta_list):
        try:
            mod = imp.load_source(std, std_dir + '%s.py' % std)
            setattr(root, std, mod.Application())
        except Exception:
            ta_list[n][1] = 'inactive'
#@+node:lee.20141221203113.44: ** application_conf
# set up app conf
# Map /static to the on-disk static directory; everything else is handled
# by the handlers on `root`.
application_conf = {
    '/static': {
        'tools.staticdir.on': True,
        'tools.staticdir.dir': static_dir
    },
}
#@+node:lee.20141215164031.60: ** run env
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # Running on OpenShift: expose the WSGI application object for the platform.
    application = cherrypy.Application(root, config = application_conf)
else:
    # Running in any other environment: serve with CherryPy's built-in server.
    cherrypy.quickstart(root, config = application_conf)
#@-others
#@-leo
| dora40323106/2014cpa_final_project | wsgi.py | Python | gpl-3.0 | 4,218 | [
"VisIt"
] | 5f7e6d45be461ce5b564f8eb50329b8392e8a5dd6bf7e1ea9dcf1ad8920fec43 |
'''
This code is used only for calibration and testing to ensure the neuron models implemented using BRIAN simulator
have the same dynamics as those implemented by Yamazaki and Nagao (2012), which we base our model off of
'''
from pylab import *
class YamazakiNeuron(object):
    """Conductance-based integrate-and-fire neuron after Yamazaki & Nagao (2012).

    Reference implementation used to calibrate the BRIAN-based models:
    the membrane potential is integrated with forward Euler, and the
    synaptic/after-hyperpolarization conductances decay exponentially
    between input spikes.
    """
    def __init__(self, Vth, Cm, El, Eex, Einh, Eahp, gl, g_ex_, g_inh_,
                 g_ahp_, r_ex, r_inh, tau_ex, tau_inh, tau_ahp, I_spont,
                 dt):
        # scalar values
        # Vth: spike threshold; Cm: membrane capacitance; El/Eex/Einh/Eahp:
        # leak / excitatory / inhibitory / AHP reversal potentials;
        # gl and the g_*_ values are the corresponding peak conductances;
        # I_spont is a constant spontaneous input current.
        self.Vth, self.Cm, self.El, self.Eex = Vth, Cm, El, Eex
        self.Einh, self.Eahp, self.gl, self.g_ex_ = Einh, Eahp, gl, g_ex_
        self.g_inh_, self.g_ahp_ = g_inh_, g_ahp_
        self.I_spont = I_spont
        # vector values
        # r_ex / r_inh: per-presynaptic-source weighting vectors applied to
        # the PSP traces in update_psp() — presumably synaptic weights/rates;
        # TODO(review): confirm semantics against the calling code.
        self.r_ex = r_ex
        self.r_inh = r_inh
        self.tau_ex = tau_ex
        self.tau_inh = tau_inh
        self.tau_ahp = tau_ahp
        # decay
        # Yamazaki and Nagao implement the decay as exp(-dt/tau)
        # this is a numerical approximation to how BRIAN performs
        # decays with differential equations. Using (1-dt/tau)
        # exactly matches the results from BRIAN (since this is a
        # Taylor series approximation to exp(-dt/tau). If this isn't
        # used then the time constants have to be rescaled to get
        # Yamazaki and BRIAN to match up.
        if tau_ex is not None:
            self.decay_ex = exp(-dt/tau_ex)#1-dt/tau_ex #
        else:
            self.decay_ex = 0.
        if tau_inh is not None:
            self.decay_inh = exp(-dt/tau_inh)#1-dt/tau_inh #
        else:
            self.decay_inh = 0.
        self.decay_ahp = exp(-dt/tau_ahp)#1-dt/tau_ahp #
        # psp
        # Post-synaptic-potential traces, one entry per presynaptic source.
        self.psp_ex = zeros_like(r_ex)
        self.psp_inh = zeros_like(r_inh)
        self.g_ex = 0.
        self.g_inh = 0.
        self.g_ahp = 0.
        self.u = El  # membrane potential starts at the leak reversal
        self.dt = dt
        self.just_spiked = False
        if self.I_spont > 0.:
            # Take one integration step so u already reflects the
            # spontaneous current at t=0.
            self.update_u()
    def update_u(self):
        """Advance the membrane potential one forward-Euler step of dt."""
        dudt = (1./self.Cm)*(-self.gl * (self.u-self.El)
                             -self.g_ex_ * self.g_ex * (self.u-self.Eex)
                             -self.g_inh_ * self.g_inh * (self.u-self.Einh)
                             -self.g_ahp_ * self.g_ahp * (self.u-self.Eahp)
                             +self.I_spont
                             )
        self.u += dudt*self.dt
    def update_psp(self, ex_spike, inh_spike, w_ex, w_inh):
        """Decay the PSP traces, add incoming spikes, and recompute the
        weighted excitatory/inhibitory conductances."""
        self.psp_ex = self.psp_ex * self.decay_ex + ex_spike
        self.psp_inh = self.psp_inh * self.decay_inh + inh_spike
        self.g_ex = w_ex * dot(self.psp_ex.T, self.r_ex)
        self.g_inh = w_inh * dot(self.psp_inh.T, self.r_inh)
    def update(self, ex_spike, inh_spike, w_ex, w_inh, reset_V=True):
        """Advance the neuron one time step; return 1. on a spike, else 0.

        The AHP conductance is reset to 1.0 on the step after a spike and
        decays exponentially otherwise.  When reset_V is True the membrane
        potential is reset to El on threshold crossing.
        """
        self.update_psp(ex_spike, inh_spike, w_ex, w_inh)
        if self.just_spiked:
            self.g_ahp = 1.0
            self.just_spiked = False
        else:
            self.g_ahp *= self.decay_ahp
        self.update_u()
        if self.u >= self.Vth:
            if reset_V:
                self.u = self.El
            self.just_spiked = True
            return 1.
        else:
            return 0.
| blennon/MLI_PKJ_net | MLI_PKJ_net/yamazaki_neuron.py | Python | mit | 3,270 | [
"Brian",
"NEURON"
] | a0a6434a581f21fa000d71e848475b2cabe42dec05b14247845787efd8792ba0 |
from typing import Any, DefaultDict, Dict, List, Set, Tuple, TypeVar, Text, \
Union, Optional, Sequence, AbstractSet, Pattern, AnyStr, Callable, Iterable
from typing.re import Match
from zerver.lib.str_utils import NonBinaryStr
from django.db import models
from django.db.models.query import QuerySet
from django.db.models import Manager, CASCADE
from django.conf import settings
from django.contrib.auth.models import AbstractBaseUser, UserManager, \
PermissionsMixin
import django.contrib.auth
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator, MinLengthValidator, \
RegexValidator
from django.dispatch import receiver
from zerver.lib.cache import cache_with_key, flush_user_profile, flush_realm, \
user_profile_by_api_key_cache_key, \
user_profile_by_id_cache_key, user_profile_by_email_cache_key, \
user_profile_cache_key, generic_bulk_cached_fetch, cache_set, flush_stream, \
display_recipient_cache_key, cache_delete, active_user_ids_cache_key, \
get_stream_cache_key, active_user_dicts_in_realm_cache_key, \
bot_dicts_in_realm_cache_key, active_user_dict_fields, \
bot_dict_fields, flush_message, bot_profile_cache_key
from zerver.lib.utils import make_safe_digest, generate_random_token
from zerver.lib.str_utils import ModelReprMixin
from django.db import transaction
from django.utils.timezone import now as timezone_now
from django.contrib.sessions.models import Session
from zerver.lib.timestamp import datetime_to_timestamp
from django.db.models.signals import pre_save, post_save, post_delete
from django.utils.translation import ugettext_lazy as _
from zerver.lib import cache
from zerver.lib.validator import check_int, check_float, check_string, \
check_short_string
from django.utils.encoding import force_text
from bitfield import BitField
from bitfield.types import BitHandler
from collections import defaultdict
from datetime import timedelta
import pylibmc
import re
import logging
import sre_constants
import time
import datetime
import sys
# Size limits referenced throughout the models/views; the enforcement
# points are at call sites elsewhere, not in this module excerpt.
MAX_SUBJECT_LENGTH = 60
MAX_MESSAGE_LENGTH = 10000
MAX_LANGUAGE_ID_LENGTH = 50 # type: int
# Constrained TypeVar: accepts either an ordered sequence of stream names
# or an unordered set of them.
STREAM_NAMES = TypeVar('STREAM_NAMES', Sequence[Text], AbstractSet[Text])
def query_for_ids(query, user_ids, field):
    # type: (QuerySet, List[int], str) -> QuerySet
    '''
    This function optimizes searches of the form
    `user_profile_id in (1, 2, 3, 4)` by quickly
    building the where clauses. Profiling shows significant
    speedups over the normal Django-based approach.

    Use this very carefully!  Also, the caller should
    guard against empty lists of user_ids.
    '''
    assert user_ids
    # int() both validates and sanitizes each id before it is spliced
    # into the raw WHERE clause.
    id_csv = ', '.join(str(int(user_id)) for user_id in user_ids)
    where_clause = '%s in (%s)' % (field, id_csv)
    return query.extra(where=[where_clause])
# Doing 1000 remote cache requests to get_display_recipient is quite slow,
# so add a local in-process (per-request) cache in front of the remote
# cache; it is cleared between requests by flush_per_request_caches().
per_request_display_recipient_cache = {} # type: Dict[int, List[Dict[str, Any]]]
def get_display_recipient_by_id(recipient_id, recipient_type, recipient_type_id):
    # type: (int, int, Optional[int]) -> Union[Text, List[Dict[str, Any]]]
    """
    Describe a recipient, memoized in the per-request local cache.

    If the type is a stream, the type_id must be an int and a string
    (the stream name) is returned.  Otherwise, type_id may be None and
    a list of recipient dicts is returned.
    """
    try:
        return per_request_display_recipient_cache[recipient_id]
    except KeyError:
        pass
    fetched = get_display_recipient_remote_cache(
        recipient_id, recipient_type, recipient_type_id)
    per_request_display_recipient_cache[recipient_id] = fetched
    return fetched
def get_display_recipient(recipient):
    # type: (Recipient) -> Union[Text, List[Dict[str, Any]]]
    """Convenience wrapper: describe a Recipient row via the cached lookup."""
    return get_display_recipient_by_id(recipient.id, recipient.type, recipient.type_id)
def flush_per_request_caches():
    # type: () -> None
    # Discard the request-scoped caches (display recipients and realm
    # filters) by rebinding the module-level dicts to fresh ones.
    global per_request_display_recipient_cache
    per_request_display_recipient_cache = {}
    global per_request_realm_filters_cache
    per_request_realm_filters_cache = {}
@cache_with_key(lambda *args: display_recipient_cache_key(args[0]),
                timeout=3600*24*7)
def get_display_recipient_remote_cache(recipient_id, recipient_type, recipient_type_id):
    # type: (int, int, Optional[int]) -> Union[Text, List[Dict[str, Any]]]
    """
    Describe a recipient, consulting (and populating) the remote cache.

    For a stream this is the stream name as a string.  For a huddle or
    personal message it is a list of dicts about each recipient user.
    """
    if recipient_type == Recipient.STREAM:
        assert recipient_type_id is not None
        return Stream.objects.get(id=recipient_type_id).name
    # The main priority for ordering here is being deterministic.
    # Right now, we order by ID, which matches the ordering of user
    # names in the left sidebar.
    profiles = (UserProfile.objects.filter(subscription__recipient_id=recipient_id)
                .select_related()
                .order_by('id'))
    descriptions = []
    for profile in profiles:
        descriptions.append({'email': profile.email,
                             'full_name': profile.full_name,
                             'short_name': profile.short_name,
                             'id': profile.id,
                             'is_mirror_dummy': profile.is_mirror_dummy})
    return descriptions
def get_realm_emoji_cache_key(realm):
    # type: (Realm) -> Text
    """Remote-cache key under which the realm's emoji mapping is stored."""
    return u'realm_emoji:{}'.format(realm.id)
class Realm(ModelReprMixin, models.Model):
    """A Zulip organization: the top-level container owning users, streams,
    emoji, filters, and per-organization policy settings."""
    MAX_REALM_NAME_LENGTH = 40
    MAX_REALM_SUBDOMAIN_LENGTH = 40
    AUTHENTICATION_FLAGS = [u'Google', u'Email', u'GitHub', u'LDAP', u'Dev', u'RemoteUser']
    name = models.CharField(max_length=MAX_REALM_NAME_LENGTH, null=True) # type: Optional[Text]
    string_id = models.CharField(max_length=MAX_REALM_SUBDOMAIN_LENGTH, unique=True) # type: Text
    restricted_to_domain = models.BooleanField(default=False) # type: bool
    invite_required = models.BooleanField(default=True) # type: bool
    invite_by_admins_only = models.BooleanField(default=False) # type: bool
    inline_image_preview = models.BooleanField(default=True) # type: bool
    inline_url_embed_preview = models.BooleanField(default=True) # type: bool
    create_stream_by_admins_only = models.BooleanField(default=False) # type: bool
    add_emoji_by_admins_only = models.BooleanField(default=False) # type: bool
    mandatory_topics = models.BooleanField(default=False) # type: bool
    show_digest_email = models.BooleanField(default=True) # type: bool
    name_changes_disabled = models.BooleanField(default=False) # type: bool
    email_changes_disabled = models.BooleanField(default=False) # type: bool
    description = models.TextField(null=True) # type: Optional[Text]
    allow_message_editing = models.BooleanField(default=True) # type: bool
    DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS = 600 # if changed, also change in admin.js
    message_content_edit_limit_seconds = models.IntegerField(default=DEFAULT_MESSAGE_CONTENT_EDIT_LIMIT_SECONDS) # type: int
    message_retention_days = models.IntegerField(null=True) # type: Optional[int]
    allow_edit_history = models.BooleanField(default=True) # type: bool
    # Valid org_types are {CORPORATE, COMMUNITY}
    CORPORATE = 1
    COMMUNITY = 2
    org_type = models.PositiveSmallIntegerField(default=CORPORATE) # type: int
    date_created = models.DateTimeField(default=timezone_now) # type: datetime.datetime
    notifications_stream = models.ForeignKey('Stream', related_name='+', null=True, blank=True, on_delete=CASCADE) # type: Optional[Stream]
    deactivated = models.BooleanField(default=False) # type: bool
    default_language = models.CharField(default=u'en', max_length=MAX_LANGUAGE_ID_LENGTH) # type: Text
    # Bitmask over AUTHENTICATION_FLAGS; default 2**31 - 1 enables every flag.
    authentication_methods = BitField(flags=AUTHENTICATION_FLAGS,
                                      default=2**31 - 1) # type: BitHandler
    waiting_period_threshold = models.PositiveIntegerField(default=0) # type: int
    # Define the types of the various automatically managed properties
    property_types = dict(
        add_emoji_by_admins_only=bool,
        allow_edit_history=bool,
        create_stream_by_admins_only=bool,
        default_language=Text,
        description=Text,
        email_changes_disabled=bool,
        invite_required=bool,
        invite_by_admins_only=bool,
        inline_image_preview=bool,
        inline_url_embed_preview=bool,
        mandatory_topics=bool,
        message_retention_days=(int, type(None)),
        name=Text,
        name_changes_disabled=bool,
        restricted_to_domain=bool,
        waiting_period_threshold=int,
    ) # type: Dict[str, Union[type, Tuple[type, ...]]]
    ICON_FROM_GRAVATAR = u'G'
    ICON_UPLOADED = u'U'
    ICON_SOURCES = (
        (ICON_FROM_GRAVATAR, 'Hosted by Gravatar'),
        (ICON_UPLOADED, 'Uploaded by administrator'),
    )
    icon_source = models.CharField(default=ICON_FROM_GRAVATAR, choices=ICON_SOURCES,
                                   max_length=1) # type: Text
    icon_version = models.PositiveSmallIntegerField(default=1) # type: int
    DEFAULT_NOTIFICATION_STREAM_NAME = u'announce'
    def authentication_methods_dict(self):
        # type: () -> Dict[Text, bool]
        """Returns the a mapping from authentication flags to their status,
        showing only those authentication flags that are supported on
        the current server (i.e. if EmailAuthBackend is not configured
        on the server, this will not return an entry for "Email")."""
        # This mapping needs to be imported from here due to the cyclic
        # dependency.
        from zproject.backends import AUTH_BACKEND_NAME_MAP
        ret = {} # type: Dict[Text, bool]
        supported_backends = {backend.__class__ for backend in django.contrib.auth.get_backends()}
        # NOTE(review): iteritems() here is BitHandler's own method, not
        # dict.iteritems; confirm it is provided on Python 3.
        for k, v in self.authentication_methods.iteritems():
            backend = AUTH_BACKEND_NAME_MAP[k]
            if backend in supported_backends:
                ret[k] = v
        return ret
    def __unicode__(self):
        # type: () -> Text
        return u"<Realm: %s %s>" % (self.string_id, self.id)
    @cache_with_key(get_realm_emoji_cache_key, timeout=3600*24*7)
    def get_emoji(self):
        # type: () -> Dict[Text, Optional[Dict[str, Iterable[Text]]]]
        # Cached wrapper around the uncached database query.
        return get_realm_emoji_uncached(self)
    def get_admin_users(self):
        # type: () -> Sequence[UserProfile]
        # TODO: Change return type to QuerySet[UserProfile]
        return UserProfile.objects.filter(realm=self, is_realm_admin=True,
                                          is_active=True).select_related()
    def get_active_users(self):
        # type: () -> Sequence[UserProfile]
        # TODO: Change return type to QuerySet[UserProfile]
        return UserProfile.objects.filter(realm=self, is_active=True).select_related()
    def get_bot_domain(self):
        # type: () -> str
        # Remove the port. Mainly needed for development environment.
        external_host = settings.EXTERNAL_HOST.split(':')[0]
        if self.subdomain not in [None, ""]:
            return "%s.%s" % (self.string_id, external_host)
        return external_host
    def get_notifications_stream(self):
        # type: () -> Optional[Stream]
        # Returns the notifications stream only if it is still active.
        if self.notifications_stream is not None and not self.notifications_stream.deactivated:
            return self.notifications_stream
        return None
    @property
    def subdomain(self):
        # type: () -> Optional[Text]
        # string_id doubles as the subdomain when subdomains are enabled.
        if settings.REALMS_HAVE_SUBDOMAINS:
            return self.string_id
        return None
    @property
    def uri(self):
        # type: () -> str
        if self.subdomain not in [None, ""]:
            return '%s%s.%s' % (settings.EXTERNAL_URI_SCHEME,
                                self.subdomain, settings.EXTERNAL_HOST)
        return settings.ROOT_DOMAIN_URI
    @property
    def host(self):
        # type: () -> str
        if self.subdomain not in [None, ""]:
            return "%s.%s" % (self.subdomain, settings.EXTERNAL_HOST)
        return settings.EXTERNAL_HOST
    @property
    def is_zephyr_mirror_realm(self):
        # type: () -> bool
        return self.string_id == "zephyr"
    @property
    def webathena_enabled(self):
        # type: () -> bool
        return self.is_zephyr_mirror_realm
    @property
    def presence_disabled(self):
        # type: () -> bool
        return self.is_zephyr_mirror_realm
    class Meta(object):
        permissions = (
            ('administer', "Administer a realm"),
            ('api_super_user', "Can send messages as other users for mirroring"),
        )
# Keep cached realm data in sync whenever a Realm row is saved.
post_save.connect(flush_realm, sender=Realm)
def get_realm(string_id):
    # type: (Text) -> Optional[Realm]
    # .first() returns None when no realm with this string_id exists,
    # so the return type is Optional despite the original annotation.
    return Realm.objects.filter(string_id=string_id).first()
def completely_open(realm):
    # type: (Optional[Realm]) -> bool
    # A completely open realm lets anyone on the internet join:
    # no realmdomain restriction, and no invite from an existing
    # user is required.
    if realm is None:
        return False
    return not (realm.invite_required or realm.restricted_to_domain)
def get_unique_non_system_realm():
    # type: () -> Optional[Realm]
    """Return the single active non-system realm, or None if there
    is not exactly one."""
    candidates = Realm.objects.filter(deactivated=False)
    # On production installations, the (usually "zulip.com") system
    # realm is an empty realm just used for system bots, so don't
    # include it in this accounting.
    candidates = candidates.exclude(string_id__in=settings.SYSTEM_ONLY_REALMS)
    if len(candidates) == 1:
        return candidates[0]
    return None
def get_unique_open_realm():
    # type: () -> Optional[Realm]
    """We only return a realm if there is a unique non-system-only realm,
    it is completely open, and there are no subdomains."""
    if settings.REALMS_HAVE_SUBDOMAINS:
        return None
    realm = get_unique_non_system_realm()
    # Reject both "no unique realm" and "unique but not open".
    if realm is None or realm.invite_required or realm.restricted_to_domain:
        return None
    return realm
def name_changes_disabled(realm):
    # type: (Optional[Realm]) -> bool
    # The server-wide setting always applies; a realm can additionally
    # disable name changes for its own users.
    if realm is not None:
        return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
    return settings.NAME_CHANGES_DISABLED
class RealmDomain(models.Model):
    """An email domain associated with a realm; used by
    email_allowed_for_realm() to decide who may join."""
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
    # should always be stored lowercase
    domain = models.CharField(max_length=80, db_index=True) # type: Text
    # When True, addresses under subdomains of `domain` also qualify.
    allow_subdomains = models.BooleanField(default=False)
    class Meta(object):
        unique_together = ("realm", "domain")
def can_add_realm_domain(domain):
    # type: (Text) -> bool
    """A domain may be claimed when subdomains are in use, or when no
    realm has registered it yet."""
    if settings.REALMS_HAVE_SUBDOMAINS:
        return True
    return not RealmDomain.objects.filter(domain=domain).exists()
# These functions should only be used on email addresses that have
# been validated via django.core.validators.validate_email
#
# Note that we need to use some care, since you can have multiple @-signs; e.g.
# "tabbott@test"@zulip.com
# is a valid email address
def email_to_username(email):
    # type: (Text) -> Text
    # Keep everything before the *last* @-sign, since a quoted local
    # part may itself contain @-signs (e.g. "tabbott@test"@zulip.com).
    return "@".join(email.lower().split("@")[:-1])
# Returns the raw domain portion of the desired email address
def email_to_domain(email):
    # type: (Text) -> Text
    """Return the lowercased text after the last @-sign of the address."""
    return email.rsplit("@", 1)[-1].lower()
class GetRealmByDomainException(Exception):
    """Raised by get_realm_by_email_domain() when realm lookup by email
    domain is unsupported (REALMS_HAVE_SUBDOMAINS mode)."""
    pass
def get_realm_by_email_domain(email):
    # type: (Text) -> Optional[Realm]
    """
    Find the realm claiming this email's domain, preferring an exact
    match and then walking up parent domains for entries registered
    with allow_subdomains=True.  Returns None when nothing matches.
    """
    if settings.REALMS_HAVE_SUBDOMAINS:
        raise GetRealmByDomainException(
            "Cannot get realm from email domain when settings.REALMS_HAVE_SUBDOMAINS = True")
    domain = email_to_domain(email)
    query = RealmDomain.objects.select_related('realm')
    # Search for the longest match. If found return immediately. Since in case of
    # settings.REALMS_HAVE_SUBDOMAINS=True, we have a unique mapping between the
    # realm and domain so don't worry about `allow_subdomains` being True or False.
    exact_match = query.filter(domain=domain).first()
    if exact_match is not None:
        return exact_match.realm
    # No exact match, so try the parent domains, restricted to entries with
    # `allow_subdomains=True` so that we don't end up matching
    # 'test.zulip.com' wrongly to (realm, 'zulip.com', False).
    query = query.filter(allow_subdomains=True)
    while domain:
        _, _, domain = domain.partition('.')
        parent_match = query.filter(domain=domain).first()
        if parent_match is not None:
            return parent_match.realm
    return None
# Is a user with the given email address allowed to be in the given realm?
# (This function does not check whether the user has been invited to the realm.
# So for invite-only realms, this is the test for whether a user can be invited,
# not whether the user can sign up currently.)
def email_allowed_for_realm(email, realm):
    # type: (Text, Realm) -> bool
    # Open realms accept any address.
    if not realm.restricted_to_domain:
        return True
    domain = email_to_domain(email)
    realm_domains = RealmDomain.objects.filter(realm=realm)
    if realm_domains.filter(domain=domain).exists():
        return True
    # Walk up the parent domains, honoring allow_subdomains entries only.
    realm_domains = realm_domains.filter(allow_subdomains=True)
    while domain:
        _, _, domain = domain.partition('.')
        if realm_domains.filter(domain=domain).exists():
            return True
    return False
def get_realm_domains(realm):
    # type: (Realm) -> List[Dict[str, Text]]
    """List the realm's domains as {'domain', 'allow_subdomains'} dicts."""
    rows = realm.realmdomain_set.values('domain', 'allow_subdomains')
    return list(rows)
class RealmEmoji(ModelReprMixin, models.Model):
    """A custom emoji belonging to a particular realm."""
    author = models.ForeignKey('UserProfile', blank=True, null=True, on_delete=CASCADE)
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
    # Second part of the regex (negative lookbehind) disallows names ending with one of the punctuation characters
    name = models.TextField(validators=[MinLengthValidator(1),
                                        RegexValidator(regex=r'^[0-9a-z.\-_]+(?<![.\-_])$',
                                                       message=_("Invalid characters in emoji name"))]) # type: Text
    file_name = models.TextField(db_index=True, null=True) # type: Optional[Text]
    deactivated = models.BooleanField(default=False) # type: bool
    # Storage path template for the uploaded emoji image file.
    PATH_ID_TEMPLATE = "{realm_id}/emoji/{emoji_file_name}"
    class Meta(object):
        unique_together = ("realm", "name")
    def __unicode__(self):
        # type: () -> Text
        return u"<RealmEmoji(%s): %s %s>" % (self.realm.string_id, self.name, self.file_name)
def get_realm_emoji_uncached(realm):
    # type: (Realm) -> Dict[Text, Dict[str, Any]]
    """Build the {emoji name: data} mapping for a realm straight from the
    database (no caching)."""
    # Imported here to avoid a circular import at module load time.
    from zerver.lib.emoji import get_emoji_url
    emoji_by_name = {}
    for emoji_row in RealmEmoji.objects.filter(realm=realm).select_related('author'):
        author_info = None
        if emoji_row.author:
            author_info = {
                'id': emoji_row.author.id,
                'email': emoji_row.author.email,
                'full_name': emoji_row.author.full_name}
        emoji_by_name[emoji_row.name] = {
            'source_url': get_emoji_url(emoji_row.file_name, emoji_row.realm_id),
            'deactivated': emoji_row.deactivated,
            'author': author_info,
        }
    return emoji_by_name
def flush_realm_emoji(sender, **kwargs):
    # type: (Any, **Any) -> None
    """Signal handler: recompute the cached emoji mapping for the realm
    owning the changed RealmEmoji row."""
    realm = kwargs['instance'].realm
    one_week = 3600*24*7
    cache_set(get_realm_emoji_cache_key(realm),
              get_realm_emoji_uncached(realm),
              timeout=one_week)
# Refresh the cached emoji mapping whenever a RealmEmoji row changes.
post_save.connect(flush_realm_emoji, sender=RealmEmoji)
post_delete.connect(flush_realm_emoji, sender=RealmEmoji)
def filter_pattern_validator(value):
    # type: (Text) -> None
    """Validate a realm filter pattern: it must have the shape
    OPTIONAL_PREFIX(?P<id>.+) and itself compile as a regex.

    Raises ValidationError otherwise.
    """
    shape = re.compile(r'(?:[\w\-#]*)(\(\?P<\w+>.+\))')
    error_msg = 'Invalid filter pattern, you must use the following format OPTIONAL_PREFIX(?P<id>.+)'
    if not shape.match(str(value)):
        raise ValidationError(error_msg)
    try:
        re.compile(value)
    except sre_constants.error:
        # Shape is right, but the pattern itself is not a valid regex.
        raise ValidationError(error_msg)
def filter_format_validator(value):
# type: (str) -> None
regex = re.compile(r'^[\.\/:a-zA-Z0-9_?=-]+%\(([a-zA-Z0-9_-]+)\)s[a-zA-Z0-9_-]*$')
if not regex.match(value):
raise ValidationError('URL format string must be in the following format: `https://example.com/%(\w+)s`')
class RealmFilter(models.Model):
    """A realm "linkifier": a regex pattern plus a URL format string that
    together turn matching message text into links."""
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
    pattern = models.TextField(validators=[filter_pattern_validator]) # type: Text
    url_format_string = models.TextField(validators=[URLValidator(), filter_format_validator]) # type: Text
    class Meta(object):
        unique_together = ("realm", "pattern")
    def __unicode__(self):
        # type: () -> Text
        return u"<RealmFilter(%s): %s %s>" % (self.realm.string_id, self.pattern, self.url_format_string)
def get_realm_filters_cache_key(realm_id):
    # type: (int) -> Text
    """Remote-cache key for a realm's full list of filters."""
    return u'all_realm_filters:{}'.format(realm_id)
# We have a per-process cache to avoid doing 1000 remote cache queries during page load
# Reset by flush_per_request_caches() above.
per_request_realm_filters_cache = {} # type: Dict[int, List[Tuple[Text, Text, int]]]
def realm_in_local_realm_filters_cache(realm_id):
    # type: (int) -> bool
    # True when this process has already loaded the realm's filters.
    return realm_id in per_request_realm_filters_cache
def realm_filters_for_realm(realm_id):
    # type: (int) -> List[Tuple[Text, Text, int]]
    """Fetch a realm's filters, memoized in the per-request local cache."""
    if realm_in_local_realm_filters_cache(realm_id):
        return per_request_realm_filters_cache[realm_id]
    fetched = realm_filters_for_realm_remote_cache(realm_id)
    per_request_realm_filters_cache[realm_id] = fetched
    return fetched
@cache_with_key(get_realm_filters_cache_key, timeout=3600*24*7)
def realm_filters_for_realm_remote_cache(realm_id):
    # type: (int) -> List[Tuple[Text, Text, int]]
    """Load (pattern, url_format_string, id) triples for one realm from
    the database, populating the remote cache."""
    return [(row.pattern, row.url_format_string, row.id)
            for row in RealmFilter.objects.filter(realm_id=realm_id)]
def all_realm_filters():
    # type: () -> Dict[int, List[Tuple[Text, Text, int]]]
    """Group every realm filter in the database by realm id."""
    by_realm = defaultdict(list) # type: DefaultDict[int, List[Tuple[Text, Text, int]]]
    for row in RealmFilter.objects.all():
        by_realm[row.realm_id].append((row.pattern, row.url_format_string, row.id))
    return by_realm
def flush_realm_filter(sender, **kwargs):
    # type: (Any, **Any) -> None
    """Signal handler: drop both cache layers for the changed filter's realm."""
    realm_id = kwargs['instance'].realm_id
    cache_delete(get_realm_filters_cache_key(realm_id))
    # The request-local entry may simply be absent; that's fine.
    per_request_realm_filters_cache.pop(realm_id, None)
# Invalidate the filter caches whenever a RealmFilter row changes.
post_save.connect(flush_realm_filter, sender=RealmFilter)
post_delete.connect(flush_realm_filter, sender=RealmFilter)
class UserProfile(ModelReprMixin, AbstractBaseUser, PermissionsMixin):
    """A Zulip user account (human or bot); one row per account per server."""
    DEFAULT_BOT = 1
    """
    Incoming webhook bots are limited to only sending messages via webhooks.
    Thus, it is less of a security risk to expose their API keys to third-party services,
    since they can't be used to read messages.
    """
    INCOMING_WEBHOOK_BOT = 2
    # This value is also being used in static/js/settings_bots.js. On updating it here, update it there as well.
    OUTGOING_WEBHOOK_BOT = 3
    """
    Embedded bots run within the Zulip server itself; events are added to the
    embedded_bots queue and then handled by a QueueProcessingWorker.
    """
    EMBEDDED_BOT = 4
    # For now, don't allow creating other bot types via the UI
    ALLOWED_BOT_TYPES = [
        DEFAULT_BOT,
        INCOMING_WEBHOOK_BOT,
        OUTGOING_WEBHOOK_BOT,
    ]
    # Bot types whose messages are handled by an external/embedded service.
    SERVICE_BOT_TYPES = [
        OUTGOING_WEBHOOK_BOT,
        EMBEDDED_BOT
    ]
    # Fields from models.AbstractUser minus last_name and first_name,
    # which we don't use; email is modified to make it indexed and unique.
    email = models.EmailField(blank=False, db_index=True, unique=True) # type: Text
    is_staff = models.BooleanField(default=False) # type: bool
    is_active = models.BooleanField(default=True, db_index=True) # type: bool
    is_realm_admin = models.BooleanField(default=False, db_index=True) # type: bool
    is_bot = models.BooleanField(default=False, db_index=True) # type: bool
    bot_type = models.PositiveSmallIntegerField(null=True, db_index=True) # type: Optional[int]
    is_api_super_user = models.BooleanField(default=False, db_index=True) # type: bool
    date_joined = models.DateTimeField(default=timezone_now) # type: datetime.datetime
    is_mirror_dummy = models.BooleanField(default=False) # type: bool
    bot_owner = models.ForeignKey('self', null=True, on_delete=models.SET_NULL) # type: Optional[UserProfile]
    long_term_idle = models.BooleanField(default=False, db_index=True) # type: bool
    USERNAME_FIELD = 'email'
    MAX_NAME_LENGTH = 100
    MIN_NAME_LENGTH = 3
    API_KEY_LENGTH = 32
    NAME_INVALID_CHARS = ['*', '`', '>', '"', '@']
    # Our custom site-specific fields
    full_name = models.CharField(max_length=MAX_NAME_LENGTH) # type: Text
    short_name = models.CharField(max_length=MAX_NAME_LENGTH) # type: Text
    # pointer points to Message.id, NOT UserMessage.id.
    pointer = models.IntegerField() # type: int
    last_pointer_updater = models.CharField(max_length=64) # type: Text
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
    api_key = models.CharField(max_length=API_KEY_LENGTH) # type: Text
    tos_version = models.CharField(null=True, max_length=10) # type: Optional[Text]
    last_active_message_id = models.IntegerField(null=True) # type: Optional[int]
    ### Notifications settings. ###
    # Stream notifications.
    enable_stream_desktop_notifications = models.BooleanField(default=False) # type: bool
    enable_stream_push_notifications = models.BooleanField(default=False) # type: bool
    enable_stream_sounds = models.BooleanField(default=False) # type: bool
    # PM + @-mention notifications.
    enable_desktop_notifications = models.BooleanField(default=True) # type: bool
    pm_content_in_desktop_notifications = models.BooleanField(default=True) # type: bool
    enable_sounds = models.BooleanField(default=True) # type: bool
    enable_offline_email_notifications = models.BooleanField(default=True) # type: bool
    enable_offline_push_notifications = models.BooleanField(default=True) # type: bool
    enable_online_push_notifications = models.BooleanField(default=False) # type: bool
    enable_digest_emails = models.BooleanField(default=True) # type: bool
    # Old notification field superseded by existence of stream notification
    # settings.
    default_desktop_notifications = models.BooleanField(default=True) # type: bool
    ###
    last_reminder = models.DateTimeField(default=timezone_now, null=True) # type: Optional[datetime.datetime]
    rate_limits = models.CharField(default=u"", max_length=100) # type: Text # comma-separated list of range:max pairs
    # Default streams
    default_sending_stream = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE) # type: Optional[Stream]
    default_events_register_stream = models.ForeignKey('zerver.Stream', null=True, related_name='+', on_delete=CASCADE) # type: Optional[Stream]
    default_all_public_streams = models.BooleanField(default=False) # type: bool
    # UI vars
    enter_sends = models.NullBooleanField(default=False) # type: Optional[bool]
    autoscroll_forever = models.BooleanField(default=False) # type: bool
    left_side_userlist = models.BooleanField(default=False) # type: bool
    emoji_alt_code = models.BooleanField(default=False) # type: bool
    # display settings
    twenty_four_hour_time = models.BooleanField(default=False) # type: bool
    default_language = models.CharField(default=u'en', max_length=MAX_LANGUAGE_ID_LENGTH) # type: Text
    high_contrast_mode = models.BooleanField(default=False) # type: bool
    # Hours to wait before sending another email to a user
    EMAIL_REMINDER_WAITPERIOD = 24
    # Minutes to wait before warning a bot owner that their bot sent a message
    # to a nonexistent stream
    BOT_OWNER_STREAM_ALERT_WAITPERIOD = 1
    AVATAR_FROM_GRAVATAR = u'G'
    AVATAR_FROM_USER = u'U'
    AVATAR_SOURCES = (
        (AVATAR_FROM_GRAVATAR, 'Hosted by Gravatar'),
        (AVATAR_FROM_USER, 'Uploaded by user'),
    )
    avatar_source = models.CharField(default=AVATAR_FROM_GRAVATAR, choices=AVATAR_SOURCES, max_length=1) # type: Text
    avatar_version = models.PositiveSmallIntegerField(default=1) # type: int
    TUTORIAL_WAITING = u'W'
    TUTORIAL_STARTED = u'S'
    TUTORIAL_FINISHED = u'F'
    TUTORIAL_STATES = ((TUTORIAL_WAITING, "Waiting"),
                       (TUTORIAL_STARTED, "Started"),
                       (TUTORIAL_FINISHED, "Finished"))
    tutorial_status = models.CharField(default=TUTORIAL_WAITING, choices=TUTORIAL_STATES, max_length=1) # type: Text
    # Contains serialized JSON of the form:
    # [("step 1", true), ("step 2", false)]
    # where the second element of each tuple is if the step has been
    # completed.
    onboarding_steps = models.TextField(default=u'[]') # type: Text
    alert_words = models.TextField(default=u'[]') # type: Text # json-serialized list of strings
    objects = UserManager() # type: UserManager
    DEFAULT_UPLOADS_QUOTA = 1024*1024*1024
    quota = models.IntegerField(default=DEFAULT_UPLOADS_QUOTA) # type: int
    # The maximum length of a timezone in pytz.all_timezones is 32.
    # Setting max_length=40 is a safe choice.
    # In Django, the convention is to use empty string instead of Null
    # for text based fields. For more information, see
    # https://docs.djangoproject.com/en/1.10/ref/models/fields/#django.db.models.Field.null.
    timezone = models.CharField(max_length=40, default=u'') # type: Text
    # Emojisets
    APPLE_EMOJISET = u'apple'
    EMOJIONE_EMOJISET = u'emojione'
    GOOGLE_EMOJISET = u'google'
    TWITTER_EMOJISET = u'twitter'
    EMOJISET_CHOICES = ((APPLE_EMOJISET, _("Apple style")),
                        (EMOJIONE_EMOJISET, _("Emoji One style")),
                        (GOOGLE_EMOJISET, _("Google style")),
                        (TWITTER_EMOJISET, _("Twitter style")))
    emojiset = models.CharField(default=GOOGLE_EMOJISET, choices=EMOJISET_CHOICES, max_length=20) # type: Text
    # Define the types of the various automatically managed properties
    property_types = dict(
        default_language=Text,
        emoji_alt_code=bool,
        emojiset=Text,
        left_side_userlist=bool,
        timezone=Text,
        twenty_four_hour_time=bool,
        high_contrast_mode=bool,
    )
    notification_setting_types = dict(
        enable_desktop_notifications=bool,
        enable_digest_emails=bool,
        enable_offline_email_notifications=bool,
        enable_offline_push_notifications=bool,
        enable_online_push_notifications=bool,
        enable_sounds=bool,
        enable_stream_desktop_notifications=bool,
        enable_stream_push_notifications=bool,
        enable_stream_sounds=bool,
        pm_content_in_desktop_notifications=bool,
    )
    @property
    def profile_data(self):
        # type: () -> List[Dict[str, Union[int, float, Text]]]
        """The user's custom profile field values, with each stored value
        converted via the field's registered converter."""
        values = CustomProfileFieldValue.objects.filter(user_profile=self)
        user_data = {v.field_id: v.value for v in values}
        data = [] # type: List[Dict[str, Union[int, float, Text]]]
        for field in custom_profile_fields_for_realm(self.realm_id):
            value = user_data.get(field.id, None)
            field_type = field.field_type
            if value is not None:
                converter = field.FIELD_CONVERTERS[field_type]
                value = converter(value)
            field_data = {} # type: Dict[str, Union[int, float, Text]]
            for k, v in field.as_dict().items():
                field_data[k] = v
            field_data['value'] = value
            data.append(field_data)
        return data
    def can_admin_user(self, target_user):
        # type: (UserProfile) -> bool
        """Returns whether this user has permission to modify target_user"""
        if target_user.bot_owner == self:
            return True
        elif self.is_realm_admin and self.realm == target_user.realm:
            return True
        else:
            return False
    def __unicode__(self):
        # type: () -> Text
        return u"<UserProfile: %s %s>" % (self.email, self.realm)
    @property
    def is_incoming_webhook(self):
        # type: () -> bool
        return self.bot_type == UserProfile.INCOMING_WEBHOOK_BOT
    @property
    def is_outgoing_webhook_bot(self):
        # type: () -> bool
        return self.bot_type == UserProfile.OUTGOING_WEBHOOK_BOT
    @property
    def is_embedded_bot(self):
        # type: () -> bool
        return self.bot_type == UserProfile.EMBEDDED_BOT
    @property
    def is_service_bot(self):
        # type: () -> bool
        return self.is_bot and self.bot_type in UserProfile.SERVICE_BOT_TYPES
    @staticmethod
    def emojiset_choices():
        # type: () -> Dict[Text, Text]
        return {emojiset[0]: force_text(emojiset[1]) for emojiset in UserProfile.EMOJISET_CHOICES}
    @staticmethod
    def emails_from_ids(user_ids):
        # type: (Sequence[int]) -> Dict[int, Text]
        rows = UserProfile.objects.filter(id__in=user_ids).values('id', 'email')
        return {row['id']: row['email'] for row in rows}
    def can_create_streams(self):
        # type: () -> bool
        # Admins always can; otherwise a realm may restrict creation to
        # admins only, or require a minimum account age
        # (realm.waiting_period_threshold days).
        diff = (timezone_now() - self.date_joined).days
        if self.is_realm_admin:
            return True
        elif self.realm.create_stream_by_admins_only:
            return False
        if diff >= self.realm.waiting_period_threshold:
            return True
        return False
    def major_tos_version(self):
        # type: () -> int
        """The major version of the Terms of Service the user agreed to,
        or -1 if they have not agreed to any version."""
        if self.tos_version is not None:
            return int(self.tos_version.split('.')[0])
        else:
            return -1
def receives_offline_notifications(user_profile):
    # type: (UserProfile) -> bool
    # Bots never receive offline notifications; humans do when either
    # the email or the push channel for offline messages is enabled.
    if user_profile.is_bot:
        return False
    return (user_profile.enable_offline_email_notifications or
            user_profile.enable_offline_push_notifications)
def receives_online_notifications(user_profile):
    # type: (UserProfile) -> bool
    # Only humans (never bots) may opt in to push while online.
    if user_profile.is_bot:
        return False
    return user_profile.enable_online_push_notifications
def receives_stream_notifications(user_profile):
    # type: (UserProfile) -> bool
    # Only humans (never bots) may enable stream push notifications.
    if user_profile.is_bot:
        return False
    return user_profile.enable_stream_push_notifications
def remote_user_to_email(remote_user):
    # type: (Text) -> Text
    """Map a REMOTE_USER value to an email address, appending
    settings.SSO_APPEND_DOMAIN when that setting is configured."""
    if settings.SSO_APPEND_DOMAIN is None:
        return remote_user
    return remote_user + "@" + settings.SSO_APPEND_DOMAIN
# Make sure we flush the UserProfile object from our remote cache
# whenever we save it.
post_save.connect(flush_user_profile, sender=UserProfile)
class PreregistrationUser(models.Model):
    """A registration/invitation in progress, keyed by email address."""
    email = models.EmailField() # type: Text
    referred_by = models.ForeignKey(UserProfile, null=True, on_delete=CASCADE) # type: Optional[UserProfile]
    streams = models.ManyToManyField('Stream') # type: Manager
    invited_at = models.DateTimeField(auto_now=True) # type: datetime.datetime
    # True when this registration is creating a brand-new realm.
    realm_creation = models.BooleanField(default=False)
    # Indicates whether the user needs a password. Users who were
    # created via SSO style auth (e.g. GitHub/Google) generally do not.
    password_required = models.BooleanField(default=True)
    # status: whether an object has been confirmed.
    # if confirmed, set to confirmation.settings.STATUS_ACTIVE
    status = models.IntegerField(default=0) # type: int
    realm = models.ForeignKey(Realm, null=True, on_delete=CASCADE) # type: Optional[Realm]
class MultiuseInvite(models.Model):
    # NOTE(review): semantics inferred from the name — presumably an
    # invitation not tied to a single email address; confirm with callers.
    referred_by = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    streams = models.ManyToManyField('Stream') # type: Manager
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
class EmailChangeStatus(models.Model):
    """Tracks a user's pending email-address change awaiting confirmation."""
    new_email = models.EmailField() # type: Text
    old_email = models.EmailField() # type: Text
    updated_at = models.DateTimeField(auto_now=True) # type: datetime.datetime
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    # status: whether an object has been confirmed.
    # if confirmed, set to confirmation.settings.STATUS_ACTIVE
    status = models.IntegerField(default=0) # type: int
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
class AbstractPushDeviceToken(models.Model):
    """Common fields for mobile push-notification tokens (APNs and GCM)."""
    APNS = 1
    GCM = 2
    KINDS = (
        (APNS, 'apns'),
        (GCM, 'gcm'),
    )
    kind = models.PositiveSmallIntegerField(choices=KINDS) # type: int
    # The token is a unique device-specific token that is
    # sent to us from each device:
    # - APNS token if kind == APNS
    # - GCM registration id if kind == GCM
    # (CharField stores text, so the value is Text, not bytes.)
    token = models.CharField(max_length=4096, unique=True) # type: Text
    last_updated = models.DateTimeField(auto_now=True) # type: datetime.datetime
    # [optional] Contains the app id of the device if it is an iOS device
    ios_app_id = models.TextField(null=True) # type: Optional[Text]
    class Meta(object):
        abstract = True
class PushDeviceToken(AbstractPushDeviceToken):
    # The user whose device this is
    user = models.ForeignKey(UserProfile, db_index=True, on_delete=CASCADE) # type: UserProfile
def generate_email_token_for_stream():
    # type: () -> str
    """Create a fresh random 32-character token for a stream's
    incoming-email address."""
    token_length = 32
    return generate_random_token(token_length)
class Stream(ModelReprMixin, models.Model):
    """A channel for messages within a realm; may be public or invite-only."""
    MAX_NAME_LENGTH = 60
    name = models.CharField(max_length=MAX_NAME_LENGTH, db_index=True) # type: Text
    realm = models.ForeignKey(Realm, db_index=True, on_delete=CASCADE) # type: Realm
    invite_only = models.NullBooleanField(default=False) # type: Optional[bool]
    # Used by the e-mail forwarder. The e-mail RFC specifies a maximum
    # e-mail length of 254, and our max stream length is 60, so we
    # have plenty of room for the token.
    email_token = models.CharField(
        max_length=32, default=generate_email_token_for_stream) # type: str
    description = models.CharField(max_length=1024, default=u'') # type: Text
    date_created = models.DateTimeField(default=timezone_now) # type: datetime.datetime
    deactivated = models.BooleanField(default=False) # type: bool
    def __unicode__(self):
        # type: () -> Text
        return u"<Stream: %s>" % (self.name,)
    def is_public(self):
        # type: () -> bool
        # All streams are private in Zephyr mirroring realms.
        return not self.invite_only and not self.realm.is_zephyr_mirror_realm
    class Meta(object):
        unique_together = ("name", "realm")
    def num_subscribers(self):
        # type: () -> int
        # Count only active subscriptions belonging to active users.
        return Subscription.objects.filter(
            recipient__type=Recipient.STREAM,
            recipient__type_id=self.id,
            user_profile__is_active=True,
            active=True
        ).count()
    # This is stream information that is sent to clients
    def to_dict(self):
        # type: () -> Dict[str, Any]
        return dict(name=self.name,
                    stream_id=self.id,
                    description=self.description,
                    invite_only=self.invite_only)
# Keep cached stream data in sync whenever a Stream row is created,
# updated, or deleted (flush_stream invalidates the relevant cache entries).
post_save.connect(flush_stream, sender=Stream)
post_delete.connect(flush_stream, sender=Stream)
# The Recipient table is used to map Messages to the set of users who
# received the message. It is implemented as a set of triples (id,
# type_id, type). We have 3 types of recipients: Huddles (for group
# private messages), UserProfiles (for 1:1 private messages), and
# Streams. The recipient table maps a globally unique recipient id
# (used by the Message table) to the type-specific unique id (the
# stream id, user_profile id, or huddle id).
class Recipient(ModelReprMixin, models.Model):
    """Maps a globally unique recipient id to a type-specific id.

    See the block comment above this class for the full design rationale.
    """
    type_id = models.IntegerField(db_index=True) # type: int
    type = models.PositiveSmallIntegerField(db_index=True) # type: int
    # Valid types are {personal, stream, huddle}
    PERSONAL = 1
    STREAM = 2
    HUDDLE = 3
    class Meta(object):
        unique_together = ("type", "type_id")
    # N.B. If we used Django's choice=... we would get this for free (kinda)
    _type_names = {
        PERSONAL: 'personal',
        STREAM: 'stream',
        HUDDLE: 'huddle'}
    def type_name(self):
        # type: () -> str
        # Raises KeyError if invalid
        return self._type_names[self.type]
    def __unicode__(self):
        # type: () -> Text
        display_recipient = get_display_recipient(self)
        return u"<Recipient: %s (%d, %s)>" % (display_recipient, self.type_id, self.type)
class MutedTopic(ModelReprMixin, models.Model):
    """Records that a user has muted one topic within a stream."""
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE)
    stream = models.ForeignKey(Stream, on_delete=CASCADE)
    recipient = models.ForeignKey(Recipient, on_delete=CASCADE)
    topic_name = models.CharField(max_length=MAX_SUBJECT_LENGTH)
    class Meta(object):
        unique_together = ('user_profile', 'stream', 'topic_name')
    def __unicode__(self):
        # type: () -> Text
        return u"<MutedTopic: (%s, %s, %s)>" % (self.user_profile.email, self.stream.name, self.topic_name)
class Client(ModelReprMixin, models.Model):
    """A type of sending client (website, API, mobile app, ...), keyed by name."""
    name = models.CharField(max_length=30, db_index=True, unique=True) # type: Text
    def __unicode__(self):
        # type: () -> Text
        return u"<Client: %s>" % (self.name,)
# In-process memoization table for get_client(); entries live for the
# lifetime of the process (there is no invalidation path here).
get_client_cache = {} # type: Dict[Text, Client]
def get_client(name):
    # type: (Text) -> Client
    """Look up a Client by name, memoizing the result in this process.

    KEY_PREFIX is read through the `cache` module on every call so that an
    updated prefix takes effect immediately.
    """
    key = cache.KEY_PREFIX + name
    if key not in get_client_cache:
        get_client_cache[key] = get_client_remote_cache(name)
    return get_client_cache[key]
def get_client_cache_key(name):
    # type: (Text) -> Text
    """Remote-cache key for a Client, with the name hashed to keep it cache-safe."""
    digest = make_safe_digest(name)
    return u'get_client:%s' % (digest,)
@cache_with_key(get_client_cache_key, timeout=3600*24*7)
def get_client_remote_cache(name):
    # type: (Text) -> Client
    """Fetch-or-create a Client row, memoized in the remote cache for a week."""
    (client, _) = Client.objects.get_or_create(name=name)
    return client
# get_stream_backend takes a stream name and a realm id; use get_stream
# below if you have a Realm object instead.
@cache_with_key(get_stream_cache_key, timeout=3600*24*7)
def get_stream_backend(stream_name, realm_id):
    # type: (Text, int) -> Stream
    """Fetch a stream by case-insensitive name within a realm, via the cache."""
    normalized = stream_name.strip()
    return Stream.objects.select_related("realm").get(
        name__iexact=normalized, realm_id=realm_id)
def stream_name_in_use(stream_name, realm_id):
    # type: (Text, int) -> bool
    """True if a stream with this (case-insensitive) name exists in the realm."""
    matches = Stream.objects.filter(
        name__iexact=stream_name.strip(),
        realm_id=realm_id,
    )
    return matches.exists()
def get_active_streams(realm):
    # type: (Optional[Realm]) -> QuerySet
    """All streams in the realm that have not been deactivated.

    Invite-only streams are included.
    """
    return Stream.objects.filter(deactivated=False, realm=realm)
def get_stream(stream_name, realm):
    # type: (Text, Realm) -> Stream
    """Convenience wrapper around get_stream_backend taking a Realm object."""
    return get_stream_backend(stream_name, realm.id)
def bulk_get_streams(realm, stream_names):
    # type: (Realm, STREAM_NAMES) -> Dict[Text, Any]
    """Fetch many streams by case-insensitive name in one query, going
    through the per-stream cache; returns a dict keyed by lowercased name."""
    def fetch_streams_by_name(stream_names):
        # type: (List[Text]) -> Sequence[Stream]
        #
        # This should be just
        #
        # Stream.objects.select_related("realm").filter(name__iexact__in=stream_names,
        # realm_id=realm_id)
        #
        # But chaining __in and __iexact doesn't work with Django's
        # ORM, so we have the following hack to construct the relevant where clause
        if len(stream_names) == 0:
            return []
        upper_list = ", ".join(["UPPER(%s)"] * len(stream_names))
        where_clause = "UPPER(zerver_stream.name::text) IN (%s)" % (upper_list,)
        # NOTE(review): get_active_streams is passed realm.id even though its
        # parameter is annotated Optional[Realm]; Django accepts a pk value
        # for a foreign-key filter, so this behaves like passing the Realm.
        return get_active_streams(realm.id).select_related("realm").extra(
            where=[where_clause],
            params=stream_names)
    return generic_bulk_cached_fetch(lambda stream_name: get_stream_cache_key(stream_name, realm.id),
                                     fetch_streams_by_name,
                                     [stream_name.lower() for stream_name in stream_names],
                                     id_fetcher=lambda stream: stream.name.lower())
def get_recipient_cache_key(type, type_id):
    # type: (int, int) -> Text
    """Remote-cache key for the (type, type_id) Recipient lookup."""
    prefix = cache.KEY_PREFIX
    return u"%s:get_recipient:%s:%s" % (prefix, type, type_id,)
@cache_with_key(get_recipient_cache_key, timeout=3600*24*7)
def get_recipient(type, type_id):
    # type: (int, int) -> Recipient
    """Fetch the Recipient for (type, type_id), via the cache (1-week TTL)."""
    return Recipient.objects.get(type_id=type_id, type=type)
def bulk_get_recipients(type, type_ids):
    # type: (int, List[int]) -> Dict[int, Any]
    """Fetch many Recipient rows of a single type, going through the cache."""
    def key_for(type_id):
        # type: (int) -> Text
        return get_recipient_cache_key(type, type_id)
    def lookup_uncached(uncached_ids):
        # type: (List[int]) -> Sequence[Recipient]
        # TODO: Change return type to QuerySet[Recipient]
        return Recipient.objects.filter(type=type, type_id__in=uncached_ids)
    return generic_bulk_cached_fetch(key_for, lookup_uncached, type_ids,
                                     id_fetcher=lambda recipient: recipient.type_id)
def sew_messages_and_reactions(messages, reactions):
    # type: (List[Dict[str, Any]], List[Dict[str, Any]]) -> List[Dict[str, Any]]
    """Attach each reaction row to its parent message row.

    Every message dict gains a 'reactions' list (possibly empty); messages
    come back in their original order.
    """
    messages_by_id = {}
    for message in messages:
        message['reactions'] = []
        messages_by_id[message['id']] = message
    # Each reaction carries the id of the message it belongs to.
    for reaction in reactions:
        messages_by_id[reaction['message_id']]['reactions'].append(reaction)
    return list(messages_by_id.values())
class AbstractMessage(ModelReprMixin, models.Model):
    """Fields shared by Message and ArchivedMessage."""
    sender = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    recipient = models.ForeignKey(Recipient, on_delete=CASCADE) # type: Recipient
    # The topic of the message (historically called "subject").
    subject = models.CharField(max_length=MAX_SUBJECT_LENGTH, db_index=True) # type: Text
    content = models.TextField() # type: Text
    # Cached markdown rendering of `content`, plus the bugdown version that
    # produced it (see Message.need_to_render_content).
    rendered_content = models.TextField(null=True) # type: Optional[Text]
    rendered_content_version = models.IntegerField(null=True) # type: Optional[int]
    pub_date = models.DateTimeField('date published', db_index=True) # type: datetime.datetime
    sending_client = models.ForeignKey(Client, on_delete=CASCADE) # type: Client
    last_edit_time = models.DateTimeField(null=True) # type: Optional[datetime.datetime]
    edit_history = models.TextField(null=True) # type: Optional[Text]
    # Denormalized content properties, maintained by update_calculated_fields.
    has_attachment = models.BooleanField(default=False, db_index=True) # type: bool
    has_image = models.BooleanField(default=False, db_index=True) # type: bool
    has_link = models.BooleanField(default=False, db_index=True) # type: bool
    class Meta(object):
        abstract = True
    def __unicode__(self):
        # type: () -> Text
        display_recipient = get_display_recipient(self.recipient)
        return u"<%s: %s / %s / %r>" % (self.__class__.__name__, display_recipient,
                                        self.subject, self.sender)
class ArchivedMessage(AbstractMessage):
    """A message moved out of the live Message table, with its archival time."""
    archive_timestamp = models.DateTimeField(default=timezone_now, db_index=True) # type: datetime.datetime
class Message(AbstractMessage):
    """A message delivered to some Recipient (stream, 1:1 PM, or huddle)."""
    def topic_name(self):
        # type: () -> Text
        """
        Please start using this helper to facilitate an
        eventual switch over to a separate topic table.
        """
        return self.subject
    def get_realm(self):
        # type: () -> Realm
        return self.sender.realm
    def save_rendered_content(self):
        # type: () -> None
        # Persist only the cached-rendering columns.
        self.save(update_fields=["rendered_content", "rendered_content_version"])
    @staticmethod
    def need_to_render_content(rendered_content, rendered_content_version, bugdown_version):
        # type: (Optional[Text], Optional[int], int) -> bool
        # Re-render when there is no cached rendering or it predates the
        # current bugdown (markdown processor) version.
        return (rendered_content is None or
                rendered_content_version is None or
                rendered_content_version < bugdown_version)
    def to_log_dict(self):
        # type: () -> Dict[str, Any]
        # Flat representation used for message logging.
        return dict(
            id = self.id,
            sender_id = self.sender.id,
            sender_email = self.sender.email,
            sender_realm_str = self.sender.realm.string_id,
            sender_full_name = self.sender.full_name,
            sender_short_name = self.sender.short_name,
            sending_client = self.sending_client.name,
            type = self.recipient.type_name(),
            recipient = get_display_recipient(self.recipient),
            subject = self.topic_name(),
            content = self.content,
            timestamp = datetime_to_timestamp(self.pub_date))
    @staticmethod
    def get_raw_db_rows(needed_ids):
        # type: (List[int]) -> List[Dict[str, Any]]
        # This is a special purpose function optimized for
        # callers like get_messages_backend().
        fields = [
            'id',
            'subject',
            'pub_date',
            'last_edit_time',
            'edit_history',
            'content',
            'rendered_content',
            'rendered_content_version',
            'recipient_id',
            'recipient__type',
            'recipient__type_id',
            'sender_id',
            'sending_client__name',
            'sender__email',
            'sender__full_name',
            'sender__short_name',
            'sender__realm__id',
            'sender__realm__string_id',
            'sender__avatar_source',
            'sender__avatar_version',
            'sender__is_mirror_dummy',
        ]
        messages = Message.objects.filter(id__in=needed_ids).values(*fields)
        """Adding one-many or Many-Many relationship in values results in N X
        results.
        Link: https://docs.djangoproject.com/en/1.8/ref/models/querysets/#values
        """
        # Reactions are fetched separately and stitched in to avoid the
        # row-multiplication described above.
        reactions = Reaction.get_raw_db_rows(needed_ids)
        return sew_messages_and_reactions(messages, reactions)
    def sent_by_human(self):
        # type: () -> bool
        """True if the sending client is one of the interactive Zulip clients
        (as opposed to a bot/integration)."""
        sending_client = self.sending_client.name.lower()
        return (sending_client in ('zulipandroid', 'zulipios', 'zulipdesktop',
                                   'zulipmobile', 'zulipelectron', 'snipe',
                                   'website', 'ios', 'android')) or (
            'desktop app' in sending_client)
    @staticmethod
    def content_has_attachment(content):
        # type: (Text) -> Optional[Match]
        # re.search returns None when no user-uploads path is present.
        return re.search(r'[/\-]user[\-_]uploads[/\.-]', content)
    @staticmethod
    def content_has_image(content):
        # type: (Text) -> bool
        return bool(re.search(r'[/\-]user[\-_]uploads[/\.-]\S+\.(bmp|gif|jpg|jpeg|png|webp)', content, re.IGNORECASE))
    @staticmethod
    def content_has_link(content):
        # type: (Text) -> bool
        return ('http://' in content or
                'https://' in content or
                '/user_uploads' in content or
                (settings.ENABLE_FILE_LINKS and 'file:///' in content))
    @staticmethod
    def is_status_message(content, rendered_content):
        # type: (Text, Text) -> bool
        """
        Returns True if content and rendered_content are from 'me_message'
        """
        if content.startswith('/me ') and '\n' not in content:
            if rendered_content.startswith('<p>') and rendered_content.endswith('</p>'):
                return True
        return False
    def update_calculated_fields(self):
        # type: () -> None
        # TODO: rendered_content could also be considered a calculated field
        content = self.content
        self.has_attachment = bool(Message.content_has_attachment(content))
        self.has_image = bool(Message.content_has_image(content))
        self.has_link = bool(Message.content_has_link(content))
@receiver(pre_save, sender=Message)
def pre_save_message(sender, **kwargs):
    # type: (Any, **Any) -> None
    # Recompute has_attachment/has_image/has_link whenever the content may
    # have changed (a full save, or an update_fields list containing it).
    if kwargs['update_fields'] is None or "content" in kwargs['update_fields']:
        message = kwargs['instance']
        message.update_calculated_fields()
def get_context_for_message(message):
    # type: (Message) -> QuerySet[Message]
    """Up to 10 earlier messages in the same topic within a 15-minute window."""
    # TODO: Change return type to QuerySet[Message]
    window_start = message.pub_date - timedelta(minutes=15)
    preceding = Message.objects.filter(
        recipient_id=message.recipient_id,
        subject=message.subject,
        id__lt=message.id,
        pub_date__gt=window_start,
    )
    return preceding.order_by('-id')[:10]
# Invalidate cached message data whenever a Message row is saved.
post_save.connect(flush_message, sender=Message)
class Reaction(ModelReprMixin, models.Model):
    """An emoji reaction by one user on one message."""
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    message = models.ForeignKey(Message, on_delete=CASCADE) # type: Message
    # Display name of the emoji; emoji_code is the type-specific identifier.
    emoji_name = models.TextField() # type: Text
    emoji_code = models.TextField() # type: Text
    UNICODE_EMOJI = u'unicode_emoji'
    REALM_EMOJI = u'realm_emoji'
    ZULIP_EXTRA_EMOJI = u'zulip_extra_emoji'
    REACTION_TYPES = ((UNICODE_EMOJI, _("Unicode emoji")),
                      (REALM_EMOJI, _("Realm emoji")),
                      (ZULIP_EXTRA_EMOJI, _("Zulip extra emoji")))
    reaction_type = models.CharField(default=UNICODE_EMOJI, choices=REACTION_TYPES, max_length=30) # type: Text
    class Meta(object):
        unique_together = ("user_profile", "message", "emoji_name")
    @staticmethod
    def get_raw_db_rows(needed_ids):
        # type: (List[int]) -> List[Dict[str, Any]]
        """Flat dict rows for all reactions on the given message ids."""
        fields = ['message_id', 'emoji_name', 'emoji_code', 'reaction_type',
                  'user_profile__email', 'user_profile__id', 'user_profile__full_name']
        return Reaction.objects.filter(message_id__in=needed_ids).values(*fields)
# Whenever a message is sent, for each user subscribed to the
# corresponding Recipient object, we add a row to the UserMessage
# table indicating that that user received that message. This table
# allows us to quickly query any user's last 1000 messages to generate
# the home view.
#
# Additionally, the flags field stores metadata like whether the user
# has read the message, starred or collapsed the message, was
# mentioned in the message, etc.
#
# UserMessage is the largest table in a Zulip installation, even
# though each row is only 4 integers.
class AbstractUserMessage(ModelReprMixin, models.Model):
    """Per-(user, message) delivery record plus per-user message flags.

    See the block comment above this class for the design rationale.
    """
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    # WARNING: We removed the previously-final flag,
    # is_me_message, without clearing any values it might have had in
    # the database. So when we next add a flag, you need to do a
    # migration to set it to 0 first
    ALL_FLAGS = ['read', 'starred', 'collapsed', 'mentioned', 'wildcard_mentioned',
                 'summarize_in_home', 'summarize_in_stream', 'force_expand', 'force_collapse',
                 'has_alert_word', "historical"]
    flags = BitField(flags=ALL_FLAGS, default=0) # type: BitHandler
    class Meta(object):
        abstract = True
        unique_together = ("user_profile", "message")
    @staticmethod
    def where_unread():
        # type: () -> str
        # Use this for Django ORM queries where we are getting lots
        # of rows. This custom SQL plays nice with our partial indexes.
        # Grep the code for example usage.
        # ('read' is bit 0 of the flags bitfield, hence `flags & 1`.)
        return 'flags & 1 = 0'
    def flags_list(self):
        # type: () -> List[str]
        """Names of all flags currently set on this row."""
        flags = int(self.flags)
        return self.flags_list_for_flags(flags)
    @staticmethod
    def flags_list_for_flags(flags):
        # type: (int) -> List[str]
        '''
        This function is highly optimized, because it actually slows down
        sending messages in a naive implementation.
        '''
        names = AbstractUserMessage.ALL_FLAGS
        return [
            names[i]
            for i in range(len(names))
            if flags & (2 ** i)
        ]
    def __unicode__(self):
        # type: () -> Text
        display_recipient = get_display_recipient(self.message.recipient)
        return u"<%s: %s / %s (%s)>" % (self.__class__.__name__, display_recipient,
                                        self.user_profile.email, self.flags_list())
class ArchivedUserMessage(AbstractUserMessage):
    """UserMessage rows for archived messages, with their archival time."""
    message = models.ForeignKey(ArchivedMessage, on_delete=CASCADE) # type: ArchivedMessage
    archive_timestamp = models.DateTimeField(default=timezone_now, db_index=True) # type: datetime.datetime
class UserMessage(AbstractUserMessage):
    """Live (non-archived) per-user message delivery rows."""
    message = models.ForeignKey(Message, on_delete=CASCADE) # type: Message
def parse_usermessage_flags(val):
    # type: (int) -> List[str]
    """Decode a raw flags bitmask into the list of set flag names."""
    return [flag
            for position, flag in enumerate(UserMessage.ALL_FLAGS)
            if val & (1 << position)]
class AbstractAttachment(ModelReprMixin, models.Model):
    """Fields shared by Attachment and ArchivedAttachment."""
    file_name = models.TextField(db_index=True) # type: Text
    # path_id is a storage location agnostic representation of the path of the file.
    # If the path of a file is http://localhost:9991/user_uploads/a/b/abc/temp_file.py
    # then its path_id will be a/b/abc/temp_file.py.
    path_id = models.TextField(db_index=True, unique=True) # type: Text
    owner = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    realm = models.ForeignKey(Realm, blank=True, null=True, on_delete=CASCADE) # type: Optional[Realm]
    # If True, any member of the realm may access this file (see
    # validate_attachment_request).
    is_realm_public = models.BooleanField(default=False) # type: bool
    create_time = models.DateTimeField(default=timezone_now,
                                       db_index=True) # type: datetime.datetime
    # Presumably the file size in bytes; nullable — TODO confirm against upload code.
    size = models.IntegerField(null=True) # type: Optional[int]
    class Meta(object):
        abstract = True
    def __unicode__(self):
        # type: () -> Text
        return u"<%s: %s>" % (self.__class__.__name__, self.file_name,)
class ArchivedAttachment(AbstractAttachment):
    """Attachments referenced only by archived messages."""
    archive_timestamp = models.DateTimeField(default=timezone_now, db_index=True) # type: datetime.datetime
    messages = models.ManyToManyField(ArchivedMessage) # type: Manager
class Attachment(AbstractAttachment):
    """A live uploaded file, linked to the messages that reference it."""
    messages = models.ManyToManyField(Message) # type: Manager
    def is_claimed(self):
        # type: () -> bool
        # An attachment is "claimed" once at least one message references it.
        return self.messages.count() > 0
    def to_dict(self):
        # type: () -> Dict[str, Any]
        """Client-facing representation of this attachment."""
        return {
            'id': self.id,
            'name': self.file_name,
            'path_id': self.path_id,
            'size': self.size,
            # convert to JavaScript-style UNIX timestamp so we can take
            # advantage of client timezones.
            'create_time': time.mktime(self.create_time.timetuple()) * 1000,
            'messages': [{
                'id': m.id,
                'name': time.mktime(m.pub_date.timetuple()) * 1000
            } for m in self.messages.all()]
        }
def validate_attachment_request(user_profile, path_id):
    # type: (UserProfile, Text) -> Optional[bool]
    """May this user access the file at path_id?

    Returns True/False for an access decision, or None if no such
    attachment exists.
    """
    try:
        attachment = Attachment.objects.get(path_id=path_id)
    except Attachment.DoesNotExist:
        return None
    # If you own the file, you can access it.
    if user_profile == attachment.owner:
        return True
    # Any user in the realm can access realm-public files.
    if attachment.is_realm_public and attachment.realm == user_profile.realm:
        return True
    # If it was sent in a private message or private stream message,
    # then anyone who received that message can access it.
    received = UserMessage.objects.filter(
        user_profile=user_profile,
        message__in=attachment.messages.all())
    return received.exists()
def get_old_unclaimed_attachments(weeks_ago):
    # type: (int) -> Sequence[Attachment]
    """Attachments older than `weeks_ago` weeks that no message references."""
    # TODO: Change return type to QuerySet[Attachment]
    cutoff = timezone_now() - datetime.timedelta(weeks=weeks_ago)
    return Attachment.objects.filter(messages=None, create_time__lt=cutoff)
class Subscription(ModelReprMixin, models.Model):
    """A user's subscription to a Recipient, with per-user display settings."""
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    recipient = models.ForeignKey(Recipient, on_delete=CASCADE) # type: Recipient
    # Inactive rows are kept around so per-user settings survive
    # unsubscribing and resubscribing.
    active = models.BooleanField(default=True) # type: bool
    in_home_view = models.NullBooleanField(default=True) # type: Optional[bool]
    DEFAULT_STREAM_COLOR = u"#c2c2c2"
    color = models.CharField(max_length=10, default=DEFAULT_STREAM_COLOR) # type: Text
    pin_to_top = models.BooleanField(default=False) # type: bool
    desktop_notifications = models.BooleanField(default=True) # type: bool
    audible_notifications = models.BooleanField(default=True) # type: bool
    push_notifications = models.BooleanField(default=False) # type: bool
    # Combination desktop + audible notifications superseded by the
    # above.
    notifications = models.BooleanField(default=False) # type: bool
    class Meta(object):
        unique_together = ("user_profile", "recipient")
    def __unicode__(self):
        # type: () -> Text
        return u"<Subscription: %r -> %s>" % (self.user_profile, self.recipient)
@cache_with_key(user_profile_by_id_cache_key, timeout=3600*24*7)
def get_user_profile_by_id(uid):
    # type: (int) -> UserProfile
    """Fetch a UserProfile by primary key, via the cache (1-week TTL)."""
    return UserProfile.objects.select_related().get(id=uid)
@cache_with_key(user_profile_by_email_cache_key, timeout=3600*24*7)
def get_user_profile_by_email(email):
    # type: (Text) -> UserProfile
    """Fetch a UserProfile by case-insensitive email, via the cache."""
    return UserProfile.objects.select_related().get(email__iexact=email.strip())
@cache_with_key(user_profile_by_api_key_cache_key, timeout=3600*24*7)
def get_user_profile_by_api_key(api_key):
    # type: (Text) -> UserProfile
    """Fetch a UserProfile by API key, via the cache (1-week TTL)."""
    return UserProfile.objects.select_related().get(api_key=api_key)
@cache_with_key(user_profile_cache_key, timeout=3600*24*7)
def get_user(email, realm):
    # type: (Text, Realm) -> UserProfile
    """Fetch the UserProfile for (email, realm), via the cache."""
    return UserProfile.objects.select_related().get(email__iexact=email.strip(), realm=realm)
def get_user_including_cross_realm(email, realm=None):
    # type: (Text, Optional[Realm]) -> UserProfile
    """Like get_user, but resolves cross-realm system bots by email alone."""
    if email in get_cross_realm_emails():
        return get_system_bot(email)
    # NOTE(review): assert is stripped under `python -O`; consider an explicit
    # exception if realm=None must be rejected in production.
    assert realm is not None
    return get_user(email, realm)
@cache_with_key(bot_profile_cache_key, timeout=3600*24*7)
def get_system_bot(email):
    # type: (Text) -> UserProfile
    """Fetch a system bot's UserProfile by email, via the cache."""
    return UserProfile.objects.select_related().get(email__iexact=email.strip())
@cache_with_key(active_user_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_active_user_dicts_in_realm(realm_id):
    # type: (int) -> List[Dict[str, Any]]
    """Flat dicts (active_user_dict_fields) for every active user in the realm."""
    return UserProfile.objects.filter(
        realm_id=realm_id,
        is_active=True
    ).values(*active_user_dict_fields)
@cache_with_key(active_user_ids_cache_key, timeout=3600*24*7)
def active_user_ids(realm_id):
    # type: (int) -> List[int]
    """Ids of every active user in the realm, via the cache (1-week TTL)."""
    query = UserProfile.objects.filter(
        realm_id=realm_id,
        is_active=True
    ).values_list('id', flat=True)
    return list(query)
@cache_with_key(bot_dicts_in_realm_cache_key, timeout=3600*24*7)
def get_bot_dicts_in_realm(realm):
    # type: (Realm) -> List[Dict[str, Any]]
    """Flat dicts (bot_dict_fields) for every bot in the realm, via the cache."""
    return UserProfile.objects.filter(realm=realm, is_bot=True).values(*bot_dict_fields)
def get_owned_bot_dicts(user_profile, include_all_realm_bots_if_admin=True):
    # type: (UserProfile, bool) -> List[Dict[str, Any]]
    """Dicts describing the bots this user can administer.

    Realm admins see every bot in the realm (unless the flag is False);
    everyone else sees only bots they own.
    """
    if user_profile.is_realm_admin and include_all_realm_bots_if_admin:
        result = get_bot_dicts_in_realm(user_profile.realm)
    else:
        result = UserProfile.objects.filter(realm=user_profile.realm, is_bot=True,
                                            bot_owner=user_profile).values(*bot_dict_fields)
    # TODO: Remove this import cycle
    from zerver.lib.avatar import avatar_url_from_dict
    bot_dicts = []
    for botdict in result:
        bot_dicts.append({
            'email': botdict['email'],
            'user_id': botdict['id'],
            'full_name': botdict['full_name'],
            'bot_type': botdict['bot_type'],
            'is_active': botdict['is_active'],
            'api_key': botdict['api_key'],
            'default_sending_stream': botdict['default_sending_stream__name'],
            'default_events_register_stream': botdict['default_events_register_stream__name'],
            'default_all_public_streams': botdict['default_all_public_streams'],
            'owner': botdict['bot_owner__email'],
            'avatar_url': avatar_url_from_dict(botdict),
        })
    return bot_dicts
def get_prereg_user_by_email(email):
    # type: (Text) -> PreregistrationUser
    """Return the most recent invitation for this address.

    A user can be invited many times, so only the latest
    PreregistrationUser row matters.
    """
    invites = PreregistrationUser.objects.filter(email__iexact=email.strip())
    return invites.latest("invited_at")
def get_cross_realm_emails():
    # type: () -> Set[Text]
    """E-mail addresses of the system bots shared across all realms."""
    return set(settings.CROSS_REALM_BOT_EMAILS)
# The Huddle class represents a group of individuals who have had a
# Group Private Message conversation together. The actual membership
# of the Huddle is stored in the Subscription table just like with
# Streams, and a hash of that list is stored in the huddle_hash field
# below, to support efficiently mapping from a set of users to the
# corresponding Huddle object.
class Huddle(models.Model):
    """A group-PM conversation, identified by a hash of its member user ids.

    See the block comment above this class: membership itself lives in the
    Subscription table.
    """
    # TODO: We should consider whether using
    # CommaSeparatedIntegerField would be better.
    huddle_hash = models.CharField(max_length=40, db_index=True, unique=True) # type: Text
def get_huddle_hash(id_list):
    # type: (List[int]) -> Text
    """Canonical hash for a set of user ids: dedupe, sort, join, digest."""
    unique_ids = sorted(set(id_list))
    hash_key = ",".join(str(user_id) for user_id in unique_ids)
    return make_safe_digest(hash_key)
def huddle_hash_cache_key(huddle_hash):
    # type: (Text) -> Text
    """Cache key under which the Huddle with this hash is stored."""
    return u"huddle_by_hash:{}".format(huddle_hash)
def get_huddle(id_list):
    # type: (List[int]) -> Huddle
    """Fetch (or create) the Huddle for this set of user ids."""
    return get_huddle_backend(get_huddle_hash(id_list), id_list)
@cache_with_key(lambda huddle_hash, id_list: huddle_hash_cache_key(huddle_hash), timeout=3600*24*7)
def get_huddle_backend(huddle_hash, id_list):
    # type: (Text, List[int]) -> Huddle
    """Get or atomically create the Huddle for this hash, creating its
    Recipient and Subscriptions on first use."""
    with transaction.atomic():
        (huddle, created) = Huddle.objects.get_or_create(huddle_hash=huddle_hash)
        if created:
            recipient = Recipient.objects.create(type_id=huddle.id,
                                                 type=Recipient.HUDDLE)
            subs_to_create = [Subscription(recipient=recipient,
                                           user_profile_id=user_profile_id)
                              for user_profile_id in id_list]
            Subscription.objects.bulk_create(subs_to_create)
        return huddle
def clear_database():
    # type: () -> None
    """DANGER: flush the local memcached instance and delete every row in
    the core tables. Presumably only for development/test bootstrapping —
    never run against production data.
    """
    pylibmc.Client(['127.0.0.1']).flush_all()
    model = None # type: Any
    for model in [Message, Stream, UserProfile, Recipient,
                  Realm, Subscription, Huddle, UserMessage, Client,
                  DefaultStream]:
        model.objects.all().delete()
    Session.objects.all().delete()
class UserActivity(models.Model):
    """Aggregate count of a user's requests per (client, query) pair."""
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    client = models.ForeignKey(Client, on_delete=CASCADE) # type: Client
    query = models.CharField(max_length=50, db_index=True) # type: Text
    count = models.IntegerField() # type: int
    last_visit = models.DateTimeField('last visit') # type: datetime.datetime
    class Meta(object):
        unique_together = ("user_profile", "client", "query")
class UserActivityInterval(models.Model):
    """A contiguous time interval during which a user was active."""
    MIN_INTERVAL_LENGTH = datetime.timedelta(minutes=15)
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    start = models.DateTimeField('start time', db_index=True) # type: datetime.datetime
    end = models.DateTimeField('end time', db_index=True) # type: datetime.datetime
class UserPresence(models.Model):
    """Per-(user, client) presence: the last status each client reported.

    Aggregation across a user's clients happens in the static helpers below.
    """
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    client = models.ForeignKey(Client, on_delete=CASCADE) # type: Client
    # Valid statuses
    ACTIVE = 1
    IDLE = 2
    timestamp = models.DateTimeField('presence changed') # type: datetime.datetime
    status = models.PositiveSmallIntegerField(default=ACTIVE) # type: int
    @staticmethod
    def status_to_string(status):
        # type: (int) -> str
        # Raises ValueError for unknown status codes.
        if status == UserPresence.ACTIVE:
            return 'active'
        elif status == UserPresence.IDLE:
            return 'idle'
        else:
            raise ValueError('Unknown status: %s' % (status,))
    @staticmethod
    def get_status_dict_by_user(user_profile):
        # type: (UserProfile) -> Dict[Text, Dict[Any, Any]]
        """Presence info for a single user, keyed by email (see
        get_status_dicts_for_rows for the shape)."""
        query = UserPresence.objects.filter(user_profile=user_profile).values(
            'client__name',
            'status',
            'timestamp',
            'user_profile__email',
            'user_profile__id',
            'user_profile__enable_offline_push_notifications',
        )
        presence_rows = list(query)
        mobile_user_ids = set() # type: Set[int]
        if PushDeviceToken.objects.filter(user=user_profile).exists():
            mobile_user_ids.add(user_profile.id)
        return UserPresence.get_status_dicts_for_rows(presence_rows, mobile_user_ids)
    @staticmethod
    def get_status_dict_by_realm(realm_id):
        # type: (int) -> Dict[Text, Dict[Any, Any]]
        """Presence info for every active human in a realm, keyed by email."""
        user_profile_ids = UserProfile.objects.filter(
            realm_id=realm_id,
            is_active=True,
            is_bot=False
        ).order_by('id').values_list('id', flat=True)
        user_profile_ids = list(user_profile_ids)
        if not user_profile_ids:
            return {}
        # Only consider presence reported within the last two weeks.
        two_weeks_ago = timezone_now() - datetime.timedelta(weeks=2)
        query = UserPresence.objects.filter(
            timestamp__gte=two_weeks_ago
        ).values(
            'client__name',
            'status',
            'timestamp',
            'user_profile__email',
            'user_profile__id',
            'user_profile__enable_offline_push_notifications',
        )
        query = query_for_ids(
            query=query,
            user_ids=user_profile_ids,
            field='user_profile_id'
        )
        presence_rows = list(query)
        # Users with at least one registered push-notification device.
        mobile_query = PushDeviceToken.objects.distinct(
            'user_id'
        ).values_list(
            'user_id',
            flat=True
        )
        mobile_query = query_for_ids(
            query=mobile_query,
            user_ids=user_profile_ids,
            field='user_id'
        )
        mobile_user_ids = set(mobile_query)
        return UserPresence.get_status_dicts_for_rows(presence_rows, mobile_user_ids)
    @staticmethod
    def get_status_dicts_for_rows(presence_rows, mobile_user_ids):
        # type: (List[Dict[str, Any]], Set[int]) -> Dict[Text, Dict[Any, Any]]
        """Turn raw presence rows into {email: {client: info, 'aggregated': info}}."""
        # Group the raw rows by user email.
        info_row_dct = defaultdict(list) # type: DefaultDict[Text, List[Dict[str, Any]]]
        for row in presence_rows:
            email = row['user_profile__email']
            client_name = row['client__name']
            status = UserPresence.status_to_string(row['status'])
            dt = row['timestamp']
            timestamp = datetime_to_timestamp(dt)
            push_enabled = row['user_profile__enable_offline_push_notifications']
            has_push_devices = row['user_profile__id'] in mobile_user_ids
            pushable = (push_enabled and has_push_devices)
            info = dict(
                client=client_name,
                status=status,
                dt=dt,
                timestamp=timestamp,
                pushable=pushable,
            )
            info_row_dct[email].append(info)
        user_statuses = dict() # type: Dict[str, Dict[str, Any]]
        for email, info_rows in info_row_dct.items():
            # Note that datetime values have sub-second granularity, which is
            # mostly important for avoiding test flakes, but it's also technically
            # more precise for real users.
            by_time = lambda row: row['dt']
            most_recent_info = max(info_rows, key=by_time)
            # We don't send datetime values to the client.
            for r in info_rows:
                del r['dt']
            client_dict = {info['client']: info for info in info_rows}
            user_statuses[email] = client_dict
            # The word "aggregated" here is possibly misleading.
            # It's really just the most recent client's info.
            user_statuses[email]['aggregated'] = dict(
                client=most_recent_info['client'],
                status=most_recent_info['status'],
                timestamp=most_recent_info['timestamp'],
            )
        return user_statuses
    @staticmethod
    def to_presence_dict(client_name, status, dt, push_enabled=False,
                         has_push_devices=False):
        # type: (Text, int, datetime.datetime, bool, bool) -> Dict[str, Any]
        """Single-client presence info in the client-facing shape."""
        presence_val = UserPresence.status_to_string(status)
        timestamp = datetime_to_timestamp(dt)
        return dict(
            client=client_name,
            status=presence_val,
            timestamp=timestamp,
            pushable=(push_enabled and has_push_devices),
        )
    def to_dict(self):
        # type: () -> Dict[str, Any]
        return UserPresence.to_presence_dict(
            self.client.name,
            self.status,
            self.timestamp
        )
    @staticmethod
    def status_from_string(status):
        # type: (NonBinaryStr) -> Optional[int]
        # Non-raising inverse of status_to_string; None for unknown strings.
        if status == 'active':
            status_val = UserPresence.ACTIVE # type: Optional[int] # See https://github.com/python/mypy/issues/2611
        elif status == 'idle':
            status_val = UserPresence.IDLE
        else:
            status_val = None
        return status_val
    class Meta(object):
        unique_together = ("user_profile", "client")
class DefaultStream(models.Model):
    """Marks a stream as one new users in the realm are subscribed to by default."""
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
    stream = models.ForeignKey(Stream, on_delete=CASCADE) # type: Stream
    class Meta(object):
        unique_together = ("realm", "stream")
class AbstractScheduledJob(models.Model):
    """Base class for work items scheduled to run at a future time."""
    scheduled_timestamp = models.DateTimeField(db_index=True) # type: datetime.datetime
    # JSON representation of arguments to consumer
    data = models.TextField() # type: Text
    class Meta(object):
        abstract = True
class ScheduledEmail(AbstractScheduledJob):
    """An email queued for future delivery to a user or raw address."""
    # Exactly one of user or address should be set. These are used to
    # filter the set of ScheduledEmails.
    user = models.ForeignKey(UserProfile, null=True, on_delete=CASCADE) # type: Optional[UserProfile]
    # Just the address part of a full "name <address>" email address
    address = models.EmailField(null=True, db_index=True) # type: Optional[Text]
    # Valid types are below
    WELCOME = 1
    DIGEST = 2
    INVITATION_REMINDER = 3
    type = models.PositiveSmallIntegerField() # type: int
    def __str__(self):
        # type: () -> Text
        return u"<ScheduledEmail: %s %s %s>" % (self.type, self.user or self.address,
                                                self.scheduled_timestamp)
# Maps caller-facing email campaign names onto ScheduledEmail.type values.
EMAIL_TYPES = {
    'followup_day1': ScheduledEmail.WELCOME,
    'followup_day2': ScheduledEmail.WELCOME,
    'digest': ScheduledEmail.DIGEST,
    'invitation_reminder': ScheduledEmail.INVITATION_REMINDER,
}
class RealmAuditLog(ModelReprMixin, models.Model):
    """Append-only audit trail of state changes within a realm."""
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
    acting_user = models.ForeignKey(UserProfile, null=True, related_name='+', on_delete=CASCADE) # type: Optional[UserProfile]
    modified_user = models.ForeignKey(UserProfile, null=True, related_name='+', on_delete=CASCADE) # type: Optional[UserProfile]
    modified_stream = models.ForeignKey(Stream, null=True, on_delete=CASCADE) # type: Optional[Stream]
    event_last_message_id = models.IntegerField(null=True) # type: Optional[int]
    event_type = models.CharField(max_length=40) # type: Text
    event_time = models.DateTimeField(db_index=True) # type: datetime.datetime
    # If True, event_time is an overestimate of the true time. Can be used
    # by migrations when introducing a new event_type.
    backfilled = models.BooleanField(default=False) # type: bool
    extra_data = models.TextField(null=True) # type: Optional[Text]
    def __unicode__(self):
        # type: () -> Text
        # Prefer the most specific modified object for the repr.
        if self.modified_user is not None:
            return u"<RealmAuditLog: %s %s %s>" % (self.modified_user, self.event_type, self.event_time)
        if self.modified_stream is not None:
            return u"<RealmAuditLog: %s %s %s>" % (self.modified_stream, self.event_type, self.event_time)
        # Fix: this literal previously lacked the u"" prefix, making this
        # branch return bytes on Python 2 unlike the branches above.
        return u"<RealmAuditLog: %s %s %s>" % (self.realm, self.event_type, self.event_time)
class UserHotspot(models.Model):
    """Per-user record of an onboarding 'hotspot' event, unique per
    (user, hotspot) pair."""
    user = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    # Short string key identifying the hotspot (max 30 characters).
    hotspot = models.CharField(max_length=30) # type: Text
    # When the record was created; defaults to the current time.
    timestamp = models.DateTimeField(default=timezone_now) # type: datetime.datetime
    class Meta(object):
        # Each hotspot can be recorded at most once per user.
        unique_together = ("user", "hotspot")
class CustomProfileField(models.Model):
    """Definition of a realm-level custom user-profile field (name plus a
    typed value format with validator/converter)."""
    realm = models.ForeignKey(Realm, on_delete=CASCADE) # type: Realm
    name = models.CharField(max_length=100) # type: Text
    # Supported field value types.
    INTEGER = 1
    FLOAT = 2
    SHORT_TEXT = 3
    LONG_TEXT = 4
    FIELD_TYPE_DATA = [
        # Type, Name, Validator, Converter
        (INTEGER, u'Integer', check_int, int),
        (FLOAT, u'Float', check_float, float),
        (SHORT_TEXT, u'Short Text', check_short_string, str),
        (LONG_TEXT, u'Long Text', check_string, str),
    ] # type: List[Tuple[int, Text, Callable[[str, Any], str], Callable[[Any], Any]]]
    # Derived lookup tables keyed by the integer type constant.
    FIELD_VALIDATORS = {item[0]: item[2] for item in FIELD_TYPE_DATA} # type: Dict[int, Callable[[str, Any], str]]
    FIELD_CONVERTERS = {item[0]: item[3] for item in FIELD_TYPE_DATA} # type: Dict[int, Callable[[Any], Any]]
    FIELD_TYPE_CHOICES = [(item[0], item[1]) for item in FIELD_TYPE_DATA] # type: List[Tuple[int, Text]]
    field_type = models.PositiveSmallIntegerField(choices=FIELD_TYPE_CHOICES,
                                                  default=SHORT_TEXT) # type: int
    class Meta(object):
        # Field names are unique within a realm.
        unique_together = ('realm', 'name')
    def as_dict(self):
        # type: () -> Dict[str, Union[int, Text]]
        """API-style dictionary representation of this field definition."""
        return {
            'id': self.id,
            'name': self.name,
            'type': self.field_type,
        }
def custom_profile_fields_for_realm(realm_id):
    # type: (int) -> List[CustomProfileField]
    """Return the realm's custom profile fields, ordered by name."""
    fields = CustomProfileField.objects.filter(realm=realm_id)
    return fields.order_by('name')
class CustomProfileFieldValue(models.Model):
    """A user's value for one custom profile field; one row per
    (user_profile, field) pair."""
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    field = models.ForeignKey(CustomProfileField, on_delete=CASCADE) # type: CustomProfileField
    # Stored as text; conversion/validation is driven by field.field_type.
    value = models.TextField() # type: Text
    class Meta(object):
        unique_together = ('user_profile', 'field')
# Service interface identifiers.
# An interface provides additional functionality, like parsing a message to
# obtain the query URL and the data to be sent to that URL, and parsing the
# service's response.
GENERIC_INTERFACE = u'GenericService'
SLACK_INTERFACE = u'SlackOutgoingWebhookService'
# A Service corresponds to either an outgoing webhook bot or an embedded bot.
# The type of Service is determined by the bot_type field of the referenced
# UserProfile.
#
# If the Service is an outgoing webhook bot:
# - name is any human-readable identifier for the Service
# - base_url is the address of the third-party site
# - token is used for authentication with the third-party site
#
# If the Service is an embedded bot:
# - name is the canonical name for the type of bot (e.g. 'xkcd' for an instance
# of the xkcd bot); multiple embedded bots can have the same name, but all
# embedded bots with the same name will run the same code
# - base_url and token are currently unused
class Service(models.Model):
    """Configuration for a bot-backed service (outgoing webhook bot or
    embedded bot); see the comment block above for field semantics."""
    name = models.CharField(max_length=UserProfile.MAX_NAME_LENGTH) # type: Text
    # Bot user corresponding to the Service. The bot_type of this user
    # determines the type of service. If non-bot services are added later,
    # user_profile can also represent the owner of the Service.
    user_profile = models.ForeignKey(UserProfile, on_delete=CASCADE) # type: UserProfile
    base_url = models.TextField() # type: Text
    token = models.TextField() # type: Text
    # Interface / API version of the service.
    interface = models.PositiveSmallIntegerField(default=1) # type: int
    # Valid interfaces are {generic, zulip_bot_service, slack}
    GENERIC = 1
    SLACK = 2
    ALLOWED_INTERFACE_TYPES = [
        GENERIC,
        SLACK,
    ]
    # N.B. If we used Django's choice=... we would get this for free (kinda)
    _interfaces = {
        GENERIC: GENERIC_INTERFACE,
        SLACK: SLACK_INTERFACE,
    } # type: Dict[int, Text]
    def interface_name(self):
        # type: () -> Text
        """Interface identifier string for this service."""
        # Raises KeyError if invalid
        return self._interfaces[self.interface]
def get_realm_outgoing_webhook_services_name(realm):
    # type: (Realm) -> List[Any]
    """Names of all outgoing-webhook bot services in the given realm."""
    services = Service.objects.filter(
        user_profile__realm=realm, user_profile__is_bot=True,
        user_profile__bot_type=UserProfile.OUTGOING_WEBHOOK_BOT)
    return list(services.values('name'))
def get_bot_services(user_profile_id):
    # type: (str) -> List[Service]
    """All Service rows attached to the given bot user."""
    matching = Service.objects.filter(user_profile__id=user_profile_id)
    return list(matching)
def get_service_profile(user_profile_id, service_name):
    # type: (str, str) -> Service
    """Look up one Service by its owning user id and its name."""
    return Service.objects.get(user_profile__id=user_profile_id,
                               name=service_name)
| amanharitsh123/zulip | zerver/models.py | Python | apache-2.0 | 82,673 | [
"VisIt"
] | 1716dacd42c413e566a22c0073c4dccf94ecd8128c541b58392a8deed88c0fa6 |
import numpy as np
from ase.data import atomic_numbers, chemical_symbols
from ase.units import Bohr
from gpaw.setup import Setups
from gpaw.xc import XC
from gpaw.mpi import world
# Van der Waals radii of the rare-gas atoms; the name suggests the values
# are from Bondi, J. Phys. Chem. (1964) -- TODO confirm against the paper.
Bondi64jpc_vdWradii = { # units Angstrom
    'He' : 1.40,
    'Ne' : 1.54,
    'Ar' : 1.88,
    'Kr' : 2.02,
    'Xe' : 2.16
}
def vdWradii(symbols, xc):
    """Find the elements van der Waals radius.

    Method proposed in:
    Tkatchenko and Scheffler PRL 102 (2009) 073005

    For each element, the all-electron atomic density of the rare gas
    terminating the element's row is evaluated at that rare gas's
    tabulated vdW radius; the element's radius is the point where its own
    atomic density falls to the same value (linear interpolation on the
    radial grid in both steps).

    symbols: sequence of chemical symbols.
    xc: exchange-correlation functional name or XC object.

    The returned radii are given in Angstroms.
    Raises ValueError for elements beyond the heaviest tabulated rare gas
    (previously this failed later with an obscure error).
    """
    Z_rare_gas = sorted(atomic_numbers[symbol]
                        for symbol in Bondi64jpc_vdWradii)
    if isinstance(xc, str):
        xc = XC(xc)

    def get_density(Z):
        """Return all-electron density and radial grid from the PAW setup."""
        setups = Setups([Z], 'paw', {}, 2,
                        xc, world)
        setup = setups[0].data
        # core density plus occupation-weighted partial waves
        n_g = setup.nc_g.copy()
        for f, phi_g in zip(setup.f_j, setup.phi_jg):
            n_g += f * phi_g**2
        return n_g, setup.rgd.r_g

    radii = []
    radius = {}  # symbol -> radius cache; each element computed once
    for symbol in symbols:
        Z = atomic_numbers[symbol]
        if symbol not in radius:
            # rare gas terminating this element's row (smallest Zr >= Z)
            Zrg = None
            for Zr in Z_rare_gas:
                if Z <= Zr:
                    Zrg = Zr
                    break
            if Zrg is None:
                raise ValueError(
                    'No tabulated rare-gas reference for ' + symbol)
            n_g, r_g = get_density(Zrg)
            # density of the rare gas at its tabulated vdW radius R
            R = Bondi64jpc_vdWradii[chemical_symbols[Zrg]] / Bohr
            n = 0
            while r_g[n] < R:
                n += 1
            # linear interpolation
            ncut = (n_g[n - 1] +
                    (n_g[n] - n_g[n - 1]) * (R - r_g[n - 1]) / (r_g[n] - r_g[n - 1]))
            # find the radius where this element's density drops to ncut
            n_g, r_g = get_density(Z)
            n = 0
            while n_g[n] > ncut:
                n += 1
            # linear interpolation
            R = (r_g[n - 1] +
                 (r_g[n] - r_g[n - 1]) * (ncut - n_g[n - 1]) / (n_g[n] - n_g[n - 1]))
            radius[symbol] = R * Bohr
        radii.append(radius[symbol])
    return radii
| ajylee/gpaw-rtxs | gpaw/analyse/vdwradii.py | Python | gpl-3.0 | 2,195 | [
"ASE",
"GPAW"
] | 523ac887dbe8e3d6994e49473abba287d406923757ca5174d2f035c5f7553f4e |
""" Python test discovery, setup and run of test functions. """
import re
import fnmatch
import functools
import py
import inspect
import sys
import pytest
from _pytest.mark import MarkDecorator, MarkerError
from py._code.code import TerminalRepr
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
import _pytest
import _pytest._pluggy as pluggy
# Package directories of _pytest and pluggy; traceback entries inside
# these are pruned by filter_traceback below.
cutdir2 = py.path.local(_pytest.__file__).dirpath()
cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
NoneType = type(None)
# Sentinel meaning "no value supplied".
NOTSET = object()
isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(''))
# Render a function's signature as a string; prefer inspect.signature
# (Python 3.3+) and fall back to the legacy getargspec formatting.
if hasattr(inspect, 'signature'):
    def _format_args(func):
        return str(inspect.signature(func))
else:
    def _format_args(func):
        return inspect.formatargspec(*inspect.getargspec(func))
def _has_positional_arg(func):
return func.__code__.co_argcount
def filter_traceback(entry):
    """True for traceback entries outside the pluggy and _pytest packages."""
    in_pluggy = entry.path == cutdir1
    in_pytest = bool(entry.path.relto(cutdir2))
    return not (in_pluggy or in_pytest)
def get_real_func(obj):
    """Unwrap ``functools.wraps`` chains (``__wrapped__``) and a trailing
    ``functools.partial`` to reach the underlying function object."""
    while hasattr(obj, "__wrapped__"):
        obj = obj.__wrapped__
    return obj.func if isinstance(obj, functools.partial) else obj
def getfslineno(obj):
    """Return (filesystem path, line number) for *obj*.

    Unwraps decorators first; a ``place_as`` attribute lets decorators
    redirect where the object should be reported.
    """
    # xxx let decorators etc specify a sane ordering
    obj = get_real_func(obj)
    if hasattr(obj, 'place_as'):
        obj = obj.place_as
    fslineno = py.code.getfslineno(obj)
    assert isinstance(fslineno[1], int), obj
    return fslineno
def getimfunc(func):
    """Return the plain function behind a method.

    Tries ``__func__`` (py3 bound methods) then ``im_func`` (py2), and
    falls back to the object itself.
    """
    for attr in ("__func__", "im_func"):
        try:
            return getattr(func, attr)
        except AttributeError:
            continue
    return func
def safe_getattr(object, name, default):
    """Like getattr but return *default* upon any Exception.

    Attribute access can potentially fail for 'evil' Python objects whose
    properties raise; see issue214.
    """
    try:
        value = getattr(object, name, default)
    except Exception:
        return default
    return value
class FixtureFunctionMarker:
    """Marker object recording fixture options; calling it attaches the
    marker to a fixture function."""
    def __init__(self, scope, params,
                 autouse=False, yieldctx=False, ids=None):
        self.scope = scope
        self.params = params
        self.autouse = autouse
        self.yieldctx = yieldctx
        self.ids = ids
    def __call__(self, function):
        # only plain functions may be fixtures
        if not isclass(function):
            function._pytestfixturefunction = self
            return function
        raise ValueError(
            "class fixtures not supported (may be in the future)")
def fixture(scope="function", params=None, autouse=False, ids=None):
    """Decorator (or decorator factory) marking a fixture factory function.

    May be applied bare (``@fixture``) or with arguments. Tests reference
    the fixture by naming it as an input argument; test modules/classes can
    activate it via the ``pytest.mark.usefixtures(fixturename)`` marker.

    :arg scope: sharing scope, one of "function" (default), "class",
        "module", "session".
    :arg params: optional list of parameters; each causes a separate
        invocation of the fixture function and of all tests using it.
    :arg autouse: if True, activate the fixture for all tests that can see
        it; if False (the default) an explicit reference is required.
    :arg ids: optional list of string ids, one per param, included in the
        test id; generated from the params when omitted.
    """
    if callable(scope) and params is None and autouse == False:
        # bare usage: ``scope`` is actually the decorated function
        func = scope
        return FixtureFunctionMarker("function", params, autouse)(func)
    if params is not None and not isinstance(params, (list, tuple)):
        params = list(params)
    return FixtureFunctionMarker(scope, params, autouse, ids=ids)
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
    """Like :py:func:`fixture` but the fixture function provides its value
    with ``yield`` instead of ``return`` (EXPERIMENTAL).

    See http://pytest.org/en/latest/yieldfixture.html for more info.
    """
    if callable(scope) and params is None and autouse == False:
        # bare usage: ``scope`` is the decorated generator function
        return FixtureFunctionMarker(
            "function", params, autouse, yieldctx=True)(scope)
    return FixtureFunctionMarker(scope, params, autouse,
                                 yieldctx=True, ids=ids)
defaultfuncargprefixmarker = fixture()
def pyobj_property(name):
    """Build a read-only property returning the obj of the nearest parent
    node of the given pytest collector type, or None."""
    doc = "python %s object this node was collected from (can be None)." % (
        name.lower(),)
    def get(self):
        node = self.getparent(getattr(pytest, name))
        if node is None:
            return None
        return node.obj
    return property(get, None, None, doc)
def pytest_addoption(parser):
    """Register command-line options and ini settings used by the python
    test-collection plugin (--fixtures, discovery patterns, import mode)."""
    group = parser.getgroup("general")
    group.addoption('--fixtures', '--funcargs',
               action="store_true", dest="showfixtures", default=False,
               help="show available fixtures, sorted by plugin appearance")
    parser.addini("usefixtures", type="args", default=[],
           help="list of default fixtures to be used with this project")
    parser.addini("python_files", type="args",
        default=['test_*.py', '*_test.py'],
        help="glob-style file patterns for Python test module discovery")
    parser.addini("python_classes", type="args", default=["Test",],
        help="prefixes or glob names for Python test class discovery")
    parser.addini("python_functions", type="args", default=["test",],
        help="prefixes or glob names for Python test function and "
        "method discovery")
    group.addoption("--import-mode", default="prepend",
                    choices=["prepend", "append"], dest="importmode",
                    help="prepend/append to sys.path when importing test modules, "
                         "default is to prepend.")
def pytest_cmdline_main(config):
    """Short-circuit the normal run when --fixtures/--funcargs was given."""
    if not config.option.showfixtures:
        return None
    showfixtures(config)
    return 0
def pytest_generate_tests(metafunc):
    """Apply @pytest.mark.parametrize markers; raise a helpful error for the
    common misspellings of 'parametrize'."""
    for wrong in ('parameterize', 'parametrise', 'parameterise'):
        if hasattr(metafunc.function, wrong):
            raise MarkerError(
                "{0} has '{1}', spelling should be 'parametrize'".format(
                    metafunc.function.__name__, wrong))
    try:
        markers = metafunc.function.parametrize
    except AttributeError:
        return
    for marker in markers:
        metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
    """Register the 'parametrize' and 'usefixtures' marker descriptions so
    they appear in ``pytest --markers`` output."""
    config.addinivalue_line("markers",
        "parametrize(argnames, argvalues): call a test function multiple "
        "times passing in different arguments in turn. argvalues generally "
        "needs to be a list of values if argnames specifies only one name "
        "or a list of tuples of values if argnames specifies multiple names. "
        "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
        "decorated test function, one with arg1=1 and another with arg1=2."
        "see http://pytest.org/latest/parametrize.html for more info and "
        "examples."
    )
    config.addinivalue_line("markers",
        "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
        "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
    )
def pytest_sessionstart(session):
    """Create the session-wide FixtureManager before collection starts."""
    session._fixturemanager = FixtureManager(session)
@pytest.hookimpl(trylast=True)
def pytest_namespace():
    """Export this plugin's public names into the ``pytest`` namespace."""
    # make pytest.raises.Exception an alias of pytest.fail.Exception
    raises.Exception = pytest.fail.Exception
    return {
        'fixture': fixture,
        'yield_fixture': yield_fixture,
        'raises' : raises,
        'collect': {
        'Module': Module, 'Class': Class, 'Instance': Instance,
        'Function': Function, 'Generator': Generator,
        '_fillfuncargs': fillfixtures}
    }
@fixture(scope="session")
def pytestconfig(request):
    """ the pytest config object with access to command line opts.

    Session-scoped: the same config instance is shared by all tests.
    """
    return request.config
@pytest.hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
    """Default call hook: invoke the python test function with either its
    yielded args or its resolved fixture arguments."""
    testfunction = pyfuncitem.obj
    if pyfuncitem._isyieldedfunction():
        testfunction(*pyfuncitem._args)
    else:
        funcargs = pyfuncitem.funcargs
        testargs = dict((argname, funcargs[argname])
                        for argname in pyfuncitem._fixtureinfo.argnames)
        testfunction(**testargs)
    return True
def pytest_collect_file(path, parent):
    """Collect .py files as Modules when they match a python_files pattern;
    initial command-line paths are always collected."""
    if path.ext != ".py":
        return None
    if not parent.session.isinitpath(path):
        patterns = parent.config.getini('python_files')
        if not any(path.fnmatch(pat) for pat in patterns):
            return None
    ihook = parent.session.gethookproxy(path)
    return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
    """Default factory turning a collected python file into a Module node."""
    return Module(path, parent)
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
    """Default item factory (hookwrapper): if no other plugin produced an
    item, collect test classes, test functions and yield-generators here."""
    outcome = yield
    res = outcome.get_result()
    if res is not None:
        # another hook implementation already produced a result
        raise StopIteration
    # nothing was collected elsewhere, let's do it here
    if isclass(obj):
        if collector.istestclass(obj, name):
            Class = collector._getcustomclass("Class")
            outcome.force_result(Class(name, parent=collector))
    elif collector.istestfunction(obj, name):
        # mock seems to store unbound methods (issue473), normalize it
        obj = getattr(obj, "__func__", obj)
        if not isfunction(obj):
            collector.warn(code="C2", message=
                "cannot collect %r because it is not a function."
                % name, )
        if getattr(obj, "__test__", True):
            if is_generator(obj):
                res = Generator(name, parent=collector)
            else:
                res = list(collector._genfunctions(name, obj))
            outcome.force_result(res)
def is_generator(func):
    """Return a truthy value if *func* was compiled as a generator function.

    Objects without bytecode (e.g. builtins) are assumed not to be
    generators.
    """
    try:
        # use the named flag instead of the magic number 32
        return py.code.getrawcode(func).co_flags & inspect.CO_GENERATOR
    except AttributeError: # builtin functions have no bytecode
        # assume them to not be generators
        return False
class PyobjContext(object):
    """Mixin exposing the python module/class/instance a node was collected
    from as read-only properties (each can be None)."""
    module = pyobj_property("Module")
    cls = pyobj_property("Class")
    instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
    """Mixin for collection nodes backed by a python object (``obj``)."""
    # ``obj`` is a lazily-computed, settable property; the inner function
    # is called immediately below to build the property object.
    def obj():
        def fget(self):
            try:
                return self._obj
            except AttributeError:
                # compute and cache on first access
                self._obj = obj = self._getobj()
                return obj
        def fset(self, value):
            self._obj = value
        return property(fget, fset, None, "underlying python object")
    obj = obj()
    def _getobj(self):
        # default: look the name up on the parent's python object
        return getattr(self.parent.obj, self.name)
    def getmodpath(self, stopatmodule=True, includemodule=False):
        """ return python path relative to the containing module. """
        chain = self.listchain()
        chain.reverse()
        parts = []
        for node in chain:
            if isinstance(node, Instance):
                continue
            name = node.name
            if isinstance(node, Module):
                assert name.endswith(".py")
                name = name[:-3]
                if stopatmodule:
                    if includemodule:
                        parts.append(name)
                    break
            parts.append(name)
        parts.reverse()
        s = ".".join(parts)
        # "name.[param]" -> "name[param]"
        return s.replace(".[", "[")
    def _getfslineno(self):
        return getfslineno(self.obj)
    def reportinfo(self):
        """Return (fspath, lineno, modpath) used for test reporting."""
        # XXX caching?
        obj = self.obj
        if hasattr(obj, 'compat_co_firstlineno'):
            # nose compatibility
            fspath = sys.modules[obj.__module__].__file__
            if fspath.endswith(".pyc"):
                fspath = fspath[:-1]
            lineno = obj.compat_co_firstlineno
        else:
            fspath, lineno = getfslineno(obj)
        modpath = self.getmodpath()
        assert isinstance(lineno, int)
        return fspath, lineno, modpath
class PyCollector(PyobjMixin, pytest.Collector):
    """Base collector for python objects: applies the name filters from the
    ini configuration and turns attributes into collection items."""
    def funcnamefilter(self, name):
        return self._matches_prefix_or_glob_option('python_functions', name)
    def isnosetest(self, obj):
        """ Look for the __test__ attribute, which is applied by the
        @nose.tools.istest decorator
        """
        return safe_getattr(obj, '__test__', False)
    def classnamefilter(self, name):
        return self._matches_prefix_or_glob_option('python_classes', name)
    def istestfunction(self, obj, name):
        # a test function must match the name filter (or be nose-marked),
        # be callable, and not itself be a fixture
        return (
            (self.funcnamefilter(name) or self.isnosetest(obj))
            and safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
        )
    def istestclass(self, obj, name):
        return self.classnamefilter(name) or self.isnosetest(obj)
    def _matches_prefix_or_glob_option(self, option_name, name):
        """
        checks if the given name matches the prefix or glob-pattern defined
        in ini configuration.
        """
        for option in self.config.getini(option_name):
            if name.startswith(option):
                return True
            # check that name looks like a glob-string before calling fnmatch
            # because this is called for every name in each collected module,
            # and fnmatch is somewhat expensive to call
            elif ('*' in option or '?' in option or '[' in option) and \
                    fnmatch.fnmatch(name, option):
                return True
        return False
    def collect(self):
        """Collect child items from the python object's attributes."""
        if not getattr(self.obj, "__test__", True):
            return []
        # NB. we avoid random getattrs and peek in the __dict__ instead
        # (XXX originally introduced from a PyPy need, still true?)
        dicts = [getattr(self.obj, '__dict__', {})]
        for basecls in inspect.getmro(self.obj.__class__):
            dicts.append(basecls.__dict__)
        seen = {}
        l = []
        for dic in dicts:
            for name, obj in dic.items():
                if name in seen:
                    continue
                seen[name] = True
                res = self.makeitem(name, obj)
                if res is None:
                    continue
                if not isinstance(res, list):
                    res = [res]
                l.extend(res)
        # sort by (fspath, lineno) so items follow source order
        l.sort(key=lambda item: item.reportinfo()[:2])
        return l
    def makeitem(self, name, obj):
        #assert self.ihook.fspath == self.fspath, self
        return self.ihook.pytest_pycollect_makeitem(
            collector=self, name=name, obj=obj)
    def _genfunctions(self, name, funcobj):
        """Yield Function items for *funcobj*, one per parametrized call."""
        module = self.getparent(Module).obj
        clscol = self.getparent(Class)
        cls = clscol and clscol.obj or None
        transfer_markers(funcobj, cls, module)
        fm = self.session._fixturemanager
        fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
        metafunc = Metafunc(funcobj, fixtureinfo, self.config,
                            cls=cls, module=module)
        # module- and class-level pytest_generate_tests hooks take
        # precedence over the globally registered implementations
        methods = []
        if hasattr(module, "pytest_generate_tests"):
            methods.append(module.pytest_generate_tests)
        if hasattr(cls, "pytest_generate_tests"):
            methods.append(cls().pytest_generate_tests)
        if methods:
            self.ihook.pytest_generate_tests.call_extra(methods,
                                                        dict(metafunc=metafunc))
        else:
            self.ihook.pytest_generate_tests(metafunc=metafunc)
        Function = self._getcustomclass("Function")
        if not metafunc._calls:
            yield Function(name, parent=self, fixtureinfo=fixtureinfo)
        else:
            # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
            add_funcarg_pseudo_fixture_def(self, metafunc, fm)
            for callspec in metafunc._calls:
                subname = "%s[%s]" %(name, callspec.id)
                yield Function(name=subname, parent=self,
                               callspec=callspec, callobj=funcobj,
                               fixtureinfo=fixtureinfo,
                               keywords={callspec.id:True})
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
    """Turn direct (non-indirect) parametrization into artificial FixtureDef
    objects so test execution can treat all arguments uniformly."""
    # this function will transform all collected calls to a functions
    # if they use direct funcargs (i.e. direct parametrization)
    # because we want later test execution to be able to rely on
    # an existing FixtureDef structure for all arguments.
    # XXX we can probably avoid this algorithm if we modify CallSpec2
    # to directly care for creating the fixturedefs within its methods.
    if not metafunc._calls[0].funcargs:
        return # this function call does not have direct parametrization
    # collect funcargs of all callspecs into a list of values
    arg2params = {}
    arg2scope = {}
    for callspec in metafunc._calls:
        for argname, argvalue in callspec.funcargs.items():
            assert argname not in callspec.params
            # move the value from funcargs into params/indices
            callspec.params[argname] = argvalue
            arg2params_list = arg2params.setdefault(argname, [])
            callspec.indices[argname] = len(arg2params_list)
            arg2params_list.append(argvalue)
            if argname not in arg2scope:
                scopenum = callspec._arg2scopenum.get(argname,
                                                      scopenum_function)
                arg2scope[argname] = scopes[scopenum]
        callspec.funcargs.clear()
    # register artificial FixtureDef's so that later at test execution
    # time we can rely on a proper FixtureDef to exist for fixture setup.
    arg2fixturedefs = metafunc._arg2fixturedefs
    for argname, valuelist in arg2params.items():
        # if we have a scope that is higher than function we need
        # to make sure we only ever create an according fixturedef on
        # a per-scope basis. We thus store and cache the fixturedef on the
        # node related to the scope.
        scope = arg2scope[argname]
        node = None
        if scope != "function":
            node = get_scope_node(collector, scope)
            if node is None:
                assert scope == "class" and isinstance(collector, Module)
                # use module-level collector for class-scope (for now)
                node = collector
        if node and argname in node._name2pseudofixturedef:
            arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
        else:
            fixturedef = FixtureDef(fixturemanager, '', argname,
                                    get_direct_param_fixture_func,
                                    arg2scope[argname],
                                    valuelist, False, False)
            arg2fixturedefs[argname] = [fixturedef]
            if node is not None:
                node._name2pseudofixturedef[argname] = fixturedef
def get_direct_param_fixture_func(request):
    """Fixture function backing direct parametrization: the fixture value
    is simply the parameter itself."""
    return request.param
class FuncFixtureInfo:
    """Container describing one test function's fixture requirements."""
    def __init__(self, argnames, names_closure, name2fixturedefs):
        # fixture names the test function takes directly as arguments
        self.argnames = argnames
        # transitive closure of all fixture names needed
        self.names_closure = names_closure
        # mapping of fixture name -> fixture definitions
        self.name2fixturedefs = name2fixturedefs
def _marked(func, mark):
""" Returns True if :func: is already marked with :mark:, False otherwise.
This can happen if marker is applied to class and the test file is
invoked more than once.
"""
try:
func_mark = getattr(func, mark.name)
except AttributeError:
return False
return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
def transfer_markers(funcobj, cls, mod):
    """Apply class-level and module-level ``pytestmark`` markers to the
    given test function, skipping marks it already carries."""
    # XXX this should rather be code in the mark plugin or the mark
    # plugin should merge with the python plugin.
    for holder in (cls, mod):
        try:
            pytestmark = holder.pytestmark
        except AttributeError:
            continue
        # normalize a single mark to a list so both forms share one path
        marks = pytestmark if isinstance(pytestmark, list) else [pytestmark]
        for mark in marks:
            if not _marked(funcobj, mark):
                mark(funcobj)
class Module(pytest.File, PyCollector):
    """ Collector for test classes and functions. """
    def _getobj(self):
        # import the module lazily, at most once
        return self._memoizedcall('_obj', self._importtestmodule)
    def collect(self):
        self.session._fixturemanager.parsefactories(self)
        return super(Module, self).collect()
    def _importtestmodule(self):
        """Import the test module, turning import problems into CollectError."""
        # we assume we are only called once per module
        importmode = self.config.getoption("--import-mode")
        try:
            mod = self.fspath.pyimport(ensuresyspath=importmode)
        except SyntaxError:
            raise self.CollectError(
                py.code.ExceptionInfo().getrepr(style="short"))
        except self.fspath.ImportMismatchError:
            e = sys.exc_info()[1]
            raise self.CollectError(
                "import file mismatch:\n"
                "imported module %r has this __file__ attribute:\n"
                "  %s\n"
                "which is not the same as the test file we want to collect:\n"
                "  %s\n"
                "HINT: remove __pycache__ / .pyc files and/or use a "
                "unique basename for your test file modules"
                 % e.args
            )
        #print "imported test module", mod
        self.config.pluginmanager.consider_module(mod)
        return mod
    def setup(self):
        """Run xunit-style module setup and register module teardown."""
        # setUpModule (unittest spelling) takes precedence over setup_module
        setup_module = xunitsetup(self.obj, "setUpModule")
        if setup_module is None:
            setup_module = xunitsetup(self.obj, "setup_module")
        if setup_module is not None:
            #XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, it's probably a pytest style one
            # so we pass the current module object
            if _has_positional_arg(setup_module):
                setup_module(self.obj)
            else:
                setup_module()
        fin = getattr(self.obj, 'tearDownModule', None)
        if fin is None:
            fin = getattr(self.obj, 'teardown_module', None)
        if fin is not None:
            #XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, it's probably a pytest style one
            # so we pass the current module object
            if _has_positional_arg(fin):
                finalizer = lambda: fin(self.obj)
            else:
                finalizer = fin
            self.addfinalizer(finalizer)
class Class(PyCollector):
    """ Collector for test methods. """
    def collect(self):
        # classes with a custom __init__ cannot be instantiated for testing
        if hasinit(self.obj):
            self.warn("C1", "cannot collect test class %r because it has a "
                "__init__ constructor" % self.obj.__name__)
            return []
        return [self._getcustomclass("Instance")(name="()", parent=self)]
    def setup(self):
        """Run xunit-style setup_class and register teardown_class."""
        setup_class = xunitsetup(self.obj, 'setup_class')
        if setup_class is not None:
            # unwrap py2 unbound methods / classmethod wrappers
            setup_class = getattr(setup_class, 'im_func', setup_class)
            setup_class = getattr(setup_class, '__func__', setup_class)
            setup_class(self.obj)
        fin_class = getattr(self.obj, 'teardown_class', None)
        if fin_class is not None:
            fin_class = getattr(fin_class, 'im_func', fin_class)
            fin_class = getattr(fin_class, '__func__', fin_class)
            self.addfinalizer(lambda: fin_class(self.obj))
class Instance(PyCollector):
    """Collector representing one instance of a test class (name "()")."""
    def _getobj(self):
        # instantiate the test class collected by the parent Class node
        obj = self.parent.obj()
        return obj
    def collect(self):
        self.session._fixturemanager.parsefactories(self)
        return super(Instance, self).collect()
    def newinstance(self):
        """Create (and cache) a fresh instance of the test class."""
        self.obj = self._getobj()
        return self.obj
class FunctionMixin(PyobjMixin):
    """ mixin for the code common to Function and Generator.
    """
    def setup(self):
        """ perform setup for this test function. """
        if hasattr(self, '_preservedparent'):
            # set by Generator.collect to reuse its parent object
            obj = self._preservedparent
        elif isinstance(self.parent, Instance):
            # methods get a fresh class instance per test
            obj = self.parent.newinstance()
            self.obj = self._getobj()
        else:
            obj = self.parent.obj
        if inspect.ismethod(self.obj):
            setup_name = 'setup_method'
            teardown_name = 'teardown_method'
        else:
            setup_name = 'setup_function'
            teardown_name = 'teardown_function'
        setup_func_or_method = xunitsetup(obj, setup_name)
        if setup_func_or_method is not None:
            setup_func_or_method(self.obj)
        fin = getattr(obj, teardown_name, None)
        if fin is not None:
            self.addfinalizer(lambda: fin(self.obj))
    def _prunetraceback(self, excinfo):
        """Cut internal frames from the traceback before reporting."""
        if hasattr(self, '_obj') and not self.config.option.fulltrace:
            code = py.code.Code(get_real_func(self.obj))
            path, firstlineno = code.path, code.firstlineno
            traceback = excinfo.traceback
            # progressively weaker cuts, falling back to the full traceback
            ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
            if ntraceback == traceback:
                ntraceback = ntraceback.cut(path=path)
                if ntraceback == traceback:
                    #ntraceback = ntraceback.cut(excludepath=cutdir2)
                    ntraceback = ntraceback.filter(filter_traceback)
                    if not ntraceback:
                        ntraceback = traceback
            excinfo.traceback = ntraceback.filter()
            # issue364: mark all but first and last frames to
            # only show a single-line message for each frame
            if self.config.option.tbstyle == "auto":
                if len(excinfo.traceback) > 2:
                    for entry in excinfo.traceback[1:-1]:
                        entry.set_repr_style('short')
    def _repr_failure_py(self, excinfo, style="long"):
        # pytest.fail(..., pytrace=False) renders just the message
        if excinfo.errisinstance(pytest.fail.Exception):
            if not excinfo.value.pytrace:
                return str(excinfo.value)
        return super(FunctionMixin, self)._repr_failure_py(excinfo,
                                                           style=style)
    def repr_failure(self, excinfo, outerr=None):
        assert outerr is None, "XXX outerr usage is deprecated"
        style = self.config.option.tbstyle
        if style == "auto":
            style = "long"
        return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
    """Collector for yield-style test generators (legacy feature)."""
    def collect(self):
        # test generators are seen as collectors but they also
        # invoke setup/teardown on popular request
        # (induced by the common "test_*" naming shared with normal tests)
        self.session._setupstate.prepare(self)
        # see FunctionMixin.setup and test_setupstate_is_preserved_134
        self._preservedparent = self.parent.obj
        l = []
        seen = {}
        for i, x in enumerate(self.obj()):
            name, call, args = self.getcallargs(x)
            if not callable(call):
                raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
            if name is None:
                name = "[%d]" % i
            else:
                name = "['%s']" % name
            if name in seen:
                raise ValueError("%r generated tests with non-unique name %r" %(self, name))
            seen[name] = True
            l.append(self.Function(name, self, args=args, callobj=call))
        return l
    def getcallargs(self, obj):
        """Split a yielded item into (name, callable, args)."""
        if not isinstance(obj, (tuple, list)):
            obj = (obj,)
        # explicit naming: a leading string names the test
        if isinstance(obj[0], py.builtin._basestring):
            name = obj[0]
            obj = obj[1:]
        else:
            name = None
        call, args = obj[0], obj[1:]
        return name, call, args
def hasinit(obj):
    """Return True if *obj* defines its own ``__init__`` (i.e. something
    other than the plain ``object.__init__``), else False.

    Previously returned True or implicit None; it now always returns a
    bool, which is truthiness-compatible for existing callers.
    """
    init = getattr(obj, '__init__', None)
    return init is not None and init != object.__init__
def fillfixtures(function):
    """ fill missing funcargs for a test function. """
    try:
        request = function._request
    except AttributeError:
        # XXX this special code path is only expected to execute
        # with the oejskit plugin. It uses classes with funcargs
        # and we thus have to work a bit to allow this.
        fm = function.session._fixturemanager
        fi = fm.getfixtureinfo(function.parent, function.obj, None)
        function._fixtureinfo = fi
        request = function._request = FixtureRequest(function)
        request._fillfixtures()
        # prune out funcargs for jstests
        newfuncargs = {}
        for name in fi.argnames:
            newfuncargs[name] = function.funcargs[name]
        function.funcargs = newfuncargs
    else:
        # normal path: the item already carries a FixtureRequest
        request._fillfixtures()
_notexists = object()
class CallSpec2(object):
    """One concrete parametrized invocation of a test function: its
    funcargs/params, id parts, keywords, and per-argument scope info."""
    def __init__(self, metafunc):
        self.metafunc = metafunc
        # direct (non-indirect) argument values
        self.funcargs = {}
        self._idlist = []
        # indirect parameter values, resolved via fixtures
        self.params = {}
        self._globalid = _notexists
        self._globalid_args = set()
        self._globalparam = _notexists
        self._arg2scopenum = {} # used for sorting parametrized resources
        self.keywords = {}
        self.indices = {}
    def copy(self, metafunc):
        """Return an independent copy of this callspec."""
        cs = CallSpec2(self.metafunc)
        cs.funcargs.update(self.funcargs)
        cs.params.update(self.params)
        cs.keywords.update(self.keywords)
        cs.indices.update(self.indices)
        cs._arg2scopenum.update(self._arg2scopenum)
        cs._idlist = list(self._idlist)
        cs._globalid = self._globalid
        cs._globalid_args = self._globalid_args
        cs._globalparam = self._globalparam
        return cs
    def _checkargnotcontained(self, arg):
        # each argument may be set at most once per callspec
        if arg in self.params or arg in self.funcargs:
            raise ValueError("duplicate %r" %(arg,))
    def getparam(self, name):
        """Parameter value for *name*, falling back to the global param."""
        try:
            return self.params[name]
        except KeyError:
            if self._globalparam is _notexists:
                raise ValueError(name)
            return self._globalparam
    @property
    def id(self):
        # join the non-empty id parts, e.g. "x0-y1"
        return "-".join(map(str, filter(None, self._idlist)))
    def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
                 param_index):
        """Record one parametrize() value set; valtypes maps each arg to
        either 'funcargs' (direct) or 'params' (indirect)."""
        for arg,val in zip(argnames, valset):
            self._checkargnotcontained(arg)
            valtype_for_arg = valtypes[arg]
            getattr(self, valtype_for_arg)[arg] = val
            self.indices[arg] = param_index
            self._arg2scopenum[arg] = scopenum
            if val is _notexists:
                self._emptyparamspecified = True
        self._idlist.append(id)
        self.keywords.update(keywords)
    def setall(self, funcargs, id, param):
        """Set funcargs plus an optional global id and global param."""
        for x in funcargs:
            self._checkargnotcontained(x)
        self.funcargs.update(funcargs)
        if id is not _notexists:
            self._idlist.append(id)
        if param is not _notexists:
            assert self._globalparam is _notexists
            self._globalparam = param
        for arg in funcargs:
            self._arg2scopenum[arg] = scopenum_function
class FuncargnamesCompatAttr:
    """ helper class so that Metafunc, Function and FixtureRequest
    don't need to each define the "funcargnames" compatibility attribute.
    """
    @property
    def funcargnames(self):
        """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
        # kept for backward compatibility only; prefer ``fixturenames``
        return self.fixturenames
class Metafunc(FuncargnamesCompatAttr):
    """
    Metafunc objects are passed to the ``pytest_generate_tests`` hook.
    They help to inspect a test function and to generate tests according to
    test configuration or values specified in the class or module where a
    test function is defined.

    :ivar fixturenames: set of fixture names required by the test function

    :ivar function: underlying python test function

    :ivar cls: class object where the test function is defined in or ``None``.

    :ivar module: the module object where the test function is defined in.

    :ivar config: access to the :class:`_pytest.config.Config` object for the
        test session.

    :ivar funcargnames:
        .. deprecated:: 2.3
            Use ``fixturenames`` instead.
    """

    def __init__(self, function, fixtureinfo, config, cls=None, module=None):
        self.config = config
        self.module = module
        self.function = function
        # closure of all fixture names this function (transitively) needs
        self.fixturenames = fixtureinfo.names_closure
        self._arg2fixturedefs = fixtureinfo.name2fixturedefs
        self.cls = cls
        # accumulated CallSpec2 instances; each becomes one test invocation
        self._calls = []
        # ids already used by addcall(), to detect duplicates
        self._ids = py.builtin.set()

    def parametrize(self, argnames, argvalues, indirect=False, ids=None,
                    scope=None):
        """ Add new invocations to the underlying test function using the list
        of argvalues for the given argnames.  Parametrization is performed
        during the collection phase.  If you need to setup expensive resources
        see about setting indirect to do it rather at test setup time.

        :arg argnames: a comma-separated string denoting one or more argument
                       names, or a list/tuple of argument strings.

        :arg argvalues: The list of argvalues determines how often a
            test is invoked with different argument values. If only one
            argname was specified argvalues is a list of values.  If N
            argnames were specified, argvalues must be a list of N-tuples,
            where each tuple-element specifies a value for its respective
            argname.

        :arg indirect: The list of argnames or boolean. A list of arguments'
            names (subset of argnames). If True the list contains all names from
            the argnames. Each argvalue corresponding to an argname in this list will
            be passed as request.param to its respective argname fixture
            function so that it can perform more expensive setups during the
            setup phase of a test rather than at collection time.

        :arg ids: list of string ids, or a callable.
            If strings, each is corresponding to the argvalues so that they are
            part of the test id.
            If callable, it should take one argument (a single argvalue) and return
            a string or return None.  If None, the automatically generated id for that
            argument will be used.
            If no ids are provided they will be generated automatically from
            the argvalues.

        :arg scope: if specified it denotes the scope of the parameters.
            The scope is used for grouping tests by parameter instances.
            It will also override any fixture-function defined scope, allowing
            to set a dynamic scope using test context or configuration.
        """
        # individual parametrized argument sets can be wrapped in a series
        # of markers in which case we unwrap the values and apply the mark
        # at Function init
        newkeywords = {}
        unwrapped_argvalues = []
        for i, argval in enumerate(argvalues):
            while isinstance(argval, MarkDecorator):
                # peel one mark layer; the last positional arg is the payload
                newmark = MarkDecorator(argval.markname,
                                        argval.args[:-1], argval.kwargs)
                newmarks = newkeywords.setdefault(i, {})
                newmarks[newmark.markname] = newmark
                argval = argval.args[-1]
            unwrapped_argvalues.append(argval)
        argvalues = unwrapped_argvalues

        if not isinstance(argnames, (tuple, list)):
            argnames = [x.strip() for x in argnames.split(",") if x.strip()]
            # single argname given as string: wrap each value in a 1-tuple so
            # the zip() in setmulti() works uniformly
            if len(argnames) == 1:
                argvalues = [(val,) for val in argvalues]
        if not argvalues:
            # empty parameter list: record a sentinel set which later causes
            # the test to be skipped (see CallSpec2.setmulti)
            argvalues = [(_notexists,) * len(argnames)]

        if scope is None:
            scope = "function"
        scopenum = scopes.index(scope)
        valtypes = {}
        for arg in argnames:
            if arg not in self.fixturenames:
                raise ValueError("%r uses no fixture %r" %(self.function, arg))

        # decide per argname whether the value goes to funcargs (direct)
        # or params (indirect, passed via request.param)
        if indirect is True:
            valtypes = dict.fromkeys(argnames, "params")
        elif indirect is False:
            valtypes = dict.fromkeys(argnames, "funcargs")
        elif isinstance(indirect, (tuple, list)):
            valtypes = dict.fromkeys(argnames, "funcargs")
            for arg in indirect:
                if arg not in argnames:
                    raise ValueError("indirect given to %r: fixture %r doesn't exist" %(
                                     self.function, arg))
                valtypes[arg] = "params"
        idfn = None
        if callable(ids):
            idfn = ids
            ids = None
        if ids and len(ids) != len(argvalues):
            raise ValueError('%d tests specified with %d ids' %(
                             len(argvalues), len(ids)))
        if not ids:
            ids = idmaker(argnames, argvalues, idfn)
        # cartesian product: every existing callspec is extended with every
        # new value set
        newcalls = []
        for callspec in self._calls or [CallSpec2(self)]:
            for param_index, valset in enumerate(argvalues):
                assert len(valset) == len(argnames)
                newcallspec = callspec.copy(self)
                newcallspec.setmulti(valtypes, argnames, valset, ids[param_index],
                                     newkeywords.get(param_index, {}), scopenum,
                                     param_index)
                newcalls.append(newcallspec)
        self._calls = newcalls

    def addcall(self, funcargs=None, id=_notexists, param=_notexists):
        """ (deprecated, use parametrize) Add a new call to the underlying
        test function during the collection phase of a test run.  Note that
        request.addcall() is called during the test collection phase prior and
        independently to actual test execution.  You should only use addcall()
        if you need to specify multiple arguments of a test function.

        :arg funcargs: argument keyword dictionary used when invoking
            the test function.

        :arg id: used for reporting and identification purposes.  If you
            don't supply an `id` an automatic unique id will be generated.

        :arg param: a parameter which will be exposed to a later fixture function
            invocation through the ``request.param`` attribute.
        """
        assert funcargs is None or isinstance(funcargs, dict)
        if funcargs is not None:
            for name in funcargs:
                if name not in self.fixturenames:
                    pytest.fail("funcarg %r not used in this function." % name)
        else:
            funcargs = {}
        if id is None:
            raise ValueError("id=None not allowed")
        if id is _notexists:
            # default id: position of this call in the list
            id = len(self._calls)
        id = str(id)
        if id in self._ids:
            raise ValueError("duplicate id %r" % id)
        self._ids.add(id)

        cs = CallSpec2(self)
        cs.setall(funcargs, id, param)
        self._calls.append(cs)
def _idval(val, argname, idx, idfn):
    """Derive the id string for a single parametrized value.

    A user-supplied ``idfn`` gets first shot; exceptions it raises are
    deliberately swallowed so a buggy id callback cannot abort collection.
    Otherwise a best-effort id is derived from the value itself, falling
    back to ``<argname><index>``.
    """
    if idfn:
        try:
            derived = idfn(val)
            if derived:
                return derived
        except Exception:
            pass

    scalar_types = (float, int, str, bool, NoneType)
    if isinstance(val, scalar_types):
        return str(val)
    if isinstance(val, REGEX_TYPE):
        return val.pattern
    if enum is not None and isinstance(val, enum.Enum):
        return str(val)
    if isclass(val) and hasattr(val, '__name__'):
        return val.__name__
    return "%s%s" % (argname, idx)
def _idvalset(idx, valset, argnames, idfn):
    """Build the combined id for one value set: per-value ids joined by '-'."""
    parts = []
    for value, name in zip(valset, argnames):
        parts.append(_idval(value, name, idx, idfn))
    return "-".join(parts)
def idmaker(argnames, argvalues, idfn=None):
    """Compute test ids for all value sets, disambiguating duplicates.

    When a (possibly user-supplied) id function produces clashes, every id
    is prefixed with its positional index to restore uniqueness.
    """
    ids = []
    for valindex, valset in enumerate(argvalues):
        ids.append(_idvalset(valindex, valset, argnames, idfn))
    if len(set(ids)) < len(ids):
        # user may have provided a bad idfn which means the ids are not unique
        ids = ["%d%s" % (i, testid) for i, testid in enumerate(ids)]
    return ids
def showfixtures(config):
    """Implement the ``--fixtures`` command line option: run a
    collection-only session and print all available fixtures via
    :func:`_showfixtures_main`."""
    from _pytest.main import wrap_session
    return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
    """Collect the session and print every visible fixture to the terminal.

    Fixtures are grouped by defining module; private fixtures (leading
    underscore) are hidden unless -v is given.
    """
    import _pytest.config
    session.perform_collect()
    curdir = py.path.local()
    tw = _pytest.config.create_terminal_writer(config)
    verbose = config.getvalue("verbose")

    fm = session._fixturemanager

    available = []
    for argname, fixturedefs in fm._arg2fixturedefs.items():
        assert fixturedefs is not None
        if not fixturedefs:
            continue
        # last registered definition wins (same rule as fixture lookup)
        fixturedef = fixturedefs[-1]
        loc = getlocation(fixturedef.func, curdir)
        available.append((len(fixturedef.baseid),
                          fixturedef.func.__module__,
                          curdir.bestrelpath(loc),
                          fixturedef.argname, fixturedef))

    # sort by (baseid length, module, path, name) so output groups cleanly
    available.sort()
    currentmodule = None
    for baseid, module, bestrel, argname, fixturedef in available:
        if currentmodule != module:
            if not module.startswith("_pytest."):
                tw.line()
                tw.sep("-", "fixtures defined from %s" %(module,))
                currentmodule = module
        if verbose <= 0 and argname[0] == "_":
            continue  # hide private fixtures unless verbose
        if verbose > 0:
            funcargspec = "%s -- %s" %(argname, bestrel,)
        else:
            funcargspec = argname
        tw.line(funcargspec, green=True)
        loc = getlocation(fixturedef.func, curdir)
        doc = fixturedef.func.__doc__ or ""
        if doc:
            for line in doc.strip().split("\n"):
                tw.line("    " + line.strip())
        else:
            tw.line("    %s: no docstring available" %(loc,),
                    red=True)
def getlocation(function, curdir):
    """Return ``path:lineno`` (1-based) for *function*, with the path made
    relative to *curdir* when it lies underneath it."""
    import inspect
    path = py.path.local(inspect.getfile(function))
    lineno = py.builtin._getcode(function).co_firstlineno
    relative = path.relto(curdir)
    if relative:
        path = relative
    return "%s:%d" % (path, lineno + 1)
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
    """ assert that a code block/function call raises @expected_exception
    and raise a failure exception otherwise.

    This helper produces a ``py.code.ExceptionInfo()`` object.

    If using Python 2.5 or above, you may use this function as a
    context manager::

        >>> with raises(ZeroDivisionError):
        ...    1/0

    Or you can specify a callable by passing a to-be-called lambda::

        >>> raises(ZeroDivisionError, lambda: 1/0)
        <ExceptionInfo ...>

    or you can specify an arbitrary callable with arguments::

        >>> def f(x): return 1/x
        ...
        >>> raises(ZeroDivisionError, f, 0)
        <ExceptionInfo ...>
        >>> raises(ZeroDivisionError, f, x=0)
        <ExceptionInfo ...>

    A third possibility is to use a string to be executed::

        >>> raises(ZeroDivisionError, "f(0)")
        <ExceptionInfo ...>

    Performance note:
    -----------------

    Similar to caught exception objects in Python, explicitly clearing
    local references to returned ``py.code.ExceptionInfo`` objects can
    help the Python interpreter speed up its garbage collection.

    Clearing those references breaks a reference cycle
    (``ExceptionInfo`` --> caught exception --> frame stack raising
    the exception --> current frame stack --> local variables -->
    ``ExceptionInfo``) which makes Python keep all objects referenced
    from that cycle (including all local variables in the current
    frame) alive until the next cyclic garbage collection run. See the
    official Python ``try`` statement documentation for more detailed
    information.
    """
    __tracebackhide__ = True
    if expected_exception is AssertionError:
        # we want to catch a AssertionError
        # replace our subclass with the builtin one
        # see https://github.com/pytest-dev/pytest/issues/176
        from _pytest.assertion.util import BuiltinAssertionError \
            as expected_exception
    # validate up front that we were handed exception class(es), not instances
    msg = ("exceptions must be old-style classes or"
           " derived from BaseException, not %s")
    if isinstance(expected_exception, tuple):
        for exc in expected_exception:
            if not isclass(exc):
                raise TypeError(msg % type(exc))
    elif not isclass(expected_exception):
        raise TypeError(msg % type(expected_exception))

    if not args:
        # context-manager form: "with raises(Exc): ..."
        return RaisesContext(expected_exception)
    elif isinstance(args[0], str):
        # string form: compile and exec the code in the caller's frame
        code, = args
        assert isinstance(code, str)
        frame = sys._getframe(1)
        loc = frame.f_locals.copy()
        loc.update(kwargs)
        #print "raises frame scope: %r" % frame.f_locals
        try:
            code = py.code.Source(code).compile()
            py.builtin.exec_(code, frame.f_globals, loc)
            # XXX didn'T mean f_globals == f_locals something special?
            # this is destroyed here ...
        except expected_exception:
            return py.code.ExceptionInfo()
    else:
        # callable form: invoke with remaining positional/keyword args
        func = args[0]
        try:
            func(*args[1:], **kwargs)
        except expected_exception:
            return py.code.ExceptionInfo()
    # reached only when no exception (or the wrong one, which propagates)
    # was raised in the string/callable branches
    pytest.fail("DID NOT RAISE")
class RaisesContext(object):
    """Context manager returned by ``raises(Exc)`` when called with no
    callable: hands out an uninitialized ExceptionInfo shell on entry and
    fills it in on exit, suppressing the exception when it matches."""

    def __init__(self, expected_exception):
        self.expected_exception = expected_exception
        self.excinfo = None

    def __enter__(self):
        # Allocate without running __init__; populated in __exit__ once the
        # exception (if any) is actually known.
        shell = object.__new__(py.code.ExceptionInfo)
        self.excinfo = shell
        return shell

    def __exit__(self, *exc_details):
        __tracebackhide__ = True
        if exc_details[0] is None:
            pytest.fail("DID NOT RAISE")
        if sys.version_info < (2, 7):
            # py26: on __exit__() exc_value often does not contain the
            # exception value, see http://bugs.python.org/issue7853
            if not isinstance(exc_details[1], BaseException):
                etype, evalue, tb = exc_details
                exc_details = etype, etype(evalue), tb
        self.excinfo.__init__(exc_details)
        # a true return value suppresses the matching exception
        return issubclass(self.excinfo.type, self.expected_exception)
#
# the basic pytest Function item
#
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
    """ a Function Item is responsible for setting up and executing a
    Python test function.
    """
    # id fragment contributed by parametrization (the "[...]" suffix)
    _genid = None

    def __init__(self, name, parent, args=None, config=None,
                 callspec=None, callobj=NOTSET, keywords=None, session=None,
                 fixtureinfo=None):
        super(Function, self).__init__(name, parent, config=config,
                                       session=session)
        self._args = args
        if callobj is not NOTSET:
            self.obj = callobj

        # merge keywords: function attributes, then callspec, then explicit
        # keywords (later updates win)
        self.keywords.update(self.obj.__dict__)
        if callspec:
            self.callspec = callspec
            self.keywords.update(callspec.keywords)
        if keywords:
            self.keywords.update(keywords)

        if fixtureinfo is None:
            fixtureinfo = self.session._fixturemanager.getfixtureinfo(
                self.parent, self.obj, self.cls,
                funcargs=not self._isyieldedfunction())
        self._fixtureinfo = fixtureinfo
        self.fixturenames = fixtureinfo.names_closure
        self._initrequest()

    def _initrequest(self):
        # funcargs gets filled at setup time (see _fillfixtures)
        self.funcargs = {}
        if self._isyieldedfunction():
            assert not hasattr(self, "callspec"), (
                "yielded functions (deprecated) cannot have funcargs")
        else:
            if hasattr(self, "callspec"):
                callspec = self.callspec
                assert not callspec.funcargs
                self._genid = callspec.id
                if hasattr(callspec, "param"):
                    self.param = callspec.param
        self._request = FixtureRequest(self)

    @property
    def function(self):
        "underlying python 'function' object"
        # im_func unwraps py2 bound methods; plain functions pass through
        return getattr(self.obj, 'im_func', self.obj)

    def _getobj(self):
        name = self.name
        i = name.find("[") # parametrization
        if i != -1:
            name = name[:i]
        return getattr(self.parent.obj, name)

    @property
    def _pyfuncitem(self):
        "(compatonly) for code expecting pytest-2.2 style request objects"
        return self

    def _isyieldedfunction(self):
        return getattr(self, "_args", None) is not None

    def runtest(self):
        """ execute the underlying test function. """
        self.ihook.pytest_pyfunc_call(pyfuncitem=self)

    def setup(self):
        # check if parametrization happened with an empty list
        try:
            self.callspec._emptyparamspecified
        except AttributeError:
            pass
        else:
            fs, lineno = self._getfslineno()
            pytest.skip("got empty parameter set, function %s at %s:%d" %(
                self.function.__name__, fs, lineno))
        super(Function, self).setup()
        fillfixtures(self)
# Maps a scope name to the FixtureRequest attributes that are meaningful in
# that scope (each scope inherits the broader scope's attributes).
# scopeproperty() below uses this table to gate attribute access.
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
    """Build a decorator turning a FixtureRequest method into a property
    that is only readable in scopes where the attribute makes sense
    (per the ``scope2props`` table).

    :param name: scope-table key; defaults to the decorated function's name.
    :param doc: docstring for the resulting property; defaults to the
        decorated function's docstring.  (Fix: previously this parameter
        was accepted but silently ignored.)
    """
    def decoratescope(func):
        scopename = name or func.__name__

        def provide(self):
            if func.__name__ in scope2props[self.scope]:
                return func(self)
            raise AttributeError("%s not available in %s-scoped context" % (
                scopename, self.scope))

        return property(provide, None, None, doc or func.__doc__)
    return decoratescope
class FixtureRequest(FuncargnamesCompatAttr):
    """ A request for a fixture from a test or fixture function.

    A request object gives access to the requesting test context
    and has an optional ``param`` attribute in case
    the fixture is parametrized indirectly.
    """

    def __init__(self, pyfuncitem):
        self._pyfuncitem = pyfuncitem
        #: fixture for which this request is being performed
        self.fixturename = None
        #: Scope string, one of "function", "cls", "module", "session"
        self.scope = "function"
        # argname -> computed fixture value (per-request cache)
        self._funcargs = {}
        # argname -> FixtureDef actually used to compute the value
        self._fixturedefs = {}
        fixtureinfo = pyfuncitem._fixtureinfo
        self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
        # argname -> negative index into its fixturedefs list; decremented on
        # each lookup so a fixture can access the one it overrides
        self._arg2index = {}
        self.fixturenames = fixtureinfo.names_closure
        self._fixturemanager = pyfuncitem.session._fixturemanager

    @property
    def node(self):
        """ underlying collection node (depends on current request scope)"""
        return self._getscopeitem(self.scope)

    def _getnextfixturedef(self, argname):
        """Return the next (innermost not-yet-used) FixtureDef for *argname*."""
        fixturedefs = self._arg2fixturedefs.get(argname, None)
        if fixturedefs is None:
            # we arrive here because of a dynamic call to
            # getfuncargvalue(argname) usage which was naturally
            # not known at parsing/collection time
            fixturedefs = self._fixturemanager.getfixturedefs(
                argname, self._pyfuncitem.parent.nodeid)
            self._arg2fixturedefs[argname] = fixturedefs
        # fixturedefs list is immutable so we maintain a decreasing index
        index = self._arg2index.get(argname, 0) - 1
        if fixturedefs is None or (-index > len(fixturedefs)):
            raise FixtureLookupError(argname, self)
        self._arg2index[argname] = index
        return fixturedefs[index]

    @property
    def config(self):
        """ the pytest config object associated with this request. """
        return self._pyfuncitem.config

    @scopeproperty()
    def function(self):
        """ test function object if the request has a per-function scope. """
        return self._pyfuncitem.obj

    @scopeproperty("class")
    def cls(self):
        """ class (can be None) where the test function was collected. """
        clscol = self._pyfuncitem.getparent(pytest.Class)
        if clscol:
            return clscol.obj

    @property
    def instance(self):
        """ instance (can be None) on which test function was collected. """
        # unittest support hack, see _pytest.unittest.TestCaseFunction
        try:
            return self._pyfuncitem._testcase
        except AttributeError:
            function = getattr(self, "function", None)
            if function is not None:
                return py.builtin._getimself(function)

    @scopeproperty()
    def module(self):
        """ python module object where the test function was collected. """
        return self._pyfuncitem.getparent(pytest.Module).obj

    @scopeproperty()
    def fspath(self):
        """ the file system path of the test module which collected this test. """
        return self._pyfuncitem.fspath

    @property
    def keywords(self):
        """ keywords/markers dictionary for the underlying node. """
        return self.node.keywords

    @property
    def session(self):
        """ pytest session object. """
        return self._pyfuncitem.session

    def addfinalizer(self, finalizer):
        """ add finalizer/teardown function to be called after the
        last test within the requesting test context finished
        execution. """
        # XXX usually this method is shadowed by fixturedef specific ones
        self._addfinalizer(finalizer, scope=self.scope)

    def _addfinalizer(self, finalizer, scope):
        colitem = self._getscopeitem(scope)
        self._pyfuncitem.session._setupstate.addfinalizer(
            finalizer=finalizer, colitem=colitem)

    def applymarker(self, marker):
        """ Apply a marker to a single test function invocation.
        This method is useful if you don't want to have a keyword/marker
        on all function invocations.

        :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
            created by a call to ``pytest.mark.NAME(...)``.
        """
        try:
            self.node.keywords[marker.markname] = marker
        except AttributeError:
            raise ValueError(marker)

    def raiseerror(self, msg):
        """ raise a FixtureLookupError with the given message. """
        raise self._fixturemanager.FixtureLookupError(None, self, msg)

    def _fillfixtures(self):
        # compute and store every still-missing fixture value on the item
        item = self._pyfuncitem
        fixturenames = getattr(item, "fixturenames", self.fixturenames)
        for argname in fixturenames:
            if argname not in item.funcargs:
                item.funcargs[argname] = self.getfuncargvalue(argname)

    def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
        """ (deprecated) Return a testing resource managed by ``setup`` &
        ``teardown`` calls.  ``scope`` and ``extrakey`` determine when the
        ``teardown`` function will be called so that subsequent calls to
        ``setup`` would recreate the resource.  With pytest-2.3 you often
        do not need ``cached_setup()`` as you can directly declare a scope
        on a fixture function and register a finalizer through
        ``request.addfinalizer()``.

        :arg teardown: function receiving a previously setup resource.
        :arg setup: a no-argument function creating a resource.
        :arg scope: a string value out of ``function``, ``class``, ``module``
            or ``session`` indicating the caching lifecycle of the resource.
        :arg extrakey: added to internal caching key of (funcargname, scope).
        """
        if not hasattr(self.config, '_setupcache'):
            self.config._setupcache = {} # XXX weakref?
        cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
        cache = self.config._setupcache
        try:
            val = cache[cachekey]
        except KeyError:
            self._check_scope(self.fixturename, self.scope, scope)
            val = setup()
            cache[cachekey] = val
            if teardown is not None:
                def finalizer():
                    del cache[cachekey]
                    teardown(val)
                self._addfinalizer(finalizer, scope=scope)
        return val

    def getfuncargvalue(self, argname):
        """ Dynamically retrieve a named fixture function argument.

        As of pytest-2.3, it is easier and usually better to access other
        fixture values by stating it as an input argument in the fixture
        function.  If you only can decide about using another fixture at test
        setup time, you may use this function to retrieve it inside a fixture
        function body.
        """
        return self._get_active_fixturedef(argname).cached_result[0]

    def _get_active_fixturedef(self, argname):
        try:
            return self._fixturedefs[argname]
        except KeyError:
            try:
                fixturedef = self._getnextfixturedef(argname)
            except FixtureLookupError:
                if argname == "request":
                    # "request" itself is not a real fixture; fake a def
                    # whose cached_result yields this request object
                    class PseudoFixtureDef:
                        cached_result = (self, [0], None)
                        scope = "function"
                    return PseudoFixtureDef
                raise
        # remove indent to prevent the python3 exception
        # from leaking into the call
        result = self._getfuncargvalue(fixturedef)
        self._funcargs[argname] = result
        self._fixturedefs[argname] = fixturedef
        return fixturedef

    def _get_fixturestack(self):
        # walk up the SubRequest chain, innermost fixture last
        current = self
        l = []
        while 1:
            fixturedef = getattr(current, "_fixturedef", None)
            if fixturedef is None:
                l.reverse()
                return l
            l.append(fixturedef)
            current = current._parent_request

    def _getfuncargvalue(self, fixturedef):
        # prepare a subrequest object before calling fixture function
        # (latter managed by fixturedef)
        argname = fixturedef.argname
        funcitem = self._pyfuncitem
        scope = fixturedef.scope
        try:
            param = funcitem.callspec.getparam(argname)
        except (AttributeError, ValueError):
            param = NOTSET
            param_index = 0
        else:
            # indices might not be set if old-style metafunc.addcall() was used
            param_index = funcitem.callspec.indices.get(argname, 0)
            # if a parametrize invocation set a scope it will override
            # the static scope defined with the fixture function
            paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
            if paramscopenum is not None:
                scope = scopes[paramscopenum]

        subrequest = SubRequest(self, scope, param, param_index, fixturedef)

        # check if a higher-level scoped fixture accesses a lower level one
        subrequest._check_scope(argname, self.scope, scope)

        # clear sys.exc_info before invoking the fixture (python bug?)
        # if its not explicitly cleared it will leak into the call
        exc_clear()
        try:
            # call the fixture function
            val = fixturedef.execute(request=subrequest)
        finally:
            # if fixture function failed it might have registered finalizers
            self.session._setupstate.addfinalizer(fixturedef.finish,
                                                  subrequest.node)
        return val

    def _check_scope(self, argname, invoking_scope, requested_scope):
        if argname == "request":
            return
        if scopemismatch(invoking_scope, requested_scope):
            # try to report something helpful
            lines = self._factorytraceback()
            pytest.fail("ScopeMismatch: You tried to access the %r scoped "
                "fixture %r with a %r scoped request object, "
                "involved factories\n%s" %(
                (requested_scope, argname, invoking_scope, "\n".join(lines))),
                pytrace=False)

    def _factorytraceback(self):
        # one "path:line: def name(sig)" line per fixture in the stack
        lines = []
        for fixturedef in self._get_fixturestack():
            factory = fixturedef.func
            fs, lineno = getfslineno(factory)
            p = self._pyfuncitem.session.fspath.bestrelpath(fs)
            args = _format_args(factory)
            lines.append("%s:%d: def %s%s" %(
                p, lineno, factory.__name__, args))
        return lines

    def _getscopeitem(self, scope):
        if scope == "function":
            # this might also be a non-function Item despite its attribute name
            return self._pyfuncitem
        node = get_scope_node(self._pyfuncitem, scope)
        if node is None and scope == "class":
            # fallback to function item itself
            node = self._pyfuncitem
        assert node
        return node

    def __repr__(self):
        return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
    """ a sub request for handling getting a fixture from a
    test function/fixture. """

    def __init__(self, request, scope, param, param_index, fixturedef):
        # NOTE: deliberately does NOT call FixtureRequest.__init__ --
        # caches and lookup tables are shared with the parent request by
        # reference so values computed here are visible everywhere.
        self._parent_request = request
        self.fixturename = fixturedef.argname
        if param is not NOTSET:
            self.param = param
        self.param_index = param_index
        self.scope = scope
        self._fixturedef = fixturedef
        # shadow FixtureRequest.addfinalizer with the fixturedef-bound one
        self.addfinalizer = fixturedef.addfinalizer
        self._pyfuncitem = request._pyfuncitem
        self._funcargs = request._funcargs
        self._fixturedefs = request._fixturedefs
        self._arg2fixturedefs = request._arg2fixturedefs
        self._arg2index = request._arg2index
        self.fixturenames = request.fixturenames
        self._fixturemanager = request._fixturemanager

    def __repr__(self):
        return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
    """ A fixture function tries to use a different fixture function which
    has a lower scope (e.g. a Session one calls a function one)
    """
# Scope names ordered broadest-first; position doubles as the scope number.
scopes = "session module class function".split()
scopenum_function = scopes.index("function")


def scopemismatch(currentscope, newscope):
    """Return True when *newscope* is narrower than *currentscope*, i.e. a
    broader-scoped fixture is trying to reach into a narrower-scoped one."""
    current_num = scopes.index(currentscope)
    new_num = scopes.index(newscope)
    return new_num > current_num
class FixtureLookupError(LookupError):
    """ could not return a requested Fixture (missing or invalid). """

    def __init__(self, argname, request, msg=None):
        self.argname = argname
        self.request = request
        self.fixturestack = request._get_fixturestack()
        self.msg = msg

    def formatrepr(self):
        """Build a FixtureLookupErrorRepr: source snippets of the involved
        factories followed by an explanatory message."""
        tblines = []
        addline = tblines.append
        stack = [self.request._pyfuncitem.obj]
        stack.extend(map(lambda x: x.func, self.fixturestack))
        msg = self.msg
        if msg is not None:
            # the last fixture raise an error, let's present
            # it at the requesting side
            stack = stack[:-1]
        # NOTE(review): if stack ends up empty, fspath/lineno below stay
        # unbound and the final return would raise NameError -- presumably
        # stack is never empty in practice; confirm before refactoring.
        for function in stack:
            fspath, lineno = getfslineno(function)
            try:
                lines, _ = inspect.getsourcelines(get_real_func(function))
            except IOError:
                error_msg = "file %s, line %s: source code not available"
                addline(error_msg % (fspath, lineno+1))
            else:
                addline("file %s, line %s" % (fspath, lineno+1))
                # show the def line and anything (decorators) above it
                for i, line in enumerate(lines):
                    line = line.rstrip()
                    addline(" " + line)
                    if line.lstrip().startswith('def'):
                        break

        if msg is None:
            # no explicit message: report which fixtures WOULD be available
            fm = self.request._fixturemanager
            available = []
            for name, fixturedef in fm._arg2fixturedefs.items():
                parentid = self.request._pyfuncitem.parent.nodeid
                faclist = list(fm._matchfactories(fixturedef, parentid))
                if faclist:
                    available.append(name)
            msg = "fixture %r not found" % (self.argname,)
            msg += "\n available fixtures: %s" %(", ".join(available),)
            msg += "\n use 'py.test --fixtures [testpath]' for help on them."

        return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
    """Terminal rendering of a FixtureLookupError: the factory traceback
    lines, the error message (in red), then the originating location."""

    def __init__(self, filename, firstlineno, tblines, errorstring, argname):
        self.tblines = tblines
        self.errorstring = errorstring
        self.filename = filename
        self.firstlineno = firstlineno
        self.argname = argname

    def toterminal(self, tw):
        for traceback_line in self.tblines:
            tw.line(traceback_line.rstrip())
        for message_line in self.errorstring.split("\n"):
            tw.line("    " + message_line.strip(), red=True)
        tw.line()
        # 1-based line number for display
        tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
class FixtureManager:
"""
pytest fixtures definitions and information is stored and managed
from this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
as the closure of the fixtures needed to setup the initial fixtures,
i. e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
Upon the test-setup phases all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
if cls is not None:
startindex = 1
else:
startindex = None
argnames = getfuncargnames(func, startindex)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != "/":
nodeid = nodeid.replace(p.sep, "/")
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i+1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
# collect the closure of all fixtures , starting with the given
# fixturenames as the initial set. As we have to visit all
# factory definitions anyway, we also return a arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
# skip directly parametrized arguments
argnames = func_params[0]
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if argname not in func_params and argname not in argnames:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
    def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
        """Scan *node_or_obj* for fixture factories and register them.

        Accepts either a collection node (its ``obj`` is inspected and its
        ``nodeid`` used) or a plain holder object together with an explicit
        *nodeid*.  Recognizes both ``@pytest.fixture``-marked callables and
        the legacy ``pytest_funcarg__``-prefixed style.  Each holder object
        is only scanned once; autouse fixtures are recorded per node id.
        """
        if nodeid is not NOTSET:
            holderobj = node_or_obj
        else:
            holderobj = node_or_obj.obj
            nodeid = node_or_obj.nodeid
        if holderobj in self._holderobjseen:
            return
        self._holderobjseen.add(holderobj)
        autousenames = []
        for name in dir(holderobj):
            obj = getattr(holderobj, name, None)
            if not callable(obj):
                continue
            # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
            # or are "@pytest.fixture" marked
            marker = getfixturemarker(obj)
            if marker is None:
                if not name.startswith(self._argprefix):
                    continue
                marker = defaultfuncargprefixmarker
                name = name[len(self._argprefix):]
            elif not isinstance(marker, FixtureFunctionMarker):
                # magic globals with __getattr__ might have got us a wrong
                # fixture attribute
                continue
            else:
                assert not name.startswith(self._argprefix)
            fixturedef = FixtureDef(self, nodeid, name, obj,
                                    marker.scope, marker.params,
                                    yieldctx=marker.yieldctx,
                                    unittest=unittest, ids=marker.ids)
            faclist = self._arg2fixturedefs.setdefault(name, [])
            if fixturedef.has_location:
                faclist.append(fixturedef)
            else:
                # fixturedefs with no location are at the front
                # so this inserts the current fixturedef after the
                # existing fixturedefs from external plugins but
                # before the fixturedefs provided in conftests.
                i = len([f for f in faclist if not f.has_location])
                faclist.insert(i, fixturedef)
            if marker.autouse:
                autousenames.append(name)
        if autousenames:
            self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodeid.startswith(fixturedef.baseid):
yield fixturedef
def fail_fixturefunc(fixturefunc, msg):
    """Abort the test run with *msg*, pointing at the source and location
    of the offending fixture function."""
    filename, lineno = getfslineno(fixturefunc)
    # report a 1-based line number after the fixture's source text
    location = "%s:%s" % (filename, lineno + 1)
    source = py.code.Source(fixturefunc)
    pytest.fail("%s:\n\n%s\n%s" % (msg, source.indent(), location),
                pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
    """Invoke a fixture function and return its fixture value.

    For yield-fixtures (*yieldctx* true) the generator is advanced once to
    obtain the value, and a finalizer is registered on *request* that resumes
    it exactly once more for teardown; a second ``yield`` is an error.
    Plain fixtures are simply called; using ``yield`` in them is an error.
    """
    if yieldctx:
        if not is_generator(fixturefunc):
            fail_fixturefunc(fixturefunc,
                msg="yield_fixture requires yield statement in function")
        iter = fixturefunc(**kwargs)
        # py2 generators expose .next, py3 generators expose .__next__
        next = getattr(iter, "__next__", None)
        if next is None:
            next = getattr(iter, "next")
        res = next()
        def teardown():
            # resume the generator exactly once; it must stop
            try:
                next()
            except StopIteration:
                pass
            else:
                fail_fixturefunc(fixturefunc,
                    "yield_fixture function has more than one 'yield'")
        request.addfinalizer(teardown)
    else:
        if is_generator(fixturefunc):
            fail_fixturefunc(fixturefunc,
                msg="pytest.fixture functions cannot use ``yield``. "
                "Instead write and return an inner function/generator "
                "and let the consumer call and iterate over it.")
        res = fixturefunc(**kwargs)
    return res
class FixtureDef:
    """ A container for a factory definition.

    Holds the fixture function together with its scope, params and
    resolved argument names, and caches the created fixture value per
    parametrization index (see execute()).
    """
    def __init__(self, fixturemanager, baseid, argname, func, scope, params,
                 yieldctx, unittest=False, ids=None):
        self._fixturemanager = fixturemanager
        # a baseid of None means "no location" (e.g. plugin-provided fixture)
        self.baseid = baseid or ''
        self.has_location = baseid is not None
        self.func = func
        self.argname = argname
        self.scope = scope
        self.scopenum = scopes.index(scope or "function")
        self.params = params
        # unittest fixture functions are bound methods: skip 'self'
        startindex = unittest and 1 or None
        self.argnames = getfuncargnames(func, startindex=startindex)
        self.yieldctx = yieldctx
        self.unittest = unittest
        self.ids = ids
        self._finalizer = []
    def addfinalizer(self, finalizer):
        # register a callable to run when this fixture is finished/torn down
        self._finalizer.append(finalizer)
    def finish(self):
        """Run registered finalizers in LIFO order and drop any cached value."""
        try:
            while self._finalizer:
                func = self._finalizer.pop()
                func()
        finally:
            # even if finalization fails, we invalidate
            # the cached fixture value
            if hasattr(self, "cached_result"):
                del self.cached_result
    def execute(self, request):
        """Return the fixture value for *request*, creating it if needed.

        The value (or the exception raised while creating it) is cached per
        parametrization index; a cache hit re-raises the stored error or
        returns the stored result, while a parametrization mismatch tears
        down the previous instance before creating a new one.
        """
        # get required arguments and register our own finish()
        # with their finalization
        kwargs = {}
        for argname in self.argnames:
            fixturedef = request._get_active_fixturedef(argname)
            result, arg_cache_key, exc = fixturedef.cached_result
            request._check_scope(argname, request.scope, fixturedef.scope)
            kwargs[argname] = result
            if argname != "request":
                fixturedef.addfinalizer(self.finish)
        my_cache_key = request.param_index
        cached_result = getattr(self, "cached_result", None)
        if cached_result is not None:
            result, cache_key, err = cached_result
            if my_cache_key == cache_key:
                if err is not None:
                    py.builtin._reraise(*err)
                else:
                    return result
            # we have a previous but differently parametrized fixture instance
            # so we need to tear it down before creating a new one
            self.finish()
            assert not hasattr(self, "cached_result")
        fixturefunc = self.func
        if self.unittest:
            if request.instance is not None:
                # bind the unbound method to the TestCase instance
                fixturefunc = self.func.__get__(request.instance)
        else:
            # the fixture function needs to be bound to the actual
            # request.instance so that code working with "self" behaves
            # as expected.
            if request.instance is not None:
                fixturefunc = getimfunc(self.func)
                if fixturefunc != self.func:
                    fixturefunc = fixturefunc.__get__(request.instance)
        try:
            result = call_fixture_func(fixturefunc, request, kwargs,
                                       self.yieldctx)
        except Exception:
            # cache the failure so repeated requests re-raise consistently
            self.cached_result = (None, my_cache_key, sys.exc_info())
            raise
        self.cached_result = (result, my_cache_key, None)
        return result
    def __repr__(self):
        return ("<FixtureDef name=%r scope=%r baseid=%r >" %
                (self.argname, self.scope, self.baseid))
def num_mock_patch_args(function):
    """ return number of arguments used up by mock arguments (if any) """
    patchings = getattr(function, "patchings", None)
    if not patchings:
        return 0
    # prefer the standalone mock package, fall back to unittest.mock
    mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
    if mock is None:
        # mock is unavailable: assume every patching consumes one argument
        return len(patchings)
    return sum(1 for p in patchings
               if not p.attribute_name and p.new is mock.DEFAULT)
def getfuncargnames(function, startindex=None):
    """Return the tuple of fixture-candidate argument names of *function*.

    Implicit first arguments (``self``), arguments injected by
    ``@mock.patch`` decorators, arguments pre-bound by ``functools.partial``
    and trailing arguments with defaults are all excluded.
    """
    # XXX merge with main.py's varnames
    #assert not inspect.isclass(function)
    realfunction = function
    # unwrap decorator chains that set __wrapped__
    while hasattr(realfunction, "__wrapped__"):
        realfunction = realfunction.__wrapped__
    if startindex is None:
        # skip 'self' for bound methods
        startindex = inspect.ismethod(function) and 1 or 0
    if realfunction != function:
        # arguments consumed by @mock.patch decorators are not fixtures
        startindex += num_mock_patch_args(function)
        function = realfunction
    if isinstance(function, functools.partial):
        argnames = inspect.getargs(py.code.getrawcode(function.func))[0]
        partial = function
        # drop positionally pre-bound arguments ...
        argnames = argnames[len(partial.args):]
        if partial.keywords:
            # ... and keyword pre-bound ones
            for kw in partial.keywords:
                argnames.remove(kw)
    else:
        argnames = inspect.getargs(py.code.getrawcode(function))[0]
    # func_defaults is the py2 spelling, __defaults__ the py3 one
    defaults = getattr(function, 'func_defaults',
                       getattr(function, '__defaults__', None)) or ()
    numdefaults = len(defaults)
    if numdefaults:
        return tuple(argnames[startindex:-numdefaults])
    return tuple(argnames[startindex:])
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
    """Reorder test *items* so items sharing parametrized fixture instances
    of the same (high) scope run adjacently."""
    # per scope, remember which items carry which parametrization keys
    argkeys_cache = {}
    for scopenum in range(scopenum_function):
        keys_by_item = {}
        argkeys_cache[scopenum] = keys_by_item
        for item in items:
            argkeys = set(get_parametrized_fixture_keys(item, scopenum))
            if argkeys:
                keys_by_item[item] = argkeys
    return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
    """Recursively regroup *items* for *scopenum* and all lower scopes.

    *ignore* holds parametrization keys already handled at this scope level;
    each iteration slices off a run of items sharing one key, recursing into
    the next lower scope for the preceding unmatched items.  Recursion stops
    at function scope or when the list is trivially small.
    """
    if scopenum >= scopenum_function or len(items) < 3:
        return items
    items_done = []
    while 1:
        items_before, items_same, items_other, newignore = \
            slice_items(items, ignore, argkeys_cache[scopenum])
        items_before = reorder_items_atscope(
                            items_before, ignore, argkeys_cache,scopenum+1)
        if items_same is None:
            # nothing to reorder in this scope
            assert items_other is None
            return items_done + items_before
        items_done.extend(items_before)
        # keep regrouping the remainder with the just-handled key ignored
        items = items_same + items_other
        ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
    """Split *items* around the first item carrying a not-yet-ignored
    parametrization key in this scope.

    Returns ``(before, same, other, newignore)`` where *same* holds every
    later item sharing the chosen key, *other* the rest, and *newignore* is
    *ignore* plus the chosen key.  Returns ``(items, None, None, None)``
    when no slicing key can be found.
    """
    if not scoped_argkeys_cache:
        # no parametrized items at this scope: nothing to do
        return items, None, None, None
    it = iter(items)
    # first find a slicing key
    for idx, candidate in enumerate(it):
        keys = scoped_argkeys_cache.get(candidate)
        if keys is None:
            continue
        fresh = keys.difference(ignore)
        if not fresh:
            continue
        slicing_key = fresh.pop()
        head = items[:idx]
        matching = [candidate]
        remaining = []
        # now slice the remainder of the list
        for later in it:
            keys = scoped_argkeys_cache.get(later)
            if keys and slicing_key in keys and \
                    slicing_key not in ignore:
                matching.append(later)
            else:
                remaining.append(later)
        extended_ignore = ignore.copy()
        extended_ignore.add(slicing_key)
        return head, matching, remaining, extended_ignore
    return items, None, None, None
def get_parametrized_fixture_keys(item, scopenum):
    """ return list of keys for all parametrized arguments which match
    the specified scope. """
    assert scopenum < scopenum_function  # function scope has no grouping key
    try:
        cs = item.callspec
    except AttributeError:
        # not a parametrized item
        return
    # cs.indices.items() yields argnames in arbitrary order, but item
    # functions may order their arguments arbitrarily anyway, so this
    # does not matter much in practice
    for argname, param_index in cs.indices.items():
        if cs._arg2scopenum[argname] != scopenum:
            continue
        if scopenum == 0:  # session
            yield (argname, param_index)
        elif scopenum == 1:  # module
            yield (argname, param_index, item.fspath)
        elif scopenum == 2:  # class
            yield (argname, param_index, item.fspath, item.cls)
def xunitsetup(obj, name):
    """Return the attribute *name* of *obj* as an xunit-style setup method,
    unless it is actually a fixture-marked function (or missing)."""
    meth = getattr(obj, name, None)
    if getfixturemarker(meth) is not None:
        # fixture-marked callables are handled by the fixture machinery
        return None
    return meth
def getfixturemarker(obj):
    """ return fixturemarker or None if it doesn't exist or raised
    exceptions."""
    try:
        marker = getattr(obj, "_pytestfixturefunction", None)
    except KeyboardInterrupt:
        raise
    except Exception:
        # some objects raise errors like request (from flask import request)
        # we don't expect them to be fixture functions
        marker = None
    return marker
# map scope names to the collection-node class owning that scope;
# "session" has no node class and is special-cased in get_scope_node()
scopename2class = {
    'class': Class,
    'module': Module,
    'function': pytest.Item,
}
def get_scope_node(node, scope):
    """Walk up from *node* to the collection node that owns *scope*.

    ``session`` maps to the session object itself; unknown scope names
    raise ValueError.
    """
    node_class = scopename2class.get(scope)
    if node_class is not None:
        return node.getparent(node_class)
    if scope == "session":
        return node.session
    raise ValueError("unknown scope")
| chillbear/pytest | _pytest/python.py | Python | mit | 85,253 | [
"VisIt"
] | 926e39ba36126bd1528d2e95d88f7d11a16126c95c91016c117e960cf5587a1e |
#!/usr/bin/env python
########################################################################
# File : dirac-admin-get-job-pilots
# Author : Stuart Paterson
########################################################################
"""
Retrieve info about pilots that have matched a given Job
Example:
$ dirac-admin-get-job-pilots 1848
{'https://marlb.in2p3.fr:9000/bqYViq6KrVgGfr6wwgT45Q': {'AccountingSent': 'False',
'BenchMark': 8.1799999999999997,
'Broker': 'marwms.in2p3.fr',
'DestinationSite': 'lpsc-ce.in2p3.fr',
'GridSite': 'LCG.LPSC.fr',
'GridType': 'gLite',
'Jobs': [1848L],
'LastUpdateTime': datetime.datetime(2011, 2, 21, 12, 39, 10),
'OutputReady': 'True',
'OwnerDN': '/O=GRID/C=FR/O=CNRS/OU=LPC/CN=Sebastien Guizard',
'OwnerGroup': '/biomed',
'ParentID': 0L,
'PilotID': 2247L,
'PilotJobReference': 'https://marlb.in2p3.fr:9000/biq6KT45Q',
'PilotStamp': '',
'Status': 'Done',
'SubmissionTime': datetime.datetime(2011, 2, 21, 12, 27, 52),
'TaskQueueID': 399L}}
"""
# pylint: disable=wrong-import-position
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
    """Look up pilot info for each JobID given on the command line.

    Lookup failures and malformed job IDs are collected and printed as
    ``ERROR`` lines; the process exits with code 2 if any occurred.
    """
    # Registering arguments will automatically add their description to the help menu
    Script.registerArgument(["JobID: DIRAC ID of the Job"])
    # parseCommandLine show help when mandatory arguments are not specified or incorrect argument
    _, args = Script.parseCommandLine(ignoreErrors=True)

    from DIRAC import exit as DIRACExit
    from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin

    diracAdmin = DiracAdmin()
    exitCode = 0
    errorList = []

    for job in args:
        try:
            job = int(job)
        except ValueError:
            # int() raises ValueError for a malformed ID; the previous
            # blanket "except Exception as x" also hid unrelated failures
            # and bound an unused name.
            errorList.append((job, "Expected integer for jobID"))
            exitCode = 2
            continue

        result = diracAdmin.getJobPilots(job)
        if not result["OK"]:
            errorList.append((job, result["Message"]))
            exitCode = 2

    for error in errorList:
        print("ERROR %s: %s" % error)

    DIRACExit(exitCode)
if __name__ == "__main__":
main()
| ic-hep/DIRAC | src/DIRAC/Interfaces/scripts/dirac_admin_get_job_pilots.py | Python | gpl-3.0 | 3,067 | [
"DIRAC"
] | c14687db947ece3a203fa78b958b2eaba477da1fd3cb23242be8e720a793eb0c |
# Copyright (C) 2010-2011 Ben Breslauer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from PyQt4.QtGui import QWidget, QAction, QTableWidgetItem
from PyQt4.QtCore import Qt
import Util, math, numpy, scipy.optimize
from numpy import pi, e
from Wave import Wave
from Module import Module
from gui.SubWindows import SubWindow
from ui.Ui_CurveFitting import Ui_CurveFitting
class CurveFitting(Module):
"""Module to fit waves to a function."""
# To add new fitting functions, do the following:
# 1) Edit GUI
# 2) Modify _parameterTableDefaults
# 3) Create a fit[Function] method to do the fitting
# 4) Call fit[Function] from doFit
# Default values for parameter table
# Each dict entry is a list of lists. The inner list contains a row of values.
_parameterTableDefaults = {
'Polynomial': [
['p0', 1],
['p1', 1],
['p2', 1],
],
'Sinusoid': [
['p0', 1],
['p1', 1],
['p2', 1],
['p3', 1],
],
'Power Law': [
['y0', 0],
['a', 1],
['k', 1],
],
'Exponential': [
['y0', 0],
['A', 1],
['b', 1],
],
'Logarithm': [
['y0', 0],
['a', 1],
['base', 10],
],
'Gaussian': [
['amp', 1],
['mean', 0],
['width', 1],
],
'Lorentzian': [
['amp', 1],
['mean', 0],
['hwhm', 1],
],
}
    def __init__(self):
        # No state of our own yet; widgets and tables are created lazily
        # in buildWidget().
        Module.__init__(self)
    def buildWidget(self):
        """Create the module's widget, build the UI, and wire up all signals."""
        self._widget = QWidget()
        self._ui = Ui_CurveFitting()
        self._ui.setupUi(self._widget)
        self.setModels()
        self.setupSpinBoxes()
        self.setupParameterTableData()
        # Connect button signals
        self._ui.doFitButton.clicked.connect(self.doFit)
        self._ui.closeButton.clicked.connect(self.closeWindow)
        self._ui.function.currentIndexChanged[str].connect(self.changeFunction)
        self._ui.function.currentIndexChanged[str].connect(self.connectSlotsOnFunctionChange)
        self._ui.initialValuesWave.activated[str].connect(self.changeInitialValuesWave)
        self._ui.useInitialValuesWave.toggled[bool].connect(self.changeInitialValuesWaveFromCheckbox)
        self._ui.useWaveForInterpolation.toggled[bool].connect(self.catchInterpolationWaveGroupBoxCheck)
        self._ui.useDomainForInterpolation.toggled[bool].connect(self.catchInterpolationDomainGroupBoxCheck)
        # Establish function-specific connections for the initial selection
        self.connectSlotsOnFunctionChange('')
def setModels(self):
# Set up model and view
self._allWavesListModel = self._app.model('appWaves')
self._ui.xWave.setModel(self._allWavesListModel)
self._ui.yWave.setModel(self._allWavesListModel)
self._ui.weightWave.setModel(self._allWavesListModel)
self._ui.initialValuesWave.setModel(self._allWavesListModel)
self._ui.interpolationWave.setModel(self._allWavesListModel)
def setupSpinBoxes(self):
self._ui.dataRangeStart.addWaveView(self._ui.xWave)
self._ui.dataRangeEnd.addWaveView(self._ui.xWave)
self._ui.dataRangeStart.addWaveView(self._ui.yWave)
self._ui.dataRangeEnd.addWaveView(self._ui.yWave)
self._ui.interpolationWaveRangeStart.addWaveView(self._ui.interpolationWave)
self._ui.interpolationWaveRangeEnd.addWaveView(self._ui.interpolationWave)
    def setupParameterTableData(self):
        # Per-function saved parameter tables, keyed by function name.
        self._parameterTableData = {}
        # No function selected yet; changeFunction() populates the table.
        self._currentFunction = None
        self.changeFunction('')
    def closeWindow(self):
        # The widget lives inside a SubWindow container; close that parent.
        self._widget.parent().close()
def connectSlotsOnFunctionChange(self, newFunctionName):
"""
Disconnect slots dependent on which function is chosen.
If polynomial function is chosen, connect slot to update parameter table
on degree change.
"""
# Disconnect slots
try:
self._ui.polynomialDegree.valueChanged[int].disconnect(self.changePolynomialDegree)
except:
pass
# Connect polynomial degree change
if Util.getWidgetValue(self._ui.function) == 'Polynomial':
self._ui.polynomialDegree.valueChanged[int].connect(self.changePolynomialDegree)
    def catchInterpolationWaveGroupBoxCheck(self, checked):
        # The two interpolation group boxes are mutually exclusive:
        # checking/unchecking one applies the opposite state to the other.
        # Set the opposite check for the domain group box
        Util.setWidgetValue(self._ui.useDomainForInterpolation, not checked)
    def catchInterpolationDomainGroupBoxCheck(self, checked):
        # Mirror of catchInterpolationWaveGroupBoxCheck: keep the two
        # interpolation group boxes mutually exclusive.
        # Set the opposite check for the wave group box
        Util.setWidgetValue(self._ui.useWaveForInterpolation, not checked)
def saveParameterTable(self):
"""
Save the parameters for the current function to the object.
"""
if self._currentFunction:
self._parameterTableData[self._currentFunction] = self.getCurrentParameterTable()
    def changeFunction(self, newFunctionName):
        """Switch the parameter table to the currently selected function,
        restoring any saved values, backfilling defaults for missing rows,
        and optionally seeding values from the initial-values wave."""
        # Save parameters for old function
        self.saveParameterTable()
        #if self._currentFunction:
        #    self._parameterTableData[self._currentFunction] = self.getCurrentParameterTable()
        # Now update _currentFunction to the function that is currently selected.
        # If this method was called because the user selected a different function,
        # then this will be modified. If it was called because the fit curve button
        # was pressed, then its value will not be changed.
        self._currentFunction = Util.getWidgetValue(self._ui.function)
        # Enter in parameters for new function
        # If there are previously user-entered values, then use them
        # else, if a wave is selected, then use that
        # else, use the initial values
        # Either way, if there are blank entries, then use initial values for them
        # Clear the table, but leave all the column headers
        for rowIndex in range(self._ui.parameterTable.rowCount()):
            self._ui.parameterTable.removeRow(0)
        parameters = []
        # If there is saved data, use it
        if self._currentFunction in self._parameterTableData:
            parameters = self._parameterTableData[self._currentFunction]
        # If there aren't enough rows for all the parameters, extend with
        # initial values. This will also occur if no parameters had been saved.
        savedParametersLength = len(parameters)
        defaultParameters = self._parameterTableDefaults[self._currentFunction]
        if savedParametersLength < len(defaultParameters):
            parameters.extend(defaultParameters[len(parameters):])
        # Use wave if requested by the user
        if Util.getWidgetValue(self._ui.useInitialValuesWave):
            # Convert from QString to str
            waveName = str(Util.getWidgetValue(self._ui.initialValuesWave))
            if self._app.waves().wave(waveName) is None:
                # waveName is not a name of a wave
                pass
            else:
                # only the rows beyond the saved data get wave values
                waveData = self._app.waves().wave(waveName).data()
                for i in range(savedParametersLength, len(defaultParameters)):
                    parameters[i][1] = waveData[i]
        self.writeParametersToTable(parameters)
    def writeParametersToTable(self, parameters, startRow=0):
        """Write *parameters* (rows of [name, value]) into the parameter
        table widget beginning at *startRow*; name cells are read-only."""
        # Determine how many rows the table should have
        numRows = startRow + len(parameters)
        self._ui.parameterTable.setRowCount(numRows)
        # Now actually write to the table
        for rowIndex, row in enumerate(parameters, startRow):
            for colIndex, value in enumerate(row):
                item = QTableWidgetItem(str(value))
                if colIndex == 0:
                    # parameter name, do not want it editable
                    item.setFlags(Qt.ItemIsEnabled)
                self._ui.parameterTable.setItem(rowIndex, colIndex, item)
    def changePolynomialDegree(self, newDegree):
        """Resize the polynomial parameter table to *newDegree* + 1 rows,
        seeding any newly added rows from the initial-values wave when one
        is in use."""
        # If decreasing the degree, just remove the last entries
        # If increasing the degree,
        #   If a wave is selected, then use that for the new values
        #   else, use the initial values
        desiredNumRows = newDegree + 1
        currentNumRows = self._ui.parameterTable.rowCount()
        if desiredNumRows == currentNumRows:
            # Nothing to do
            return
        # Set defaults
        rows = []
        for d in range(desiredNumRows):
            rows.append(['p' + str(d), 1])
        self._parameterTableDefaults['Polynomial'] = rows
        # Update table
        self._ui.parameterTable.setRowCount(desiredNumRows)
        if desiredNumRows < currentNumRows:
            # We are done, because no rows need to be edited
            return
        # Degree is being increased
        parameters = self._parameterTableDefaults['Polynomial'][currentNumRows:desiredNumRows]
        if Util.getWidgetValue(self._ui.useInitialValuesWave):
            # Convert from QString to str
            waveName = str(Util.getWidgetValue(self._ui.initialValuesWave))
            if self._app.waves().wave(waveName) is None:
                # waveName is not a name of a wave
                pass
            else:
                # pull just the values for the newly added rows from the wave
                waveData = self._app.waves().wave(waveName).data(currentNumRows, desiredNumRows)
                for index, value in enumerate(waveData):
                    parameters[index][1] = value
        self.writeParametersToTable(parameters, currentNumRows)
def changeInitialValuesWaveFromCheckbox(self, checked):
"""
If the useInitialValuesWave checkbox is checked, then
call changeInitialValuesWave.
"""
if checked:
self.changeInitialValuesWave(str(Util.getWidgetValue(self._ui.initialValuesWave)))
    def changeInitialValuesWave(self, waveName):
        """Reload the parameter-table values from the wave named *waveName*,
        keeping current entries (backed by defaults) for any rows the wave
        does not cover."""
        # Use the wave for as many parameters as possible
        # if the wave is too long, then just use the first n values
        # if the wave is too short, then leave the current value in place
        # if there is no current value, then use the initial values
        if Util.getWidgetValue(self._ui.useInitialValuesWave):
            # Get the current values, with any undefined values using the initial values
            parameters = self.currentParametersBackedByDefaults()
            # Now get the wave values
            parameters = self.updateParametersListWithWave(parameters, waveName)
            # Set the table to the parameters
            self.writeParametersToTable(parameters)
def updateParametersListWithWave(self, parameters, waveName):
"""
Given a list of parameter table rows, and the name of a wave,
this will update the parameter values with the entries in the wave.
"""
waveName = str(waveName)
if self._app.waves().wave(waveName) is None:
# waveName is not a name of a wave
return parameters
waveData = self._app.waves().wave(waveName).data(0, len(parameters))
for i in range(len(waveData)):
parameters[i][1] = waveData[i]
return parameters
def currentParametersBackedByDefaults(self):
# Start with initial values
parameters = self._parameterTableDefaults[self._currentFunction]
# Then get the current values and update parameters with it
currentParameters = self.getCurrentParameterTable()
for rowIndex, row in enumerate(currentParameters):
parameters[rowIndex] = row
return parameters
    def getCurrentParameterTable(self):
        """
        Save data to a 2-d array mimicking the table.
        """
        # FIXME only works with text right now. Need to add in support for check boxes
        # Maybe do this by creating a QTableWidgetItem option in Util.getWidgetValue
        # and using QTableWidget.cellWidget to get the indiv. cells
        table = []
        row = []
        for rowIndex in range(self._ui.parameterTable.rowCount()):
            for colIndex in range(self._ui.parameterTable.columnCount()):
                try:
                    # empty cells have no item object; store '' instead
                    row.append(str(self._ui.parameterTable.item(rowIndex, colIndex).text()))
                except AttributeError:
                    row.append('')
            table.append(row)
            row = []
        return table
def parameterColumnValues(self, functionName, columnNum):
"""
Return a list of the values of a specific column in the parameter table.
"""
if functionName not in self._parameterTableData:
return None
tableData = self._parameterTableData[functionName]
values = [str(row[columnNum]) for row in tableData]
return values
    def parameterNames(self, functionName):
        """
        Return a list of the names of the parameters for the given function.
        """
        # Column 0 of the saved parameter table holds the names.
        return self.parameterColumnValues(functionName, 0)
def parameterInitialValues(self, functionName):
"""
Return a list of the initial values of the parameters (NOT the default values) for the given function.
"""
values = self.parameterColumnValues(functionName, 1)
initialValues = [float(v) if Util.isNumber(v) else 1 for v in values]
return initialValues
    def doFit(self):
        """Run the fit: gather UI selections, slice the data range, create
        any requested output waves, then dispatch to the fit method for the
        selected function."""
        # save user-defined parameters
        self.saveParameterTable()
        # Get all waves that are selected before doing anything else
        # If any waves are created, as they are in the output tab section,
        # then the wave combo boxes are refreshed, and the previous selection
        # is lost
        xWaveName = Util.getWidgetValue(self._ui.xWave)
        yWaveName = Util.getWidgetValue(self._ui.yWave)
        weightWaveName = Util.getWidgetValue(self._ui.weightWave)
        interpolationDomainWaveName = Util.getWidgetValue(self._ui.interpolationWave)
        # Get data tab
        dataRangeStart = Util.getWidgetValue(self._ui.dataRangeStart)
        dataRangeEnd = Util.getWidgetValue(self._ui.dataRangeEnd)
        xWave = self._app.waves().wave(xWaveName)
        yWave = self._app.waves().wave(yWaveName)
        xLength = xWave.length()
        yLength = yWave.length()
        # Verify data range limits are valid
        if dataRangeStart > xLength or dataRangeStart > yLength:
            dataRangeStart = 0
        if dataRangeEnd > xLength or dataRangeEnd > yLength:
            dataRangeEnd = min(xLength, yLength) - 1
        xData = xWave.data(dataRangeStart, dataRangeEnd + 1)
        yData = yWave.data(dataRangeStart, dataRangeEnd + 1)
        # Get weights, if required by user
        if Util.getWidgetValue(self._ui.useWeights):
            weightWave = self._app.waves().wave(weightWaveName)
            weightLength = weightWave.length()
            weightData = weightWave.data(dataRangeStart, dataRangeEnd + 1)
            # If weighting inversely, invert the weights
            if Util.getWidgetValue(self._ui.weightIndirectly):
                weightData = [1./w if w != 0 else 0 for w in weightData]
            if len(weightData) != len(yData):
                print "The number of weight points is not the same as the number of y points."
                return 1
        else:
            weightData = None
        # Get output tab
        outputOptions = {}
        outputWaves = {}
        outputOptions['createTable'] = Util.getWidgetValue(self._ui.createTable)
        outputOptions['outputParameters'] = Util.getWidgetValue(self._ui.outputParameters)
        if outputOptions['outputParameters']:
            outputOptions['saveLabels'] = Util.getWidgetValue(self._ui.saveLabels)
            # Create saveLabels wave
            if outputOptions['saveLabels']:
                saveLabelsDestination = self._app.waves().findGoodWaveName(Util.getWidgetValue(self._ui.saveLabelsDestination))
                outputWaves['saveLabelsWave'] = Wave(saveLabelsDestination, 'String')
                self._app.waves().addWave(outputWaves['saveLabelsWave'])
            # Create parameter wave
            parameterDestination = self._app.waves().findGoodWaveName(Util.getWidgetValue(self._ui.parameterDestination))
            outputWaves['parameterWave'] = Wave(parameterDestination, 'Decimal')
            self._app.waves().addWave(outputWaves['parameterWave'])
        outputOptions['outputInterpolation'] = Util.getWidgetValue(self._ui.outputInterpolation)
        if outputOptions['outputInterpolation']:
            # Create interpolation wave
            interpolationDestination = self._app.waves().findGoodWaveName(Util.getWidgetValue(self._ui.interpolationDestination))
            outputWaves['interpolationDestinationWave'] = Wave(interpolationDestination, 'Decimal')
            self._app.waves().addWave(outputWaves['interpolationDestinationWave'])
            if Util.getWidgetValue(self._ui.useWaveForInterpolation):
                # Using an already-existing wave for the interpolation points.
                interpolationDomainWave = self._app.waves().wave(interpolationDomainWaveName)
                interpolationWaveRangeStart = Util.getWidgetValue(self._ui.interpolationWaveRangeStart)
                interpolationWaveRangeEnd = Util.getWidgetValue(self._ui.interpolationWaveRangeEnd)
                outputWaves['interpolationDomainWave'] = interpolationDomainWave
                # Start the wave with as many blanks as necessary in order to get the destination wave
                # to line up correctly with the domain wave, for easy plotting.
                outputWaves['interpolationDestinationWave'].extend([''] * interpolationWaveRangeStart)
                # Verify data range limits are valid
                interpolationDomainLength = interpolationDomainWave.length()
                if interpolationWaveRangeStart > interpolationDomainLength:
                    interpolationWaveRangeStart = 0
                if interpolationWaveRangeEnd > interpolationDomainLength:
                    interpolationWaveRangeEnd = interpolationDomainLength - 1
                outputOptions['interpolationDomainWaveData'] = interpolationDomainWave.data(interpolationWaveRangeStart, interpolationWaveRangeEnd + 1)
            else:
                # Creating a new wave based on a domain and number of points.
                customWaveName = Util.getWidgetValue(self._ui.interpolationCustomWaveName)
                customLowerLimit = float(Util.getWidgetValue(self._ui.interpolationCustomLowerLimit))
                customUpperLimit = float(Util.getWidgetValue(self._ui.interpolationCustomUpperLimit))
                customNumPoints = Util.getWidgetValue(self._ui.interpolationCustomNumPoints)
                outputOptions['interpolationDomainWaveData'] = numpy.linspace(customLowerLimit, customUpperLimit, customNumPoints, endpoint=True)
                interpolationDomainWaveName = self._app.waves().findGoodWaveName(customWaveName)
                outputWaves['interpolationDomainWave'] = Wave(interpolationDomainWaveName, 'Decimal', outputOptions['interpolationDomainWaveData'])
                self._app.waves().addWave(outputWaves['interpolationDomainWave'])
        outputOptions['saveResiduals'] = Util.getWidgetValue(self._ui.saveResiduals)
        if outputOptions['saveResiduals']:
            residualsDestination = self._app.waves().findGoodWaveName(Util.getWidgetValue(self._ui.residualsDestination))
            outputWaves['residualsWave'] = Wave(residualsDestination, 'Decimal')
            self._app.waves().addWave(outputWaves['residualsWave'])
            # If the fit is not done to all the data in the wave, then we need to add blanks to the beginning
            # of the residual wave because the residuals will only be calculated for the part of the data that
            # was actually fit.
            outputWaves['residualsWave'].extend([''] * dataRangeStart)
        # Save the x wave, in case it is different from the interpolationDomainWave
        outputWaves['xWave'] = xWave
        # Determine the function and call the appropriate method
        functionName = Util.getWidgetValue(self._ui.function)
        if functionName == 'Polynomial':
            self.fitPolynomial(xData, yData, weightData, outputWaves, outputOptions)
        elif functionName == 'Sinusoid':
            self.fitSinusoid(xData, yData, weightData, outputWaves, outputOptions)
        elif functionName == 'Power Law':
            self.fitPowerLaw(xData, yData, weightData, outputWaves, outputOptions)
        elif functionName == 'Exponential':
            self.fitExponential(xData, yData, weightData, outputWaves, outputOptions)
        elif functionName == 'Logarithm':
            self.fitLogarithm(xData, yData, weightData, outputWaves, outputOptions)
        elif functionName == 'Gaussian':
            self.fitGaussian(xData, yData, weightData, outputWaves, outputOptions)
        elif functionName == 'Lorentzian':
            self.fitLorentzian(xData, yData, weightData, outputWaves, outputOptions)
def fitPolynomial(self, xData, yData, weightData=None, outputWaves={}, outputOptions={}):
# Get the degree of the polynomial the user wants to use
degree = Util.getWidgetValue(self._ui.polynomialDegree)
def polynomialFunction(p, x):
# If x is a list, then val needs to be a list
# If x is a number, then val needs to be a number
if isinstance(x, list):
val = numpy.array([p[0]] * len(x))
else:
val = p[0]
# Add x, x^2, x^3, etc entries
for d in range(1, degree + 1):
val += numpy.multiply(p[d], numpy.power(x, d))
return val
parameterNames = self.parameterNames('Polynomial')
initialValues = self.parameterInitialValues('Polynomial')
if initialValues is None:
initialValues = [1] * degree
self.fitFunction(polynomialFunction, parameterNames, initialValues, xData, yData, weightData, outputWaves, outputOptions, 'Polynomial Fit')
def fitSinusoid(self, xData, yData, weightData=None, outputWaves={}, outputOptions={}):
sinusoidFunction = lambda p, x: p[0] + p[1] * numpy.cos(x / p[2] * 2. * numpy.pi + p[3])
parameterNames = self.parameterNames('Sinusoid')
initialValues = self.parameterInitialValues('Sinusoid')
if initialValues is None:
initialValues = [1, 1, 1, 1]
self.fitFunction(sinusoidFunction, parameterNames, initialValues, xData, yData, weightData, outputWaves, outputOptions, 'Sinusoid Fit')
def fitPowerLaw(self, xData, yData, weightData=None, outputWaves={}, outputOptions={}):
powerLawFunction = lambda p, x: numpy.add(p[0], numpy.multiply(p[1], numpy.power(x, p[2])))
parameterNames = self.parameterNames('Power Law')
initialValues = self.parameterInitialValues('Power Law')
if initialValues is None:
initialValues = [0, 1, 1]
self.fitFunction(powerLawFunction, parameterNames, initialValues, xData, yData, weightData, outputWaves, outputOptions, 'Power Law Fit')
def fitExponential(self, xData, yData, weightData=None, outputWaves={}, outputOptions={}):
exponentialFunction = lambda p, x: numpy.add(p[0], numpy.multiply(p[1], numpy.power(numpy.e, numpy.multiply(p[2], x))))
parameterNames = self.parameterNames('Exponential')
initialValues = self.parameterInitialValues('Exponential')
if initialValues is None:
initialValues = [0, 1, 1]
self.fitFunction(exponentialFunction, parameterNames, initialValues, xData, yData, weightData, outputWaves, outputOptions, 'Exponential Fit')
def fitLogarithm(self, xData, yData, weightData=None, outputWaves={}, outputOptions={}):
# There is no numpy log function where you can specify a custom base, so we'll define one
customBaseLog = lambda base, x: numpy.divide(numpy.log(x), numpy.log(base))
logarithmFunction = lambda p, x: numpy.add(p[0], numpy.multiply(p[1], customBaseLog(p[2], x)))
parameterNames = self.parameterNames('Logarithm')
initialValues = self.parameterInitialValues('Logarithm')
if initialValues is None:
initialValues = [0, 1, 10]
self.fitFunction(logarithmFunction, parameterNames, initialValues, xData, yData, weightData, outputWaves, outputOptions, 'Logarithm Fit')
def fitGaussian(self, xData, yData, weightData=None, outputWaves={}, outputOptions={}):
gaussianFunction = lambda p, x: numpy.multiply(p[0], numpy.power(numpy.e, numpy.divide(-1 * numpy.power((numpy.subtract(x, p[1])), 2), 2 * numpy.power(p[2], 2))))
parameterNames = self.parameterNames('Gaussian')
initialValues = self.parameterInitialValues('Gaussian')
if initialValues is None:
initialValues = [1, 0, 1]
self.fitFunction(gaussianFunction, parameterNames, initialValues, xData, yData, weightData, outputWaves, outputOptions, 'Gaussian Fit')
def fitLorentzian(self, xData, yData, weightData=None, outputWaves={}, outputOptions={}):
lorentzianFunction = lambda p, x: numpy.divide(numpy.multiply(p[0], p[2]), numpy.add(numpy.power(numpy.subtract(x, p[1]), 2), numpy.power(p[2], 2)))
parameterNames = self.parameterNames('Lorentzian')
initialValues = self.parameterInitialValues('Lorentzian')
if initialValues is None:
initialValues = [1, 0, 1]
self.fitFunction(lorentzianFunction, parameterNames, initialValues, xData, yData, weightData, outputWaves, outputOptions, 'Lorentzian Fit')
def fitFunction(self, function, parameterNames, initialValues, xData, yData, weightData=None, outputWaves={}, outputOptions={}, tableName='Fit'):
# Can also include initial guesses for the parameters, as well as sigma's for weighting of the ydata
# Need to fail with error message if the leastsq call does not succeed
# Do the fit
result = self.fitFunctionLeastSquares(function, initialValues, xData, yData, weightData)
parameters = result[0]
tableWaves = []
# Deal with the parameter-related waves
if outputOptions['outputParameters']:
# save parameter labels
if outputOptions['saveLabels']:
tableWaves.append(outputWaves['saveLabelsWave'])
outputWaves['saveLabelsWave'].extend(parameterNames)
tableWaves.append(outputWaves['parameterWave'])
# save parameters to a wave
outputWaves['parameterWave'].extend(parameters)
# Do the interpolation
if outputOptions['outputInterpolation']:
domain = outputOptions['interpolationDomainWaveData']
determinedFunction = lambda x: function(parameters, x)
for val in domain:
outputWaves['interpolationDestinationWave'].push(determinedFunction(val))
tableWaves.append(outputWaves['interpolationDomainWave'])
tableWaves.append(outputWaves['interpolationDestinationWave'])
# Do the residuals
if outputOptions['saveResiduals']:
residualsFunc = lambda p, x, y: numpy.subtract(y, function(p, x))
residuals = residualsFunc(parameters, xData, yData)
outputWaves['residualsWave'].extend(residuals)
tableWaves.append(outputWaves['xWave'])
tableWaves.append(outputWaves['residualsWave'])
# Create table
if outputOptions['createTable']:
self.createTable(tableWaves, tableName)
def fitFunctionLeastSquares(self, func, guess, xData, yData, weightData=None):
"""
Do a least squares fit for a generic function.
func must have the signature (p, x) where p is a list of parameters
and x is a float.
guess is the user's guess of the parameters, and must be a list of
length len(p).
xData and yData are the data to fit.
"""
if weightData is None:
#errorFunc = lambda p, x, y: func(p, x) - y
errorFunc = lambda p, x, y: numpy.subtract(func(p, x), y)
return scipy.optimize.leastsq(errorFunc, guess[:], args=(xData, yData), full_output=True)
else:
errorFunc = lambda p, x, y, w: numpy.multiply(w, numpy.subtract(func(p, x), y))
return scipy.optimize.leastsq(errorFunc, guess[:], args=(xData, yData, weightData), full_output=True)
#return scipy.optimize.leastsq(errorFunc, guess[:], args=(xData, yData), full_output=True)
def createTable(self, waves=[], title='Fit'):
if len(waves) == 0:
return
self._app.createTable(waves, title)
    def load(self):
        """Create this module's sub-window, menu entry, and widget at startup."""
        self.window = SubWindow(self._app.ui.workspace)
        self.menuEntry = QAction(self._app)
        self.menuEntry.setObjectName("actionCurveFiting")
        self.menuEntry.setText("Curve Fitting")
        # Show this module's window when its menu entry is activated
        self.menuEntry.triggered.connect(self.window.show)
        # Look up the "menuData" menu object on the main UI by attribute name
        self.menu = vars(self._app.ui)["menuData"]
        self.menu.addAction(self.menuEntry)
        self.buildWidget()
        self.window.setWidget(self._widget)
        self._widget.setParent(self.window)
        # Start hidden; the menu entry shows the window on demand
        self.window.hide()
    def unload(self):
        """Tear down the widget, window, and menu entry when the module is unloaded."""
        # deleteLater defers Qt object destruction until control returns to the event loop
        self._widget.deleteLater()
        self.window.deleteLater()
        self.menu.removeAction(self.menuEntry)
    def reload(self):
        """Reset the UI to its default state (models, spin boxes, parameter table)."""
        self.setModels()
        self.setupSpinBoxes()
        self.setupParameterTableData()
        # Default selection: polynomial fit, initial-values wave disabled
        Util.setWidgetValue(self._ui.function, 'Polynomial')
        Util.setWidgetValue(self._ui.useInitialValuesWave, False)
| bbreslauer/PySciPlot | src/modules/CurveFitting.py | Python | gpl-3.0 | 30,735 | [
"Gaussian"
] | d3a988ec52c637eaecebc788c1d07a646c1c3fa90d108ae256123d872566a887 |
from __future__ import absolute_import
from __future__ import print_function
import logging
import sys
import os
import os.path
import multiprocessing as mp
import warnings
# os.environ['R_LIBS_USER'] = '/project/projectdirs/metatlas/r_pkgs/'
# curr_ld_lib_path = ''
from metatlas.datastructures import metatlas_objects as metob
from metatlas.io import metatlas_get_data_helper_fun as ma_data
from metatlas.io import write_utils
from metatlas.tools import spectralprocessing as sp
from metatlas.plots import chromplotplus as cpp
from metatlas.io.metatlas_get_data_helper_fun import extract
# from metatlas import gui
from textwrap import fill, TextWrapper
# import qgrid
import pandas as pd
import dill
import numpy as np
import json
import matplotlib.pyplot as plt
from rdkit import Chem
from rdkit.Chem import Draw, rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
from itertools import cycle
from ipywidgets import interact, interactive
import ipywidgets as widgets
from IPython.display import display
import getpass
from ast import literal_eval
from datetime import datetime
from matplotlib.widgets import Slider, RadioButtons
from matplotlib.widgets import AxesWidget
import matplotlib.patches
import gspread
# from oauth2client.client import SignedJwtAssertionCredentials
from oauth2client.service_account import ServiceAccountCredentials
import six
from six.moves import range
from six.moves import zip
from functools import reduce
from io import StringIO
logger = logging.getLogger(__name__)
# Static metadata for mass-spectrometry adducts, keyed by adduct notation.
# Each entry records:
#   charge:   ionization charge of the adduct (as a string)
#   color:    hex color used when plotting this adduct
#   common:   whether the adduct is commonly observed
#   comp_num: number of molecules in the complex (e.g. '2' for dimers like [2M+H])
#   mass:     mass shift in Da relative to comp_num * neutral mass (as a string)
ADDUCT_INFO = {'[2M+H]': {'charge': '1',
                          'color': '#fabebe',
                          'common': True,
                          'comp_num': '2',
                          'mass': '1.0073'},
               '[2M-H]': {'charge': '-1',
                          'color': '#008080',
                          'common': True,
                          'comp_num': '2',
                          'mass': '-1.0073'},
               '[M+2H]': {'charge': '2',
                          'color': '#ffe119',
                          'common': True,
                          'comp_num': '1',
                          'mass': '2.0146'},
               '[M+2Na]': {'charge': '2',
                           'color': '#fffac8',
                           'common': False,
                           'comp_num': '1',
                           'mass': '45.9784'},
               '[M+Cl]': {'charge': '-1',
                          'color': '#d2f53c',
                          'common': True,
                          'comp_num': '1',
                          'mass': '34.9694'},
               '[M+H-H2O]': {'charge': '1',
                             'color': '#911eb4',
                             'common': True,
                             'comp_num': '1',
                             'mass': '-17.0033'},
               '[M+H]': {'charge': '1',
                         'color': '#3cb44b',
                         'common': True,
                         'comp_num': '1',
                         'mass': '1.0073'},
               '[M+K]': {'charge': '1',
                         'color': '#aa6e28',
                         'common': False,
                         'comp_num': '1',
                         'mass': '38.963158'},
               '[M+NH4]': {'charge': '1',
                           'color': '#0082c8',
                           'common': True,
                           'comp_num': '1',
                           'mass': '18.0338'},
               '[M+Na]': {'charge': '1',
                          'color': '#f58231',
                          'common': True,
                          'comp_num': '1',
                          'mass': '22.9892'},
               '[M+acetate]': {'charge': '-1',
                               'color': '#808000',
                               'common': False,
                               'comp_num': '1',
                               'mass': '59.0139'},
               '[M+formate]': {'charge': '-1',
                               'color': '#5500FF',
                               'common': False,
                               'comp_num': '1',
                               'mass': '44.998201'},
               '[M-2H]': {'charge': '-2',
                          'color': '#f032e6',
                          'common': True,
                          'comp_num': '1',
                          'mass': '-2.014552904'},
               '[M-H+2Na]': {'charge': '1',
                             'color': '#000080',
                             'common': False,
                             'comp_num': '1',
                             'mass': '44.9711'},
               '[M-H+Cl]': {'charge': '-2',
                            'color': '#ffd8b1',
                            'common': False,
                            'comp_num': '1',
                            'mass': '33.9621'},
               '[M-H+Na]': {'charge': '0',
                            'color': '#e6beff',
                            'common': False,
                            'comp_num': '1',
                            'mass': '21.98194425'},
               '[M-H]': {'charge': '-1',
                         'color': '#46f0f0',
                         'common': True,
                         'comp_num': '1',
                         'mass': '-1.0073'},
               '[M-e]': {'charge': '1',
                         'color': '#aaffc3',
                         'common': False,
                         'comp_num': '1',
                         'mass': '-0.0005'},
               '[M]': {'charge': '0',
                       'color': '#e6194b',
                       'common': True,
                       'comp_num': '1',
                       'mass': '0'}}
def get_google_sheet(notebook_name = "Sheet name",
                     token='/project/projectdirs/metatlas/projects/google_sheets_auth/ipython to sheets demo-9140f8697062.json',
                     sheet_name = 'Sheet1',
                     literal_cols=None):
    """
    Fetch a worksheet from Google Sheets as a pandas DataFrame.
    The first row of the sheet becomes the column headers.
    To use the token hard coded in the token field,
    the sheet must be shared with:
    metatlas-ipython-nersc@ipython-to-sheets-demo.iam.gserviceaccount.com
    Unique sheet names are a requirement of this approach.
    """
    scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
    # Authorize with the service-account credentials stored in the token file
    credentials = ServiceAccountCredentials.from_json_keyfile_name(token, scope)
    client = gspread.authorize(credentials)
    worksheet = client.open(notebook_name).worksheet(sheet_name)
    rows = worksheet.get_all_values()
    header = rows.pop(0)
    raw_frame = pd.DataFrame(rows, columns=header)
    # Round-trip through CSV so read_csv can infer proper dtypes per column
    buffer = StringIO()
    raw_frame.to_csv(buffer)
    frame = pd.read_csv(StringIO(buffer.getvalue()))
    # Drop the index column introduced by the CSV round trip
    if 'Unnamed: 0' in frame.columns:
        frame.drop(columns=['Unnamed: 0'], inplace=True)
    # Turn stringified list elements back into Python objects where requested
    if literal_cols is not None:
        for col in literal_cols:
            frame[col] = frame[col].apply(literal_eval)
    return frame.fillna('')
class adjust_rt_for_selected_compound(object):
    # Interactive matplotlib GUI for reviewing per-compound EIC and MSMS plots
    # and adjusting RT min/max/peak bounds and QC flags of a metatlas_dataset.
    def __init__(self,
                 data,
                 include_lcmsruns=None,
                 exclude_lcmsruns=None,
                 include_groups=None,
                 exclude_groups=None,
                 msms_hits=None,
                 color_me='',
                 compound_idx=0,
                 width=10,
                 height=4,
                 y_scale='linear',
                 alpha=0.5,
                 min_max_color='green',
                 peak_color='darkviolet',
                 slider_color='ghostwhite',
                 y_max='auto',
                 y_min=0,
                 peak_flags=None,
                 msms_flags=None,
                 adjustable_rt_peak=False):
        """
        INPUTS:
            data: a metatlas_dataset where info on files and compounds are stored
            include/exclude lcmsruns/groups: list of substrings to filter on
            msms_hits: output of get_msms_hits routine
            color_me: '' or list of tuples with (color string, filename substring)
                      that define line colors in the EIC plot
            compound_idx: atlas index number of the first compound to display
            width: width value in inches for the plots and sliders
            height: height value in inches for each of the plots
            y_scale: 'linear' or 'log' for y-axis of EIC plot
            alpha: transparency factor for lines in EIC plot
            min_max_color: matplotlib color string for the sliders and vertical lines
            peak_color: matplotlib color string for the slider and vertical line
            slider_color: matplotlib color string for the background of the sliders
            y_max: y-axis maximum on EIC plot or 'auto' to fix to data
            y_min: y-axis minimum on EIC plot
            peak_flags: list of strings that define radio buttons for EIC plot
                        None or '' gets a default set of buttons
            msms_flags: list of strings that define radio buttons for MSMS plot
                        None or '' gets a default set of buttons
            adjustable_rt_peak: Boolean - is the RT peak slider movable?
        OUTPUTS:
            Writes RT min/max/peak changes to database
            Writes changes to the 'Flag' radio buttons to database
        Key Bindings:
            Next compound: 'l' or right arrow
            Previous compound: 'h' or left arrow
            Next MSMS hit: 'j' or down arrow
            Previous MSMS hit: 'k' or up arrow
            Cycle zoom on MSMS plot: 'z'
            Flag for removal: 'x'
            Toggle highlighting of overlapping RT ranges for similar compounds: 's'
        """
        logger.debug("Initializing new instance of %s.", self.__class__.__name__)
        self.data = data
        # sort best-scoring hits first so hit_ctr == 0 is the top match
        self.msms_hits = msms_hits.sort_values('score', ascending=False)
        self.color_me = color_me if color_me != '' else [['black', '']]
        self.compound_idx = compound_idx
        self.width = width
        self.height = height
        self.y_scale = y_scale
        self.alpha = alpha
        self.min_max_color = min_max_color
        self.peak_color = peak_color
        self.slider_color = slider_color
        self.y_max = y_max
        self.y_min = y_min
        self.peak_flags = peak_flags
        self.msms_flags = msms_flags
        self.adjustable_rt_peak = adjustable_rt_peak
        self.file_names = ma_data.get_file_names(self.data)
        self.configure_flags()
        self.data = filter_runs(self.data, include_lcmsruns, include_groups, exclude_lcmsruns, exclude_groups)
        self.similar_rects = []
        # only the atlas owner can change RT limits or flags
        self.enable_edit = getpass.getuser() == self.data.atlas.username
        self.hit_ctr = 0
        self.msms_zoom_factor = 1
        # native matplotlib key bindings that we want to override
        disable_keyboard_shortcuts({'keymap.yscale': ['l'],
                                    'keymap.xscale': ['k'],
                                    'keymap.save': ['s'],
                                    'keymap.home': ['h']})
        # Turn On interactive plot
        plt.ion()
        self.layout_figure()
        # create all event handlers
        self.fig.canvas.callbacks.connect('pick_event', self.on_pick)
        self.fig.canvas.mpl_connect('key_press_event', self.press)
        self.fig.canvas.mpl_connect('motion_notify_event', self.on_motion)
        self.set_plot_data()
    def set_plot_data(self):
        # Redraw everything for the current compound: EIC, MSMS, and flag buttons.
        logger.debug('Starting replot')
        self.similar_compounds = self.get_similar_compounds()
        self.eic_plot()
        self.filter_hits()
        self.msms_plot()
        self.flag_radio_buttons()
        plt.show()
        logger.debug('Finished replot')
    def eic_plot(self):
        # Draw the extracted-ion-chromatogram plot plus its sliders and overlays.
        logger.debug('Starting eic_plot')
        self.ax.set_title('')
        self.ax.set_xlabel('Retention Time')
        # set y-scale and bounds if provided
        self.ax.set_yscale(self.y_scale)
        if self.y_max != 'auto':
            self.ax.set_ylim(self.y_min, self.y_max)
        self.ax.set_ylabel(self.get_ms1_y_axis_label())
        if self.y_scale == 'linear':
            self.ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
        self.display_eic_data()
        self.y_max_slider()
        idx = 0 if self.y_scale == 'linear' else 1
        self.lin_log_radio = self.create_radio_buttons(self.lin_log_ax, ('linear', 'log'),
                                                       self.set_lin_log, active_idx=idx)
        self.rt_bounds()
        self.highlight_similar_compounds()
        logger.debug('Finished eic_plot')
    def flag_radio_buttons(self):
        # Build the peak/MSMS flag radio buttons, pre-selecting any stored notes.
        my_id = self.data[0][self.compound_idx]['identification']
        if my_id.ms1_notes in self.peak_flags:
            peak_flag_index = self.peak_flags.index(my_id.ms1_notes)
        else:
            peak_flag_index = 0
        logger.debug('Setting peak flag radio button with index %d', peak_flag_index)
        self.peak_flag_radio = self.create_radio_buttons(self.peak_flag_ax, self.peak_flags,
                                                         self.set_peak_flag,
                                                         active_idx=peak_flag_index)
        self.peak_flag_radio.active = self.enable_edit
        if my_id.ms2_notes in self.msms_flags:
            msms_flag_index = self.msms_flags.index(my_id.ms2_notes)
        else:
            msms_flag_index = 0
        logger.debug('Setting msms flag radio button with index %d', msms_flag_index)
        self.msms_flag_radio = self.create_radio_buttons(self.msms_flag_ax, self.msms_flags,
                                                         self.set_msms_flag,
                                                         active_idx=msms_flag_index)
        self.msms_flag_radio.active = self.enable_edit
    def y_max_slider(self):
        # Vertical slider controlling the EIC plot's y-axis maximum.
        (self.slider_y_min, self.slider_y_max) = self.ax.get_ylim()
        self.y_scale_slider = Slider(self.y_scale_ax, '', self.slider_y_min, self.slider_y_max,
                                     valfmt='', valinit=self.slider_y_max, color=self.peak_color,
                                     orientation='vertical')
        self.y_scale_slider.hline.set_linewidth(0)
        self.y_scale_slider.on_changed(self.update_y_scale)
    def rt_bounds(self):
        # put vlines on plot before creating sliders, as adding the vlines may increase plot
        # width, as the vline could occur outside of the data points
        rt = self.data.rts[self.compound_idx]
        self.min_line = self.ax.axvline(rt.rt_min, color=self.min_max_color, linewidth=4.0)
        self.max_line = self.ax.axvline(rt.rt_max, color=self.min_max_color, linewidth=4.0)
        self.peak_line = self.ax.axvline(rt.rt_peak, color=self.peak_color, linewidth=4.0)
        self.rt_min_slider = self.rt_slider(self.rt_min_ax, 'RT min', rt.rt_min,
                                            self.min_max_color, self.update_rt_min)
        self.rt_max_slider = self.rt_slider(self.rt_max_ax, 'RT max', rt.rt_max,
                                            self.min_max_color, self.update_rt_max)
        self.rt_peak_slider = self.rt_slider(self.rt_peak_ax, 'RT peak', rt.rt_peak,
                                             self.peak_color, self.update_rt_peak)
        self.rt_peak_slider.active = self.adjustable_rt_peak and self.enable_edit
        self.rt_min_slider.active = self.enable_edit
        self.rt_max_slider.active = self.enable_edit
    def rt_slider(self, axes, label, valinit, color, on_changed):
        # Helper: build one horizontal RT slider spanning the EIC x-range.
        min_x, max_x = self.ax.get_xlim()
        slider = Slider(axes, label, min_x, max_x, valinit=valinit, color=color)
        slider.vline.set_linewidth(0)
        slider.on_changed(on_changed)
        return slider
    def unhighlight_similar_compounds(self):
        # Remove all overlap-highlight rectangles/labels from the EIC plot.
        [i.remove() for i in self.similar_rects]
        self.similar_rects = []
    def highlight_similar_compounds(self):
        # Shade the RT ranges of compounds with similar m/z; red when the range
        # overlaps the current compound's RT bounds, blue otherwise.
        self.unhighlight_similar_compounds()
        min_y, max_y = self.ax.get_ylim()
        min_x, max_x = self.ax.get_xlim()
        height = max_y - min_y
        for compound in self.similar_compounds:
            if compound['index'] == self.compound_idx:
                continue
            width = abs(compound['rt'].rt_max - compound['rt'].rt_min)
            if compound['rt'].rt_min+width < min_x or compound['rt'].rt_min > max_x:
                continue  # would be off the plot
            color = 'red' if compound['overlaps'] else 'blue'
            rect = matplotlib.patches.Rectangle((compound['rt'].rt_min, min_y), width, height,
                                                linewidth=0, alpha=0.12, facecolor=color)
            text_x = max(compound['rt'].rt_min, min_x)
            text_y = max_y - (max_y - min_y)*0.05  # drop just below top of plot
            text = self.ax.text(text_x, text_y, compound['label'], fontsize='small')
            self.ax.add_patch(rect)
            self.similar_rects.append(text)
            self.similar_rects.append(rect)
    def display_eic_data(self):
        # Plot one EIC line per sample file, colored per the color_me filters.
        for sample in self.data:  # loop through the files
            eic = sample[self.compound_idx]['data']['eic']
            if eic and len(eic['rt']) > 0:
                x = np.asarray(eic['rt'])
                y = np.asarray(eic['intensity'])
                x = x[y > 0]
                y = y[y > 0]  # y[y<0.0] = 0.0
                label = sample[self.compound_idx]['lcmsrun'].name.replace('.mzML', '')
                for i, (color, label_filter) in enumerate(self.color_me):
                    if label_filter in label:
                        zorder = len(self.color_me) + 2 - i
                    else:
                        zorder = 1
                        color = 'black'
                    self.ax.plot(x, y, '-', zorder=zorder, linewidth=2, alpha=self.alpha,
                                 picker=True, pickradius=5, color=color, label=label)
    def configure_flags(self):
        # Fall back to the default radio-button label sets when none supplied.
        default_peak = ('keep', 'remove', 'unresolvable isomers', 'poor peak shape')
        default_msms = ('no selection',
                        '-1, bad match - should remove compound',
                        '0, no ref match available or no MSMS collected',
                        '0.5, co-isolated precursor, partial match',
                        '0.5, partial match of fragments',
                        '1, perfect match to internal reference library',
                        '1, perfect match to external reference library',
                        '1, co-isolated precursor but all reference ions are in sample spectrum')
        if self.peak_flags is None or self.peak_flags == '':
            self.peak_flags = default_peak
        if self.msms_flags is None or self.msms_flags == '':
            self.msms_flags = default_msms
    def get_ms1_y_axis_label(self):
        # Build the EIC y-axis label: compound index, name, and adduct if known.
        ident = self.data[0][self.compound_idx]['identification']
        if ident.name:
            compound_name = ident.name.split('///')[0]
        elif ident.compound[-1].name:
            compound_name = ident.compound[-1].name
        else:
            compound_name = 'nameless compound'
        try:
            adduct = ident.mz_references[0].adduct
        except (KeyError, AttributeError):
            return '%d, %s' % (self.compound_idx, compound_name)
        return '%d, %s\n%s' % (self.compound_idx, compound_name, adduct)
    def filter_hits(self):
        # Restrict MSMS hits to the current compound's inchi_key, RT window,
        # and precursor m/z tolerance.
        ident = self.data[0][self.compound_idx]['identification']
        inchi_key = extract(ident, ['compound', -1, 'inchi_key'])
        hits_mz_tolerance = ident.mz_references[-1].mz_tolerance*1e-6
        mz_theoretical = ident.mz_references[0].mz
        my_scan_rt = self.msms_hits.index.get_level_values('msms_scan')
        self.hits = self.msms_hits[(my_scan_rt >= float(self.data.rts[self.compound_idx].rt_min)) &
                                   (my_scan_rt <= float(self.data.rts[self.compound_idx].rt_max)) &
                                   (self.msms_hits['inchi_key'] == inchi_key) &
                                   within_tolerance(self.msms_hits['measured_precursor_mz'],
                                                    mz_theoretical, hits_mz_tolerance)]
    def msms_plot(self, font_scale=10.0):
        # Draw the MSMS mirror plot for the currently selected hit.
        logger.debug('Starting msms_plot')
        compound = None
        hit_file_name = None
        if not self.hits.empty:
            hit_file_name, compound = get_hit_metadata(self.data, self.hits, self.file_names,
                                                       self.hit_ctr, self.compound_idx)
        mz_header, rt_header, cpd_header = get_msms_plot_headers(self.data, self.hits, self.hit_ctr,
                                                                 self.compound_idx, compound,
                                                                 self.similar_compounds,
                                                                 self.file_names)
        cpd_header_wrap = fill(cpd_header, width=int(self.width * font_scale))  # text wrap
        hit_ref_id, hit_score, hit_query, hit_ref = get_msms_plot_data(self.hits, self.hit_ctr)
        self.ax2.cla()
        self.ax2.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
        self.mz_lines = plot_msms_comparison2(0, mz_header, rt_header, cpd_header_wrap, hit_ref_id,
                                              hit_file_name, hit_score, self.ax2, hit_query,
                                              hit_ref, self.msms_zoom_factor)
        min_x = self.ax2.get_xlim()[0]  # fails if original location is not within plot
        # invisible annotation reused by on_motion to show the hovered m/z value
        self.mz_annot = self.ax2.annotate('', xy=(min_x, 0), visible=False)
        logger.debug('Finished msms_plot')
    def layout_figure(self):
        # Compute figure geometry and create both plot axes plus slider/button axes.
        self.gui_scale_factor = self.height/3.25 if self.height < 3.25 else 1
        base_font_size = 10
        y_slider_width = 0.01
        plt.rcParams.update({'font.size': base_font_size * self.gui_scale_factor})
        max_radio_label_len = max([len(x) for x in self.peak_flags+self.msms_flags])
        self.plot_hspace = 2.2/(self.height*2)
        self.plot_left_pos = 1.0/self.width
        # not using a fixed width font, so this assume a even distribution of character widths
        # in the radio button labels
        self.plot_right_pos = 1.0-(0.7/self.width +
                                   max_radio_label_len*0.075/self.width*self.gui_scale_factor)
        self.plot_top_pos = 1.0-(0.396/(self.height*(2+self.plot_hspace)))
        self.plot_bottom_pos = 1.32/(self.height*(2+self.plot_hspace))
        # create figure and first axes
        combined_plot_height = self.height * (2 + self.plot_hspace)
        self.fig, (self.ax2, self.ax) = plt.subplots(2, 1, figsize=(self.width,
                                                                    combined_plot_height))
        plt.subplots_adjust(left=self.plot_left_pos, right=self.plot_right_pos,
                            bottom=self.plot_bottom_pos, top=self.plot_top_pos,
                            hspace=self.plot_hspace)
        y_axis_height = (self.plot_top_pos - self.plot_bottom_pos) * \
            (1-self.plot_hspace/(2+self.plot_hspace))/2
        self.layout_rt_sliders()
        self.layout_y_scale_slider(y_slider_width, y_axis_height)
        self.layout_radio_buttons(y_slider_width, y_axis_height)
    def layout_y_scale_slider(self, y_slider_width, y_axis_height):
        # Axes for the vertical y-max slider, to the right of the EIC plot.
        self.y_scale_ax = plt.axes([self.plot_right_pos, self.plot_bottom_pos,
                                    y_slider_width, y_axis_height],
                                   facecolor=self.slider_color)
    def layout_radio_buttons(self, y_slider_width, y_axis_height):
        # Axes for the lin/log, peak-flag, and msms-flag radio button groups.
        self.radio_button_radius = 0.02 * self.gui_scale_factor
        radio_button_axes_width = 1-self.plot_right_pos
        self.lin_log_ax = layout_radio_button_set([self.plot_left_pos,
                                                   self.plot_bottom_pos,
                                                   radio_button_axes_width,
                                                   y_axis_height],
                                                  anchor='NW')
        self.peak_flag_ax = layout_radio_button_set([self.plot_right_pos + y_slider_width,
                                                     self.plot_bottom_pos,
                                                     radio_button_axes_width,
                                                     y_axis_height])
        self.msms_flag_ax = layout_radio_button_set([self.plot_right_pos,
                                                     self.plot_top_pos - y_axis_height,
                                                     radio_button_axes_width,
                                                     y_axis_height])
    def layout_rt_sliders(self):
        # Axes for the three horizontal RT sliders below the EIC plot.
        x_axis_label_height = 0.5
        combined_plot_height = self.height * (2 + self.plot_hspace)
        rt_slider_height = (self.plot_bottom_pos-x_axis_label_height/combined_plot_height)/4.0
        rt_slider_width = self.plot_right_pos - self.plot_left_pos
        self.rt_peak_ax = plt.axes([self.plot_left_pos, 0, rt_slider_width, rt_slider_height],
                                   facecolor=self.slider_color)
        self.rt_max_ax = plt.axes([self.plot_left_pos, rt_slider_height*1.5,
                                   rt_slider_width, rt_slider_height],
                                  facecolor=self.slider_color)
        self.rt_min_ax = plt.axes([self.plot_left_pos, rt_slider_height*3.0,
                                   rt_slider_width, rt_slider_height],
                                  facecolor=self.slider_color)
    def create_radio_buttons(self, axes, labels, on_click_handler, active_idx=0):
        # Helper: build a RadioButtons group with uniformly scaled circles.
        buttons = RadioButtons(axes, labels, active=active_idx)
        for circle in buttons.circles:
            circle.set_radius(self.radio_button_radius)
        buttons.on_clicked(on_click_handler)
        return buttons
    def set_lin_log(self, label):
        # Radio callback: switch the EIC y-axis between linear and log scale.
        logger.debug('Y-scale of EIC plot set to %s scale.', label)
        self.ax.set_yscale(label)
        if label == 'linear':
            self.ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
        self.fig.canvas.draw_idle()
    def set_flag(self, name, value):
        # Persist a note field ('ms1_notes' or 'ms2_notes') for the current compound.
        logger.debug('Setting flag "%s" to "%s".', name, value)
        self.data.set_note(self.compound_idx, name, value)
    def set_peak_flag(self, label):
        self.set_flag('ms1_notes', label)
    def set_msms_flag(self, label):
        self.set_flag('ms2_notes', label)
    def on_pick(self, event):
        # Mouse-pick callback: highlight the clicked EIC line and show its name.
        thisline = event.artist
        thisline.set_color('cyan')
        label = thisline.get_label()
        self.ax.set_title(label, fontsize=7)
        logger.debug("Sample %s selected on EIC plot via mouse click event.", label)
    def on_motion(self, event):
        # Mouse-move callback: annotate the m/z of the MSMS line under the cursor.
        if event.inaxes == self.ax2:  # in msms mirror plot
            for collection in self.mz_lines:
                found, ind = collection.contains(event)
                if found:
                    segments = collection.get_segments()
                    vertice = segments[ind["ind"][0]]
                    mz = vertice[0][0]
                    self.mz_annot.set_text(f"{mz:.5f}")
                    self.mz_annot.xyann = (mz, event.ydata)
                    self.mz_annot.set_visible(True)
                    self.fig.canvas.draw_idle()
                    return
        # cursor is not over a line: hide the annotation if currently shown
        if self.mz_annot.get_visible():
            self.mz_annot.set_visible(False)
            self.fig.canvas.draw_idle()
    def update_plots(self):
        # Clear all axes and redraw for the (possibly new) current compound/hit.
        self.msms_zoom_factor = 1
        self.ax.cla()
        self.ax2.cla()
        self.rt_peak_ax.cla()
        self.rt_min_ax.cla()
        self.rt_max_ax.cla()
        self.y_scale_ax.cla()
        self.set_plot_data()
    def press(self, event):
        # Keyboard dispatch -- see the key bindings documented in __init__.
        if event.key in ['right', 'l']:
            if self.compound_idx + 1 < len(self.data[0]):
                self.compound_idx += 1
                logger.debug("Increasing compound_idx to %d (inchi_key:%s adduct:%s).",
                             self.compound_idx,
                             self.data[0][self.compound_idx]['identification'].compound[0].inchi_key,
                             self.data[0][self.compound_idx]['identification'].mz_references[0].adduct
                             )
                self.hit_ctr = 0
                self.update_plots()
        elif event.key in ['left', 'h']:
            if self.compound_idx > 0:
                self.compound_idx -= 1
                logger.debug("Decreasing compound_idx to %d (inchi_key:%s adduct:%s).",
                             self.compound_idx,
                             self.data[0][self.compound_idx]['identification'].compound[0].inchi_key,
                             self.data[0][self.compound_idx]['identification'].mz_references[0].adduct
                             )
                self.hit_ctr = 0
                self.update_plots()
        elif event.key in ['up', 'k']:
            if self.hit_ctr > 0:
                self.hit_ctr -= 1
                logger.debug("Decreasing hit_ctr to %d.", self.hit_ctr)
            self.update_plots()
        elif event.key in ['down', 'j']:
            if self.hit_ctr < len(self.hits) - 1:
                logger.debug("Increasing hit_ctr to %d.", self.hit_ctr)
                self.hit_ctr += 1
            self.update_plots()
        elif event.key == 'x':
            if not self.enable_edit:
                self.warn_if_not_atlas_owner()
                return
            logger.debug("Removing compound %d via 'x' key event.", self.compound_idx)
            # index 1 in peak_flags is 'remove' in the default flag set
            self.peak_flag_radio.set_active(1)
        elif event.key == 'z':
            # cycle zoom 1 -> 5 -> 25 -> 1
            self.msms_zoom_factor = 1 if self.msms_zoom_factor == 25 else self.msms_zoom_factor * 5
            logger.debug("Setting msms zoom factor to %d.", self.msms_zoom_factor)
            self.msms_plot()
        elif event.key == 's':
            if self.similar_rects:
                logger.debug("Removing highlight of similar compounds on EIC plot.")
                self.unhighlight_similar_compounds()
            else:
                self.similar_compounds = self.get_similar_compounds()
                logger.debug("Enabling highlight of similar compounds on EIC plot.")
                self.highlight_similar_compounds()
    def update_y_scale(self, val):
        # Slider callback: rescale the EIC y-axis to the slider value.
        # NOTE(review): negative minima are rescaled relative to val, otherwise
        # a small positive floor (0.02) is used -- presumably to keep log scale
        # valid; confirm intent.
        if self.slider_y_min < 0:
            self.slider_y_min = -0.2 * val
        else:
            self.slider_y_min = 0.02
        self.ax.set_ylim(self.slider_y_min, val)
        self.fig.canvas.draw_idle()
    def warn_if_not_atlas_owner(self):
        # Show an on-plot warning when a non-owner attempts an edit.
        user = getpass.getuser()
        if user != self.data.atlas.username:
            text = ("YOU ARE %s. YOU ARE NOT THE ATLAS OWNER."
                    "YOU ARE NOT ALLOWED TO EDIT VALUES WITH THE RT CORRECTOR.")
            self.ax.set_title(text % user)
            logger.warning(text, user)
    def update_rt(self, which, val):
        """
        inputs:
            which: 'rt_min', 'rt_max', or 'rt_peak'
            val: new RT value
        """
        logger.debug("Updating %s to %0.4f", which, val)
        slider = {'rt_min': self.rt_min_slider, 'rt_peak': self.rt_peak_slider,
                  'rt_max': self.rt_max_slider}
        line = {'rt_min': self.min_line, 'rt_peak': self.peak_line, 'rt_max': self.max_line}
        # persist the change, then move the corresponding slider and vline
        self.data.set_rt(self.compound_idx, which, val)
        slider[which].valinit = val
        line[which].set_xdata((val, val))
        if which != 'rt_peak':
            # min/max changes alter the hit window, so refilter and redraw MSMS
            self.msms_zoom_factor = 1
            self.filter_hits()
            self.similar_compounds = self.get_similar_compounds()
            self.highlight_similar_compounds()
            self.msms_plot()
        self.fig.canvas.draw_idle()
    def update_rt_min(self, val):
        self.update_rt('rt_min', val)
    def update_rt_max(self, val):
        self.update_rt('rt_max', val)
    def update_rt_peak(self, val):
        self.update_rt('rt_peak', val)
    def get_similar_compounds(self, use_labels=True):
        """
        inputs:
            use_labels: if True use compound labels in output instead of compound names
        returns:
            list of dicts containing information on compounds with similar mz values or mono
            isotopic MW when compared to self.compound_idx
            each dict contains:
                index: position in self.data[0]
                label: compound name or label string
                rt: a metatlas.datastructures.metatlas_objects.RtReference
                overlaps: True if compound has RT bounds overlapping with those of self.compound_idx
        """
        cid = self.data[0][self.compound_idx]['identification']
        if len(cid.compound) == 0:
            return []
        out = []
        cid_mz_ref = cid.mz_references[0].mz
        cid_mass = cid.compound[0].mono_isotopic_molecular_weight
        for compound_iter_idx, _ in enumerate(self.data[0]):
            cpd_iter_id = self.data[0][compound_iter_idx]['identification']
            if len(cpd_iter_id.compound) == 0:
                continue
            mass = cpd_iter_id.compound[0].mono_isotopic_molecular_weight
            mz_ref = cpd_iter_id.mz_references[0].mz
            # similar means within +/- 0.005 in either m/z reference or mono-isotopic mass
            if (mz_ref-0.005 <= cid_mz_ref <= mz_ref+0.005) or (mass-0.005 <= cid_mass <= mass+0.005):
                out.append({'index': compound_iter_idx,
                            'label': cpd_iter_id.name if use_labels else cpd_iter_id.compound[0].name,
                            'rt': self.data.rts[compound_iter_idx],
                            'overlaps': rt_range_overlaps(self.data.rts[self.compound_idx],
                                                          self.data.rts[compound_iter_idx])})
        return out
class adjust_mz_for_selected_compound(object):
    """Interactive matplotlib view of per-file m/z values for one compound.

    For each LCMS run, the compound's ms1_summary mz_centroid is plotted
    against its peak_height, with vertical lines marking the atlas m/z and
    its +/- tolerance window, mirrored by three sliders. Left/Right arrow
    keys step through compounds.

    NOTE(review): the slider ``on_changed`` callbacks are commented out
    below, so moving the sliders does not persist anything to the atlas.
    """
    def __init__(self,
                 data,
                 include_lcmsruns = None,
                 exclude_lcmsruns = None,
                 include_groups = None,
                 exclude_groups = None,
                 compound_idx = 0,
                 width = 12,
                 height = 6,
                 y_scale='linear',
                 alpha = 0.5,
                 min_max_color = 'sage',
                 peak_color = 'darkviolet',
                 slider_color = 'ghostwhite',
                 y_max = 'auto',
                 y_min = 0):
        """
        data: a metatlas_dataset where files and compounds are stored.
        for example,
        self.metatlas_dataset[file_idx][compound_idx]['identification'].rt_references[-1].unique_id
        is the unique id to the retention time reference for a compound in a file.
        width: specify a width value in inches for the plots and slides
        height: specify a width value in inches for the plots and slides
        min_max_color & peak_color: specify a valid matplotlib color string for the slider and vertical bars
        slider_color: background color for the sliders. Must be a valid matplotlib color
        Press Left and Right arrow keys to move to the next or previous compound
        """
        # plot/appearance state
        self.compound_idx = compound_idx
        self.width = width
        self.height = height
        self.y_scale = y_scale
        self.alpha = alpha
        self.min_max_color = min_max_color
        self.peak_color = peak_color
        self.slider_color = slider_color
        self.y_max = y_max
        self.y_min = y_min
        # apply any include/exclude filters to the dataset up front
        self.data = filter_runs(data, include_lcmsruns, include_groups, exclude_lcmsruns, exclude_groups)
        # create figure and first axes
        self.fig,self.ax = plt.subplots(figsize=(width, height))
        plt.subplots_adjust(left=0.09, bottom=0.275)
        # plt.ticklabel_format(style='plain', axis='x')
        # plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
        # warn the user if they do not own the atlas; and can not edit its values
        self.enable_edit = True
        self.atlas = metob.retrieve('Atlas',unique_id = self.data[0][0]['atlas_unique_id'],username='*')[-1]
        logger.info("loaded file for username = %s", self.atlas.username)
        if getpass.getuser() != self.atlas.username:
            self.ax.set_title("YOUR ARE %s YOU ARE NOT ALLOWED TO EDIT VALUES THE RT CORRECTOR. USERNAMES ARE NOT THE SAME"%getpass.getuser())
            self.enable_edit = False
        #create all event handlers
        self.fig.canvas.callbacks.connect('pick_event', self.on_pick)
        self.fig.canvas.mpl_connect('key_press_event', self.press)
        #create the plot
        self.set_plot_data()
    def set_plot_data(self):
        # Refresh the axes with the current compound's per-file
        # (mz_centroid, peak_height) points and rebuild the sliders.
        self.ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
        self.ax.ticklabel_format(useOffset=False, style='plain', axis='x')
        default_data = self.data[0][self.compound_idx]
        # prefer the identification label, then the compound name, else a placeholder
        if default_data['identification'].name:
            compound_str = default_data['identification'].name.split('///')[0]
        elif default_data['identification'].compound[-1].name:
            compound_str = default_data['identification'].compound[-1].name
        else:
            compound_str = 'nameless compound'
        compound_str = '%d, %s'%(self.compound_idx, compound_str)
        self.ax.set_title('')
        self.ax.set_ylabel('%s'%compound_str)
        # NOTE(review): the x data plotted below is the m/z centroid, not
        # retention time — this label looks copied from the RT adjuster; confirm.
        self.ax.set_xlabel('Retention Time')
        self.my_mz = metob.retrieve('MZReference',
                               unique_id = default_data['identification'].mz_references[-1].unique_id, username='*')[-1]
        for i,d in enumerate(self.data): #this loops through the files
            if d[self.compound_idx]['data']['ms1_summary']:
                # if len(d[self.compound_idx]['data']['ms1_summary']['rt']) > 0:
                x = d[self.compound_idx]['data']['ms1_summary']['mz_centroid']
                y = d[self.compound_idx]['data']['ms1_summary']['peak_height']
                x = np.asarray(x)
                y = np.asarray(y)
                # picker=5 gives a 5-point pick radius so on_pick can identify the file
                self.ax.plot(x,y,'k.',linewidth=2.0,alpha=self.alpha, picker=5, label = d[self.compound_idx]['lcmsrun'].name.replace('.mzML',''))
        # vertical markers at the atlas m/z and its +/- tolerance window
        # (tolerance*mz/1e6 — i.e. the tolerance is expressed in ppm)
        mz_delta = self.my_mz.mz_tolerance*self.my_mz.mz/1e6
        self.min_line = self.ax.axvline(self.my_mz.mz-mz_delta, color=self.min_max_color,linewidth=4.0)
        self.max_line = self.ax.axvline(self.my_mz.mz+mz_delta, color=self.min_max_color,linewidth=4.0)
        self.peak_line = self.ax.axvline(self.my_mz.mz, color=self.peak_color,linewidth=4.0)
        min_x = self.ax.get_xlim()[0]
        max_x = self.ax.get_xlim()[1]
        print((min_x,max_x))
        # NOTE(review): 'axisbg' was removed in matplotlib >= 2.0 (renamed
        # 'facecolor') — confirm the pinned matplotlib version supports it.
        self.mz_peak_ax = plt.axes([0.09, 0.05, 0.81, 0.03], axisbg=self.slider_color)
        self.mz_max_ax = plt.axes([0.09, 0.1, 0.81, 0.03], axisbg=self.slider_color)
        self.mz_min_ax = plt.axes([0.09, 0.15, 0.81, 0.03], axisbg=self.slider_color)
        self.mz_min_slider = Slider(self.mz_min_ax, 'mz min', min_x, max_x, valinit=self.my_mz.mz-mz_delta,color=self.min_max_color,valfmt='%1.4f')
        self.mz_min_slider.vline.set_color('black')
        self.mz_min_slider.vline.set_linewidth(4)
        self.mz_max_slider = Slider(self.mz_max_ax, 'mz max', min_x, max_x, valinit=self.my_mz.mz+mz_delta,color=self.min_max_color,valfmt='%1.4f')
        self.mz_max_slider.vline.set_color('black')
        self.mz_max_slider.vline.set_linewidth(4)
        self.mz_peak_slider = Slider(self.mz_peak_ax,'mz peak', min_x, max_x, valinit=self.my_mz.mz,color=self.peak_color,valfmt='%1.4f')
        self.mz_peak_slider.vline.set_color('black')
        self.mz_peak_slider.vline.set_linewidth(4)
        # if self.enable_edit:
        #     self.rt_min_slider.on_changed(self.update_rt)
        #     self.rt_max_slider.on_changed(self.update_rt)
        #     self.rt_peak_slider.on_changed(self.update_rt)
        self.lin_log_ax = plt.axes([0.1, 0.75, 0.1, 0.15])#, axisbg=axcolor)
        self.lin_log_ax.axis('off')
        self.lin_log_radio = RadioButtons(self.lin_log_ax, ('linear', 'log'))
        self.lin_log_radio.on_clicked(self.set_lin_log)
    def set_lin_log(self,label):
        # radio-button callback: toggle the y axis between linear and log scale
        self.ax.set_yscale(label)
        self.ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
        self.fig.canvas.draw_idle()
    def on_pick(self,event):
        # pick callback: highlight the clicked file's points and show its name
        thisline = event.artist
        thisline.set_color('red')
        self.ax.set_title(thisline.get_label(), fontsize=7)
    def press(self,event):
        # key callback: Right/Left arrows step to the next/previous compound,
        # clearing the main axes and all three slider axes before replotting
        if event.key == 'right':
            if self.compound_idx + 1 < len(self.data[0]):
                self.compound_idx += 1
                self.ax.cla()
                self.mz_peak_ax.cla()
                self.mz_min_ax.cla()
                self.mz_max_ax.cla()
                self.set_plot_data()
        if event.key == 'left':
            if self.compound_idx > 0:
                self.compound_idx -= 1
                self.ax.cla()
                self.mz_peak_ax.cla()
                self.mz_min_ax.cla()
                self.mz_max_ax.cla()
                self.set_plot_data()
    # def update_rt(self,val):
    #     self.my_rt.rt_min = self.rt_min_slider.val
    #     self.my_rt.rt_max = self.rt_max_slider.val
    #     self.my_rt.rt_peak = self.rt_peak_slider.val
    #     self.rt_min_slider.valinit = self.my_rt.rt_min
    #     self.rt_max_slider.valinit = self.my_rt.rt_max
    #     self.rt_peak_slider.valinit = self.my_rt.rt_peak
    #     metob.store(self.my_rt)
    #     self.min_line.set_xdata((self.my_rt.rt_min,self.my_rt.rt_min))
    #     self.max_line.set_xdata((self.my_rt.rt_max,self.my_rt.rt_max))
    #     self.peak_line.set_xdata((self.my_rt.rt_peak,self.my_rt.rt_peak))
    #     self.fig.canvas.draw_idle()
def replace_compound_id_with_name(x):
    """Resolve a serialized list of compound ids to the first compound's name.

    Returns '' when the serialized list is empty.
    """
    compound_ids = literal_eval(x)
    if not compound_ids:
        return ''
    matches = metob.retrieve('Compounds', unique_id=compound_ids[0], username='*')
    return matches[-1].name
def make_compound_id_df(data):
    """Build a DataFrame of compound identifications from the first file's entries.

    Columns: compound (resolved to a name), name, username, rt_unique_id.
    """
    identifications = [entry['identification'] for entry in data[0]]
    df = metob.to_dataframe(identifications)
    df['compound'] = df['compound'].apply(replace_compound_id_with_name).astype('str')
    df['rt_unique_id'] = df['rt_references'].apply(literal_eval)
    return df[['compound', 'name', 'username', 'rt_unique_id']]
def show_compound_grid(input_fname = '',input_dataset=[]):
    """
    Provide a valid path to data in or a dataset
    """
    # load from disk only when a dataset was not passed in directly
    if input_dataset:
        data = input_dataset
    else:
        print("loading...")
        data = ma_data.get_dill_data(input_fname)
    atlas_in_data = metob.retrieve('Atlas',unique_id = data[0][0]['atlas_unique_id'],username='*')
    print(("loaded file for username = ", atlas_in_data[0].username))
    username = getpass.getuser()
    # warn (but do not abort) when the current user does not own the atlas
    if username != atlas_in_data[0].username:
        print(("YOUR ARE", username, "YOU ARE NOT ALLOWED TO EDIT VALUES THE RT CORRECTOR. USERNAMES ARE NOT THE SAME"))
    compound_df = make_compound_id_df(data)
    compound_grid = qgrid.QGridWidget(df=compound_df)#,set_grid_option={'show_toolbar',True})
    compound_grid.export()
    return data,compound_grid
def getcommonletters(strlist):
    """Return the characters shared by every string at each position.

    Positions are compared only up to the length of the shortest string
    (``zip`` truncates). A character is kept when all strings agree at
    that position, so e.g. ['abc', 'axc'] -> 'ac'.

    Parameters
    ----------
    strlist : sequence of str

    Returns
    -------
    str
    """
    # Replaces the original opaque reduce((a == b) and a or None) trick
    # with an equivalent, readable all-equal test per column.
    return ''.join(chars[0] for chars in zip(*strlist) if len(set(chars)) == 1)
def findcommonstart(strlist):
    """Return the longest common prefix of the strings in strlist.

    The original implementation iterated getcommonletters() to a fixed
    point, which converges to the common prefix in O(n*m) extra passes;
    this computes the prefix directly in a single pass.

    Parameters
    ----------
    strlist : sequence of str

    Returns
    -------
    str
        '' when strlist is empty or the strings share no prefix.
    """
    prefix_chars = []
    for column in zip(*strlist):
        # stop at the first position where the strings disagree
        if len(set(column)) != 1:
            break
        prefix_chars.append(column[0])
    return ''.join(prefix_chars)
def plot_all_compounds_for_each_file(input_dataset = [], input_fname = '', include_lcmsruns = [],exclude_lcmsruns = [], nCols = 8, scale_y=True , output_loc=''):
    """Save one PDF per file showing every compound's EIC in a small-multiples grid.

    Parameters
    ----------
    input_dataset : metatlas dataset; loaded from input_fname when empty
    input_fname : path to a dill file (used only when input_dataset is empty)
    include_lcmsruns / exclude_lcmsruns : substring filters applied to both
        lcmsrun and group names
    nCols : number of mini-plots per row
    scale_y : normalize each trace by a running per-file maximum when True
    output_loc : directory for the output PDFs (environment vars expanded)

    Bug fix: the original called os.path.expandvars('output_loc') on the
    literal string instead of the output_loc argument, so every caller wrote
    into a directory literally named 'output_loc'. The sibling
    plot_all_files_for_each_compound expands the variable correctly.
    """
    if not input_dataset:
        data = ma_data.get_dill_data(os.path.expandvars(input_fname))
    else:
        data = input_dataset
    # NOTE(review): the include/exclude lcmsrun patterns are applied to both
    # the lcmsrun and the group filters here (same as the sibling function) —
    # presumably intentional; confirm.
    data = filter_runs(data, include_lcmsruns, include_lcmsruns, exclude_lcmsruns, exclude_lcmsruns)
    compound_names = ma_data.get_compound_names(data)[0]
    file_names = ma_data.get_file_names(data)
    output_loc = os.path.expandvars(output_loc)  # fixed: was the literal 'output_loc'
    nRows = int(np.ceil(len(compound_names)/float(nCols)))
    xmin = 0
    xmax = 210
    subrange = float(xmax-xmin)/float(nCols) # scale factor for the x-axis
    y_max = list()
    if scale_y:
        # one shared maximum per file, repeated once per non-empty compound so
        # the cycle() below lines up with the plotting loop
        for file_idx,my_file in enumerate(file_names):
            temp = -1
            counter = 0
            for compound_idx,compound in enumerate(compound_names):
                d = data[file_idx][compound_idx]
                if len(d['data']['eic']['rt']) > 0:
                    counter += 1
                    y = max(d['data']['eic']['intensity'])
                    if y > temp:
                        temp = y
            y_max += [temp] * counter
    else:
        # independent maximum per (file, compound) trace
        for file_idx,my_file in enumerate(file_names):
            for compound_idx,compound in enumerate(compound_names):
                d = data[file_idx][compound_idx]
                if len(d['data']['eic']['rt']) > 0:
                    y_max.append(max(d['data']['eic']['intensity']))
    y_max = cycle(y_max)
    # create ouput dir
    if not os.path.exists(output_loc):
        os.makedirs(output_loc)
    for file_idx,my_file in enumerate(file_names):
        ax = plt.subplot(111)#, aspect='equal')
        plt.setp(ax, 'frame_on', False)
        ax.set_ylim([0, nRows+7])
        col = 0
        row = nRows+6
        counter = 1
        for compound_idx,compound in enumerate(compound_names):
            if col == nCols:
                row -= 1.3
                col = 0
            d = data[file_idx][compound_idx]
            rt_min = d['identification'].rt_references[0].rt_min
            rt_max = d['identification'].rt_references[0].rt_max
            rt_peak = d['identification'].rt_references[0].rt_peak
            if len(d['data']['eic']['rt']) > 0:
                x = d['data']['eic']['rt']
                y = d['data']['eic']['intensity']
                y = y/next(y_max)
                new_x = (x-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2) ## remapping the x-range
                xlbl = np.array_str(np.linspace(min(x), max(x), 8), precision=2)
                rt_min_ = (rt_min-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2)
                rt_max_ = (rt_max-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2)
                rt_peak_ = (rt_peak-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2)
                ax.plot(new_x, y+row,'k-')#,ms=1, mew=0, mfc='b', alpha=1.0)]
                #ax.annotate('plot={}'.format(col+1),(max(new_x)/2+col*subrange,row-0.1), size=5,ha='center')
                ax.annotate(xlbl,(min(new_x),row-0.1), size=2)
                ax.annotate('{0},{1},{2},{3}'.format(compound,rt_min, rt_peak, rt_max),(min(new_x),row-0.2), size=2)#,ha='center')
                myWhere = np.logical_and(new_x>=rt_min_, new_x<=rt_max_ )
                ax.fill_between(new_x,min(y)+row,y+row,myWhere, facecolor='c', alpha=0.3)
                col += 1
            else:
                # empty trace: draw a flat red segment as a placeholder
                new_x = np.asarray([0,1])#(x-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2) ## remapping the x-range
                ax.plot(new_x, new_x-new_x+row,'r-')#,ms=1, mew=0, mfc='b', alpha=1.0)]
                ax.annotate(compound,(min(new_x),row-0.1), size=2)
                col += 1
            counter += 1
        plt.title(my_file)
        fig = plt.gcf()
        fig.set_size_inches(nRows*1.0, nCols*4.0)
        fig.savefig(os.path.join(output_loc, my_file + '-' + str(counter) + '.pdf'))
        plt.clf()
def plot_all_files_for_each_compound(input_dataset = [], input_fname = '', include_lcmsruns = [],exclude_lcmsruns = [], nCols = 8, scale_y=True , output_loc=''):
    """Save one PDF per compound showing every file's EIC in a small-multiples grid.

    Mirror image of plot_all_compounds_for_each_file: the outer loop is over
    compounds and the mini-plots are the files. When scale_y is True, each
    trace is normalized by a shared per-compound maximum; otherwise each
    trace uses its own maximum. Output PDFs go to output_loc (environment
    variables expanded); the directory is created if missing.
    """
    if not input_dataset:
        data = ma_data.get_dill_data(os.path.expandvars(input_fname))
    else:
        data = input_dataset
    # NOTE(review): lcmsrun patterns are applied to both the lcmsrun and
    # group filters (same as the sibling function) — presumably intentional.
    data = filter_runs(data, include_lcmsruns, include_lcmsruns, exclude_lcmsruns, exclude_lcmsruns)
    compound_names = ma_data.get_compound_names(data)[0]
    file_names = ma_data.get_file_names(data)
    output_loc = os.path.expandvars(output_loc)
    nRows = int(np.ceil(len(file_names)/float(nCols)))
    print(('nrows = ', nRows))
    xmin = 0
    xmax = 210
    subrange = float(xmax-xmin)/float(nCols) # scale factor for the x-axis
    y_max = list()
    if scale_y:
        # one shared maximum per compound, repeated once per non-empty file so
        # the cycle() below stays aligned with the plotting loop
        for compound_idx,compound in enumerate(compound_names):
            temp = -1
            counter = 0
            for file_idx,my_file in enumerate(file_names):
                d = data[file_idx][compound_idx]
                if len(d['data']['eic']['rt']) > 0:
                    counter += 1
                    y = max(d['data']['eic']['intensity'])
                    if y > temp:
                        temp = y
            y_max += [temp] * counter
    else:
        # independent maximum per (compound, file) trace
        for compound_idx,compound in enumerate(compound_names):
            for file_idx,my_file in enumerate(file_names):
                d = data[file_idx][compound_idx]
                if len(d['data']['eic']['rt']) > 0:
                    y_max.append(max(d['data']['eic']['intensity']))
    print(("length of ymax is ", len(y_max)))
    y_max = cycle(y_max)
    # create ouput dir
    if not os.path.exists(output_loc):
        os.makedirs(output_loc)
    plt.ioff()
    for compound_idx,compound in enumerate(compound_names):
        ax = plt.subplot(111)#, aspect='equal')
        plt.setp(ax, 'frame_on', False)
        ax.set_ylim([0, nRows+7])
        col = 0
        row = nRows+6
        counter = 1
        for file_idx,my_file in enumerate(file_names):
            if col == nCols:
                row -= 1.3
                col = 0
            d = data[file_idx][compound_idx]
            #file_name = compound_names[compound_idx]
            rt_min = d['identification'].rt_references[0].rt_min
            rt_max = d['identification'].rt_references[0].rt_max
            rt_peak = d['identification'].rt_references[0].rt_peak
            if len(d['data']['eic']['rt']) > 0:
                x = d['data']['eic']['rt']
                y = d['data']['eic']['intensity']
                y = y/next(y_max)
                new_x = (x-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2) ## remapping the x-range
                xlbl = np.array_str(np.linspace(min(x), max(x), 8), precision=2)
                rt_min_ = (rt_min-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2)
                rt_max_ = (rt_max-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2)
                rt_peak_ = (rt_peak-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2)
                ax.plot(new_x, y+row,'k-')#,ms=1, mew=0, mfc='b', alpha=1.0)]
                #ax.annotate('plot={}'.format(col+1),(max(new_x)/2+col*subrange,row-0.1), size=5,ha='center')
                ax.annotate(xlbl,(min(new_x),row-0.1), size=2)
                ax.annotate('{0},{1},{2},{3}'.format(my_file,rt_min, rt_peak, rt_max),(min(new_x),row-0.2), size=2)#,ha='center')
                myWhere = np.logical_and(new_x>=rt_min_, new_x<=rt_max_ )
                #ax.fill_between(new_x,min(y)+row,y+row,myWhere, facecolor='c', alpha=0.3)
                col += 1
            else:
                # empty trace: flat red segment as a placeholder
                new_x = np.asarray([0,1])
                ax.plot(new_x, new_x-new_x+row,'r-')#,ms=1, mew=0, mfc='b', alpha=1.0)]
                # y = [0,1]#(x-x[0])*subrange/float(x[-1]-x[0])+col*(subrange+2) ## remapping the x-range
                # ax.plot(new_x, y-y+row,'r-')#,ms=1, mew=0, mfc='b', alpha=1.0)]
                ax.annotate(my_file,(min(new_x),row-0.1), size=1)
                col += 1
            counter += 1
        plt.title(compound)
        fig = plt.gcf()
        fig.set_size_inches(nRows*1.0,nCols*4.0)
        fig.savefig(os.path.join(output_loc, compound + '-' + str(counter) + '.pdf'))
        plt.close(fig)
""" contribution from Hans de Winter """
def _InitialiseNeutralisationReactions():
    """Return (SMARTS pattern Mol, replacement Mol) pairs for neutralising charged groups.

    Contribution from Hans de Winter. Consumed by NeutraliseCharges().
    NOTE(review): this function is defined twice in this file with identical
    bodies; the later definition shadows this one at import time.
    """
    patts= (
        # Imidazoles
        ('[n+;H]','n'),
        # Amines
        ('[N+;!H0]','N'),
        # Carboxylic acids and alcohols
        ('[$([O-]);!$([O-][#7])]','O'),
        # Thiols
        ('[S-;X1]','S'),
        # Sulfonamides
        ('[$([N-;X2]S(=O)=O)]','N'),
        # Enamines
        ('[$([N-;X2][C,N]=C)]','N'),
        # Tetrazoles
        ('[n-]','[nH]'),
        # Sulfoxides
        ('[$([S-]=O)]','S'),
        # Amides
        ('[$([N-]C=O)]','N'),
        )
    return [(Chem.MolFromSmarts(x),Chem.MolFromSmiles(y,False)) for x,y in patts]
def desalt(mol):
    """Keep only the largest disconnected component of an RDKit molecule.

    Parameters
    ----------
    mol : rdkit Mol

    Returns
    -------
    (Mol, bool)
        The original mol and False when there is a single component;
        otherwise the largest component (by atom count) and True.

    Cleanup: removed a stray trailing semicolon and named the loop variables.
    """
    fragments = Chem.rdmolops.GetMolFrags(mol)  # one tuple of atom indices per component
    if len(fragments) == 1:  # nothing to strip
        return mol, False
    # Split the SMILES on '.' — each piece is one disconnected structure —
    # and keep the piece with the most atoms (first wins on ties).
    parent_mol = None
    parent_atom_count = 0
    for smiles in Chem.MolToSmiles(mol).split('.'):
        candidate = Chem.MolFromSmiles(smiles)
        atom_count = candidate.GetNumAtoms()
        if atom_count > parent_atom_count:
            parent_atom_count = atom_count
            parent_mol = candidate
    return parent_mol, True
""" contribution from Hans de Winter """
def _InitialiseNeutralisationReactions():
    """Build the (SMARTS pattern Mol, replacement Mol) pairs used to neutralise
    charged functional groups. Contribution from Hans de Winter.
    """
    smarts_to_smiles = [
        ('[n+;H]', 'n'),                      # imidazoles
        ('[N+;!H0]', 'N'),                    # amines
        ('[$([O-]);!$([O-][#7])]', 'O'),      # carboxylic acids and alcohols
        ('[S-;X1]', 'S'),                     # thiols
        ('[$([N-;X2]S(=O)=O)]', 'N'),         # sulfonamides
        ('[$([N-;X2][C,N]=C)]', 'N'),         # enamines
        ('[n-]', '[nH]'),                     # tetrazoles
        ('[$([S-]=O)]', 'S'),                 # sulfoxides
        ('[$([N-]C=O)]', 'N'),                # amides
    ]
    reactions = []
    for smarts, smiles in smarts_to_smiles:
        reactions.append((Chem.MolFromSmarts(smarts), Chem.MolFromSmiles(smiles, False)))
    return reactions
def NeutraliseCharges(mol, reactions=None):
    """Neutralise charged functional groups in an RDKit molecule.

    Parameters
    ----------
    mol : rdkit Mol
    reactions : optional list of (pattern Mol, replacement Mol) pairs;
        defaults to _InitialiseNeutralisationReactions().

    Returns
    -------
    (Mol, bool)
        The (possibly rewritten) molecule and whether any substitution was made.

    Bug fix: the original unconditionally overwrote the `reactions` argument,
    so a caller-supplied reaction list was silently ignored.
    """
    if reactions is None:
        reactions = _InitialiseNeutralisationReactions()
    replaced = False
    for reactant, product in reactions:
        # repeat until no match remains, since one molecule may contain
        # several instances of the same charged group
        while mol.HasSubstructMatch(reactant):
            replaced = True
            rms = Chem.AllChem.ReplaceSubstructs(mol, reactant, product)
            # round-trip through SMILES to re-sanitize the edited molecule
            mol = Chem.MolFromSmiles(Chem.MolToSmiles(rms[0]))
    return (mol, replaced)
def drawStructure_Fragment(pactolus_tree,fragment_idx,myMol,myMol_w_Hs):
    """Render the fragment at fragment_idx of a pactolus tree as SVG.

    Returns (svg_text, depth_of_hit) where depth_of_hit is the number of
    broken bonds (sum of the fragment's bond_bool_arr).

    Cleanup: the original computed the in-fragment atom indices and then
    immediately overwrote the variable with the out-of-fragment indices —
    the dead first assignment is removed.
    """
    from copy import deepcopy
    depth_of_hit = np.sum(pactolus_tree[fragment_idx]['bond_bool_arr'])
    mol2 = deepcopy(myMol_w_Hs)
    # Atoms NOT in the fragment become dummy atoms (atomic number 0) ...
    non_fragment_atoms = np.where(pactolus_tree[fragment_idx]['atom_bool_arr']==False)[0]
    for f in non_fragment_atoms:
        mol2.GetAtomWithIdx(f).SetAtomicNum(0)
    # ... and are then removed via a dummy-atom SMARTS query
    mol3 = Chem.DeleteSubstructs(mol2, Chem.MolFromSmarts('[#0]'))
    mol3 = Chem.RemoveHs(mol3)
    return moltosvg(mol3),depth_of_hit
def moltosvg(mol,molSize=(450,150),kekulize=True):
    """Render an RDKit molecule to SVG text.

    Parameters
    ----------
    mol : rdkit Mol
    molSize : (width, height) in pixels for the drawing canvas
    kekulize : attempt kekulization before drawing; falls back to the
        unmodified molecule when it fails

    Returns
    -------
    str : SVG markup with the 'svg:' namespace prefix stripped.

    Fix: the bare `except:` is narrowed to `except Exception` so it no
    longer swallows KeyboardInterrupt/SystemExit.
    """
    mc = Chem.Mol(mol.ToBinary())
    if kekulize:
        try:
            Chem.Kekulize(mc)
        except Exception:
            # kekulization failed — draw the un-kekulized copy instead
            mc = Chem.Mol(mol.ToBinary())
    if not mc.GetNumConformers():
        # no 2D coordinates yet; generate them for drawing
        rdDepictor.Compute2DCoords(mc)
    drawer = rdMolDraw2D.MolDraw2DSVG(molSize[0],molSize[1])
    drawer.DrawMolecule(mc)
    drawer.FinishDrawing()
    svg = drawer.GetDrawingText()
    # It seems that the svg renderer used doesn't quite hit the spec.
    # Here are some fixes to make it work in the notebook, although I think
    # the underlying issue needs to be resolved at the generation step
    return svg.replace('svg:','')
def get_ion_from_fragment(frag_info,spectrum):
    """Select the spectrum rows whose frag_info row has any nonzero entry.

    Returns (selected rows of spectrum, the np.where index tuple).
    """
    per_row_totals = np.sum(frag_info, axis=1)
    hit_indices = np.where(per_row_totals)
    hit = spectrum[hit_indices, :][0]
    return hit, hit_indices
def calculate_median_of_internal_standards(dataset_for_median,atlas,include_lcmsruns = [],exclude_lcmsruns = [], include_groups = [],exclude_groups = []):
    """Collect peak_height/peak_area records for every compound in the atlas
    that carries an internal_standard_id, one record per (file, compound).

    Missing or falsy ms1_summary values are recorded as 0.
    """
    dataset_for_median = filter_runs(dataset_for_median, include_lcmsruns, include_groups,
                                     exclude_lcmsruns, exclude_groups)
    internal_standard_vals = []
    for file_data in dataset_for_median:  # loop through files
        for compound_idx, compound_data in enumerate(file_data):  # loop through compounds
            standard_id = atlas.compound_identifications[compound_idx].internal_standard_id
            if standard_id == 'nan':
                continue
            record = {'file_name': compound_data['lcmsrun'].name,
                      'internal_standard_id': standard_id}
            summary = compound_data['data']['ms1_summary']
            for fieldname in ['peak_height', 'peak_area']:
                record[fieldname] = summary[fieldname] if summary and summary[fieldname] else 0
            internal_standard_vals.append(record)
    return internal_standard_vals
def normalize_peaks_by_internal_standard(metatlas_dataset,atlas,include_lcmsruns = [],exclude_lcmsruns = [], include_groups = [],exclude_groups = []):
    """
    Takes in a metatlas dataset and an atlas. Returns a metatlas dataset with
    ms1_summary peak_height and peak_area normalized by internal standard where
    user selected in their atlas.
    The compound_identification in the atlas has the followign fields:
        internal_standard_id = MetUnicode(help='Freetext identifier for an internal standard')
        do_normalization = MetBool(False)
        internal_standard_to_use = MetUnicode(help='identifier of which internal standard to normalize by')
    Peaks are normalized by:
    I_normalized = I_molecule_in_file / I_standard_in_file * MEDIAN(I_standard_in_good_files)
    "good files" for calculating the median intensity of the standard are identified
    by exclude_lcmsruns=[]
    The patterns in exclude_lcmsruns will remove files that you don't want to use for calculating the median intensity

    Cleanup: the original assigned a dead ``v = 0`` when a field was missing;
    that branch is now an explicit skip (the stored value is unchanged).
    """
    internal_standard_vals = calculate_median_of_internal_standards(metatlas_dataset,atlas,include_lcmsruns = include_lcmsruns,
        exclude_lcmsruns =exclude_lcmsruns, include_groups =include_groups,exclude_groups =exclude_groups)
    # per-standard median across the "good" files, used as the reference level
    median_vals = pd.DataFrame(internal_standard_vals).drop('file_name',axis=1).groupby('internal_standard_id').median()
    df = pd.DataFrame(internal_standard_vals)
    # per-(standard, file) intensities for the normalization denominator
    norm_dfs = {
        'peak_area': df.pivot(index='internal_standard_id', columns='file_name', values='peak_area'),
        'peak_height': df.pivot(index='internal_standard_id', columns='file_name', values='peak_height'),
    }
    for i, dd in enumerate(metatlas_dataset):  # loop through files
        # skip files absent from the normalization table
        if dd[0]['lcmsrun'].name not in norm_dfs['peak_area'].columns:
            continue
        for j, d in enumerate(dd):  # loop through compounds
            ident = atlas.compound_identifications[j]
            if ident.do_normalization != True:
                continue
            for fieldname in ['peak_height', 'peak_area']:
                summary = d['data']['ms1_summary']
                if (not summary) or (not summary[fieldname]):
                    continue  # nothing to normalize for this field
                norm_val = norm_dfs[fieldname].loc[ident.internal_standard_to_use, d['lcmsrun'].name]
                median_val = median_vals.loc[ident.internal_standard_to_use, fieldname]
                metatlas_dataset[i][j]['data']['ms1_summary'][fieldname] = summary[fieldname] / norm_val * median_val
    return metatlas_dataset
#plot msms and annotate
#compound name
#formula
#adduct
#theoretical m/z
#histogram of retention times
#scatter plot of retention time with peak area
#retention time
#print all chromatograms
#structure
def filter_runs(data, include_lcmsruns=None, include_groups=None, exclude_lcmsruns=None, exclude_groups=None):
    """Apply include filters then exclude filters to a metatlas dataset.

    Empty/None filter lists are skipped; filters are applied in the order
    include-lcmsrun, include-group, exclude-lcmsrun, exclude-group.
    """
    include_steps = (('lcmsrun', include_lcmsruns), ('group', include_groups))
    exclude_steps = (('lcmsrun', exclude_lcmsruns), ('group', exclude_groups))
    for field, patterns in include_steps:
        if patterns:
            data = filter_lcmsruns_in_dataset_by_include_list(data, field, patterns)
    for field, patterns in exclude_steps:
        if patterns:
            data = filter_lcmsruns_in_dataset_by_exclude_list(data, field, patterns)
    return data
def make_output_dataframe(input_fname='', input_dataset=None, include_lcmsruns=None, exclude_lcmsruns=None, include_groups=None, exclude_groups=None, output_loc="", fieldname='peak_height', use_labels=False, short_names_df=None, summarize=False, polarity='', overwrite=True):
    """
    fieldname can be: peak_height, peak_area, mz_centroid, rt_centroid, mz_peak, rt_peak

    Builds a compounds-by-files DataFrame of the requested ms1_summary field,
    with a (group, file[, short-name...]) MultiIndex on the columns. When
    summarize is True, per-row summary statistic columns are appended. When
    output_loc is given, the table is also written there as <polarity>_<fieldname>.tab.
    """
    if not input_dataset:
        data = ma_data.get_dill_data(os.path.expandvars(input_fname))
    else:
        data = input_dataset
    data = filter_runs(data, include_lcmsruns, include_groups, exclude_lcmsruns, exclude_groups)
    compound_names = ma_data.get_compound_names(data, use_labels=use_labels)[0]
    file_names = ma_data.get_file_names(data)
    group_names = ma_data.get_group_names(data)
    group_shortnames = ma_data.get_group_shortnames(data)
    output_loc = os.path.expandvars(output_loc)
    out = pd.DataFrame(index=compound_names, columns=file_names, dtype=float)
    # fill the table; ma_data.extract falls back to 0 when the field is absent
    for i, sample in enumerate(data):
        for j, compound in enumerate(sample):
            ids = ['data', 'ms1_summary', fieldname]
            out.loc[compound_names[j], file_names[i]] = ma_data.extract(compound, ids, 0)
    columns = []
    if short_names_df is None:
        short_names_df = pd.DataFrame()
        # simple two-level column index: (group, file)
        for i, name in enumerate(file_names):
            columns.append((group_names[i], name))
        out.columns = pd.MultiIndex.from_tuples(columns, names=['group', 'file'])
    else:
        # extended column index with the short-name metadata looked up by
        # the file's base name (everything before the first '.')
        for i, name in enumerate(file_names):
            temp = [group_names[i], name, group_shortnames[i]]
            temp.extend(short_names_df.loc[name.split('.')[0]].values.tolist())
            columns.append(tuple(temp))
        out.columns = pd.MultiIndex.from_tuples(columns, names=['group', 'file', 'short groupname', 'sample treatment', 'short filename', 'short samplename'])
    out = out.reindex(sorted(out.columns), axis=1)
    if summarize:
        # statistics are computed on a flat column index
        out.columns = out.columns.droplevel()
        out = append_stats_columns(out)
    if output_loc:
        prefix = f"{polarity}_" if polarity != '' else ''
        df_path = os.path.join(output_loc, f"{prefix}{fieldname}.tab")
        write_utils.check_existing_file(df_path, overwrite)
        out.to_csv(df_path, sep="\t")
        logger.info('Exported %s to %s.', fieldname, df_path)
    return out
def append_stats_columns(in_df):
    """Return a copy of in_df with per-row summary-statistic columns appended.

    Adds mean, median, min, max, standard deviation, standard error and a
    '#NaNs' count computed across each row.
    """
    stat_methods = [
        ('mean', 'mean'),
        ('median', 'median'),
        ('min', 'min'),
        ('max', 'max'),
        ('standard deviation', 'std'),
        ('standard error', 'sem'),
    ]
    summary = pd.DataFrame(dtype=float)
    for label, method in stat_methods:
        summary[label] = getattr(in_df, method)(numeric_only=True, axis=1)
    # NOTE(review): this matches the *string* 'NaN', not float NaN —
    # presumably isna() was intended; confirm before changing.
    summary['#NaNs'] = in_df.isin(['NaN']).sum(axis=1)
    return pd.concat([in_df, summary], axis=1)
def file_with_max_precursor_intensity(data,compound_idx):
    """Find the file with the largest MSMS precursor intensity for a compound.

    Returns (file index or None, max intensity found or 0).
    """
    best_idx = None
    best_val = 0
    for file_idx, file_data in enumerate(data):
        msms = file_data[compound_idx]['data']['msms']
        if 'data' not in msms:
            continue
        # an empty placeholder is stored as a list instead of a dict
        if type(msms['data']) == list:
            continue
        intensities = msms['data']['precursor_intensity']
        if len(intensities) == 0:
            continue
        file_max = max(intensities)
        if file_max > best_val:
            best_val = file_max
            best_idx = file_idx
    return best_idx, best_val
def file_with_max_ms1_intensity(data,compound_idx):
    """Find the file with the largest EIC intensity for a compound.

    Returns (file index or None, max intensity found or 0).
    """
    best_idx, best_max = None, 0
    for file_idx, file_data in enumerate(data):
        eic = file_data[compound_idx]['data']['eic']
        # skip missing eic blocks and empty intensity lists
        if not eic or 'intensity' not in eic or eic['intensity'] == []:
            continue
        current = max(eic['intensity'])
        if current > best_max:
            best_max, best_idx = current, file_idx
    return best_idx, best_max
def file_with_max_score(data, frag_refs, compound_idx, filter_by):
    """Score every file's MSMS scans against the matching reference spectra
    and return (best file index, best score, best reference spectrum).

    filter_by is forwarded to sp.filter_frag_refs to select which reference
    spectra are compared. Returns ([], nan, []) when no file has usable data.
    """
    idx = []
    max_score = np.nan
    best_ref_spec = []
    for file_idx in range(len(data)):
        #empty can look like this:
        # {'eic': {'rt': [], 'intensity': [], 'mz': []}, 'ms1_summary': {'num_ms1_datapoints': 0.0, 'rt_centroid': nan, 'mz_peak': nan, 'peak_height': nan, 'rt_peak': nan, 'peak_area': nan, 'mz_centroid': nan}, 'msms': {'data': {'rt': array([], dtype=float64), 'collision_energy': array([], dtype=float64), 'i': array([], dtype=float64), 'precursor_intensity': array([], dtype=float64), 'precursor_MZ': array([], dtype=float64), 'mz': array([], dtype=float64)}}}
        #or empty can look like this:
        # {'eic': None, 'ms1_summary': None, 'msms': {'data': []}}
        # guard against both empty shapes before touching the msms arrays
        if ('data' in list(data[file_idx][compound_idx]['data']['msms'].keys())) and \
            (isinstance(data[file_idx][compound_idx]['data']['msms']['data'],dict)) and \
            ('rt' in list(data[file_idx][compound_idx]['data']['msms']['data'].keys())) and \
            (len(data[file_idx][compound_idx]['data']['msms']['data']['rt'])>0):
            msv_sample_scans = np.array([data[file_idx][compound_idx]['data']['msms']['data']['mz'], data[file_idx][compound_idx]['data']['msms']['data']['i']])
            rt_of_msv_sample = np.array(data[file_idx][compound_idx]['data']['msms']['data']['rt'])
            # split the concatenated points into individual scans wherever
            # the retention time changes between consecutive points
            scan_idxs = [i+1
                         for i in range(rt_of_msv_sample.size-1)
                         if rt_of_msv_sample[i] != rt_of_msv_sample[i+1]]
            for i, msv_sample in enumerate(np.split(msv_sample_scans, scan_idxs, axis=1)):
                for f, frag in sp.filter_frag_refs(data, frag_refs, compound_idx, file_idx, filter_by).iterrows():
                    msv_ref = sp.sort_ms_vector_by_mz(np.array(frag['mz_intensities']).T)
                    # composite score on the aligned sample/reference vectors
                    score = sp.score_ms_vectors_composite(*sp.pairwise_align_ms_vectors(msv_sample, msv_ref, .005, 'shape'))
                    # np.isnan handles the initial max_score = nan sentinel
                    if score > max_score or np.isnan(max_score):
                        max_score = score
                        idx = file_idx
                        best_ref_spec = [frag['mz_intensities']]
    return idx, max_score, best_ref_spec
def plot_errorbar_plots(df,output_loc='', use_shortnames=True, ylabel=""):
    """Save one bar plot per compound row of df, showing per-group means with
    standard-error bars, as <compound>_errorbar.pdf under output_loc.

    Parameters
    ----------
    df : DataFrame with compounds as the index and a MultiIndex column level
        named 'group' (and optionally 'short groupname')
    output_loc : output directory (environment vars expanded; created if missing)
    use_shortnames : group by 'short groupname' when that level exists
    ylabel : optional y-axis label

    Fix: Series values are now accessed with .iloc — plain integer indexing
    on a label-indexed Series is deprecated/removed in modern pandas.
    """
    output_loc = os.path.expandvars(output_loc)
    if not os.path.exists(output_loc):
        os.makedirs(output_loc)
    plt.ioff()
    for compound in df.index:
        # choose the grouping level once, then derive all three aggregates from it
        if 'short groupname' in df.columns.names and use_shortnames:
            grouped = df.loc[compound].groupby(level='short groupname')
        else:
            grouped = df.loc[compound].groupby(level='group')
        m = grouped.mean()
        e = grouped.std()
        c = grouped.count()
        # convert std to standard error (std / sqrt(n)) where n > 0
        for i in range(len(e)):
            if c.iloc[i] > 0:
                e.iloc[i] = e.iloc[i] / c.iloc[i]**0.5
        f, ax = plt.subplots(1, 1,figsize=(12,12))
        m.plot(yerr=e, kind='bar',ax=ax)
        ax.set_title(compound,fontsize=12,weight='bold')
        if ylabel != "":
            plt.ylabel(ylabel)
        plt.tight_layout()
        f.savefig(os.path.join(output_loc, compound + '_errorbar.pdf'))
        plt.close(f)
def make_boxplot_plots(df, output_loc='', use_shortnames=True, ylabel="", overwrite=True):
    """Save one box plot per compound row of df (values grouped by group or
    short groupname) as <compound>_boxplot.pdf under output_loc, with the
    individual points jittered on top of each box.
    """
    output_loc = os.path.expandvars(output_loc)
    plt.ioff()
    for compound in df.index:
        f, ax = plt.subplots(1, 1,figsize=(12,12))
        if use_shortnames and 'short groupname' in df.columns.names:
            g = df.loc[compound].groupby(level='short groupname')
            g.apply(pd.DataFrame).plot(kind='box',ax=ax)
        else:
            g = df.loc[compound].groupby(level='group')
            g.apply(pd.DataFrame).plot(kind='box',ax=ax)
        # overlay the raw values with a small horizontal jitter so they do
        # not stack on the box center line (boxes are at x = 1, 2, ...)
        for i, (n, grp) in enumerate(g):
            x = [i+1] *len(grp)
            x = np.random.normal(x, 0.04, size=len(x))
            plt.scatter(x, grp)
        ax.set_title(compound,fontsize=12,weight='bold')
        plt.xticks(rotation=90)
        if ylabel != "":
            plt.ylabel(ylabel)
        plt.tight_layout()
        fig_path = os.path.join(output_loc, compound + '_boxplot.pdf')
        write_utils.check_existing_file(fig_path, overwrite)
        f.savefig(fig_path)
        #f.clear()
        plt.close(f)#f.clear()
        logger.info('Exported box plot of %s for %s at %s.', ylabel, compound, fig_path)
def frag_refs_to_json(json_dir = '/project/projectdirs/metatlas/projects/sharepoint/', name = 'frag_refs', save = True):
    """Collect all fragmentation references into a column-oriented dict.

    Parameters
    ----------
    json_dir : directory for the output file (when save is True)
    name : output file basename (written as <name>.json)
    save : write <json_dir>/<name>.json when True; otherwise return the
        JSON string (returns None when save is True)

    Cleanup: removed a stray trailing comma that wrapped the first append()
    call in a throwaway tuple, and hoisted the repeated attribute lookups.
    """
    ids = metob.retrieve('CompoundIdentification',username='*')
    frag_refs = [cid for cid in ids if cid.frag_references]
    data = {'head_id': [],
            'inchi_key': [],
            'neutralized_inchi_key': [],
            'neutralized_2d_inchi_key': [],
            'polarity': [],
            'collision_energy': [],
            'technique': [],
            'precursor_mz': [],
            'mz_intensities': []}
    for fr in frag_refs:
        # only the first reference and first compound of each id are exported
        ref = fr.frag_references[0]
        compound = fr.compound[0]
        data['head_id'].append(ref.head_id)
        data['inchi_key'].append(compound.inchi_key)
        data['neutralized_inchi_key'].append(compound.neutralized_inchi_key)
        data['neutralized_2d_inchi_key'].append(compound.neutralized_2d_inchi_key)
        data['polarity'].append(ref.polarity)
        data['precursor_mz'].append(ref.precursor_mz)
        data['mz_intensities'].append([(m.mz, m.intensity) for m in ref.mz_intensities])
        data['collision_energy'].append(ref.collision_energy)
        data['technique'].append(ref.technique)
    if save:
        with open(os.path.join(json_dir, name + '.json'), 'w') as text_file:
            text_file.write(json.dumps(data))
    else:
        return json.dumps(data)
# def get_idenficications_with_fragrefs():
# """
# Select all CompoundIdentifications that have a fragmentation reference
# """
def make_identification_figure(frag_json_dir = '/project/projectdirs/metatlas/projects/sharepoint/', frag_json_name = 'frag_refs',
                               input_fname = '', input_dataset = [], include_lcmsruns = [],
                               exclude_lcmsruns = [], include_groups = [], exclude_groups = [], output_loc = [], use_labels=False):
    """Write one identification figure (PDF) per compound into output_loc.

    For each compound, the file used for plotting is chosen by fragmentation
    reference match score when the compound has an identification, otherwise
    by maximum precursor intensity. Each figure shows the MSMS spectrum of
    the most intense precursor (with a mirrored reference spectrum when one
    was matched), the molecular structure, and a text panel comparing
    theoretical vs. measured m/z and RT.

    :param frag_json_dir: directory holding the fragmentation reference JSON
    :param frag_json_name: basename (without ".json") of the reference file
    :param input_fname: dill file to load when input_dataset is not supplied
    :param input_dataset: pre-loaded metatlas dataset (files x compounds)
    :param include_lcmsruns, exclude_lcmsruns, include_groups, exclude_groups:
        substring filters applied via filter_runs
    :param output_loc: output directory for the PDFs (created if missing)
    :param use_labels: use compound labels instead of names for titles/files
    """
    output_loc = os.path.expandvars(output_loc)
    if not os.path.exists(output_loc):
        os.makedirs(output_loc)
    if not input_dataset:
        data = ma_data.get_dill_data(os.path.expandvars(input_fname))
    else:
        data = input_dataset
    data = filter_runs(data, include_lcmsruns, include_groups, exclude_lcmsruns, exclude_groups)
    compound_names = ma_data.get_compound_names(data, use_labels=use_labels)[0]
    file_names = ma_data.get_file_names(data)
    frag_refs = pd.read_json(os.path.join(frag_json_dir, frag_json_name + ".json"))
    for compound_idx in range(len(compound_names)):
        file_idx = None
        file_precursor_intensity = 0
        score = None
        ref_spec = []
        if any([(len(data[i][compound_idx]['identification'].compound) != 0) and (data[i][compound_idx]['identification'].compound is not None) for i in range(len(file_names))]):
            file_idx, score, ref_spec = file_with_max_score(data, frag_refs, compound_idx, 'inchi_key and rt and polarity')
            # BUG FIX: this test was `if ~isinstance(file_idx, int)`, which is
            # ALWAYS true (`~` is bitwise NOT: ~True == -2, ~False == -1, both
            # truthy), so the scored file index was unconditionally overwritten.
            if not isinstance(file_idx, int):  # no reference found for this compound
                file_idx = file_with_max_precursor_intensity(data, compound_idx)[0]
        else:
            file_idx = file_with_max_precursor_intensity(data, compound_idx)[0]
        if isinstance(file_idx, int):
            fig = plt.figure(figsize=(20, 20))
            ax = fig.add_subplot(211)
            ax.set_title(compound_names[compound_idx], fontsize=12, weight='bold')
            ax.set_xlabel('m/z', fontsize=12, weight='bold')
            ax.set_ylabel('intensity', fontsize=12, weight='bold')
            # TODO: iterate across all collision energies
            precursor_intensity = data[file_idx][compound_idx]['data']['msms']['data']['precursor_intensity']
            idx_max = np.argwhere(precursor_intensity == np.max(precursor_intensity)).flatten()
            mz = data[file_idx][compound_idx]['data']['msms']['data']['mz'][idx_max]
            zeros = np.zeros(data[file_idx][compound_idx]['data']['msms']['data']['mz'][idx_max].shape)
            intensity = data[file_idx][compound_idx]['data']['msms']['data']['i'][idx_max]
            ax.vlines(mz, zeros, intensity, colors='r', linewidth=2)
            # Annotate the most intense peaks, skipping any within 0.1 m/z of
            # an already-labelled peak; 1.001e9 seeds the distance check so the
            # first min() over label distances is well-defined.
            sx = np.argsort(intensity)[::-1]
            labels = [1.001e9]
            for i in sx:
                if np.min(np.abs(mz[i] - labels)) > 0.1 and intensity[i] > 0.02 * np.max(intensity):
                    ax.annotate('%5.4f' % mz[i], xy=(mz[i], 1.01 * intensity[i]), rotation=90, horizontalalignment='center', verticalalignment='left')
                    labels.append(mz[i])
            if ref_spec:
                # Mirror the reference spectrum below the axis, scaled so its
                # strongest peak matches the sample's strongest peak.
                ref_mz = []
                ref_intensity = []
                ref_zeros = []
                for s in ref_spec[0]:
                    ref_mz.append(s[0])
                    ref_intensity.append(s[1] * -1)
                    ref_zeros.append(0)
                s = -1 * intensity[sx[0]] / min(ref_intensity)
                ax.vlines(ref_mz, ref_zeros, [r * s for r in ref_intensity], colors='r', linewidth=2)
            plt.ioff()
            plt.axhline()
            plt.tight_layout()
            L = plt.ylim()
            plt.ylim(L[0], L[1] * 1.12)
            if data[file_idx][compound_idx]['identification'].compound:
                inchi = data[file_idx][compound_idx]['identification'].compound[0].inchi
                myMol = Chem.MolFromInchi(inchi.encode('utf-8'))
                if myMol:
                    image = Draw.MolToImage(myMol, size=(300, 300))
                    ax2 = fig.add_subplot(223)
                    ax2.imshow(image)
                    ax2.axis('off')
                    # Text panel: theoretical vs measured m/z and RT agreement.
                    ax3 = fig.add_subplot(224)
                    ax3.set_xlim(0, 1)
                    mz_theoretical = data[file_idx][compound_idx]['identification'].mz_references[0].mz
                    mz_measured = data[file_idx][compound_idx]['data']['ms1_summary']['mz_centroid']
                    if not mz_measured:
                        mz_measured = 0
                    delta_mz = abs(mz_theoretical - mz_measured)
                    delta_ppm = delta_mz / mz_theoretical * 1e6
                    rt_theoretical = data[file_idx][compound_idx]['identification'].rt_references[0].rt_peak
                    rt_measured = data[file_idx][compound_idx]['data']['ms1_summary']['rt_peak']
                    if not rt_measured:
                        rt_measured = 0
                    ax3.text(0, 1, '%s' % os.path.basename(data[file_idx][compound_idx]['lcmsrun'].hdf5_file), fontsize=12)
                    ax3.text(0, 0.95, '%s %s' % (compound_names[compound_idx], data[file_idx][compound_idx]['identification'].mz_references[0].adduct), fontsize=12)
                    ax3.text(0, 0.9, 'm/z theoretical = %5.4f, measured = %5.4f, %5.4f ppm difference' % (mz_theoretical, mz_measured, delta_ppm), fontsize=12)
                    ax3.text(0, 0.85, 'Expected Elution of %5.2f minutes, %5.2f min actual' % (rt_theoretical, rt_measured), fontsize=12)
                    if score is not None:
                        ax3.text(0, 0.80, 'Score: %f' % (score), fontsize=12)
                    ax3.set_ylim(0.2, 1.01)
                    ax3.axis('off')
            fig.savefig(os.path.join(output_loc, compound_names[compound_idx] + '.pdf'))
            plt.close()
def top_five_scoring_files(data, frag_refs, compound_idx, filter_by):
    """Return the five best (file, reference) spectral matches for one compound.

    Every MSMS scan of every file is aligned and scored against each candidate
    reference spectrum (filtered by *filter_by*); only each scan's best match
    is kept, and the overall top five by score are returned.

    :param data: metatlas dataset (files x compounds)
    :param frag_refs: fragmentation reference DataFrame
    :param compound_idx: index of the compound to score
    :param filter_by: filter expression passed to sp.filter_frag_refs
    :returns: list of up to 6 tuples — (file_idxs, ref_idxs, scores,
        msv_sample_list, msv_ref_list, rt_list) transposed from the five
        highest-scoring matches; empty list when nothing scored.
    """
    file_idxs = []
    ref_idxs = []
    scores = []
    msv_sample_list = []
    msv_ref_list = []
    rt_list = []
    for file_idx in range(len(data)):
        # Skip files whose MSMS data is missing or not the expected dict.
        # (Was `assert` + except AssertionError — asserts vanish under -O.)
        try:
            if not isinstance(data[file_idx][compound_idx]['data']['msms']['data'], dict):
                continue
        except (IndexError, KeyError):
            continue
        msv_sample_scans = np.array([data[file_idx][compound_idx]['data']['msms']['data']['mz'], data[file_idx][compound_idx]['data']['msms']['data']['i']])
        rt_of_msv_sample = np.array(data[file_idx][compound_idx]['data']['msms']['data']['rt'])
        # Column index of the first peak of each new scan (RT changes).
        scan_idxs = [i + 1
                     for i in range(rt_of_msv_sample.size - 1)
                     if rt_of_msv_sample[i] != rt_of_msv_sample[i + 1]]
        for i, msv_sample in enumerate(np.split(msv_sample_scans, scan_idxs, axis=1)):
            current_best_score = None
            current_best_ref_idx = None
            current_best_msv_sample = None
            current_best_msv_ref = None
            current_best_rt = None
            for ref_idx, frag in sp.filter_frag_refs(data, frag_refs, compound_idx, file_idx, filter_by).iterrows():
                msv_ref = np.array(frag['mz_intensities']).T
                msv_sample_aligned, msv_ref_aligned = sp.pairwise_align_ms_vectors(msv_sample, msv_ref, .005, 'shape')
                score = sp.score_ms_vectors_composite(msv_sample_aligned, msv_ref_aligned)
                # `is None` (identity) instead of `== None` (equality).
                if current_best_score is None or score > current_best_score:
                    current_best_score = score
                    current_best_ref_idx = ref_idx
                    current_best_msv_sample = msv_sample_aligned
                    current_best_msv_ref = msv_ref_aligned
                    current_best_rt = np.split(rt_of_msv_sample, scan_idxs)[i][0]
            if current_best_score:
                scores.append(current_best_score)
                file_idxs.append(file_idx)
                ref_idxs.append(current_best_ref_idx)
                msv_sample_list.append(current_best_msv_sample)
                msv_ref_list.append(current_best_msv_ref)
                rt_list.append(current_best_rt)
    # Sort all candidate matches by score (descending), keep the top five, and
    # transpose back into parallel lists.
    return list(zip(*sorted(zip(file_idxs, ref_idxs, scores, msv_sample_list, msv_ref_list, rt_list), key=lambda l: l[2], reverse=True)[:5]))
def plot_msms_comparison(i, score, ax, msv_sample, msv_ref):
    """Draw a mirror plot: sample MSMS spectrum up, reference spectrum down.

    Matched peaks are green, unmatched peaks red; the reference is scaled to
    the sample's maximum intensity and drawn with negative values. Title and
    axis labels are only drawn on the first subplot (i == 0).

    :param i: subplot index; 0 gets the score title and axis labels
    :param score: match score shown in the title of subplot 0
    :param ax: matplotlib axes to draw into
    :param msv_sample: aligned sample spectrum, rows [mz, intensity]
    :param msv_ref: aligned reference spectrum, same layout
    """
    # Split into (matched, unmatched) peak sets for sample and reference.
    msv_sample_matches, msv_ref_matches, msv_sample_nonmatches, msv_ref_nonmatches = sp.partition_aligned_ms_vectors(msv_sample, msv_ref)
    msv_sample_unaligned = np.concatenate((msv_sample_matches, msv_sample_nonmatches), axis=1)
    msv_ref_unaligned = np.concatenate((msv_ref_matches, msv_ref_nonmatches), axis=1)
    # Sample-only peaks in red.
    sample_mz = msv_sample_nonmatches[0]
    sample_zeros = np.zeros(msv_sample_nonmatches[0].shape)
    sample_intensity = msv_sample_nonmatches[1]
    ax.vlines(sample_mz, sample_zeros, sample_intensity, colors='r', linewidth=1)
    # Peaks shared with the reference in green.
    shared_mz = msv_sample_matches[0]
    shared_zeros = np.zeros(msv_sample_matches[0].shape)
    shared_sample_intensity = msv_sample_matches[1]
    ax.vlines(shared_mz, shared_zeros, shared_sample_intensity, colors='g', linewidth=1)
    most_intense_idxs = np.argsort(msv_sample_unaligned[1])[::-1]
    if i == 0:
        ax.set_title('%.4f' % score, fontsize=8, weight='bold')
        ax.set_xlabel('m/z', fontsize=8, weight='bold')
        ax.set_ylabel('intensity', fontsize=8, weight='bold')
        ax.tick_params(axis='both', which='major', labelsize=6)
    # Annotate prominent peaks, skipping any within 0.1 m/z of a peak that is
    # already labelled; the 1.001e9 sentinel seeds the distance check.
    labels = [1.001e9]
    intensity_requirement = [m for m in most_intense_idxs
                             if
                             np.min(np.abs(msv_sample_unaligned[0][m] - labels)) > 0.1
                             and msv_sample_unaligned[1][m] > 0.2 * np.max(msv_sample_unaligned[1])]
    for m in max([most_intense_idxs[:6], intensity_requirement], key=len):
        if np.min(np.abs(msv_sample_unaligned[0][m] - labels)) > 0.1 and msv_sample_unaligned[1][m] > 0.02 * np.max(msv_sample_unaligned[1]):
            ax.annotate('%5.4f' % msv_sample_unaligned[0][m],
                        xy=(msv_sample_unaligned[0][m], 1.01 * msv_sample_unaligned[1][m]),
                        rotation=90,
                        horizontalalignment='left', verticalalignment='center',
                        size=4)
            labels.append(msv_sample_unaligned[0][m])
    if msv_ref_unaligned[0].size > 0:
        # Mirror the reference below the x-axis, scaled to the sample maximum.
        ref_scale = -1 * np.max(msv_sample_unaligned[1]) / np.max(msv_ref_unaligned[1])
        ref_mz = msv_ref_nonmatches[0]
        ref_zeros = np.zeros(msv_ref_nonmatches[0].shape)
        ref_intensity = ref_scale * msv_ref_nonmatches[1]
        shared_ref_intensity = ref_scale * msv_ref_matches[1]
        ax.vlines(ref_mz, ref_zeros, ref_intensity, colors='r', linewidth=1)
        ax.vlines(shared_mz, shared_zeros, shared_ref_intensity, colors='g', linewidth=1)
    ax.axhline()
    # Leave headroom above the tallest peak for the annotations.
    ylim = ax.get_ylim()
    ax.set_ylim(ylim[0], ylim[1] * 1.33)
def plot_msms_comparison2(i, mz_header, rt, cpd_header, ref_id, filename, score, ax, msv_sample, msv_ref, zoom_factor=1):
    """Interactive variant of plot_msms_comparison with pickable vlines.

    Adds a pick radius to every vlines artist (for GUI pick-event callbacks),
    a title showing the MSMS reference id and source filename, richer x-axis
    labelling (score, RT, m/z and compound headers), and a y-axis zoom.
    Returns the list of LineCollection artists that were drawn.

    :param zoom_factor: divide the current y-limits by this factor (>1 zooms in)
    """
    pickradius = 10  # pixel tolerance for pick events on each vlines artist
    msv_sample_matches, msv_ref_matches, msv_sample_nonmatches, msv_ref_nonmatches = sp.partition_aligned_ms_vectors(msv_sample, msv_ref)
    msv_sample_unaligned = np.concatenate((msv_sample_matches, msv_sample_nonmatches), axis=1)
    msv_ref_unaligned = np.concatenate((msv_ref_matches, msv_ref_nonmatches), axis=1)
    # Sample-only peaks in red.
    sample_mz = msv_sample_nonmatches[0]
    sample_zeros = np.zeros(msv_sample_nonmatches[0].shape)
    sample_intensity = msv_sample_nonmatches[1]
    lines = [ax.vlines(sample_mz, sample_zeros, sample_intensity, colors='r', linewidth=1, pickradius=pickradius)]
    # Peaks shared with the reference in green.
    shared_mz = msv_sample_matches[0]
    shared_zeros = np.zeros(msv_sample_matches[0].shape)
    shared_sample_intensity = msv_sample_matches[1]
    lines.append(ax.vlines(shared_mz, shared_zeros, shared_sample_intensity, colors='g', linewidth=1, pickradius=pickradius))
    most_intense_idxs = np.argsort(msv_sample_unaligned[1])[::-1]
    if i == 0:
        ax.set_title('MSMS ref ID = %s\n%s' % (ref_id, filename), fontsize='small', fontstretch='condensed')
        if cpd_header == "":
            ax.set_xlabel('m/z\nScore = %.4f, %s\n%s' % (score, rt, mz_header), weight='bold', fontsize=7)
        else:
            ax.set_xlabel('m/z\nScore = %.4f, %s\n%s\n%s' % (score, rt, mz_header, cpd_header), weight='bold', fontsize=7)
        ax.set_ylabel('intensity')
    # Annotate prominent peaks; 1.001e9 seeds the min-distance check so no
    # real peak is ever within 0.1 of the initial "label".
    labels = [1.001e9]
    intensity_requirement = [m for m in most_intense_idxs
                             if
                             np.min(np.abs(msv_sample_unaligned[0][m] - labels)) > 0.1
                             and msv_sample_unaligned[1][m] > 0.2 * np.max(msv_sample_unaligned[1])]
    for m in max([most_intense_idxs[:6], intensity_requirement], key=len):
        if np.min(np.abs(msv_sample_unaligned[0][m] - labels)) > 0.1 and msv_sample_unaligned[1][m] > 0.02 * np.max(msv_sample_unaligned[1]):
            ax.annotate('%5.4f' % msv_sample_unaligned[0][m],
                        xy=(msv_sample_unaligned[0][m], msv_sample_unaligned[1][m]),
                        size=6)
            labels.append(msv_sample_unaligned[0][m])
    if msv_ref_unaligned[0].size > 0:
        # Mirror the reference below the x-axis, scaled to the sample maximum.
        ref_scale = -1 * np.max(msv_sample_unaligned[1]) / np.max(msv_ref_unaligned[1])
        ref_mz = msv_ref_nonmatches[0]
        ref_zeros = np.zeros(msv_ref_nonmatches[0].shape)
        ref_intensity = ref_scale * msv_ref_nonmatches[1]
        shared_ref_intensity = ref_scale * msv_ref_matches[1]
        lines.append(ax.vlines(ref_mz, ref_zeros, ref_intensity, colors='r', linewidth=1, pickradius=pickradius))
        lines.append(ax.vlines(shared_mz, shared_zeros, shared_ref_intensity, colors='g', linewidth=1, pickradius=pickradius))
    ax.axhline()
    # Zoom: shrink the y-range by zoom_factor (keep a 0 lower bound at 0).
    ylim = ax.get_ylim()
    new_ylim = 0 if ylim[0] == 0 else ylim[0]/zoom_factor
    ax.set_ylim(new_ylim, ylim[1]/zoom_factor)
    return lines
def plot_structure(ax, compound, dimensions):
    """Draw the 2D structure of the first compound entry onto *ax*.

    Nothing is drawn when the compound list is empty or the InChI cannot be
    parsed by RDKit; the axes frame is hidden only after a successful draw.

    :param ax: matplotlib axes to draw into
    :param compound: list-like of compound objects exposing an `.inchi` attribute
    :param dimensions: width and height in pixels of the rendered image
    """
    if not compound:
        return
    mol = Chem.MolFromInchi(compound[0].inchi.encode('utf-8'))
    if not mol:
        return
    ax.imshow(Draw.MolToImage(mol, size=(dimensions, dimensions)))
    ax.axis('off')
def plot_ema_compound_info(ax, compound_info, label=''):
    """Render a small table of EMA compound metadata onto *ax*.

    :param ax: matplotlib axes that hosts the table; its frame is hidden
    :param compound_info: identification-like object exposing `.compound` and
        `.mz_references`; nothing is drawn when `.compound` is empty
    :param label: free-text label shown in the 'Label:' row
    """
    wrapper = TextWrapper(width=28, break_on_hyphens=True)
    if compound_info.compound:
        name = ['Name:', wrapper.fill(compound_info.compound[0].name)]
        label = ['Label:', wrapper.fill(label)]
        formula = ['Formula:', compound_info.compound[0].formula]
        polarity = ['Polarity:', compound_info.mz_references[0].detected_polarity]
        neutral_mass = ['Monoisotopic Mass:', compound_info.compound[0].mono_isotopic_molecular_weight]
        theoretical_mz = ['Theoretical M/Z:', compound_info.mz_references[0].mz]
        adduct = ['Adduct:', compound_info.mz_references[0].adduct]
        cell_text = [name, label, formula, polarity, neutral_mass, theoretical_mz, adduct]
        ema_compound_info_table = ax.table(cellText=cell_text,
                                           colLabels=['', 'EMA Compound Info'],
                                           bbox=[0.0, 0.0, 1, 1], loc='top left')
        ema_compound_info_table.scale(1, .7)
        ema_compound_info_table.auto_set_font_size(False)
        ema_compound_info_table.set_fontsize(4)
        # Widen the label column and center the value column (+1 for header row).
        cellDict = ema_compound_info_table.get_celld()
        for i in range(len(cell_text)+1):
            cellDict[(i,0)].set_width(0.3)
            cellDict[(i,1)]._loc = 'center'
    ax.axis('off')
def plot_eic(ax, data, compound_idx):
    """Overlay every file's extracted-ion chromatogram for one compound.

    Each file's EIC is a thin black line with the rt_min..rt_max region
    shaded cyan. Vertical lines mark the RT window (black) and peak (red).
    Files with missing/short EIC data are silently skipped.
    """
    for file_idx in range(len(data)):
        rt_min = data[file_idx][compound_idx]['identification'].rt_references[0].rt_min
        rt_max = data[file_idx][compound_idx]['identification'].rt_references[0].rt_max
        rt_peak = data[file_idx][compound_idx]['identification'].rt_references[0].rt_peak
        try:
            assert len(data[file_idx][compound_idx]['data']['eic']['rt']) > 1
            x = np.asarray(data[file_idx][compound_idx]['data']['eic']['rt'])
            y = np.asarray(data[file_idx][compound_idx]['data']['eic']['intensity'])
            # Fade the traces as the number of files grows (alpha capped at 1).
            ax.plot(x, y, 'k-', linewidth=.1, alpha=min(1, 10*(1./len(data))))
            myWhere = np.logical_and(x>=rt_min, x<=rt_max )
            ax.fill_between(x,0,y,myWhere, facecolor='c', alpha=.25)
        except (AssertionError, TypeError):
            pass
    # ax.tick_params(labelbottom='off')
    ax.xaxis.set_tick_params(labelsize=5)
    ax.yaxis.set_tick_params(labelsize=5)
    ax.tick_params(axis='y', labelsize=5)
    ax.get_yaxis().get_major_formatter().set_useOffset(True)
    #ax.get_yaxis().set_visible(False)
    # NOTE(review): rt_min/rt_max/rt_peak here are whatever the LAST file's
    # identification carried (loop variables reused after the loop) — assumes
    # all files share the same RT reference, and raises NameError when `data`
    # is empty. Confirm with callers.
    ax.axvline(rt_min, color='k', linewidth=1.0)
    ax.axvline(rt_max, color='k', linewidth=1.0)
    ax.axvline(rt_peak, color='r', linewidth=1.0)
def plot_score_and_ref_file(ax, score, rt, ref):
    """Annotate a small spectrum axis with its match score and source label.

    The score is centered at the top of the axes; the reference/file label,
    with the retention time appended, runs vertically along the left edge.

    :param ax: matplotlib axes to annotate (axes-fraction coordinates)
    :param score: numeric match score, printed to four decimals
    :param rt: retention time appended to the label as 'RT=…'
    :param ref: reference id or file name shown in the rotated label
    """
    score_text = '%.4f' % score
    label_text = fill(str(ref) + ' RT=%5.3f' % rt, width=26)
    ax.text(0.5, 1, score_text,
            transform=ax.transAxes, fontsize=4, weight='bold',
            horizontalalignment='center', verticalalignment='top')
    ax.text(0, .45, label_text,
            transform=ax.transAxes, fontsize=2, rotation='vertical',
            horizontalalignment='left', verticalalignment='center')
def get_refs(file_name, ref_dtypes, ref_index):
    """Read a tab-separated MSMS reference table into a pandas DataFrame.

    :param file_name: path to the tab-separated reference file
    :param ref_dtypes: column-name -> dtype mapping passed to read_csv
    :param ref_index: column name(s) to use as the DataFrame index
    :returns: DataFrame indexed by *ref_index*
    """
    refs_df = pd.read_csv(file_name, sep='\t', dtype=ref_dtypes)
    return refs_df.set_index(ref_index)
def convert_to_centroid(sample_df):
    """Reduce a profile-mode [mz; intensity] array to its detected peak maxima.

    Peak detection uses sp.peakdet on the intensity row with a fixed delta of
    1000.0; only the columns at detected maxima are kept.

    :param sample_df: 2xN array, row 0 = m/z, row 1 = intensity
    :returns: the peak columns of *sample_df*, or an empty (0, 0) array when
        no maxima are found
    """
    detected_maxima, _ = sp.peakdet(sample_df[1], 1000.0)
    if detected_maxima.shape[0] == 0:
        return np.zeros((0, 0))
    peak_columns = detected_maxima[:, 0].astype(int).flatten()
    return sample_df[:, peak_columns]
def search_ms_refs(msv_sample, query, inchi_key, polarity, precursor_mz, pre_mz_ppm, frag_mz_tolerance, ref_loc, ref_dtypes, ref_index, ref_df):
    """Thin wrapper forwarding all search parameters to sp.search_ms_refs.

    The original forwarded `**locals()`, which is fragile: it re-passed
    `msv_sample` inside the kwargs, and introducing any new local before the
    call would silently change what gets forwarded. Explicit keywords make
    the contract visible and robust.
    """
    return sp.search_ms_refs(msv_sample,
                             query=query, inchi_key=inchi_key, polarity=polarity,
                             precursor_mz=precursor_mz, pre_mz_ppm=pre_mz_ppm,
                             frag_mz_tolerance=frag_mz_tolerance, ref_loc=ref_loc,
                             ref_dtypes=ref_dtypes, ref_index=ref_index, ref_df=ref_df)
def get_msms_hits_per_compound(rt_mz_i_df, msms_scan, do_centroid, query, inchi_key, polarity,
                               precursor_mz, pre_mz_ppm, frag_mz_tolerance, ref_loc, ref_dtypes,
                               ref_index, ref_df):
    """Score a single MSMS scan against the reference library.

    Extracts the fragment peaks recorded at retention time *msms_scan* from
    *rt_mz_i_df*, optionally centroids them, drops ions above the scan's
    (precursor m/z + 2.5), and runs search_ms_refs on what remains.

    :returns: tuple of (hits DataFrame — empty when the scan has no usable
        ions, msv_sample 2xN array of [mz; intensity])
    """
    msv_sample = rt_mz_i_df.loc[rt_mz_i_df['rt'] == msms_scan,
                                ['mz', 'i', 'rt', 'precursor_MZ', 'precursor_intensity']]
    # All rows of one scan share the same precursor; take it from the first.
    precursor_mz_sample = msv_sample['precursor_MZ'].values[0]
    msv_sample.sort_values('mz', inplace=True)
    msv_sample = msv_sample[['mz', 'i']].values.T
    msv_sample = convert_to_centroid(msv_sample) if do_centroid else msv_sample
    # Filter ions greater than 2.5 + precursor M/Z
    msv_sample = msv_sample[:, msv_sample[0] < precursor_mz_sample + 2.5]
    if msv_sample.size > 0:
        return search_ms_refs(msv_sample, query, inchi_key, polarity, precursor_mz,
                              pre_mz_ppm, frag_mz_tolerance, ref_loc, ref_dtypes,
                              ref_index, ref_df), msv_sample
    return pd.DataFrame(), msv_sample
def get_empty_scan_df(columns):
    """Build a one-row all-NaN placeholder DataFrame for a scan with no hits.

    The row is indexed by a (NaN, NaN) ('database', 'id') MultiIndex so it can
    be appended to the msms_hits table alongside real reference hits.

    :param columns: column names the placeholder frame should expose
    """
    placeholder_index = pd.MultiIndex.from_tuples([(np.nan, np.nan)],
                                                  names=['database', 'id'])
    return pd.DataFrame(data={'database': [np.nan], 'id': [np.nan]},
                        columns=columns,
                        index=placeholder_index)
def get_msms_hits(metatlas_dataset, extra_time=False, keep_nonmatches=False,
                  pre_query='database == "metatlas"', query=None, ref_dtypes=None,
                  ref_loc=None, ref_df=None, frag_mz_tolerance=.005, ref_index=None,
                  do_centroid=False):
    """Score every MSMS scan in *metatlas_dataset* against a reference library.

    :param metatlas_dataset: list (files) of lists (compounds) of dicts;
        each entry needs ['data']['msms']['data'] arrays and an
        ['identification'] with mz/rt references
    :param extra_time: when True, also score scans outside the RT window
    :param keep_nonmatches: when True, emit a placeholder row for scans with
        no library hit (score = precursor intensity, num_matches = 0)
    :param pre_query: pandas query applied once to the reference table
    :param query: per-scan pandas query; defaults to inchi_key + polarity +
        a ppm-derived precursor m/z window
    :param ref_dtypes: column dtypes for the reference table
    :param ref_loc: path of the reference table (used when ref_df is None)
    :param ref_df: pre-loaded reference DataFrame (skips reading ref_loc)
    :param frag_mz_tolerance: fragment m/z tolerance passed downstream
    :param ref_index: index columns for the reference table
    :param do_centroid: centroid profile-mode scans before scoring
    :returns: DataFrame indexed by (database, id, file_name, msms_scan)
    """
    if query is None:
        # Precursor-window half-width derived from the per-reference ppm
        # tolerance and decimal precision; evaluated by pandas DataFrame.query.
        pre_mz_decimal = ".5*(@pre_mz_ppm**-decimal)/(decimal+1)"
        offset = f".5*(({pre_mz_decimal} + .005 + ({pre_mz_decimal} - .005)**2)**.5)"
        query = ("(@inchi_key == inchi_key) and "
                 "(@polarity == polarity) and "
                 f"( (@precursor_mz - {offset}) <= precursor_mz <= (@precursor_mz + {offset}) )")
    if ref_dtypes is None:
        ref_dtypes = {'database': str, 'id': str, 'name': str,
                      'spectrum': object, 'decimal': int, 'precursor_mz': float,
                      'polarity': str, 'adduct': str, 'fragmentation_method': str,
                      'collision_energy': str, 'instrument': str, 'instrument_type': str,
                      'formula': str, 'exact_mass': float,
                      'inchi_key': str, 'inchi': str, 'smiles': str}
    if ref_index is None:
        ref_index = ['database', 'id']
    if ref_loc is None:
        ref_loc = '/global/project/projectdirs/metatlas/projects/spectral_libraries/msms_refs_v2.tab'
    if ref_df is None:
        ref_df = get_refs(ref_loc, ref_dtypes, ref_index)
    # Restrict the library up front, then parse the JSON spectra into arrays.
    ref_df = ref_df.query(pre_query).copy()
    ref_df.loc[:, 'spectrum'] = ref_df['spectrum'].apply(lambda s: np.array(json.loads(s)))
    file_names = ma_data.get_file_names(metatlas_dataset)
    compound_names = ma_data.get_compound_names(metatlas_dataset)[0]
    index_cols = ref_df.index.names + ['file_name', 'msms_scan']
    all_cols = index_cols + ['score', 'num_matches', 'msv_query_aligned', 'msv_ref_aligned', 'name', 'adduct',
                             'inchi_key', 'precursor_mz', 'measured_precursor_mz',
                             'measured_precursor_intensity']
    msms_hits = pd.DataFrame(columns=all_cols).set_index(index_cols)
    for compound_idx, _ in enumerate(compound_names):
        # Inline progress indicator (overwrites the same console line).
        sys.stdout.write('\r'+'Processing: {} / {} compounds.'.format(compound_idx+1, len(compound_names)))
        sys.stdout.flush()
        cid = metatlas_dataset[0][compound_idx]['identification']
        name = cid.name.split('///')[0] if cid.name else getattr(cid.compound[-1], 'name', None)
        adduct = ma_data.extract(cid, ['mz_references', 0, 'adduct'], None)
        inchi_key = ma_data.extract(cid, ['compound', 0, 'inchi_key'], '')
        pre_mz_ppm = cid.mz_references[0].mz_tolerance
        precursor_mz = cid.mz_references[0].mz
        rt_min = cid.rt_references[0].rt_min
        rt_max = cid.rt_references[0].rt_max
        for file_idx, file_name in enumerate(file_names):
            mfc = metatlas_dataset[file_idx][compound_idx]
            polarity = mfc['identification'].mz_references[0].detected_polarity
            # Skip files whose MSMS data is absent or incomplete.
            try:
                assert {'rt', 'i', 'precursor_MZ', 'mz'}.issubset(set(mfc['data']['msms']['data'].keys()))
            except (KeyError, AssertionError, AttributeError):
                continue
            rt_mz_i_df = pd.DataFrame({k: mfc['data']['msms']['data'][k]
                                       for k in ['rt', 'mz', 'i', 'precursor_MZ', 'precursor_intensity']}
                                      ).sort_values(['rt', 'mz'])
            # One scored result per distinct scan (distinct retention time).
            for msms_scan in rt_mz_i_df.rt.unique():
                if not extra_time and not rt_min <= msms_scan <= rt_max:
                    continue
                scan_df, msv_sample = get_msms_hits_per_compound(rt_mz_i_df, msms_scan, do_centroid,
                                                                 query, inchi_key, polarity,
                                                                 precursor_mz, pre_mz_ppm,
                                                                 frag_mz_tolerance, ref_loc, ref_dtypes,
                                                                 ref_index, ref_df)
                precursor = rt_mz_i_df.loc[rt_mz_i_df['rt'] == msms_scan, ['precursor_MZ', 'precursor_intensity']]
                hits = len(scan_df) > 0
                if not hits and not keep_nonmatches:
                    continue
                if not hits and keep_nonmatches:
                    # leave out the cols that are used in the index
                    scan_df = get_empty_scan_df(all_cols[2:])
                scan_df['file_name'] = file_name
                scan_df['msms_scan'] = msms_scan
                scan_df['name'] = name
                scan_df['adduct'] = adduct
                scan_df['inchi_key'] = inchi_key
                scan_df['precursor_mz'] = precursor_mz
                scan_df['measured_precursor_mz'] = precursor['precursor_MZ'].values[0]
                scan_df['measured_precursor_intensity'] = precursor['precursor_intensity'].values[0]
                scan_df.set_index(['file_name', 'msms_scan'], append=True, inplace=True)
                if not hits and keep_nonmatches:
                    # Placeholder "hit": score by precursor intensity so
                    # non-matching scans still sort sensibly.
                    scan_df['num_matches'] = 0
                    scan_df['score'] = precursor['precursor_intensity'].values[0]
                    scan_df['msv_query_aligned'] = [msv_sample]
                    scan_df['msv_ref_aligned'] = [np.full_like(msv_sample, np.nan)]
                msms_hits = msms_hits.append(scan_df)
    sys.stdout.write('\n'+'Done!!!\n')
    return msms_hits
def make_chromatograms(input_dataset=[], include_lcmsruns=[], exclude_lcmsruns=[], include_groups=[], exclude_groups=[], group='index', share_y=True, save=True, output_loc=[], short_names_df=pd.DataFrame(), short_names_header=None, polarity='', overwrite=False):
    """Render one EIC chromatogram PDF per compound, in parallel.

    Filters runs/groups, resolves the optional short-sample-name table, then
    fans per-compound plotting jobs out to a multiprocessing pool that runs
    cpp.chromplotplus.

    :param group: grouping mode forwarded to chromplotplus
    :param share_y: share the y-axis across subplots
    :param short_names_df: optional table of short sample names; requires
        short_names_header to identify the column to use
    :param polarity: optional prefix (e.g. 'positive') for the output dir name
    :param overwrite: forwarded to chromplotplus
    """
    input_dataset = filter_runs(input_dataset, include_lcmsruns, include_groups,
                                exclude_lcmsruns, exclude_groups)
    file_names = ma_data.get_file_names(input_dataset)
    # Fall back to full file names unless a usable short-name column is given.
    if short_names_df.empty:
        if short_names_header != None:
            sys.stdout.write('short_names_df not provided. Using full_filename for the plots!')
            short_names_df = pd.DataFrame()
    elif short_names_header == None:
        sys.stdout.write('short_names_header not provided. Using full_filename for the plots!')
        short_names_df = pd.DataFrame()
    elif short_names_header not in short_names_df.columns:
        sys.stdout.write('short_names_header not found in short_names_df. Using full_filename for the plots!')
        short_names_df = pd.DataFrame()
    else:
        short_names_df = short_names_df[[short_names_header]]
        short_names_df.columns=['shortname']
    os.makedirs(output_loc, exist_ok=True)
    compound_names = ma_data.get_compound_names(input_dataset,use_labels=True)[0]
    args_list = []
    prefix = f"{polarity}_" if polarity != '' else ''
    chromatogram_dir = os.path.join(output_loc, f"{prefix}compound_EIC_chromatograms")
    # One kwargs dict per compound; each becomes one chromplotplus job.
    for compound_idx, my_compound in enumerate(compound_names):
        my_data = [input_dataset[file_idx][compound_idx] for file_idx, _ in enumerate(file_names)]
        kwargs = {'data': my_data,
                  'file_name': os.path.join(chromatogram_dir, my_compound+'.pdf'),
                  'group': group,
                  'save': save,
                  'share_y': share_y,
                  'names': file_names,
                  'shortname': short_names_df,
                  'overwrite': overwrite}
        args_list.append(kwargs)
    # Cap worker count at 4 or the number of compounds, whichever is smaller.
    max_processes = 4
    with mp.Pool(processes=min(max_processes, len(input_dataset[0]))) as pool:
        pool.map(cpp.chromplotplus, args_list)
def make_identification_figure_v2(input_fname='', input_dataset=[], include_lcmsruns=[], exclude_lcmsruns=[],
                                  include_groups=[], exclude_groups=[], output_loc=[], msms_hits=None,
                                  use_labels=False, intensity_sorted_matches=False,
                                  short_names_df=pd.DataFrame(), polarity='', overwrite=True):
    """Write an identification/mirror-plot PDF per compound plus a summary table.

    For each compound, the top five MSMS hits (from *msms_hits*) are shown as
    mirror plots, together with compound info, structure, EIC overlay, and a
    text panel of m/z/RT agreement. A 'MatchingMZs.tab' summary of matched
    fragments is written alongside the figures.

    :param msms_hits: DataFrame produced by get_msms_hits
    :param intensity_sorted_matches: sort matched fragment m/z by intensity
    :param short_names_df: optional table with a 'short_samplename' column
        indexed by the file basename prefix
    :param overwrite: forwarded to write_utils for existing files
    NOTE(review): when msms_hits is None, msms_hits_df is never assigned and
    the loop below raises NameError — confirm callers always pass msms_hits.
    """
    prefix = '' if polarity == '' else f"{polarity}_"
    output_loc = os.path.join(output_loc, f"{prefix}msms_mirror_plots")
    if not input_dataset:
        data = ma_data.get_dill_data(os.path.expandvars(input_fname))
    else:
        data = input_dataset
    data = filter_runs(data, include_lcmsruns, include_groups, exclude_lcmsruns, exclude_groups)
    if msms_hits is not None:
        msms_hits_df = msms_hits.reset_index().sort_values('score', ascending=False)
    compound_names = ma_data.get_compound_names(data, use_labels)[0]
    file_names = ma_data.get_file_names(data)
    match = pd.DataFrame()
    plt.ioff()
    plt.clf()
    for compound_idx, _ in enumerate(compound_names):
        file_idxs, scores, msv_sample_list, msv_ref_list, rt_list = [], [], [], [], []
        inchi_key = extract(data, [0, compound_idx, "identification", "compound", 0, "inchi_key"], "")
        # Find 5 best file and reference pairs by score
        try:
            rt_ref = data[0][compound_idx]['identification'].rt_references[0]
            mz_ref = data[0][compound_idx]['identification'].mz_references[0]
            # Restrict hits to this compound, its RT window, and its precursor
            # m/z tolerance; keep at most one (the best) hit per file.
            comp_msms_hits = msms_hits_df[(msms_hits_df['inchi_key'] == inchi_key)
                                          & (msms_hits_df['msms_scan'] >= rt_ref.rt_min)
                                          & (msms_hits_df['msms_scan'] <= rt_ref.rt_max)
                                          & within_tolerance(
                                              msms_hits_df['precursor_mz'].values.astype(float),
                                              mz_ref.mz,
                                              mz_ref.mz_tolerance*1e-6
                                          )
                                          ].drop_duplicates('file_name').head(5)
            comp_msms_hits = comp_msms_hits[comp_msms_hits['file_name'].isin(file_names)]
            file_idxs = [file_names.index(f) for f in comp_msms_hits['file_name']]
            scores = comp_msms_hits['score'].values.tolist()
            msv_sample_list = comp_msms_hits['msv_query_aligned'].values.tolist()
            msv_ref_list = comp_msms_hits['msv_ref_aligned'].values.tolist()
            rt_list = comp_msms_hits['msms_scan'].values.tolist()
        except (IndexError, TypeError):
            # No usable hits: fall back to the file with the strongest EIC.
            # NOTE(review): if every file fails the inner try, max_file_idx is
            # never assigned and the line below raises NameError — confirm.
            file_idx = None
            max_intensity = 0
            for file_idx, _ in enumerate(data):
                try:
                    temp = max(data[file_idx][compound_idx]['data']['eic']['intensity'])
                    if temp > max_intensity:
                        max_file_idx = file_idx
                        max_intensity = temp
                except (ValueError, TypeError):
                    continue
            file_idxs = [max_file_idx]
            msv_sample_list = [np.array([0, np.nan]).T]
            msv_ref_list = [np.array([0, np.nan]).T]
            scores = [np.nan]
        # Plot if compound yields any scores
        if file_idxs and file_idxs[0] is not None:
            # Top 5 MSMS Spectra: one large axis plus four small ones.
            top_5_axis = [plt.subplot2grid((24, 24), (0, 0), rowspan=12, colspan=12)]
            for i in [0, 3, 6, 9]:
                top_5_axis.append(plt.subplot2grid((24, 24), (i, 12), rowspan=3, colspan=3))
                top_5_axis[-1].tick_params(axis='both', length=2)
                top_5_axis[-1].set_xticklabels([])
                top_5_axis[-1].set_yticklabels([])
            for i, (score, axis) in enumerate(zip(scores, top_5_axis)):
                plot_msms_comparison(i, score, axis, msv_sample_list[i], msv_ref_list[i])
            # Narrow invisible axes used as text holders next to the small plots.
            def no_axis_plot(i):
                axis = plt.subplot2grid((24, 24), (i, 15), rowspan=3, colspan=1)
                axis.axis('off')
                return axis
            # Next Best Scores and Filenames
            next_best = [no_axis_plot(i) for i in [0, 3, 6, 9]]
            if short_names_df.empty:
                for i, (score, axis) in enumerate(zip(scores[1:], next_best)):
                    plot_score_and_ref_file(axis, score, rt_list[i+1], os.path.basename(data[file_idxs[i+1]][compound_idx]['lcmsrun'].hdf5_file))
            else:
                for i, (score, ax) in enumerate(zip(scores[1:], next_best)):
                    short_samplename = short_names_df.loc[os.path.basename(data[file_idxs[i+1]][compound_idx]['lcmsrun'].hdf5_file).split('.')[0], 'short_samplename'][0]
                    plot_score_and_ref_file(ax, score, rt_list[i+1], short_samplename)
        # EMA Compound Info
        if file_idxs and file_idxs[0] is not None:
            ax3 = plt.subplot2grid((24, 24), (0, 16), rowspan=6, colspan=8)
            plot_ema_compound_info(ax3, data[file_idxs[0]][compound_idx]['identification'])
        else:
            ax3 = plt.subplot2grid((24, 24), (0, 0), rowspan=6, colspan=8)
            plot_ema_compound_info(ax3, data[0][compound_idx]['identification'])
        # Structure
        if file_idxs and file_idxs[0] is not None:
            ax5 = plt.subplot2grid((24, 24), (13, 0), rowspan=6, colspan=6)
            plot_structure(ax5, data[file_idxs[0]][compound_idx]['identification'].compound, 100)
        else:
            ax5 = plt.subplot2grid((24, 24), (13, 0), rowspan=6, colspan=6)
            plot_structure(ax5, data[0][compound_idx]['identification'].compound, 100)
        # EIC
        if file_idxs and file_idxs[0] is not None:
            ax6 = plt.subplot2grid((21, 21), (6, 15), rowspan=5, colspan=6)
            plot_eic(ax6, data, compound_idx)
        else:
            ax6 = plt.subplot2grid((21, 21), (6, 0), rowspan=5, colspan=6)
            plot_eic(ax6, data, compound_idx)
        # Old code
        if file_idxs and file_idxs[0] is not None:
            # Text panel: m/z and RT agreement plus matched fragment lists.
            ax7 = plt.subplot2grid((24, 24), (15, 6), rowspan=9, colspan=20)
            mz_theoretical = data[file_idxs[0]][compound_idx]['identification'].mz_references[0].mz
            mz_measured = data[file_idxs[0]][compound_idx]['data']['ms1_summary']['mz_centroid']
            if not mz_measured:
                mz_measured = 0
            delta_mz = abs(mz_theoretical - mz_measured)
            delta_ppm = delta_mz / mz_theoretical * 1e6
            rt_theoretical = data[file_idxs[0]][compound_idx]['identification'].rt_references[0].rt_peak
            rt_measured = data[file_idxs[0]][compound_idx]['data']['ms1_summary']['rt_peak']
            if not rt_measured:
                rt_measured = 0
            ax7.text(0,1,'%s'%fill(os.path.basename(data[file_idxs[0]][compound_idx]['lcmsrun'].hdf5_file), width=54),fontsize=8)
            ax7.text(0,0.9,'%s %s'%(compound_names[compound_idx], data[file_idxs[0]][compound_idx]['identification'].mz_references[0].adduct),fontsize=8)
            ax7.text(0,0.85,'Measured M/Z = %5.4f, %5.4f ppm difference'%(mz_measured, delta_ppm),fontsize=8)
            ax7.text(0,0.8,'Expected Elution of %5.2f minutes, %5.2f min actual'%(rt_theoretical,rt_measured),fontsize=8)
            if len(rt_list) > 0:
                ax7.text(0,0.7,'MSMS Scan at %5.3f minutes'%rt_list[0],fontsize=8)
                msv_sample_matches = sp.partition_aligned_ms_vectors(msv_sample_list[0], msv_ref_list[0])[0]
                if intensity_sorted_matches:
                    msv_sample_matches = msv_sample_matches[:, msv_sample_matches[1].argsort()[::-1]]
                if len(msv_sample_matches[0]) > 0:
                    mz_sample_matches = msv_sample_matches[0].tolist()
                    threshold_mz_sample_matches = sp.remove_ms_vector_noise(msv_sample_matches)[0].tolist()
                else:
                    mz_sample_matches = [np.nan]
                    threshold_mz_sample_matches = [np.nan]
                ax7.text(0,0.6,
                         fill('Matching M/Zs above 1E-3*max: ' + ', '.join(['%5.3f'%m for m in threshold_mz_sample_matches]), width=90) + '\n\n' +
                         fill('All Matching M/Zs: ' + ', '.join(['%5.3f'%m for m in mz_sample_matches]), width=90),
                         fontsize=6, verticalalignment='top')
                # Accumulate the per-compound summary row for MatchingMZs.tab.
                match.loc[compound_idx, 'label'] = compound_names[compound_idx]
                match.loc[compound_idx, 'file name'] = file_names[file_idxs[0]]
                match.loc[compound_idx, 'RT'] = rt_list[0]
                match.loc[compound_idx, 'score'] = scores[0]
                match.loc[compound_idx, 'Matching M/Zs above 1E-3*max'] = ', '.join(['%5.3f' % m for m in threshold_mz_sample_matches])
                match.loc[compound_idx, 'All matching M/Zs'] = ','.join(['%5.3f' % m for m in mz_sample_matches])
            ax7.set_ylim(.5,1.1)
            ax7.axis('off')
        # tight_layout warns about the mixed 24x24 / 21x21 grids; silence it.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", message="tight_layout not applied: number of rows in subplot specifications must be multiples of one another.")
            plt.tight_layout()
        fig_path = os.path.join(output_loc, compound_names[compound_idx] + '.pdf')
        write_utils.check_existing_file(fig_path, overwrite)
        plt.savefig(fig_path)
        plt.close()
        logger.info('Exported identification figures for %s to %s.', compound_names[compound_idx], fig_path)
    match_path = os.path.join(output_loc, 'MatchingMZs.tab')
    write_utils.export_dataframe(match, match_path, 'matching MZs', overwrite, sep='\t')
def plot_ms1_spectra(polarity = None, mz_min = 5, mz_max = 5, input_fname = '', input_dataset = [], compound_names = [], include_lcmsruns = [], exclude_lcmsruns = [], include_groups = [], exclude_groups = [], output_loc = []):
    """
    Plot three views of ms1 spectra for compounds in input_dataset using file with highest RT peak of a polarity:
    Unscaled: plots ms1 spectra within window of mz_min and mz_max
    Scaled: plots ms1 spectra within window of mz_min and mz_max scaling mz of compound to 70%
    Full Range: plots ms1 spectra without window (unscaled)

    :param polarity: 'positive' or 'negative'; may also be inferred from the
        POS/NEG substrings in the include/exclude filters (but must agree)
    :param mz_min, mz_max: half-widths of the m/z window around the peak
    :param compound_names: subset of compounds to plot (default: all)
    :param output_loc: directory for the per-compound PDFs
    """
    print('here I am')
    if not input_dataset:
        data = ma_data.get_dill_data(os.path.expandvars(input_fname))
    else:
        data = input_dataset
    data = filter_runs(data, include_lcmsruns, include_groups, exclude_lcmsruns, exclude_groups)
    #Make sure there is data
    assert(len(data) != 0)
    all_compound_names = ma_data.get_compound_names(data)[0]
    #Set default compound list to all compounds in input_dataset
    if not compound_names:
        compound_names = all_compound_names
    #Find implicit polarity and make sure there is not more than one
    if 'POS' in include_lcmsruns or 'NEG' in exclude_lcmsruns:
        assert(polarity == None or polarity == 'positive')
        polarity = 'positive'
    if 'NEG' in include_lcmsruns or 'POS' in exclude_lcmsruns:
        assert(polarity == None or polarity == 'negative')
        polarity = 'negative'
    if 'POS' in include_groups or 'NEG' in exclude_groups:
        assert(polarity == None or polarity == 'positive')
        polarity = 'positive'
    if 'NEG' in include_groups or 'POS' in exclude_groups:
        assert(polarity == None or polarity == 'negative')
        polarity = 'negative'
    assert(polarity == 'positive' or polarity == 'negative')
    #Additional variables used acorss all compounds
    lcms_polarity = 'ms1_' + polarity[:3]
    titles = ['Unscaled', 'Scaled', 'Full Range']
    for compound_idx in [i for i,c in enumerate(all_compound_names) if c in compound_names]:
        print(('compound is',compound_idx))
        #Find file_idx of with highest RT peak
        highest = 0
        file_idx = None
        for i,d in enumerate(data):
            if d[compound_idx]['identification'].mz_references[0].detected_polarity == polarity:
                if d[compound_idx]['data']['ms1_summary']['peak_height'] > highest:
                    highest = d[compound_idx]['data']['ms1_summary']['peak_height']
                    file_idx = i
        lcms_data = ma_data.df_container_from_metatlas_file(data[file_idx][compound_idx]['lcmsrun'].hdf5_file)
        #Find RT and mz peak for compound in file
        rt_peak = data[file_idx][compound_idx]['data']['ms1_summary']['rt_peak']
        # Closest recorded scan RT to the summary rt_peak.
        rt_peak_actual = lcms_data[lcms_polarity].iloc[(lcms_data[lcms_polarity].rt - rt_peak).abs().argsort()[0]].rt
        mz_peak_actual = data[file_idx][compound_idx]['data']['ms1_summary']['mz_peak']
        #Create and sort dataframe containing RT peak, mz and intensity
        df_all = lcms_data[lcms_polarity][(lcms_data[lcms_polarity].rt == rt_peak_actual)]
        df_all.sort_values('i',ascending=False,inplace=True)
        #Limit prior dataframe to +/- mz_min, mz_max
        df_window = df_all[(df_all['mz'] > mz_peak_actual - mz_min) &
                           (df_all['mz'] < mz_peak_actual + mz_max) ]
        #Plot compound name, mz, and RT peak
        plt.ioff()
        fig = plt.gcf()
        fig.suptitle('%s, m/z: %5.4f, rt: %f'%(all_compound_names[compound_idx], mz_peak_actual, rt_peak_actual),
                     fontsize=8,weight='bold')
        #Create axes for different views of ms1 spectra (unscaled, scaled, and full range)
        ax1 = plt.subplot2grid((11, 12), (0, 0), rowspan=5, colspan=5)
        ax2 = plt.subplot2grid((11, 12), (0, 7), rowspan=5, colspan=5)
        ax3 = plt.subplot2grid((11, 12), (6, 0), rowspan=5, colspan=12)
        #Plot ms1 spectra
        for ax_idx,(ax,df) in enumerate(zip([ax1, ax2, ax3], [df_window, df_window, df_all])):
            ax.set_xlabel('m/z',fontsize=8,weight='bold')
            ax.set_ylabel('intensity',fontsize=8,weight='bold')
            ax.tick_params(axis='both', which='major', labelsize=6)
            ax.set_title(titles[ax_idx],fontsize=8,weight='bold')
            mzs = df['mz']
            zeros = np.zeros(len(df['mz']))
            intensities = df['i']
            ax.vlines(mzs, zeros, intensities, colors='r',linewidth = 2)
            # Annotate the six most intense peaks (df is intensity-sorted).
            labels = [1.001e9]
            for i,row in df.iloc[:6].iterrows():
                ax.annotate('%.4f'%row.mz, xy=(row.mz, 1.03*row.i),rotation = 90, horizontalalignment = 'center', verticalalignment = 'left', fontsize=6)
                labels.append(row.mz)
            ax.axhline(0)
            if ax_idx != 2:
                ax.set_xlim(mz_peak_actual - mz_min, mz_peak_actual + mz_max)
            ylim = ax.get_ylim()
            if ax_idx == 1:
                # "Scaled" view: make the compound's own peak sit at ~70% height.
                ax.set_ylim(ylim[0], df[((mz_peak_actual - .05 < df['mz']) & (df['mz'] < mz_peak_actual + .05))].iloc[0]['i']*1.43)
            else:
                ax.set_ylim(ylim[0], ylim[1]*1.43)
        if not os.path.exists(output_loc):
            os.makedirs(output_loc)
        plt.savefig(os.path.join(output_loc, all_compound_names[compound_idx] + '.pdf'))
def export_atlas_to_spreadsheet(atlas, output_filename=None):
    """
    inputs:
        atlas: metatlas.datastructures.metatlas_objects.Atlas or metatlas_dataset
        output_filename: location to save csv
    output:
        returns a pandas DataFrame containing atlas
        Saves output DataFrame to output_filename in csv format.
    """
    # cols is a list of tuples, with column name as first value, and extract() ids list as second value
    cols = [(c, ['compound', 0, c]) for c in metob.Compound.class_trait_names() if not c.startswith('_')]
    cols = sorted(cols, key=lambda x: x[0])
    cols.extend([('label', ['name']), ('id_notes', ['description'])])
    cols.extend([(c, [c]) for c in ['ms1_notes', 'ms2_notes', 'identification_notes']])
    cols.extend([(c, ['rt_references', 0, c]) for c in ['rt_min', 'rt_max', 'rt_peak']])
    cols.extend([(c, ['mz_references', 0, c]) for c in ['mz', 'mz_tolerance', 'adduct']])
    cols.append(('polarity', ['mz_references', 0, 'detected_polarity']))
    is_atlas = isinstance(atlas, metob.Atlas)
    compound_ids = atlas.compound_identifications if is_atlas else [i['identification'] for i in atlas[0]]
    # Build all rows first and create the DataFrame once: growing a frame
    # cell-by-cell with .loc is quadratic in the number of compounds.
    # Dict insertion order preserves the cols ordering for the columns.
    rows = [{column_name: extract(my_id, ids) for column_name, ids in cols}
            for my_id in compound_ids]
    out = pd.DataFrame(rows) if rows else pd.DataFrame()
    if output_filename:
        # Bug fix: os.path.dirname() is '' for a bare filename, and
        # os.makedirs('') raises; only create directories when one is given.
        dir_name = os.path.dirname(output_filename)
        if dir_name:
            os.makedirs(dir_name, exist_ok=True)
        out.to_csv(output_filename)
    return out
def get_data_for_groups_and_atlas(group, myAtlas, output_filename, use_set1=False):
    """
    get and pickle everything This is MSMS, raw MS1 datapoints, compound, group info, and file info

    inputs:
        group: iterable of group objects, each holding runs in its 'items' list
        myAtlas: atlas whose compound_identifications are queried against each file
        output_filename: path where the dill-serialized result list is written
        use_set1: if True, apply adjustments for '_Set1' groups (RT shifted by
                  -0.2 min) and force a 20 ppm mz tolerance
    """
    from copy import deepcopy
    data = []
    for i, treatment_groups in enumerate(group):
        for j in range(len(treatment_groups.items)):
            myFile = treatment_groups.items[j].hdf5_file
            print((i, len(group), myFile))
            row = []
            for compound in myAtlas.compound_identifications:
                result = {}
                result['atlas_name'] = myAtlas.name
                result['atlas_unique_id'] = myAtlas.unique_id
                result['lcmsrun'] = treatment_groups.items[j]
                result['group'] = treatment_groups
                # deep copy so per-group RT/mz adjustments don't mutate the atlas
                temp_compound = deepcopy(compound)
                if use_set1:
                    if '_Set1' in treatment_groups.name:
                        temp_compound.rt_references[0].rt_min -= 0.2
                        temp_compound.rt_references[0].rt_max -= 0.2
                        temp_compound.rt_references[0].rt_peak -= 0.2
                    # NOTE(review): tolerance is forced for every group when
                    # use_set1 is on, not only '_Set1' groups -- confirm intent.
                    temp_compound.mz_references[0].mz_tolerance = 20
                result['identification'] = temp_compound
                result['data'] = ma_data.get_data_for_a_compound(temp_compound.mz_references[0],
                                                                 temp_compound.rt_references[0],
                                                                 ['ms1_summary', 'eic', 'msms'],
                                                                 myFile, 0.2)
                row.append(result)
            data.append(row)
    # Bug fix: dill serializes to bytes, so the file must be opened in binary
    # mode -- text mode 'w' raises TypeError on Python 3.
    with open(output_filename, 'wb') as f:
        dill.dump(data, f)
def compound_indices_marked_remove(data):
    """
    inputs:
        data: metatlas_dataset
    outputs:
        list of compound_idx of the compound identifications with ms1_notes to remove
    """
    marked = []
    for idx, entry in enumerate(data[0]):
        if is_remove(extract(entry, ['identification', 'ms1_notes'])):
            marked.append(idx)
    return marked
def is_remove(obj):
    """ is obj a string that starts with 'remove' (case insensitive)? """
    if not isinstance(obj, str):
        return False
    return obj.lower().startswith('remove')
def first_not_none(obj, default):
    """ returns obj if it is not None, otherwise returns default """
    if obj is None:
        return default
    return obj
def filter_by_remove(atlas_df, data):
    """
    inputs:
        atlas_df: pandas DataFrame containing an atlas
        data: metatlas_dataset
    outputs:
        a tuple of 2 pandas DataFrames:
            first: rows whose ms1_notes do NOT begin with 'remove' (kept)
            second: rows whose ms1_notes begin with 'remove'
    ms1_notes comparison with 'remove' is case insensitive.
    """
    remove_indices = compound_indices_marked_remove(data)
    kept_indices = atlas_df.index.difference(remove_indices)
    kept = atlas_df.iloc[kept_indices].copy()
    removed = atlas_df.iloc[remove_indices].copy()
    return (kept, removed)
def get_intensity(compound):
    """
    inputs:
        compound: a CompoundIdentification object
    returns a list of intensity values or an empty list if the intensity attribute does not exist
    """
    # extract() walks the nested path and falls back to the default ([]) when
    # any level of ['data']['eic']['intensity'] is missing.
    return ma_data.extract(compound, ['data', 'eic', 'intensity'], [])
def filter_atlas(atlas_df, data, num_data_points_passing=5, peak_height_passing=1e6):
    """
    inputs:
        atlas_df: panda DataFrame containing an atlas
        data: metatlas_dataset
        num_data_points_passing: number of points in EIC that must be exceeded in one or more samples
                                 in order for the compound to remain in the atlas
        peak_height_passing: max intensity in EIC that must be exceeded in one or more samples
                             in order for the compound to remain in the atlas
    returns a pandas DataFrame containing the updated atlas
    """
    passing = strong_signal_compound_idxs(data, num_data_points_passing, peak_height_passing)
    filtered = atlas_df.iloc[passing]
    return filtered.reset_index(drop=True)
def strong_signal_compound_idxs(data, num_points_passing, peak_height_passing):
    """
    inputs:
        data: metatlas_dataset
        num_points_passing: number of points in EIC that must be exceeded in one or more samples
                            in order for the compound to remain in the atlas
        peak_height_passing: max intensity in EIC that must be exceeded in one or more samples
                             in order for the compound to remain in the atlas
    returns list of indices that are above the thresholds
    """
    # For each sample build a per-compound boolean row, then OR across samples.
    enough_points = np.array(
        [[len(get_intensity(cid)) > num_points_passing for cid in sample]
         for sample in data]).any(axis=0)
    # Appending [0] guarantees max() works even for an empty intensity list.
    tall_enough = np.array(
        [[np.array(get_intensity(cid) + [0]).max() > peak_height_passing for cid in sample]
         for sample in data]).any(axis=0)
    return np.flatnonzero(enough_points & tall_enough).tolist()
def filter_metatlas_objects_to_most_recent(object_list, field):
    """
    inputs:
        object_list: metatlas objects to de-duplicate
        field: attribute name whose value identifies duplicate copies
    returns a list with one object per distinct value of field, keeping the
    copy with the largest last_modified timestamp (ties keep the earliest).

    Fixes two defects of the original: iterating list(set(...)) made the
    output order depend on hash randomization, and keep_object could be
    referenced unbound when no timestamp exceeded 0. Groups are now returned
    in first-seen order.
    """
    newest = {}  # field value -> object with the largest last_modified so far
    for obj in object_list:
        key = getattr(obj, field)
        if key not in newest or getattr(obj, 'last_modified') > getattr(newest[key], 'last_modified'):
            newest[key] = obj
    return list(newest.values())
def get_metatlas_atlas(name='%%', username='*', most_recent=True, do_print=True):
    """Retrieve Atlas objects matching the name/username patterns.

    Optionally keeps only the newest copy per name and prints a summary line
    (index, compound count, name, modification time) for each atlas.
    """
    from datetime import datetime
    atlases = metob.retrieve('Atlas', name=name, username=username)
    if most_recent:
        atlases = filter_metatlas_objects_to_most_recent(atlases, 'name')
    if do_print:
        for idx, one_atlas in enumerate(atlases):
            summary = (idx, len(one_atlas.compound_identifications), one_atlas.name,
                       datetime.utcfromtimestamp(one_atlas.last_modified))
            print(summary)
    return atlases
class interact_get_metatlas_files():
    """Small ipywidgets UI for interactively searching LcmsRun files.

    '%' acts as a SQL-style wildcard in the experiment/name patterns.
    """
    def __init__(self, experiment = '%violacein%', name = '%_%', most_recent = True):
        self.experiment = experiment
        self.name = name
        self.most_recent = most_recent
        # __manual=True defers the query until the user presses the button.
        # http://ipywidgets.readthedocs.io/en/latest/examples/Using%20Interact.html
        self.w = interact(self.Task, experiment=self.experiment, name=self.name, most_recent = self.most_recent,__manual=True)#continuous_update=False)#
    def Task(self,experiment,name,most_recent):
        """Callback invoked by interact(); runs the query and shows the match count."""
        self.experiment = experiment
        self.name = name
        self.most_recent = most_recent
        self.files = get_metatlas_files(experiment = experiment,name = name,most_recent = most_recent)#self.most_recent)
        txt = widgets.Text()
        txt.value = '%d Files were found matching that pattern'%len(self.files)
        display(txt)
def get_metatlas_files(experiment='%%', name='%%', most_recent=True):
    """
    experiment is the folder name
    name is the filename
    """
    runs = metob.retrieve('LcmsRun', experiment=experiment, name=name, username='*')
    if not most_recent:
        return runs
    return filter_metatlas_objects_to_most_recent(runs, 'mzml_file')
def make_prefilled_fileinfo_sheet(groups, filename):
    """Write a tab-separated fileinfo sheet pre-filled from existing groups.

    One row per run; edit the sheet manually and re-import it to rebuild groups.
    """
    with open(filename, 'w') as fid:
        fid.write('mzml_file\tgroup\tdescription\tshort_name\n')
        for grp in groups:
            for run in grp.items:
                fid.write('%s\t%s\t%s\t%s\n' % (run.mzml_file, grp.name, run.description, grp.short_name))
def make_empty_fileinfo_sheet(filename, flist):
    """Write a tab-separated template listing each file with empty group/description columns."""
    with open(filename, 'w') as fid:
        fid.write('mzml_file\tgroup\tdescription\n')
        fid.writelines('%s\t\t\n' % run.mzml_file for run in flist)
def make_groups_from_fileinfo_sheet(filename, filetype='tab', store=False):
    '''
    Build metob.Group objects from a fileinfo sheet.

    inputs:
        filename: path to the sheet, or a DataFrame when filetype='df'
        filetype: 'tab' (tsv), 'csv', 'df', anything else reads excel
        store: if True, save every created group to the database
    returns the list of created groups
    '''
    if filetype == 'tab':
        df = pd.read_csv(filename, sep='\t')
    elif filetype == 'csv':
        df = pd.read_csv(filename, sep=',')
    elif filetype == 'df':
        df = filename
    else:
        df = pd.read_excel(filename)
    return_groups = []
    grouped = df.groupby(by='group')
    for group_name in grouped.groups.keys():
        indices = grouped.groups[group_name]
        my_group = metob.Group()
        my_group.name = '%s' % group_name
        my_group.description = df.loc[indices[0], 'description']
        # '%%' prefix makes the retrieve pattern match any leading path
        my_group.items = [
            metob.retrieve('LcmsRun', mzml_file='%%%s' % df.loc[i, 'mzml_file'], username='*')[0]
            for i in indices]
        return_groups.append(my_group)
    if store:
        # Bug fix: the original stored only the most recently created group;
        # store every group that was built.
        for my_group in return_groups:
            metob.store(my_group)
    return return_groups
def check_compound_names(atlas_df):
    """
    inputs:
        atlas_df: pandas dataframe representation of an atlas
    throws ValueError if some compounds are not found in the database
    """
    missing = [
        row.inchi_key
        for _, row in atlas_df.iterrows()
        if pd.notna(row.inchi_key)
        and not metob.retrieve('Compounds', inchi_key=row.inchi_key, username='*')
    ]
    if missing:
        raise ValueError(f"Compound not found in database: {', '.join(missing)}.")
def check_filenames(atlas_df, field):
    """
    inputs:
        atlas_df: pandas dataframe representation of an atlas
        field: column name in atlas_df to test for valid lcmsruns
    throws ValueError if values in atlas_df[field] are not in database as lcmsruns
    """
    bad_files = []
    for _, row in atlas_df.iterrows():
        if field in row:
            # NOTE(review): only the literal suffix '.mzmL' is stripped; other
            # casings such as '.mzML' pass through. get_frag_references() uses
            # the same literal, so it may be deliberate -- confirm the casing.
            name = row[field].replace('.mzmL', '')
            if not metob.retrieve('Lcmsruns', name=f"%{name}%", username='*'):
                bad_files.append(row[field])
    if bad_files:
        raise ValueError(f"LCMS runs not found in database: {', '.join(bad_files)}.")
# def get_formatted_atlas_from_google_sheet(polarity='POS',
# method='QE_HILIC',
# mz_tolerance=10):
# import metatlas.ms_monitor_util as mmu
# df2 = mmu.get_ms_monitor_reference_data()
# #print df.head()
# #df2 = pd.DataFrame(df[1:],columns=df[0])
# fields_to_keep = [ 'name',
# 'label',
# 'inchi_key',
# 'mz_%s'%polarity,
# 'rt_min_%s'%method,
# 'rt_max_%s'%method,
# 'rt_peak_%s'%method,
# 'file_mz_%s_%s'%(method,polarity),
# 'file_rt_%s_%s'%(method,polarity),
# 'file_msms_%s_%s'%(method,polarity)]
# fields_there = []
# for f in fields_to_keep:
# if f in df2.keys():
# fields_there.append(f)
# df3 = df2.loc[:,fields_there]
# df3['mz_tolerance'] = mz_tolerance
# if polarity == 'POS':
# df3['polarity'] = 'positive'
# else:
# df3['polarity'] = 'negative'
# renamed_columns = [c.replace('_%s'%method,'').replace('_%s'%polarity,'') for c in df3.columns]
# for i,c in enumerate(df3.columns):
# df3 = df3.rename(columns = {c:renamed_columns[i]})
# df3 = df3[df3['mz'] != '']
# return df3
def _clean_dataframe(dataframe, required_columns=None, lower_case_col_names=True):
"""
inputs:
dataframe: pandas dataframe
required_columns: list of column names that must have a non-NA values
lower_case_col_names: should column names be modified to lower case
Modifies dataframe in place. The following rows removed:
fully empty (all fields have NA values)
containing required_columns with 1 or more NA values
"""
dataframe.dropna(how="all", inplace=True)
if required_columns is not None and len(required_columns) > 0:
dataframe.dropna(how="any", subset=required_columns, inplace=True)
if lower_case_col_names:
dataframe.columns = [x.lower() for x in dataframe.columns]
def _add_columns(dataframe, column_names, default_values=None):
"""
inputs:
dataframe: pandas dataframe
column_names: list of column names to add to dataframe if they do not already exist
default_values: a single default value for all columns or a list of default values
the same length as column_names
Modifies the dataframe in place
"""
assert isinstance(column_names, list)
num_col = len(column_names)
if isinstance(default_values, str):
default_values = [default_values]
num_default = 1 if default_values is None else len(default_values)
assert num_default in [1, num_col]
default_values = [default_values]*num_col if num_default == 1 else default_values
for name, default in zip(column_names, default_values):
if name not in dataframe.columns:
dataframe[name] = default
def _get_dataframe(filename_or_df=None, filetype=None, sheetname=None):
"""
inputs:
filename_or_df: a filename to an excel, tsv or csv file, or a pandas DataFrame
filetype: a string in dataframe, excel, tab, csv
sheetname: name of a sheet in an excel file, or get first sheet if None
returns a pandas Dataframe
"""
assert filetype in ['dataframe', 'excel', 'tab', 'csv']
if filetype == 'dataframe':
return filename_or_df.copy()
if filetype == 'excel':
return pd.read_excel(filename_or_df, sheetname=0 if sheetname is None else sheetname)
return pd.read_csv(filename_or_df, sep='\t' if filetype == 'tab' else ',')
def get_compound_identification(row, polarity, mz_tolerance):
    """Build a metob.CompoundIdentification from one atlas_df row.

    Returns None when the row's inchi_key is not found in the database.
    """
    my_id = metob.CompoundIdentification()
    # All stored copies of the molecule are returned; the slice keeps the last
    # entry as a one-element list. NOTE(review): the original comment claimed
    # index 0 is the most recent copy, but the code takes [-1:] -- confirm
    # which end of retrieve()'s ordering is newest.
    compound_list = metob.retrieve('Compounds', inchi_key=row.inchi_key, username='*')
    if compound_list is None:
        return None
    my_id.compound = compound_list[-1:]
    my_id.name = row.label if isinstance(row.label, str) else 'no label'
    # optional attributes: copied only if present on the row
    _copy_attributes(row, my_id, ['do_normalization', 'internal_standard_id', 'internal_standard_to_use',
                                  'identification_notes', 'ms1_notes', 'ms2_notes'])
    my_id.mz_references = get_mz_references(row, polarity, mz_tolerance)
    my_id.rt_references = get_rt_references(row)
    my_id.frag_references = get_frag_references(row, my_id.name, polarity,
                                                my_id.mz_references[0], my_id.rt_references[0])
    my_id.intensity_references = []
    return my_id
def get_mz_references(row, polarity, mz_tolerance=None):
    """Build a one-element list holding the MzReference for an atlas_df row.

    mz_tolerance overrides the row's own mz_tolerance/mz_threshold value.
    """
    assert polarity in ['positive', 'negative']
    ref = metob.MzReference()
    ref.mz = row.mz
    # TODO: calculate the mz from theoretical adduct and modification if provided.
    if mz_tolerance is not None:
        ref.mz_tolerance = mz_tolerance
    else:
        try:
            ref.mz_tolerance = row.mz_tolerance
        except AttributeError:
            ref.mz_tolerance = row.mz_threshold
    ref.mz_tolerance_units = 'ppm'
    ref.detected_polarity = polarity
    if pd.notna(row.adduct):
        ref.adduct = row.adduct
    return [ref]
def get_rt_references(row):
    """Build a one-element list holding the RtReference (in minutes) for an atlas_df row.

    Raises AttributeError if rt_min/rt_max/rt_peak are missing from the row.
    """
    ref = metob.RtReference()
    ref.rt_units = 'min'
    _copy_attributes(row, ref, ['rt_min', 'rt_max', 'rt_peak'], error_on_missing=True)
    return [ref]
def get_frag_references(row, name, polarity, mz_ref, rt_ref):
    """
    inputs:
        row: atlas_df row for the compound identification of interest
        name: compound name
        polarity: positive or negative
        mz_ref: MzReference object
        rt_ref: RtReference object
    returns an array of FragmentationReferences or empty array if no msms data is found
    """
    assert polarity in ['positive', 'negative']
    try:
        # row.file_msms may be absent (AttributeError) and the run lookup may
        # come back empty (IndexError); both mean there is no reference run.
        run_name = row.file_msms.replace('.mzmL', '')
        run = metob.retrieve('Lcmsruns', name=f"%{run_name}%", username='*')[0]
    except (AttributeError, IndexError):
        return []
    data = ma_data.get_data_for_a_compound(mz_ref, rt_ref, ['msms'], run.hdf5_file, extra_time=0.3)
    # a non-ndarray here means no msms rows were collected for this compound/run
    if not isinstance(data['msms']['data'], np.ndarray):
        return []
    frag_ref = metob.FragmentationReference()
    frag_ref.lcms_run = run
    frag_ref.polarity = polarity
    frag_ref.precursor_mz = row.mz
    # keep only the scan(s) with the maximum precursor intensity
    precursor_intensity = data['msms']['data']['precursor_intensity']
    idx_max = np.argwhere(precursor_intensity == np.max(precursor_intensity)).flatten()
    mz_list = data['msms']['data']['mz'][idx_max]
    intensity_list = data['msms']['data']['i'][idx_max]
    frag_ref.mz_intensities = get_spectrum(mz_list, intensity_list)
    logger.info('Found reference msms spectrum for %s in file %s.', name, row.file_msms)
    return [frag_ref]
def get_spectrum(mz_list, intensity_list):
    """
    inputs:
        mz_list: list of mz values
        intensity_list: list of intensities values
    returns a list of MzIntensityPairs()
    """
    assert len(mz_list) == len(intensity_list)
    pairs = []
    for mz_value, intensity_value in zip(mz_list, intensity_list):
        pair = metob.MzIntensityPair()
        pair.mz = mz_value
        pair.intensity = intensity_value
        pairs.append(pair)
    return pairs
def get_atlas(name, atlas_df, polarity, mz_tolerance):
    """
    inputs:
        name: string with name of atlas
        atlas_df: pandas DataFrame with atlas definition
        polarity: positive or negative
        mz_tolerance: float to set for all mz_tolerance values
    returns an Atlas object
    atlas_df should not contain empty strings, use np.NaN instead
    """
    atlas = metob.Atlas()
    atlas.name = name
    atlas.compound_identifications = []
    for _, row in atlas_df.iterrows():
        identification = get_compound_identification(row, polarity, mz_tolerance)
        if identification is None:
            # compound not in the database: log and skip this row
            logger.warning(('get_atlas() dropping compound %s '
                            '(inchi_key %s) because it is not in the database.'), row.label, row.inchi_key)
            continue
        atlas.compound_identifications.append(identification)
    return atlas
def make_atlas_from_spreadsheet(filename, atlas_name, filetype, sheetname=None,
                                polarity=None, store=False, mz_tolerance=None):
    '''
    Build (and optionally store) an Atlas from a spreadsheet or DataFrame.

    specify polarity as 'positive' or 'negative'
    filetype is one of 'dataframe', 'excel', 'tab', 'csv' (see _get_dataframe)
    '''
    logger.debug('Generating atlas named %s from %s source.', atlas_name, filetype)
    atlas_df = _get_dataframe(filename, filetype, sheetname)
    # rows lacking inchi_key or label cannot form identifications
    _clean_dataframe(atlas_df, required_columns=['inchi_key', 'label'])
    _add_columns(atlas_df, column_names=['adduct'], default_values=[np.NaN])
    # validate against the database before building any objects
    check_compound_names(atlas_df)
    check_filenames(atlas_df, 'file_msms')
    atlas = get_atlas(atlas_name, atlas_df, polarity, mz_tolerance)
    if store:
        logger.debug('Saving atlas named %s to DB.', atlas_name)
        metob.store(atlas)
    return atlas
def _copy_attributes(source, dest, attribute_list, default_list=None, error_on_missing=False):
"""
inputs:
source: object to copy attributes from
dest: object to copy attributes to
attribute_list: list of string containing attribute names
default_list: list of default values corresponding to same positions in attribute_list
Modifies dest in place to have all attributes from attribute_list with values coming from
source or default_list. If source does not contain the attribute and default_list is None,
then do not add the attribute to dest if it does not already exist.
"""
if default_list is None:
for attribute in attribute_list:
if error_on_missing:
setattr(dest, attribute, getattr(source, attribute))
else:
try:
setattr(dest, attribute, getattr(source, attribute))
except AttributeError:
pass
else:
for attribute, default in zip(attribute_list, default_list):
setattr(dest, attribute, getattr(source, attribute, default))
def filter_empty_metatlas_objects(object_list, field):
    """
    inputs:
        object_list: metatlas objects to filter
        field: attribute name holding a sized container (e.g. 'items')
    returns the objects whose field attribute exists and is non-empty.

    Invalid legacy objects left in the database may not conform to the current
    schema and can raise when their attributes are queried; those are skipped.
    The original used a bare 'except:', which also swallowed
    KeyboardInterrupt/SystemExit; narrowing to Exception keeps the intended
    best-effort behavior without masking interpreter exits.
    """
    filtered_list = []
    for obj in object_list:
        try:
            if len(getattr(obj, field)) > 0:
                filtered_list.append(obj)
        except Exception:
            pass
    return filtered_list
def filter_metatlas_objects_by_list(object_list, field, filter_list):
    """
    inputs:
        object_list: iterable to be filtered by its attribute values
        field: name of attribute to filter on
        filter_list: strings that are tested to see if they are substrings of the attribute value
    returns filtered list of objects that have a match in filter_list
    """
    def attribute_value(obj):
        return getattr(obj, field)
    return filter_by_list(object_list, attribute_value, filter_list)
def remove_metatlas_objects_by_list(object_list, field, filter_list):
    """
    inputs:
        object_list: iterable to be filtered by its attribute values
        field: name of attribute to filter on
        filter_list: strings that are tested to see if they are substrings of the attribute value
    returns filtered list of objects that do not have matches to filter_list
    """
    def attribute_value(obj):
        return getattr(obj, field)
    return filter_by_list(object_list, attribute_value, filter_list, include=False)
def filter_by_list(data, key_func, term_list, include=True):
    """
    inputs:
        data: iterable to be filtered
        key_func: function that takes a member of data and returns string to compare with term_list
        term_list: strings that are tested to see if they are substrings of key_func return value
        include: if True, then matches are included in output, else matches are excluded
    """
    kept = []
    for item in data:
        matched = any(term in key_func(item) for term in term_list)
        # keep when match status agrees with the include flag
        if matched == include:
            kept.append(item)
    return kept
def filter_lcmsruns_in_dataset_by_include_list(metatlas_dataset, selector, include_list):
    """
    Returns a metatlas dataset containing LCMS runs or groups (denoted by selector) that have substrings
    listed in the include list.
    selector can be 'lcmsrun' or 'group'
    include_list will look something like this: ['QC','Blank']
    """
    def sample_name(sample):
        return sample[0][selector].name
    return filter_by_list(metatlas_dataset, sample_name, include_list)
def filter_lcmsruns_in_dataset_by_exclude_list(metatlas_dataset, selector, exclude_list):
    """
    Returns a metatlas dataset containing LCMS runs or groups (denoted by selector) that have substrings
    not listed in the include list.
    selector can be 'lcmsrun' or 'group'
    exclude_list will look something like this: ['QC','Blank']
    """
    def sample_name(sample):
        return sample[0][selector].name
    return filter_by_list(metatlas_dataset, sample_name, exclude_list, include=False)
def filter_compounds_in_dataset_by_exclude_list(metatlas_dataset, exclude_list):
    """
    Since the rows of the dataset are expected to line up with an atlas export, this is probably not a good idea to use.
    """
    def keeps(entry):
        ident = entry['identification']
        # same short-circuit as the original: the compound name is only
        # inspected when the identification name itself has no match
        if any(term in ident.name for term in exclude_list):
            return False
        return not any(term in ident.compound[0].name for term in exclude_list)
    return [[entry for entry in sample_row if keeps(entry)] for sample_row in metatlas_dataset]
def filter_compounds_in_dataset_by_include_list(metatlas_dataset, include_list):
    """
    Since the rows of the dataset are expected to line up with an atlas export, this is probably not a good idea to use.
    """
    def keeps(entry):
        ident = entry['identification']
        # both the identification name and the compound name must match
        if not any(term in ident.name for term in include_list):
            return False
        return any(term in ident.compound[0].name for term in include_list)
    return [[entry for entry in sample_row if keeps(entry)] for sample_row in metatlas_dataset]
def select_groups_for_analysis(name='%', description=None, username='*', do_print=True,
                               most_recent=True, remove_empty=True, include_list=None,
                               exclude_list=None):
    """
    Retrieve Group objects and successively filter them.

    inputs:
        name/description/username: metob.retrieve() search patterns
        do_print: print (index, name, modification time) for each group
        most_recent: keep only the newest copy among groups sharing a name
        remove_empty: drop groups whose items list is empty
        include_list/exclude_list: substring filters applied to group names

    The mutable default arguments ([] for description/include_list/
    exclude_list) were replaced with None to avoid the shared-default
    pitfall; passing [] explicitly behaves exactly as before since all
    three are only tested for truthiness.
    """
    if description:
        groups = metob.retrieve('Groups', name=name, description=description, username=username)
    else:
        groups = metob.retrieve('Groups', name=name, username=username)
    if most_recent:
        groups = filter_metatlas_objects_to_most_recent(groups, 'name')
    if include_list:
        groups = filter_metatlas_objects_by_list(groups, 'name', include_list)
    if exclude_list:
        groups = remove_metatlas_objects_by_list(groups, 'name', exclude_list)
    if remove_empty:
        groups = filter_empty_metatlas_objects(groups, 'items')
    if do_print:
        from datetime import datetime
        for i, group in enumerate(groups):
            print((i, group.name, datetime.utcfromtimestamp(group.last_modified)))
    return groups
def disable_keyboard_shortcuts(mapping):
    """
    Takes a dictionary with a subset of keys from plot.rcParams and values
    are arrays of strings, which are keyboard short cuts to be removed
    """
    for action, keys_to_remove in mapping.items():
        if action not in plt.rcParams:
            continue
        bound_keys = plt.rcParams[action]
        for combo in keys_to_remove:
            if combo in bound_keys:
                bound_keys.remove(combo)
def get_msms_plot_headers(data, hits, hit_ctr, compound_idx, compound, similar_compounds, file_names):
    """
    inputs:
        data: metatlas_dataset-like object
        hits: dataframe
        hit_ctr: the index in hits of the current hit
        compound_idx: index of curent compound in 2nd dim of data
        compound: object for current compound (not referenced in the body)
        similar_compounds: output of get_similar_compounds()
        file_names: list of file names (not referenced in the body)
    returns:
        tuple of strings
        (mz_header, rt_header, cpd_header)
    """
    if not hits.empty:
        # only defined when hits exist; consumed in the matching guard below
        rt_ms2 = hits.index.get_level_values('msms_scan')[hit_ctr]
        mz_precursor = hits['measured_precursor_mz'].iloc[hit_ctr]
    # theoretical vs measured values come from the file with the strongest MS1 signal
    file_idx = file_with_max_ms1_intensity(data, compound_idx)[0]
    rt_theoretical = data[file_idx][compound_idx]['identification'].rt_references[0].rt_peak
    mz_theoretical = data[file_idx][compound_idx]['identification'].mz_references[0].mz
    mz_measured = data[file_idx][compound_idx]['data']['ms1_summary']['mz_centroid']
    rt_ms1 = data[file_idx][compound_idx]['data']['ms1_summary']['rt_peak']
    delta_mz = abs(mz_theoretical - mz_measured)
    delta_ppm = delta_mz / mz_theoretical * 1e6
    mz_header = ["m/z theoretical = %5.4f" % mz_theoretical,
                 "m/z measured = %5.4f" % mz_measured,
                 "ppm diff = %3.2f" % delta_ppm]
    rt_header = ["RT theoretical = %3.2f" % rt_theoretical,
                 "RT MS1 measured = %3.2f" % rt_ms1]
    if not hits.empty:
        mz_header.insert(0, "precursor m/z = %5.4f" % mz_precursor)
        rt_header.append("RT MS2 measured = %3.2f" % rt_ms2)
    return (', '.join(mz_header), ', '.join(rt_header),
            get_similar_compounds_header(similar_compounds, compound_idx))
def get_similar_compounds_header(similar_compounds, compound_idx):
    """
    inputs:
        similar_compounds: the output from get_similar_compounds()
        compound_idx: index of current compound being considered
    returns:
        a 'Similar Compounds = ...' header string, or '' when fewer than
        two entries exist
    """
    if len(similar_compounds) < 2:
        return ''
    parts = [_similar_compound_to_str(entry, compound_idx) for entry in similar_compounds]
    return "Similar Compounds = {}".format('; '.join(parts))
def _similar_compound_to_str(sdict, compound_idx):
"""
inputs:
sdict: a dict returned from get_similar_compounds()
compound_index: index of current compound being considered
returns:
string with only non-breaking spaces or '*' if dict represents current compound
"""
if sdict['index'] == compound_idx:
return '*'
return f"{sdict['index']}, {sdict['label']} {{RT-{sdict['rt'].rt_peak:.2f}}}".replace(' ', '\xa0')
def get_msms_plot_data(hits, hit_ctr):
    """
    inputs:
        hits: dataframe of msms hits
        hit_ctr: index of current hit in hits
    returns:
        tuple: (hit_ref_id, hit_score, hit_query, hit_ref)
    """
    if hits.empty:
        # placeholder 2x2 NaN array stands in for both spectra
        placeholder = np.full((2, 2, ), np.nan)
        return ("N/A", np.nan, placeholder, placeholder)
    return (hits.index.get_level_values('id')[hit_ctr],
            hits['score'].iloc[hit_ctr],
            hits['msv_query_aligned'][hit_ctr:hit_ctr + 1].iloc[0],
            hits['msv_ref_aligned'][hit_ctr:hit_ctr + 1].iloc[0])
def get_hit_metadata(data, hits, file_names, hit_ctr, compound_idx):
    """
    returns a tuple containing:
        file name (without path) of the hit or the string 'None'
        compound object for the hit or None
    """
    if not hits.empty:
        hit_file_name = hits.index.get_level_values('file_name')[hit_ctr]
        return (hit_file_name, data[int(file_names.index(hit_file_name))][compound_idx])
    file_idx = file_with_max_ms1_intensity(data, compound_idx)[0]
    # Bug fix: 'if file_idx:' treated index 0 (the first file) as falsy and
    # incorrectly fell through to ('None', None). Test for None explicitly.
    # NOTE(review): assumes file_with_max_ms1_intensity returns None in the
    # first slot when nothing is found -- confirm its contract.
    if file_idx is not None:
        return (os.path.basename(data[file_idx][compound_idx]['lcmsrun'].hdf5_file),
                data[file_idx][compound_idx])
    return ('None', None)
def within_tolerance(measured, theoretical, tolerance):
    """ Returns True if normalized, absolute difference is with tolerance """
    relative_error = abs(measured - theoretical) / theoretical
    return relative_error <= tolerance
def layout_radio_button_set(area, anchor='SW'):
    """
    inputs:
        area: [left, bottom, width, height]
        anchor: string for anchor direction
    returns:
        an axes for radiobuttons at area with axis off and equal aspect ratio
    """
    button_axes = plt.axes(area, anchor=anchor, aspect='equal')
    button_axes.axis('off')
    return button_axes
def rt_range_overlaps(rt1, rt2):
    """
    inputs:
        rt1: metatlas.datastructures.metatlas_objects.RtReference
        rt2: metatlas.datastructures.metatlas_objects.RtReference
    returns:
        True if there is overlap in the RT min-max regions of rt1 and rt2
    """
    starts_inside = rt2.rt_min <= rt1.rt_min <= rt2.rt_max
    ends_inside = rt2.rt_min <= rt1.rt_max <= rt2.rt_max
    contains_start = rt1.rt_min <= rt2.rt_min <= rt1.rt_max
    contains_end = rt1.rt_min <= rt2.rt_max <= rt1.rt_max
    return starts_inside or ends_inside or contains_start or contains_end
| metabolite-atlas/metatlas | metatlas/plots/dill2plots.py | Python | bsd-3-clause | 147,019 | [
"RDKit"
] | 30f5268aa3d1a2050e32072fa835b9ac226a2416c23bcee2f171da22f4bac4fd |
#!/usr/bin/env python3
########################################################################
# Solves problem 129 from projectEuler.net.
# Finds the n so that the first R(k) divisible by n is > R(10 ** 6).
# Copyright (C) 2011 Santiago Alessandri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact me at san.lt.ss@gmail.com
# Visit my wiki at http://wiki.san-ss.com.ar
# Visit my blog at http://blog.san-ss.com.ar
########################################################################
from CommonFunctions import *
from itertools import *
def A(n):
    """
    Return A(n): the length k of the smallest repunit R(k) divisible by n.

    R(k) = (10**k - 1) / 9, so n divides R(k) exactly when 10**k == 1 (mod 9n);
    A(n) is therefore the multiplicative order of 10 modulo 9n, which exists
    only when gcd(n, 10) == 1 (callers skip even n and multiples of 5).
    Uses the built-in three-argument pow() for modular exponentiation instead
    of the project-local mod_pow helper -- identical semantics, C speed, and
    no extra dependency.
    """
    i = 2  # the search for this problem only needs k >= 2
    while pow(10, i, 9 * n) != 1:
        i += 1
    return i
limit = 10 ** 6  # looking for the first n with A(n) > one million
if __name__ == '__main__':
    # A(n) exists only when gcd(n, 10) == 1: count(1000001, 2) yields odd
    # numbers, and multiples of 5 are skipped explicitly below. Since
    # A(n) < n, the answer must itself exceed limit, so start just above it.
    for n in count(1000001, 2):
        if str(n)[-1] == '5':
            continue
        x = A(n)
        if x > limit:
            break
    print("The result is:", n)
| sanSS/programming-contests | project-euler/problem129.py | Python | gpl-3.0 | 1,490 | [
"VisIt"
] | 8ae9a33d8f280dacb2cee1f9a06ed369e2137189fa45fb759dc8eaea1ed1784f |
# coding: utf-8
"""Test mdn.compatibility"""
from __future__ import unicode_literals
from django.utils.six import text_type
from mdn.compatibility import (
CellVersion, CompatFeatureVisitor, CompatFootnoteVisitor,
CompatSectionExtractor, CompatSupportVisitor, Footnote,
compat_feature_grammar, compat_support_grammar, compat_footnote_grammar)
from mdn.kumascript import KumaVisitor, kumascript_grammar
from webplatformcompat.models import Feature, Support
from .base import TestCase
class TestCompatSectionExtractor(TestCase):
    def setUp(self):
        """Create the fixture feature, a fresh visitor, and a Firefox 1.0 version."""
        self.feature = self.get_instance('Feature', 'web-css-background-size')
        self.visitor = KumaVisitor()
        self.version = self.get_instance('Version', ('firefox_desktop', '1.0'))
    def construct_html(
            self, header=None, pre_table=None, feature=None,
            browser=None, support=None, after_table=None):
        """Create a basic compatibility section.

        Each keyword overrides one slot of the template; None selects the
        default (Browser compatibility header, CompatibilityTable macro,
        Firefox browser, contain/cover feature, '1.0' support, and nothing
        after the table).
        """
        return """\
{header}
{pre_table}
<div id="compat-desktop">
<table class="compat-table">
  <tbody>
    <tr>
      <th>Feature</th>
      <th>{browser}</th>
    </tr>
    <tr>
      <td>{feature}</td>
      <td>{support}</td>
    </tr>
  </tbody>
</table>
</div>
{after_table}
""".format(
            header=header or (
                '<h2 id="Browser_compatibility">Browser compatibility</h2>'),
            pre_table=pre_table or '<div>{{CompatibilityTable}}</div>',
            browser=browser or 'Firefox',
            feature=feature or '<code>contain</code> and <code>cover</code>',
            support=support or '1.0',
            after_table=after_table or '')
    def get_default_compat_div(self):
        """Return the expected extraction result for the default construct_html() page."""
        browser_id = self.version.browser_id
        version_id = self.version.id
        return {
            'name': u'desktop',
            'browsers': [{
                'id': browser_id, 'name': 'Firefox for Desktop',
                'slug': 'firefox_desktop'}],
            'versions': [{
                'browser': browser_id, 'id': version_id, 'version': '1.0'}],
            'features': [{
                'id': '_contain and cover',
                'name': '<code>contain</code> and <code>cover</code>',
                'slug': 'web-css-background-size_contain_and_cover'}],
            'supports': [{
                'feature': '_contain and cover',
                'id': '__contain and cover-%s' % version_id,
                'support': 'yes', 'version': version_id}]}
def assert_extract(
self, html, compat_divs=None, footnotes=None, issues=None):
parsed = kumascript_grammar['html'].parse(html)
out = self.visitor.visit(parsed)
extractor = CompatSectionExtractor(feature=self.feature, elements=out)
extracted = extractor.extract()
self.assertEqual(extracted['compat_divs'], compat_divs or [])
self.assertEqual(extracted['footnotes'], footnotes or {})
self.assertEqual(extracted['issues'], issues or [])
def test_standard(self):
html = self.construct_html()
expected = self.get_default_compat_div()
self.assert_extract(html, [expected])
def test_unknown_browser(self):
html = self.construct_html(browser='Fire')
expected = self.get_default_compat_div()
expected['browsers'][0] = {
'id': '_Fire', 'name': 'Fire', 'slug': '_Fire'}
expected['versions'][0] = {
'id': '_Fire-1.0', 'version': '1.0', 'browser': '_Fire'}
expected['supports'][0] = {
'id': u'__contain and cover-_Fire-1.0',
'support': 'yes',
'feature': '_contain and cover',
'version': '_Fire-1.0'}
issue = ('unknown_browser', 205, 218, {'name': 'Fire'})
self.assert_extract(html, [expected], issues=[issue])
def test_wrong_first_column_header(self):
# All known pages use "Feature" for first column, but be ready
html = self.construct_html()
html = html.replace('<th>Feature</th>', '<th>Features</th>')
expected = self.get_default_compat_div()
issue = ('feature_header', 180, 197, {'header': 'Features'})
self.assert_extract(html, [expected], issues=[issue])
def test_footnote(self):
html = self.construct_html(
support="1.0 [1]",
after_table="<p>[1] This is a footnote.</p>")
expected = self.get_default_compat_div()
expected['supports'][0]['footnote'] = 'This is a footnote.'
expected['supports'][0]['footnote_id'] = ('1', 322, 325)
self.assert_extract(html, [expected])
def test_footnote_mismatch(self):
html = self.construct_html(
support="1.0 [1]",
after_table="<p>[2] Oops, footnote ID is wrong.</p>")
expected = self.get_default_compat_div()
expected['supports'][0]['footnote_id'] = ('1', 322, 325)
footnotes = {'2': ('Oops, footnote ID is wrong.', 374, 412)}
issues = [
('footnote_missing', 322, 325, {'footnote_id': '1'}),
('footnote_unused', 374, 412, {'footnote_id': '2'})]
self.assert_extract(
html, [expected], footnotes=footnotes, issues=issues)
def test_extra_row_cell(self):
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/
# Reference/Global_Objects/WeakSet, March 2015
html = self.construct_html()
html = html.replace(
"<td>1.0</td>", "<td>1.0</td><td>{{CompatUnknown()}}</td>")
self.assertTrue('CompatUnknown' in html)
expected = self.get_default_compat_div()
issue = ('extra_cell', 326, 354, {})
self.assert_extract(html, [expected], issues=[issue])
def test_compat_mobile_table(self):
mobile = """
<div id="compat-mobile">
<table class="compat-table">
<tbody>
<tr><th>Feature</th><th>Safari Mobile</th></tr>
<tr>
<td><code>contain</code> and <code>cover</code></td>
<td>1.0 [1]</td>
</tr>
</tbody>
</table>
</div>
<p></p>
<p>[1] It's really supported.</p>
"""
html = self.construct_html(after_table=mobile)
expected_desktop = self.get_default_compat_div()
expected_mobile = {
'name': 'mobile',
'browsers': [{
'id': '_Safari for iOS',
'name': 'Safari for iOS',
'slug': '_Safari for iOS',
}],
'features': [{
'id': '_contain and cover',
'name': '<code>contain</code> and <code>cover</code>',
'slug': 'web-css-background-size_contain_and_cover',
}],
'versions': [{
'id': '_Safari for iOS-1.0',
'version': '1.0',
'browser': '_Safari for iOS',
}],
'supports': [{
'id': '__contain and cover-_Safari for iOS-1.0',
'feature': '_contain and cover',
'support': 'yes',
'version': '_Safari for iOS-1.0',
'footnote': "It's really supported.",
'footnote_id': ('1', 581, 584),
}],
}
issue = ('unknown_browser', 465, 487, {'name': 'Safari Mobile'})
self.assert_extract(
html, [expected_desktop, expected_mobile], issues=[issue])
def test_pre_content(self):
header_plus = (
'<h2 id="Browser_compatibility">Browser compatibility</h2>'
'<p>Here\'s some extra content.</p>')
html = self.construct_html(header=header_plus)
expected = self.get_default_compat_div()
issue = ('skipped_content', 57, 90, {})
self.assert_extract(html, [expected], issues=[issue])
def test_feature_issue(self):
html = self.construct_html(
feature='<code>contain</code> and <code>cover</code> [1]')
expected = self.get_default_compat_div()
issue = ('footnote_feature', 300, 304, {})
self.assert_extract(html, [expected], issues=[issue])
def test_support_issue(self):
html = self.construct_html(support="1.0 (or earlier)")
expected = self.get_default_compat_div()
issue = ('inline_text', 322, 334, {'text': '(or earlier)'})
self.assert_extract(html, [expected], issues=[issue])
def test_footnote_issue(self):
html = self.construct_html(after_table="<p>Here's some text.</p>")
expected = self.get_default_compat_div()
issue = ('footnote_no_id', 370, 394, {})
self.assert_extract(html, [expected], issues=[issue])
def test_table_div_wraps_h3(self):
# https://developer.mozilla.org/en-US/docs/Web/API/AudioBufferSourceNode
html = self.construct_html()
html = html.replace(
'</div>', "<h3>Gecko Notes</h3><p>It rocks</p></div>")
expected = self.get_default_compat_div()
issues = [
('skipped_content', 58, 126, {}),
('footnote_gap', 434, 438, {}),
('footnote_no_id', 418, 433, {})]
self.assert_extract(html, [expected], issues=issues)
def test_support_colspan_exceeds_table_width(self):
# https://developer.mozilla.org/en-US/docs/Web/API/KeyboardEvent
html = self.construct_html()
html = html.replace("<td>1.0", '<td colspan="2">1.0')
expected = self.get_default_compat_div()
issue = ('cell_out_of_bounds', 314, 338, {})
self.assert_extract(html, [expected], issues=[issue])
class TestFootnote(TestCase):
    """Tests for the Footnote representation of "[1]"-style markers."""

    def test_numeric(self):
        note = Footnote(raw='[1]', footnote_id='1')
        self.assertEqual('[1]', text_type(note))
        self.assertEqual('1', note.footnote_id)
        self.assertEqual('1', note.raw_footnote)

    def test_stars(self):
        # TODO: replace "convert to '3'" with raw '***'
        note = Footnote(raw='[***]', footnote_id='***')
        self.assertEqual('[3]', text_type(note))
        self.assertEqual('3', note.footnote_id)
        self.assertEqual('***', note.raw_footnote)
class TestFeatureGrammar(TestCase):
    """Smoke tests that feature cells parse under compat_feature_grammar."""

    def assert_parses(self, cell):
        # parse() raises on failure; the truthiness check guards against
        # a None return as well.
        tree = compat_feature_grammar['html'].parse(cell)
        assert tree

    def test_standard(self):
        self.assert_parses('<td>contain and cover</td>')

    def test_rowspan(self):
        self.assert_parses('<td rowspan="2">Two-line feature</td>')

    def test_cell_with_footnote(self):
        self.assert_parses('<td>Bad Footnote [1]</td>')
class TestFeatureVisitor(TestCase):
    """Tests for converting a feature cell into a feature dict.

    NOTE(review): issue tuples embed character offsets into the
    '<td>...</td>' row cell built by assert_feature, so cell contents
    (including the test_table fixture's indentation) are significant.
    """

    scope = 'compatibility feature'

    def setUp(self):
        self.parent_feature = self.get_instance(
            'Feature', 'web-css-background-size')
        self.visitor = CompatFeatureVisitor(parent_feature=self.parent_feature)

    def assert_feature(
            self, contents, feature_id, name, slug, canonical=False,
            experimental=False, standardized=True, obsolete=False,
            issues=None):
        """Visit a '<td>' cell and assert the resulting feature dict.

        Flags that are False/default are expected to be absent from the
        dict, not present-and-false (except 'standardized').
        """
        row_cell = "<td>%s</td>" % contents
        parsed = compat_feature_grammar['html'].parse(row_cell)
        self.visitor.visit(parsed)
        feature_dict = self.visitor.to_feature_dict()
        self.assertEqual(issues or [], self.visitor.issues)
        self.assertEqual(feature_id, feature_dict['id'])
        self.assertEqual(slug, feature_dict['slug'])
        self.assertEqual(name, feature_dict['name'])
        if canonical:
            self.assertTrue(feature_dict['canonical'])
        else:
            self.assertFalse('canonical' in feature_dict)
        if experimental:
            self.assertTrue(feature_dict['experimental'])
        else:
            self.assertFalse('experimental' in feature_dict)
        if obsolete:
            self.assertTrue(feature_dict['obsolete'])
        else:
            self.assertFalse('obsolete' in feature_dict)
        if standardized:
            self.assertFalse('standardized' in feature_dict)
        else:
            self.assertFalse(feature_dict['standardized'])

    def test_remove_whitespace(self):
        cell = (
            ' Support for<br>\n <code>contain</code> and'
            ' <code>cover</code> ')
        feature_id = '_support for contain and cover'
        name = 'Support for <code>contain</code> and <code>cover</code>'
        slug = 'web-css-background-size_support_for_contain_and_co'
        self.assert_feature(cell, feature_id, name, slug)

    def test_code_sequence(self):
        # https://developer.mozilla.org/en-US/docs/Web/CSS/display
        cell = (
            '<code>none</code>, <code>inline</code> and'
            ' <code>block</code>')
        feature_id = '_none, inline and block'
        name = '<code>none</code>, <code>inline</code> and <code>block</code>'
        slug = 'web-css-background-size_none_inline_and_block'
        self.assert_feature(cell, feature_id, name, slug)

    def test_canonical(self):
        # https://developer.mozilla.org/en-US/docs/Web/CSS/display
        cell = '<code>list-item</code>'
        feature_id = '_list-item'
        name = 'list-item'
        slug = 'web-css-background-size_list-item'
        self.assert_feature(cell, feature_id, name, slug, canonical=True)

    def test_canonical_match(self):
        # An existing Feature with the same name is matched by ID.
        name = 'list-item'
        slug = 'slug-list-item'
        feature = self.create(
            Feature, parent=self.parent_feature, name={'zxx': name}, slug=slug)
        cell = '<code>list-item</code>'
        self.assert_feature(cell, feature.id, name, slug, canonical=True)

    def test_ks_experimental(self):
        cell = '<code>grid</code> {{experimental_inline}}'
        feature_id = '_grid'
        name = 'grid'
        slug = 'web-css-background-size_grid'
        self.assert_feature(
            cell, feature_id, name, slug, canonical=True, experimental=True)

    def test_ks_non_standard_inline(self):
        # https://developer.mozilla.org/en-US/docs/Web/API/AnimationEvent
        cell = '<code>initAnimationEvent()</code> {{non-standard_inline}}'
        feature_id = '_initanimationevent()'
        name = 'initAnimationEvent()'
        slug = 'web-css-background-size_initanimationevent'
        self.assert_feature(
            cell, feature_id, name, slug, canonical=True, standardized=False)

    def test_ks_deprecated_inline(self):
        cell = '<code>initAnimationEvent()</code> {{deprecated_inline}}'
        feature_id = '_initanimationevent()'
        name = 'initAnimationEvent()'
        slug = 'web-css-background-size_initanimationevent'
        self.assert_feature(
            cell, feature_id, name, slug, canonical=True, obsolete=True)

    def test_ks_obsolete_inline(self):
        # https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API
        cell = 'Version -76 support {{obsolete_inline}}'
        feature_id = '_version -76 support'
        name = 'Version -76 support'
        slug = 'web-css-background-size_version_-76_support'
        self.assert_feature(cell, feature_id, name, slug, obsolete=True)

    def test_ks_htmlelement(self):
        cell = '{{ HTMLElement("progress") }}'
        feature_id = '_progress'
        name = '<progress>'
        slug = 'web-css-background-size_progress'
        self.assert_feature(cell, feature_id, name, slug, canonical=True)

    def test_ks_domxref(self):
        cell = '{{domxref("DeviceProximityEvent")}}'
        feature_id = '_deviceproximityevent'
        name = 'DeviceProximityEvent'
        slug = 'web-css-background-size_deviceproximityevent'
        self.assert_feature(cell, feature_id, name, slug, canonical=True)

    def test_unknown_kumascript(self):
        cell = 'feature foo {{bar}}'
        feature_id = '_feature foo'
        name = 'feature foo'
        slug = 'web-css-background-size_feature_foo'
        issue = ('unknown_kumascript', 16, 23,
                 {'name': 'bar', 'args': [], 'kumascript': '{{bar}}',
                  'scope': self.scope})
        self.assert_feature(cell, feature_id, name, slug, issues=[issue])

    def test_nonascii_name(self):
        # https://developer.mozilla.org/en-US/docs/Web/CSS/font-variant
        cell = '<code>ß</code> → <code>SS</code>'
        feature_id = '_\xdf \u2192 ss'
        name = '<code>\xdf</code> \u2192 <code>SS</code>'
        slug = 'web-css-background-size_ss'
        self.assert_feature(cell, feature_id, name, slug)

    def test_footnote(self):
        # https://developer.mozilla.org/en-US/docs/Web/CSS/text-align
        cell = 'Block alignment values [1] {{not_standard_inline}}'
        feature_id = '_block alignment values'
        name = 'Block alignment values'
        slug = 'web-css-background-size_block_alignment_values'
        issue = ('footnote_feature', 27, 31, {})
        self.assert_feature(
            cell, feature_id, name, slug, standardized=False, issues=[issue])

    def test_bracket(self):
        # https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input
        cell = 'accept=[file extension]'
        feature_id = '_accept=[file extension]'
        name = 'accept=[file extension]'
        slug = 'web-css-background-size_accept_file_extension'
        self.assert_feature(cell, feature_id, name, slug)

    def test_digit(self):
        # https://developer.mozilla.org/en-US/docs/Web/CSS/transform
        cell = '3D Support'
        feature_id = '_3d support'
        name = '3D Support'
        slug = 'web-css-background-size_3d_support'
        self.assert_feature(cell, feature_id, name, slug)

    def test_link(self):
        # https://developer.mozilla.org/en-US/docs/Web/API/EventSource
        cell = ('<a href="/En/HTTP_access_control">'
                'Cross-Origin Resource Sharing</a><br>')
        feature_id = '_cross-origin resource sharing'
        name = 'Cross-Origin Resource Sharing'
        slug = 'web-css-background-size_cross-origin_resource_shar'
        issue = (
            'tag_dropped', 4, 38, {'tag': 'a', 'scope': self.scope})
        self.assert_feature(cell, feature_id, name, slug, issues=[issue])

    def test_p(self):
        # https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/const
        cell = '<p>Reassignment fails</p>'
        feature_id = '_reassignment fails'
        name = 'Reassignment fails'
        slug = 'web-css-background-size_reassignment_fails'
        issue = ('tag_dropped', 4, 7, {'tag': 'p', 'scope': self.scope})
        self.assert_feature(cell, feature_id, name, slug, issues=[issue])

    def test_span(self):
        cell = '<span class="strong">Strong</span>'
        feature_id = '_strong'
        name = 'Strong'
        slug = 'web-css-background-size_strong'
        issue = ('tag_dropped', 4, 25, {'tag': 'span', 'scope': self.scope})
        self.assert_feature(cell, feature_id, name, slug, issues=[issue])

    def test_table(self):
        # https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack
        # The indentation below is significant: the tag_dropped issue
        # offsets (4/32, 35/42, 47/51, 58/62) point into this string.
        cell = """\
<table class="compat-table">
  <tbody>
    <tr>
      <td><code>.stop()</code></td>
    </tr>
  </tbody>
</table>"""
        feature_id = '_.stop()'
        name = '.stop()'
        slug = 'web-css-background-size_stop'
        issues = [
            ('tag_dropped', 4, 32, {'tag': 'table', 'scope': self.scope}),
            ('tag_dropped', 35, 42, {'tag': 'tbody', 'scope': self.scope}),
            ('tag_dropped', 47, 51, {'tag': 'tr', 'scope': self.scope}),
            ('tag_dropped', 58, 62, {'tag': 'td', 'scope': self.scope})]
        self.assert_feature(
            cell, feature_id, name, slug, canonical=True, issues=issues)
class TestSupportGrammar(TestCase):
    """Tests for parsing fragments of support cells."""

    def assert_version(self, text, version, eng_version=None):
        """Parse text as a version cell and check the captured groups."""
        ws1, version_node, ws2 = (
            compat_support_grammar["cell_version"].parse(text))
        match = version_node.match.groupdict()
        expected = {"version": version, "eng_version": eng_version}
        self.assertEqual(expected, match)

    def test_version_number(self):
        self.assert_version("1", version="1")

    def test_cell_version_number_dotted(self):
        self.assert_version("1.0", version="1.0")

    def test_cell_version_number_spaces(self):
        self.assert_version("1 ", version="1")

    def test_cell_version_number_dotted_spaces(self):
        self.assert_version("1.0\n\t", version="1.0")

    def test_cell_version_number_with_engine(self):
        self.assert_version("1.0 (85)", version="1.0", eng_version="85")

    def test_cell_version_number_with_dotted_engine(self):
        self.assert_version("5.0 (532.5)", version="5.0", eng_version="532.5")

    def assert_no_prefix(self, text):
        """Parse text as a "no prefix" phrase; parse() raises on failure."""
        node = compat_support_grammar["cell_noprefix"].parse(text)
        self.assertEqual(text, node.text)

    def test_unprefixed(self):
        # https://developer.mozilla.org/en-US/docs/Web/API/AudioContext.createBufferSource
        self.assert_no_prefix(" (unprefixed) ")

    def test_noprefix(self):
        # https://developer.mozilla.org/en-US/docs/Web/API/Navigator.vibrate
        self.assert_no_prefix(" (no prefix) ")

    def test_without_prefix_naked(self):
        # https://developer.mozilla.org/en-US/docs/Web/CSS/text-decoration-line
        self.assert_no_prefix("without prefix")

    def test_without_prefix(self):
        # https://developer.mozilla.org/en-US/docs/Web/API/BatteryManager
        self.assert_no_prefix(" (without prefix) ")

    def assert_partial(self, text):
        """Parse text as a "partial support" phrase."""
        node = compat_support_grammar['cell_partial'].parse(text)
        self.assertEqual(text, node.text)

    def test_comma_partial(self):
        # https://developer.mozilla.org/en-US/docs/Web/API/IDBCursor
        self.assert_partial(", partial")

    def test_parens_partial(self):
        # (renamed from test_parens_partal: typo in the test name)
        # https://developer.mozilla.org/en-US/docs/Web/CSS/text-decoration
        self.assert_partial("(partial)")
class TestCompatVersion(TestCase):
    """Tests for the CellVersion helper class."""

    def check(self, cv, expected_text):
        # Every CellVersion normalizes its version to dotted form.
        self.assertEqual('1.0', cv.version)
        self.assertEqual(expected_text, text_type(cv))

    def test_dotted(self):
        self.check(CellVersion(raw='1.0', version='1.0'), '1.0')

    def test_plain(self):
        # A bare "1" is normalized to "1.0".
        self.check(CellVersion(raw='1', version='1'), '1.0')

    def test_with_engine(self):
        # The engine version is rendered in parentheses after the version.
        self.check(
            CellVersion(raw='1.0 (85)', version='1.0', engine_version='85'),
            '1.0 (85)')
class TestSupportVisitor(TestCase):
    """Tests for converting a support cell into version/support dicts.

    NOTE(review): issue tuples and footnote_id tuples embed character
    offsets into the '<td>...</td>' row cell built by assert_support,
    so the cell strings are position-sensitive.
    """

    scope = 'compatibility support'

    def setUp(self):
        # Placeholder ('_'-prefixed) IDs simulate an unmatched feature
        # and browser; tests that need real records call set_browser().
        self.feature_id = '_feature'
        self.browser_id = '_browser'
        self.browser_name = 'Browser'
        self.browser_slug = 'browser'

    def set_browser(self, browser):
        """Point the visitor at an existing Browser instance."""
        self.browser_id = browser.id
        self.browser_name = browser.name
        self.browser_slug = browser.slug

    def assert_support(
            self, contents, expected_versions=None, expected_supports=None,
            issues=None):
        """Visit a '<td>' cell and assert extracted versions and supports.

        expected_versions and expected_supports are parallel lists; IDs
        and cross-references are filled in below when not given.
        """
        row_cell = "<td>%s</td>" % contents
        parsed = compat_support_grammar['html'].parse(row_cell)
        self.visitor = CompatSupportVisitor(
            self.feature_id, self.browser_id, self.browser_name,
            self.browser_slug)
        self.visitor.visit(parsed)
        expected_versions = expected_versions or []
        expected_supports = expected_supports or []
        self.assertEqual(len(expected_versions), len(expected_supports))
        for version, support in zip(expected_versions, expected_supports):
            # Fill in default placeholder IDs and link support -> version
            # -> browser/feature, mirroring the visitor's conventions.
            if 'id' not in version:
                version['id'] = '_{}-{}'.format(
                    self.browser_name, version['version'])
            version['browser'] = self.browser_id
            if 'id' not in support:
                support['id'] = '_{}-{}'.format(self.feature_id, version['id'])
            support['version'] = version['id']
            support['feature'] = self.feature_id
        self.assertEqual(expected_versions, self.visitor.versions)
        self.assertEqual(expected_supports, self.visitor.supports)
        self.assertEqual(issues or [], self.visitor.issues)

    def test_version(self):
        self.assert_support('1.0', [{'version': '1.0'}], [{'support': 'yes'}])

    def test_version_matches(self):
        # An existing Version record is matched by its real ID.
        version = self.get_instance('Version', ('firefox_desktop', '1.0'))
        self.set_browser(version.browser)
        self.assert_support(
            '1.0', [{'version': '1.0', 'id': version.id}],
            [{'support': 'yes'}])

    def test_new_version_existing_browser(self):
        # A known browser with an unknown version raises an issue.
        browser = self.get_instance('Browser', 'firefox_desktop')
        self.set_browser(browser)
        issue = (
            'unknown_version', 4, 7,
            {'browser_id': browser.id,
             'browser_name': {"en": "Firefox for Desktop"},
             'browser_slug': 'firefox_desktop', 'version': '2.0'})
        self.assert_support(
            '2.0', [{'version': '2.0'}], [{'support': 'yes'}], issues=[issue])

    def test_support_matches(self):
        # An existing Support record is matched by its real ID.
        version = self.get_instance('Version', ('firefox_desktop', '1.0'))
        self.set_browser(version.browser)
        feature = self.get_instance(
            'Feature', 'web-css-background-size-contain_and_cover')
        self.feature_id = feature.id
        support = self.create(Support, version=version, feature=feature)
        self.assert_support(
            '1.0',
            [{'version': '1.0', 'id': version.id}],
            [{'support': 'yes', 'id': support.id}])

    def test_compatno(self):
        self.assert_support(
            '{{CompatNo}}',
            [{'version': 'current'}], [{'support': 'no'}])

    def test_compatversionunknown(self):
        self.assert_support(
            '{{CompatVersionUnknown}}',
            [{'version': 'current'}], [{'support': 'yes'}])

    def test_compatunknown(self):
        # CompatUnknown produces no version and no support at all.
        self.assert_support('{{CompatUnknown}}', [], [])

    def test_compatgeckodesktop(self):
        self.assert_support(
            '{{CompatGeckoDesktop("1")}}',
            [{'version': '1.0'}], [{'support': 'yes'}])

    def test_compatgeckodesktop_bad_num(self):
        self.assert_support(
            '{{CompatGeckoDesktop("1.1")}}',
            issues=[('compatgeckodesktop_unknown', 4, 33, {'version': '1.1'})])

    def test_compatgeckofxos(self):
        self.assert_support(
            '{{CompatGeckoFxOS("7")}}',
            [{'version': '1.0'}], [{'support': 'yes'}])

    def test_compatgeckofxos_bad_version(self):
        self.assert_support(
            '{{CompatGeckoFxOS("999999")}}',
            issues=[('compatgeckofxos_unknown', 4, 33, {'version': '999999'})])

    def test_compatgeckofxos_bad_override(self):
        self.assert_support(
            '{{CompatGeckoFxOS("18","5.0")}}',
            issues=[('compatgeckofxos_override', 4, 35,
                     {'override': '5.0', 'version': '18'})])

    def test_compatgeckomobile(self):
        self.assert_support(
            '{{CompatGeckoMobile("1")}}',
            [{'version': '1.0'}], [{'support': 'yes'}])

    def test_compatandroid(self):
        self.assert_support(
            '{{CompatAndroid("3.0")}}',
            [{'version': '3.0'}], [{'support': 'yes'}])

    def test_compatnightly(self):
        self.assert_support(
            '{{CompatNightly}}',
            [{'version': 'nightly'}], [{'support': 'yes'}])

    def test_unknown_kumascript(self):
        issues = [(
            'unknown_kumascript', 4, 19,
            {'name': 'UnknownKuma', 'args': [],
             'scope': 'compatibility support',
             'kumascript': "{{UnknownKuma}}"})]
        self.assert_support('{{UnknownKuma}}', issues=issues)

    def test_with_prefix_and_break(self):
        # <br> splits the cell into one (version, support) pair per line.
        self.assert_support(
            ('{{CompatVersionUnknown}}{{property_prefix("-webkit")}}<br>\n'
             ' 2.3'),
            [{'version': 'current'}, {'version': '2.3'}],
            [{'support': 'yes', 'prefix': '-webkit'}, {'support': 'yes'}])

    def test_p_tags(self):
        self.assert_support(
            '<p>4.0</p><p>32</p>',
            [{'version': '4.0'}, {'version': '32.0'}],
            [{'support': 'yes'}, {'support': 'yes'}])

    def test_two_line_note(self):
        self.assert_support(
            '18<br>\n(behind a pref) [1]',
            [{'version': '18.0'}],
            [{'support': 'yes', 'footnote_id': ('1', 27, 30)}],
            issues=[('inline_text', 10, 27, {'text': '(behind a pref)'})])

    def test_removed_in_gecko(self):
        # "Removed in <version>" flips the second entry to support 'no'.
        self.assert_support(
            ('{{ CompatGeckoMobile("6.0") }}<br>'
             'Removed in {{ CompatGeckoMobile("23.0") }}'),
            [{'version': '6.0'}, {'version': '23.0'}],
            [{'support': 'yes'}, {'support': 'no'}])

    def test_multi_br(self):
        self.assert_support(
            ('{{ CompatGeckoMobile("6.0") }}<br><br>'
             'Removed in {{ CompatGeckoMobile("23.0") }}'),
            [{'version': '6.0'}, {'version': '23.0'}],
            [{'support': 'yes'}, {'support': 'no'}])

    def test_removed_in_version(self):
        self.assert_support(
            'Removed in 32',
            [{'version': '32.0'}], [{'support': 'no'}])

    def test_unprefixed(self):
        # https://developer.mozilla.org/en-US/docs/Web/API/AudioContext.createBufferSource
        self.assert_support(
            '32 (unprefixed)',
            [{'version': '32.0'}], [{'support': 'yes'}])

    def test_partial(self):
        # https://developer.mozilla.org/en-US/docs/Web/API/IDBCursor
        self.assert_support(
            '10, partial',
            [{'version': '10.0'}], [{'support': 'partial'}])

    def test_unmatched_free_text(self):
        self.assert_support(
            '32 (or earlier)',
            [{'version': '32.0'}], [{'support': 'yes'}],
            issues=[('inline_text', 7, 19, {'text': '(or earlier)'})])

    def test_code_block(self):
        # https://developer.mozilla.org/en-US/docs/Web/CSS/order
        self.assert_support(
            '32 with alt name <code>foobar</code>',
            [{'version': '32.0'}], [{'support': 'yes'}],
            issues=[
                ('inline_text', 7, 21, {'text': 'with alt name'}),
                ('inline_text', 21, 40, {'text': '<code>foobar</code>'})])

    def test_spaces(self):
        # A whitespace-only cell yields nothing.
        self.assert_support('  ')

    def test_prefix_plus_footnote(self):
        # A prefixed entry with a footnote is only partial support.
        self.assert_support(
            '18{{property_prefix("-webkit")}} [1]',
            [{'version': '18.0'}],
            [{'support': 'partial', 'prefix': '-webkit',
              'footnote_id': ('1', 37, 40)}])

    def test_prefix_double_footnote(self):
        # https://developer.mozilla.org/en-US/docs/Web/API/CSSSupportsRule
        self.assert_support(
            '{{ CompatGeckoDesktop("17") }} [1][2]',
            [{'version': '17.0'}],
            [{'support': 'yes', 'footnote_id': ('1', 35, 38)}],
            issues=[('footnote_multiple', 38, 41,
                     {'prev_footnote_id': '1', 'footnote_id': '2'})])

    def test_double_footnote_link_sup(self):
        # https://developer.mozilla.org/en-US/docs/Web/CSS/flex
        self.assert_support(
            '{{CompatGeckoDesktop("20.0")}} '
            '<sup><a href="#bc2">[2]</a><a href="#bc3">[3]</a></sup>',
            [{'version': '20.0'}],
            [{'support': 'yes', 'footnote_id': ('2', 55, 58)}],
            issues=[('footnote_multiple', 77, 80,
                     {'prev_footnote_id': '2', 'footnote_id': '3'})])

    def test_star_footnote(self):
        # TODO: use raw footnote once footnote section is converted
        self.assert_support(
            '{{CompatGeckoDesktop("20.0")}} [***]',
            [{'version': '20.0'}],
            [{'support': 'yes', 'footnote_id': ('3', 35, 40)}])

    def test_nbsp(self):
        self.assert_support(
            '15&nbsp;{{property_prefix("webkit")}}',
            [{'version': '15.0'}], [{'support': 'yes', 'prefix': 'webkit'}])

    def test_other_kumascript(self):
        # A macro valid in another scope raises unexpected_kumascript.
        issue = (
            'unexpected_kumascript', 7, 30,
            {'kumascript': '{{experimental_inline}}',
             'name': 'experimental_inline', 'args': [], 'scope': self.scope,
             'expected_scopes': 'compatibility feature'})
        self.assert_support(
            '22 {{experimental_inline}}',
            [{'version': '22.0'}], [{'support': 'yes'}], issues=[issue])

    def test_multiversion_prefix_no(self):
        # https://developer.mozilla.org/en-US/docs/Web/API/Text/replaceWholeText
        self.assert_support(
            '{{CompatVersionUnknown}} [1] <br> 30.0 <br> {{CompatNo}} 41.0',
            [{'version': 'current'}, {'version': '30.0'}, {'version': '41.0'}],
            [{'support': 'yes', 'footnote_id': ('1', 29, 33)},
             {'support': 'yes'}, {'support': 'no'}])

    def test_multiversion_suffix_no(self):
        self.assert_support(
            '{{CompatVersionUnknown}} [1] <br> 30.0 <br> 41.0 {{CompatNo}}',
            [{'version': 'current'}, {'version': '30.0'}, {'version': '41.0'}],
            [{'support': 'yes', 'footnote_id': ('1', 29, 33)},
             {'support': 'yes'}, {'support': 'no'}])
class TestFootnoteGrammar(TestCase):
    """Smoke test for parsing footnote paragraphs."""

    def test_footnote_paragraph(self):
        html = '<p>[2] A footnote</p>'
        tree = compat_footnote_grammar['html'].parse(html)
        # The grammar must consume the entire paragraph.
        self.assertEqual(html, tree.text)
class TestFootnoteVisitor(TestCase):
    """Tests for extracting footnotes from the section after the tables.

    Expected footnotes map footnote_id -> (text, start, end), where the
    offsets are character positions into the raw HTML.
    """

    scope = 'compatibility footnote'

    def setUp(self):
        self.visitor = CompatFootnoteVisitor()

    def assert_footnotes(self, content, expected, issues=None):
        """Visit footnote HTML and assert extracted footnotes and issues."""
        parsed = compat_footnote_grammar['html'].parse(content)
        self.visitor.visit(parsed)
        footnotes = self.visitor.finalize_footnotes()
        self.assertEqual(expected, footnotes)
        self.assertEqual(issues or [], self.visitor.issues)

    def test_empty(self):
        footnotes = '\n'
        expected = {}
        self.assert_footnotes(footnotes, expected)

    def test_simple(self):
        footnotes = "<p>[1] A footnote.</p>"
        expected = {'1': ('A footnote.', 0, 22)}
        self.assert_footnotes(footnotes, expected)

    def test_multi_paragraph(self):
        # Following paragraphs without an ID are merged into footnote 1.
        footnotes = "<p>[1] Footnote line 1.</p><p>Footnote line 2.</p>"
        expected = {
            '1': ("<p>Footnote line 1.</p>\n<p>Footnote line 2.</p>", 0, 50)}
        self.assert_footnotes(footnotes, expected)

    def test_multiple_footnotes(self):
        footnotes = "<p>[1] Footnote 1.</p><p>[2] Footnote 2.</p>"
        expected = {'1': ('Footnote 1.', 0, 22), '2': ('Footnote 2.', 22, 44)}
        self.assert_footnotes(footnotes, expected)

    def test_kumascript_cssxref(self):
        # cssxref macros are expanded into MDN links.
        footnotes = '<p>[1] Use {{cssxref("-moz-border-image")}}</p>'
        expected = {
            '1': (
                'Use <a href="https://developer.mozilla.org/en-US/docs/Web/'
                'CSS/-moz-border-image"><code>-moz-border-image</code></a>',
                0, 47)}
        self.assert_footnotes(footnotes, expected)

    def test_unknown_kumascript(self):
        # (renamed from test_unknown_kumascriptscript: typo in the name)
        footnotes = (
            "<p>[1] Footnote {{UnknownKuma}} but the beat continues.</p>")
        expected = {'1': ('Footnote but the beat continues.', 0, 59)}
        issue = (
            'unknown_kumascript', 16, 32,
            {'name': 'UnknownKuma', 'args': [], 'scope': 'footnote',
             'kumascript': '{{UnknownKuma}}'})
        self.assert_footnotes(footnotes, expected, issues=[issue])

    def test_pre_section(self):
        footnotes = '<p>[1] Here\'s some code:</p><pre>foo = bar</pre>'
        expected = {
            '1': ("<p>Here's some code:</p>\n<pre>foo = bar</pre>", 0, 48)}
        self.assert_footnotes(footnotes, expected)

    def test_pre_without_footnotes(self):
        footnotes = '<p>Here\'s some code:</p><pre>foo = bar</pre>'
        issues = [
            ('footnote_no_id', 0, 24, {}),
            ('footnote_no_id', 24, 44, {})]
        self.assert_footnotes(footnotes, {}, issues)

    def test_pre_with_attrs_section(self):
        # https://developer.mozilla.org/en-US/docs/Web/CSS/white-space
        # <pre> attributes are dropped with an issue.
        footnotes = (
            '<p>[1] Here\'s some code:</p>\n'
            '<pre class="brush:css">\n'
            '.foo {background-image: url(bg-image.png);}\n'
            '</pre>')
        expected = {
            '1': (
                "<p>Here's some code:</p>\n<pre>\n"
                ".foo {background-image: url(bg-image.png);}\n</pre>",
                0, 103)}
        issue = (
            'unexpected_attribute', 34, 51,
            {'ident': 'class', 'node_type': 'pre', 'value': 'brush:css',
             'expected': 'no attributes'})
        self.assert_footnotes(footnotes, expected, issues=[issue])

    def test_asterisk(self):
        # A '[*]' marker is converted to numeric footnote '1'.
        footnotes = "<p>[*] A footnote</p>"
        expected = {'1': ('A footnote', 0, 21)}
        self.assert_footnotes(footnotes, expected)

    def test_bad_footnote(self):
        footnotes = "<p>A footnote.</p>"
        issue = ('footnote_no_id', 0, 18, {})
        self.assert_footnotes(footnotes, {}, issues=[issue])

    def test_bad_footnote_prefix(self):
        # The ID must come first; leading text raises footnote_no_id.
        footnotes = "<p>Footnote [1] - The content.</p>"
        expected = {'1': ('- The content.', 16, 30)}
        issue = ('footnote_no_id', 3, 12, {})
        self.assert_footnotes(footnotes, expected, issues=[issue])

    def test_bad_footnote_unknown_kumascript(self):
        # https://developer.mozilla.org/en-US/docs/Web/SVG/Element/color-profile
        footnotes = '<p>{{SVGRef}}</p>'
        issue = (
            'unknown_kumascript', 3, 13,
            {'name': 'SVGRef', 'args': [], 'kumascript': '{{SVGRef}}',
             'scope': u'footnote'})
        self.assert_footnotes(footnotes, {}, issues=[issue])

    def test_empty_paragraph_no_footnotes(self):
        footnotes = ('<p>&nbsp;</p>\n')
        self.assert_footnotes(footnotes, {})

    def test_empty_paragraph_invalid_footnote(self):
        footnotes = (
            '<p>&nbsp;</p>\n'
            '<p>Invalid footnote.</p>\n'
            '<p>&nbsp;</p>')
        issue = ('footnote_no_id', 9, 33, {})
        self.assert_footnotes(footnotes, {}, issues=[issue])
        self.assertEqual(footnotes[9:33], '<p>Invalid footnote.</p>')

    def test_empty_paragraphs_trimmed(self):
        footnote = (
            '<p>&nbsp;</p>\n'
            '<p>[1] Valid footnote.</p>'
            '<p>&nbsp;</p>'
            '<p>Continues footnote 1.</p>')
        expected = {
            '1': (
                '<p>Valid footnote.</p>\n<p>Continues footnote 1.</p>',
                9, 73)}
        self.assert_footnotes(footnote, expected)

    def test_code(self):
        footnote = (
            '<p>[1] From Firefox 31 to 35, <code>will-change</code>'
            ' was available...</p>')
        expected = {
            '1': (
                'From Firefox 31 to 35, <code>will-change</code>'
                ' was available...', 0, 75)}
        self.assert_footnotes(footnote, expected)

    def test_span(self):
        # https://developer.mozilla.org/en-US/docs/Web/Events/DOMContentLoaded
        footnote = (
            '<p>[1]<span style="font-size: 14px; line-height: 18px;">'
            'Bubbling for this event is supported by at least Gecko 1.9.2,'
            ' Chrome 6, and Safari 4.</span></p>')
        expected = {
            '1': ('Bubbling for this event is supported by at least Gecko'
                  ' 1.9.2, Chrome 6, and Safari 4.', 0, 152)}
        issue = ('tag_dropped', 6, 56, {'scope': 'footnote', 'tag': 'span'})
        self.assert_footnotes(footnote, expected, issues=[issue])

    def test_a(self):
        # https://developer.mozilla.org/en-US/docs/Web/SVG/SVG_as_an_Image
        # Only the href attribute of links is retained.
        footnote = (
            '<p>[1] Compatibility data from'
            '<a href="http://caniuse.com" title="http://caniuse.com">'
            'caniuse.com</a>.</p>')
        expected = {
            '1': ('Compatibility data from <a href="http://caniuse.com">'
                  'caniuse.com</a>.', 0, 106)}
        self.assert_footnotes(footnote, expected)

    def test_br_start(self):
        # https://developer.mozilla.org/en-US/docs/Web/API/VRFieldOfViewReadOnly/downDegrees
        footnote = "<p><br>\n[1] To find information on Chrome's WebVR...</p>"
        expected = {'1': ("To find information on Chrome's WebVR...", 0, 56)}
        self.assert_footnotes(footnote, expected)

    def test_br_end(self):
        # https://developer.mozilla.org/en-US/docs/Web/Events/wheel
        footnote = "<p>[1] Here's a footnote. <br></p>"
        expected = {'1': ("Here's a footnote.", 0, 34)}
        self.assert_footnotes(footnote, expected)

    def test_br_footnotes(self):
        # https://developer.mozilla.org/en-US/docs/Web/API/URLUtils/hash
        # <br> can separate two footnotes inside one paragraph.
        footnote = "<p>[1] Footnote 1.<br>[2] Footnote 2.</p>"
        expected = {'1': ("Footnote 1.", 7, 18), '2': ("Footnote 2.", 26, 37)}
        self.assert_footnotes(footnote, expected)
| renoirb/browsercompat | mdn/tests/test_compatibility.py | Python | mpl-2.0 | 41,045 | [
"VisIt"
] | 3b401840437aa1e1e826d6596ca702e108e9f17909574a535deb91223eef03d4 |
"""
========================================
Special functions (:mod:`scipy.special`)
========================================
.. module:: scipy.special
Nearly all of the functions below are universal functions and follow
broadcasting and automatic array-looping rules. Exceptions are noted.
Error handling
==============
Errors are handled by returning nans, or other appropriate values.
Some of the special function routines will emit warnings when an error
occurs. By default this is disabled. To enable such messages use
``errprint(1)``, and to disable such messages use ``errprint(0)``.
Example:
>>> print scipy.special.bdtr(-1,10,0.3)
>>> scipy.special.errprint(1)
>>> print scipy.special.bdtr(-1,10,0.3)
.. autosummary::
:toctree: generated/
errprint
SpecialFunctionWarning -- Warning that can be issued with ``errprint(True)``
Available functions
===================
Airy functions
--------------
.. autosummary::
:toctree: generated/
airy -- Airy functions and their derivatives.
airye -- Exponentially scaled Airy functions
ai_zeros -- [+]Zeros of Airy functions Ai(x) and Ai'(x)
bi_zeros -- [+]Zeros of Airy functions Bi(x) and Bi'(x)
Elliptic Functions and Integrals
--------------------------------
.. autosummary::
:toctree: generated/
ellipj -- Jacobian elliptic functions
ellipk -- Complete elliptic integral of the first kind.
ellipkm1 -- ellipkm1(x) == ellipk(1 - x)
ellipkinc -- Incomplete elliptic integral of the first kind.
ellipe -- Complete elliptic integral of the second kind.
ellipeinc -- Incomplete elliptic integral of the second kind.
Bessel Functions
----------------
.. autosummary::
:toctree: generated/
jv -- Bessel function of real-valued order and complex argument.
jve -- Exponentially scaled Bessel function.
yn -- Bessel function of second kind (integer order).
yv -- Bessel function of the second kind (real-valued order).
yve -- Exponentially scaled Bessel function of the second kind.
kn -- Modified Bessel function of the second kind (integer order).
kv -- Modified Bessel function of the second kind (real order).
kve -- Exponentially scaled modified Bessel function of the second kind.
iv -- Modified Bessel function.
ive -- Exponentially scaled modified Bessel function.
hankel1 -- Hankel function of the first kind.
hankel1e -- Exponentially scaled Hankel function of the first kind.
hankel2 -- Hankel function of the second kind.
hankel2e -- Exponentially scaled Hankel function of the second kind.
The following is not a universal function:
.. autosummary::
:toctree: generated/
lmbda -- [+]Sequence of lambda functions with arbitrary order v.
Zeros of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
jnjnp_zeros -- [+]Zeros of integer-order Bessel functions and derivatives sorted in order.
jnyn_zeros -- [+]Zeros of integer-order Bessel functions and derivatives as separate arrays.
jn_zeros -- [+]Zeros of Jn(x)
jnp_zeros -- [+]Zeros of Jn'(x)
yn_zeros -- [+]Zeros of Yn(x)
ynp_zeros -- [+]Zeros of Yn'(x)
y0_zeros -- [+]Complex zeros: Y0(z0)=0 and values of Y0'(z0)
y1_zeros -- [+]Complex zeros: Y1(z1)=0 and values of Y1'(z1)
y1p_zeros -- [+]Complex zeros of Y1'(z1')=0 and values of Y1(z1')
Faster versions of common Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
j0 -- Bessel function of order 0.
j1 -- Bessel function of order 1.
y0 -- Bessel function of second kind of order 0.
y1 -- Bessel function of second kind of order 1.
i0 -- Modified Bessel function of order 0.
i0e -- Exponentially scaled modified Bessel function of order 0.
i1 -- Modified Bessel function of order 1.
i1e -- Exponentially scaled modified Bessel function of order 1.
k0 -- Modified Bessel function of the second kind of order 0.
k0e -- Exponentially scaled modified Bessel function of the second kind of order 0.
k1 -- Modified Bessel function of the second kind of order 1.
k1e -- Exponentially scaled modified Bessel function of the second kind of order 1.
Integrals of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
itj0y0 -- Basic integrals of j0 and y0 from 0 to x.
it2j0y0 -- Integrals of (1-j0(t))/t from 0 to x and y0(t)/t from x to inf.
iti0k0 -- Basic integrals of i0 and k0 from 0 to x.
it2i0k0 -- Integrals of (i0(t)-1)/t from 0 to x and k0(t)/t from x to inf.
besselpoly -- Integral of a Bessel function: Jv(2* a* x) * x[+]lambda from x=0 to 1.
Derivatives of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
jvp -- Nth derivative of Jv(v,z)
yvp -- Nth derivative of Yv(v,z)
kvp -- Nth derivative of Kv(v,z)
ivp -- Nth derivative of Iv(v,z)
h1vp -- Nth derivative of H1v(v,z)
h2vp -- Nth derivative of H2v(v,z)
Spherical Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
sph_jn -- [+]Sequence of spherical Bessel functions, jn(z)
sph_yn -- [+]Sequence of spherical Bessel functions, yn(z)
sph_jnyn -- [+]Sequence of spherical Bessel functions, jn(z) and yn(z)
sph_in -- [+]Sequence of spherical Bessel functions, in(z)
sph_kn -- [+]Sequence of spherical Bessel functions, kn(z)
sph_inkn -- [+]Sequence of spherical Bessel functions, in(z) and kn(z)
Riccati-Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
riccati_jn -- [+]Sequence of Ricatti-Bessel functions of first kind.
riccati_yn -- [+]Sequence of Ricatti-Bessel functions of second kind.
Struve Functions
----------------
.. autosummary::
:toctree: generated/
struve -- Struve function --- Hv(x)
modstruve -- Modified Struve function --- Lv(x)
itstruve0 -- Integral of H0(t) from 0 to x
it2struve0 -- Integral of H0(t)/t from x to Inf.
itmodstruve0 -- Integral of L0(t) from 0 to x.
Raw Statistical Functions
-------------------------
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
.. autosummary::
:toctree: generated/
bdtr -- Sum of terms 0 through k of the binomial pdf.
bdtrc -- Sum of terms k+1 through n of the binomial pdf.
bdtri -- Inverse of bdtr
btdtr -- Integral from 0 to x of beta pdf.
btdtri -- Quantiles of beta distribution
fdtr -- Integral from 0 to x of F pdf.
fdtrc -- Integral from x to infinity under F pdf.
fdtri -- Inverse of fdtrc
gdtr -- Integral from 0 to x of gamma pdf.
gdtrc -- Integral from x to infinity under gamma pdf.
gdtria -- Inverse with respect to `a` of gdtr.
gdtrib -- Inverse with respect to `b` of gdtr.
gdtrix -- Inverse with respect to `x` of gdtr.
nbdtr -- Sum of terms 0 through k of the negative binomial pdf.
nbdtrc -- Sum of terms k+1 to infinity under negative binomial pdf.
nbdtri -- Inverse of nbdtr
   ncfdtr -- CDF of noncentral F distribution.
ncfdtridfd -- Find degrees of freedom (denominator) of noncentral F distribution.
ncfdtridfn -- Find degrees of freedom (numerator) of noncentral F distribution.
ncfdtri -- Inverse CDF of noncentral F distribution.
ncfdtrinc -- Find noncentrality parameter of noncentral F distribution.
nctdtr -- CDF of noncentral t distribution.
nctdtridf -- Find degrees of freedom of noncentral t distribution.
nctdtrit -- Inverse CDF of noncentral t distribution.
nctdtrinc -- Find noncentrality parameter of noncentral t distribution.
nrdtrimn -- Find mean of normal distribution from cdf and std.
nrdtrisd -- Find std of normal distribution from cdf and mean.
pdtr -- Sum of terms 0 through k of the Poisson pdf.
pdtrc -- Sum of terms k+1 to infinity of the Poisson pdf.
pdtri -- Inverse of pdtr
stdtr -- Integral from -infinity to t of the Student-t pdf.
stdtridf --
stdtrit --
chdtr -- Integral from 0 to x of the Chi-square pdf.
   chdtrc -- Integral from x to infinity of Chi-square pdf.
chdtri -- Inverse of chdtrc.
ndtr -- Integral from -infinity to x of standard normal pdf
ndtri -- Inverse of ndtr (quantiles)
smirnov -- Kolmogorov-Smirnov complementary CDF for one-sided test statistic (Dn+ or Dn-)
smirnovi -- Inverse of smirnov.
kolmogorov -- The complementary CDF of the (scaled) two-sided test statistic (Kn*) valid for large n.
kolmogi -- Inverse of kolmogorov
tklmbda -- Tukey-Lambda CDF
logit --
expit --
boxcox -- Compute the Box-Cox transformation.
boxcox1p -- Compute the Box-Cox transformation.
Information Theory Functions
----------------------------
.. autosummary::
:toctree: generated/
entr -- entr(x) = -x*log(x)
rel_entr -- rel_entr(x, y) = x*log(x/y)
kl_div -- kl_div(x, y) = x*log(x/y) - x + y
huber -- Huber loss function.
pseudo_huber -- Pseudo-Huber loss function.
Gamma and Related Functions
---------------------------
.. autosummary::
:toctree: generated/
gamma -- Gamma function.
gammaln -- Log of the absolute value of the gamma function.
gammasgn -- Sign of the gamma function.
gammainc -- Incomplete gamma integral.
gammaincinv -- Inverse of gammainc.
gammaincc -- Complemented incomplete gamma integral.
gammainccinv -- Inverse of gammaincc.
beta -- Beta function.
betaln -- Log of the absolute value of the beta function.
betainc -- Incomplete beta integral.
betaincinv -- Inverse of betainc.
psi -- Logarithmic derivative of the gamma function.
rgamma -- One divided by the gamma function.
polygamma -- Nth derivative of psi function.
multigammaln -- Log of the multivariate gamma.
digamma -- Digamma function (derivative of the logarithm of gamma).
Error Function and Fresnel Integrals
------------------------------------
.. autosummary::
:toctree: generated/
erf -- Error function.
erfc -- Complemented error function (1- erf(x))
erfcx -- Scaled complemented error function exp(x**2)*erfc(x)
erfi -- Imaginary error function, -i erf(i x)
erfinv -- Inverse of error function
erfcinv -- Inverse of erfc
   wofz -- Faddeeva function.
dawsn -- Dawson's integral.
fresnel -- Fresnel sine and cosine integrals.
fresnel_zeros -- Complex zeros of both Fresnel integrals
modfresnelp -- Modified Fresnel integrals F_+(x) and K_+(x)
modfresnelm -- Modified Fresnel integrals F_-(x) and K_-(x)
These are not universal functions:
.. autosummary::
:toctree: generated/
erf_zeros -- [+]Complex zeros of erf(z)
fresnelc_zeros -- [+]Complex zeros of Fresnel cosine integrals
fresnels_zeros -- [+]Complex zeros of Fresnel sine integrals
Legendre Functions
------------------
.. autosummary::
:toctree: generated/
lpmv -- Associated Legendre Function of arbitrary non-negative degree v.
sph_harm -- Spherical Harmonics (complex-valued) Y^m_n(theta,phi)
These are not universal functions:
.. autosummary::
:toctree: generated/
clpmn -- [+]Associated Legendre Function of the first kind for complex arguments.
lpn -- [+]Legendre Functions (polynomials) of the first kind
lqn -- [+]Legendre Functions of the second kind.
lpmn -- [+]Associated Legendre Function of the first kind for real arguments.
lqmn -- [+]Associated Legendre Function of the second kind.
Ellipsoidal Harmonics
---------------------
.. autosummary::
:toctree: generated/
ellip_harm -- Ellipsoidal harmonic E
ellip_harm_2 -- Ellipsoidal harmonic F
ellip_normal -- Ellipsoidal normalization constant
Orthogonal polynomials
----------------------
The following functions evaluate values of orthogonal polynomials:
.. autosummary::
:toctree: generated/
assoc_laguerre
eval_legendre
eval_chebyt
eval_chebyu
eval_chebyc
eval_chebys
eval_jacobi
eval_laguerre
eval_genlaguerre
eval_hermite
eval_hermitenorm
eval_gegenbauer
eval_sh_legendre
eval_sh_chebyt
eval_sh_chebyu
eval_sh_jacobi
The functions below, in turn, return the polynomial coefficients in
:ref:`orthopoly1d` objects, which function similarly as :ref:`numpy.poly1d`.
The :ref:`orthopoly1d` class also has an attribute ``weights`` which returns
the roots, weights, and total weights for the appropriate form of Gaussian
quadrature. These are returned in an ``n x 3`` array with roots in the first
column, weights in the second column, and total weights in the final column.
Note that ``orthopoly1d`` objects are converted to ``poly1d`` when doing
arithmetic, and lose information of the original orthogonal polynomial.
.. autosummary::
:toctree: generated/
legendre -- [+]Legendre polynomial P_n(x) (lpn -- for function).
chebyt -- [+]Chebyshev polynomial T_n(x)
chebyu -- [+]Chebyshev polynomial U_n(x)
chebyc -- [+]Chebyshev polynomial C_n(x)
chebys -- [+]Chebyshev polynomial S_n(x)
jacobi -- [+]Jacobi polynomial P^(alpha,beta)_n(x)
laguerre -- [+]Laguerre polynomial, L_n(x)
genlaguerre -- [+]Generalized (Associated) Laguerre polynomial, L^alpha_n(x)
hermite -- [+]Hermite polynomial H_n(x)
hermitenorm -- [+]Normalized Hermite polynomial, He_n(x)
gegenbauer -- [+]Gegenbauer (Ultraspherical) polynomials, C^(alpha)_n(x)
sh_legendre -- [+]shifted Legendre polynomial, P*_n(x)
sh_chebyt -- [+]shifted Chebyshev polynomial, T*_n(x)
sh_chebyu -- [+]shifted Chebyshev polynomial, U*_n(x)
sh_jacobi -- [+]shifted Jacobi polynomial, J*_n(x) = G^(p,q)_n(x)
.. warning::
Computing values of high-order polynomials (around ``order > 20``) using
polynomial coefficients is numerically unstable. To evaluate polynomial
values, the ``eval_*`` functions should be used instead.
Hypergeometric Functions
------------------------
.. autosummary::
:toctree: generated/
hyp2f1 -- Gauss hypergeometric function (2F1)
hyp1f1 -- Confluent hypergeometric function (1F1)
hyperu -- Confluent hypergeometric function (U)
hyp0f1 -- Confluent hypergeometric limit function (0F1)
hyp2f0 -- Hypergeometric function (2F0)
hyp1f2 -- Hypergeometric function (1F2)
hyp3f0 -- Hypergeometric function (3F0)
Parabolic Cylinder Functions
----------------------------
.. autosummary::
:toctree: generated/
pbdv -- Parabolic cylinder function Dv(x) and derivative.
pbvv -- Parabolic cylinder function Vv(x) and derivative.
pbwa -- Parabolic cylinder function W(a,x) and derivative.
These are not universal functions:
.. autosummary::
:toctree: generated/
pbdv_seq -- [+]Sequence of parabolic cylinder functions Dv(x)
pbvv_seq -- [+]Sequence of parabolic cylinder functions Vv(x)
pbdn_seq -- [+]Sequence of parabolic cylinder functions Dn(z), complex z
Mathieu and Related Functions
-----------------------------
.. autosummary::
:toctree: generated/
mathieu_a -- Characteristic values for even solution (ce_m)
mathieu_b -- Characteristic values for odd solution (se_m)
These are not universal functions:
.. autosummary::
:toctree: generated/
mathieu_even_coef -- [+]sequence of expansion coefficients for even solution
mathieu_odd_coef -- [+]sequence of expansion coefficients for odd solution
The following return both function and first derivative:
.. autosummary::
:toctree: generated/
mathieu_cem -- Even Mathieu function
mathieu_sem -- Odd Mathieu function
mathieu_modcem1 -- Even modified Mathieu function of the first kind
mathieu_modcem2 -- Even modified Mathieu function of the second kind
mathieu_modsem1 -- Odd modified Mathieu function of the first kind
mathieu_modsem2 -- Odd modified Mathieu function of the second kind
Spheroidal Wave Functions
-------------------------
.. autosummary::
:toctree: generated/
pro_ang1 -- Prolate spheroidal angular function of the first kind
pro_rad1 -- Prolate spheroidal radial function of the first kind
pro_rad2 -- Prolate spheroidal radial function of the second kind
obl_ang1 -- Oblate spheroidal angular function of the first kind
obl_rad1 -- Oblate spheroidal radial function of the first kind
obl_rad2 -- Oblate spheroidal radial function of the second kind
pro_cv -- Compute characteristic value for prolate functions
obl_cv -- Compute characteristic value for oblate functions
pro_cv_seq -- Compute sequence of prolate characteristic values
obl_cv_seq -- Compute sequence of oblate characteristic values
The following functions require pre-computed characteristic value:
.. autosummary::
:toctree: generated/
pro_ang1_cv -- Prolate spheroidal angular function of the first kind
pro_rad1_cv -- Prolate spheroidal radial function of the first kind
pro_rad2_cv -- Prolate spheroidal radial function of the second kind
obl_ang1_cv -- Oblate spheroidal angular function of the first kind
obl_rad1_cv -- Oblate spheroidal radial function of the first kind
obl_rad2_cv -- Oblate spheroidal radial function of the second kind
Kelvin Functions
----------------
.. autosummary::
:toctree: generated/
kelvin -- All Kelvin functions (order 0) and derivatives.
kelvin_zeros -- [+]Zeros of All Kelvin functions (order 0) and derivatives
ber -- Kelvin function ber x
bei -- Kelvin function bei x
berp -- Derivative of Kelvin function ber x
beip -- Derivative of Kelvin function bei x
ker -- Kelvin function ker x
kei -- Kelvin function kei x
kerp -- Derivative of Kelvin function ker x
keip -- Derivative of Kelvin function kei x
These are not universal functions:
.. autosummary::
:toctree: generated/
   ber_zeros -- [+]Zeros of Kelvin function ber x
   bei_zeros -- [+]Zeros of Kelvin function bei x
berp_zeros -- [+]Zeros of derivative of Kelvin function ber x
beip_zeros -- [+]Zeros of derivative of Kelvin function bei x
   ker_zeros -- [+]Zeros of Kelvin function ker x
   kei_zeros -- [+]Zeros of Kelvin function kei x
kerp_zeros -- [+]Zeros of derivative of Kelvin function ker x
keip_zeros -- [+]Zeros of derivative of Kelvin function kei x
Combinatorics
-------------
.. autosummary::
:toctree: generated/
comb -- [+]Combinations of N things taken k at a time, "N choose k"
perm -- [+]Permutations of N things taken k at a time, "k-permutations of N"
Other Special Functions
-----------------------
.. autosummary::
:toctree: generated/
agm -- Arithmetic-Geometric Mean
bernoulli -- Bernoulli numbers
binom -- Binomial coefficient.
diric -- Dirichlet function (periodic sinc)
euler -- Euler numbers
expn -- Exponential integral.
exp1 -- Exponential integral of order 1 (for complex argument)
expi -- Another exponential integral -- Ei(x)
factorial -- The factorial function, n! = special.gamma(n+1)
   factorial2 -- Double factorial, n!!
factorialk -- [+](...((n!)!)!...)! where there are k '!'
shichi -- Hyperbolic sine and cosine integrals.
sici -- Integral of the sinc and "cosinc" functions.
spence -- Dilogarithm integral.
lambertw -- Lambert W function
zeta -- Riemann zeta function of two arguments.
zetac -- Standard Riemann zeta function minus 1.
Convenience Functions
---------------------
.. autosummary::
:toctree: generated/
cbrt -- Cube root.
exp10 -- 10 raised to the x power.
exp2 -- 2 raised to the x power.
radian -- radian angle given degrees, minutes, and seconds.
cosdg -- cosine of the angle given in degrees.
sindg -- sine of the angle given in degrees.
tandg -- tangent of the angle given in degrees.
cotdg -- cotangent of the angle given in degrees.
log1p -- log(1+x)
expm1 -- exp(x)-1
cosm1 -- cos(x)-1
round -- round the argument to the nearest integer. If argument ends in 0.5 exactly, pick the nearest even integer.
xlogy -- x*log(y)
xlog1py -- x*log1p(y)
.. [+] in the description indicates a function which is not a universal
.. function and does not follow broadcasting and automatic
.. array-looping rules.
"""
from __future__ import division, print_function, absolute_import
from ._ufuncs import *
from .basic import *
from . import specfun
from . import orthogonal
from .orthogonal import *
from .spfun_stats import multigammaln
from ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal
from .lambertw import lambertw
# Export every public name defined by the star-imports above.
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.dual import register_func
# Register scipy's i0 so numpy.dual dispatches to this implementation.
register_func('i0',i0)
del register_func
from numpy.testing import Tester
# scipy.special.test() runs this package's test suite.
test = Tester().test
| chaluemwut/fbserver | venv/lib/python2.7/site-packages/scipy/special/__init__.py | Python | apache-2.0 | 21,520 | [
"Gaussian"
] | 525ce0eee1fe5b46d0b6e1081f99972402fde38720717ca5ee32402aad8a9257 |
def gather_exposures(filter, cluster):
    """Scan /<path>/<filter>*/SCIENCE/*fits and group frames by exposure root.

    Returns a dict keyed by exposure root (text before the first '_' in the
    file name); each value holds an 'images' list and a 'keywords' dict
    (filter, date, fil_directory, plus ROTATION/OBJECT/GABODSID from the
    FITS header).

    NOTE(review): relies on module-level globals (path, PHOTCONF, TEMPDIR,
    utilities, glob, re, string, os) being defined elsewhere -- confirm.
    """
    search_params = {'path':path, 'cluster':cluster, 'filter':filter, 'PHOTCONF':PHOTCONF, 'DATACONF':os.environ['DATACONF'], 'TEMPDIR':TEMPDIR,'fwhm':1.00}
    searchstr = "/%(path)s/%(filter)s*/SCIENCE/*fits" % search_params
    print searchstr
    files = glob(searchstr)
    files.sort()
    print files
    exposures = {}
    # first 30 files
    print files[0:30]
    # Skip wcs-corrected and sky-subtracted frames; group the rest.
    for file in files: #[0:30]:
        if string.find(file,'wcs') == -1 and string.find(file,'.sub.fits') == -1:
            res = re.split('_',re.split('/',file)[-1])
            print res
            if not exposures.has_key(res[0]): exposures[res[0]] = {}
            if not exposures[res[0]].has_key('images'): exposures[res[0]]['images'] = []
            if not exposures[res[0]].has_key('keywords'): exposures[res[0]]['keywords'] = {}
            exposures[res[0]]['images'].append(file) # res[0] is the root of the image name
            print 'hey', file
            reload(utilities)
            if not exposures[res[0]]['keywords'].has_key('ROTATION'): #if exposure does not have keywords yet, then get them
                exposures[res[0]]['keywords']['filter'] = filter
                # The directory component containing the filter name encodes
                # the observation date, e.g. '<filter>_<date>'.
                res2 = re.split('/',file)
                for r in res2:
                    if string.find(r,filter) != -1:
                        print r
                        exposures[res[0]]['keywords']['date'] = r.replace(filter + '_','')
                        exposures[res[0]]['keywords']['fil_directory'] = r
                        search_params['fil_directory'] = r
                kws = utilities.get_header_kw(file,['ROTATION','OBJECT','GABODSID']) # return KEY/NA if not SUBARU
                for kw in kws.keys():
                    exposures[res[0]]['keywords'][kw] = kws[kw]
    # NOTE(review): exposures_zero/_one split by camera rotation but are
    # never returned or stored -- only printed; the full dict is returned.
    exposures_zero = {}
    exposures_one = {}
    print '$$$$$'
    print 'separating into different camera rotations'
    for exposure in exposures.keys():
        print exposure,exposures[exposure]['keywords']['ROTATION']
        if int(exposures[exposure]['keywords']['ROTATION']) == 1:
            exposures_one[exposure] = exposures[exposure]
        if int(exposures[exposure]['keywords']['ROTATION']) == 0:
            exposures_zero[exposure] = exposures[exposure]
    return exposures
class image:
    def __init__(self,exposure):
        """Wrap one exposure dict; cache gain and pixel scale as floats."""
        self.exposure = exposure
        # NOTE(review): gain is read from the PIXSCALE keyword, identical to
        # pixscale below -- looks like it should read a GAIN keyword; confirm.
        self.gain = float(self.exposure['keywords']['PIXSCALE'])
        self.pixscale = float(self.exposure['keywords']['PIXSCALE'])
    # NOTE(review): get_root has no body, which is a syntax error as written;
    # find_seeing also references names (exposure, search_params, kw, TEMPDIR)
    # that are not defined in its scope. This block looks unfinished.
    def get_root(self,image):
    def find_seeing(self):
        ''' quick run through for seeing '''
        # Fork one SExtractor run per chip image, wait for all children,
        # then concatenate the per-chip catalogs and measure the seeing FWHM.
        children = []
        for image in exposure['images']:
            child = os.fork()
            if child:
                children.append(child)
            else:
                params = copy(search_params)
                params['GAIN'] = self.gain
                params['PIXSCALE'] = self.pixscale
                ROOT = re.split('\.',re.split('\/',image)[-1])[0]
                params['ROOT'] = ROOT
                NUM = re.split('O',re.split('\_',ROOT)[1])[0]
                params['NUM'] = NUM
                print ROOT
                weightim = "/%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits" % params
                #flagim = "/%(path)s/%(fil_directory)s/WEIGHTS/globalflag_%(NUM)s.fits" % params
                #finalflagim = TEMPDIR + "flag_%(ROOT)s.fits" % params
                params['finalflagim'] = weightim
                #os.system('rm ' + finalflagim)
                #command = "ic -p 16 '1 %2 %1 0 == ?' " + weightim + " " + flagim + " > " + finalflagim
                #utilities.run(command)
                #raw_input()
                command = "sex /%(path)s/%(fil_directory)s/SCIENCE/%(ROOT)s.fits -c %(PHOTCONF)s/singleastrom.conf.sex \
                -FLAG_IMAGE ''\
                -FLAG_TYPE MAX\
                -CATALOG_NAME %(TEMPDIR)s/seeing_%(ROOT)s.cat \
                -FILTER_NAME %(PHOTCONF)s/default.conv\
                -CATALOG_TYPE 'ASCII' \
                -DETECT_MINAREA 8 -DETECT_THRESH 8.\
                -ANALYSIS_THRESH 8 \
                -WEIGHT_IMAGE /%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits\
                -WEIGHT_TYPE MAP_WEIGHT\
                -PARAMETERS_NAME %(PHOTCONF)s/singleastrom.ascii.flag.sex" % params
                print command
                os.system(command)
                # Child process must exit; only the parent continues past the loop.
                sys.exit(0)
        for child in children:
            os.waitpid(child,0)
        command = 'cat ' + TEMPDIR + 'seeing_' + kw + '*cat > ' + TEMPDIR + 'paste_seeing_' + kw + '.cat'
        utilities.run(command)
        file_seeing = TEMPDIR + '/paste_seeing_' + kw + '.cat'
        PIXSCALE = float(exposure['keywords']['PIXSCALE'])
        reload(utilities)
        print file_seeing, kw, PIXSCALE, exposure['keywords']['PIXSCALE']
        fwhm = utilities.calc_seeing(file_seeing,10,PIXSCALE)
    def sextract(self,exposures):
        """Run SExtractor + scamp per exposure and build pasted catalogs.

        For each exposure: determine the CRPIX extent across chips, fork a
        child per chip that runs SExtractor, fits an astrometric solution
        with scamp against SDSS-R6, re-runs SExtractor on the WCS-fixed
        image and derives absolute chip positions; finally the per-chip
        catalogs are pasted together and stored as 'pasted_cat'.

        Returns (exposures, LENGTH1, LENGTH2) where LENGTH1/2 span the
        mosaic in pixels.

        NOTE(review): references names not defined in this scope
        (cluster, exposures_zero, exposures_one, fwhm, search_params) --
        presumably module globals or leftovers; confirm before reuse.
        """
        #from config_bonn import appendix, cluster, tag, arc, filter_root
        import utilities
        import os, re, bashreader, sys, string
        from glob import glob
        from copy import copy
        # Load tool paths from progs.ini into the environment.
        dict = bashreader.parseFile('progs.ini')
        for key in dict.keys():
            os.environ[key] = str(dict[key])
        TEMPDIR = '/tmp/'
        PHOTCONF = './photconf/'
        path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(cluster)s/' % {'cluster':cluster}
        print exposures.keys()
        print exposures_zero.keys()
        print exposures_one.keys()
        print 'hey2!!!!!!'
        print exposures
        # Debug shortcut: keep only the first exposure.
        first = exposures[exposures.keys()[0]]
        first['images'] = first['images']
        exposures = {exposures.keys()[0]: first}
        #exp_tmp = {}
        #for exposure in exposures.keys()[2:4]:
        #    exp_tmp[exposure] = exposures[exposure]
        #exposures = exp_tmp
        #exposures = {exposures.keys()[0]: exposures[exposures.keys()[0:4]]}
        print exposures
        print 'stop1'
        #temporary method
        measure_fwhm = 1
        for kw in exposures.keys(): # now go through exposure by exposure
            exposure = exposures[kw]
            print kw, exposure['images']
            print exposure['images']
            ''' get the CRPIX values '''
            # Track the extreme CRPIX values over all chips to get the
            # total mosaic extent (LENGTH1/LENGTH2) below.
            start = 1
            for image in exposure['images']:
                print image
                res = re.split('\_\d+',re.split('\/',image)[-1])
                #print res
                imroot = "/%(path)s/%(fil_directory)s/SCIENCE/" % search_params
                im = imroot + res[0] + '_1' + res[1]
                #print im
                crpix = utilities.get_header_kw(image,['CRPIX1','CRPIX2','NAXIS1','NAXIS2'])
                if start == 1:
                    crpixzero = copy(crpix)
                    crpixhigh = copy(crpix)
                    start = 0
                from copy import copy
                if float(crpix['CRPIX1']) > float(crpixzero['CRPIX1']) and float(crpix['CRPIX2']) > float(crpixzero['CRPIX2']):
                    crpixzero = copy(crpix)
                if float(crpix['CRPIX1']) < float(crpixhigh['CRPIX1']) and float(crpix['CRPIX2']) < float(crpixhigh['CRPIX2']):
                    crpixhigh = copy(crpix)
                print crpix, crpixzero, crpixhigh
            LENGTH1 = abs(float(crpixhigh['CRPIX1']) - float(crpixzero['CRPIX1'])) + float(crpix['NAXIS1'])
            LENGTH2 = abs(float(crpixhigh['CRPIX2']) - float(crpixzero['CRPIX2'])) + float(crpix['NAXIS2'])
            print crpixhigh['CRPIX1'], crpixzero['CRPIX1'], crpix['NAXIS1'], crpix['NAXIS2']
            print exposure['images']
            'exposures'
            # One forked child per chip: SExtractor -> scamp -> WCS fix ->
            # SExtractor again -> ldac conversions.
            children = []
            for image in exposure['images']:
                child = os.fork()
                if child:
                    children.append(child)
                else:
                    print fwhm
                    params = copy(search_params)
                    # NOTE(review): params has no 'ROOT'/'NUM' keys here, so
                    # the %-formatting below would raise KeyError -- confirm
                    # whether search_params is pre-populated elsewhere.
                    finalflagim = TEMPDIR + "flag_%(ROOT)s.fits" % params
                    weightim = "/%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits" % params
                    #flagim = "/%(path)s/%(fil_directory)s/WEIGHTS/globalflag_%(NUM)s.fits" % params
                    #finalflagim = TEMPDIR + "flag_%(ROOT)s.fits" % params
                    params['finalflagim'] = weightim
                    im = "/%(path)s/%(fil_directory)s/SCIENCE/%(ROOT)s.fits" % params
                    crpix = utilities.get_header_kw(im,['CRPIX1','CRPIX2'])
                    command = "sex /%(path)s/%(fil_directory)s/SCIENCE/%(ROOT)s.fits -c %(PHOTCONF)s/phot.conf.sex \
                    -PARAMETERS_NAME %(PHOTCONF)s/phot.param.sex \
                    -CATALOG_NAME %(TEMPDIR)s/%(ROOT)s.cat \
                    -FILTER_NAME %(DATACONF)s/default.conv\
                    -FILTER Y \
                    -FLAG_TYPE MAX\
                    -FLAG_IMAGE ''\
                    -SEEING_FWHM %(fwhm).3f \
                    -DETECT_MINAREA 10 -DETECT_THRESH 10 -ANALYSIS_THRESH 10 \
                    -MAG_ZEROPOINT 27.0 \
                    -GAIN %(GAIN).3f \
                    -WEIGHT_IMAGE /%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits\
                    -WEIGHT_TYPE MAP_WEIGHT" % params
                    #-CHECKIMAGE_TYPE BACKGROUND,APERTURES,SEGMENTATION\
                    #-CHECKIMAGE_NAME /%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.background.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.apertures.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.segmentation.fits\
                    catname = "%(TEMPDIR)s/%(ROOT)s.cat" % params
                    filtcatname = "%(TEMPDIR)s/%(ROOT)s.filt.cat" % params
                    print command
                    utilities.run(command,[catname])
                    # Keep only star-like detections for the astrometric fit.
                    utilities.run('ldacfilter -i ' + catname + ' -o ' + filtcatname + ' -t LDAC_OBJECTS\
                    -c "(CLASS_STAR > 0.5);"',[filtcatname])
                    import commands
                    lines = commands.getoutput('ldactoasc -s -b -i ' + filtcatname + ' -t LDAC_OBJECTS | wc -l')
                    import re
                    res = re.split('\n',lines)
                    print lines
                    # No star-like objects -> nothing to fit; child exits.
                    if int(res[-1]) == 0: sys.exit(0)
                    command = 'scamp ' + filtcatname + " -SOLVE_PHOTOM N -ASTREF_CATALOG SDSS-R6 -CHECKPLOT_TYPE NONE -WRITE_XML N "
                    print command
                    utilities.run(command)
                    #headfile = "%(TEMPDIR)s/%(ROOT)s.head" % params
                    headfile = "%(TEMPDIR)s/%(ROOT)s.filt.head" % params
                    # Parse 'KEY = value / comment' lines from the scamp header.
                    hf = open(headfile,'r').readlines()
                    hdict = {}
                    for line in hf:
                        import re
                        if string.find(line,'=') != -1:
                            res = re.split('=',line)
                            name = res[0].replace(' ','')
                            res = re.split('/',res[1])
                            value = res[0].replace(' ','')
                            print name, value
                            hdict[name] = value
                    # Copy the image and write the fitted WCS keywords into it.
                    imfix = "/tmp/%(ROOT)s.fixwcs.fits" % params
                    command = "cp " + im + " " + imfix
                    utilities.run(command)
                    # NOTE(review): 'CRPIX1' is listed twice and 'CRPIX2' never
                    # updated -- looks like a typo; confirm.
                    for name in ['CRVAL1','CRVAL2','CD1_1','CD1_2','CD2_1','CD2_2','CRPIX1','CRPIX1']:
                        command = 'sethead ' + imfix + ' ' + name + '=' + hdict[name]
                        print command
                        os.system(command)
                    command = "sex /tmp/%(ROOT)s.fixwcs.fits -c %(PHOTCONF)s/phot.conf.sex \
                    -PARAMETERS_NAME %(PHOTCONF)s/phot.param.sex \
                    -CATALOG_NAME %(TEMPDIR)s/%(ROOT)s.fixwcs.cat \
                    -FILTER_NAME %(DATACONF)s/default.conv\
                    -FILTER Y \
                    -FLAG_TYPE MAX\
                    -FLAG_IMAGE ''\
                    -SEEING_FWHM %(fwhm).3f \
                    -DETECT_MINAREA 5 -DETECT_THRESH 5 -ANALYSIS_THRESH 5 \
                    -MAG_ZEROPOINT 27.0 \
                    -GAIN %(GAIN).3f \
                    -WEIGHT_IMAGE /%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits\
                    -WEIGHT_TYPE MAP_WEIGHT" % params
                    #-CHECKIMAGE_TYPE BACKGROUND,APERTURES,SEGMENTATION\
                    #-CHECKIMAGE_NAME /%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.background.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.apertures.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.segmentation.fits\
                    catname = "%(TEMPDIR)s/%(ROOT)s.cat" % params
                    print command
                    utilities.run(command,[catname])
                    command = 'ldacconv -b 1 -c R -i ' + TEMPDIR + params['ROOT'] + '.fixwcs.cat -o ' + TEMPDIR + params['ROOT'] + '.conv'
                    print command
                    utilities.run(command)
                    # Xpos_ABS is difference of CRPIX and zero CRPIX
                    command = 'ldaccalc -i ' + TEMPDIR + params['ROOT'] + '.conv -o ' + TEMPDIR + params['ROOT'] + '.newpos -t OBJECTS -c "(Xpos + ' + str(float(crpixzero['CRPIX1']) - float(crpix['CRPIX1'])) + ');" -k FLOAT -n Xpos_ABS "" -c "(Ypos + ' + str(float(crpixzero['CRPIX2']) - float(crpix['CRPIX2'])) + ');" -k FLOAT -n Ypos_ABS "" -c "(Ypos*0 + ' + str(params['NUM']) + ');" -k FLOAT -n CHIP "" '
                    #command = 'ldaccalc -i ' + TEMPDIR + params['ROOT'] + '.conv -o ' + TEMPDIR + params['ROOT'] + '.newpos -t OBJECTS -c "(' + str(crpix['CRPIX1']) + ' - Xpos);" -k FLOAT -n Xpos_ABS "" -c "(' + str(crpix['CRPIX2']) + ' - Ypos);" -k FLOAT -n Ypos_ABS "" -c "(Ypos*0 + ' + str(params['NUM']) + ');" -k FLOAT -n CHIP "" '
                    print command
                    utilities.run(command)
                    # Child is done; only the parent continues past the loop.
                    sys.exit(0)
            for child in children:
                #print 'waiting for' child
                os.waitpid(child,0)
            # Paste all per-chip .newpos catalogs into one exposure catalog
            # and join the first two aperture magnitudes as named columns.
            from glob import glob
            outcat = TEMPDIR + 'tmppaste_' + kw + '.cat'
            newposlist = glob(TEMPDIR + kw + '*newpos')
            if len(newposlist) > 1:
                command = 'ldacpaste -i ' + TEMPDIR + kw + '*newpos -o ' + outcat
                print command
            else:
                command = 'cp ' + newposlist[0] + ' ' + outcat
            utilities.run(command)
            os.system('ldactoasc -i ' + outcat + ' -b -s -k MAG_APER MAGERR_APER -t OBJECTS > /tmp/' + kw + 'aper')
            os.system('asctoldac -i /tmp/' + kw + 'aper -o /tmp/' + kw + 'cat1 -t OBJECTS -c ./photconf/MAG_APER.conf')
            outfinal = TEMPDIR + 'paste_' + kw + '.cat'
            os.system('ldacjoinkey -i ' + outcat + ' -p /tmp/' + kw + 'cat1 -o ' + outfinal + ' -k MAG_APER1 MAG_APER2 MAGERR_APER1 MAGERR_APER2')
            exposures[kw]['pasted_cat'] = outfinal
        return exposures, LENGTH1, LENGTH2
    def match_simple(self,exposures,cluster):
        """Match each exposure's pasted catalog against SDSS star/galaxy cats.

        Downloads the SDSS reference catalogs via retrieve_test if missing,
        runs match_simple.sh for every exposure against both the star and
        galaxy catalog, stores the result path under
        exposures[exp]['matched_cat_<type>'] and symlinks it into the
        shared ILLUMINATION tree. Returns the updated exposures dict.
        """
        import os
        print exposures
        starcat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(cluster)s/PHOTOMETRY/sdssstar.cat' % {'cluster':cluster}
        galaxycat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(cluster)s/PHOTOMETRY/sdssgalaxy.cat' % {'cluster':cluster}
        path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(cluster)s/' % {'cluster':cluster}
        # NOTE(review): this format string has no %(cluster)s placeholder,
        # so the %-substitution is a no-op -- possibly a leftover.
        illum_path='/nfs/slac/g/ki/ki05/anja/SUBARU/ILLUMINATION/' % {'cluster':cluster}
        #os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
        os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/STAR/')
        os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/GALAXY/')
        from glob import glob
        for type,cat in [['galaxy',galaxycat],['star',starcat]]:
            # Fetch the SDSS reference catalog if it does not exist yet.
            if len(glob(cat)) == 0:
                image = exposures[exposures.keys()[0]]['images'][0]
                print image
                import retrieve_test
                retrieve_test.run(image,cat,type)
            for exposure in exposures.keys():
                print exposure + 'aa'
                catalog = exposures[exposure]['pasted_cat']
                filter = exposures[exposure]['keywords']['filter']
                ROTATION = exposures[exposure]['keywords']['ROTATION']
                #GABODSID = exposures[exposure]['keywords']['GABODSID']
                OBJECT = exposures[exposure]['keywords']['OBJECT']
                print catalog
                outcat = path + 'PHOTOMETRY/ILLUMINATION/' + type + '/' + 'matched_' + exposure + '_' + filter + '_' + ROTATION + '_' + type + '.cat'
                outcat_dir = path + 'PHOTOMETRY/ILLUMINATION/' + type + '/' + ROTATION + '/' + OBJECT + '/'
                os.system('mkdir -p ' + outcat_dir)
                file = 'matched_' + exposure + '.cat'
                linkdir = illum_path + '/' + filter + '/' + ROTATION + '/' + OBJECT + '/'
                #outcatlink = linkdir + 'matched_' + exposure + '_' + cluster + '_' + GABODSID + '.cat'
                outcatlink = linkdir + 'matched_' + exposure + '_' + cluster + '_' + type + '.cat'
                os.system('mkdir -p ' + linkdir)
                # Remove any stale output, then run the matcher.
                os.system('rm ' + outcat)
                command = 'match_simple.sh ' + catalog + ' ' + cat + ' ' + outcat
                print command
                os.system(command)
                exposures[exposure]['matched_cat_' + type] = outcat
                # Refresh the symlink in the shared ILLUMINATION tree.
                os.system('rm ' + outcatlink)
                command = 'ln -s ' + outcat + ' ' + outcatlink
                print command
                os.system(command)
        return exposures
def phot(self, exposures, filter, type, LENGTH1, LENGTH2):
"""Fit photometric zeropoints/illumination corrections per exposure.

Filters the matched catalogs with ldacfilter/ldaccalc, optionally shows
a saturation plot, fits photometry with photo_abs_new.run_through, and
finally accumulates the surviving stars by rotation (0/90 degrees) to
run photo_abs_new.calcDataIllum on each set.

NOTE(review): 'cluster' and 'base' are neither parameters nor locals --
presumably module globals; verify before reuse. Parameters 'filter' and
'type' shadow builtins and are also rebound by inner loops.
"""
import utilities
# Per-band comparison setup: SDSS band, colors and extinction coefficient.
info = {'B':{'filter':'g','color1':'gmr','color2':'umg','EXTCOEFF':-0.2104,'COLCOEFF':0.0},\
'W-J-B':{'filter':'g','color1':'gmr','color2':'umg','EXTCOEFF':-0.2104,'COLCOEFF':0.0},\
'W-J-V':{'filter':'g','color1':'gmr','color2':'rmi','EXTCOEFF':-0.1202,'COLCOEFF':0.0},\
'W-C-RC':{'filter':'r','color1':'rmi','color2':'gmr','EXTCOEFF':-0.0925,'COLCOEFF':0.0},\
'W-C-IC':{'filter':'i','color1':'imz','color2':'rmi','EXTCOEFF':-0.02728,'COLCOEFF':0.0},\
'W-S-Z+':{'filter':'z','color1':'imz','color2':'rmi','EXTCOEFF':0.0,'COLCOEFF':0.0}}
import mk_saturation_plot,os,re
os.environ['BONN_TARGET'] = cluster
os.environ['INSTRUMENT'] = 'SUBARU'
# Good-star collections split by instrument rotation angle.
stars_0 = []
stars_90 = []
for exposure in exposures.keys():
ROTATION = exposures[exposure]['keywords']['ROTATION']
print ROTATION
import os
# Parent PID makes the /tmp scratch file names unique per pipeline run.
ppid = str(os.getppid())
from glob import glob
for type in ['galaxy','star']:
file = exposures[exposure]['matched_cat_' + type]
print file
# Galaxies use AUTO magnitudes, stars use aperture magnitudes.
if type == 'galaxy':
mag='MAG_AUTO'
magerr='MAGERR_AUTO'
if type == 'star':
mag='MAG_APER2'
magerr='MAGERR_APER2'
# NOTE(review): single-element loop rebinding the 'filter' parameter --
# looks like a leftover from iterating several filters.
for filter in [filter]:
print 'filter', filter
os.environ['BONN_FILTER'] = filter
dict = info[filter]
print base + file
# Keep only objects with a valid Flag.
utilities.run('ldacfilter -i ' + base + file + ' -o /tmp/good.stars' + ppid + ' -t PSSC\
-c "(Flag!=-99);"',['/tmp/good.stars' + ppid])
#command = 'ldacfilter -i ' + base + file + ' -o /tmp/good.stars' + ppid + ' -t PSSC -c "(((SEx_IMAFLAGS_ISO=0 AND SEx_CLASS_STAR>0.0) AND (SEx_Flag=0)) AND Flag=0);"'
#print command
#raw_input()
#utilities.run(command,['/tmp/good.stars' + ppid])
#run('ldacfilter -i ' + base + file + ' -o /tmp/good.stars -t PSSC\
# -c "(SEx_CLASS_STAR>0.00);"',['/tmp/good.stars'])
# Require both comparison colors to be measured (> -900 sentinel).
utilities.run('ldacfilter -i /tmp/good.stars' + ppid + ' -o /tmp/good.colors' + ppid + ' -t PSSC\
-c "(' + dict['color1'] + '>-900) AND (' + dict['color2'] + '>-900);"',['/tmp/good.colors'])
#utilities.run('ldacfilter -i /tmp/good.stars' + ppid + ' -o /tmp/good.colors' + ppid + ' -t PSSC\
# -c "(' + dict['color1'] + '>-900) AND (' + dict['color2'] + '>-900);"',['/tmp/good.colors' + ppid])
# magdiff = reference magnitude minus measured magnitude.
utilities.run('ldaccalc -i /tmp/good.colors' + ppid + ' -t PSSC -c "(' + dict['filter'] + 'mag - SEx_' + mag + ');" -k FLOAT -n magdiff "" -o /tmp/all.diff.cat' + ppid ,['/tmp/all.diff.cat' + ppid] )
#utilities.run('ldactoasc -b -q -i /tmp/all.diff.cat' + ppid + ' -t PSSC -k SEx_' + mag + ' ' + dict['filter'] + 'mag SEx_FLUX_RADIUS SEx_CLASS_STAR ' + dict['filter'] + 'err ' + dict['color1'] + ' > /tmp/mk_sat' + ppid,['/tmp/mk_sat' + ppid] )
utilities.run('ldactoasc -b -q -i /tmp/all.diff.cat' + ppid + ' -t PSSC -k SEx_FLUX_MAX ' + dict['filter'] + 'mag SEx_FLUX_RADIUS SEx_CLASS_STAR ' + dict['filter'] + 'err ' + dict['color1'] + ' > /tmp/mk_sat' + ppid,['/tmp/mk_sat' + ppid] )
#utilities.run('ldactoasc -b -q -i ' + base + file + ' -t PSSC -k SEx_' + mag + ' ' + dict['filter'] + 'mag SEx_FLUX_RADIUS SEx_CLASS_STAR ' + dict['filter'] + 'err ' + dict['color1'] + ' > /tmp/mk_sat' + ppid,['/tmp/mk_sat' + ppid] )
# run('ldactoasc -b -q -i ' + base + file + ' -t PSSC -k SEx_' + mag + '_' + filter + ' ' + dict['filter'] + 'mag SEx_FLUX_RADIUS SEx_CLASS_STAR ' + dict['filter'] + 'err ' + dict['color1'] + ' > /tmp/mk_sat',['/tmp/mk_sat'] )
# Dead assignment: immediately overwritten by raw_input below.
val = []
val = raw_input("Look at the saturation plot?")
if len(val)>0:
if val[0] == 'y' or val[0] == 'Y':
mk_saturation_plot.mk_saturation('/tmp/mk_sat' + ppid,filter)
# make stellar saturation plot
#lower_mag,upper_mag,lower_diff,upper_diff = re.split('\s+',open('box' + filter,'r').readlines()[0])
# Hard-coded selection box; only used by the commented filters below.
lower_mag = str(10)
upper_mag = str(14.0)
lower_diff = str(5)
upper_diff = str(9)
if type == 'star':
lower_mag = str(13.2)
#utilities.run('ldacfilter -i /tmp/all.diff.cat' + ppid + ' -t PSSC\
# -c "(((SEx_' + mag + '>' + lower_mag + ') AND (SEx_' + mag + '<' + upper_mag + ')) AND (magdiff>' + lower_diff + ')) AND (magdiff<' + upper_diff + ');"\
# -o /tmp/filt.mag.cat' + ppid ,['/tmp/filt.mag.cat' + ppid])
#utilities.run('ldacfilter -i /tmp/all.diff.cat' + ppid + ' -t PSSC\
# -c "(((' + dict['filter'] + 'mag>' + lower_mag + ') AND (' + dict['filter'] + 'mag<' + upper_mag + ')) AND (magdiff>' + lower_diff + ')) AND (magdiff<' + upper_diff + ');"\
# -o /tmp/filt.mag.cat' + ppid ,['/tmp/filt.mag.cat' + ppid])
utilities.run('ldactoasc -b -q -i /tmp/all.diff.cat' + ppid + ' -t PSSC -k SEx_Xpos_ABS SEx_Ypos_ABS > /tmp/positions' + ppid,['/tmp/positions' + ppid] )
# Add a dummy AIRMASS column expected by the photometric fit input.
utilities.run('ldacaddkey -i /tmp/all.diff.cat' + ppid + ' -o /tmp/filt.airmass.cat' + ppid + ' -t PSSC -k AIRMASS 0.0 FLOAT "" ',['/tmp/filt.airmass.cat' + ppid] )
utilities.run('ldactoasc -b -q -i /tmp/filt.airmass.cat' + ppid + ' -t PSSC -k SEx_' + mag + ' ' + dict['filter'] + 'mag ' + dict['color1'] + ' ' + dict['color2'] + ' AIRMASS SEx_' + magerr + ' ' + dict['filter'] + 'err SEx_Xpos_ABS SEx_Ypos_ABS > /tmp/input.asc' + ppid ,['/tmp/input.asc' + ppid] )
#utilities.run('ldactoasc -b -q -i /tmp/filt.airmass.cat -t PSSC -k SEx_' + mag + ' ' + dict['filter'] + 'mag ' + dict['color1'] + ' ' + dict['color2'] + ' AIRMASS SEx_' + magerr + ' ' + dict['filter'] + 'err SEx_Ra SEx_Dec > /tmp/input.asc',['/tmp/input.asc'] )
# fit photometry
#utilities.run("./photo_abs_new.py --input=/tmp/input.asc \
# --output=/tmp/photo_res --extinction="+str(dict['EXTCOEFF'])+" \
# --color="+str(dict['COLCOEFF'])+" --night=-1 --label="+dict['color1']+" --sigmareject=3\
# --step=STEP_1 --bandcomp="+dict['filter']+" --color1="+dict['color1']+" --color2="+dict['color2'])
import photo_abs_new
# NOTE(review): extcoeff is passed dict['color1'] (a color name), not
# dict['EXTCOEFF'] as the commented CLI above suggests -- confirm.
good_stars = photo_abs_new.run_through('illumination',infile='/tmp/input.asc' + ppid,output='/tmp/photo_res',extcoeff=dict['color1'],sigmareject=3,step='STEP_1',bandcomp=dict['filter'],color1which=dict['color1'],color2which=dict['color2'])
if int(ROTATION) == 0:
stars_0.append(good_stars)
if int(ROTATION) == 1:
stars_90.append(good_stars)
from copy import copy
print 'running calcDataIllum'
# Concatenate the per-exposure star dictionaries key by key.
if len(stars_0)> 0:
dict = copy(stars_0[0])
blank_0 = {}
for key in dict.keys():
blank_0[key] = []
for i in range(len(stars_0)):
for j in range(len(stars_0[i][key])): blank_0[key].append(stars_0[i][key][j])
#print key, blank[key]
photo_abs_new.calcDataIllum('illumination',LENGTH1, LENGTH2, 1000, blank_0['corr_data'], blank_0['airmass_good'], blank_0['color1_good'], blank_0['color2_good'], blank_0['magErr_good'], blank_0['X_good'], blank_0['Y_good'],rot=0)
if len(stars_90)> 0:
dict = copy(stars_90[0])
blank_90 = {}
for key in dict.keys():
blank_90[key] = []
for i in range(len(stars_90)):
for j in range(len(stars_90[i][key])): blank_90[key].append(stars_90[i][key][j])
#print key, blank[key]
# NOTE(review): rot=0 for the 90-degree set looks suspicious -- the
# commented call below uses rot=1; confirm which is intended.
photo_abs_new.calcDataIllum('illumination',LENGTH1, LENGTH2, 1000, blank_90['corr_data'], blank_90['airmass_good'], blank_90['color1_good'], blank_90['color2_good'], blank_90['magErr_good'], blank_90['X_good'], blank_90['Y_good'],rot=0)
#photo_abs_new.calcDataIllum('illumination',1000, blank_90['corr_data'], blank_90['airmass_good'], blank_90['color1_good'], blank_90['color2_good'], blank_90['magErr_good'], blank_90['X_good'], blank_90['Y_good'],rot=1)
# Script driver: optionally reload a pickled exposure set, then run the
# seeing / SExtractor / matching / photometry pipeline on each exposure
# and optionally pickle the result.
# NOTE(review): 'reopen', 'save', 'image', 'type' and 'gather_exposures'
# are not defined in this chunk -- presumably set earlier in the file or
# injected globals; verify before running standalone.
from config_bonn import cluster
filter = 'W-C-IC'
import pickle
#filters = ['W-J-B','W-J-V','W-C-RC','W-C-IC','W-S-Z+']
#for filter in filters:
if reopen:
# Restore a previously saved (exposures, LENGTH1, LENGTH2) triple.
f = open('/tmp/tmppickle' + cluster + filter,'r')
m = pickle.Unpickler(f)
exposures, LENGTH1, LENGTH2 = m.load()
print image.latest
if 1: images = gather_exposures(filter,cluster)
print images
''' strip down exposure list '''
for key in exposures.keys():
print exposures[key]['images']
# Run each pipeline stage; the 'if 1:' guards act as manual toggles.
for image in exposures:
if 1: image.find_seeing(exposures) # save seeing info?
if 1: image.sextract(exposures)
if 1: image.match_simple(exposures,cluster)
if 1: image.phot(exposures,filter,type,LENGTH1,LENGTH2)
if save:
f = open('/tmp/tmppickle' + cluster + filter,'w')
m = pickle.Pickler(f)
pickle.dump([exposures,LENGTH1,LENGTH2],m)
f.close()
| deapplegate/wtgpipeline | non_essentials/calc_test/calc_tmp.savefeb10.py | Python | mit | 31,312 | [
"Galaxy"
] | 76cf72dc03e0b29cd3e14db8567108f7cb93b7d46103338859584cc04e04297a |
"""Joint variant calling with multiple samples: aka squaring off, or backfilling.
Handles the N+1 problem of variant calling by combining and recalling samples
previously called individually (or in smaller batches). Recalls at all positions found
variable in any of the input samples within each batch. Takes a general approach supporting
GATK's incremental joint discovery (http://www.broadinstitute.org/gatk/guide/article?id=3893)
and FreeBayes's N+1 approach (https://groups.google.com/d/msg/freebayes/-GK4zI6NsYY/Wpcp8nt_PVMJ)
as implemented in bcbio.variation.recall (https://github.com/chapmanb/bcbio.variation.recall).
"""
import collections
import math
import os
import pysam
import toolz as tz
from bcbio import broad, utils
from bcbio.bam import ref
from bcbio.distributed.split import grouped_parallel_split_combine
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils, region
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import bamprep, gatkjoint, genotype, multi, vcfutils
# Variant callers grouped by joint-calling strategy: "general" callers go
# through bcbio.variation.recall (square/merge), "gatk" through incremental
# gVCF joint genotyping, "gvcf" through direct gVCF merging, and "sentieon"
# through GVCFtyper. Used to build the "<caller>-joint"/"<caller>-merge"
# names dispatched in square_batch_region and get_callers.
SUPPORTED = {"general": ["freebayes", "platypus", "samtools"],
"gatk": ["gatk-haplotype"],
"gvcf": ["strelka2"],
"sentieon": ["haplotyper"]}
# ## CWL joint calling targets
def batch_for_jointvc(items):
    """Group CWL samples into (batch, variantcaller) buckets for joint calling.

    Samples with a joint caller configured are keyed by their batches
    (falling back to the sample name); others are keyed by sample name.
    Each grouped sample gets its current vrn_file preserved as vrn_file_gvcf.
    """
    groups = collections.defaultdict(list)
    for cur in (utils.to_single_data(x) for x in items):
        caller = dd.get_variantcaller(cur)
        if not genotype.is_joint(cur):
            batch_names = [dd.get_sample_name(cur)]
        else:
            batch_names = dd.get_batches(cur) or dd.get_sample_name(cur)
            if not isinstance(batch_names, (list, tuple)):
                batch_names = [batch_names]
        for batch_name in batch_names:
            sample = utils.deepish_copy(cur)
            sample["vrn_file_gvcf"] = sample["vrn_file"]
            groups[(batch_name, caller)].append(sample)
    return groups.values()
def run_jointvc(items):
    """Joint call one region for a batch, returning the lead sample with results.

    Defaults the jointcaller to "<variantcaller>-joint" when unset, converts
    the region to 1-based coordinates, and records the per-region output as
    vrn_file_region on the first sample.
    """
    samples = [utils.to_single_data(x) for x in items]
    data = samples[0]
    if not dd.get_jointcaller(data):
        data["config"]["algorithm"]["jointcaller"] = "%s-joint" % dd.get_variantcaller(data)
    # GenomicsDBImport uses 1-based coordinates. That's unexpected, convert over to these.
    chrom, coords = data["region"].split(":")
    start, end = coords.split("-")
    ready_region = "%s:%s-%s" % (chrom, int(start) + 1, end)
    str_region = ready_region.replace(":", "_")
    out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "joint",
                                              dd.get_variantcaller(data), str_region))
    out_file = os.path.join(out_dir, "%s-%s-%s.vcf.gz" % (dd.get_batches(data)[0],
                                                          dd.get_variantcaller(data),
                                                          str_region))
    region_vrn_files = [d["vrn_file"] for d in samples]
    joint_out = square_batch_region(data, ready_region, [], region_vrn_files, out_file)[0]
    data["vrn_file_region"] = joint_out["vrn_file"]
    return data
def concat_batch_variantcalls_jointvc(items):
    """Concatenate per-region joint calls into one VCF, exposed as vrn_file_joint."""
    merged = genotype.concat_batch_variantcalls(items, region_block=False,
                                                skip_jointcheck=True)
    return {"vrn_file_joint": merged["vrn_file"]}
def finalize_jointvc(items):
    """Unpack CWL-wrapped inputs into a list of plain sample dictionaries."""
    out = []
    for item in items:
        out.append(utils.to_single_data(item))
    return out
def _get_callable_regions(data):
    """Retrieve regions to parallelize by from callable regions, variant regions or chromosomes

    Returns a list of (chrom, start, end) tuples. Preference order:
    a single callable/variant regions BED file, then the contigs of the
    first work BAM, then the reference genome contigs.
    """
    import pybedtools
    bed_files = data.get("callable_regions") or data.get("variant_regions")
    if bed_files:
        assert len(bed_files) == 1
        return [(r.chrom, int(r.start), int(r.stop))
                for r in pybedtools.BedTool(bed_files[0])]
    bam_files = [f for f in data["work_bams"] if f.endswith(".bam")]
    if bam_files:
        with pysam.Samfile(bam_files[0], "rb") as in_bam:
            return [(chrom, 0, size)
                    for chrom, size in zip(in_bam.references, in_bam.lengths)]
    return [(contig.name, 0, contig.size)
            for contig in ref.file_contigs(dd.get_ref_file(data), data["config"])]
def _split_by_callable_region(data):
"""Split by callable or variant regions.
We expect joint calling to be deep in numbers of samples per region, so prefer
splitting aggressively by regions.

Returns the final combined output file name plus a list of
(region, work_bams, vrn_files, region_outfile) work items.
"""
# Name outputs after the batch when available, otherwise the sample name.
batch = tz.get_in(("metadata", "batch"), data)
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
name = batch if batch else tz.get_in(("rgnames", "sample"), data)
out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "joint", jointcaller, name))
utils.safe_makedir(os.path.join(out_dir, "inprep"))
parts = []
for feat in _get_callable_regions(data):
# feat is (chrom, start, end); group outputs per chromosome.
region_dir = utils.safe_makedir(os.path.join(out_dir, feat[0]))
region_prep_dir = os.path.join(region_dir, "inprep")
if not os.path.exists(region_prep_dir):
# Share the single top-level inprep directory via a relative symlink.
os.symlink(os.path.join(os.pardir, "inprep"), region_prep_dir)
region_outfile = os.path.join(region_dir, "%s-%s.vcf.gz" % (batch, region.to_safestr(feat)))
parts.append((feat, data["work_bams"], data["vrn_files"], region_outfile))
out_file = os.path.join(out_dir, "%s-joint.vcf.gz" % name)
return out_file, parts
def _is_jointcaller_compatible(data):
    """Match variant caller inputs to compatible joint callers.

    True when the configured jointcaller is "<variantcaller>-joint" or
    when no variant caller is configured at all.
    """
    jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
    vcaller = tz.get_in(("config", "algorithm", "variantcaller"), data)
    if isinstance(vcaller, (list, tuple)) and len(vcaller) == 1:
        vcaller = vcaller[0]
    if not vcaller:
        return True
    return jointcaller == "%s-joint" % vcaller
def square_off(samples, run_parallel):
"""Perform joint calling at all variants within a batch.

Splits compatible batched samples by region, runs square_batch_region
in parallel, concatenates the results, and passes through samples that
are unbatched or use an incompatible jointcaller untouched.
"""
to_process = []
extras = []
for data in [utils.to_single_data(x) for x in samples]:
added = False
# Only batched samples with a matching jointcaller get squared off.
if tz.get_in(("metadata", "batch"), data):
for add in genotype.handle_multiple_callers(data, "jointcaller"):
if _is_jointcaller_compatible(add):
added = True
to_process.append([add])
if not added:
extras.append([data])
# Split by callable region, run per-region squaring in parallel, then
# concatenate the per-region VCFs back into a single vrn_file.
processed = grouped_parallel_split_combine(to_process, _split_by_callable_region,
multi.group_batches_joint, run_parallel,
"square_batch_region", "concat_variant_files",
"vrn_file", ["region", "sam_ref", "config"])
return _combine_to_jointcaller(processed) + extras
def _combine_to_jointcaller(processed):
    """Add joint calling information to variants, while collapsing independent regions.

    Groups processed samples by (jointcaller, vrn_file), keeping the first
    sample of each group in input order.
    """
    grouped = collections.OrderedDict()
    for data in (item[0] for item in processed):
        key = (tz.get_in(("config", "algorithm", "jointcaller"), data),
               data["vrn_file"])
        grouped.setdefault(key, []).append(data)
    return [[members[0]] for members in grouped.values()]
def want_gvcf(items):
    """Check whether gVCF output is needed: a jointcaller is configured or
    any sample requests the gvcf tool."""
    jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), items[0])
    if jointcaller:
        return jointcaller
    return any("gvcf" in dd.get_tools_on(d) for d in items)
def get_callers():
    """List all supported joint caller names with their -joint/-merge suffixes."""
    names = []
    for group, suffix in [("general", "joint"), ("general", "merge"),
                          ("gatk", "joint"), ("gvcf", "joint"),
                          ("sentieon", "joint")]:
        names.extend("%s-%s" % (caller, suffix) for caller in SUPPORTED[group])
    return names
def square_batch_region(data, region, bam_files, vrn_files, out_file):
"""Perform squaring of a batch in a supplied region, with input BAMs

Dispatches to the implementation matching the configured jointcaller
family (bcbio.variation.recall, GATK, gVCF merge or Sentieon), skipping
work when out_file already exists. Returns the sample wrapped in a list
with vrn_file (and region) updated.
"""
from bcbio.variation import sentieon
if not utils.file_exists(out_file):
jointcaller = tz.get_in(("config", "algorithm", "jointcaller"), data)
# Dispatch on the jointcaller suffix/family declared in SUPPORTED.
if jointcaller in ["%s-joint" % x for x in SUPPORTED["general"]]:
_square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "square")
elif jointcaller in ["%s-merge" % x for x in SUPPORTED["general"]]:
_square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file, "merge")
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gatk"]]:
gatkjoint.run_region(data, region, vrn_files, out_file)
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["gvcf"]]:
merge_gvcfs(data, region, vrn_files, out_file)
elif jointcaller in ["%s-joint" % x for x in SUPPORTED["sentieon"]]:
sentieon.run_gvcftyper(vrn_files, out_file, region, data)
else:
raise ValueError("Unexpected joint calling approach: %s." % jointcaller)
if region:
data["region"] = region
data = _fix_orig_vcf_refs(data)
data["vrn_file"] = out_file
return [data]
def _fix_orig_vcf_refs(data):
    """Supply references to initial variantcalls if run in addition to batching.

    Moves vrn_file to vrn_file_orig on the sample and on each member of
    group_orig, so the joint call can take over vrn_file.
    """
    if tz.get_in(("config", "algorithm", "variantcaller"), data):
        data["vrn_file_orig"] = data["vrn_file"]
        for idx, member in enumerate(data.get("group_orig", [])):
            moved = member.pop("vrn_file", None)
            if moved:
                member["vrn_file_orig"] = moved
                data["group_orig"][idx] = member
    return data
def _square_batch_bcbio_variation(data, region, bam_files, vrn_files, out_file,
todo="square"):
"""Run squaring or merging analysis using bcbio.variation.recall.

todo selects the subcommand: "square" recalls at all variant positions
using the BAMs, "merge" only combines the VCFs.
"""
ref_file = tz.get_in(("reference", "fasta", "base"), data)
cores = tz.get_in(("config", "algorithm", "num_cores"), data, 1)
resources = config_utils.get_resources("bcbio-variation-recall", data["config"])
# adjust memory by cores but leave room for run program memory
memcores = int(math.ceil(float(cores) / 5.0))
jvm_opts = config_utils.adjust_opts(resources.get("jvm_opts", ["-Xms250m", "-Xmx2g"]),
{"algorithm": {"memory_adjust": {"direction": "increase",
"magnitude": memcores}}})
# Write unique VCFs and BAMs to input file
input_file = "%s-inputs.txt" % os.path.splitext(out_file)[0]
with open(input_file, "w") as out_handle:
out_handle.write("\n".join(sorted(list(set(vrn_files)))) + "\n")
# BAMs are only needed for re-calling; merge operates on VCFs alone.
if todo == "square":
out_handle.write("\n".join(sorted(list(set(bam_files)))) + "\n")
variantcaller = tz.get_in(("config", "algorithm", "jointcaller"), data).replace("-joint", "")
cmd = ["bcbio-variation-recall", todo] + jvm_opts + broad.get_default_jvm_opts() + \
["-c", cores, "-r", bamprep.region_to_gatk(region)]
if todo == "square":
cmd += ["--caller", variantcaller]
cmd += [out_file, ref_file, input_file]
bcbio_env = utils.get_bcbio_env()
cmd = " ".join(str(x) for x in cmd)
do.run(cmd, "%s in region: %s" % (cmd, bamprep.region_to_gatk(region)), env=bcbio_env)
return out_file
def merge_gvcfs(data, region, vrn_files, out_file):
"""Simple merging of gVCFs with gvcftools.
merge_variants does appear to work correctly, so we remove gVCF parts
with extract_variants and then combine the merged samples together.
Longer term we plan to replace this with
agg (https://github.com/Illumina/agg) or
GLnexus (https://github.com/dnanexus-rnd/GLnexus).

Returns the bgzipped and indexed merged VCF.
"""
if not utils.file_exists(out_file):
region = bamprep.region_to_gatk(region)
# Strip non-variant gVCF blocks per input, then merge the results.
vcfutils.merge_variant_files([_extract_variants_from_gvcf(f, region, out_file, data) for f in vrn_files],
out_file, dd.get_ref_file(data), data["config"], region)
return vcfutils.bgzip_and_index(out_file, data["config"])
def _extract_variants_from_gvcf(in_file, region, base_out_file, data):
"""Extract only variants from the original gVCF.

Subsets in_file to region with bcftools, drops non-variant blocks with
gvcftools' extract_variants, and writes a bgzipped, indexed VCF next to
base_out_file. Re-runs only when the output is older than the input.
"""
out_file = os.path.join(os.path.dirname(base_out_file),
"%s-%s-varonly.vcf.gz" % (utils.splitext_plus(os.path.basename(in_file))[0],
region.replace(":", "_")))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = "bcftools view -r {region} {in_file} | extract_variants | bgzip -c > {tx_out_file}"
do.run(cmd.format(**locals()), "Extract variants from gVCF %s %s" % (dd.get_sample_name(data), region))
return vcfutils.bgzip_and_index(out_file, data["config"])
| biocyberman/bcbio-nextgen | bcbio/variation/joint.py | Python | mit | 12,850 | [
"pysam"
] | d4e9c02b2bf50e85c9dda9fe80bf91a2fe1ce508d605b76b5959a4d01991c185 |
#!/usr/bin/env python
# This tests vtkAMRExtractLevel
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestAMRExtractLevel(Testing.vtkTest):
    """Regression test for vtkExtractLevel on an Enzo AMR dataset."""

    def testAMR(self):
        """Extract one AMR level and check the output block count matches
        the number of datasets the reader reports for that level."""
        filename = VTK_DATA_ROOT + "/Data/AMR/Enzo/DD0010/moving7_0010.hierarchy"
        level = 1
        reader = vtk.vtkAMREnzoReader()
        reader.SetFileName(filename)
        reader.SetMaxLevel(10)
        reader.SetCellArrayStatus("TotalEnergy", 1)
        # Named 'extract' rather than 'filter' to avoid shadowing the builtin.
        extract = vtk.vtkExtractLevel()
        extract.AddLevel(level)
        extract.SetInputConnection(reader.GetOutputPort())
        extract.Update()
        amr = reader.GetOutputDataObject(0)
        out = extract.GetOutputDataObject(0)
        self.assertEqual(out.GetNumberOfBlocks(), amr.GetNumberOfDataSets(level))
# Run the test case when this file is executed directly as a script.
if __name__ == "__main__":
Testing.main([(TestAMRExtractLevel, 'test')])
| hlzz/dotfiles | graphics/VTK-7.0.0/Filters/AMR/Testing/Python/TestAMRExtractLevel.py | Python | bsd-3-clause | 882 | [
"VTK"
] | 88c0326b4354fd78562f698bf5caf859c259e7f9421ffa9a86c8733675a5caa2 |
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
import pytest
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_raises
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_no_warnings
from sklearn.utils._testing import ignore_warnings
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.naive_bayes import MultinomialNB, ComplementNB
from sklearn.naive_bayes import CategoricalNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
# Fixed seed keeps the random fixtures deterministic across test runs.
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
    """Gaussian Naive Bayes on the toy separable dataset.

    Checks the fit/predict round trip, consistency between predict_proba
    and predict_log_proba, and that mismatched classes raise.
    """
    model = GaussianNB()
    predictions = model.fit(X, y).predict(X)
    assert_array_equal(predictions, y)
    proba = model.predict_proba(X)
    log_proba = model.predict_log_proba(X)
    assert_array_almost_equal(np.log(proba), log_proba, 8)
    # Label mismatch between target y and classes should raise an error.
    # FIXME Remove this test once the more general partial_fit tests are merged
    assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
    """Class priors are estimated from the data and always sum to one."""
    model = GaussianNB().fit(X, y)
    # Balanced toy data: both classes should get prior 3/6.
    assert_array_almost_equal(np.array([3, 3]) / 6.0, model.class_prior_, 8)
    model.fit(X1, y1)
    assert_array_almost_equal(model.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
# (uses the module-level rng, so draws depend on test execution order)
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_gnb_neg_priors():
    """Negative class priors must raise a ValueError at fit time."""
    model = GaussianNB(priors=np.array([-1., 2.]))
    assert_raises(ValueError, model.fit, X, y)
def test_gnb_priors():
    """User-supplied class priors override the empirical ones."""
    model = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y)
    expected = np.array([[0.825303662161683, 0.174696337838317]])
    assert_array_almost_equal(model.predict_proba([[-0.1, -0.1]]), expected, 8)
    assert_array_almost_equal(model.class_prior_, np.array([0.3, 0.7]))
def test_gnb_priors_sum_isclose():
"""Test that a class prior summing to ~1 within floating point tolerance
is accepted (regression test for issue #9633)."""
X = np.array([[-1, -1], [-2, -1], [-3, -2], [-4, -5], [-5, -4],
[1, 1], [2, 1], [3, 2], [4, 4], [5, 5]])
priors = np.array([0.08, 0.14, 0.03, 0.16, 0.11, 0.16, 0.07, 0.14,
0.11, 0.0])
Y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
clf = GaussianNB(priors=priors)
# smoke test for issue #9633
clf.fit(X, Y)
def test_gnb_wrong_nb_priors():
    """A prior vector whose length differs from the class count is rejected."""
    model = GaussianNB(priors=np.array([.25, .25, .25, .25]))
    assert_raises(ValueError, model.fit, X, y)
def test_gnb_prior_greater_one():
    """Priors summing to more than one must raise a ValueError."""
    model = GaussianNB(priors=np.array([2., 1.]))
    assert_raises(ValueError, model.fit, X, y)
def test_gnb_prior_large_bias():
    """Prediction follows a heavily biased prior when evidence is ambiguous."""
    model = GaussianNB(priors=np.array([0.01, 0.99]))
    model.fit(X, y)
    assert model.predict([[-0.1, -0.1]]) == np.array([2])
def test_gnb_check_update_with_no_data():
""" Test when the partial fit is called without any data"""
# Create an empty array
prev_points = 100
mean = 0.
var = 1.
x_empty = np.empty((0, X.shape[1]))
# An empty batch must leave the running mean and variance untouched.
tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean,
var, x_empty)
assert tmean == mean
assert tvar == var
def test_gnb_pfit_wrong_nb_features():
    """Changing the feature count between partial_fit calls is rejected."""
    model = GaussianNB()
    # Establish the expected number of features with an initial fit.
    model.fit(X, y)
    # A follow-up call with twice the columns must fail.
    assert_raises(ValueError, model.partial_fit, np.hstack((X, X)), y)
def test_gnb_partial_fit():
"""One partial_fit over all data matches fit, and two half-batches of
partial_fit converge to the same parameters."""
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
# Split the data into even/odd rows and feed them incrementally.
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_gnb_naive_bayes_scale_invariance():
    """Uniformly rescaling the features should not change GNB predictions."""
    iris = load_iris()
    features, target = iris.data, iris.target
    predictions = []
    for scale in [1E-10, 1, 1E10]:
        model = GaussianNB().fit(scale * features, target)
        predictions.append(model.predict(scale * features))
    assert_array_equal(predictions[0], predictions[1])
    assert_array_equal(predictions[1], predictions[2])
# TODO: Remove in version 0.26
@pytest.mark.parametrize("cls", [MultinomialNB, ComplementNB, BernoulliNB,
CategoricalNB])
def test_discretenb_deprecated_coef_intercept(cls):
"""Accessing the deprecated coef_/intercept_ attributes warns."""
est = cls().fit(X2, y2)
for att in ["coef_", "intercept_"]:
# hasattr triggers the deprecated property, which must warn.
with pytest.warns(FutureWarning):
hasattr(est, att)
@pytest.mark.parametrize("cls", [MultinomialNB, BernoulliNB, CategoricalNB])
def test_discretenb_prior(cls):
"""Class log priors of discrete NB variants match the label frequencies."""
# Test whether class priors are properly set.
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
@pytest.mark.parametrize("cls", [MultinomialNB, BernoulliNB, CategoricalNB])
def test_discretenb_partial_fit(cls):
"""fit, one-shot partial_fit, and incremental partial_fit agree on counts."""
clf1 = cls()
clf1.fit([[0, 1], [1, 0], [1, 1]], [0, 1, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0], [1, 1]], [0, 1, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
if cls is CategoricalNB:
for i in range(len(clf1.category_count_)):
assert_array_equal(clf1.category_count_[i],
clf2.category_count_[i])
else:
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
# all categories have to appear in the first partial fit
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
clf3.partial_fit([[1, 1]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
if cls is CategoricalNB:
# the categories for each feature of CategoricalNB are mapped to an
# index chronologically with each call of partial fit and therefore
# the category_count matrices cannot be compared for equality
for i in range(len(clf1.category_count_)):
assert_array_equal(clf1.category_count_[i].shape,
clf3.category_count_[i].shape)
assert_array_equal(np.sum(clf1.category_count_[i], axis=1),
np.sum(clf3.category_count_[i], axis=1))
# assert category 0 occurs 1x in the first class and 0x in the 2nd
# class
assert_array_equal(clf1.category_count_[0][0], np.array([1, 0]))
# assert category 1 occurs 0x in the first class and 2x in the 2nd
# class
assert_array_equal(clf1.category_count_[0][1], np.array([0, 2]))
# assert category 0 occurs 0x in the first class and 1x in the 2nd
# class
assert_array_equal(clf1.category_count_[1][0], np.array([0, 1]))
# assert category 1 occurs 1x in the first class and 1x in the 2nd
# class
assert_array_equal(clf1.category_count_[1][1], np.array([1, 1]))
else:
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
@pytest.mark.parametrize('cls', [BernoulliNB, MultinomialNB, GaussianNB,
CategoricalNB])
def test_discretenb_pickle(cls):
"""Pickle round-trips preserve predictions, for fit and partial_fit."""
# Test picklability of discrete naive Bayes classifiers
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
@pytest.mark.parametrize('cls', [BernoulliNB, MultinomialNB, GaussianNB,
CategoricalNB])
def test_discretenb_input_check_fit(cls):
"""Shape mismatches are rejected at fit and predict time."""
# Test input checks for the fit method
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
@pytest.mark.parametrize('cls', [BernoulliNB, MultinomialNB, CategoricalNB])
def test_discretenb_input_check_partial_fit(cls):
"""partial_fit validates shapes and the classes contract."""
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
# TODO: Remove in version 0.26
@ignore_warnings(category=FutureWarning)
def test_discretenb_predict_proba():
"""predict_proba of discrete NB variants has the right shape and sums to 1
in both binary and multiclass settings."""
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert clf.predict(X[-1:]) == 2
assert clf.predict_proba([X[0]]).shape == (1, 2)
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert clf.predict_proba(X[0:1]).shape == (1, 3)
assert clf.predict_proba(X[:2]).shape == (2, 3)
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
@pytest.mark.parametrize('cls', [BernoulliNB, MultinomialNB, CategoricalNB])
def test_discretenb_uniform_prior(cls):
    """Discrete NB classes fit a uniform prior when fit_prior=False and class_prior=None."""
    clf = cls()
    clf.set_params(fit_prior=False)
    clf.fit([[0], [0], [1]], [0, 0, 1])
    prior = np.exp(clf.class_log_prior_)
    assert_array_almost_equal(prior, np.array([.5, .5]))
@pytest.mark.parametrize('cls', [BernoulliNB, MultinomialNB, CategoricalNB])
def test_discretenb_provide_prior(cls):
    """Discrete NB classes use a user-provided class prior and validate its length."""
    clf = cls(class_prior=[0.5, 0.5])
    clf.fit([[0], [0], [1]], [0, 0, 1])
    prior = np.exp(clf.class_log_prior_)
    assert_array_almost_equal(prior, np.array([.5, .5]))

    # Inconsistent number of classes with prior
    assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
    assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
                  classes=[0, 1, 1])
@pytest.mark.parametrize('cls', [BernoulliNB, MultinomialNB, CategoricalNB])
def test_discretenb_provide_prior_with_partial_fit(cls):
    """A provided class prior is honored identically by fit and partial_fit."""
    iris = load_iris()
    iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
        iris.data, iris.target, test_size=0.4, random_state=415)

    for prior in [None, [0.3, 0.3, 0.4]]:
        clf_full = cls(class_prior=prior)
        clf_full.fit(iris.data, iris.target)
        clf_partial = cls(class_prior=prior)
        clf_partial.partial_fit(iris_data1, iris_target1,
                                classes=[0, 1, 2])
        clf_partial.partial_fit(iris_data2, iris_target2)
        assert_array_almost_equal(clf_full.class_log_prior_,
                                  clf_partial.class_log_prior_)
@pytest.mark.parametrize('cls', [BernoulliNB, MultinomialNB, CategoricalNB])
def test_discretenb_sample_weight_multiclass(cls):
    """sample_weight is respected by both fit and partial_fit (multiclass)."""
    # check shape consistency for number of samples at fit time
    X = [
        [0, 0, 1],
        [0, 1, 1],
        [0, 1, 1],
        [1, 0, 0],
    ]
    y = [0, 0, 1, 2]
    sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
    sample_weight /= sample_weight.sum()
    clf = cls().fit(X, y, sample_weight=sample_weight)
    assert_array_equal(clf.predict(X), [0, 1, 1, 2])

    # Check sample weight using the partial_fit method
    clf = cls()
    clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
                    sample_weight=sample_weight[:2])
    clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
    clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
    assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# TODO: Remove in version 0.26
@ignore_warnings(category=FutureWarning)
@pytest.mark.parametrize('cls', [BernoulliNB, MultinomialNB])
def test_discretenb_coef_intercept_shape(cls):
    """coef_ and intercept_ have shapes as in other linear models.

    Non-regression test for issue #2127.
    """
    X = [[1, 0, 0], [1, 1, 1]]
    y = [1, 2]  # binary classification
    clf = cls()
    clf.fit(X, y)
    assert clf.coef_.shape == (1, 3)
    assert clf.intercept_.shape == (1,)
@pytest.mark.parametrize('kind', ('dense', 'sparse'))
def test_mnnb(kind):
    """MultinomialNB fit/predict on a toy dataset, dense and sparse.

    Also checks predict_log_proba consistency and that incremental
    partial_fit matches a single fit.
    """
    if kind == 'dense':
        X = X2
    elif kind == 'sparse':
        X = scipy.sparse.csr_matrix(X2)

    # Check the ability to predict the learning set.
    clf = MultinomialNB()

    # MultinomialNB rejects negative input.
    assert_raises(ValueError, clf.fit, -X, y2)
    y_pred = clf.fit(X, y2).predict(X)
    assert_array_equal(y_pred, y2)

    # Verify that np.log(clf.predict_proba(X)) gives the same results as
    # clf.predict_log_proba(X)
    y_pred_proba = clf.predict_proba(X)
    y_pred_log_proba = clf.predict_log_proba(X)
    assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)

    # Check that incremental fitting yields the same results
    clf2 = MultinomialNB()
    clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
    clf2.partial_fit(X[2:5], y2[2:5])
    clf2.partial_fit(X[5:], y2[5:])
    y_pred2 = clf2.predict(X)
    assert_array_equal(y_pred2, y2)

    y_pred_proba2 = clf2.predict_proba(X)
    y_pred_log_proba2 = clf2.predict_log_proba(X)
    assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
    assert_array_almost_equal(y_pred_proba2, y_pred_proba)
    assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)

    # Partial fit on the whole data at once should be the same as fit too
    clf3 = MultinomialNB()
    clf3.partial_fit(X, y2, classes=np.unique(y2))
    y_pred3 = clf3.predict(X)
    assert_array_equal(y_pred3, y2)
    y_pred_proba3 = clf3.predict_proba(X)
    y_pred_log_proba3 = clf3.predict_log_proba(X)
    assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
    assert_array_almost_equal(y_pred_proba3, y_pred_proba)
    assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def test_mnb_prior_unobserved_targets():
    """Smoothing of the prior handles classes declared but not yet observed."""
    # Create toy training data
    X = np.array([[0, 1], [1, 0]])
    y = np.array([0, 1])

    clf = MultinomialNB()
    assert_no_warnings(
        clf.partial_fit, X, y, classes=[0, 1, 2]
    )
    assert clf.predict([[0, 1]]) == 0
    assert clf.predict([[1, 0]]) == 1
    assert clf.predict([[1, 1]]) == 0

    # add a training example with previously unobserved class
    assert_no_warnings(
        clf.partial_fit, [[1, 1]], [2]
    )
    assert clf.predict([[0, 1]]) == 0
    assert clf.predict([[1, 0]]) == 1
    assert clf.predict([[1, 1]]) == 2
# TODO: Remove in version 0.26
@ignore_warnings(category=FutureWarning)
def test_mnb_sample_weight():
    """sample_weight shifts both predictions and the learned class prior."""
    clf = MultinomialNB()
    clf.fit([[1, 2], [1, 2], [1, 0]],
            [0, 0, 1],
            sample_weight=[1, 1, 4])
    assert_array_equal(clf.predict([[1, 0]]), [1])
    positive_prior = np.exp(clf.intercept_[0])
    assert_array_almost_equal([1 - positive_prior, positive_prior],
                              [1 / 3., 2 / 3.])
def test_bnb():
    """BernoulliNB with alpha=1.0 reproduces the IR-book worked example.

    Tests that BernoulliNB when alpha=1.0 gives the same values as
    those given for the toy example in Manning, Raghavan, and
    Schuetze's "Introduction to Information Retrieval" book:
    https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    """
    # Training data points are:
    # Chinese Beijing Chinese (class: China)
    # Chinese Chinese Shanghai (class: China)
    # Chinese Macao (class: China)
    # Tokyo Japan Chinese (class: Japan)

    # Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
    X = np.array([[1, 1, 0, 0, 0, 0],
                  [0, 1, 0, 0, 1, 0],
                  [0, 1, 0, 1, 0, 0],
                  [0, 1, 1, 0, 0, 1]])

    # Classes are China (0), Japan (1)
    Y = np.array([0, 0, 0, 1])

    # Fit BernoulliNB w/ alpha = 1.0
    clf = BernoulliNB(alpha=1.0)
    clf.fit(X, Y)

    # Check the class prior is correct
    class_prior = np.array([0.75, 0.25])
    assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)

    # Check the feature probabilities are correct
    feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
                             [1 / 3.0, 2 / 3.0, 2 / 3.0, 1 / 3.0, 1 / 3.0,
                              2 / 3.0]])
    assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)

    # Testing data point is:
    # Chinese Chinese Chinese Tokyo Japan
    X_test = np.array([[0, 1, 1, 0, 0, 1]])

    # Check the predictive probabilities are correct
    unnorm_predict_proba = np.array([[0.005183999999999999,
                                      0.02194787379972565]])
    predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
    assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_bnb_feature_log_prob():
    """feature_log_prob_ matches the textbook smoothed estimate (issue #4268).

    Tests that the feature log prob value computed by BernoulliNB when
    alpha=1.0 is equal to the expression given in Manning, Raghavan,
    and Schuetze's "Introduction to Information Retrieval" book:
    http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    """
    X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
    Y = np.array([0, 0, 1, 2, 2])

    # Fit Bernoulli NB w/ alpha = 1.0
    clf = BernoulliNB(alpha=1.0)
    clf.fit(X, Y)

    # Manually form the (log) numerator and denominator that
    # constitute P(feature presence | class)
    num = np.log(clf.feature_count_ + 1.0)
    denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T

    # Check manual estimate matches
    assert_array_almost_equal(clf.feature_log_prob_, (num - denom))
def test_cnb():
    """ComplementNB with alpha=1.0 on the IR-book toy example.

    Tests ComplementNB when alpha=1.0 for the toy example in Manning,
    Raghavan, and Schuetze's "Introduction to Information Retrieval" book:
    https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
    """
    # Training data points are:
    # Chinese Beijing Chinese (class: China)
    # Chinese Chinese Shanghai (class: China)
    # Chinese Macao (class: China)
    # Tokyo Japan Chinese (class: Japan)

    # Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo.
    X = np.array([[1, 1, 0, 0, 0, 0],
                  [0, 1, 0, 0, 1, 0],
                  [0, 1, 0, 1, 0, 0],
                  [0, 1, 1, 0, 0, 1]])

    # Classes are China (0), Japan (1).
    Y = np.array([0, 0, 0, 1])

    # Check that weights are correct. See steps 4-6 in Table 4 of
    # Rennie et al. (2003).
    theta = np.array([
        [
            (0 + 1) / (3 + 6),
            (1 + 1) / (3 + 6),
            (1 + 1) / (3 + 6),
            (0 + 1) / (3 + 6),
            (0 + 1) / (3 + 6),
            (1 + 1) / (3 + 6)
        ],
        [
            (1 + 1) / (6 + 6),
            (3 + 1) / (6 + 6),
            (0 + 1) / (6 + 6),
            (1 + 1) / (6 + 6),
            (1 + 1) / (6 + 6),
            (0 + 1) / (6 + 6)
        ]])

    weights = np.zeros(theta.shape)
    normed_weights = np.zeros(theta.shape)
    for i in range(2):
        weights[i] = -np.log(theta[i])
        normed_weights[i] = weights[i] / weights[i].sum()

    # Verify inputs are nonnegative.
    clf = ComplementNB(alpha=1.0)
    assert_raises(ValueError, clf.fit, -X, Y)

    clf.fit(X, Y)

    # Check that counts/weights are correct.
    feature_count = np.array([[1, 3, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1]])
    assert_array_equal(clf.feature_count_, feature_count)
    class_count = np.array([3, 1])
    assert_array_equal(clf.class_count_, class_count)
    feature_all = np.array([1, 4, 1, 1, 1, 1])
    assert_array_equal(clf.feature_all_, feature_all)
    assert_array_almost_equal(clf.feature_log_prob_, weights)

    clf = ComplementNB(alpha=1.0, norm=True)
    clf.fit(X, Y)
    assert_array_almost_equal(clf.feature_log_prob_, normed_weights)
def test_categoricalnb():
    """CategoricalNB: training-set prediction, input validation, alpha and sample_weight."""
    # Check the ability to predict the training set.
    clf = CategoricalNB()
    y_pred = clf.fit(X2, y2).predict(X2)
    assert_array_equal(y_pred, y2)

    X3 = np.array([[1, 4], [2, 5]])
    y3 = np.array([1, 2])
    clf = CategoricalNB(alpha=1, fit_prior=False)

    clf.fit(X3, y3)
    assert_array_equal(clf.n_categories_, np.array([3, 6]))

    # Check error is raised for X with negative entries
    X = np.array([[0, -1]])
    y = np.array([1])
    error_msg = "Negative values in data passed to CategoricalNB (input X)"
    assert_raise_message(ValueError, error_msg, clf.predict, X)
    assert_raise_message(ValueError, error_msg, clf.fit, X, y)

    # Check error is raised for incorrect X
    X = np.array([[1, 4, 1], [2, 5, 6]])
    msg = "Expected input with 2 features, got 3 instead"
    assert_raise_message(ValueError, msg, clf.predict, X)

    # Test alpha
    X3_test = np.array([[2, 5]])
    # alpha=1 increases the count of all categories by one so the final
    # probability for each category is not 50/50 but 1/3 to 2/3
    bayes_numerator = np.array([[1/3*1/3, 2/3*2/3]])
    bayes_denominator = bayes_numerator.sum()
    assert_array_almost_equal(clf.predict_proba(X3_test),
                              bayes_numerator / bayes_denominator)

    # Assert category_count has counted all features
    assert len(clf.category_count_) == X3.shape[1]

    # Check sample_weight
    X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
    y = np.array([1, 1, 2, 2])
    clf = CategoricalNB(alpha=1, fit_prior=False)
    clf.fit(X, y)
    assert_array_equal(clf.predict(np.array([[0, 0]])), np.array([1]))
    assert_array_equal(clf.n_categories_, np.array([2, 2]))

    for factor in [1., 0.3, 5, 0.0001]:
        X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
        y = np.array([1, 1, 2, 2])
        sample_weight = np.array([1, 1, 10, 0.1]) * factor
        clf = CategoricalNB(alpha=1, fit_prior=False)
        clf.fit(X, y, sample_weight=sample_weight)
        assert_array_equal(clf.predict(np.array([[0, 0]])), np.array([2]))
        assert_array_equal(clf.n_categories_, np.array([2, 2]))
@pytest.mark.parametrize(
    "min_categories, exp_X1_count, exp_X2_count, new_X, exp_n_categories_",
    [
        # check min_categories with int > observed categories
        (3, np.array([[2, 0, 0], [1, 1, 0]]), np.array([[1, 1, 0], [1, 1, 0]]),
         np.array([[0, 2]]), np.array([3, 3]),
         ),
        # check with list input
        ([3, 4], np.array([[2, 0, 0], [1, 1, 0]]),
         np.array([[1, 1, 0, 0], [1, 1, 0, 0]]), np.array([[0, 3]]),
         np.array([3, 4]),
         ),
        # check min_categories with min less than actual
        # NOTE: the original garbled this case into a stray list; restored
        # as a tuple like its siblings (same values).
        (1, np.array([[2, 0], [1, 1]]), np.array([[1, 1], [1, 1]]),
         np.array([[0, 1]]), np.array([2, 2]),
         ),
    ]
)
def test_categoricalnb_with_min_categories(min_categories, exp_X1_count,
                                           exp_X2_count, new_X,
                                           exp_n_categories_):
    """CategoricalNB honors the min_categories parameter (int and list forms)."""
    X_n_categories = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
    y_n_categories = np.array([1, 1, 2, 2])
    expected_prediction = np.array([1])

    clf = CategoricalNB(alpha=1, fit_prior=False,
                        min_categories=min_categories)
    clf.fit(X_n_categories, y_n_categories)
    X1_count, X2_count = clf.category_count_
    assert_array_equal(X1_count, exp_X1_count)
    assert_array_equal(X2_count, exp_X2_count)
    predictions = clf.predict(new_X)
    assert_array_equal(predictions, expected_prediction)
    assert_array_equal(clf.n_categories_, exp_n_categories_)
@pytest.mark.parametrize(
    "min_categories, error_msg",
    [
        ('bad_arg', "'min_categories' should have integral"),
        ([[3, 2], [2, 4]], "'min_categories' should have shape"),
        (1., "'min_categories' should have integral"),
    ]
)
def test_categoricalnb_min_categories_errors(min_categories, error_msg):
    """Invalid min_categories values raise an informative ValueError."""
    X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
    y = np.array([1, 1, 2, 2])

    clf = CategoricalNB(alpha=1, fit_prior=False,
                        min_categories=min_categories)
    with pytest.raises(ValueError, match=error_msg):
        clf.fit(X, y)
def test_alpha():
    """alpha=0 warns and does not produce NaN; alpha<0 raises ValueError."""
    # Setting alpha=0 should not output nan results when p(x_i|y_j)=0 is a case
    X = np.array([[1, 0], [1, 1]])
    y = np.array([0, 1])
    nb = BernoulliNB(alpha=0.)
    assert_warns(UserWarning, nb.partial_fit, X, y, classes=[0, 1])
    assert_warns(UserWarning, nb.fit, X, y)
    prob = np.array([[1, 0], [0, 1]])
    assert_array_almost_equal(nb.predict_proba(X), prob)

    nb = MultinomialNB(alpha=0.)
    assert_warns(UserWarning, nb.partial_fit, X, y, classes=[0, 1])
    assert_warns(UserWarning, nb.fit, X, y)
    prob = np.array([[2. / 3, 1. / 3], [0, 1]])
    assert_array_almost_equal(nb.predict_proba(X), prob)

    nb = CategoricalNB(alpha=0.)
    assert_warns(UserWarning, nb.fit, X, y)
    prob = np.array([[1., 0.], [0., 1.]])
    assert_array_almost_equal(nb.predict_proba(X), prob)

    # Test sparse X
    X = scipy.sparse.csr_matrix(X)
    nb = BernoulliNB(alpha=0.)
    assert_warns(UserWarning, nb.fit, X, y)
    prob = np.array([[1, 0], [0, 1]])
    assert_array_almost_equal(nb.predict_proba(X), prob)

    nb = MultinomialNB(alpha=0.)
    assert_warns(UserWarning, nb.fit, X, y)
    prob = np.array([[2. / 3, 1. / 3], [0, 1]])
    assert_array_almost_equal(nb.predict_proba(X), prob)

    # Test for alpha < 0
    X = np.array([[1, 0], [1, 1]])
    y = np.array([0, 1])
    expected_msg = ('Smoothing parameter alpha = -1.0e-01. '
                    'alpha should be > 0.')
    b_nb = BernoulliNB(alpha=-0.1)
    m_nb = MultinomialNB(alpha=-0.1)
    c_nb = CategoricalNB(alpha=-0.1)
    assert_raise_message(ValueError, expected_msg, b_nb.fit, X, y)
    assert_raise_message(ValueError, expected_msg, m_nb.fit, X, y)
    assert_raise_message(ValueError, expected_msg, c_nb.fit, X, y)

    b_nb = BernoulliNB(alpha=-0.1)
    m_nb = MultinomialNB(alpha=-0.1)
    assert_raise_message(ValueError, expected_msg, b_nb.partial_fit,
                         X, y, classes=[0, 1])
    assert_raise_message(ValueError, expected_msg, m_nb.partial_fit,
                         X, y, classes=[0, 1])
def test_alpha_vector():
    """Per-feature alpha vectors: pseudo-counts, validation, and clipping."""
    X = np.array([[1, 0], [1, 1]])
    y = np.array([0, 1])

    # Setting alpha=np.array with same length
    # as number of features should be fine
    alpha = np.array([1, 2])
    nb = MultinomialNB(alpha=alpha)
    nb.partial_fit(X, y, classes=[0, 1])

    # Test feature probabilities uses pseudo-counts (alpha)
    feature_prob = np.array([[1 / 2, 1 / 2], [2 / 5, 3 / 5]])
    assert_array_almost_equal(nb.feature_log_prob_, np.log(feature_prob))

    # Test predictions
    prob = np.array([[5 / 9, 4 / 9], [25 / 49, 24 / 49]])
    assert_array_almost_equal(nb.predict_proba(X), prob)

    # Test alpha non-negative
    alpha = np.array([1., -0.1])
    expected_msg = ('Smoothing parameter alpha = -1.0e-01. '
                    'alpha should be > 0.')
    m_nb = MultinomialNB(alpha=alpha)
    assert_raise_message(ValueError, expected_msg, m_nb.fit, X, y)

    # Test that too small pseudo-counts are replaced
    ALPHA_MIN = 1e-10
    alpha = np.array([ALPHA_MIN / 2, 0.5])
    m_nb = MultinomialNB(alpha=alpha)
    m_nb.partial_fit(X, y, classes=[0, 1])
    assert_array_almost_equal(m_nb._check_alpha(),
                              [ALPHA_MIN, 0.5],
                              decimal=12)

    # Test correct dimensions
    alpha = np.array([1., 2., 3.])
    m_nb = MultinomialNB(alpha=alpha)
    expected_msg = ('alpha should be a scalar or a numpy array '
                    'with shape [n_features]')
    assert_raise_message(ValueError, expected_msg, m_nb.fit, X, y)
def test_check_accuracy_on_digits():
    """Non-regression accuracy floors on the digits dataset.

    Non regression test to make sure that any further refactoring / optim
    of the NB models do not harm the performance on a slightly non-linearly
    separable dataset.
    """
    X, y = load_digits(return_X_y=True)
    binary_3v8 = np.logical_or(y == 3, y == 8)
    X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]

    # Multinomial NB
    scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
    assert scores.mean() > 0.86

    scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
    assert scores.mean() > 0.94

    # Bernoulli NB
    scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
    assert scores.mean() > 0.83

    scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
    assert scores.mean() > 0.92

    # Gaussian NB
    scores = cross_val_score(GaussianNB(), X, y, cv=10)
    assert scores.mean() > 0.77

    scores = cross_val_score(GaussianNB(var_smoothing=0.1), X, y, cv=10)
    assert scores.mean() > 0.89

    scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
    assert scores.mean() > 0.86
| ndingwall/scikit-learn | sklearn/tests/test_naive_bayes.py | Python | bsd-3-clause | 33,293 | [
"Gaussian"
] | 97ee8b5d5f69a6c687330a3c476ebb33341fc7aa305e2507d5b5a06c43e0ee1d |
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
#
# Authors :
# Dominic Lowe <d.lowe@rl.ac.uk>
#
# Contact email: d.lowe@rl.ac.uk
# =============================================================================
from __future__ import (absolute_import, division, print_function)
from bcube_owslib.coverage.wcsBase import WCSBase, WCSCapabilitiesReader, ServiceException
from urllib import urlencode
from bcube_owslib.util import openURL, testXMLValue
from bcube_owslib.crs import Crs
import logging
from bcube_owslib.util import log
# function to save writing out WCS namespace in full each time
# function to save writing out WCS namespace in full each time
def ns(tag):
    """Return *tag* qualified with the WCS 1.0 XML namespace (Clark notation)."""
    return '{http://www.opengis.net/wcs}' + tag
class WebCoverageService_1_0_0(WCSBase):
    """Abstraction for OGC Web Coverage Service (WCS), version 1.0.0

    Implements IWebCoverageService.
    """

    def __getitem__(self, name):
        ''' check contents dictionary to allow dict like access to service layers'''
        if name in self.__getattribute__('contents').keys():
            return self.__getattribute__('contents')[name]
        else:
            raise KeyError("No content named %s" % name)

    def __init__(self, url, xml, cookies):
        self.version = '1.0.0'
        self.url = url
        self.cookies = cookies
        # initialize from saved capability document or access the server
        reader = WCSCapabilitiesReader(self.version, self.cookies)
        if xml:
            self._capabilities = reader.readString(xml)
        else:
            self._capabilities = reader.read(self.url)

        # check for exceptions
        se = self._capabilities.find('ServiceException')
        if se is not None:
            err_message = str(se.text).strip()
            raise ServiceException(err_message, xml)

        # serviceIdentification metadata
        subelem = self._capabilities.find(ns('Service'))
        self.identification = ServiceIdentification(subelem)

        # serviceProvider metadata
        subelem = self._capabilities.find(ns('Service/') + ns('responsibleParty'))
        self.provider = ServiceProvider(subelem)

        # serviceOperations metadata
        self.operations = []
        for elem in self._capabilities.find(ns('Capability/') + ns('Request'))[:]:
            self.operations.append(OperationMetadata(elem))

        # serviceContents metadata
        self.contents = {}
        for elem in self._capabilities.findall(
                ns('ContentMetadata/') + ns('CoverageOfferingBrief')):
            cm = ContentMetadata(elem, self)
            self.contents[cm.id] = cm

        # Some WCS servers (wrongly) advertise 'Content' OfferingBrief instead.
        if self.contents == {}:
            for elem in self._capabilities.findall(
                    ns('ContentMetadata/') + ns('ContentOfferingBrief')):
                cm = ContentMetadata(elem, self)
                self.contents[cm.id] = cm

        # exceptions
        self.exceptions = [
            f.text for f
            in self._capabilities.findall('Capability/Exception/Format')]

    def items(self):
        '''supports dict-like items() access'''
        items = []
        for item in self.contents:
            items.append((item, self.contents[item]))
        return items

    def __makeString(self, value):
        # using repr unconditionally breaks things in some
        # circumstances if a value is already a string
        if type(value) is not str:
            sval = repr(value)
        else:
            sval = value
        return sval

    def getCoverage(self, identifier=None, bbox=None, time=None,
                    format=None, crs=None, width=None, height=None, resx=None, resy=None,
                    resz=None, parameter=None, method='Get', **kwargs):
        """Request and return a coverage from the WCS as a file-like object

        note: additional **kwargs helps with multi-version implementation
        core keyword arguments should be supported cross version
        example:
        cvg=wcs.getCoverage(identifier=['TuMYrRQ4'], timeSequence=['2792-06-01T00:00:00.0'],
                            bbox=(-112,36,-106,41),format='cf-netcdf')
        is equivalent to:
        http://myhost/mywcs?SERVICE=WCS&REQUEST=GetCoverage&IDENTIFIER=TuMYrRQ4&VERSION=1.1.0&BOUNDINGBOX=-180,-90,180,90&TIME=2792-06-01T00:00:00.0&FORMAT=cf-netcdf
        """
        if log.isEnabledFor(logging.DEBUG):
            log.debug('''WCS 1.0.0 DEBUG: Parameters passed to GetCoverage: identifier=%s, bbox=%s,
 time=%s, format=%s, crs=%s, width=%s, height=%s, resx=%s, resy=%s, resz=%s,
 parameter=%s, method=%s, other_arguments=%s''' % (
                identifier,
                bbox,
                time,
                format,
                crs,
                width,
                height,
                resx,
                resy,
                resz,
                parameter,
                method,
                str(kwargs)
            )
            )

        # prefer the advertised URL for the requested HTTP method; fall back
        # to the service URL when GetCoverage does not list one
        try:
            base_url = next(
                (
                    m.get('url') for m in self.getOperationByName('GetCoverage').methods
                    if m.get('type').lower() == method.lower()
                )
            )
        except StopIteration:
            base_url = self.url

        if log.isEnabledFor(logging.DEBUG):
            log.debug('WCS 1.0.0 DEBUG: base url of server: %s' % base_url)

        # process kwargs
        request = {'version': self.version, 'request': 'GetCoverage', 'service': 'WCS'}
        assert len(identifier) > 0
        request['Coverage'] = identifier
        # request['identifier'] = ','.join(identifier)
        if bbox:
            request['BBox'] = ','.join([self.__makeString(x) for x in bbox])
        else:
            request['BBox'] = None
        if time:
            request['time'] = ','.join(time)
        if crs:
            request['crs'] = crs
        request['format'] = format
        if width:
            request['width'] = width
        if height:
            request['height'] = height
        if resx:
            request['resx'] = resx
        if resy:
            request['resy'] = resy
        if resz:
            request['resz'] = resz

        # anything else e.g. vendor specific parameters must go through kwargs
        if kwargs:
            for kw in kwargs:
                request[kw] = kwargs[kw]

        # encode and request
        data = urlencode(request)
        if log.isEnabledFor(logging.DEBUG):
            log.debug('WCS 1.0.0 DEBUG: Second part of URL: %s' % data)

        u = openURL(base_url, data, method, self.cookies)
        return u

    def getOperationByName(self, name):
        """Return a named operation item."""
        for item in self.operations:
            if item.name == name:
                return item
        raise KeyError("No operation named %s" % name)
class OperationMetadata(object):
    """Abstraction for WCS operation metadata.

    Implements IMetadata.  Collects the Get/Post endpoint URLs advertised
    for one operation element of the capabilities document.
    """

    def __init__(self, elem):
        # operation name is the element's local tag (namespace stripped)
        self.name = elem.tag.split('}')[1]
        # self.formatOptions = [f.text for f in elem.findall('{http://www.opengis.net/wcs/1.1/ows}
        # Parameter/{http://www.opengis.net/wcs/1.1/ows}AllowedValues/{http://www.opengis.net/wcs/1.1/ows}Value')]
        self.methods = []
        for resource in elem.findall(
                ns('DCPType/') + ns('HTTP/') + ns('Get/') + ns('OnlineResource')):
            url = resource.attrib['{http://www.w3.org/1999/xlink}href']
            self.methods.append({'type': 'Get', 'url': url})
        for resource in elem.findall(
                ns('DCPType/') + ns('HTTP/') + ns('Post/') + ns('OnlineResource')):
            url = resource.attrib['{http://www.w3.org/1999/xlink}href']
            self.methods.append({'type': 'Post', 'url': url})
class ServiceIdentification(object):
    """ Abstraction for ServiceIdentification metadata """

    def __init__(self, elem):
        # properties
        self.type = 'OGC:WCS'
        self.version = '1.0.0'
        self.service = testXMLValue(elem.find(ns('name')))
        self.abstract = testXMLValue(elem.find(ns('description')))
        self.title = testXMLValue(elem.find(ns('label')))
        self.keywords = [f.text for f in elem.findall(ns('keywords') + '/' + ns('keyword'))]
        # note: differs from 'rights' in interface
        self.fees = elem.find(ns('fees')).text
        self.accessconstraints = elem.find(ns('accessConstraints')).text
class ServiceProvider(object):
    """ Abstraction for WCS ResponsibleParty
    Implements IServiceProvider"""

    def __init__(self, elem):
        # it's not uncommon for the service provider info to be missing
        # so handle case where None is passed in
        if elem is None:
            self.name = None
            self.url = None
            self.contact = None
        else:
            self.name = testXMLValue(elem.find(ns('organisationName')))
            # there is no definitive place for url WCS, repeat organisationName
            self.url = self.name
            self.contact = ContactMetadata(elem)
class ContactMetadata(object):
    ''' implements IContactMetadata'''

    def __init__(self, elem):
        def _text(path):
            # Return the text of the node at *path*, or None when the node
            # is absent (find() returns None -> AttributeError on .text).
            try:
                return elem.find(path).text
            except AttributeError:
                return None

        # common prefix for the postal-address fields
        addr = ns('contactInfo') + '/' + ns('address') + '/'

        self.name = _text(ns('individualName'))
        self.organization = _text(ns('organisationName'))
        self.address = _text(addr + ns('deliveryPoint'))
        self.city = _text(addr + ns('city'))
        self.region = _text(addr + ns('administrativeArea'))
        self.postcode = _text(addr + ns('postalCode'))
        self.country = _text(addr + ns('country'))
        self.email = _text(addr + ns('electronicMailAddress'))
class ContentMetadata(object):
    """
    Implements IContentMetadata
    """

    def __init__(self, elem, service):
        """Initialize. service is required so that describeCoverage requests may be made"""
        # TODO - examine the parent for bounding box info.
        # self._parent=parent
        self._elem = elem
        self._service = service
        self.id = elem.find(ns('name')).text
        self.title = testXMLValue(elem.find(ns('label')))
        self.abstract = testXMLValue(elem.find(ns('description')))
        self.keywords = [f.text for f in elem.findall(ns('keywords') + '/' + ns('keyword'))]
        self.boundingBoxes = None
        self.boundingBoxWGS84 = None

        # others not used but needed for iContentMetadata harmonisation
        self.styles = None
        self.crsOptions = None
        self.defaulttimeposition = None
        self.attribution = None
        self.timepositions = None
        self.metadataUrls = []

        b = elem.find(ns('lonLatEnvelope'))
        if b is not None:
            gmlpositions = b.findall('{http://www.opengis.net/gml}pos')
            lc = gmlpositions[0].text
            uc = gmlpositions[1].text
            self.boundingBoxWGS84 = (
                float(lc.split()[0]), float(lc.split()[1]),
                float(uc.split()[0]), float(uc.split()[1]),
            )
            # BUGFIX: the original read b.findall(...) outside this guard,
            # raising AttributeError whenever lonLatEnvelope was absent.
            time_elems = b.findall('{http://www.opengis.net/gml}timePosition')
            self.timepositions = []
            for time_elem in time_elems:
                if time_elem.text:
                    self.timepositions.append(time_elem.text.strip())
# NOTE: removing all of the DescribeCoverage
# # grid is either a gml:Grid or a gml:RectifiedGrid if supplied
# # as part of the DescribeCoverage response.
# def _getGrid(self):
# if not hasattr(self, 'descCov'):
# self.descCov = self._service.getDescribeCoverage(self.id)
# gridelem = self.descCov.find(
# ns('CoverageOffering/') +
# ns('domainSet/') + ns('spatialDomain/') +
# '{http://www.opengis.net/gml}RectifiedGrid'
# )
# if gridelem is not None:
# grid = RectifiedGrid(gridelem)
# else:
# gridelem = self.descCov.find(
# ns('CoverageOffering/') +
# ns('domainSet/') +
# ns('spatialDomain/') +
# '{http://www.opengis.net/gml}Grid'
# )
# grid = Grid(gridelem)
# return grid
# grid = property(_getGrid, None)
# # timelimits are the start/end times, timepositions are all timepoints.
# # WCS servers can declare one or both or neither of these.
# def _getTimeLimits(self):
# timepoints, timelimits = [], []
# b = self._elem.find(ns('lonLatEnvelope'))
# if b is not None:
# timepoints = b.findall('{http://www.opengis.net/gml}timePosition')
# else:
# # have to make a describeCoverage request...
# if not hasattr(self, 'descCov'):
# self.descCov = self._service.getDescribeCoverage(self.id)
# for pos in self.descCov.findall(
# ns('CoverageOffering/') +
# ns('domainSet/') +
# ns('temporalDomain/') +
# '{http://www.opengis.net/gml}timePosition'
# ):
# timepoints.append(pos)
# if timepoints:
# timelimits = [timepoints[0].text, timepoints[1].text]
# return timelimits
# timelimits = property(_getTimeLimits, None)
# def _getTimePositions(self):
# timepositions = []
# if not hasattr(self, 'descCov'):
# self.descCov = self._service.getDescribeCoverage(self.id)
# for pos in self.descCov.findall(
# ns('CoverageOffering/') +
# ns('domainSet/') +
# ns('temporalDomain/') +
# '{http://www.opengis.net/gml}timePosition'):
# timepositions.append(pos.text)
# return timepositions
# timepositions = property(_getTimePositions, None)
# def _getOtherBoundingBoxes(self):
# ''' incomplete, should return other bounding boxes not in WGS84
# #TODO: find any other bounding boxes. Need to check for gml:EnvelopeWithTimePeriod.'''
# bboxes = []
# if not hasattr(self, 'descCov'):
# self.descCov = self._service.getDescribeCoverage(self.id)
# for envelope in self.descCov.findall(
# ns('CoverageOffering/') +
# ns('domainSet/') +
# ns('spatialDomain/') +
# '{http://www.opengis.net/gml}Envelope'):
# bbox = {}
# bbox['nativeSrs'] = envelope.attrib['srsName']
# gmlpositions = envelope.findall('{http://www.opengis.net/gml}pos')
# lc = gmlpositions[0].text.split()
# uc = gmlpositions[1].text.split()
# bbox['bbox'] = (
# float(lc[0]), float(lc[1]),
# float(uc[0]), float(uc[1])
# )
# bboxes.append(bbox)
# return bboxes
# boundingboxes = property(_getOtherBoundingBoxes, None)
# def _getSupportedCRSProperty(self):
# # gets supported crs info
# crss = []
# for elem in self._service.getDescribeCoverage(self.id).findall(
# ns('CoverageOffering/') + ns('supportedCRSs/') + ns('responseCRSs')):
# for crs in elem.text.split(' '):
# crss.append(Crs(crs))
# for elem in self._service.getDescribeCoverage(self.id).findall(
# ns('CoverageOffering/') + ns('supportedCRSs/') + ns('requestResponseCRSs')):
# for crs in elem.text.split(' '):
# crss.append(Crs(crs))
# for elem in self._service.getDescribeCoverage(self.id).findall(
# ns('CoverageOffering/') + ns('supportedCRSs/') + ns('nativeCRSs')):
# for crs in elem.text.split(' '):
# crss.append(Crs(crs))
# return crss
# supportedCRS = property(_getSupportedCRSProperty, None)
# def _getSupportedFormatsProperty(self):
# # gets supported formats info
# frmts = []
# for elem in self._service.getDescribeCoverage(self.id).findall(
# ns('CoverageOffering/') + ns('supportedFormats/') + ns('formats')):
# frmts.append(elem.text)
# return frmts
# supportedFormats = property(_getSupportedFormatsProperty, None)
# def _getAxisDescriptionsProperty(self):
# # gets any axis descriptions contained in the rangeset
# # (requires a DescribeCoverage call to server).
# axisDescs = []
# for elem in self._service.getDescribeCoverage(self.id).findall(
# ns('CoverageOffering/') +
# ns('rangeSet/') +
# ns('RangeSet/') +
# ns('axisDescription/') +
# ns('AxisDescription')):
# axisDescs.append(AxisDescription(elem)) # create a 'AxisDescription' object.
# return axisDescs
# axisDescriptions = property(_getAxisDescriptionsProperty, None)
# Adding classes to represent gml:grid and gml:rectifiedgrid. One of these is used for the cvg.grid
# property (where cvg is a member of the contents dictionary)
# There is no simple way to convert the offset values in a rectifiedgrid grid to real values without
# CRS understanding, therefore this is beyond the current scope of owslib, so the representation
# here is purely to provide access to the information in the GML.
class Grid(object):
    '''Simple grid class providing axis and extent information for a gml:Grid.

    Attributes (populated from the supplied element, if any):
      dimension  -- number of grid axes (int), or None when no element given
      lowlimits  -- low ordinate strings from gml:limits/gml:GridEnvelope/gml:low
      highlimits -- high ordinate strings from gml:limits/gml:GridEnvelope/gml:high
      axislabels -- texts of the gml:axisName children
    '''
    # GML namespace prefix used by every child-element lookup; previously this
    # URL was repeated inline in each find() path.
    _GML = '{http://www.opengis.net/gml}'

    def __init__(self, grid):
        self.axislabels = []
        self.dimension = None
        self.lowlimits = []
        self.highlimits = []
        if grid is not None:
            # Path down to the envelope holding the low/high corner ordinates.
            envelope = self._GML + 'limits/' + self._GML + 'GridEnvelope/'
            self.dimension = int(grid.get('dimension'))
            # Ordinates are kept as strings, exactly as they appear in the GML.
            self.lowlimits = grid.find(envelope + self._GML + 'low').text.split(' ')
            self.highlimits = grid.find(envelope + self._GML + 'high').text.split(' ')
            for axis in grid.findall(self._GML + 'axisName'):
                self.axislabels.append(axis.text)
class RectifiedGrid(Grid):
    '''Extends Grid with the origin position and offset vectors of a
    gml:RectifiedGrid.  Values are kept as the raw whitespace-split
    GML strings; no CRS interpretation is attempted.'''
    def __init__(self, rectifiedgrid):
        super(RectifiedGrid, self).__init__(rectifiedgrid)
        gml = '{http://www.opengis.net/gml}'
        origin_elem = rectifiedgrid.find(gml + 'origin/' + gml + 'pos')
        self.origin = origin_elem.text.split()
        # One entry per gml:offsetVector child, each a list of ordinate strings.
        self.offsetvectors = [
            vector.text.split()
            for vector in rectifiedgrid.findall(gml + 'offsetVector')
        ]
class AxisDescription(object):
    '''Represents an AxisDescription element, optionally found as part of a
    coverage RangeSet, defining the ordinates of additional dimensions such
    as wavelength bands or pressure levels.

    Attributes:
      name   -- text of the child name element, or None if absent
      label  -- text of the child label element, or None if absent
      values -- texts of the children of the values element
    '''
    def __init__(self, axisdescElem):
        self.name = self.label = None
        self.values = []
        # Iterate the element directly instead of calling getchildren(),
        # which is deprecated and removed from ElementTree in Python 3.9;
        # direct iteration yields the same child elements on Python 2 and 3.
        for elem in axisdescElem:
            if elem.tag == ns('name'):
                self.name = elem.text
            elif elem.tag == ns('label'):
                self.label = elem.text
            elif elem.tag == ns('values'):
                for child in elem:
                    self.values.append(child.text)
| b-cube/pipeline-demo | demo/bcube_owslib/coverage/wcs100.py | Python | mit | 20,709 | [
"NetCDF"
] | b11173853304700b151070d54aef45a5bd3814ecd73a585db4e5d89e8e03ea8a |
# -*- coding: utf-8 -*-
""" Tests for student account views. """
import logging
import re
from unittest import skipUnless
from urllib import urlencode
import mock
import ddt
from django.conf import settings
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.messages.middleware import MessageMiddleware
from django.test import TestCase
from django.test.utils import override_settings
from django.http import HttpRequest
from edx_oauth2_provider.tests.factories import ClientFactory, AccessTokenFactory, RefreshTokenFactory
from edx_rest_api_client import exceptions
from nose.plugins.attrib import attr
from oauth2_provider.models import (
AccessToken as dot_access_token,
RefreshToken as dot_refresh_token
)
from provider.oauth2.models import (
AccessToken as dop_access_token,
RefreshToken as dop_refresh_token
)
from testfixtures import LogCapture
from commerce.models import CommerceConfiguration
from commerce.tests import TEST_API_URL, TEST_API_SIGNING_KEY, factories
from commerce.tests.mocks import mock_get_orders
from course_modes.models import CourseMode
from openedx.core.djangoapps.oauth_dispatch.tests import factories as dot_factories
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin
from openedx.core.djangoapps.user_api.accounts.api import activate_account, create_account
from openedx.core.djangoapps.user_api.accounts import EMAIL_MAX_LENGTH
from openedx.core.djangolib.js_utils import dump_js_escaped_json
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
from student.tests.factories import UserFactory
from student_account.views import account_settings_context, get_user_orders
from third_party_auth.tests.testutil import simulate_running_pipeline, ThirdPartyAuthTestMixin
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from openedx.core.djangoapps.theming.tests.test_util import with_comprehensive_theme_context
LOGGER_NAME = 'audit'
User = get_user_model() # pylint:disable=invalid-name
@ddt.ddt
class StudentAccountUpdateTest(CacheIsolationTestCase, UrlResetMixin):
    """ Tests for the student account views that update the user's account information. """
    USERNAME = u"heisenberg"
    ALTERNATE_USERNAME = u"walt"
    # Passwords deliberately use non-ASCII characters to exercise Unicode handling.
    OLD_PASSWORD = u"ḅḷüëṡḳÿ"
    NEW_PASSWORD = u"🄱🄸🄶🄱🄻🅄🄴"
    OLD_EMAIL = u"walter@graymattertech.com"
    NEW_EMAIL = u"walt@savewalterwhite.com"
    # Number of consecutive bad requests used to trip the reset rate limiter.
    INVALID_ATTEMPTS = 100
    # NOTE(review): INVALID_EMAILS and INVALID_KEY are not referenced by any
    # test in this class — presumably kept for parity with related modules.
    INVALID_EMAILS = [
        None,
        u"",
        u"a",
        "no_domain",
        "no+domain",
        "@",
        "@domain.com",
        "test@no_extension",
        # Long email -- subtract the length of the @domain
        # except for one character (so we exceed the max length limit)
        u"{user}@example.com".format(
            user=(u'e' * (EMAIL_MAX_LENGTH - 11))
        )
    ]
    INVALID_KEY = u"123abc"
    URLCONF_MODULES = ['student_accounts.urls']
    ENABLED_CACHES = ['default']
    def setUp(self):
        """Create and activate an account, then log it in for each test."""
        super(StudentAccountUpdateTest, self).setUp()
        # Create/activate a new account
        activation_key = create_account(self.USERNAME, self.OLD_PASSWORD, self.OLD_EMAIL)
        activate_account(activation_key)
        # Login
        result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
        self.assertTrue(result)
    @skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
    def test_password_change(self):
        """End-to-end reset: request the link, follow it, set a new password,
        and verify the link cannot be reused."""
        # Request a password change while logged in, simulating
        # use of the password reset link from the account page
        response = self._change_password()
        self.assertEqual(response.status_code, 200)
        # Check that an email was sent
        self.assertEqual(len(mail.outbox), 1)
        # Retrieve the activation link from the email body
        email_body = mail.outbox[0].body
        result = re.search(r'(?P<url>https?://[^\s]+)', email_body)
        self.assertIsNot(result, None)
        activation_link = result.group('url')
        # Visit the activation link
        response = self.client.get(activation_link)
        self.assertEqual(response.status_code, 200)
        # Submit a new password and follow the redirect to the success page
        response = self.client.post(
            activation_link,
            # These keys are from the form on the current password reset confirmation page.
            {'new_password1': self.NEW_PASSWORD, 'new_password2': self.NEW_PASSWORD},
            follow=True
        )
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Your password has been reset.")
        # Log the user out to clear session data
        self.client.logout()
        # Verify that the new password can be used to log in
        result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
        self.assertTrue(result)
        # Try reusing the activation link to change the password again
        # Visit the activation link again.
        response = self.client.get(activation_link)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "This password reset link is invalid. It may have been used already.")
        self.client.logout()
        # Verify that the old password cannot be used to log in
        result = self.client.login(username=self.USERNAME, password=self.OLD_PASSWORD)
        self.assertFalse(result)
        # Verify that the new password continues to be valid
        result = self.client.login(username=self.USERNAME, password=self.NEW_PASSWORD)
        self.assertTrue(result)
    @ddt.data(True, False)
    def test_password_change_logged_out(self, send_email):
        """A logged-out reset succeeds with an email and is a 400 without one."""
        # Log the user out
        self.client.logout()
        # Request a password change while logged out, simulating
        # use of the password reset link from the login page
        if send_email:
            response = self._change_password(email=self.OLD_EMAIL)
            self.assertEqual(response.status_code, 200)
        else:
            # Don't send an email in the POST data, simulating
            # its (potentially accidental) omission in the POST
            # data sent from the login page
            response = self._change_password()
            self.assertEqual(response.status_code, 400)
    def test_access_token_invalidation_logged_out(self):
        """A logged-out password reset destroys the user's OAuth2 tokens."""
        self.client.logout()
        user = User.objects.get(email=self.OLD_EMAIL)
        self._create_dop_tokens(user)
        self._create_dot_tokens(user)
        response = self._change_password(email=self.OLD_EMAIL)
        self.assertEqual(response.status_code, 200)
        self.assert_access_token_destroyed(user)
    def test_access_token_invalidation_logged_in(self):
        """A logged-in password reset destroys the user's OAuth2 tokens."""
        user = User.objects.get(email=self.OLD_EMAIL)
        self._create_dop_tokens(user)
        self._create_dot_tokens(user)
        response = self._change_password()
        self.assertEqual(response.status_code, 200)
        self.assert_access_token_destroyed(user)
    def test_password_change_inactive_user(self):
        """Resetting the password of a not-yet-activated account still sends mail."""
        # Log out the user created during test setup
        self.client.logout()
        # Create a second user, but do not activate it
        create_account(self.ALTERNATE_USERNAME, self.OLD_PASSWORD, self.NEW_EMAIL)
        # Send the view the email address tied to the inactive user
        response = self._change_password(email=self.NEW_EMAIL)
        # Expect that the activation email is still sent,
        # since the user may have lost the original activation email.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(mail.outbox), 1)
    def test_password_change_no_user(self):
        """An unknown email returns 200 (no user enumeration) but logs the attempt."""
        # Log out the user created during test setup
        self.client.logout()
        with LogCapture(LOGGER_NAME, level=logging.INFO) as logger:
            # Send the view an email address not tied to any user
            response = self._change_password(email=self.NEW_EMAIL)
            self.assertEqual(response.status_code, 200)
        logger.check((LOGGER_NAME, 'INFO', 'Invalid password reset attempt'))
    def test_password_change_rate_limited(self):
        """Repeated bad reset requests eventually get a 403 from the rate limiter."""
        # Log out the user created during test setup, to prevent the view from
        # selecting the logged-in user's email address over the email provided
        # in the POST data
        self.client.logout()
        # Make many consecutive bad requests in an attempt to trigger the rate limiter
        for __ in xrange(self.INVALID_ATTEMPTS):
            self._change_password(email=self.NEW_EMAIL)
        response = self._change_password(email=self.NEW_EMAIL)
        self.assertEqual(response.status_code, 403)
    @ddt.data(
        ('post', 'password_change_request', []),
    )
    @ddt.unpack
    def test_require_http_method(self, correct_method, url_name, args):
        """Every HTTP method other than the correct one yields 405."""
        wrong_methods = {'get', 'put', 'post', 'head', 'options', 'delete'} - {correct_method}
        url = reverse(url_name, args=args)
        for method in wrong_methods:
            response = getattr(self.client, method)(url)
            self.assertEqual(response.status_code, 405)
    def _change_password(self, email=None):
        """Request to change the user's password. """
        data = {}
        if email:
            data['email'] = email
        return self.client.post(path=reverse('password_change_request'), data=data)
    def _create_dop_tokens(self, user=None):
        """Create dop access token for given user if user provided else for default user."""
        if not user:
            user = User.objects.get(email=self.OLD_EMAIL)
        client = ClientFactory()
        access_token = AccessTokenFactory(user=user, client=client)
        RefreshTokenFactory(user=user, client=client, access_token=access_token)
    def _create_dot_tokens(self, user=None):
        """Create dot access token for given user if user provided else for default user."""
        if not user:
            user = User.objects.get(email=self.OLD_EMAIL)
        application = dot_factories.ApplicationFactory(user=user)
        access_token = dot_factories.AccessTokenFactory(user=user, application=application)
        dot_factories.RefreshTokenFactory(user=user, application=application, access_token=access_token)
    def assert_access_token_destroyed(self, user):
        """Assert all access tokens are destroyed."""
        self.assertFalse(dot_access_token.objects.filter(user=user).exists())
        self.assertFalse(dot_refresh_token.objects.filter(user=user).exists())
        self.assertFalse(dop_access_token.objects.filter(user=user).exists())
        self.assertFalse(dop_refresh_token.objects.filter(user=user).exists())
@attr(shard=3)
@ddt.ddt
class StudentAccountLoginAndRegistrationTest(ThirdPartyAuthTestMixin, UrlResetMixin, ModuleStoreTestCase):
    """ Tests for the student account login and registration pages. """
    USERNAME = "bob"
    EMAIL = "bob@example.com"
    PASSWORD = "password"
    URLCONF_MODULES = ['embargo']
    @mock.patch.dict(settings.FEATURES, {'EMBARGO': True})
    def setUp(self):
        """Enable the three default third-party auth providers for every test."""
        super(StudentAccountLoginAndRegistrationTest, self).setUp()
        # For these tests, three third party auth providers are enabled by default:
        self.configure_google_provider(enabled=True, visible=True)
        self.configure_facebook_provider(enabled=True, visible=True)
        self.configure_dummy_provider(
            visible=True,
            enabled=True,
            icon_class='',
            icon_image=SimpleUploadedFile('icon.svg', '<svg><rect width="50" height="100"/></svg>'),
        )
    @ddt.data(
        ("signin_user", "login"),
        ("register_user", "register"),
    )
    @ddt.unpack
    def test_login_and_registration_form(self, url_name, initial_mode):
        """The combined form opens in the mode matching the requested URL."""
        response = self.client.get(reverse(url_name))
        expected_data = '"initial_mode": "{mode}"'.format(mode=initial_mode)
        self.assertContains(response, expected_data)
    @ddt.data("signin_user", "register_user")
    def test_login_and_registration_form_already_authenticated(self, url_name):
        """An authenticated user is redirected away to the dashboard."""
        # Create/activate a new account and log in
        activation_key = create_account(self.USERNAME, self.PASSWORD, self.EMAIL)
        activate_account(activation_key)
        result = self.client.login(username=self.USERNAME, password=self.PASSWORD)
        self.assertTrue(result)
        # Verify that we're redirected to the dashboard
        response = self.client.get(reverse(url_name))
        self.assertRedirects(response, reverse("dashboard"))
    @ddt.data(
        (None, "signin_user"),
        (None, "register_user"),
        ("edx.org", "signin_user"),
        ("edx.org", "register_user"),
    )
    @ddt.unpack
    def test_login_and_registration_form_signin_preserves_params(self, theme, url_name):
        """Querystring params survive into the post-auth redirect URL,
        with and without a comprehensive theme applied."""
        params = [
            ('course_id', 'edX/DemoX/Demo_Course'),
            ('enrollment_action', 'enroll'),
        ]
        # The response should have a "Sign In" button with the URL
        # that preserves the querystring params
        with with_comprehensive_theme_context(theme):
            response = self.client.get(reverse(url_name), params)
        expected_url = '/login?{}'.format(self._finish_auth_url_param(params + [('next', '/dashboard')]))
        self.assertContains(response, expected_url)
        # Add additional parameters:
        params = [
            ('course_id', 'edX/DemoX/Demo_Course'),
            ('enrollment_action', 'enroll'),
            ('course_mode', CourseMode.DEFAULT_MODE_SLUG),
            ('email_opt_in', 'true'),
            ('next', '/custom/final/destination')
        ]
        # Verify that this parameter is also preserved
        with with_comprehensive_theme_context(theme):
            response = self.client.get(reverse(url_name), params)
        expected_url = '/login?{}'.format(self._finish_auth_url_param(params))
        self.assertContains(response, expected_url)
    @mock.patch.dict(settings.FEATURES, {"ENABLE_THIRD_PARTY_AUTH": False})
    @ddt.data("signin_user", "register_user")
    def test_third_party_auth_disabled(self, url_name):
        """With the feature flag off, no provider data is rendered."""
        response = self.client.get(reverse(url_name))
        self._assert_third_party_auth_data(response, None, None, [])
    @ddt.data(
        ("signin_user", None, None),
        ("register_user", None, None),
        ("signin_user", "google-oauth2", "Google"),
        ("register_user", "google-oauth2", "Google"),
        ("signin_user", "facebook", "Facebook"),
        ("register_user", "facebook", "Facebook"),
        ("signin_user", "dummy", "Dummy"),
        ("register_user", "dummy", "Dummy"),
    )
    @ddt.unpack
    def test_third_party_auth(self, url_name, current_backend, current_provider):
        """Provider data is rendered correctly, with or without a running pipeline."""
        params = [
            ('course_id', 'course-v1:Org+Course+Run'),
            ('enrollment_action', 'enroll'),
            ('course_mode', CourseMode.DEFAULT_MODE_SLUG),
            ('email_opt_in', 'true'),
            ('next', '/custom/final/destination'),
        ]
        # Simulate a running pipeline
        if current_backend is not None:
            pipeline_target = "student_account.views.third_party_auth.pipeline"
            with simulate_running_pipeline(pipeline_target, current_backend):
                response = self.client.get(reverse(url_name), params)
        # Do NOT simulate a running pipeline
        else:
            response = self.client.get(reverse(url_name), params)
        # This relies on the THIRD_PARTY_AUTH configuration in the test settings
        expected_providers = [
            {
                "id": "oa2-dummy",
                "name": "Dummy",
                "iconClass": None,
                "iconImage": settings.MEDIA_URL + "icon.svg",
                "loginUrl": self._third_party_login_url("dummy", "login", params),
                "registerUrl": self._third_party_login_url("dummy", "register", params)
            },
            {
                "id": "oa2-facebook",
                "name": "Facebook",
                "iconClass": "fa-facebook",
                "iconImage": None,
                "loginUrl": self._third_party_login_url("facebook", "login", params),
                "registerUrl": self._third_party_login_url("facebook", "register", params)
            },
            {
                "id": "oa2-google-oauth2",
                "name": "Google",
                "iconClass": "fa-google-plus",
                "iconImage": None,
                "loginUrl": self._third_party_login_url("google-oauth2", "login", params),
                "registerUrl": self._third_party_login_url("google-oauth2", "register", params)
            },
        ]
        self._assert_third_party_auth_data(response, current_backend, current_provider, expected_providers)
    def test_hinted_login(self):
        """A tpa_hint in the next URL is surfaced to the login page as a hint."""
        params = [("next", "/courses/something/?tpa_hint=oa2-google-oauth2")]
        response = self.client.get(reverse('signin_user'), params)
        self.assertContains(response, '"third_party_auth_hint": "oa2-google-oauth2"')
    @override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
    def test_microsite_uses_old_login_page(self):
        """A microsite host is served the legacy login page, not logistration."""
        # Retrieve the login page from a microsite domain
        # and verify that we're served the old page.
        resp = self.client.get(
            reverse("signin_user"),
            HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
        )
        self.assertContains(resp, "Log into your Test Site Account")
        self.assertContains(resp, "login-form")
    def test_microsite_uses_old_register_page(self):
        """A microsite host is served the legacy register page, not logistration."""
        # Retrieve the register page from a microsite domain
        # and verify that we're served the old page.
        resp = self.client.get(
            reverse("register_user"),
            HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME
        )
        self.assertContains(resp, "Register for Test Site")
        self.assertContains(resp, "register-form")
    def test_login_registration_xframe_protected(self):
        """Framing is denied by default and allowed once an LTI provider matches."""
        resp = self.client.get(
            reverse("register_user"),
            {},
            HTTP_REFERER="http://localhost/iframe"
        )
        self.assertEqual(resp['X-Frame-Options'], 'DENY')
        self.configure_lti_provider(name='Test', lti_hostname='localhost', lti_consumer_key='test_key', enabled=True)
        resp = self.client.get(
            reverse("register_user"),
            HTTP_REFERER="http://localhost/iframe"
        )
        self.assertEqual(resp['X-Frame-Options'], 'ALLOW')
    def _assert_third_party_auth_data(self, response, current_backend, current_provider, providers):
        """Verify that third party auth info is rendered correctly in a DOM data attribute. """
        finish_auth_url = None
        if current_backend:
            finish_auth_url = reverse("social:complete", kwargs={"backend": current_backend}) + "?"
        auth_info = {
            "currentProvider": current_provider,
            "providers": providers,
            "secondaryProviders": [],
            "finishAuthUrl": finish_auth_url,
            "errorMessage": None,
        }
        auth_info = dump_js_escaped_json(auth_info)
        expected_data = '"third_party_auth": {auth_info}'.format(
            auth_info=auth_info
        )
        self.assertContains(response, expected_data)
    def _third_party_login_url(self, backend_name, auth_entry, login_params):
        """Construct the login URL to start third party authentication. """
        return u"{url}?auth_entry={auth_entry}&{param_str}".format(
            url=reverse("social:begin", kwargs={"backend": backend_name}),
            auth_entry=auth_entry,
            param_str=self._finish_auth_url_param(login_params),
        )
    def _finish_auth_url_param(self, params):
        """
        Make the next=... URL parameter that indicates where the user should go next.
        >>> _finish_auth_url_param([('next', '/dashboard')])
        '/account/finish_auth?next=%2Fdashboard'
        """
        return urlencode({
            'next': '/account/finish_auth?{}'.format(urlencode(params))
        })
@override_settings(ECOMMERCE_API_URL=TEST_API_URL, ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY)
class AccountSettingsViewTest(ThirdPartyAuthTestMixin, TestCase, ProgramsApiConfigMixin):
    """ Tests for the account settings view. """
    USERNAME = 'student'
    PASSWORD = 'password'
    # Fields expected to be exposed by the account settings page/context.
    FIELDS = [
        'country',
        'gender',
        'language',
        'level_of_education',
        'password',
        'year_of_birth',
        'preferred_language',
        'time_zone',
    ]
    @mock.patch("django.conf.settings.MESSAGE_STORAGE", 'django.contrib.messages.storage.cookie.CookieStorage')
    def setUp(self):
        """Create a logged-in user, commerce config, auth providers, and a request
        carrying a duplicate-provider error message."""
        super(AccountSettingsViewTest, self).setUp()
        self.user = UserFactory.create(username=self.USERNAME, password=self.PASSWORD)
        CommerceConfiguration.objects.create(cache_ttl=10, enabled=True)
        self.client.login(username=self.USERNAME, password=self.PASSWORD)
        self.request = HttpRequest()
        self.request.user = self.user
        # For these tests, two third party auth providers are enabled by default:
        self.configure_google_provider(enabled=True, visible=True)
        self.configure_facebook_provider(enabled=True, visible=True)
        # Python-social saves auth failure notifications in Django messages.
        # See pipeline.get_duplicate_provider() for details.
        self.request.COOKIES = {}
        MessageMiddleware().process_request(self.request)
        messages.error(self.request, 'Facebook is already in use.', extra_tags='Auth facebook')
    def test_context(self):
        """The template context exposes the API URLs, fields, and provider info."""
        context = account_settings_context(self.request)
        user_accounts_api_url = reverse("accounts_api", kwargs={'username': self.user.username})
        self.assertEqual(context['user_accounts_api_url'], user_accounts_api_url)
        user_preferences_api_url = reverse('preferences_api', kwargs={'username': self.user.username})
        self.assertEqual(context['user_preferences_api_url'], user_preferences_api_url)
        for attribute in self.FIELDS:
            self.assertIn(attribute, context['fields'])
        self.assertEqual(
            context['user_accounts_api_url'], reverse("accounts_api", kwargs={'username': self.user.username})
        )
        self.assertEqual(
            context['user_preferences_api_url'], reverse('preferences_api', kwargs={'username': self.user.username})
        )
        self.assertEqual(context['duplicate_provider'], 'facebook')
        self.assertEqual(context['auth']['providers'][0]['name'], 'Facebook')
        self.assertEqual(context['auth']['providers'][1]['name'], 'Google')
    def test_view(self):
        """
        Test that all fields are visible
        """
        view_path = reverse('account_settings')
        response = self.client.get(path=view_path)
        for attribute in self.FIELDS:
            self.assertIn(attribute, response.content)
    def test_header_with_programs_listing_enabled(self):
        """
        Verify that tabs header will be shown while program listing is enabled.
        """
        self.create_programs_config(program_listing_enabled=True)
        view_path = reverse('account_settings')
        response = self.client.get(path=view_path)
        self.assertContains(response, '<li class="tab-nav-item">')
    def test_header_with_programs_listing_disabled(self):
        """
        Verify that nav header will be shown while program listing is disabled.
        """
        self.create_programs_config(program_listing_enabled=False)
        view_path = reverse('account_settings')
        response = self.client.get(path=view_path)
        self.assertContains(response, '<li class="item nav-global-01">')
    def test_commerce_order_detail(self):
        """A completed order is summarized with number, price, title, date, and receipt URL."""
        with mock_get_orders():
            order_detail = get_user_orders(self.user)
        user_order = mock_get_orders.default_response['results'][0]
        expected = [
            {
                'number': user_order['number'],
                'price': user_order['total_excl_tax'],
                'title': user_order['lines'][0]['title'],
                'order_date': 'Jan 01, 2016',
                'receipt_url': '/commerce/checkout/receipt/?orderNum=' + user_order['number']
            }
        ]
        self.assertEqual(order_detail, expected)
    def test_commerce_order_detail_exception(self):
        """An ecommerce API 404 yields an empty order history rather than an error."""
        with mock_get_orders(exception=exceptions.HttpNotFoundError):
            order_detail = get_user_orders(self.user)
        self.assertEqual(order_detail, [])
    def test_incomplete_order_detail(self):
        """Orders with status 'Incomplete' are excluded from the history."""
        response = {
            'results': [
                factories.OrderFactory(
                    status='Incomplete',
                    lines=[
                        factories.OrderLineFactory(
                            product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory()])
                        )
                    ]
                )
            ]
        }
        with mock_get_orders(response=response):
            order_detail = get_user_orders(self.user)
        self.assertEqual(order_detail, [])
    def test_honor_course_order_detail(self):
        """Orders for honor-certificate courses are excluded from the history."""
        response = {
            'results': [
                factories.OrderFactory(
                    lines=[
                        factories.OrderLineFactory(
                            product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory(
                                name='certificate_type',
                                value='honor'
                            )])
                        )
                    ]
                )
            ]
        }
        with mock_get_orders(response=response):
            order_detail = get_user_orders(self.user)
        self.assertEqual(order_detail, [])
    def test_order_history_with_no_product(self):
        """Order lines missing a product are skipped; remaining lines still appear."""
        response = {
            'results': [
                factories.OrderFactory(
                    lines=[
                        factories.OrderLineFactory(
                            product=None
                        ),
                        factories.OrderLineFactory(
                            product=factories.ProductFactory(attribute_values=[factories.ProductAttributeFactory(
                                name='certificate_type',
                                value='verified'
                            )])
                        )
                    ]
                )
            ]
        }
        with mock_get_orders(response=response):
            order_detail = get_user_orders(self.user)
        self.assertEqual(len(order_detail), 1)
@override_settings(SITE_NAME=settings.MICROSITE_LOGISTRATION_HOSTNAME)
class MicrositeLogistrationTests(TestCase):
    """
    Validate that microsites can display the combined login/registration
    (logistration) page.
    """
    # Markup emitted only by the new-style combined login/registration page.
    CONTAINER_MARKUP = '<div id="login-and-registration-container"'

    def _get_page(self, url_name, hostname):
        """Fetch the named view as though served from the given microsite host."""
        return self.client.get(reverse(url_name), HTTP_HOST=hostname)

    def test_login_page(self):
        """
        The specialized microsite serves the logistration page for sign-in.
        """
        response = self._get_page('signin_user', settings.MICROSITE_LOGISTRATION_HOSTNAME)
        self.assertEqual(response.status_code, 200)
        self.assertIn(self.CONTAINER_MARKUP, response.content)

    def test_registration_page(self):
        """
        The specialized microsite serves the logistration page for registration.
        """
        response = self._get_page('register_user', settings.MICROSITE_LOGISTRATION_HOSTNAME)
        self.assertEqual(response.status_code, 200)
        self.assertIn(self.CONTAINER_MARKUP, response.content)

    @override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
    def test_no_override(self):
        """
        A microsite without the override keeps the old-style login/registration.
        """
        for url_name in ('signin_user', 'register_user'):
            response = self._get_page(url_name, settings.MICROSITE_TEST_HOSTNAME)
            self.assertEqual(response.status_code, 200)
            self.assertNotIn(self.CONTAINER_MARKUP, response.content)
| itsjeyd/edx-platform | lms/djangoapps/student_account/test/test_views.py | Python | agpl-3.0 | 28,886 | [
"VisIt"
] | dfc36a0dfde30050a0e5a32c149ffa55504ba7df23263be9841f924e5fadba05 |
# -*- coding: utf-8 -*-
"""
Acceptance tests for CMS Video Module.
"""
import os
from mock import patch
from nose.plugins.attrib import attr
from unittest import skipIf
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.studio.video.video import VideoComponentPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ..helpers import UniqueCourseTest, is_youtube_available, YouTubeStubConfig
@skipIf(is_youtube_available() is False, 'YouTube is not available!')
class CMSVideoBaseTest(UniqueCourseTest):
    """
    CMS Video Module Base Test Class.

    Provides page objects, a course fixture, and helper methods shared by
    the Studio video acceptance tests.
    """
    def setUp(self):
        """
        Initialization of pages and course fixture for tests
        """
        super(CMSVideoBaseTest, self).setUp()
        self.video = VideoComponentPage(self.browser)
        # This will be initialized later
        self.unit_page = None
        self.outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        self.course_fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        # Asset filenames to upload with the fixture (e.g. subtitle files).
        self.assets = []
        # Always restore the YouTube stub configuration after each test.
        self.addCleanup(YouTubeStubConfig.reset)
    def _create_course_unit(self, youtube_stub_config=None, subtitles=False):
        """
        Create a Studio Video Course Unit and Navigate to it.
        Arguments:
            youtube_stub_config (dict): optional settings applied to the
                YouTube stub server before the course is installed.
            subtitles (bool): when True, upload the test subtitle asset.
        """
        if youtube_stub_config:
            YouTubeStubConfig.configure(youtube_stub_config)
        if subtitles:
            self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
        self.navigate_to_course_unit()
    def _create_video(self):
        """
        Create Xblock Video Component.
        """
        self.video.create_video()
        video_xblocks = self.video.xblocks()
        # Total video xblock components count should equal 2.
        # Why 2? One video component is created by default for each test. Please see
        # test_studio_video_module.py:CMSVideoTest._create_course_unit
        # And we are creating second video component here.
        self.assertTrue(video_xblocks == 2)
    def _install_course_fixture(self):
        """
        Prepare for tests by creating a course with a section, subsection, and unit.
        Performs the following:
            Create a course with a section, subsection, and unit
            Create a user and make that user a course author
            Log the user into studio
        """
        if self.assets:
            self.course_fixture.add_asset(self.assets)
        # Create course with Video component
        self.course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        XBlockFixtureDesc('video', 'Video')
                    )
                )
            )
        ).install()
        # Auto login and register the course
        AutoAuthPage(
            self.browser,
            staff=False,
            username=self.course_fixture.user.get('username'),
            email=self.course_fixture.user.get('email'),
            password=self.course_fixture.user.get('password')
        ).visit()
    def _navigate_to_course_unit_page(self):
        """
        Open the course from the dashboard and expand the section and subsection and click on the Unit link
        The end result is the page where the user is editing the newly created unit
        """
        # Visit Course Outline page
        self.outline.visit()
        # Visit Unit page
        self.unit_page = self.outline.section('Test Section').subsection('Test Subsection').expand_subsection().unit(
            'Test Unit').go_to()
        self.video.wait_for_video_component_render()
    def navigate_to_course_unit(self):
        """
        Install the course with required components and navigate to course unit page
        """
        self._install_course_fixture()
        self._navigate_to_course_unit_page()
    def edit_component(self, xblock_index=1):
        """
        Open component Edit Dialog for first component on page.
        Arguments:
            xblock_index: number starting from 1 (0th entry is the unit page itself)
        """
        self.unit_page.xblocks[xblock_index].edit()
    def open_advanced_tab(self):
        """
        Open components advanced tab.
        """
        # The 0th entry is the unit page itself.
        self.unit_page.xblocks[1].open_advanced_tab()
    def open_basic_tab(self):
        """
        Open components basic tab.
        """
        # The 0th entry is the unit page itself.
        self.unit_page.xblocks[1].open_basic_tab()
    def save_unit_settings(self):
        """
        Save component settings.
        """
        # The 0th entry is the unit page itself.
        self.unit_page.xblocks[1].save_settings()
@attr('shard_4')
class CMSVideoTest(CMSVideoBaseTest):
"""
CMS Video Test Class
"""
    def test_youtube_stub_proxy(self):
        """
        Scenario: YouTube stub server proxies YouTube API correctly
        Given youtube stub server proxies YouTube API
        And I have created a Video component
        Then I can see video button "play"
        And I click video button "play"
        Then I can see video button "pause"
        """
        # With the API un-blocked, the player should initialize fully.
        self._create_course_unit(youtube_stub_config={'youtube_api_blocked': False})
        self.assertTrue(self.video.is_button_shown('play'))
        self.video.click_player_button('play')
        # Wait until playback actually starts before checking the pause control.
        self.video.wait_for_state('playing')
        self.assertTrue(self.video.is_button_shown('pause'))
    def test_youtube_stub_blocks_youtube_api(self):
        """
        Scenario: YouTube stub server can block YouTube API
        Given youtube stub server blocks YouTube API
        And I have created a Video component
        Then I do not see video button "play"
        """
        # With the API blocked, the player never renders its play control.
        self._create_course_unit(youtube_stub_config={'youtube_api_blocked': True})
        self.assertFalse(self.video.is_button_shown('play'))
    def test_autoplay_is_disabled(self):
        """
        Scenario: Autoplay is disabled in Studio
        Given I have created a Video component
        Then when I view the video it does not have autoplay enabled
        """
        # Studio previews must never start playback automatically.
        self._create_course_unit()
        self.assertFalse(self.video.is_autoplay_enabled)
    def test_video_creation_takes_single_click(self):
        """
        Scenario: Creating a video takes a single click
        And creating a video takes a single click
        """
        self._create_course_unit()
        # This will create a video by doing a single click and then ensure that video is created
        self._create_video()
    def test_captions_hidden_correctly(self):
        """
        Scenario: Captions are hidden correctly
        Given I have created a Video component with subtitles
        And I have hidden captions
        Then when I view the video it does not show the captions
        """
        # subtitles=True uploads the test .sjson asset with the fixture.
        self._create_course_unit(subtitles=True)
        self.video.hide_captions()
        self.assertFalse(self.video.is_captions_visible())
def test_video_controls_shown_correctly(self):
"""
Scenario: Video controls for all videos show correctly
Given I have created two Video components
And first is private video
When I reload the page
Then video controls for all videos are visible
And the error message isn't shown
"""
self._create_course_unit(youtube_stub_config={'youtube_api_private_video': True})
self.video.create_video()
# change id of first default video
self.edit_component(1)
self.open_advanced_tab()
self.video.set_field_value('YouTube ID', 'sampleid123')
self.save_unit_settings()
# again open unit page and check that video controls show for both videos
self._navigate_to_course_unit_page()
self.assertTrue(self.video.is_controls_visible())
# verify that the error message isn't shown by default
self.assertFalse(self.video.is_error_message_shown)
def test_captions_shown_correctly(self):
"""
Scenario: Captions are shown correctly
Given I have created a Video component with subtitles
Then when I view the video it does show the captions
"""
self._create_course_unit(subtitles=True)
self.assertTrue(self.video.is_captions_visible())
def test_captions_toggling(self):
"""
Scenario: Captions are toggled correctly
Given I have created a Video component with subtitles
And I have toggled captions
Then when I view the video it does show the captions
"""
self._create_course_unit(subtitles=True)
self.video.click_player_button('transcript_button')
self.assertFalse(self.video.is_captions_visible())
self.video.click_player_button('transcript_button')
self.assertTrue(self.video.is_captions_visible())
def test_caption_line_focus(self):
"""
Scenario: When enter key is pressed on a caption, an outline shows around it
Given I have created a Video component with subtitles
And Make sure captions are opened
Then I focus on first caption line
And I see first caption line has focused
"""
self._create_course_unit(subtitles=True)
self.video.show_captions()
self.video.focus_caption_line(2)
self.assertTrue(self.video.is_caption_line_focused(2))
def test_slider_range_works(self):
"""
Scenario: When start and end times are specified, a range on slider is shown
Given I have created a Video component with subtitles
And Make sure captions are closed
And I edit the component
And I open tab "Advanced"
And I set value "00:00:12" to the field "Video Start Time"
And I set value "00:00:24" to the field "Video Stop Time"
And I save changes
And I click video button "play"
Then I see a range on slider
"""
self._create_course_unit(subtitles=True)
self.video.hide_captions()
self.edit_component()
self.open_advanced_tab()
self.video.set_field_value('Video Start Time', '00:00:12')
self.video.set_field_value('Video Stop Time', '00:00:24')
self.save_unit_settings()
self.video.click_player_button('play')
@attr('a11y')
class CMSVideoA11yTest(CMSVideoBaseTest):
    """
    CMS Video Accessibility Test Class

    Runs an accessibility audit scoped to the video player after a
    transcript has been uploaded and captions are visible.
    """

    def setUp(self):
        # Pick the browser from the environment, defaulting to firefox.
        browser = os.environ.get('SELENIUM_BROWSER', 'firefox')

        # the a11y tests run in CI under phantomjs which doesn't
        # support html5 video or flash player, so the video tests
        # don't work in it. We still want to be able to run these
        # tests in CI, so override the browser setting if it is
        # phantomjs.
        if browser == 'phantomjs':
            browser = 'firefox'
        # Patch the environment only for the duration of base-class setup so
        # the override does not leak into other tests.
        with patch.dict(os.environ, {'SELENIUM_BROWSER': browser}):
            super(CMSVideoA11yTest, self).setUp()

    def test_video_player_a11y(self):
        # we're loading a shorter transcript to ensure both skip links are available
        self._create_course_unit(subtitles=True)
        self.edit_component()
        self.video.upload_transcript('english_single_transcript.srt')
        self.save_unit_settings()
        self.video.wait_for_captions()
        self.assertTrue(self.video.is_captions_visible())

        # limit the scope of the audit to the video player only.
        self.outline.a11y_audit.config.set_scope(
            include=["div.video"]
        )
        self.outline.a11y_audit.check_for_accessibility_errors()
| cecep-edu/edx-platform | common/test/acceptance/tests/video/test_studio_video_module.py | Python | agpl-3.0 | 12,152 | [
"VisIt"
] | f1a3b4a1056337b19ba04b4c588a893a6367b6bdaa41e506280db0405eab359b |
import ovito
import math
from ovito.io import import_file
from ovito.modifiers import *
import subprocess
import sys

# Visualise the diamond-structured (ice-like) atoms of a configuration in
# OVITO: read the box size from the input file's header, convert the file to
# xyz with the external ~/bin/convertxyz tool, rebuild the simulation cell,
# keep only atoms identified as diamond structure, and draw bonds.
#
# Usage: display_water_cell.py <file>

if len(sys.argv)!=2:
    print("%s [file]" % (sys.argv[0]))
    sys.exit()

# The box lengths are in columns 2-4 of the first line of the input file.
# Use a context manager so the handle is closed even on a parse error
# (the original opened the file and closed it manually).
with open(sys.argv[1], "r") as pfile:
    line = pfile.readline()
box_x = float(line.split()[2])
box_y = float(line.split()[3])
box_z = float(line.split()[4])

# Convert the input to xyz format. The shell is needed for the output
# redirection; the command string was previously built twice (the unused
# variable `stringa` duplicated it), so build it once and reuse it.
command = "~/bin/convertxyz " + sys.argv[1] + " > xxx.xyz"
subprocess.Popen(command, shell=True).wait()

node = import_file("xxx.xyz", columns=["Position.X", "Position.Y", "Position.Z"])

# Replace the cell guessed by the xyz importer with the real orthorhombic
# box read from the header (off-diagonal terms zeroed explicitly).
cell = node.source.cell
realcell = cell.matrix.copy()
realcell[2,0] = 0
realcell[1,0] = 0
realcell[0,0] = box_x
realcell[0,1] = 0
realcell[2,1] = 0
realcell[1,1] = box_y
realcell[0,2] = 0
realcell[1,2] = 0
realcell[2,2] = box_z
cell.matrix = realcell

node.add_to_scene()
node.modifiers.append(IdentifyDiamondModifier())
# Select everything NOT classified by the diamond-identification modifier
# (StructureType==0) and delete it, keeping only diamond-like atoms.
#node.modifiers.append(SelectExpressionModifier(expression="(StructureType!=1) && (StructureType!=4)"))
node.modifiers.append(SelectExpressionModifier(expression="StructureType==0"))
node.modifiers.append(DeleteSelectedParticlesModifier())

# Draw bonds between the remaining atoms.
legami = CreateBondsModifier()
legami.cutoff = 1.4
legami.bonds_display.width = 0.12
node.modifiers.append(legami)
#pos = node.source.particle_properties.position # ParticleProperty storing the positions
#pos.display.shape = ParticleDisplay.Shape.Square
| russojohn/watermodeling | scripts/display_water_cell.py | Python | gpl-3.0 | 1,367 | [
"OVITO"
] | d8ecaf614ca8b462aded9a9973a782dffe993f110491ca156183c58e114afffc |
""" This tests only need the PilotAgentsDB, and connects directly to it
Suggestion: for local testing, run this with::
python -m pytest -c ../pytest.ini -vv tests/Integration/WorkloadManagementSystem/Test_PilotAgentsDB.py
"""
# pylint: disable=wrong-import-position
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()

from DIRAC import gLogger
from DIRAC.WorkloadManagementSystem.DB.PilotAgentsDB import PilotAgentsDB

gLogger.setLevel('DEBUG')

# Module-level DB handle shared by every test here; it connects straight to
# the PilotAgentsDB (no service layer involved).
paDB = PilotAgentsDB()
def test_basic():
    """ usual insert/verify

    Inserts a pilot reference into the DB and then removes it again,
    checking that both operations report success.
    """
    res = paDB.addPilotTQReference(['pilotRef'], 123, 'ownerDN', 'ownerGroup',)
    assert res['OK'] is True

    # Verify the cleanup too: the original discarded this result, so a
    # failed delete would silently leave test data behind for the next run.
    res = paDB.deletePilot('pilotRef')
    assert res['OK'] is True

    # FIXME: to expand...
| chaen/DIRAC | tests/Integration/WorkloadManagementSystem/Test_PilotAgentsDB.py | Python | gpl-3.0 | 725 | [
"DIRAC"
] | 7a1be064339899b052094f7813d73bc1856e714495131b441f9bbaaa8d10e4b1 |
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2007 STFC <http://www.stfc.ac.uk>
#
# Authors :
# Oliver Clements <olcl@pml.ac.uk>
#
# Contact email: olcl@pml.ac.uk
# =============================================================================
# !!! NOTE: Does not conform to new interfaces yet #################
from owslib.coverage.wcsBase import WCSBase, WCSCapabilitiesReader, ServiceException
from owslib.ows import (
OwsCommon,
ServiceIdentification,
ServiceProvider,
OperationsMetadata,
)
from urllib.parse import urlencode
from owslib.util import openURL, testXMLValue
from owslib.etree import etree
from owslib.crs import Crs
import os
import errno
import dateutil.parser as parser
from datetime import timedelta
import logging
from owslib.util import log, datetime_from_ansi, datetime_from_iso, param_list_to_url_string
# function to save writing out WCS namespace in full each time
def ns(tag):
    """Return *tag* qualified with the OWS 2.0 namespace (Clark notation)."""
    prefix = "{http://www.opengis.net/ows/2.0}"
    return prefix + tag
def nsWCS2(tag):
    """Return *tag* qualified with the WCS 2.0 namespace (Clark notation)."""
    prefix = "{http://www.opengis.net/wcs/2.0}"
    return prefix + tag
class WebCoverageService_2_0_1(WCSBase):
    """Abstraction for OGC Web Coverage Service (WCS), version 2.0.1

    Implements IWebCoverageService.
    """

    def __getitem__(self, name):
        """ check contents dictionary to allow dict like access to service layers"""
        if name in list(self.__getattribute__("contents").keys()):
            return self.__getattribute__("contents")[name]
        else:
            raise KeyError("No content named %s" % name)

    def __init__(self, url, xml, cookies, auth=None, timeout=30, headers=None):
        """Build the service description from *xml* or by fetching it from *url*.

        :param url: service endpoint used for subsequent requests
        :param xml: capabilities document as a string; when falsy, the
            document is fetched from *url* instead
        :param cookies: cookies forwarded on every request
        :param auth: authentication object passed to the base class
        :param timeout: timeout in seconds for the capabilities fetch
        :param headers: extra HTTP headers for every request
        :raises ServiceException: if the capabilities contain a
            ServiceException element
        """
        super(WebCoverageService_2_0_1, self).__init__(auth=auth, headers=headers)
        self.version = "2.0.1"
        self.url = url
        self.cookies = cookies
        self.timeout = timeout
        self.ows_common = OwsCommon(version="2.0.1")
        # initialize from saved capability document or access the server
        reader = WCSCapabilitiesReader(self.version, self.cookies, self.auth, headers=self.headers)
        if xml:
            self._capabilities = reader.readString(xml)
        else:
            self._capabilities = reader.read(self.url, self.timeout)

        # check for exceptions
        se = self._capabilities.find("ServiceException")
        if se is not None:
            err_message = str(se.text).strip()
            raise ServiceException(err_message, xml)

        # serviceIdentification metadata
        subelem = self._capabilities.find(ns("ServiceIdentification"))
        self.identification = ServiceIdentification(
            subelem, namespace=self.ows_common.namespace
        )

        # serviceProvider metadata
        serviceproviderelem = self._capabilities.find(ns("ServiceProvider"))
        self.provider = ServiceProvider(
            serviceproviderelem, namespace=self.ows_common.namespace
        )

        # serviceOperations metadata (everything except ExtendedCapabilities)
        self.operations = []
        for elem in self._capabilities.find(ns("OperationsMetadata"))[:]:
            if elem.tag != ns("ExtendedCapabilities"):
                self.operations.append(
                    OperationsMetadata(elem, namespace=self.ows_common.namespace)
                )

        # serviceContents metadata: one ContentMetadata per CoverageSummary,
        # keyed by coverage id
        self.contents = {}
        for elem in self._capabilities.findall(
            nsWCS2("Contents/") + nsWCS2("CoverageSummary")
        ):
            cm = ContentMetadata(elem, self)
            self.contents[cm.id] = cm

        # exceptions
        self.exceptions = [
            f.text for f in self._capabilities.findall("Capability/Exception/Format")
        ]

    def items(self):
        """supports dict-like items() access"""
        items = []
        for item in self.contents:
            items.append((item, self.contents[item]))
        return items

    def getCoverage(
        self,
        identifier=None,
        bbox=None,
        time=None,
        format=None,
        subsets=None,
        resolutions=None,
        sizes=None,
        crs=None,
        width=None,
        height=None,
        resx=None,
        resy=None,
        resz=None,
        parameter=None,
        method="Get",
        timeout=30,
        **kwargs
    ):
        """Request and return a coverage from the WCS as a file-like object

        note: additional **kwargs helps with multi-version implementation
        core keyword arguments should be supported cross version
        example:
        cvg=wcs.getCoverage(identifier=['TuMYrRQ4'], timeSequence=['2792-06-01T00:00:00.0'], bbox=(-112,36,-106,41),
        format='cf-netcdf')

        is equivalent to:
        http://myhost/mywcs?SERVICE=WCS&REQUEST=GetCoverage&IDENTIFIER=TuMYrRQ4&VERSION=1.1.0&BOUNDINGBOX=-180,-90,180,90&TIME=2792-06-01T00:00:00.0&FORMAT=cf-netcdf

        example 2.0.1 URL
        http://earthserver.pml.ac.uk/rasdaman/ows?&SERVICE=WCS&VERSION=2.0.1&REQUEST=GetCoverage
        &COVERAGEID=V2_monthly_CCI_chlor_a_insitu_test&SUBSET=Lat(40,50)&SUBSET=Long(-10,0)&SUBSET=ansi(144883,145000)&FORMAT=application/netcdf

        cvg=wcs.getCoverage(identifier=['myID'], format='application/netcdf', subsets=[('axisName',min,max),
        ('axisName',min,max),('axisName',min,max)])
        """
        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                "WCS 2.0.1 DEBUG: Parameters passed to GetCoverage: identifier=%s, bbox=%s, time=%s, format=%s, crs=%s, width=%s, height=%s, resx=%s, resy=%s, resz=%s, parameter=%s, method=%s, other_arguments=%s"  # noqa
                % (
                    identifier,
                    bbox,
                    time,
                    format,
                    crs,
                    width,
                    height,
                    resx,
                    resy,
                    resz,
                    parameter,
                    method,
                    str(kwargs),
                )
            )

        # Use the GetCoverage URL advertised for the requested HTTP method,
        # falling back to the service endpoint if none is advertised.
        try:
            base_url = next(
                (
                    m.get("url")
                    for m in self.getOperationByName("GetCoverage").methods
                    if m.get("type").lower() == method.lower()
                )
            )
        except StopIteration:
            base_url = self.url

        log.debug("WCS 2.0.1 DEBUG: base url of server: %s" % base_url)

        request = {"version": self.version, "request": "GetCoverage", "service": "WCS"}
        # Only the first identifier is used in a 2.0.1 request.
        assert len(identifier) > 0
        request["CoverageID"] = identifier[0]

        if crs:
            request["crs"] = crs
        request["format"] = format
        if width:
            request["width"] = width
        if height:
            request["height"] = height

        # anything else e.g. vendor specific parameters must go through kwargs
        if kwargs:
            for kw in kwargs:
                request[kw] = kwargs[kw]

        # encode and request; SUBSET/RESOLUTION/SIZE parameters are appended
        # after urlencode because they may repeat.
        data = urlencode(request)
        if subsets:
            data += param_list_to_url_string(subsets, 'subset')
        if resolutions:
            log.debug('Adding vendor-specific RESOLUTION parameter.')
            data += param_list_to_url_string(resolutions, 'resolution')
        if sizes:
            log.debug('Adding vendor-specific SIZE parameter.')
            data += param_list_to_url_string(sizes, 'size')
        log.debug("WCS 2.0.1 DEBUG: Second part of URL: %s" % data)

        u = openURL(base_url, data, method, self.cookies, auth=self.auth, timeout=timeout, headers=self.headers)
        return u

    def getOperationByName(self, name):
        """Return a named operation item."""
        for item in self.operations:
            if item.name == name:
                return item
        raise KeyError("No operation named %s" % name)
class ContentMetadata(object):
    """
    Implements IContentMetadata

    Describes one coverage offered by the service; most detail is fetched
    lazily through DescribeCoverage requests and exposed as properties.
    """

    def __init__(self, elem, service):
        """Initialize. service is required so that describeCoverage requests may be made"""
        # TODO - examine the parent for bounding box info.
        self._elem = elem
        self._service = service
        self.id = elem.find(nsWCS2("CoverageId")).text
        self.title = testXMLValue(elem.find(ns("label")))
        self.abstract = testXMLValue(elem.find(ns("description")))
        self.keywords = [
            f.text for f in elem.findall(ns("keywords") + "/" + ns("keyword"))
        ]
        self.boundingBox = None  # needed for iContentMetadata harmonisation
        self.boundingBoxWGS84 = None
        b = elem.find(ns("lonLatEnvelope"))
        if b is not None:
            # Envelope is two gml:pos corners: lower then upper.
            gmlpositions = b.findall("{http://www.opengis.net/gml}pos")
            lc = gmlpositions[0].text
            uc = gmlpositions[1].text
            self.boundingBoxWGS84 = (
                float(lc.split()[0]),
                float(lc.split()[1]),
                float(uc.split()[0]),
                float(uc.split()[1]),
            )
        # others not used but needed for iContentMetadata harmonisation
        self.styles = None
        self.crsOptions = None
        self.defaulttimeposition = None

    # grid is either a gml:Grid or a gml:RectifiedGrid if supplied as part of the DescribeCoverage response.
    def _getGrid(self):
        # Lazily fetch and cache the DescribeCoverage response, then parse
        # whichever grid flavour the server returned.
        if not hasattr(self, "descCov"):
            self.descCov = self._service.getDescribeCoverage(self.id)
        gridelem = self.descCov.find(
            nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}domainSet/" + "{http://www.opengis.net/gml/3.3/rgrid}ReferenceableGridByVectors"  # noqa
        )
        if gridelem is not None:
            grid = ReferenceableGridByVectors(gridelem)
        else:
            # HERE I LOOK FOR RECTIFIEDGRID
            gridelem = self.descCov.find(
                nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}domainSet/" + "{http://www.opengis.net/gml/3.2}RectifiedGrid"  # noqa
            )
            grid = RectifiedGrid(gridelem)
        return grid

    grid = property(_getGrid, None)

    # timelimits are the start/end times, timepositions are all timepoints. WCS servers can declare one
    # or both or neither of these.
    # in wcs 2.0 this can be gathered from the Envelope tag
    def _getTimeLimits(self):
        # timepoints, timelimits=[],[]
        # b=self._elem.find(ns('lonLatEnvelope'))
        # if b is not None:
        #     timepoints=b.findall('{http://www.opengis.net/gml}timePosition')
        # else:
        #     #have to make a describeCoverage request...
        #     if not hasattr(self, 'descCov'):
        #         self.descCov=self._service.getDescribeCoverage(self.id)
        #     for pos in self.descCov.findall(
        #         ns('CoverageOffering/')+ns('domainSet/')+ns('temporalDomain/')+'{http://www.opengis.net/gml}timePosition'):
        #         timepoints.append(pos)
        # if timepoints:
        #     timelimits=[timepoints[0].text,timepoints[1].text]
        # First and last known time positions; raises if the coverage has no
        # time axis (timepositions is None in that case).
        return [self.timepositions[0], self.timepositions[-1]]

    timelimits = property(_getTimeLimits, None)

    def _getTimePositions(self):
        # Returns a list of datetimes, or None when the coverage has no time
        # axis. Irregular axes list their coefficients explicitly; regular
        # axes are reconstructed from origin/offset/limits.
        timepositions = []
        if not hasattr(self, "descCov"):
            self.descCov = self._service.getDescribeCoverage(self.id)
        gridelem = self.descCov.find(
            nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}domainSet/" + "{http://www.opengis.net/gml/3.3/rgrid}ReferenceableGridByVectors"  # noqa
        )
        if gridelem is not None:
            # irregular time axis
            cooeficients = []
            grid_axes = gridelem.findall(
                "{http://www.opengis.net/gml/3.3/rgrid}generalGridAxis"
            )
            for elem in grid_axes:
                if elem.find(
                    "{http://www.opengis.net/gml/3.3/rgrid}GeneralGridAxis/{http://www.opengis.net/gml/3.3/rgrid}gridAxesSpanned"  # noqa
                ).text in ["ansi", "unix"]:
                    cooeficients = elem.find(
                        "{http://www.opengis.net/gml/3.3/rgrid}GeneralGridAxis/{http://www.opengis.net/gml/3.3/rgrid}coefficients"  # noqa
                    ).text.split(" ")
            for x in cooeficients:
                x = x.replace('"', "")
                t_date = datetime_from_iso(x)
                timepositions.append(t_date)
        else:
            # regular time
            if len(self.grid.origin) > 2:
                # Third axis of the rectified grid is assumed to be time,
                # with the step expressed in days.
                t_grid = self.grid
                t_date = t_grid.origin[2]
                start_pos = parser.parse(t_date, fuzzy=True)
                step = float(t_grid.offsetvectors[2][2])
                start_pos = start_pos + timedelta(days=(step / 2))
                no_steps = int(t_grid.highlimits[2])
                for x in range(no_steps):
                    t_pos = start_pos + timedelta(days=(step * x))
                    # t_date = datetime_from_ansi(t_pos)
                    # t_date = t_pos.isoformat()
                    timepositions.append(t_pos)
            else:
                # no time axis
                timepositions = None
        return timepositions

    timepositions = property(_getTimePositions, None)

    def _getOtherBoundingBoxes(self):
        """ incomplete, should return other bounding boxes not in WGS84
            #TODO: find any other bounding boxes. Need to check for gml:EnvelopeWithTimePeriod."""
        bboxes = []
        if not hasattr(self, "descCov"):
            self.descCov = self._service.getDescribeCoverage(self.id)
        for envelope in self.descCov.findall(
            nsWCS2("CoverageDescription/") + "{http://www.opengis.net/gml/3.2}boundedBy/" + "{http://www.opengis.net/gml/3.2}Envelope"  # noqa
        ):
            # One dict per envelope: the native CRS plus its corner pairs.
            bbox = {}
            bbox["nativeSrs"] = envelope.attrib["srsName"]
            lc = envelope.find("{http://www.opengis.net/gml/3.2}lowerCorner")
            lc = lc.text.split()
            uc = envelope.find("{http://www.opengis.net/gml/3.2}upperCorner")
            uc = uc.text.split()
            bbox["bbox"] = (float(lc[0]), float(lc[1]), float(uc[0]), float(uc[1]))
            bboxes.append(bbox)
        return bboxes

    boundingboxes = property(_getOtherBoundingBoxes, None)

    def _getSupportedCRSProperty(self):
        # gets supported crs info
        # NOTE(review): these paths use WCS 1.x element names
        # (CoverageOffering/supportedCRSs) — confirm they match what a 2.0.1
        # DescribeCoverage actually returns.
        crss = []
        for elem in self._service.getDescribeCoverage(self.id).findall(
            ns("CoverageOffering/") + ns("supportedCRSs/") + ns("responseCRSs")
        ):
            for crs in elem.text.split(" "):
                crss.append(Crs(crs))
        for elem in self._service.getDescribeCoverage(self.id).findall(
            ns("CoverageOffering/") + ns("supportedCRSs/") + ns("requestResponseCRSs")
        ):
            for crs in elem.text.split(" "):
                crss.append(Crs(crs))
        for elem in self._service.getDescribeCoverage(self.id).findall(
            ns("CoverageOffering/") + ns("supportedCRSs/") + ns("nativeCRSs")
        ):
            for crs in elem.text.split(" "):
                crss.append(Crs(crs))
        return crss

    supportedCRS = property(_getSupportedCRSProperty, None)

    def _getSupportedFormatsProperty(self):
        # gets supported formats info (advertised service-wide in the
        # capabilities ServiceMetadata section)
        frmts = []
        for elem in self._service._capabilities.findall(
            nsWCS2("ServiceMetadata/") + nsWCS2("formatSupported")
        ):
            frmts.append(elem.text)
        return frmts

    supportedFormats = property(_getSupportedFormatsProperty, None)

    def _getAxisDescriptionsProperty(self):
        # gets any axis descriptions contained in the rangeset (requires a DescribeCoverage call to server).
        axisDescs = []
        for elem in self._service.getDescribeCoverage(self.id).findall(
            ns("CoverageOffering/") + ns("rangeSet/") + ns("RangeSet/") + ns("axisDescription/") + ns("AxisDescription")
        ):
            axisDescs.append(
                AxisDescription(elem)
            )  # create a 'AxisDescription' object.
        return axisDescs

    axisDescriptions = property(_getAxisDescriptionsProperty, None)
# Adding classes to represent gml:grid and gml:rectifiedgrid. One of these is used for the cvg.grid property
# (where cvg is a member of the contents dictionary)
# There is no simple way to convert the offset values in a rectifiedgrid grid to real values without CRS understanding,
# therefore this is beyond the current scope of owslib, so the representation here is purely to provide
# access to the information in the GML.
class Grid(object):
    """Axis and extent information parsed from a gml:Grid element."""

    def __init__(self, grid):
        # Defaults used when no element is supplied.
        self.axislabels = []
        self.dimension = None
        self.lowlimits = []
        self.highlimits = []
        if grid is None:
            return
        gml = "{http://www.opengis.net/gml/3.2}"
        envelope = gml + "limits/" + gml + "GridEnvelope/" + gml
        self.dimension = int(grid.get("dimension"))
        self.lowlimits = grid.find(envelope + "low").text.split(" ")
        self.highlimits = grid.find(envelope + "high").text.split(" ")
        # axisLabels holds all labels in a single space-separated string.
        self.axislabels = list(grid.findall(gml + "axisLabels")[0].text.split(" "))
class RectifiedGrid(Grid):
    """RectifiedGrid class, extends Grid with additional offset vector information."""

    def __init__(self, rectifiedgrid):
        super(RectifiedGrid, self).__init__(rectifiedgrid)
        gml = "{http://www.opengis.net/gml/3.2}"
        origin_path = gml + "origin/" + gml + "Point/" + gml + "pos"
        self.origin = rectifiedgrid.find(origin_path).text.split()
        # One offset vector per grid axis, each a list of numeric strings.
        self.offsetvectors = [
            offset.text.split()
            for offset in rectifiedgrid.findall(gml + "offsetVector")
        ]
class ReferenceableGridByVectors(Grid):
    """ReferenceableGridByVectors class, extends Grid with additional vector information."""

    def __init__(self, refereceablegridbyvectors):
        super(ReferenceableGridByVectors, self).__init__(refereceablegridbyvectors)
        rgrid = "{http://www.opengis.net/gml/3.3/rgrid}"
        gml = "{http://www.opengis.net/gml/3.2}"
        origin_path = rgrid + "origin/" + gml + "Point/" + gml + "pos"
        self.origin = refereceablegridbyvectors.find(origin_path).text.split()
        # One offset vector per general grid axis.
        vector_path = (rgrid + "generalGridAxis/" + rgrid + "GeneralGridAxis/"
                       + rgrid + "offsetVector")
        self.offsetvectors = [
            offset.text.split()
            for offset in refereceablegridbyvectors.findall(vector_path)
        ]
class AxisDescription(object):
    """ Class to represent the AxisDescription element optionally found as part of the RangeSet and used to
    define ordinates of additional dimensions such as wavelength bands or pressure levels"""

    def __init__(self, axisdescElem):
        self.name = self.label = None
        self.values = []
        # Iterate the element directly: Element.getchildren() was deprecated
        # and removed from xml.etree in Python 3.9, so the original raised
        # AttributeError on current interpreters.
        for elem in axisdescElem:
            if elem.tag == ns("name"):
                self.name = elem.text
            elif elem.tag == ns("label"):
                self.label = elem.text
            elif elem.tag == ns("values"):
                for child in elem:
                    self.values.append(child.text)
| tomkralidis/OWSLib | owslib/coverage/wcs201.py | Python | bsd-3-clause | 19,738 | [
"NetCDF"
] | a4fd5682e9d895d5d7fc4eb926099041b078af419bfd9016136705ebba1bec8f |
import os
import re
import time
import json
from collections import OrderedDict
import pkg_resources
import pandas as pd
from Bio import SeqIO
from Bio import Entrez
Entrez.email = "testing@ucsd.edu"
def untag(rule):
    """Strip the ``?P<name>`` tags from a regex, turning named groups into plain ones."""
    tag_pattern = re.compile(r'\?P<.*?>')
    return tag_pattern.sub('', rule)
def parse_antiSMASH(content):
    """ Parse antiSMASH output

    Parses the text of one antiSMASH ClusterBlast output file with a set of
    verbose regular expressions and returns a dict with keys:

    - 'target': name of the query cluster file
    - 'QueryCluster': OrderedDict of column-name -> list of values for the
      query cluster gene table
    - 'SignificantHits': OrderedDict keyed by subject cluster id, each value
      holding the hit metadata plus 'TableGenes' and 'BlastHit' column tables
    """
    # Building blocks for the full-file regex below. All are applied with
    # re.VERBOSE, so literal spaces are escaped ('\ ') and layout whitespace
    # is ignored.
    rule_table_genes = r"""
        (?P<subject_gene> \w+ \"?) \t
        \w+ \t
        (?P<location_start> \d+) \t
        (?P<location_end> \d+) \t
        (?P<strands> [+|-]) \t
        (?P<product> .*) \n
        """
    rule_table_blasthit = r"""
        (?P<query_gene> \w+ )\"? \t
        (?P<subject_gene> \w+ )\"? \t
        (?P<identity> \d+) \t
        (?P<blast_score> \d+) \t
        (?P<coverage> \d+(?:\.\d+)?) \t
        (?P<evalue> \d+\.\d+e[+|-]\d+) \t
        \n
        """
    rule_query_cluster = r"""
        (?P<query_gene> \w+) \s+
        (?P<location_start> \d+) \s
        (?P<location_end> \d+) \s
        (?P<strands> [+|-]) \s
        (?P<product> \w+ (?:\s \w+)?) \s* \n+
        """
    # One ">>" detail block per subject cluster. The nested tables reuse the
    # rules above with their named groups stripped (untag) because group
    # names cannot repeat within one pattern.
    rule_detail = r"""
        >>\n
        (?P<id>\d+) \. \s+
        (?P<cluster_subject> (?P<locus>\w+)_(?P<cluster>\w+)) \n
        Source: \s+ (?P<source>.+?) \s* \n
        Type: \s+ (?P<type>.+) \s* \n
        Number\ of\ proteins\ with\ BLAST\ hits\ to\ this\ cluster:\ (?P<n_hits> \d+ ) \n
        Cumulative\ BLAST\ score:\ (?P<cum_BLAST_score> \d+ )
        \n \n
        Table\ of\ genes,\ locations,\ strands\ and\ annotations\ of\ subject\ cluster:\n
        (?P<TableGenes>
          (
        """ + untag(rule_table_genes) + r"""
          )+
        )
        \n
        Table\ of\ Blast\ hits\ \(query\ gene,\ subject\ gene,\ %identity,\ blast\ score,\ %coverage,\ e-value\): \n
        (?P<BlastHit>
          (\w+ \t \w+ \"? \t \d+ \t \d+ \t \d+\.\d+ \t \d+\.\d+e[+|-]\d+ \t \n)+
        )
        \n+
        """
    # Top-level rule matching the whole file.
    rule = r"""
        ^
        ClusterBlast\ scores\ for\ (?P<target>.*)\n+
        Table\ of\ genes,\ locations,\ strands\ and\ annotations\ of\ query\ cluster:\n+
        (?P<QueryCluster>
          (
        """ + untag(rule_query_cluster) + r"""
          )+
        )
        \n \n+
        Significant \ hits:\ \n
        (?P<SignificantHits>
          (\d+ \. \ \w+ \t .* \n+)+
        )
        \n \n
        (?P<Details>
          Details:\n\n
          (
        """ + untag(rule_detail) + r"""
          )+
        )
        \n*
        $
        """
    parsed = re.search(rule, content, re.VERBOSE).groupdict()

    output = {}
    for k in ['target', 'QueryCluster', 'SignificantHits']:
        output[k] = parsed[k]

    # Re-parse the query cluster table into per-column lists.
    QueryCluster = OrderedDict()
    for k in re.search(
            rule_query_cluster, parsed['QueryCluster'],
            re.VERBOSE).groupdict().keys():
        QueryCluster[k] = []
    for row in re.finditer(
            rule_query_cluster, parsed['QueryCluster'], re.VERBOSE):
        row = row.groupdict()
        for k in row:
            QueryCluster[k].append(row[k])
    output['QueryCluster'] = QueryCluster

    # Index the significant hits by their "<locus>_<cluster>" id.
    output['SignificantHits'] = OrderedDict()
    for row in re.finditer(
            r"""(?P<id>\d+) \. \ (?P<cluster_subject> (?P<locus>\w+)_(?P<locus_cluster>\w+)) \t (?P<description>.*) \n+""", parsed['SignificantHits'], re.VERBOSE):
        hit = row.groupdict()
        cs = hit['cluster_subject']
        if cs not in output['SignificantHits']:
            output['SignificantHits'][cs] = OrderedDict()
        for v in ['id', 'description', 'locus', 'locus_cluster']:
            output['SignificantHits'][cs][v] = hit[v]

    # Expand each detail block, replacing the raw TableGenes/BlastHit text
    # with per-column OrderedDicts of lists.
    for block in re.finditer(rule_detail, parsed['Details'], re.VERBOSE):
        block = dict(block.groupdict())

        content = block['TableGenes']
        block['TableGenes'] = OrderedDict()
        for k in re.findall('\(\?P<(.*?)>', rule_table_genes):
            block['TableGenes'][k] = []
        for row in re.finditer(rule_table_genes, content, re.VERBOSE):
            row = row.groupdict()
            for k in row:
                block['TableGenes'][k].append(row[k])

        content = block['BlastHit']
        block['BlastHit'] = OrderedDict()
        for k in re.findall('\(\?P<(.*?)>', rule_table_blasthit):
            block['BlastHit'][k] = []
        for row in re.finditer(rule_table_blasthit, content, re.VERBOSE):
            row = row.groupdict()
            for k in row:
                block['BlastHit'][k].append(row[k])

        # Merge the detail fields into the matching significant hit.
        for k in block:
            output['SignificantHits'][block['cluster_subject']][k] = block[k]

    return output
def antiSMASH_to_dataFrame(content):
    """ Extract an antiSMASH file as a pandas.DataFrame

    One row per (query gene, subject gene) BLAST pairing, outer-joined with
    the subject cluster gene table and annotated with the scalar fields of
    the cluster it belongs to.
    """
    parsed = parse_antiSMASH(content)
    frames = []
    for cs in parsed['SignificantHits']:
        clusterSubject = parsed['SignificantHits'][cs].copy()
        df = pd.merge(
            pd.DataFrame(clusterSubject['BlastHit']),
            pd.DataFrame(clusterSubject['TableGenes']),
            on='subject_gene', how='outer')
        del(clusterSubject['BlastHit'])
        del(clusterSubject['TableGenes'])
        # Broadcast the remaining scalar cluster-level fields to every row.
        for v in clusterSubject:
            df[v] = clusterSubject[v]
        frames.append(df)
    if not frames:
        return pd.DataFrame()
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # concatenate the per-cluster frames in one go instead.
    return pd.concat(frames, ignore_index=True)
class antiSMASH_file(object):
    """A dict-like wrapper around one parsed antiSMASH ClusterBlast file."""

    def __init__(self, filename):
        self.data = {}
        self.load(filename)

    def __getitem__(self, item):
        return self.data[item]

    def keys(self):
        return self.data.keys()

    def load(self, filename):
        """(Re)load and parse *filename*, replacing any previous content."""
        self.data = {}
        with open(filename, 'r') as f:
            parsed = parse_antiSMASH(f.read())
        for key in parsed:
            self.data[key] = parsed[key]
def efetch_hit(term, seq_start, seq_stop):
    """ Fetch the relevant part of a hit

    Searches the NCBI nucleotide database for *term* (expects exactly one
    match) and fetches the GenBank text between seq_start and seq_stop,
    retrying with an increasing nap on failure.
    """
    db = "nucleotide"
    maxtry = 3
    ntry = -1
    downloaded = False
    # BUG FIX: the original tested ``~downloaded`` (bitwise NOT), which is
    # truthy for both True and False, so the loop never stopped early on a
    # successful download. Use a proper boolean test.
    while not downloaded and ntry < maxtry:
        ntry += 1
        try:
            handle = Entrez.esearch(db=db, term=term)
            record = Entrez.read(handle)
            assert len(record['IdList']) == 1, \
                "Sorry, I'm not ready to handle more than one record"
            handle = Entrez.efetch(db=db, rettype="gb", retmode="text",
                                   id=record['IdList'][0],
                                   seq_start=seq_start, seq_stop=seq_stop)
            content = handle.read()
            downloaded = True
        # Don't use a bare except: it would also swallow KeyboardInterrupt.
        except Exception:
            nap = ntry * 3
            # The original printed a literal, never-formatted "%s" message;
            # actually interpolate the term and the nap length.
            print("Failed to download %s. I'll take a nap of %s seconds"
                  " and try again." % (term, nap))
            time.sleep(nap)
    if not downloaded:
        # Previously `content` would be unbound here, raising a confusing
        # UnboundLocalError; fail with an explicit message instead.
        raise RuntimeError("Could not download %s after %d attempts"
                           % (term, maxtry + 1))
    return content
def download_hits(filename, output_path):
    """ Download the GenBank block for all hits by antiSMASH

    For each significant hit in *filename*, fetches the subject locus
    between the first and last gene coordinates and saves it as
    ``<locus>_<start>-<end>.gbk`` in *output_path*. Files already present
    are skipped, so re-running is idempotent.
    """
    c = antiSMASH_file(filename)
    for cs in c['SignificantHits'].keys():
        locus = c['SignificantHits'][cs]['locus']
        table_genes = c['SignificantHits'][cs]['TableGenes']
        # Locations are decimal strings from the parser; compare numerically,
        # since lexicographic min/max is wrong for mixed widths ('9' > '10').
        start = min(table_genes['location_start'], key=int)
        end = max(table_genes['location_end'], key=int)
        filename_out = os.path.join(
            output_path, "%s_%s-%s.gbk" % (locus, start, end))
        if os.path.isfile(filename_out):
            # print() call form works on both Python 2 and 3, unlike the
            # original print statements.
            print("Already downloaded %s" % filename_out)
            continue
        print("Requesting cluster_subject: %s, start: %s, end: %s" % (
            locus, start, end))
        content = efetch_hit(term=locus, seq_start=start, seq_stop=end)
        print("Saving %s" % filename_out)
        with open(filename_out, 'w') as f:
            f.write(content)
import urlparse
import urllib2
import tempfile
import tarfile
import os
def download_mibig(outputdir, version='1.3'):
    """ Download and extract MIBiG files into outputdir

    :param outputdir: directory the GenBank files are extracted into
    :param version: MIBiG release, one of '1.0', '1.1', '1.2', '1.3'
    :raises AssertionError: on an unsupported version string

    NOTE(review): uses the Python 2-only urlparse/urllib2 modules imported
    above — confirm before running under Python 3.
    """
    assert version in ['1.0', '1.1', '1.2', '1.3'], \
        "Invalid version of MIBiG"
    server = 'http://mibig.secondarymetabolites.org'
    filename = "mibig_gbk_%s.tar.gz" % version
    url = urlparse.urljoin(server, filename)
    # Download into a temp file that is removed when the block exits; the
    # tarball is extracted before the file handle is closed.
    with tempfile.NamedTemporaryFile(delete=True) as f:
        u = urllib2.urlopen(url)
        f.write(u.read())
        f.file.flush()
        tar = tarfile.open(f.name)
        tar.extractall(path=outputdir)
        tar.close()
    # MIBiG was packed with strange files ._*gbk. Let's remove it
    for f in [f for f in os.listdir(outputdir) if f[:2] == '._']:
        os.remove(os.path.join(outputdir, f))
#def gbk2tablegen(gb_file, strain_id=None):
#def cds_from_gbk(gb_file, strain_id=None):
def cds_from_gbk(gb_file):
    """Return a pandas.DataFrame with one row per CDS feature of a GenBank file.

    Columns: BGC (record id), locus_tag, start, stop, strand, plus
    product/score/e_value when the feature carries an smCOG note.
    """
    # Pass the path straight to SeqIO so Biopython opens and closes the
    # handle itself; the original opened the file in "rU" mode (removed in
    # Python 3.11) and never closed the handle.
    gb_record = SeqIO.read(gb_file, "genbank")
    sign = lambda x: '+' if x > 0 else '-'
    rows = []
    for feature in gb_record.features:
        if feature.type != "CDS":
            continue
        tmp = {'BGC': gb_record.id,
               'locus_tag': feature.qualifiers['locus_tag'][0],
               'start': feature.location.start.position,
               'stop': feature.location.end.position,
               'strand': sign(feature.location.strand)}
        if 'note' in feature.qualifiers:
            for note in feature.qualifiers['note']:
                # Pull the smCOG annotation, e.g.
                # "smCOG: SMCOG1000:... (Score: 123.4; E-value: 1e-10);"
                product = re.search(r"""smCOG: \s (?P<product>.*?) \s+ \(Score: \s* (?P<score>.*); \s* E-value: \s (?P<e_value>.*?)\);""", note, re.VERBOSE)
                if product is not None:
                    product = product.groupdict()
                    product['score'] = float(product['score'])
                    product['e_value'] = float(product['e_value'])
                    for p in product:
                        tmp[p] = product[p]
        rows.append(pd.Series(tmp))
    if not rows:
        return pd.DataFrame()
    # DataFrame.append was removed in pandas 2.0; build the frame in one go.
    return pd.DataFrame(rows).reset_index(drop=True)
def find_category_from_product(df):
    """Annotate *df* in place with a 'category' column derived from 'product'.

    Categories come from the packaged subcluster_dictionary.json; rows with
    no matching pattern (or a null product) become 'hypothetical'.
    """
    subcluster = json.loads(
        pkg_resources.resource_string(
            __name__, 'subcluster_dictionary.json'))

    def get_category(product):
        # First dictionary pattern that matches the product wins.
        for pattern in subcluster:
            if re.search(pattern, product):
                return subcluster[pattern]
        return 'hypothetical'

    idx = df['product'].notnull()
    df['category'] = df.loc[idx, 'product'].apply(get_category)
    # Rows with a null product never went through get_category; default them.
    df['category'].fillna('hypothetical', inplace=True)
    return df
def get_hits(filename, criteria='cum_BLAST_score'):
    """
    Reproduces original Tiago's code: table_1_extender.py

    For every query gene, keep only the hit that ranks highest on
    *criteria* (by default the cumulative BLAST score); NaN scores sort
    last and rows without a query gene are dropped.
    """
    with open(filename) as f:
        content = f.read()
    df = antiSMASH_to_dataFrame(content)
    df.dropna(subset=['query_gene'], inplace=True)
    df.sort_values(by=criteria, ascending=False, na_position='last',
                   inplace=True)
    # After the descending sort, first() per group is the best-scoring hit.
    return df.groupby('query_gene', as_index=False).first()
| NP-Omix/BioCompass | BioCompass/BioCompass.py | Python | bsd-3-clause | 11,128 | [
"BLAST"
] | e2e34400b106d8770142edd3a7d7ec944cb441e5bb433d8c63067d8952e4b79e |
# -*- coding: utf-8 -*-
"""
Deal with DER encoding and decoding.
Adapted from python-ecdsa at https://github.com/warner/python-ecdsa
Copyright (c) 2010 Brian Warner
Portions written in 2005 by Peter Pearson and placed in the public domain.
The MIT License (MIT)
Copyright (c) 2013 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import binascii
# Py2/Py3 shim: on Py2 (where bytes is str) a single byte is chr(x);
# on Py3 a one-byte bytes object is bytes([x]).
bytes_from_int = chr if bytes == str else lambda x: bytes([x])
class UnexpectedDER(Exception):
    """Raised when input bytes do not parse as the expected DER structure."""
    pass
def encode_integer(r):
    """Return the DER encoding (tag 0x02) of the non-negative integer *r*."""
    assert r >= 0  # can't support negative numbers yet
    hex_digits = "%x" % r
    if len(hex_digits) % 2 != 0:
        hex_digits = "0" + hex_digits
    payload = binascii.unhexlify(hex_digits.encode("utf8"))
    # DER integers are two's complement: a leading byte in 0x80-0xff would
    # read as negative, so prepend a 0x00 byte in that case.
    if ord(payload[:1]) > 0x7f:
        payload = b"\x00" + payload
    return b"\x02" + bytes_from_int(len(payload)) + payload
def encode_sequence(*encoded_pieces):
    """Wrap already-encoded DER elements in a SEQUENCE (tag 0x30)."""
    body = b"".join(encoded_pieces)
    return b"\x30" + encode_length(len(body)) + body
def remove_sequence(string):
    """Strip a leading DER SEQUENCE; return (contents, remaining bytes)."""
    if string[:1] != b"\x30":
        raise UnexpectedDER(
            "wanted sequence (0x30), got string length %d %s" % (
                len(string), binascii.hexlify(string[:10])))
    length, lengthlength = read_length(string[1:])
    start = 1 + lengthlength
    end = start + length
    return string[start:end], string[end:]
def remove_integer(string, use_broken_open_ssl_mechanism=False):
    """Strip a leading DER INTEGER; return (value, remaining bytes).

    OpenSSL treats DER-encoded negative integers (most significant bit set)
    as positive. Pass ``use_broken_open_ssl_mechanism=True`` to mimic that
    bug, which some applications depend upon.
    """
    if not string.startswith(b"\x02"):
        raise UnexpectedDER("did not get expected integer 0x02")
    length, llen = read_length(string[1:])
    body_start = 1 + llen
    body_end = body_start + length
    if len(string) < body_end:
        raise UnexpectedDER("ran out of integer bytes")
    numberbytes = string[body_start:body_end]
    value = int(binascii.hexlify(numberbytes), 16)
    # Two's complement: a high leading bit means the number is negative
    # (unless we are deliberately reproducing the OpenSSL behavior).
    if ord(numberbytes[:1]) >= 0x80 and not use_broken_open_ssl_mechanism:
        value -= (1 << (8 * length))
    return value, string[body_end:]
def encode_length(l):
    """DER-encode a length: short form below 0x80, long form otherwise."""
    assert l >= 0
    if l < 0x80:
        # short form: the length itself fits in one byte
        return bytes_from_int(l)
    hex_digits = "%x" % l
    if len(hex_digits) % 2 != 0:
        hex_digits = "0" + hex_digits
    encoded = binascii.unhexlify(hex_digits)
    # long form: first byte is 0x80 | number of length bytes that follow
    return bytes_from_int(0x80 | len(encoded)) + encoded
def read_length(string):
    """Decode a DER length field; return (length, number of bytes consumed)."""
    first = ord(string[:1])
    if first & 0x80 == 0:
        # short form: the single byte holds the length directly
        return first & 0x7f, 1
    # long form: the low 7 bits count the big-endian base-256 length bytes
    llen = first & 0x7f
    if llen > len(string) - 1:
        raise UnexpectedDER("ran out of length bytes")
    return int(binascii.hexlify(string[1:1 + llen]), 16), 1 + llen
def sigencode_der(r, s):
    """Encode an ECDSA signature pair (r, s) as a DER SEQUENCE of two INTEGERs."""
    return encode_sequence(encode_integer(r), encode_integer(s))
def sigdecode_der(sig_der, use_broken_open_ssl_mechanism=True):
    """Decode a DER-encoded ECDSA signature into the pair (r, s).

    NOTE(review): the default ``use_broken_open_ssl_mechanism=True`` makes
    this a non-standard implementation that mimics an OpenSSL bug (see
    ``remove_integer``).
    """
    body, _trailing = remove_sequence(sig_der)
    r, remainder = remove_integer(
        body, use_broken_open_ssl_mechanism=use_broken_open_ssl_mechanism)
    s, _empty = remove_integer(
        remainder, use_broken_open_ssl_mechanism=use_broken_open_ssl_mechanism)
    return r, s
| cvegaj/ElectriCERT | venv3/lib/python3.6/site-packages/pycoin/tx/script/der.py | Python | gpl-3.0 | 4,353 | [
"Brian"
] | 655236ebe79b4c35efeb0ea38490a70da1174725b71d357b63a25184241035e3 |
"""
gaussian.py: Classes for Gaussian estimation
"""
from __future__ import division
from __future__ import print_function
import numpy as np
from vampyre.common.utils import get_var_shape, repeat_axes, repeat_sum
from vampyre.common.utils import VpException
from vampyre.estim.base import BaseEst
class GaussEst(BaseEst):
    """ Gaussian estimator class

    Estimator for a Gaussian penalty
    :math:`f(z)=(1/2 \\tau_z)\|z-\\bar{z}\|^2+(1/2)\\ln(2\pi \\tau_z)`

    When :math:`z` is complex, the factor :math:`1/2` is removed.

    :param zmean: prior mean, :math:`\\bar{z}`
    :param zvar: prior variance, :math:`\\tau_z`
    :param shape: shape of :math:`z`
    :param var_axes: axes on which the prior variance is repeated.
        This is also the axes on which the variance is averaged.
    :param zmean_axes: axis on which the prior mean is repeated.
        (default=`all` indicating prior mean is repeated on all axes,
        meaning that the prior mean is a scalar)
    :param Boolean is_complex: indicates if :math:`z` is complex
    :param Boolean map_est: indicates if estimator is to perform MAP
        or MMSE estimation. This is used for the cost computation.
    :param Boolean tune_zvar: indicates if :code:`zvar` is to be
        estimated via EM
    :param Boolean tune_rvar: indicates if the proximal variance
        :code:`rvar` is estimated.
        NOTE(review): tune_rvar is stored but not read anywhere in this
        class -- presumably consumed by the solver; confirm against callers.
    """
    def __init__(self, zmean, zvar, shape, name=None,
                 var_axes=(0,), zmean_axes='all',
                 is_complex=False, map_est=False, tune_zvar=False,
                 tune_rvar=False):
        if np.isscalar(shape):
            shape = (shape,)
        # Bug fix: the dtype selection was inverted (complex signals were
        # assigned np.double and real signals np.complex).  Also use the
        # canonical dtypes: the aliases np.complex / np.double were
        # deprecated and removed in recent numpy releases.
        dtype = np.complex128 if is_complex else np.float64
        BaseEst.__init__(self, shape=shape, dtype=dtype, name=name,
                         var_axes=var_axes, type_name='GaussEst',
                         cost_avail=True)
        self.zmean = zmean
        self.zvar = zvar
        self.cost_avail = True
        self.is_complex = is_complex
        self.map_est = map_est
        self.zmean_axes = zmean_axes
        self.tune_zvar = tune_zvar
        self.tune_rvar = tune_rvar

        ndim = len(self.shape)
        if self.zmean_axes == 'all':
            self.zmean_axes = tuple(range(ndim))

        # If zvar is a scalar, repeat it to the required shape, which are
        # all the dimensions not being averaged over.
        if np.isscalar(self.zvar):
            var_shape = get_var_shape(self.shape, self.var_axes)
            self.zvar = np.tile(self.zvar, var_shape)

    def est_init(self, return_cost=False, ind_out=None, avg_var_cost=True):
        """
        Initial estimator: returns the prior mean and variance.

        :param Boolean return_cost: flag indicating if :code:`cost` is
            to be returned
        :param ind_out: must be :code:`[0]` or :code:`None` (single-output
            estimator)
        :param Boolean avg_var_cost: average variance and cost.  Disable
            to obtain per-element values.  (Default=True)
        :returns: :code:`zmean, zvar, [cost]` which are the prior mean and
            variance
        :raises ValueError: if :code:`ind_out` is not :code:`[0]` or None
        """
        if (ind_out != [0]) and (ind_out is not None):
            raise ValueError("ind_out must be either [0] or None")
        zmean = repeat_axes(self.zmean, self.shape, self.zmean_axes)
        zvar = self.zvar
        if not avg_var_cost:
            zvar = repeat_axes(zvar, self.shape, self.var_axes)
        if not return_cost:
            return zmean, zvar

        # Cost including the normalization factor.  For MMSE the initial
        # cost is zero (the prior KL term vanishes at the prior itself).
        if self.map_est:
            clog = np.log(2*np.pi*self.zvar)
            if avg_var_cost:
                cost = repeat_sum(clog, self.shape, self.var_axes)
            else:
                cost = clog
        else:
            cost = 0
        # Real-valued case carries the usual 1/2 Gaussian factor.
        if not self.is_complex:
            cost = 0.5*cost
        return zmean, zvar, cost

    def est(self, r, rvar, return_cost=False, ind_out=None, avg_var_cost=True):
        """
        Proximal estimation function.

        Computes the posterior mean/variance of the Gaussian prior combined
        with the Gaussian proximal term with mean :code:`r` and variance
        :code:`rvar`.

        :param r: proximal mean
        :param rvar: proximal variance
        :param Boolean return_cost: flag indicating if :code:`cost` is
            to be returned
        :param ind_out: must be :code:`[0]` or :code:`None`
        :param Boolean avg_var_cost: average variance and cost.  Disable
            to obtain per-element values.  (Default=True)
        :returns: :code:`zhat, zhatvar, [cost]` which are the posterior
            mean, variance and optional cost.
        :raises ValueError: if :code:`ind_out` is not :code:`[0]` or None
        :raises VpException: if :code:`tune_zvar` is set with
            :code:`avg_var_cost=False`
        """
        if (ind_out != [0]) and (ind_out is not None):
            raise ValueError("ind_out must be either [0] or None")

        # Infinite proximal variance: the prior dominates, return prior stats.
        # Bug fix: this previously called est_init(return_cost, avg_var_cost),
        # passing avg_var_cost positionally into the ind_out slot, which
        # raised ValueError whenever avg_var_cost was True.
        # (np.Inf was also replaced by np.inf for numpy >= 2.0.)
        if np.any(rvar == np.inf):
            return self.est_init(return_cost=return_cost, ind_out=ind_out,
                                 avg_var_cost=avg_var_cost)

        # Standard Gaussian product: 1/zhatvar = 1/rvar + 1/zvar
        zhatvar = rvar*self.zvar/(rvar + self.zvar)
        gain = self.zvar/(rvar + self.zvar)
        gain = repeat_axes(gain, self.shape, self.var_axes, rep=False)
        if not avg_var_cost:
            zhatvar = repeat_axes(zhatvar, self.shape, self.var_axes)
        zhat = gain*(r-self.zmean) + self.zmean

        # EM tuning of the prior variance
        if self.tune_zvar:
            if not avg_var_cost:
                raise VpException(
                    "must use variance averaging when using auto-tuning")
            self.zvar = np.mean(np.abs(zhat-self.zmean)**2, self.var_axes) +\
                zhatvar

        if not return_cost:
            return zhat, zhatvar

        # Quadratic terms of the cost (prior + proximal)
        zvar1 = repeat_axes(self.zvar, self.shape, self.var_axes, rep=False)
        rvar1 = repeat_axes(rvar, self.shape, self.var_axes, rep=False)
        cost = (np.abs(zhat-self.zmean)**2) / zvar1 \
            + (np.abs(zhat-r)**2) / rvar1
        if avg_var_cost:
            cost = np.sum(cost)

        # Normalization / entropy terms
        nz = np.prod(self.shape)
        if self.map_est:
            clog = np.log(2*np.pi*self.zvar)
            if avg_var_cost:
                clog = np.mean(clog)*nz
            else:
                clog = np.log(2*np.pi*zvar1)
            cost += clog
        else:
            d = np.log(self.zvar/zhatvar)
            if avg_var_cost:
                cost += np.mean(d)*nz
            else:
                cost += d

        # Real-valued case carries the usual 1/2 Gaussian factor.
        if not self.is_complex:
            cost = 0.5*cost
        return zhat, zhatvar, cost
| GAMPTeam/vampyre | vampyre/estim/gaussian.py | Python | mit | 6,954 | [
"Gaussian"
] | 724e842eec679b804f3f3ec627a59b2af81c9910f076910e4c4b78192bbf1170 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014, Dennis Drescher
# All rights reserved.
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should also have received a copy of the GNU Lesser General Public
# License along with this library in the file named "LICENSE".
# If not, write to the Free Software Foundation, 51 Franklin Street,
# suite 500, Boston, MA 02110-1335, USA or visit their web page on the
# internet at http://www.fsf.org/licenses/lgpl.html.
| thresherdj/shrinkypic | lib/shrinkypic/process/__init__.py | Python | mit | 984 | [
"VisIt"
] | dc68de208dbca3bb04ec595e73890843c2c42f34bdc440cf6d71edaeac882a83 |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from contextlib import contextmanager
import MDAnalysis as mda
import MDAnalysis.analysis.align as align
import MDAnalysis.analysis.rms as rms
import os
import numpy as np
import pytest
from MDAnalysis import SelectionError, SelectionWarning
from MDAnalysisTests import executable_not_found
from MDAnalysisTests.datafiles import (PSF, DCD, CRD, FASTA, ALIGN_BOUND,
ALIGN_UNBOUND, PDB_helix)
from numpy.testing import (
assert_almost_equal,
assert_equal,
assert_array_equal,
assert_array_almost_equal,
assert_allclose,
)
#Function for Parametrizing conditional raising
@contextmanager
def does_not_raise():
    """Null context manager: succeeds iff the body raises no exception.

    Used as the "no error expected" case in pytest.mark.parametrize tables
    alongside pytest.raises(...) expectations.
    """
    yield None
class TestRotationMatrix(object):
    """Tests for align.rotation_matrix on tiny two-point coordinate sets."""
    # Two 2x3 coordinate arrays differing only in one row, plus weights.
    a = np.array([[0.1, 0.2, 0.3], [1.1, 1.1, 1.1]])
    b = np.array([[0.1, 0.1, 0.1], [1.1, 1.1, 1.1]])
    w = np.array([1.3, 2.3])
    @pytest.mark.parametrize('a, b, weights', (
        (a, b, None),
        (a, b, w),
        (a.astype(int), b.astype(int), w.astype(np.float32))
    ))
    def test_rotation_matrix_input(self, a, b, weights):
        # Accepts arrays of various dtypes; expects identity rotation here.
        rot, rmsd = align.rotation_matrix(a, b, weights)
        assert_equal(rot, np.eye(3))
        assert rmsd is None
    def test_list_args(self):
        # Plain Python lists must be accepted as coordinate input too.
        a = [[0.1, 0.2, 0.3], [1.1, 1.1, 1.1]]
        b = [[0.1, 0.1, 0.1], [1.1, 1.1, 1.1]]
        w = [1.3, 2.3]
        rot, rmsd = align.rotation_matrix(a, b, w)
        assert_equal(rot, np.eye(3))
        assert rmsd is None
    def test_exception(self):
        # Mismatched coordinate counts (3 vs 2 rows) must raise ValueError.
        a = [[0.1, 0.2, 0.3],
             [1.1, 1.1, 1.1],
             [2, 2, 2]]
        b = [[0.1, 0.1, 0.1],
             [1.1, 1.1, 1.1]]
        with pytest.raises(ValueError):
            align.rotation_matrix(a, b)
class TestGetMatchingAtoms(object):
    """Tests for align.get_matching_atoms and the alignto() selection
    matching behavior (strict vs. permissive, match_atoms toggling)."""
    @staticmethod
    @pytest.fixture()
    def universe():
        # mobile structure + trajectory
        return mda.Universe(PSF, DCD)
    @staticmethod
    @pytest.fixture()
    def reference():
        # reference built from the same files as `universe`
        return mda.Universe(PSF, DCD)
    @staticmethod
    @pytest.fixture()
    def reference_small(reference):
        # reduced reference whose residues all differ from the mobile one
        return mda.Merge(reference.select_atoms(
            "not name H* and not atom 4AKE 1 CA"))
    @pytest.mark.parametrize("strict", (True, False))
    def test_match(self, universe, reference, strict,
                   selection="protein and backbone"):
        # identical selections must be matched atom-for-atom
        ref = reference.select_atoms(selection)
        mobile = universe.select_atoms(selection)
        groups = align.get_matching_atoms(ref, mobile, strict=strict)
        assert_equal(groups[0].names, groups[1].names)
    @pytest.mark.parametrize("strict", (True, False))
    def test_nomatch_atoms_raise(self, universe, reference,
                                 strict, selection="protein and backbone"):
        # one atom less but same residues; with strict=False should try
        # to get selections (but current code fails, so we also raise SelectionError)
        ref = reference.select_atoms(selection).atoms[1:]
        mobile = universe.select_atoms(selection)
        if strict:
            with pytest.raises(SelectionError):
                groups = align.get_matching_atoms(ref, mobile, strict=strict)
        else:
            with pytest.warns(SelectionWarning):
                with pytest.raises(SelectionError):
                    groups = align.get_matching_atoms(ref, mobile, strict=strict)
    @pytest.mark.parametrize("strict", (True, False))
    def test_nomatch_residues_raise_empty(self, universe, reference_small,
                                          strict, selection="protein and backbone"):
        # one atom less and all residues different: will currently create
        # empty selections with strict=False, see also
        # https://gist.github.com/orbeckst/2686badcd15031e6c946baf9164a683d
        ref = reference_small.select_atoms(selection)
        mobile = universe.select_atoms(selection)
        if strict:
            with pytest.raises(SelectionError):
                groups = align.get_matching_atoms(ref, mobile, strict=strict)
        else:
            with pytest.warns(SelectionWarning):
                with pytest.raises(SelectionError):
                    groups = align.get_matching_atoms(ref, mobile, strict=strict)
    def test_toggle_atom_mismatch_default_error(self, universe, reference):
        # mismatching atom names across the two selections: default raises
        selection = ('resname ALA and name CA', 'resname ALA and name O')
        with pytest.raises(SelectionError):
            rmsd = align.alignto(universe, reference, select=selection)
    def test_toggle_atom_mismatch_kwarg_error(self, universe, reference):
        # explicit match_atoms=True must behave like the default (raise)
        selection = ('resname ALA and name CA', 'resname ALA and name O')
        with pytest.raises(SelectionError):
            rmsd = align.alignto(universe, reference, select=selection, match_atoms=True)
    def test_toggle_atom_nomatch(self, universe, reference):
        # match_atoms=False skips name matching; alignment still happens
        selection = ('resname ALA and name CA', 'resname ALA and name O')
        rmsd = align.alignto(universe, reference, select=selection, match_atoms=False)
        assert rmsd[0] > 0.01
    def test_toggle_atom_nomatch_mismatch_atoms(self, universe, reference):
        # mismatching number of atoms, but same number of residues
        u = universe.select_atoms('resname ALA and name CA')
        u += universe.select_atoms('resname ALA and name O')[-1]
        ref = reference.select_atoms('resname ALA and name CA')
        with pytest.raises(SelectionError):
            align.alignto(u, ref, select='all', match_atoms=False)
    @pytest.mark.parametrize('subselection, expectation', [
        ('resname ALA and name CA', does_not_raise()),
        (mda.Universe(PSF, DCD).select_atoms('resname ALA and name CA'), does_not_raise()),
        (1234, pytest.raises(TypeError)),
    ])
    def test_subselection_alignto(self, universe, reference, subselection, expectation):
        # subselection accepts a selection string or an AtomGroup; other
        # types must raise TypeError
        with expectation:
            rmsd = align.alignto(universe, reference, subselection=subselection)
            assert_almost_equal(rmsd[1], 0.0, decimal=9)
    def test_no_atom_masses(self, universe):
        #if no masses are present
        u = mda.Universe.empty(6, 2, atom_resindex=[0, 0, 0, 1, 1, 1], trajectory=True)
        with pytest.warns(SelectionWarning):
            align.get_matching_atoms(u.atoms, u.atoms)
    def test_one_universe_has_masses(self, universe):
        # masses on only one side should still warn, not fail
        u = mda.Universe.empty(6, 2, atom_resindex=[0, 0, 0, 1, 1, 1], trajectory=True)
        ref = mda.Universe.empty(6, 2, atom_resindex=[0, 0, 0, 1, 1, 1], trajectory=True)
        ref.add_TopologyAttr('masses')
        with pytest.warns(SelectionWarning):
            align.get_matching_atoms(u.atoms, ref.atoms)
class TestAlign(object):
    """Tests for align.alignto and align.AlignTraj on the AdK test
    trajectory (PSF/DCD); reference RMSD values come from earlier MDA/VMD
    runs recorded in the inline comments."""
    @staticmethod
    @pytest.fixture()
    def universe():
        return mda.Universe(PSF, DCD)
    @staticmethod
    @pytest.fixture()
    def reference():
        return mda.Universe(PSF, DCD)
    def test_rmsd(self, universe, reference):
        universe.trajectory[0]  # ensure first frame
        bb = universe.select_atoms('backbone')
        first_frame = bb.positions
        universe.trajectory[-1]
        last_frame = bb.positions
        assert_almost_equal(rms.rmsd(first_frame, first_frame), 0.0, 5,
                            err_msg="error: rmsd(X,X) should be 0")
        # rmsd(A,B) = rmsd(B,A) should be exact but spurious failures in the
        # 9th decimal have been observed (see Issue 57 comment #1) so we relax
        # the test to 6 decimals.
        rmsd = rms.rmsd(first_frame, last_frame, superposition=True)
        assert_almost_equal(
            rms.rmsd(last_frame, first_frame, superposition=True), rmsd, 6,
            err_msg="error: rmsd() is not symmetric")
        assert_almost_equal(rmsd, 6.820321761927005, 5,
                            err_msg="RMSD calculation between 1st and last AdK frame gave wrong answer")
        # test masses as weights
        last_atoms_weight = universe.atoms.masses
        A = universe.trajectory[0]
        B = reference.trajectory[-1]
        rmsd = align.alignto(universe, reference, weights='mass')
        rmsd_sup_weight = rms.rmsd(A, B, weights=last_atoms_weight, center=True,
                                   superposition=True)
        assert_almost_equal(rmsd[1], rmsd_sup_weight, 6)
    def test_rmsd_custom_mass_weights(self, universe, reference):
        # passing the mass array explicitly must equal weights='mass'
        last_atoms_weight = universe.atoms.masses
        A = universe.trajectory[0]
        B = reference.trajectory[-1]
        rmsd = align.alignto(universe, reference,
                             weights=reference.atoms.masses)
        rmsd_sup_weight = rms.rmsd(A, B, weights=last_atoms_weight, center=True,
                                   superposition=True)
        assert_almost_equal(rmsd[1], rmsd_sup_weight, 6)
    def test_rmsd_custom_weights(self, universe, reference):
        # a 0/1 weight mask over CA atoms must equal select='name CA'
        weights = np.zeros(universe.atoms.n_atoms)
        ca = universe.select_atoms('name CA')
        weights[ca.indices] = 1
        rmsd = align.alignto(universe, reference, select='name CA')
        rmsd_weights = align.alignto(universe, reference, weights=weights)
        assert_almost_equal(rmsd[1], rmsd_weights[1], 6)
    def test_AlignTraj_outfile_default(self, universe, reference, tmpdir):
        # default output name is rmsfit_<trajectory basename>
        with tmpdir.as_cwd():
            reference.trajectory[-1]
            x = align.AlignTraj(universe, reference)
            try:
                assert os.path.basename(x.filename) == 'rmsfit_adk_dims.dcd'
            finally:
                x._writer.close()
    def test_AlignTraj_outfile_default_exists(self, universe, reference, tmpdir):
        reference.trajectory[-1]
        outfile = str(tmpdir.join('align_test.dcd'))
        align.AlignTraj(universe, reference, filename=outfile).run()
        fitted = mda.Universe(PSF, outfile)
        # ensure default file exists
        with mda.Writer(str(tmpdir.join("rmsfit_align_test.dcd")),
                        n_atoms=fitted.atoms.n_atoms) as w:
            w.write(fitted.atoms)
        with tmpdir.as_cwd():
            align.AlignTraj(fitted, reference)
            # we are careful now. The default does nothing
            with pytest.raises(IOError):
                align.AlignTraj(fitted, reference, force=False)
    def test_AlignTraj_step_works(self, universe, reference, tmpdir):
        reference.trajectory[-1]
        outfile = str(tmpdir.join('align_test.dcd'))
        # this shouldn't throw an exception
        align.AlignTraj(universe, reference, filename=outfile).run(step=10)
    def test_AlignTraj_deprecated_attribute(self, universe, reference, tmpdir):
        # Issue: `rmsd` moved to `results.rmsd`; old name warns
        reference.trajectory[-1]
        outfile = str(tmpdir.join('align_test.dcd'))
        x = align.AlignTraj(universe, reference, filename=outfile).run(stop=2)
        wmsg = "The `rmsd` attribute was deprecated in MDAnalysis 2.0.0"
        with pytest.warns(DeprecationWarning, match=wmsg):
            assert_equal(x.rmsd, x.results.rmsd)
    def test_AlignTraj(self, universe, reference, tmpdir):
        reference.trajectory[-1]
        outfile = str(tmpdir.join('align_test.dcd'))
        x = align.AlignTraj(universe, reference, filename=outfile).run()
        fitted = mda.Universe(PSF, outfile)
        assert_almost_equal(x.results.rmsd[0], 6.9290, decimal=3)
        assert_almost_equal(x.results.rmsd[-1], 5.2797e-07, decimal=3)
        # RMSD against the reference frame
        # calculated on Mac OS X x86 with MDA 0.7.2 r689
        # VMD: 6.9378711
        self._assert_rmsd(reference, fitted, 0, 6.929083044751061)
        self._assert_rmsd(reference, fitted, -1, 0.0)
    def test_AlignTraj_weighted(self, universe, reference, tmpdir):
        outfile = str(tmpdir.join('align_test.dcd'))
        x = align.AlignTraj(universe, reference,
                            filename=outfile, weights='mass').run()
        fitted = mda.Universe(PSF, outfile)
        assert_almost_equal(x.results.rmsd[0], 0, decimal=3)
        assert_almost_equal(x.results.rmsd[-1], 6.9033, decimal=3)
        self._assert_rmsd(reference, fitted, 0, 0.0,
                          weights=universe.atoms.masses)
        self._assert_rmsd(reference, fitted, -1, 6.929083032629219,
                          weights=universe.atoms.masses)
    def test_AlignTraj_custom_weights(self, universe, reference, tmpdir):
        # 0/1 CA mask must reproduce select='name CA' frame-by-frame
        weights = np.zeros(universe.atoms.n_atoms)
        ca = universe.select_atoms('name CA')
        weights[ca.indices] = 1
        outfile = str(tmpdir.join('align_test.dcd'))
        x = align.AlignTraj(universe, reference,
                            filename=outfile, select='name CA').run()
        x_weights = align.AlignTraj(universe, reference,
                                    filename=outfile, weights=weights).run()
        assert_array_almost_equal(x.results.rmsd, x_weights.results.rmsd)
    def test_AlignTraj_custom_mass_weights(self, universe, reference, tmpdir):
        outfile = str(tmpdir.join('align_test.dcd'))
        x = align.AlignTraj(universe, reference,
                            filename=outfile,
                            weights=reference.atoms.masses).run()
        fitted = mda.Universe(PSF, outfile)
        assert_almost_equal(x.results.rmsd[0], 0, decimal=3)
        assert_almost_equal(x.results.rmsd[-1], 6.9033, decimal=3)
        self._assert_rmsd(reference, fitted, 0, 0.0,
                          weights=universe.atoms.masses)
        self._assert_rmsd(reference, fitted, -1, 6.929083032629219,
                          weights=universe.atoms.masses)
    def test_AlignTraj_partial_fit(self, universe, reference, tmpdir):
        outfile = str(tmpdir.join('align_test.dcd'))
        # fitting on a partial selection should still write the whole topology
        align.AlignTraj(universe, reference, select='resid 1-20',
                        filename=outfile, weights='mass').run()
        mda.Universe(PSF, outfile)
    def test_AlignTraj_in_memory(self, universe, reference, tmpdir):
        outfile = str(tmpdir.join('align_test.dcd'))
        reference.trajectory[-1]
        x = align.AlignTraj(universe, reference, filename=outfile,
                            in_memory=True).run()
        assert x.filename is None
        assert_almost_equal(x.results.rmsd[0], 6.9290, decimal=3)
        assert_almost_equal(x.results.rmsd[-1], 5.2797e-07, decimal=3)
        # check in memory trajectory
        self._assert_rmsd(reference, universe, 0, 6.929083044751061)
        self._assert_rmsd(reference, universe, -1, 0.0)
    def _assert_rmsd(self, reference, fitted, frame, desired, weights=None):
        # helper: superimposed RMSD of `fitted` at `frame` vs `reference`
        fitted.trajectory[frame]
        rmsd = rms.rmsd(reference.atoms.positions, fitted.atoms.positions,
                        superposition=True)
        assert_almost_equal(rmsd, desired, decimal=5,
                            err_msg="frame {0:d} of fit does not have "
                            "expected RMSD".format(frame))
    def test_alignto_checks_selections(self, universe, reference):
        """Testing that alignto() fails if selections do not
        match (Issue 143)"""
        u = universe
        def different_size():
            a = u.atoms[10:100]
            b = u.atoms[10:101]
            return align.alignto(a, b)
        with pytest.raises(SelectionError):
            different_size()
        def different_atoms():
            a = u.atoms[10:20]
            b = u.atoms[10:17] + u.atoms[18:21]
            return align.alignto(a, b)
        with pytest.raises(SelectionError):
            different_atoms()
    def test_alignto_partial_universe(self, universe, reference):
        # aligning on one segment must superimpose that segment exactly
        u_bound = mda.Universe(ALIGN_BOUND)
        u_free = mda.Universe(ALIGN_UNBOUND)
        selection = 'segid B'
        segB_bound = u_bound.select_atoms(selection)
        segB_free = u_free.select_atoms(selection)
        segB_free.translate(segB_bound.centroid() - segB_free.centroid())
        align.alignto(u_free, u_bound, select=selection)
        assert_array_almost_equal(segB_bound.positions, segB_free.positions,
                                  decimal=3)
def _get_aligned_average_positions(ref_files, ref, select="all", **kwargs):
    """Align a trajectory to *ref* in memory; return the mean aligned
    coordinates of *select* together with the average alignment RMSD."""
    universe = mda.Universe(*ref_files, in_memory=True)
    aligner = align.AlignTraj(universe, ref, select=select, **kwargs).run()
    atoms = universe.select_atoms(select)
    mean_positions = universe.trajectory.timeseries(asel=atoms).mean(axis=1)
    avg_rmsd = sum(aligner.results.rmsd / len(universe.trajectory))
    return mean_positions, avg_rmsd
class TestAverageStructure(object):
    """Tests for align.AverageStructure against the reference average
    computed independently by _get_aligned_average_positions."""
    ref_files = (PSF, DCD)
    @pytest.fixture
    def universe(self):
        return mda.Universe(*self.ref_files)
    @pytest.fixture
    def reference(self):
        return mda.Universe(PSF, CRD)
    def test_average_structure_deprecated_attrs(self, universe, reference):
        # Issue #3278 - remove in MDAnalysis 3.0.0
        avg = align.AverageStructure(universe, reference).run(stop=2)
        wmsg = "The `universe` attribute was deprecated in MDAnalysis 2.0.0"
        with pytest.warns(DeprecationWarning, match=wmsg):
            assert_equal(avg.universe.atoms.positions,
                         avg.results.universe.atoms.positions)
        wmsg = "The `positions` attribute was deprecated in MDAnalysis 2.0.0"
        with pytest.warns(DeprecationWarning, match=wmsg):
            assert_equal(avg.positions, avg.results.positions)
        wmsg = "The `rmsd` attribute was deprecated in MDAnalysis 2.0.0"
        with pytest.warns(DeprecationWarning, match=wmsg):
            assert avg.rmsd == avg.results.rmsd
    def test_average_structure(self, universe, reference):
        ref, rmsd = _get_aligned_average_positions(self.ref_files, reference)
        avg = align.AverageStructure(universe, reference).run()
        assert_almost_equal(avg.results.universe.atoms.positions, ref,
                            decimal=4)
        assert_almost_equal(avg.results.rmsd, rmsd)
    def test_average_structure_mass_weighted(self, universe, reference):
        ref, rmsd = _get_aligned_average_positions(self.ref_files, reference, weights='mass')
        avg = align.AverageStructure(universe, reference, weights='mass').run()
        assert_almost_equal(avg.results.universe.atoms.positions, ref,
                            decimal=4)
        assert_almost_equal(avg.results.rmsd, rmsd)
    def test_average_structure_select(self, universe, reference):
        select = 'protein and name CA and resid 3-5'
        ref, rmsd = _get_aligned_average_positions(self.ref_files, reference, select=select)
        avg = align.AverageStructure(universe, reference, select=select).run()
        assert_almost_equal(avg.results.universe.atoms.positions, ref,
                            decimal=4)
        assert_almost_equal(avg.results.rmsd, rmsd)
    def test_average_structure_no_ref(self, universe):
        # without a reference, the universe's own first frame is used
        ref, rmsd = _get_aligned_average_positions(self.ref_files, universe)
        avg = align.AverageStructure(universe).run()
        assert_almost_equal(avg.results.universe.atoms.positions, ref,
                            decimal=4)
        assert_almost_equal(avg.results.rmsd, rmsd)
    def test_average_structure_no_msf(self, universe):
        # the analysis must not expose an `msf` attribute
        avg = align.AverageStructure(universe).run()
        assert not hasattr(avg, 'msf')
    def test_mismatch_atoms(self, universe):
        # differing atom counts between mobile and reference must raise
        u = mda.Merge(universe.atoms[:10])
        with pytest.raises(SelectionError):
            align.AverageStructure(universe, u)
    def test_average_structure_ref_frame(self, universe):
        ref_frame = 3
        u = mda.Merge(universe.atoms)
        # change to ref_frame
        universe.trajectory[ref_frame]
        u.load_new(universe.atoms.positions)
        # back to start
        universe.trajectory[0]
        ref, rmsd = _get_aligned_average_positions(self.ref_files, u)
        avg = align.AverageStructure(universe, ref_frame=ref_frame).run()
        assert_almost_equal(avg.results.universe.atoms.positions, ref,
                            decimal=4)
        assert_almost_equal(avg.results.rmsd, rmsd)
    def test_average_structure_in_memory(self, universe):
        # in_memory=True computes the average without writing a file
        avg = align.AverageStructure(universe, in_memory=True).run()
        reference_coordinates = universe.trajectory.timeseries().mean(axis=1)
        assert_almost_equal(avg.results.universe.atoms.positions,
                            reference_coordinates, decimal=4)
        assert avg.filename is None
class TestAlignmentProcessing(object):
    """Tests for align.fasta2select, with and without an external
    clustalw2 alignment step."""
    seq = FASTA
    error_msg = "selection string has unexpected length"
    def test_fasta2select_aligned(self):
        """test align.fasta2select() on aligned FASTA (Issue 112)"""
        sel = align.fasta2select(self.seq, is_aligned=True)
        # length of the output strings, not residues or anything real...
        assert len(sel['reference']) == 30623, self.error_msg
        assert len(sel['mobile']) == 30623, self.error_msg
    @pytest.mark.skipif(executable_not_found("clustalw2"),
                        reason="Test skipped because clustalw2 executable not found")
    def test_fasta2select_file(self, tmpdir):
        """test align.fasta2select() on a non-aligned FASTA with default
        filenames"""
        with tmpdir.as_cwd():
            sel = align.fasta2select(self.seq, is_aligned=False,
                                     alnfilename=None, treefilename=None)
            assert len(sel['reference']) == 23080, self.error_msg
            assert len(sel['mobile']) == 23090, self.error_msg
    @pytest.mark.skipif(executable_not_found("clustalw2"),
                        reason="Test skipped because clustalw2 executable not found")
    def test_fasta2select_ClustalW(self, tmpdir):
        """MDAnalysis.analysis.align: test fasta2select() with ClustalW
        (Issue 113)"""
        alnfile = str(tmpdir.join('alignmentprocessing.aln'))
        treefile = str(tmpdir.join('alignmentprocessing.dnd'))
        sel = align.fasta2select(self.seq, is_aligned=False,
                                 alnfilename=alnfile, treefilename=treefile)
        # numbers computed from alignment with clustalw 2.1 on Mac OS X
        # [orbeckst] length of the output strings, not residues or anything
        # real...
        assert len(sel['reference']) == 23080, self.error_msg
        assert len(sel['mobile']) == 23090, self.error_msg
    def test_fasta2select_resids(self, tmpdir):
        """test align.fasta2select() when resids provided (Issue #3124)"""
        resids = [x for x in range(705)]
        sel = align.fasta2select(self.seq, is_aligned=True,
                                 ref_resids=resids, target_resids=resids)
        # length of the output strings, not residues or anything real...
        assert len(sel['reference']) == 30621, self.error_msg
        assert len(sel['mobile']) == 30621, self.error_msg
def test_sequence_alignment():
    """align.sequence_alignment must return (seqA, seqB, score, begin, end)
    with seqA equal to the full reference sequence and the mobile sequence
    contained in seqB."""
    u = mda.Universe(PSF)
    reference = u.atoms
    mobile = u.select_atoms("resid 122-159")
    aln = align.sequence_alignment(mobile, reference)
    assert len(aln) == 5, "return value has wrong tuple size"
    seqA, seqB, score, begin, end = aln
    assert_equal(seqA, reference.residues.sequence(format="string"),
                 err_msg="reference sequence mismatch")
    assert mobile.residues.sequence(
        format="string") in seqB, "mobile sequence mismatch"
    assert_almost_equal(score, 54.6)
    assert_array_equal([begin, end], [0, reference.n_residues])
def test_alignto_reorder_atomgroups():
    """alignto() must handle atom groups whose indices are permuted
    relative to each other (Issue 2977)."""
    universe = mda.Universe(PDB_helix)
    mobile = universe.atoms[:4]
    reference = universe.atoms[[3, 2, 1, 0]]
    result = align.alignto(mobile, reference, select='bynum 1-4')
    assert_allclose(result, (0.0, 0.0))
| MDAnalysis/mdanalysis | testsuite/MDAnalysisTests/analysis/test_align.py | Python | gpl-2.0 | 24,653 | [
"MDAnalysis",
"VMD"
] | 30e2d34d6fdf9b9f9c137fb4bfa9e9f7b32991b7a4c71869f09009dd26d4640d |
#! /usr/bin/env python
#
# PyWeather
# (c) 2010 Patrick C. McGinty <pyweather@tuxcoder.com>
# (c) 2005 Christopher Blunck <chris@wxnet.org>
#
# You're welcome to redistribute this software under the
# terms of the GNU General Public Licence version 2.0
# or, at your option, any higher version.
#
# You can read the complete GNU GPL in the file COPYING
# which should come along with this software, or visit
# the Free Software Foundation's WEB site http://www.fsf.org
#
import os
from distutils.core import setup
from pathlib import Path
import weather as pkg
# Package name comes from the package's own module metadata.
name = pkg.__name__
this_directory = Path(__file__).parent
# The PyPI long description is the project README (rendered as Markdown).
long_description = (this_directory / "README.md").read_text()
setup(name=name,
      version=pkg.__version__,
      license="GNU GPL",
      description=pkg.__doc__,
      long_description = long_description,
      long_description_content_type="text/markdown",
      author="Patrick C. McGinty, Christopher Blunck",
      author_email="pyweather@tuxcoder.com, chris@wxnet.org",
      url="http://github.com/cmcginty/PyWeather",
      download_url="https://github.com/cmcginty/PyWeather/archive/%s.zip" %
      pkg.__version__,
      packages=[
          name,
          name + '.services',
          name + '.stations',
          name + '.units',
      ],
      install_requires=[
          'pyserial==3.5'
      ],
      scripts=['scripts/weatherpub.py'],
      )
| cmcginty/PyWeather | setup.py | Python | gpl-3.0 | 1,388 | [
"VisIt"
] | e415eddedf510a1cf097d97e5ac6aa5addfbea847191efaeaa53ea838f6d1f3b |
from pkg_resources import get_distribution
# version is read from the installed distribution metadata
__version__ = get_distribution('BioUtil').version
__all__ = ['xzFile', 'xzopen',
        'tsv', 'tsvFile', 'tsvRecord',
        'vcf', 'vcfFile', 'vcfReader', 'vcfWriter',
        'samFile',
        'fastaFile', 'fastqFile', 'fastaRecord', 'fastqRecord',
        'cachedFasta', 'faidx',
        'log'
        ]
# import order matters here: it avoids a circular-import loop
from .xz import xzFile, xzopen
# from . import xz
from .tsv import tsvFile, tsvRecord
# from . import tsv
from .vcf import vcfFile, vcfReader, vcfWriter, _vcf
# from . import vcf
from pysam import AlignmentFile as samFile
import pysam as sam
from .cached_fasta import cachedFasta
# alias: cachedFasta doubles as the fasta reader
fastaReader=cachedFasta
from .fastq import fastqFile, fastqRecord, fastaFile, fastaRecord
import pyfaidx as faidx
from . import log
| sein-tao/pyBioUtil | BioUtil/__init__.py | Python | gpl-2.0 | 808 | [
"pysam"
] | fa38e71646bade2aac18066ff5b49573946b40b41206b1daaffaa3dc0f1c6c17 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
'''
Created on Jan 24, 2012
'''
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jan 24, 2012"
import unittest
import os
from pymatgen.io.cssr import Cssr
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.core.structure import Structure
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class CssrTest(unittest.TestCase):
    """Tests for pymatgen's CSSR structure-file writer and reader."""
    def setUp(self):
        # build a Cssr object from the POSCAR test fixture
        filepath = os.path.join(test_dir, 'POSCAR')
        p = Poscar.from_file(filepath)
        self.cssr = Cssr(p.structure)
    def test_str(self):
        """String serialization of the fixture must match this exact text."""
        expected_string = """10.4118 6.0672 4.7595
90.00 90.00 90.00 SPGR = 1 P 1 OPT = 1
24 0
0 Fe4 P4 O16
1 Fe 0.2187 0.7500 0.4749
2 Fe 0.2813 0.2500 0.9749
3 Fe 0.7187 0.7500 0.0251
4 Fe 0.7813 0.2500 0.5251
5 P 0.0946 0.2500 0.4182
6 P 0.4054 0.7500 0.9182
7 P 0.5946 0.2500 0.0818
8 P 0.9054 0.7500 0.5818
9 O 0.0434 0.7500 0.7071
10 O 0.0966 0.2500 0.7413
11 O 0.1657 0.0461 0.2854
12 O 0.1657 0.4539 0.2854
13 O 0.3343 0.5461 0.7854
14 O 0.3343 0.9539 0.7854
15 O 0.4034 0.7500 0.2413
16 O 0.4566 0.2500 0.2071
17 O 0.5434 0.7500 0.7929
18 O 0.5966 0.2500 0.7587
19 O 0.6657 0.0461 0.2146
20 O 0.6657 0.4539 0.2146
21 O 0.8343 0.5461 0.7146
22 O 0.8343 0.9539 0.7146
23 O 0.9034 0.7500 0.2587
24 O 0.9566 0.2500 0.2929"""
        self.assertEqual(str(self.cssr), expected_string)
    def test_from_file(self):
        """Parsing a .cssr file must yield a Structure instance."""
        filename = os.path.join(test_dir, "Si.cssr")
        cssr = Cssr.from_file(filename)
        self.assertIsInstance(cssr.structure, Structure)
# allow running this test module directly
if __name__ == "__main__":
    unittest.main()
| sonium0/pymatgen | pymatgen/io/tests/test_cssr.py | Python | mit | 1,887 | [
"VASP",
"pymatgen"
] | d9b5032f944bb7a8ae8ea20115b09b9fac5b232626dbfea3245fd4293d58a1fb |
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 631 $"
from density import Density
from cspa import CSPA
from mpa import MPA
from lpa import LPA
from opa import OPA
from mbo import MBO
from fragments import FragmentAnalysis
from cda import CDA
| keceli/RMG-Java | source/cclib/method/__init__.py | Python | mit | 370 | [
"cclib"
] | 440e2ca517cc7f2e4612a35e9958a6623b00cb32a005b243e58d13d66e635dec |
#!/usr/bin/env python
"cluster across samples using vsearch or from bam files and bedtools"
# py2/3 compatible
from __future__ import print_function
try:
from builtins import range
from itertools import izip, chain
except ImportError:
from itertools import chain
izip = zip
import os
import pty
import gzip
import glob
import time
import shutil
import random
import select
import socket
import subprocess as sps
import numpy as np
from pysam import AlignmentFile, FastaFile
import ipyrad
from .utils import IPyradError, fullcomp, chroms2ints
class Step6:
    """
    Run ipyrad step 6: cluster (denovo) or map (reference) consensus reads
    across samples, producing the <name>_clust_database.fa file.
    Work is distributed to ipyparallel engines via `lbview`/`thview`.
    """
    def __init__(self, data, force, ipyclient):
        # data: Assembly object holding params, samples, and paths
        self.data = data
        self.randomseed = int(self.data.hackersonly.random_seed)
        self.isref = bool('ref' in self.data.params.assembly_method)
        self.force = force
        self.ipyclient = ipyclient
        self.print_headers()
        self.samples = self.get_subsamples()
        self.setup_dirs(force)
        # groups/threading information for hierarchical clustering
        # ----- DISABLED FOR NOW -------------
        # self.cgroups = {}
        # self.assign_groups()
        # self.hostd = {}
        # self.tune_hierarchical_threading()
        # NEW CODE TO OVERRIDE HIERARCH CLUSTERING
        # a single group 0 containing all samples (no hierarchy)
        self.cgroups = {
            0: self.samples,
        }
        self.data.ncpus = len(self.ipyclient.ids)
        self.nthreads = len(self.ipyclient.ids)
        self.lbview = self.ipyclient.load_balanced_view()
        self.thview = self.ipyclient.load_balanced_view()
    def print_headers(self):
        """Print the step header when running from the CLI."""
        if self.data._cli:
            self.data._print(
                "\n{}Step 6: Clustering/Mapping across samples "
                .format(self.data._spacer)
            )
    def setup_dirs(self, force=False):
        "set up across and tmpalign dirs and init h5 database file"
        self.data.dirs.across = os.path.realpath(os.path.join(
            self.data.params.project_dir,
            "{}_across".format(self.data.name)))
        self.data.tmpdir = os.path.join(
            self.data.dirs.across,
            "{}-tmpalign".format(self.data.name))
        self.data.clust_database = os.path.join(
            self.data.dirs.across,
            self.data.name + "_clust_database.fa")
        # clear out any previous results when forcing a re-run
        if force:
            odir = self.data.dirs.across
            if os.path.exists(odir):
                shutil.rmtree(odir)
        # make dirs
        if not os.path.exists(self.data.dirs.across):
            os.mkdir(self.data.dirs.across)
        if not os.path.exists(self.data.tmpdir):
            os.mkdir(self.data.tmpdir)
    def get_subsamples(self):
        "Apply state, ncluster, and force filters to select samples"
        # bail out if no samples ready
        if not hasattr(self.data.stats, "state"):
            raise IPyradError("No samples ready for step 6")
        # filter samples by state
        state4 = self.data.stats.index[self.data.stats.state < 5]
        state5 = self.data.stats.index[self.data.stats.state == 5]
        state6 = self.data.stats.index[self.data.stats.state > 5]
        # tell user which samples are not ready for step5
        if state4.any():
            print("skipping samples not in state==5:\n{}"
                  .format(state4.tolist()))
        if self.force:
            # run all samples above state 4
            subs = self.data.stats.index[self.data.stats.state > 4]
            subsamples = [self.data.samples[i] for i in subs]
        else:
            # tell user which samples have already completed step 6
            if state6.any():
                raise IPyradError(
                    "Some samples are already in state==6. If you wish to \n" \
                    + " create a new database for across sample comparisons \n" \
                    + " use the force=True (-f) argument.")
            # run all samples in state 5
            subsamples = [self.data.samples[i] for i in state5]
        # check that kept samples have clusters
        checked_samples = []
        for sample in subsamples:
            if sample.stats.reads_consens:
                checked_samples.append(sample)
            else:
                # NOTE(review): missing .format(sample.name) — this prints
                # the literal "{}" placeholder; confirm and fix upstream.
                print("skipping {}; no consensus reads found.")
        if not any(checked_samples):
            raise IPyradError("no samples ready for step 6")
        # sort samples so the largest is first
        checked_samples.sort(
            key=lambda x: x.stats.reads_consens,
            reverse=True,
        )
        return checked_samples
    def assign_groups(self):
        "assign samples to groups if not user provided for hierarchical clust"
        # to hold group ints mapping to list of sample objects
        # {0: [a, b, c], 1: [d, e, f]}
        self.cgroups = {}
        # use population info to split samples into groups; or assign random
        if self.data.populations:
            self.cgroups = {}
            for idx, val in enumerate(self.data.populations.values()):
                self.cgroups[idx] = [self.data.samples[x] for x in val[1]]
        # by default let's split taxa into groups of 20-50 samples at a time
        else:
            # calculate the number of cluster1 jobs to perform:
            if len(self.samples) <= 100:
                groupsize = 20
            elif len(self.samples) <= 500:
                groupsize = 50
            else:
                groupsize = 100
            # split samples evenly into groups
            alls = self.samples
            nalls = len(self.samples)
            ngroups = int(np.ceil(nalls / groupsize))
            gsize = int(np.ceil(nalls / ngroups))
            idx = 0
            for samps in range(0, nalls, gsize):
                self.cgroups[idx] = alls[samps: samps + gsize]
                idx += 1
    def tune_hierarchical_threading(self):
        "tune threads for across-sample clustering used in denovo assemblies"
        # get engine data, skips busy engines.
        hosts = {}
        for eid in self.ipyclient.ids:
            engine = self.ipyclient[eid]
            if not engine.outstanding:
                hosts[eid] = engine.apply(socket.gethostname)
        # get targets on each hostname for spreading jobs out.
        self.ipyclient.wait()
        hosts = [(eid, i.get()) for (eid, i) in hosts.items()]
        hostnames = set([i[1] for i in hosts])
        self.hostd = {x: [i[0] for i in hosts if i[1] in x] for x in hostnames}
        # calculate the theading of cluster1 jobs:
        self.data.ncpus = len(self.ipyclient.ids)
        njobs = len(self.cgroups)
        nnodes = len(self.hostd)
        # how to load-balance cluster2 jobs
        # maxthreads = 8 cuz vsearch isn't v efficient above that.
        ## e.g., 24 cpus; do 2 12-threaded jobs
        ## e.g., 2 nodes; 40 cpus; do 2 20-threaded jobs or 4 10-threaded jobs
        ## e.g., 4 nodes; 80 cpus; do 8 10-threaded jobs
        if nnodes == 1:
            thr = np.floor(self.data.ncpus / njobs).astype(int)
            eids = max(1, thr)
            eids = max(eids, len(list(self.hostd.values())[0]))
        else:
            eids = []
            for node in self.hostd:
                sids = self.hostd[node]
                nids = len(sids)
                thr = np.floor(nids / (njobs / nnodes)).astype(int)
                thr = max(1, thr)
                thr = min(thr, nids)
                eids.extend(self.hostd[node][::thr])
        # set nthreads based on ipcluster dict (default is 2)
        #if "threads" in self.data.ipcluster.keys():
        #    self.nthreads = int(self.data.ipcluster["threads"])
        self.nthreads = 2
        if self.data.ncpus > 4:
            self.nthreads = int(np.floor(
                self.data.ncpus) / len(self.cgroups))
        eids = self.ipyclient.ids[::self.nthreads]
        # create load-balancers
        self.lbview = self.ipyclient.load_balanced_view()
        self.thview = self.ipyclient.load_balanced_view(targets=eids)
    def run(self):
        """Entry point: dispatch denovo or reference workflow, then mark
        all samples as state 6."""
        # DENOVO
        if self.data.params.assembly_method == "denovo":
            # prepare clustering inputs for hierarchical clustering
            self.remote_build_concats_tier1()
            # if multiple clusters:
            if len(self.cgroups.keys()) == 1:
                self.remote_cluster_tiers(0)
            else:
                # send initial clustering jobs (track finished jobs)
                self.remote_cluster1()
                # prepare second tier inputs
                self.remote_build_concats_tier2()
                # send cluster2 job (track actual progress)
                self.remote_cluster_tiers('x')
            # build clusters
            self.remote_build_denovo_clusters()
            # align denovo clusters
            self.remote_align_denovo_clusters()
            # concat aligned files
            self.concat_alignments()
        elif self.data.params.assembly_method == "reference":
            # prepare bamfiles (merge and sort)
            self.remote_concat_bams()
            # get extents of regions using bedtools merge
            self.remote_build_ref_regions()
            # build clusters from regions
            self.remote_build_ref_clusters()
            # concat aligned files (This is not necessary, chunk again in s7)
            self.concat_alignments()
        # clean up step here...
        self.data.stats_files.s6 = self.data.clust_database
        # set sample states
        for sample in self.samples:
            sample.stats.state = 6
    def remote_build_concats_tier1(self):
        "prepares concatenated consens input files for each clust1 group"
        start = time.time()
        printstr = ("concatenating inputs", "s6")
        rasyncs = {}
        for jobid, group in self.cgroups.items():
            # should we use sample objects or sample names in cgroups?
            # Well you gotta choose one! W/o pops file it uses sample objects
            # so I made it use sample objects if pop_assign_file is set iao
            samples = [i for i in self.samples if i in group]
            args = (self.data, jobid, samples, self.randomseed)
            rasyncs[jobid] = self.lbview.apply(build_concat_files, *args)
        while 1:
            ready = [rasyncs[i].ready() for i in rasyncs]
            self.data._progressbar(len(ready), sum(ready), start, printstr)
            time.sleep(0.5)
            if len(ready) == sum(ready):
                break
        # check for errors
        self.data._print("")
        for job in rasyncs:
            if not rasyncs[job].successful():
                rasyncs[job].get()
    def remote_cluster1(self):
        "send threaded jobs to remote engines"
        start = time.time()
        printstr = ("clustering tier 1 ", "s6")
        rasyncs = {}
        for jobid in self.cgroups:
            args = (self.data, jobid, self.nthreads)
            rasyncs[jobid] = self.thview.apply(cluster, *args)
        while 1:
            ready = [rasyncs[i].ready() for i in rasyncs]
            self.data._progressbar(len(ready), sum(ready), start, printstr)
            time.sleep(0.5)
            if len(ready) == sum(ready):
                break
        # check for errors
        self.data._print("")
        for job in rasyncs:
            if not rasyncs[job].successful():
                rasyncs[job].get()
    def remote_build_concats_tier2(self):
        """Concatenate tier-1 seed files into a single tier-2 input."""
        start = time.time()
        printstr = ("concatenating inputs", "s6")
        args = (self.data, list(self.cgroups.keys()), self.randomseed)
        rasync = self.lbview.apply(build_concat_two, *args)
        while 1:
            ready = rasync.ready()
            # NOTE(review): other calls pass (total, done); here the order
            # looks inverted — (int(ready), 1) — confirm against _progressbar.
            self.data._progressbar(int(ready), 1, start, printstr)
            time.sleep(0.5)
            if ready:
                break
        # check for errors
        rasync.wait()
        self.data._print("")
        if not rasync.successful():
            rasync.get()
    def remote_cluster_tiers(self, jobid):
        """Run the final (or only) clustering job, parsing vsearch percent
        progress from the remote job's stdout."""
        start = time.time()
        printstr = ("clustering across ", "s6")
        # nthreads=0 defaults to using all cores
        args = (self.data, jobid, 0, True)
        rasync = self.thview.apply(cluster, *args)
        prog = 0
        while 1:
            time.sleep(0.5)
            if rasync.stdout:
                # last number printed by cluster() is the percent progress
                prog = int(rasync.stdout.split()[-1])
            self.data._progressbar(100, int(prog), start, printstr)
            if prog == 100:
                print("")
                break
        # check for errors
        self.ipyclient.wait()
        if not rasync.successful():
            rasync.get()
    def remote_build_denovo_clusters(self):
        "build denovo clusters from vsearch clustered seeds"
        # filehandles; if not multiple tiers then 'x' is jobid 0
        uhandle = os.path.join(
            self.data.dirs.across,
            "{}-x.utemp".format(self.data.name))
        buildfunc = build_hierarchical_denovo_clusters
        if not os.path.exists(uhandle):
            uhandle = uhandle.replace("-x.utemp", "-0.utemp")
            buildfunc = build_single_denovo_clusters
        usort = uhandle + ".sort"
        # sort utemp files, count seeds.
        start = time.time()
        printstr = ("building clusters ", "s6")
        async1 = self.lbview.apply(sort_seeds, uhandle)
        while 1:
            ready = [async1.ready()]
            self.data._progressbar(3, sum(ready), start, printstr)
            time.sleep(0.1)
            if all(ready):
                break
        async2 = self.lbview.apply(count_seeds, usort)
        while 1:
            ready = [async1.ready(), async2.ready()]
            self.data._progressbar(3, sum(ready), start, printstr)
            time.sleep(0.1)
            if all(ready):
                break
        nseeds = async2.get()
        # send the clust bit building job to work and track progress
        async3 = self.lbview.apply(
            buildfunc, *(self.data, usort, nseeds, list(self.cgroups.keys())))
        while 1:
            ready = [async1.ready(), async2.ready(), async3.ready()]
            self.data._progressbar(3, sum(ready), start, printstr)
            time.sleep(0.1)
            if all(ready):
                break
        self.data._print("")
        # check for errors
        for job in [async1, async2, async3]:
            if not job.successful():
                job.get()
    def remote_align_denovo_clusters(self):
        """
        Distributes parallel jobs to align_to_array() function.
        """
        # get files
        globpath = os.path.join(self.data.tmpdir, self.data.name + ".chunk_*")
        clustbits = glob.glob(globpath)
        # submit jobs to engines
        start = time.time()
        printstr = ("aligning clusters ", "s6")
        jobs = {}
        for idx, _ in enumerate(clustbits):
            args = [self.data, self.samples, clustbits[idx]]
            jobs[idx] = self.lbview.apply(align_to_array, *args)
        allwait = len(jobs)
        # print progress while bits are aligning
        while 1:
            finished = [i.ready() for i in jobs.values()]
            fwait = sum(finished)
            self.data._progressbar(allwait, fwait, start, printstr)
            time.sleep(0.4)
            if all(finished):
                break
        # check for errors in muscle_align_across
        keys = list(jobs.keys())
        for idx in keys:
            if not jobs[idx].successful():
                jobs[idx].get()
            del jobs[idx]
        self.data._print("")
    def concat_alignments(self):
        """
        This step is not necessary... we just chunk it up again in step 7...
        it's nice having a file as a product, but why bother...
        It creates a header with names of all samples that were present when
        step 6 was completed.
        """
        # get files, ordered numerically by their chunk index
        globlist = glob.glob(os.path.join(self.data.tmpdir, "aligned_*.fa"))
        clustbits = sorted(
            globlist,
            key=lambda x: int(x.rsplit("_", 1)[1].split(".")[0]),
        )
        # store path to clust database
        self.data.clust_database = os.path.join(
            self.data.dirs.across,
            self.data.name + "_clust_database.fa")
        # TODO: count nsnps and save it to the JSON for step 7
        # TODO: use cat to concatenate chunks
        # TODO: with cat be sure empty chunks don't cause problems.
        # write clusters to file with a header that has all samples in db
        snames = sorted([i.name for i in self.samples])
        with open(self.data.clust_database, 'wt') as out:
            out.write("#{}\n".format(",@".join(snames)))
            for clustfile in clustbits:
                with open(clustfile, 'r') as indata:
                    dat = indata.read()
                    if dat:
                        out.write(dat)  # + "//\n//\n")
        # final cleanup
        if os.path.exists(self.data.tmpdir):
            shutil.rmtree(self.data.tmpdir)
    ## REFERENCE BASED FUNCTIONS ---------------------------------
    def remote_concat_bams(self):
        "merge bam files into a single large sorted indexed bam"
        start = time.time()
        printstr = ("concatenating bams ", "s6")
        catbam = os.path.join(
            self.data.dirs.across,
            "{}.cat.bam".format(self.data.name)
        )
        # concatenate consens bamfiles for all samples in this assembly
        cmd1 = [
            ipyrad.bins.samtools,
            "merge",
            "-f",
            catbam,
        ]
        # Use the sample.files.consens info, rather than data.dirs to allow
        # for merging assemblies after step 5 where data.dirs is invalid/empty.
        for sample in self.samples:
            cmd1.append(sample.files.consens)
        proc = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
        # progress bar
        while not proc.poll() == 0:
            self.data._progressbar(3, 0, start, printstr)
            time.sleep(0.1)
        # parse result
        err = proc.communicate()[0].decode()
        if proc.returncode:
            raise IPyradError(
                "error in: {}: {}".format(" ".join(cmd1), err))
        # sort the bam file
        cmd2 = [
            ipyrad.bins.samtools,
            "sort",
            "-T",
            catbam + '.tmp',
            "-o",
            os.path.join(
                self.data.dirs.across,
                "{}.cat.sorted.bam".format(self.data.name)
            ),
            catbam,
        ]
        proc = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)
        # progress bar
        while not proc.poll() == 0:
            self.data._progressbar(3, 1, start, printstr)
            time.sleep(0.1)
        # parse result
        err = proc.communicate()[0].decode()
        if proc.returncode:
            raise IPyradError(
                "error in: {}: {}".format(" ".join(cmd2), err))
        os.remove(catbam)
        try:
            # index the bam file
            cmd3 = [
                ipyrad.bins.samtools,
                "index",
                "-c",
                os.path.join(
                    self.data.dirs.across,
                    "{}.cat.sorted.bam".format(self.data.name)
                ),
            ]
            proc = sps.Popen(cmd3, stderr=sps.STDOUT, stdout=sps.PIPE)
            # progress bar
            while not proc.poll() == 0:
                self.data._progressbar(3, 2, start, printstr)
                time.sleep(0.1)
            # parse result
            err = proc.communicate()[0].decode()
            if proc.returncode:
                raise IPyradError(
                    "error in: {}: {}".format(" ".join(cmd3), err))
        except IPyradError as ipyerror:
            # For bam files with large chromosomes (>~500Mb) the .bai indexing
            # will fail with this exit message. Try again with .csi indexing.
            # https://github.com/dereneaton/ipyrad/issues/435
            # Will keep bai as default because this has never come up before
            # but it doesn't hurt as a fallback.
            if not "hts_idx_check_range" in str(ipyerror):
                raise ipyerror
            # index the bam file
            cmd3 = [
                ipyrad.bins.samtools,
                "index", "-c",
                os.path.join(
                    self.data.dirs.across,
                    "{}.cat.sorted.bam".format(self.data.name)
                ),
            ]
            proc = sps.Popen(cmd3, stderr=sps.STDOUT, stdout=sps.PIPE)
            # progress bar
            while not proc.poll() == 0:
                self.data._progressbar(3, 2, start, printstr)
                time.sleep(0.1)
            # parse result
            err = proc.communicate()[0].decode()
            if proc.returncode:
                raise IPyradError(
                    "error in: {}: {}".format(" ".join(cmd3), err))
        self.data._progressbar(3, 3, start, printstr)
        self.data._print("")
    def remote_build_ref_regions(self):
        "call bedtools remotely and track progress"
        start = time.time()
        printstr = ("fetching regions ", "s6")
        rasync = self.ipyclient[0].apply(build_ref_regions, self.data)
        while 1:
            done = rasync.ready()
            self.data._progressbar(1, int(done), start, printstr)
            time.sleep(0.1)
            if done:
                break
        self.data._print("")
        # list of (chrom, start, end) tuples from bedtools merge
        self.regions = rasync.get()
    def remote_build_ref_clusters(self):
        "build clusters and find variants/indels to store"
        # send N jobs each taking chunk of regions
        ncpus = self.data.ncpus
        nloci = len(self.regions)
        optim = int((nloci // ncpus) + (nloci % ncpus))
        optim = int(np.ceil(optim / 2))
        # send jobs to func
        start = time.time()
        printstr = ("building database ", "s6")
        jobs = {}
        for idx, chunk in enumerate(range(0, nloci, optim)):
            region = self.regions[chunk: chunk + optim]
            if region:
                args = (self.data, idx, region)
                jobs[idx] = self.lbview.apply(build_ref_clusters, *args)
        # print progress while bits are aligning
        allwait = len(jobs)
        while 1:
            finished = [i.ready() for i in jobs.values()]
            fwait = sum(finished)
            self.data._progressbar(allwait, fwait, start, printstr)
            time.sleep(0.4)
            if all(finished):
                break
        # check success
        for idx in jobs:
            if not jobs[idx].successful():
                jobs[idx].get()
        self.data._print("")
def resolve_duplicates(keys, arr):
    """
    Tries to join together duplicate consens reads that were not previously
    collapsed, likely because there was no overlap of the sequences for one
    or more samples, but there was for others. Rows are merged only when
    every column has at least one missing value (N or -) among the
    duplicate rows; otherwise raises IPyradError.

    keys: read names formatted "<sample>_<idx>:<...>:<...>".
    arr: S1 byte array; row 0 is the reference, rows 1.. match `keys`.
    Returns (newkeys, newarr) with one row per unique sample plus the ref.
    """
    newkeys = []
    snames = np.array([i.rsplit(":", 2)[0].rsplit("_", 1)[0] for i in keys])
    newarr = np.zeros((len(set(snames)) + 1, arr.shape[1]), dtype="S1")
    # put reference into arr
    newarr[0] = arr[0]
    # fill rest while merging dups
    nidx = 1
    seen = set()
    for sidx, key in enumerate(keys):
        sname = snames[sidx]
        if sname not in seen:
            # add to list of seen names
            seen.add(sname)
            # get all rows of data for this sname (+1 b/c ref)
            didx = np.where(snames == sname)[0] + 1
            if didx.size > 1:
                # view bytes as uint8; treat N (78) and - (45) as missing (0)
                iarr = arr[didx, :].view(np.uint8)
                iarr[iarr == 78] = 0
                iarr[iarr == 45] = 0
                if np.all(np.any(iarr == 0, axis=0)):
                    # NOTE(review): with >2 duplicate rows a column can still
                    # hold two real bases; max() then keeps the lexically
                    # larger one — confirm this is intended.
                    newarr[nidx] = iarr.max(axis=0).view("S1")
                else:
                    raise IPyradError("duplicate could not be resolved")
                # store key with reference to all dups
                ikeys = [keys[i - 1] for i in didx]
                fidxs = ";".join([i.rsplit("_", 1)[-1] for i in ikeys])
                newkeys.append("{}_{}".format(sname, fidxs))
            else:
                # store array data and orig key
                newarr[nidx] = arr[didx]
                newkeys.append(keys[sidx])
            nidx += 1
    # fill terminal edges with N again since array can increase
    newarr[newarr == b""] = b"N"
    return newkeys, newarr
def build_ref_regions(data):
    "use bedtools to pull in consens reads overlapping some region of ref"
    # the merged, sorted bam produced by remote_concat_bams
    bamin = os.path.join(
        data.dirs.across,
        "{}.cat.sorted.bam".format(data.name)
    )
    # bamtobed emits one bed interval per mapped consens read;
    # merge collapses overlapping/book-ended intervals into regions
    tobed = [ipyrad.bins.bedtools, "bamtobed", "-i", bamin]
    merge = [
        ipyrad.bins.bedtools,
        "merge",
        "-d", "0",
        "-i", "-",
    ]
    bedproc = sps.Popen(tobed, stderr=sps.STDOUT, stdout=sps.PIPE)
    mergeproc = sps.Popen(
        merge,
        stdin=bedproc.stdout,
        stderr=sps.STDOUT,
        stdout=sps.PIPE,
    )
    result = mergeproc.communicate()[0].decode()
    if mergeproc.returncode:
        raise IPyradError(
            "error in {}: {}".format(" ".join(merge), result))
    # parse "chrom<TAB>start<TAB>end" rows into (str, int, int) tuples
    regions = []
    for row in result.strip().split("\n"):
        chrom, rstart, rend = row.split("\t")
        regions.append((chrom, int(rstart), int(rend)))
    return regions
def build_ref_clusters(data, idx, iregion):
    """
    Given a chunk of regions this will pull in the reference for each region
    and then pull in all consens reads matching to that region. It uses cigar
    info to align the consens reads with the ref. This also merges consens
    from the same sample that were not merged earlier, which is why we expect
    no duplicate samples in the output of reference assemblies.
    """
    # prepare i/o for bamfile with mapped reads
    bamfile = AlignmentFile(
        os.path.join(
            data.dirs.across,
            "{}.cat.sorted.bam".format(data.name)),
        'rb')
    # dict to map chromosome names to integers
    faidict = chroms2ints(data, False)
    # prepare i/o for pysam reference indexed
    reffai = FastaFile(data.params.reference_sequence)
    # store path to cluster bit (uses the function-level idx chunk number)
    outbit = os.path.join(data.tmpdir, "aligned_{}.fa".format(idx))
    # get clusters
    iregions = iter(iregion)
    clusts = []
    while 1:
        # pull in all consens reads mapping to a bed region
        try:
            region = next(iregions)
            reads = bamfile.fetch(*region)
        except StopIteration:
            break
        # build a dict to reference seqs and cigars by name
        mstart = 9e12
        mend = 0
        rdict = {}
        for read in reads:
            rstart = read.reference_start
            rend = rstart + read.qlen
            mstart = min(mstart, rstart)
            mend = max(mend, rend)
            rdict[read.qname] = (read.seq, read.cigar, rstart, rend)
        keys = sorted(rdict.keys(), key=lambda x: x.rsplit(":", 2)[0])
        # pull in the reference for this region (1-indexed)
        refs = reffai.fetch(region[0], mstart, mend)
        # make empty array
        rlen = mend - mstart
        arr = np.zeros((len(keys) + 1, rlen), dtype=bytes)
        arr[0] = list(refs.upper())
        # fill arr with remaining samples
        # NOTE(review): this loop variable shadows the function parameter
        # `idx`; harmless since outbit was computed above, but confusing.
        for idx, key in enumerate(keys):
            seq, cigar, start, end = rdict[key]
            # how far ahead of ref start and short of ref end is this read
            fidx = start - mstart
            eidx = arr.shape[1] - (mend - end)
            # enter into the array, trim end if longer than pulled ref
            arr[idx + 1, fidx:eidx] = list(seq)[:eidx - fidx]
            # mod sequence according to cigar for indels and ambigs
            # csums is the location of impute on the seq, so it must be
            # incremented by fidx and not extend past eidx
            # NOTE(review): the code below adds eidx and bounds by fidx —
            # the opposite of the comment above; since eidx >= fidx these
            # branches look unreachable. Possibly fidx/eidx swapped; confirm.
            for cidx, cig in enumerate(cigar):
                if cig[0] == 4:
                    csums = sum(i[1] for i in cigar[:cidx])
                    csums += eidx
                    if csums < fidx:
                        arr[idx + 1, csums] = arr[idx + 1, csums].lower()
                if cig[0] == 1:
                    csums = sum(i[1] for i in cigar[:cidx])
                    csums += eidx
                    if csums < fidx:
                        arr[idx + 1, csums] = b"-"
        # fill terminal edges with N
        arr[arr == b""] = b"N"
        # duplicates merge here (only perfect merge on all Ns) and reshape
        # the array to match. This will need to be resolved in catgs...
        # if it does not merge then
        try:
            keys, arr = resolve_duplicates(keys, arr)
        except IPyradError:
            pass
        # get consens seq and variant site index
        clust = [">reference_{}:{}:{}-{}\n{}".format(
            0,
            faidict[region[0]] + 1, mstart + 1, mend + 1,  # 1-indexed
            b"".join(arr[0]).decode()
        )]
        for idx, key in enumerate(keys):
            clust.append(
                ">{}\n{}".format(key, b"".join(arr[idx + 1]).decode())
            )
        clusts.append("\n".join(clust))
    # dump to temp file until concat in next step.
    with open(outbit, 'w') as outfile:
        if clusts:
            outfile.write("\n//\n//\n".join(clusts) + "\n//\n//\n")
def build_concat_two(data, jobids, randomseed):
    """
    Concatenate the per-group .htemp seed files and length-sort them with
    vsearch into a single "<name>-x-catshuf.fa" input for tier-2 clustering.
    randomseed is unused here; the signature mirrors build_concat_files.
    """
    htemps = []
    for jobid in jobids:
        htemps.append(
            os.path.join(
                data.dirs.across,
                "{}-{}.htemp".format(data.name, jobid)))
    merged = os.path.join(
        data.dirs.across,
        "{}-x-catshuf.fa".format(data.name))
    catcmd = ['cat'] + htemps
    sortcmd = [
        ipyrad.bins.vsearch,
        '--sortbylength', '-',
        '--fasta_width', '0',
        '--output', merged,
    ]
    # pipe cat output straight into vsearch
    catproc = sps.Popen(catcmd, stdout=sps.PIPE, close_fds=True)
    sortproc = sps.Popen(
        sortcmd, stdin=catproc.stdout, stdout=sps.PIPE, close_fds=True)
    sortproc.communicate()
    catproc.stdout.close()
def build_concat_files(data, jobid, samples, randomseed):
    """
    [This is run on an ipengine]
    Make a concatenated consens file with sampled alleles (no RSWYMK/rswymk).
    Orders reads by length and shuffles randomly within length classes
    """
    conshandles = [
        sample.files.consens for sample in samples if
        sample.stats.reads_consens]
    conshandles.sort()
    assert conshandles, "no consensus files found"
    ## concatenate all of the gzipped consens files
    cmd = ['cat'] + conshandles
    groupcons = os.path.join(
        data.dirs.across,
        "{}-{}-catcons.gz".format(data.name, jobid))
    with open(groupcons, 'w') as output:
        call = sps.Popen(cmd, stdout=output, close_fds=True)
        call.communicate()
    ## a string of sed substitutions for temporarily replacing hetero sites
    ## skips lines with '>', so it doesn't affect taxon names
    subs = ["/>/!s/W/A/g", "/>/!s/w/A/g", "/>/!s/R/A/g", "/>/!s/r/A/g",
            "/>/!s/M/A/g", "/>/!s/m/A/g", "/>/!s/K/T/g", "/>/!s/k/T/g",
            "/>/!s/S/C/g", "/>/!s/s/C/g", "/>/!s/Y/C/g", "/>/!s/y/C/g"]
    subs = ";".join(subs)
    ## impute pseudo-haplo information to avoid mismatch at hetero sites
    ## the read data with hetero sites is put back into clustered data later.
    ## pipe passed data from gunzip to sed.
    cmd1 = ["gunzip", "-c", groupcons]
    cmd2 = ["sed", subs]
    proc1 = sps.Popen(cmd1, stdout=sps.PIPE, close_fds=True)
    allhaps = groupcons.replace("-catcons.gz", "-cathaps.fa")
    with open(allhaps, 'w') as output:
        proc2 = sps.Popen(cmd2, stdin=proc1.stdout, stdout=output, close_fds=True)
        proc2.communicate()
    proc1.stdout.close()
    ## now sort the file using vsearch
    allsort = groupcons.replace("-catcons.gz", "-catsort.fa")
    cmd1 = [ipyrad.bins.vsearch,
            "--sortbylength", allhaps,
            "--fasta_width", "0",
            "--output", allsort]
    proc1 = sps.Popen(cmd1, close_fds=True)
    proc1.communicate()
    ## shuffle sequences within size classes. Tested seed (8/31/2016)
    ## shuffling works repeatably with seed.
    random.seed(randomseed)
    ## open an iterator to lengthsorted file and grab two lines at a time
    ## (fasta records are name+sequence pairs on 2 lines)
    allshuf = groupcons.replace("-catcons.gz", "-catshuf.fa")
    outdat = open(allshuf, 'wt')
    indat = open(allsort, 'r')
    idat = izip(iter(indat), iter(indat))
    done = 0
    chunk = [next(idat)]
    while not done:
        ## grab 2-lines until they become shorter (unless there's only one)
        oldlen = len(chunk[-1][-1])
        while 1:
            try:
                dat = next(idat)
            except StopIteration:
                done = 1
                break
            if len(dat[-1]) == oldlen:
                chunk.append(dat)
            else:
                ## send the last chunk off to be processed
                random.shuffle(chunk)
                outdat.write("".join(chain(*chunk)))
                ## start new chunk
                chunk = [dat]
                break
    ## do the last chunk
    random.shuffle(chunk)
    outdat.write("".join(chain(*chunk)))
    indat.close()
    outdat.close()
def cluster(data, jobid, nthreads, print_progress=False):
    """
    Run vsearch cluster_smallmem on the concatenated/shuffled consens reads
    for one job group, writing matches (.utemp) and non-matches (.htemp).
    When print_progress is True, vsearch output is read through a pty and
    percent progress is printed to stdout (the caller parses these prints).
    """
    # get files for this jobid
    catshuf = os.path.join(
        data.dirs.across,
        "{}-{}-catshuf.fa".format(data.name, jobid))
    uhaplos = os.path.join(
        data.dirs.across,
        "{}-{}.utemp".format(data.name, jobid))
    hhaplos = os.path.join(
        data.dirs.across,
        "{}-{}.htemp".format(data.name, jobid))
    ## parameters that vary by datatype
    ## (too low of cov values yield too many poor alignments)
    strand = "plus"
    cov = 0.5  # 0.90
    if data.params.datatype in ["gbs", "2brad"]:
        strand = "both"
        cov = 0.60
    elif data.params.datatype == "pairgbs":
        strand = "both"
        cov = 0.75  # 0.90
    cmd = [ipyrad.bins.vsearch,
           "-cluster_smallmem", catshuf,
           "-strand", strand,
           "-query_cov", str(cov),
           "-minsl", str(0.5),
           "-id", str(data.params.clust_threshold),
           "-userout", uhaplos,
           "-notmatched", hhaplos,
           "-userfields", "query+target+qstrand",
           "-maxaccepts", "1",
           "-maxrejects", "0",
           "-fasta_width", "0",
           "--minseqlength", str(data.params.filter_min_trim_len),
           "-threads", str(nthreads),  # "0",
           "-fulldp",
           "-usersort",
           ]
    if not print_progress:
        proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE)
        out = proc.communicate()
        if proc.returncode:
            raise IPyradError(out)
    else:
        # run vsearch under a pty so its progress output is unbuffered
        (worker, boss) = pty.openpty()
        proc = sps.Popen(cmd, stdout=boss, stderr=boss, close_fds=True)
        prog = 0
        newprog = 0
        while 1:
            # non-blocking poll of the pty for new output
            isdat = select.select([worker], [], [], 0)
            if isdat[0]:
                dat = os.read(worker, 80192).decode()
            else:
                dat = ""
            if "Clustering" in dat:
                try:
                    newprog = int(dat.split()[-1][:-1])
                # may raise value error when it gets to the end
                except ValueError:
                    pass
            # print progress (do not remove print statement, stdout is parsed)
            if newprog != prog:
                print(int(newprog))
                prog = newprog
            time.sleep(0.1)
            # break if done
            # catches end chunk of printing if clustering went really fast
            if "Clusters:" in dat:
                break
        # another catcher to let vsearch cleanup after clustering is done
        proc.wait()
        print(100)
def count_seeds(uhandle):
    """
    Count the number of cluster seeds in a sorted .utemp file.

    Equivalent to the previous ``cut -f 2 | uniq | wc`` shell pipeline:
    counts runs of identical values in the second tab-separated column
    (on a sorted file this is the number of distinct seeds). Implemented
    in pure Python to avoid spawning three external processes and the
    pipe-handle bookkeeping that went with them.

    Parameters
    ----------
    uhandle : str
        Path to the sorted utemp file (tab-separated: hit, seed, ori).

    Returns
    -------
    int
        Number of distinct (consecutive) seed values; 0 for an empty file.
    """
    nseeds = 0
    last = None
    with open(uhandle, 'r') as insort:
        for line in insort:
            parts = line.split("\t")
            # like `cut -f 2`: fall back to the whole line if no delimiter
            seed = parts[1] if len(parts) > 1 else line
            if seed != last:
                nseeds += 1
                last = seed
    return nseeds
def sort_seeds(uhandle):
    """Sort a .utemp hits file by its seed column (field 2) into
    <uhandle>.sort using the system `sort` utility."""
    sorted_path = uhandle + ".sort"
    sortcmd = ["sort", "-k", "2", uhandle, "-o", sorted_path]
    proc = sps.Popen(sortcmd, close_fds=True)
    proc.communicate()
def build_single_denovo_clusters(data, usort, nseeds, *args):
    "use this function when not hierarchical clustering"
    # load all concat fasta files into a dictionary (memory concerns here...)
    conshandle = os.path.join(
        data.dirs.across,
        "{}-0-catcons.gz".format(data.name),
    )
    allcons = {}
    with gzip.open(conshandle, 'rt') as iocons:
        # iterate the fasta 2 lines (name, seq) at a time
        cons = izip(*[iter(iocons)] * 2)
        for namestr, seq in cons:
            nnn, sss = [i.strip() for i in (namestr, seq)]
            allcons[nnn[1:]] = sss
    # load all utemp files into a dictionary
    usortfile = os.path.join(
        data.dirs.across,
        "{}-0.utemp.sort".format(data.name)
    )
    # set optim to approximately 4 chunks per core. Smaller allows for a bit
    # cleaner looking progress bar. 40 cores will make 160 files.
    # This often does not work as intended. iao 10/26/19
    # optim = ((nseeds // (data.ncpus * 4)) + (nseeds % (data.ncpus * 4)))
    optim = np.ceil(nseeds / (data.ncpus * 4))
    # iterate through usort grabbing seeds and matches
    with open(usortfile, 'rt') as insort:
        # iterator, seed null, and seqlist null
        isort = iter(insort)
        loci = 0
        lastseed = 0
        fseqs = []
        seqlist = []
        seqsize = 0
        while 1:
            try:
                hit, seed, ori = next(isort).strip().split()
            except StopIteration:
                break
            # store hit if still matching to same seed
            if seed == lastseed:
                if ori == "-":
                    # reverse-complement hits matched on the minus strand
                    seq = fullcomp(allcons[hit])[::-1]
                else:
                    seq = allcons[hit]
                fseqs.append(">{}\n{}".format(hit, seq))
            # store seed and hit (to a new cluster) if new seed.
            else:
                # store the last fseq, count it, and clear it
                if fseqs:
                    seqlist.append("\n".join(fseqs))
                    seqsize += 1
                    fseqs = []
                # occasionally write to file
                if seqsize >= optim:
                    if seqlist:
                        loci += seqsize
                        pathname = os.path.join(
                            data.tmpdir,
                            "{}.chunk_{}".format(data.name, loci))
                        with open(pathname, 'wt') as clustout:
                            clustout.write(
                                "\n//\n//\n".join(seqlist) + "\n//\n//\n")
                    # reset counter and list
                    seqlist = []
                    seqsize = 0
                # store the new seed on top of fseqs
                fseqs.append(">{}\n{}".format(seed, allcons[seed]))
                lastseed = seed
                # store the first hit to the seed
                seq = allcons[hit]
                if ori == "-":
                    seq = fullcomp(seq)[::-1]
                fseqs.append(">{}\n{}".format(hit, seq))
    # write whatever is left over to the clusts file
    if fseqs:
        seqlist.append("\n".join(fseqs))
        seqsize += 1
    loci += seqsize
    if seqlist:
        pathname = os.path.join(data.tmpdir,
            data.name + ".chunk_{}".format(loci))
        with open(pathname, 'wt') as clustsout:
            clustsout.write("\n//\n//\n".join(seqlist) + "\n//\n//\n")
    ## final progress and cleanup
    del allcons
def build_hierarchical_denovo_clusters(data, usort, nseeds, jobids):
    """
    Use this function when building clusters from hierarchical clusters.

    Loads all per-job consensus sequences and utemp hit files, then walks
    the global usort file writing cluster chunks ("//\\n//\\n"-separated)
    to data.tmpdir for downstream alignment.

    Parameters
    ----------
    data : Assembly-like object with .name, .dirs.across, .tmpdir, .ncpus
    usort : path to the sorted utemp file (hit, seed, orientation per line)
    nseeds : total number of seeds, used to size output chunks
    jobids : ids of the hierarchical clustering jobs to gather inputs from
    """
    # load all concat fasta files into a dictionary (memory concerns here...)
    allcons = {}
    conshandles = [
        os.path.join(
            data.dirs.across, "{}-{}-catcons.gz".format(data.name, jobid))
        for jobid in jobids]
    for conshandle in conshandles:
        subcons = {}
        with gzip.open(conshandle, 'rt') as iocons:
            # pair consecutive lines as (name, sequence) records
            cons = izip(*[iter(iocons)] * 2)
            for namestr, seq in cons:
                nnn, sss = [i.strip() for i in (namestr, seq)]
                subcons[nnn[1:]] = sss
        allcons.update(subcons)
        del subcons
    # load all utemp files into a dictionary: seed -> [(hit, orientation), ...]
    subdict = {}
    usortfiles = [
        os.path.join(data.dirs.across, "{}-{}.utemp".format(data.name, jobid))
        for jobid in jobids
    ]
    for ufile in usortfiles:
        with open(ufile, 'r') as inhits:
            for line in inhits:
                hit, seed, ori = line.strip().split()
                if seed not in subdict:
                    subdict[seed] = [(hit, ori)]
                else:
                    subdict[seed].append((hit, ori))
    # set optim to approximately 4 chunks per core. Smaller allows for a bit
    # cleaner looking progress bar. 40 cores will make 160 files.
    optim = ((nseeds // (data.ncpus * 4)) + (nseeds % (data.ncpus * 4)))
    # iterate through usort grabbing seeds and matches.
    # BUGFIX: use a context manager so the handle is closed even if an
    # exception is raised mid-iteration (was a bare open()/close() pair).
    loci = 0
    lastseed = 0
    fseqs = []
    seqlist = []
    seqsize = 0
    with open(usort, 'rt') as insort:
        isort = iter(insort)
        while 1:
            try:
                hit, seed, ori = next(isort).strip().split()
            except StopIteration:
                break
            # if a new seed is reached, flush the previous cluster
            if seed != lastseed:
                # store the last fseq, count it, and clear it
                if fseqs:
                    seqlist.append("\n".join(fseqs))
                    seqsize += 1
                    fseqs = []
                # occasionally write to file
                if seqsize >= optim:
                    if seqlist:
                        loci += seqsize
                        pathname = os.path.join(
                            data.tmpdir,
                            "{}.chunk_{}".format(data.name, loci))
                        with open(pathname, 'wt') as clustout:
                            clustout.write(
                                "\n//\n//\n".join(seqlist) + "\n//\n//\n")
                        # reset counter and list
                        seqlist = []
                        seqsize = 0
                # store the new seed on top of fseqs
                fseqs.append(">{}\n{}".format(seed, allcons[seed]))
                lastseed = seed
                # expand subhits to seed
                uhits = subdict.get(seed)
                if uhits:
                    # BUGFIX: use distinct loop variables; the original
                    # reused `ori`, clobbering the current usort line's
                    # orientation before it was used for the hit below.
                    for ahit, aori in uhits:
                        if aori == "-":
                            seq = fullcomp(allcons[ahit])[::-1]
                        else:
                            seq = allcons[ahit]
                        fseqs.append(">{}\n{}".format(ahit, seq))
            # expand matches with subdict
            hitseqs = [(hit, allcons[hit], ori)]
            uhits = subdict.get(hit)
            if uhits:
                # BUGFIX: was `for hit in uhits`, clobbering the outer `hit`
                for uhit in uhits:
                    hitseqs.append((uhit[0], allcons[uhit[0]], uhit[1]))
            # revcomp if orientation is reversed
            for sname, seq, sori in hitseqs:
                if sori == "-":
                    seq = fullcomp(seq)[::-1]
                fseqs.append(">{}\n{}".format(sname, seq))
    ## write whatever is left over to the clusts file
    if fseqs:
        seqlist.append("\n".join(fseqs))
        seqsize += 1
    loci += seqsize
    if seqlist:
        pathname = os.path.join(
            data.tmpdir,
            data.name + ".chunk_{}".format(loci))
        with open(pathname, 'wt') as clustsout:
            clustsout.write("\n//\n//\n".join(seqlist) + "\n//\n//\n")
    ## final progress and cleanup
    del allcons
def align_to_array(data, samples, chunk):
    """
    Opens a tmp clust chunk and iterates over align jobs.
    """
    # data are already chunked; read the whole file and split into loci
    with open(chunk, 'rt') as infile:
        clusts = infile.read().split("//\n//\n")[:-1]
    # sort samples by name to ensure deterministic ordering
    samples.sort(key=lambda x: x.name)
    # a persistent bash shell in which muscle is run repeatedly
    proc = sps.Popen(["bash"], stdin=sps.PIPE, stdout=sps.PIPE, bufsize=0)
    # collect one aligned block per locus
    allstack = []
    for clust in clusts:
        istack = []
        locus_lines = clust.strip().split("\n")
        names = locus_lines[::2]
        seqs = locus_lines[1::2]
        # skip aligning and continue if duplicates present (locus too big)
        # but reshape locs to be same lengths by adding --- to end, this
        # simplifies handling them in step7 (they're still always filtered)
        unames = {name.rsplit("_", 1)[0] for name in names}
        if len(unames) < len(names):
            width = max(len(s) for s in seqs)
            padded = [s.ljust(width, "-") for s in seqs]
            istack = [">{}\n{}".format(n[1:], s) for n, s in zip(names, padded)]
            allstack.append("\n".join(istack))
            continue
        # else locus looks good, align it.
        # is there a paired-insert in any samples in the locus?
        try:
            # try to split cluster list at nnnn separator for each read
            left = [s.split("nnnn")[0] for s in seqs]
            right = [s.split("nnnn")[1] for s in seqs]
            # align the two sides separately
            istack1 = muscle_it(proc, names, left)
            istack2 = muscle_it(proc, names, right)
            # re-join the aligned halves in order
            for sdx, entry in enumerate(istack1):
                n1, s1 = entry.split("\n")
                s2 = istack2[sdx].split("\n")[-1]
                istack.append(n1 + "\n" + s1 + "nnnn" + s2)
        # no insert: just align as a single locus
        except IndexError:
            istack = muscle_it(proc, names, seqs)
        # store the locus
        if istack:
            allstack.append("\n".join(istack))
    # shut down the persistent shell
    proc.stdout.close()
    if proc.stderr:
        proc.stderr.close()
    proc.stdin.close()
    proc.wait()
    # write to file when chunk is finished
    odx = chunk.rsplit("_")[-1]
    alignfile = os.path.join(data.tmpdir, "aligned_{}.fa".format(odx))
    with open(alignfile, 'wt') as outfile:
        outfile.write("\n//\n//\n".join(allstack) + "\n//\n//\n")
def muscle_it(proc, names, seqs):
    """
    Align one locus with muscle through the persistent bash shell *proc*,
    restore input order and allele (lowercase) information, and return the
    alignment as a list of ">name\\nsequence" strings sorted by sample name.
    """
    # append a counter to names because muscle doesn't retain order
    nnames = [">{};*{}".format(j[1:], i) for i, j in enumerate(names)]
    # make back into fasta strings
    cl1 = "\n".join(["\n".join(i) for i in zip(nnames, seqs)])
    # store allele (lowercase) info, returns mask with lowercases.
    # BUGFIX: the boolean second return value was bound but never used.
    amask, _ = store_alleles(seqs)
    # send align1 to the bash shell (TODO: check for pipe-overflow)
    cmd1 = ("echo -e '{}' | {} -quiet -in - ; echo {}"
            .format(cl1, ipyrad.bins.muscle, "//\n"))
    proc.stdin.write(cmd1.encode())
    # read the stdout by line until splitter is reached
    align1 = []
    for line in iter(proc.stdout.readline, b'//\n'):
        align1.append(line.decode())
    # reorder b/c muscle doesn't keep order: sort by the appended counter
    lines = "".join(align1)[1:].split("\n>")
    dalign1 = dict([i.split("\n", 1) for i in lines])
    keys = sorted(
        dalign1.keys(),
        key=lambda x: int(x.rsplit("*")[-1])
    )
    seqarr = np.zeros(
        (len(names), len(dalign1[keys[0]].replace("\n", ""))),
        dtype='S1',
    )
    for kidx, key in enumerate(keys):
        concatseq = dalign1[key].replace("\n", "")
        seqarr[kidx] = list(concatseq)
    # get alleles back using fast jit'd function.
    if np.sum(amask):
        intarr = seqarr.view(np.uint8)
        iamask = retrieve_alleles_after_aligning(intarr, amask)
        seqarr[iamask] = np.char.lower(seqarr[iamask])
    # sort in sname (alphanumeric) order.
    # (BUGFIX: removed the dead `istack = []` initializer at function top,
    # which was unconditionally reassigned here before first use.)
    istack = []
    wkeys = np.argsort([i.rsplit("_", 1)[0] for i in keys])
    for widx in wkeys:
        wname = names[widx]
        istack.append(
            "{}\n{}".format(wname, b"".join(seqarr[widx]).decode()))
    return istack
def store_alleles(seqs):
    """
    Returns a mask selecting columns with lower case calls, and
    a boolean of whether or not any exist. This is used to put them
    back into alignments after muscle destroys all this info during
    alignment.

    Parameters
    ----------
    seqs : list of str
        Sequences, possibly of different lengths; shorter rows are
        padded with empty bytes (which are never lowercase).

    Returns
    -------
    (numpy.ndarray of bool, bool)
    """
    # get shape of the array (rows x longest sequence) and fill rows
    shape = (len(seqs), max(len(i) for i in seqs))
    arrseqs = np.zeros(shape, dtype=np.bytes_)
    for row, seq in enumerate(seqs):
        arrseqs[row, :len(seq)] = list(seq)
    # mask the lowercase (allele) positions
    amask = np.char.islower(arrseqs)
    # simplified from an if/else over np.any; bool() keeps the plain-bool
    # return type of the original.
    return amask, bool(np.any(amask))
def retrieve_alleles_after_aligning(intarr, amask):
    """
    Imputes lower case allele calls back into alignments
    while taking account for spacing caused by insertions.
    """
    newmask = np.zeros(intarr.shape, dtype=np.bool_)
    ncols = amask.shape[1]
    for ridx in range(intarr.shape[0]):
        # positions of indels ('-' == ASCII 45) in this aligned row
        indidx = np.where(intarr[ridx] == 45)[0]
        # if no indels then simply use the existing mask
        if not indidx.size:
            newmask[ridx] = amask[ridx]
        else:
            # drop indel columns (those inside the mask's width), then
            # shift the pre-alignment mask into the remaining positions
            keep = np.ones(ncols, dtype=np.bool_)
            keep[indidx[indidx < ncols]] = False
            not_idx = np.flatnonzero(keep)
            # fill in new data into all other spots
            newmask[ridx, not_idx] = amask[ridx, :not_idx.shape[0]]
    return newmask
| dereneaton/ipyrad | ipyrad/assemble/clustmap_across.py | Python | gpl-3.0 | 50,414 | [
"pysam"
] | f94795073341355a49cf3b6d31a296c8ecd09b5f361fe6bce30b90018a80236d |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 25 15:50:57 2016
@author: Gaspar Pandy
"""
from build.management.commands.base_build import Command as BaseBuild
from django.db.models import Q
from protein.models import Protein, ProteinConformation, ProteinSequenceType, ProteinSource, ProteinState
from residue.models import Residue
from structure.models import Structure, PdbData, StructureType
from structure.sequence_parser import SequenceParser
from structure.functions import PdbChainSelector, PdbStateIdentifier, get_pdb_ids
from structure.management.commands.structure_yaml_editor import StructureYaml
from construct.functions import *
from common.models import WebResource, WebLink, Publication
from tools.management.commands.blast_recent_PDB import Command as BlastRecentPDB
import Bio.PDB as PDB
from datetime import datetime
import urllib
import re
import json
import os
import xmltodict
import yaml
import shlex
import subprocess
import pprint
import csv
# PDB codes known to lack one or more of the seven GPCR x50 reference
# positions; Command.main_func excludes these from the "consider" report.
structs_with_missing_x50 = ['5EM9', '5AER', '3BEF', '3LU9', '3HKI', '3HKJ', '1NRR', '1NRQ', '1NRP', '1NRO', '1NRN', '3QDZ', '2ZPK', '1YTV', '4JQI', '6NI2', '5YD3',
                            '5YD5', '5YD4', '5YY4', '6KVA', '6KVF', '4NUU', '4NUV', '6K3F', '1XWD', '4AY9', '4MQW', '2XWT', '3G04', '4KT1', '4QXE', '4QXF', '4UFR',
                            '4UFS', '4BSU', '4BSR', '4BSS', '4BST', '5II0', '6PFO', '6PGQ', '3AQF', '4RWF', '4RWG', '5V6Y', '6D1U', '6ZHO', '6ZIS', '6UMG', '6V2E',
                            '2XDG', '2QKH', '3C59', '3C5T', '3IOL', '5E94', '4ZGM', '5OTW', '2A83', '3CZF', '4ERS', '4LF3', '3L2J', '3H3G', '3C4M', '5EMB', '4Z8J',
                            '3N94', '3B3I', '3B6S', '3DTX', '3HCV', '5IB1', '5IB3', '5IB2', '5IB5', '5IB4', '5DEG', '5DEF', '1OF2', '1OGT', '2X57', '5AFB', '5CMN',
                            '6VHH', '2BO2', '2BOU', '2BOX', '4DLO', '5K5T', '5K5S', '5FBK', '5FBH', '4PAS', '4MR7', '4MR8', '4MQE', '4MQF', '4MS3', '4MS4', '4MRM',
                            '4MS1', '4MR9', '4F11', '4F12', '6M8R', '6OCP', '3KS9', '4XAQ', '4XAS', '5CNI', '5CNJ', '5KZN', '5KZQ', '3SM9', '4XAR', '5CNK', '5CNM',
                            '6B7H', '3LMK', '6N4Y', '6N4X', '6N50', '3MQ4', '5C5C', '6E5V', '6BSZ', '6BT5', '6C0B', '5BPB', '5BQC', '5UWG', '5BPQ', '5BQE', '6NE1',
                            '5CL1', '5CM4', '5URZ', '5URY', '6O39', '5URV', '4Z33', '6NE2', '6NE4', '6O3B', '6O3A', '5WBS', '5T44', '5UN6', '5UN5', '6NDZ', '5KZV',
                            '5KZY', '5KZZ', '7JQD', '2PUX', '2PV9', '6EXJ', '7JNZ', '7NW3', '4F8K', '1RY1', '2J28', '4UE5', '6O9I', '6O9H', '7ALO', '6SKA', '4DLQ',
                            '5OVP', '6SKE', '5FTT', '5FTU', '4YEB', '4RMK', '4RML', '6JBU', '6IDX', '5KVM', '6V55', '7NJZ', '3N96', '3N93', '3N95', '7D86', '7DO4']
class Command(BaseBuild):
    """Management command that finds GPCR structures on the PDB that are
    missing from the local database / yaml annotations.

    Modes:
      --classified  use PDB's own 'G protein-coupled receptors' classification
      -r NAMES      restrict the search to specific UniProt entry names
    """

    def add_arguments(self, parser):
        super(Command, self).add_arguments(parser=parser)
        parser.add_argument('--classified', help="Use PDB's 'G protein-coupled receptors' classification", default=False,
                            action='store_true')
        parser.add_argument('-r', help="Query specific receptor(s) with UniProt entry names", default=False, type=str, nargs='+')

    def handle(self, *args, **options):
        # NOTE(review): verbosity 0 also enables verbose output here —
        # confirm this is intended rather than `in [1, 2, 3]`.
        if options['verbosity'] in [0, 1, 2, 3]:
            self.verbose = True
        else:
            self.verbose = False
        if options['classified']:
            q = QueryPDBClassifiedGPCR()
            # BUGFIX: QueryPDBClassifiedGPCR defines list_xtals(), not
            # new_xtals(); the previous call raised AttributeError.
            q.list_xtals(self.verbose)
        else:
            if options['r']:
                self.uniprots = self.fetch_accession_from_entryname(options['r'])
            else:
                self.uniprots = self.get_all_GPCR_uniprots()
            self.yamls = self.get_all_yamls()
            self.prepare_input(options['proc'], self.uniprots)

    def main_func(self, positions, iteration, count, lock):
        # slice of the uniprot workload assigned to this process
        if not positions[1]:
            uniprot_list = self.uniprots[positions[0]:]
        else:
            uniprot_list = self.uniprots[positions[0]:positions[1]]
        q = QueryPDB(self.uniprots, self.yamls)
        brp = BlastRecentPDB()
        blast_pdbs = brp.run()
        # map uniprot accession -> recent PDB codes found by blast
        self.blast_uniprot_dict = {}
        for b in blast_pdbs:
            blast_uniprots = q.pdb_request_by_pdb(b, 'polymer_entity')
            if not blast_uniprots:
                # BUGFIX: Command has no yaml_list attribute; the
                # missing-yaml bookkeeping lives on the QueryPDB helper.
                q.yaml_list.append(b)
                continue
            for bu in blast_uniprots:
                if bu not in self.blast_uniprot_dict:
                    self.blast_uniprot_dict[bu] = [b]
                else:
                    self.blast_uniprot_dict[bu].append(b)
        # BUGFIX: QueryPDB.new_xtals reads self.blast_uniprot_dict on the
        # QueryPDB instance, but the dict was only ever stored on Command.
        q.blast_uniprot_dict = self.blast_uniprot_dict
        consider_list, error_list = [], []
        print('{} number of receptors to check'.format(len(uniprot_list)))
        # uniprot_list = ['P28223']
        for uni in uniprot_list:
            # print(uni)
            q.new_xtals(uni)
            for i in q.consider_list:
                if i not in consider_list and i not in structs_with_missing_x50:
                    consider_list.append(i)
            for i in q.error_list:
                if i not in error_list:
                    error_list.append(i)
        if self.verbose:
            print('Missing from db: ', q.db_list)
            print('Missing yamls: ', q.yaml_list)
        print('Structures with missing x50s: {} structures {}'.format(len(consider_list), consider_list))
        print('Structures with an error: {} structures {}'.format(len(error_list), error_list))

    def fetch_accession_from_entryname(self, listof_entrynames):
        """Map UniProt entry names to accession codes via the Protein table."""
        return [i.accession for i in Protein.objects.filter(entry_name__in=listof_entrynames)]

    def get_all_GPCR_uniprots(self):
        """All GPCR accessions from the DB; falls back to the uniprot data dir."""
        try:
            uniprots = [i.accession for i in Protein.objects.filter(accession__isnull=False).filter(family__slug__istartswith='00')]
            if len(uniprots) < 100:
                raise Exception()
        except:
            uniprots = [i.split('.')[0] for i in os.listdir('/protwis/data/protwis/gpcr/protein_data/uniprot/')]
        return uniprots

    def get_all_yamls(self):
        """Basenames of all structure yaml annotation files on disk."""
        yamls = [i.split('.')[0] for i in os.listdir('/protwis/data/protwis/gpcr/structure_data/structures/')]
        return yamls
class QueryPDB():
    ''' Queries PDB using GPCRdb protein and structure entries. If those are not available, it uses the structure and uniprot data folders.
    '''
    def __init__(self, uniprots, yamls):
        # PDB codes to ignore entirely
        self.exceptions = []
        # UniProt accessions and structure-yaml basenames to check against
        self.uniprots = uniprots
        self.yamls = yamls
        # accumulators: structures missing from the DB / missing yaml files
        self.db_list, self.yaml_list = [], []
        # codes known to lack x50 positions, and accepted exceptions to that
        self.missing_x50_list = ['4KNG','3N7P','3N7R','3N7S','4HJ0','6DKJ','5OTT','5OTU','5OTV','5OTX','6GB1']
        self.missing_x50_exceptions = ['6TPG','6TPJ']
        # structures flagged for manual review / that raised during import
        self.consider_list, self.error_list = [], []

    def new_xtals(self, uniprot):
        ''' List GPCR crystal structures missing from GPCRdb and the yaml files. Adds missing structures to DB.
        '''
        # structs = self.pdb_request_by_uniprot(uniprot)
        structs = get_pdb_ids(uniprot)
        try:
            protein = Protein.objects.get(accession=uniprot)
        except:
            protein = None
        try:
            x50s = Residue.objects.filter(protein_conformation__protein=protein, generic_number__label__in=['1x50','2x50','3x50','4x50','5x50','6x50','7x50'])
        except:
            x50s = None
        # merge in any recent PDB codes found by blast for this accession
        # NOTE(review): blast_uniprot_dict is populated externally by
        # Command.main_func, not by __init__ — verify it is always set.
        if uniprot in self.blast_uniprot_dict:
            for i in self.blast_uniprot_dict[uniprot]:
                if i not in structs:
                    if structs == ['null']:
                        structs = [i]
                    else:
                        structs.append(i)
        if structs != ['null']:
            for s in structs:
                # print(s)
                missing_from_db, missing_yaml = False, False
                try:
                    st_obj = Structure.objects.get(pdb_code__index=s)
                except:
                    # structure not in DB: confirm it is a usable entry
                    if s not in self.exceptions:
                        check = self.pdb_request_by_pdb(s, 'entry')
                        if check:
                            self.db_list.append(s)
                            missing_from_db = True
                    # also record whether its annotation yaml is missing
                    if s not in self.yamls and s not in self.exceptions:
                        if s not in self.db_list:
                            check = self.pdb_request_by_pdb(s, 'entry')
                        else:
                            check = True
                        if check:
                            self.yaml_list.append(s)
                            missing_yaml = True
                if not missing_from_db:
                    continue
                try:
                    pdb_data_dict = fetch_pdb_info(s, protein, new_xtal=True)
                    # pprint.pprint(pdb_data_dict)
                    exp_method = pdb_data_dict['experimental_method']
                    if exp_method == 'Electron Microscopy':
                        st_type = StructureType.objects.get(slug='electron-microscopy')
                    elif exp_method == 'X-ray diffraction':
                        st_type = StructureType.objects.get(slug='x-ray-diffraction')
                    elif exp_method == 'Electron crystallography':
                        st_type = StructureType.objects.get(slug='electron-crystallography')
                    if 'deletions' in pdb_data_dict:
                        for d in pdb_data_dict['deletions']:
                            presentx50s = []
                            for x in x50s:
                                if not d['start'] < x.sequence_number < d['end']:
                                    presentx50s.append(x)
                            # Filter out ones without all 7 x50 positions present in the xtal
                            if len(presentx50s) != 7:
                                if s not in self.missing_x50_list:
                                    self.consider_list.append(s)
                                if s not in self.missing_x50_exceptions:
                                    try:
                                        del self.db_list[self.db_list.index(s)]
                                        missing_from_db = False
                                        del self.yaml_list[self.yaml_list.index(s)]
                                    except:
                                        pass
                        # same x50 filter for unobserved regions
                        if 'not_observed' in pdb_data_dict:
                            for no in pdb_data_dict['not_observed']:
                                presentx50s = []
                                for x in x50s:
                                    if not no[0] < x.sequence_number < no[1]:
                                        presentx50s.append(x)
                                if len(presentx50s) != 7:
                                    if s not in self.missing_x50_list:
                                        self.consider_list.append(s)
                                    if s not in self.missing_x50_exceptions:
                                        try:
                                            del self.db_list[self.db_list.index(s)]
                                            missing_from_db = False
                                            del self.yaml_list[self.yaml_list.index(s)]
                                        except:
                                            pass
                    else:
                        print('Warning: no deletions in pdb info, check {}'.format(s))
                        continue
                    if missing_from_db:
                        # import the structure: protein record, pdb file,
                        # publication, yaml annotations, residue table, state
                        pref_chain = ''
                        resolution = pdb_data_dict['resolution']
                        pdb_code, created = WebLink.objects.get_or_create(index=s, web_resource=WebResource.objects.get(slug='pdb'))
                        pdbl = PDB.PDBList()
                        pdbl.retrieve_pdb_file(s, pdir='./', file_format="pdb")
                        with open('./pdb{}.ent'.format(s).lower(), 'r') as f:
                            lines = f.readlines()
                        pdb_file = ''
                        publication_date, pubmed, doi = '', '', ''
                        state = ProteinState.objects.get(slug='inactive')
                        new_prot, created = Protein.objects.get_or_create(entry_name=s.lower(), accession=None, name=s.lower(), sequence=pdb_data_dict['wt_seq'], family=protein.family,
                                                                          parent=protein, residue_numbering_scheme=protein.residue_numbering_scheme,
                                                                          sequence_type=ProteinSequenceType.objects.get(slug='mod'), source=ProteinSource.objects.get(name='OTHER'),
                                                                          species=protein.species)
                        new_prot_conf, created = ProteinConformation.objects.get_or_create(protein=new_prot, state=state)
                        # scrape publication metadata from the PDB header
                        for line in lines:
                            if line.startswith('REVDAT   1'):
                                publication_date = line[13:22]
                            if line.startswith('JRNL        PMID'):
                                pubmed = line[19:].strip()
                            if line.startswith('JRNL        DOI'):
                                doi = line[19:].strip()
                            pdb_file += line
                        pdb_data, created = PdbData.objects.get_or_create(pdb=pdb_file)
                        d = datetime.strptime(publication_date, '%d-%b-%y')
                        publication_date = d.strftime('%Y-%m-%d')
                        try:
                            if doi != '':
                                publication = Publication.get_or_create_from_doi(doi)
                            elif pubmed != '':
                                publication = Publication.get_or_create_from_pubmed(pubmed)
                        except:
                            pass
                        pcs = PdbChainSelector(s, protein)
                        pcs.run_dssp()
                        preferred_chain = pcs.select_chain()

                        # Run state identification

                        # Create yaml files
                        with open(os.sep.join([settings.DATA_DIR, 'structure_data', 'constructs', '{}.yaml'.format(pdb_code.index)]), 'w') as construct_file:
                            yaml.dump({'name': pdb_code.index.lower(), 'protein': protein.entry_name}, construct_file, indent=4)
                        with open(os.sep.join([settings.DATA_DIR, 'structure_data', 'structures', '{}.yaml'.format(pdb_code.index)]), 'w') as structure_file:
                            struct_yaml_dict = {'construct': pdb_code.index.lower(), 'pdb': pdb_code.index, 'preferred_chain': preferred_chain, 'auxiliary_protein': '',
                                                'ligand': {'name': 'None', 'pubchemId': 'None', 'title': 'None', 'role': '.nan', 'type': 'None'}, 'signaling_protein': 'None', 'state': 'Inactive'}
                            auxiliary_proteins, ligands = [], []
                            if pdb_data_dict['ligands'] != 'None':
                                for key, values in pdb_data_dict['ligands'].items():
                                    # skip common crystallization additives/ions
                                    if key in ['SO4','NA','CLR','OLA','OLB','OLC','TAR','NAG','EPE','BU1','ACM','GOL','PEG','PO4','TLA','BOG','CIT','PLM','BMA','MAN','MLI','PGE','SIN','PGO','MES','ZN','NO3','NI','MG','PG4']:
                                        continue
                                    else:
                                        ligands.append({'name': key, 'pubchemId': 'None', 'title': pdb_data_dict['ligands'][key]['comp_name'], 'role': '.nan', 'type': 'None'})
                            sy = StructureYaml(s + '.yaml')
                            bril, by = sy.check_aux_protein('BRIL')
                            t4, ty = sy.check_aux_protein('T4-Lysozyme')
                            if bril:
                                auxiliary_proteins.append('BRIL')
                            if t4:
                                auxiliary_proteins.append('T4-Lysozyme')
                            for key, values in pdb_data_dict['auxiliary'].items():
                                if pdb_data_dict['auxiliary'][key]['subtype'] in ['Expression tag', 'Linker']:
                                    continue
                                else:
                                    # normalize fusion-protein naming
                                    if pdb_data_dict['auxiliary'][key]['subtype'] == 'Soluble cytochrome b562':
                                        aux_p = 'BRIL'
                                    elif pdb_data_dict['auxiliary'][key]['subtype'] in ['Endolysin', 'T4-Lysozyme']:
                                        aux_p = 'T4-Lysozyme'
                                    else:
                                        aux_p = pdb_data_dict['auxiliary'][key]['subtype']
                                    if aux_p not in auxiliary_proteins:
                                        auxiliary_proteins.append(aux_p)
                            for key, values in pdb_data_dict['construct_sequences'].items():
                                if key != protein.entry_name and key not in struct_yaml_dict['auxiliary_protein']:
                                    if 'arrestin' in key:
                                        struct_yaml_dict['signaling_protein'] = key
                            # NOTE(review): both thresholds use >1, so a single
                            # auxiliary protein or ligand is never written — confirm.
                            if len(auxiliary_proteins) > 1:
                                struct_yaml_dict['auxiliary_protein'] = ', '.join(auxiliary_proteins)
                            if len(ligands) > 1:
                                struct_yaml_dict['ligand'] = ligands
                            yaml.dump(struct_yaml_dict, structure_file, indent=4, default_flow_style=False)

                        # Build residue table for structure
                        build_structure_command = shlex.split('/env/bin/python3 manage.py build_structures -f {}.yaml'.format(pdb_code.index))
                        subprocess.call(build_structure_command)

                        # Check state
                        struct = Structure.objects.get(pdb_code__index=pdb_code.index)
                        pi = PdbStateIdentifier(struct)
                        pi.run()
                        if pi.state != None:
                            Structure.objects.filter(pdb_code__index=pdb_code.index).update(state=pi.state)
                            print(pi.state, pi.activation_value)
                            with open('../../data/protwis/gpcr/structure_data/structures/{}.yaml'.format(pdb_code.index), 'r') as yf:
                                struct_yaml = yaml.load(yf, Loader=yaml.FullLoader)
                            struct_yaml['state'] = pi.state.name
                            try:
                                struct_yaml['distance'] = round(float(pi.activation_value), 2)
                            except:
                                struct_yaml['distance'] = None
                            with open('../../data/protwis/gpcr/structure_data/structures/{}.yaml'.format(pdb_code.index), 'w') as struct_yaml_file:
                                yaml.dump(struct_yaml, struct_yaml_file, indent=4, default_flow_style=False)

                        # Check sodium pocket
                        new_prot_conf.sodium_pocket()

                        print('{} added to db (preferred_chain chain: {})'.format(s, preferred_chain))
                except Exception as msg:
                    # record the failure and roll back the bookkeeping lists
                    print(s, msg)
                    self.error_list.append(s)
                    del self.db_list[self.db_list.index(s)]
                    missing_from_db = False
                    del self.yaml_list[self.yaml_list.index(s)]

    def pdb_request_by_uniprot(self, uniprot_id):
        """Legacy PDB search (deprecated REST endpoint): PDB codes for a UniProt accession."""
        url = 'https://www.rcsb.org/pdb/rest/search'

        queryText = """
<orgPdbQuery>
    <queryType>org.pdb.query.simple.UpAccessionIdQuery</queryType>
    <description>Simple query for a list of UniprotKB Accession IDs: {}</description>
    <accessionIdList>{}</accessionIdList>
</orgPdbQuery>
        """.format(uniprot_id, uniprot_id)
        req = urllib.request.Request(url, data=bytes(queryText, 'utf-8'))
        f = urllib.request.urlopen(req)
        result = f.read()
        structures = [i.split(':')[0] for i in result.decode('utf-8').split('\n')[:-1]]
        return structures

    def pdb_request_by_pdb(self, pdb, request_type):
        """Query the RCSB data API for one entry.

        request_type 'entry': return True if the entry is experimental
        (not theoretical/NMR). request_type 'polymer_entity': return the
        list of UniProt accessions for the entry's polymer entities, or
        False if none are found.
        """
        # NOTE(review): urlopen is used unqualified; presumably brought in
        # by 'from construct.functions import *' — verify.
        data = {}
        response = urlopen('https://data.rcsb.org/rest/v1/core/entry/{}'.format(pdb))
        json_data = json.loads(response.read())
        response.close()
        if request_type == 'entry':
            data['method'] = json_data['exptl'][0]['method']
            if data['method'].startswith("THEORETICAL") or data['method'] in ['SOLUTION NMR', 'SOLID-STATE NMR']:
                return False
            if 'pubmed_id' in json_data['rcsb_entry_container_identifiers']:
                data['pubmedId'] = json_data['rcsb_entry_container_identifiers']['pubmed_id']
            else:
                data['pubmedId'] = None
            return True
        elif request_type == 'polymer_entity':
            entity_list = json_data['rcsb_entry_container_identifiers']['entity_ids']
            uniprot_ids = []
            for i in entity_list:
                try:
                    response2 = urlopen('https://data.rcsb.org/rest/v1/core/polymer_entity/{}/{}'.format(pdb, i))
                except urllib.error.HTTPError:
                    continue
                json_data2 = json.loads(response2.read())
                response2.close()
                try:
                    uniprot_ids += json_data2['rcsb_polymer_entity_container_identifiers']['uniprot_ids']
                except KeyError:
                    continue
            if len(uniprot_ids) > 0:
                return uniprot_ids
            else:
                return False

    def pdb_request_by_pdb_deprecated(self, pdb_code):
        """Old describePDB/describeMol check: 1 if the entry is a GPCR
        structure matching one of self.uniprots, else 0."""
        response = urllib.request.urlopen('https://www.rcsb.org/pdb/rest/describePDB?structureId={}'.format(pdb_code.lower()))
        response_mol = urllib.request.urlopen('https://www.rcsb.org/pdb/rest/describeMol?structureId={}'.format(pdb_code.lower()))
        str_des = str(response.read())
        dic = xmltodict.parse(response_mol.read())
        if 'NMR' in str_des or 'extracellular' in str_des:
            return 0
        # hard-coded skip list
        if pdb_code in ['4QXE','1XWD','4QXF','4MQW','6B7H','6BSZ','6BT5','5OTW','3G04','3KS9','4XAQ','5II0','6N4X']:
            return 0
        polymer = dic['molDescription']['structureId']['polymer']
        description = ''
        if type(polymer) == type([]):
            for mol in polymer:
                if 'receptor' in mol['polymerDescription']['@description'] or 'Rhodopsin' in mol['polymerDescription']['@description'] or 'Smoothened' in mol['polymerDescription']['@description']:
                    description = mol['polymerDescription']['@description']
                if description == '' or int(mol['@length']) < 100:
                    pass
                else:
                    try:
                        if polymer['macroMolecule'][0]['accession']['@id'] in self.uniprots:
                            return 1
                        else:
                            raise Exception()
                    except:
                        try:
                            for m in mol['macroMolecule']:
                                try:
                                    if mol['macroMolecule']['accession']['@id'] in self.uniprots:
                                        return 1
                                except:
                                    try:
                                        if m['accession']['@id'] in self.uniprots:
                                            return 1
                                    except:
                                        pass
                            raise Exception()
                        except:
                            pass
            return 0
        else:
            if 'receptor' in polymer['polymerDescription']['@description'] or 'Rhodopsin' in polymer['polymerDescription']['@description'] or 'Smoothened' in polymer['polymerDescription']['@description'] or 'Frizzled' in polymer['polymerDescription']['@description']:
                if int(polymer['@length']) < 100:
                    return 0
                if type(polymer['macroMolecule']) == type([]):
                    for mM in polymer['macroMolecule']:
                        if mM['accession']['@id'] in self.uniprots:
                            return 1
                else:
                    if polymer['macroMolecule']['accession']['@id'] in self.uniprots:
                        return 1
                    else:
                        return 0
            else:
                return 0
class QueryPDBClassifiedGPCR():
    ''' Queries PDB using GPCRdb protein and structure entries using the 'G protein-coupled receptors' classification on PDB. Tree node number (<n>248</n>)
    need to be updated when new xtals come in.
    '''
    def __init__(self):
        # results populated by list_xtals()
        self.num_struct = None
        self.new_structures = None
        self.new_uniques = None

    def list_xtals(self, verbose=True):
        ''' Lists structures and matching receptors from PDB that are not on GPCRdb yet. '''
        # NOTE(review): this uses the legacy rcsb.org/pdb/rest endpoints,
        # which have been retired — verify before relying on this method.
        url = 'https://www.rcsb.org/pdb/rest/search'

        queryText = """
<orgPdbQuery>
    <version>head</version>
    <queryType>org.pdb.query.simple.TreeQuery</queryType>
    <description>TransmembraneTree Search for G Protein-Coupled Receptors (GPCRs)</description>
    <t>19</t>
    <n>248</n>
    <nodeDesc>G Protein-Coupled Receptors (GPCRs)</nodeDesc>
</orgPdbQuery>
        """
        req = urllib.request.Request(url, data=bytes(queryText, 'utf-8'))
        f = urllib.request.urlopen(req)
        result = f.read()
        structures = result.decode('utf-8').split('\n')[:-1]
        # sanity check: the classification should return at least 159 entries
        if len(structures) < 159:
            raise AssertionError('Less than 159 structures, change the pdb query.')
        if verbose:
            print('Number of GPCR structures on PDB:', len(structures))
        new_struct = []
        new_unique = []
        for i in structures:
            response = urllib.request.urlopen('https://www.rcsb.org/pdb/rest/describeMol?structureId={}'.format(i.lower()))
            response_des = urllib.request.urlopen('https://www.rcsb.org/pdb/rest/describePDB?structureId={}'.format(i.lower()))
            str_text = str(response.read())
            str_des = str(response_des.read())
            if 'NMR' in str_des:
                continue
            if 'extracellular' in str_des:
                continue
            if i == '1EDN':
                continue
            uniprots = re.findall('accession id="([A-Z0-9]+)"', str_text)
            try:
                # structure already known under its pdb-code protein entry
                s = Protein.objects.get(entry_name=i.lower())
                continue
            except:
                new_struct.append((i, uniprots))
                miss_count = 0
                for j in uniprots:
                    try:
                        p = Protein.objects.get(accession=j)
                        try:
                            parent = Protein.objects.filter(parent=p)
                            if len(parent) == 0:
                                raise Exception()
                        except:
                            miss_count += 1
                    except:
                        pass
                # NOTE(review): `p` here is whatever the last successful
                # lookup bound; with multiple accessions this may not be
                # the one that was counted as missing — verify.
                if miss_count == 1:
                    new_unique.append((i, p))
        if verbose:
            print('\nStructures not on GPCRdb: ', len(new_struct), '\n', new_struct)
            print('\nNew unique structures: ', len(new_unique), '\n', new_unique)
        self.num_struct = len(structures)
        self.new_structures = new_struct
        self.new_uniques = new_unique
def yamls_to_csv():
    """Convert the per-structure yaml annotation files into the csv
    annotation tables (structures, ligands, nanobodies, fusion proteins,
    RAMP, GRK, G proteins, arrestins) under structure_data/annotation."""
    s_dir = os.sep.join([settings.DATA_DIR, 'structure_data', 'structures'])
    c_dir = os.sep.join([settings.DATA_DIR, 'structure_data', 'constructs'])
    g_dir = os.sep.join([settings.DATA_DIR, 'g_protein_data'])
    yamls = os.listdir(s_dir)
    # NOTE(review): lists s_dir, not c_dir, and `constructs` is unused
    # below — verify intent.
    constructs = os.listdir(s_dir)
    d = OrderedDict()
    # gather each structure's yaml + construct yaml + DB object
    for i in yamls:
        pdb = i.split('.')[0]
        try:
            s_obj = Structure.objects.get(pdb_code__index=pdb)
        except Structure.DoesNotExist:
            continue
        with open(os.sep.join([s_dir, i]), 'r') as f1:
            s_y = yaml.load(f1, Loader=yaml.FullLoader)
        d[pdb] = s_y
        with open(os.sep.join([c_dir, i]), 'r') as f2:
            c_y = yaml.load(f2, Loader=yaml.FullLoader)
        d[pdb]['protein'] = c_y['protein']
        d[pdb]['obj'] = s_obj
        # normalize single-ligand entries to a list
        if type(d[pdb]['ligand']) != type([]):
            d[pdb]['ligand'] = [d[pdb]['ligand']]

    # structures.csv
    with open(os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'structures.csv']), 'w', newline='') as s_csv:
        struct_w = csv.writer(s_csv, delimiter=',', quotechar="'", quoting=csv.QUOTE_MINIMAL)
        struct_w.writerow(['PDB', 'Receptor_UniProt', 'Method', 'Resolution', 'State', 'ChainID', 'Note'])
        for pdb, vals in d.items():
            if vals['obj'].structure_type.name.startswith('X-ray'):
                method = 'X-ray'
            elif vals['obj'].structure_type.name == 'Electron microscopy':
                method = 'cryo-EM'
            else:
                method = vals['obj'].structure_type.name
            struct_w.writerow([pdb, vals['protein'], method, vals['obj'].resolution, vals['state'], vals['preferred_chain'], ''])

    # ligands.csv (note: tab-delimited unlike the other tables)
    with open(os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'ligands.csv']), 'w', newline='') as l_csv:
        lig_w = csv.writer(l_csv, delimiter='\t', quotechar="'", quoting=csv.QUOTE_MINIMAL)
        lig_w.writerow(['PDB', 'ChainID', 'Name', 'PubChemID', 'Role', 'Title', 'Type'])
        for pdb, vals in d.items():
            lig = d[pdb]['ligand']
            if isinstance(lig, list):
                for l in lig:
                    if 'chain' in l:
                        chain = l['chain']
                    else:
                        chain = ''
                    if 'title' not in l:
                        title = l['name']
                    else:
                        title = l['title']
                    lig_w.writerow([pdb, chain, l['name'], l['pubchemId'], l['role'], title, l['type']])

    fusion_prots = OrderedDict()
    ramp, grk = OrderedDict(), OrderedDict()
    # nanobodies.csv fusion_proteins.csv ramp.csv grk.csv
    with open(os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'nanobodies.csv']), 'w', newline='') as n_csv:
        nb_w = csv.writer(n_csv, delimiter=',', quotechar="'", quoting=csv.QUOTE_MINIMAL)
        nb_w.writerow(['PDB', 'Name'])
        for pdb, vals in d.items():
            auxs = d[pdb]['auxiliary_protein'].split(',')
            for aux in auxs:
                if aux == '' or aux == 'None' or aux.startswith('GABA'):
                    continue
                # strip a single leading space left by ', '.join
                if aux[0] == ' ':
                    aux = aux[1:]
                # antibody/nanobody-like entries go to nanobodies.csv
                if aux.startswith('Antibody') or aux.startswith('scFv') or 'Fab' in aux or 'Nanobody' in aux or aux.startswith('Camelid') or aux.startswith('IgG') or aux in ['Sb51','DN13','Anti-RON nanobody','T-cell surface glycoprotein CD4']:
                    # normalize naming to 'Nanobody-<id>'
                    if aux.startswith('Nanobody '):
                        aux = aux.split(' ')[0]+'-'+aux.split(' ')[1]
                    elif aux.startswith('Nanobody') and '-' not in aux and aux != 'Nanobody':
                        aux = 'Nanobody-'+aux[8:]
                    nb_w.writerow([pdb, aux])
                elif aux in ['BRIL', 'T4-Lysozyme', 'Flavodoxin', 'Rubredoxin', 'GlgA glycogen synthase', 'Glycogen synthase', 'Thioredoxin 1', 'TrxA', 'Sialidase NanA']:
                    fusion_prots[pdb] = aux
                elif aux.startswith('RAMP'):
                    ramp[pdb] = aux
                elif aux.startswith('GRK'):
                    grk[pdb] = aux
                else:
                    # unrecognized auxiliary protein: surface it for review
                    print('====', aux)
    with open(os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'fusion_proteins.csv']), 'w', newline='') as f_csv:
        f_w = csv.writer(f_csv, delimiter=',', quotechar="'", quoting=csv.QUOTE_MINIMAL)
        f_w.writerow(['PDB', 'Name'])
        for pdb, name in fusion_prots.items():
            f_w.writerow([pdb, name])
    with open(os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'ramp.csv']), 'w', newline='') as r_csv:
        r_w = csv.writer(r_csv, delimiter=',', quotechar="'", quoting=csv.QUOTE_MINIMAL)
        r_w.writerow(['PDB', 'Name'])
        for pdb, name in ramp.items():
            r_w.writerow([pdb, name])
    with open(os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'grk.csv']), 'w', newline='') as g_csv:
        g_w = csv.writer(g_csv, delimiter=',', quotechar="'", quoting=csv.QUOTE_MINIMAL)
        g_w.writerow(['PDB', 'Name'])
        for pdb, name in grk.items():
            g_w.writerow([pdb, name])

    # g proteins
    with open(os.sep.join([g_dir, 'complex_model_templates.yaml']), 'r') as f2:
        gprots = yaml.load(f2, Loader=yaml.FullLoader)
    with open(os.sep.join([settings.DATA_DIR, 'structure_data', 'extra_protein_notes.yaml']), 'r') as f3:
        extra = yaml.load(f3, Loader=yaml.FullLoader)
    arrestin = OrderedDict()
    with open(os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'g_proteins.csv']), 'w', newline='') as gp_csv:
        gp_w = csv.writer(gp_csv, delimiter=',', quotechar="'", quoting=csv.QUOTE_MINIMAL)
        gp_w.writerow(['PDB', 'Alpha_UniProt', 'Alpha_ChainID', 'Beta_UniProt', 'Beta_ChainID', 'Gamma_UniProt', 'Gamma_ChainID', 'Note'])
        gprot_pdbs = []
        for alpha, vals_l in gprots.items():
            for vals in vals_l:
                note = ''
                if vals['pdb'] in extra:
                    note = extra[vals['pdb']]['note']
                if vals['beta']['protein'] == 'None':
                    beta_prot, beta_chain = '', ''
                else:
                    beta_prot, beta_chain = vals['beta']['protein'], vals['beta']['chain']
                if vals['gamma']['protein'] == 'None':
                    gamma_prot, gamma_chain = '', ''
                else:
                    gamma_prot, gamma_chain = vals['gamma']['protein'], vals['gamma']['chain']
                gprot_pdbs.append(vals['pdb'])
                gp_w.writerow([vals['pdb'], alpha, vals['alpha'], beta_prot, beta_chain, gamma_prot, gamma_chain, note])
        # NOTE(review): prot_code and gprot_pdbs are built but never used
        # in this function — confirm whether they can be removed.
        prot_code = {'GNAS2':'gnas2_human','GNAT1':'gnat1_bovin','GNAT3':'gnat3_bovin'}
        for pdb, vals in extra.items():
            if 'category' in vals:
                if vals['category'] == 'G alpha':
                    gp_w.writerow([pdb, vals['prot'], vals['chain'], '', '', '', '', vals['note']])
                elif vals['category'] == 'Arrestin':
                    arrestin[pdb] = vals
    with open(os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'arrestins.csv']), 'w', newline='') as ar_csv:
        ar_w = csv.writer(ar_csv, delimiter=',', quotechar="'", quoting=csv.QUOTE_MINIMAL)
        ar_w.writerow(['PDB', 'UniProt', 'ChainID', 'Note'])
        for pdb, vals in arrestin.items():
            ar_w.writerow([pdb, vals['prot'], vals['chain'], vals['note']])
| protwis/protwis | structure/management/commands/new_xtals.py | Python | apache-2.0 | 35,404 | [
"CRYSTAL"
] | e74925c2321649324cbff6f0734626fd953b13726c9cc17c591fa69f813846a4 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import io
def is_binary_file(file):
    """Return True if `file` is one of io's buffered binary file objects."""
    buffered_types = (io.BufferedReader,
                      io.BufferedWriter,
                      io.BufferedRandom)
    return isinstance(file, buffered_types)
# Everything beyond this point will be some kind of hack needed to make
# everything work. It's not pretty and it doesn't make great sense much
# of the time. I am very sorry to the poor soul who has to read beyond.
class StringIO(io.StringIO):
    """io.StringIO that also accepts ``bytes`` (decoded as ASCII).

    Exists so tests can feed byte strings and unicode interchangeably;
    not intended for production use.
    """

    def __init__(self, string=None, **kwargs):
        text = string.decode() if isinstance(string, bytes) else string
        super(StringIO, self).__init__(text, **kwargs)
class SaneTextIOWrapper(io.TextIOWrapper):
    """TextIOWrapper whose destructor does NOT close the underlying buffer.

    Python 2's C-level IOBase destructor unconditionally calls close(),
    which would close the buffer too.  We cannot stop that call, so
    instead close() is guarded by a flag that __del__ clears first.
    """

    def __init__(self, *args, **kwargs):
        super(SaneTextIOWrapper, self).__init__(*args, **kwargs)
        self._should_close_buffer = True

    def __del__(self):
        # Garbage collection should release this wrapper without closing
        # the buffer.  We cannot chain to super here: Python 2's IOBase has
        # no __del__ method, and close() is idempotent, so the duplicate
        # call Python 2 makes is harmless.
        self._should_close_buffer = False
        self.close()

    def close(self):
        # Mirrors _pyio.TextIOWrapper.close (see
        # https://github.com/python/cpython/blob/2.7/Lib/_pyio.py#L1586)
        # but honors the flag cleared by __del__ above.
        if self.buffer is None or self.closed:
            return
        try:
            self.flush()
        finally:
            if self._should_close_buffer:
                self.buffer.close()
class WrappedBufferedRandom(io.BufferedRandom):
    """BufferedRandom whose destructor does NOT close the raw stream.

    Same flag trick as SaneTextIOWrapper: __del__ clears the flag before
    calling the (idempotent) close(), so only an explicit close() reaches
    the raw stream.
    """

    def __init__(self, *args, **kwargs):
        super(WrappedBufferedRandom, self).__init__(*args, **kwargs)
        self._should_close_raw = True

    def __del__(self):
        # GC must not close the raw stream underneath the caller.
        self._should_close_raw = False
        self.close()

    def close(self):
        # Adapted from _pyio.BufferedIOBase.close:
        # https://github.com/python/cpython/blob/2.7/Lib/_pyio.py#L732
        if self.raw is None or self.closed:
            return
        try:
            # Flushing may raise BlockingIOError, BrokenPipeError, etc.
            self.flush()
        finally:
            if self._should_close_raw:
                self.raw.close()
class CompressedMixin(object):
    """Act as a bridge between worlds.

    Mixed into io.BufferedReader/io.BufferedWriter (see
    CompressedBufferedReader/Writer below) whose raw stream performs
    de/compression on top of an original file object ``before_file``.
    The mixin ties the original file's lifetime to the wrapper, so closing
    the wrapper transparently closes the underlying file as well.
    """
    def __init__(self, before_file, *args, **kwargs):
        # NOTE(review): 'streamable' is only stored here; it is presumably
        # consumed by callers elsewhere in the package -- confirm.  It is
        # popped so the io.* constructor never sees it.
        self.streamable = kwargs.pop('streamable', True)
        self._should_close_raw = True
        self._before_file = before_file
        super(CompressedMixin, self).__init__(*args, **kwargs)
    def __del__(self):
        # GC must not close the raw (de/compressing) stream; close() is
        # idempotent, so calling it here is safe (same trick as
        # SaneTextIOWrapper above).
        self._should_close_raw = False
        self.close()
    @property
    def closed(self):
        # The bridge counts as closed if either side has been closed.
        return self.raw.closed or self._before_file.closed
    # Based on:
    # https://github.com/python/cpython/blob/2.7/Lib/_pyio.py#L732
    def close(self):
        if self.raw is not None and not self.closed:
            try:
                # may raise BlockingIOError or BrokenPipeError etc
                self.flush()
            finally:
                if self._should_close_raw:
                    self.raw.close()
                # The above will not usually close the before_file
                # We want the decompression to be transparent, so we don't
                # want users to deal with this edge case. Instead we can
                # just close the original now that we are being closed.
                self._before_file.close()
class CompressedBufferedReader(CompressedMixin, io.BufferedReader):
    # Buffered reader over a decompressing raw stream; CompressedMixin
    # supplies the close/lifetime semantics that also close the original
    # (compressed) file object.
    pass
class CompressedBufferedWriter(CompressedMixin, io.BufferedWriter):
    # Buffered writer over a compressing raw stream; CompressedMixin
    # supplies the close/lifetime semantics that also close the original
    # file object.
    pass
class IterableStringReaderIO(io.StringIO):
    """Read-only text buffer built by joining an iterable of strings."""

    def __init__(self, iterable, newline):
        # Keep a handle on the iterable: the writer subclass below appends
        # the buffer's contents back into it on close().
        self._iterable = iterable
        joined = u''.join(iterable)
        super(IterableStringReaderIO, self).__init__(joined,
                                                     newline=newline)
class IterableStringWriterIO(IterableStringReaderIO):
    """Writable counterpart of IterableStringReaderIO.

    On close(), every line currently in the buffer is appended back to the
    original iterable before the underlying StringIO is closed.
    """

    def close(self):
        if not self.closed:
            # Remember the cursor, sweep the buffer line by line from the
            # start, then restore the cursor before actually closing.
            position = self.tell()
            self.seek(0)
            self._iterable.extend(line for line in self)
            self.seek(position)
        super(IterableStringWriterIO, self).close()
| xguse/scikit-bio | skbio/io/_fileobject.py | Python | bsd-3-clause | 4,967 | [
"scikit-bio"
] | dd4a8b99b6b8557f3f7853c2b20171dc6ebfb9fcc6711f834c2dfc7f9e4cae83 |
# node.py
#
# Base class and non-recursive visitor implementation.
# Used by various example files.
import types
class Node:
    """Marker base class for tree nodes consumed by NodeVisitor."""
    pass
import types
class NodeVisitor:
    """Non-recursive visitor.

    visit_* handlers may be generators that *yield* child nodes instead of
    recursing; visit() drives them on an explicit stack (a trampoline), so
    arbitrarily deep trees cannot overflow the interpreter's call stack.
    """

    def visit(self, node):
        work = [node]
        outcome = None
        while work:
            try:
                top = work[-1]
                if isinstance(top, types.GeneratorType):
                    # Resume the suspended handler, feeding it the result
                    # of whatever it last yielded.
                    work.append(top.send(outcome))
                    outcome = None
                elif isinstance(top, Node):
                    # Replace the node with its handler's return value
                    # (possibly a generator, driven by the branch above).
                    work.append(self._visit(work.pop()))
                else:
                    # A plain value: result for the generator beneath it.
                    outcome = work.pop()
            except StopIteration:
                # The topmost generator is exhausted.
                work.pop()
        return outcome

    def _visit(self, node):
        # Dispatch on the node's concrete type; fall back to generic_visit.
        handler = getattr(self, 'visit_' + type(node).__name__, None)
        if handler is None:
            handler = self.generic_visit
        return handler(node)

    def generic_visit(self, node):
        raise RuntimeError('No {} method'.format('visit_' + type(node).__name__))
| tuanavu/python-cookbook-3rd | src/8/implementing_the_visitor_pattern_without_recursion/node.py | Python | mit | 1,070 | [
"VisIt"
] | bce05a3689bc25f322081e0af4ccc746f831bd789f879f775b0f78ab6274dd9f |
"""
Spatial Error Models with regimes module
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, Pedro V. Amaral pedro.amaral@asu.edu"
import numpy as np
import multiprocessing as mp
import regimes as REGI
import user_output as USER
import summary_output as SUMMARY
from pysal import lag_spatial
from ols import BaseOLS
from twosls import BaseTSLS
from error_sp import BaseGM_Error, BaseGM_Endog_Error, _momentsGM_Error
from utils import set_endog, iter_msg, sp_att, set_warn
from utils import optim_moments, get_spFilter, get_lags
from utils import spdot, RegressionPropsY
from platform import system
class GM_Error_Regimes(RegressionPropsY, REGI.Regimes_Frame):
"""
GMM method for a spatial error model with regimes, with results and diagnostics;
based on Kelejian and Prucha (1998, 1999)[1]_ [2]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
w : pysal W object
Spatial weights object
constant_regi: ['one', 'many']
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime (default)
cols2regi : list, 'all'
Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all' (default), all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
regime_lag_sep : boolean
Always False, kept for consistency, ignored.
vm : boolean
If True, include variance-covariance matrix in summary
results
cores : boolean
Specifies if multiprocessing is to be used
Default: no multiprocessing, cores = False
Note: Multiprocessing may not work on all platforms.
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regime variable for use in the output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
vm : array
Variance covariance matrix (kxk)
sig2 : float
Sigma squared used in computations
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
std_err : array
1xk array of standard errors of the betas
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regime variable for use in the output
title : string
Name of the regression method used
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi: ['one', 'many']
Ignored if regimes=False. Constant option for regimes.
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime
cols2regi : list, 'all'
Ignored if regimes=False. Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all', all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
kr : int
Number of variables/columns to be "regimized" or subject
to change by regime. These will result in one parameter
estimate by regime for each variable (i.e. nr parameters per
variable)
kf : int
Number of variables/columns to be considered fixed or
global across regimes and hence only obtain one parameter
estimate
nr : int
Number of different regimes in the 'regimes' list
multi : dictionary
Only available when multiple regressions are estimated,
i.e. when regime_err_sep=True and no variable is fixed
across regimes.
Contains all attributes of each individual regression
References
----------
.. [1] Kelejian, H.R., Prucha, I.R. (1998) "A generalized spatial
two-stage least squares procedure for estimating a spatial autoregressive
model with autoregressive disturbances". The Journal of Real State
Finance and Economics, 17, 1.
.. [2] Kelejian, H.R., Prucha, I.R. (1999) "A Generalized Moments
Estimator for the Autoregressive Parameter in a Spatial Model".
International Economic Review, 40, 2.
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import pysal
>>> import numpy as np
Open data on NCOVR US County Homicides (3085 areas) using pysal.open().
This is the DBF associated with the NAT shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("NAT.dbf"),'r')
Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y_var = 'HR90'
>>> y = np.array([db.by_col(y_var)]).reshape(3085,1)
Extract UE90 (unemployment rate) and PS90 (population structure) vectors from
the DBF to be used as independent variables in the regression. Other variables
can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2','...]
Note that PySAL requires this to be an nxj numpy array, where j is the
number of independent variables (not including a constant). By default
this model adds a vector of ones to the independent variables passed in.
>>> x_var = ['PS90','UE90']
>>> x = np.array([db.by_col(name) for name in x_var]).T
The different regimes in this data are given according to the North and
South dummy (SOUTH).
>>> r_var = 'SOUTH'
>>> regimes = db.by_col(r_var)
Since we want to run a spatial error model, we need to specify
the spatial weights matrix that includes the spatial configuration of the
observations. To do that, we can open an already existing gal file or
create a new one. In this case, we will create one from ``NAT.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("NAT.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
We are all set with the preliminaries, we are good to run the model. In this
case, we will need the variables and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> model = GM_Error_Regimes(y, x, regimes, w=w, name_y=y_var, name_x=x_var, name_regimes=r_var, name_ds='NAT.dbf')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them. Note that because we are running the classical GMM error
model from 1998/99, the spatial parameter is obtained as a point estimate, so
although you get a value for it (there are for coefficients under
model.betas), you cannot perform inference on it (there are only three
values in model.se_betas). Alternatively, we can have a summary of the
output by typing: model.summary
>>> print model.name_x
['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', 'lambda']
>>> np.around(model.betas, decimals=6)
array([[ 0.074807],
[ 0.786107],
[ 0.538849],
[ 5.103756],
[ 1.196009],
[ 0.600533],
[ 0.364103]])
>>> np.around(model.std_err, decimals=6)
array([ 0.379864, 0.152316, 0.051942, 0.471285, 0.19867 , 0.057252])
>>> np.around(model.z_stat, decimals=6)
array([[ 0.196932, 0.843881],
[ 5.161042, 0. ],
[ 10.37397 , 0. ],
[ 10.829455, 0. ],
[ 6.02007 , 0. ],
[ 10.489215, 0. ]])
>>> np.around(model.sig2, decimals=6)
28.172732
"""
    def __init__(self, y, x, regimes, w,
                 vm=False, name_y=None, name_x=None, name_w=None,
                 constant_regi='many', cols2regi='all', regime_err_sep=False,
                 regime_lag_sep=False,
                 cores=False, name_ds=None, name_regimes=None):
        """Validate inputs and run the regime-wise GM error estimation.

        See the class docstring for the meaning of every parameter and of
        the attributes set here.  ``regime_lag_sep`` is accepted only for
        interface consistency with the lag models and is ignored.
        """
        # --- input validation and name bookkeeping ---
        n = USER.check_arrays(y, x)
        USER.check_y(y, n)
        USER.check_weights(w, y, w_required=True)
        self.constant_regi = constant_regi
        self.cols2regi = cols2regi
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_y = USER.set_name_y(name_y)
        self.name_w = USER.set_name_w(name_w, w)
        self.name_regimes = USER.set_name_ds(name_regimes)
        self.n = n
        self.y = y
        x_constant = USER.check_constant(x)
        name_x = USER.set_name_x(name_x, x)
        self.name_x_r = name_x
        # Expand cols2regi ('all'/list) into one boolean per column.
        cols2regi = REGI.check_cols2regi(constant_regi, cols2regi, x)
        self.regimes_set = REGI._get_regimes_set(regimes)
        self.regimes = regimes
        USER.check_regimes(self.regimes_set, self.n, x.shape[1])
        self.regime_err_sep = regime_err_sep
        if regime_err_sep == True:
            # One fully separate regression per regime; only valid when
            # every coefficient varies by regime.
            if set(cols2regi) == set([True]):
                self._error_regimes_multi(y, x, regimes, w, cores,
                                          cols2regi, vm, name_x)
            else:
                raise Exception, "All coefficients must vary accross regimes if regime_err_sep = True."
        else:
            # Single joint regression with regime-interacted regressors.
            self.x, self.name_x = REGI.Regimes_Frame.__init__(self, x_constant,
                                                              regimes, constant_regi=None, cols2regi=cols2regi, names=name_x)
            # Step 1: OLS residuals feed the GM moments for lambda.
            ols = BaseOLS(y=y, x=self.x)
            self.k = ols.x.shape[1]
            moments = _momentsGM_Error(w, ols.u)
            lambda1 = optim_moments(moments)
            # Step 2: spatially filter y and X with the estimated lambda,
            # then re-run OLS on the filtered (Cochrane-Orcutt-style) data.
            xs = get_spFilter(w, lambda1, x_constant)
            ys = get_spFilter(w, lambda1, y)
            xs = REGI.Regimes_Frame.__init__(self, xs,
                                             regimes, constant_regi=None, cols2regi=cols2regi)[0]
            ols2 = BaseOLS(y=ys, x=xs)
            # Output
            self.predy = spdot(self.x, ols2.betas)
            self.u = y - self.predy
            # lambda is appended as a point estimate (no inference on it).
            self.betas = np.vstack((ols2.betas, np.array([[lambda1]])))
            self.sig2 = ols2.sig2n
            self.e_filtered = self.u - lambda1 * lag_spatial(w, self.u)
            self.vm = self.sig2 * ols2.xtxi
            self.title = "SPATIALLY WEIGHTED LEAST SQUARES - REGIMES"
            self.name_x.append('lambda')
            self.kf += 1
            self.chow = REGI.Chow(self)
            self._cache = {}
            SUMMARY.GM_Error(reg=self, w=w, vm=vm, regimes=True)
def _error_regimes_multi(self, y, x, regimes, w, cores,
cols2regi, vm, name_x):
regi_ids = dict(
(r, list(np.where(np.array(regimes) == r)[0])) for r in self.regimes_set)
results_p = {}
"""
for r in self.regimes_set:
if system() == 'Windows':
results_p[r] = _work_error(*(y,x,regi_ids,r,w,self.name_ds,self.name_y,name_x+['lambda'],self.name_w,self.name_regimes))
is_win = True
else:
pool = mp.Pool(cores)
results_p[r] = pool.apply_async(_work_error,args=(y,x,regi_ids,r,w,self.name_ds,self.name_y,name_x+['lambda'],self.name_w,self.name_regimes, ))
is_win = False
"""
for r in self.regimes_set:
if cores:
pool = mp.Pool(None)
results_p[r] = pool.apply_async(_work_error, args=(
y, x, regi_ids, r, w, self.name_ds, self.name_y, name_x + ['lambda'], self.name_w, self.name_regimes, ))
else:
results_p[r] = _work_error(
*(y, x, regi_ids, r, w, self.name_ds, self.name_y, name_x + ['lambda'], self.name_w, self.name_regimes))
self.kryd = 0
self.kr = len(cols2regi)
self.kf = 0
self.nr = len(self.regimes_set)
self.vm = np.zeros((self.nr * self.kr, self.nr * self.kr), float)
self.betas = np.zeros((self.nr * (self.kr + 1), 1), float)
self.u = np.zeros((self.n, 1), float)
self.predy = np.zeros((self.n, 1), float)
self.e_filtered = np.zeros((self.n, 1), float)
"""
if not is_win:
pool.close()
pool.join()
"""
if cores:
pool.close()
pool.join()
results = {}
self.name_y, self.name_x = [], []
counter = 0
for r in self.regimes_set:
"""
if is_win:
results[r] = results_p[r]
else:
results[r] = results_p[r].get()
"""
if not cores:
results[r] = results_p[r]
else:
results[r] = results_p[r].get()
self.vm[(counter * self.kr):((counter + 1) * self.kr),
(counter * self.kr):((counter + 1) * self.kr)] = results[r].vm
self.betas[
(counter * (self.kr + 1)):((counter + 1) * (self.kr + 1)), ] = results[r].betas
self.u[regi_ids[r], ] = results[r].u
self.predy[regi_ids[r], ] = results[r].predy
self.e_filtered[regi_ids[r], ] = results[r].e_filtered
self.name_y += results[r].name_y
self.name_x += results[r].name_x
counter += 1
self.chow = REGI.Chow(self)
self.multi = results
SUMMARY.GM_Error_multi(
reg=self, multireg=self.multi, vm=vm, regimes=True)
class GM_Endog_Error_Regimes(RegressionPropsY, REGI.Regimes_Frame):
'''
GMM method for a spatial error model with regimes and endogenous variables, with
results and diagnostics; based on Kelejian and Prucha (1998, 1999)[1]_[2]_.
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
q : array
Two dimensional array with n rows and one column for each
external exogenous variable to use as instruments (note:
this should not contain any variables from x)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
w : pysal W object
Spatial weights object
constant_regi: ['one', 'many']
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime (default)
cols2regi : list, 'all'
Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all' (default), all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
regime_lag_sep : boolean
Always False, kept for consistency, ignored.
vm : boolean
If True, include variance-covariance matrix in summary
results
cores : boolean
Specifies if multiprocessing is to be used
Default: no multiprocessing, cores = False
Note: Multiprocessing may not work on all platforms.
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_q : list of strings
Names of instruments for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regime variable for use in the output
Attributes
----------
summary : string
Summary of regression results and diagnostics (note: use in
conjunction with the print command)
betas : array
kx1 array of estimated coefficients
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
yend : array
Two dimensional array with n rows and one column for each
endogenous variable
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
z : array
nxk array of variables (combination of x and yend)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (kxk)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
sig2 : float
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
Sigma squared used in computations
std_err : array
1xk array of standard errors of the betas
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_yend : list of strings
Names of endogenous variables for use in output
name_z : list of strings
Names of exogenous and endogenous variables for use in
output
name_q : list of strings
Names of external instruments
name_h : list of strings
Names of all instruments used in ouput
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
name_regimes : string
Name of regimes variable for use in output
title : string
Name of the regression method used
Only available in dictionary 'multi' when multiple regressions
(see 'multi' below for details)
regimes : list
List of n values with the mapping of each
observation to a regime. Assumed to be aligned with 'x'.
constant_regi : ['one', 'many']
Ignored if regimes=False. Constant option for regimes.
Switcher controlling the constant term setup. It may take
the following values:
* 'one': a vector of ones is appended to x and held
constant across regimes
* 'many': a vector of ones is appended to x and considered
different per regime
cols2regi : list, 'all'
Ignored if regimes=False. Argument indicating whether each
column of x should be considered as different per regime
or held constant across regimes (False).
If a list, k booleans indicating for each variable the
option (True if one per regime, False to be held constant).
If 'all', all the variables vary by regime.
regime_err_sep : boolean
If True, a separate regression is run for each regime.
kr : int
Number of variables/columns to be "regimized" or subject
to change by regime. These will result in one parameter
estimate by regime for each variable (i.e. nr parameters per
variable)
kf : int
Number of variables/columns to be considered fixed or
global across regimes and hence only obtain one parameter
estimate
nr : int
Number of different regimes in the 'regimes' list
multi : dictionary
Only available when multiple regressions are estimated,
i.e. when regime_err_sep=True and no variable is fixed
across regimes.
Contains all attributes of each individual regression
References
----------
.. [1] Kelejian, H.R., Prucha, I.R. (1998) "A generalized spatial
two-stage least squares procedure for estimating a spatial autoregressive
model with autoregressive disturbances". The Journal of Real State
Finance and Economics, 17, 1.
.. [2] Kelejian, H.R., Prucha, I.R. (1999) "A Generalized Moments
Estimator for the Autoregressive Parameter in a Spatial Model".
International Economic Review, 40, 2.
Examples
--------
We first need to import the needed modules, namely numpy to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis.
>>> import pysal
>>> import numpy as np
Open data on NCOVR US County Homicides (3085 areas) using pysal.open().
This is the DBF associated with the NAT shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("NAT.dbf"),'r')
Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the
dependent variable for the regression. Note that PySAL requires this to be
an numpy array of shape (n, 1) as opposed to the also common shape of (n, )
that other packages accept.
>>> y_var = 'HR90'
>>> y = np.array([db.by_col(y_var)]).reshape(3085,1)
Extract UE90 (unemployment rate) and PS90 (population structure) vectors from
the DBF to be used as independent variables in the regression. Other variables
can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2','...]
Note that PySAL requires this to be an nxj numpy array, where j is the
number of independent variables (not including a constant). By default
this model adds a vector of ones to the independent variables passed in.
>>> x_var = ['PS90','UE90']
>>> x = np.array([db.by_col(name) for name in x_var]).T
For the endogenous models, we add the endogenous variable RD90 (resource deprivation)
and we decide to instrument for it with FP89 (families below poverty):
>>> yd_var = ['RD90']
>>> yend = np.array([db.by_col(name) for name in yd_var]).T
>>> q_var = ['FP89']
>>> q = np.array([db.by_col(name) for name in q_var]).T
The different regimes in this data are given according to the North and
South dummy (SOUTH).
>>> r_var = 'SOUTH'
>>> regimes = db.by_col(r_var)
Since we want to run a spatial error model, we need to specify the spatial
weights matrix that includes the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``NAT.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("NAT.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
We are all set with the preliminaries, we are good to run the model. In this
case, we will need the variables (exogenous and endogenous), the
instruments and the weights matrix. If we want to
have the names of the variables printed in the output summary, we will
have to pass them in as well, although this is optional.
>>> model = GM_Endog_Error_Regimes(y, x, yend, q, regimes, w=w, name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_regimes=r_var, name_ds='NAT.dbf')
Once we have run the model, we can explore a little bit the output. The
regression object we have created has many attributes so take your time to
discover them. Note that because we are running the classical GMM error
model from 1998/99, the spatial parameter is obtained as a point estimate, so
although you get a value for it (there are for coefficients under
model.betas), you cannot perform inference on it (there are only three
values in model.se_betas). Also, this regression uses a two stage least
squares estimation method that accounts for the endogeneity created by the
endogenous variables included. Alternatively, we can have a summary of the
output by typing: model.summary
>>> print model.name_z
['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', '0_RD90', '1_RD90', 'lambda']
>>> np.around(model.betas, decimals=5)
array([[ 3.59718],
[ 1.0652 ],
[ 0.15822],
[ 9.19754],
[ 1.88082],
[-0.24878],
[ 2.46161],
[ 3.57943],
[ 0.25564]])
>>> np.around(model.std_err, decimals=6)
array([ 0.522633, 0.137555, 0.063054, 0.473654, 0.18335 , 0.072786,
0.300711, 0.240413])
'''
    def __init__(self, y, x, yend, q, regimes, w, cores=False,
                 vm=False, constant_regi='many', cols2regi='all',
                 regime_err_sep=False, regime_lag_sep=False, name_y=None,
                 name_x=None, name_yend=None, name_q=None, name_w=None,
                 name_ds=None, name_regimes=None, summ=True, add_lag=False):
        """Validate inputs and run the regime-wise GM endogenous-error
        estimation.

        See the class docstring for the parameters.  ``summ``/``add_lag``
        are internal switches used when this class is driven by the
        combo/lag wrappers; ``regime_lag_sep`` is accepted for interface
        consistency and is ignored here.
        """
        # --- input validation and name bookkeeping ---
        n = USER.check_arrays(y, x, yend, q)
        USER.check_y(y, n)
        USER.check_weights(w, y, w_required=True)
        self.constant_regi = constant_regi
        self.cols2regi = cols2regi
        self.name_ds = USER.set_name_ds(name_ds)
        self.name_regimes = USER.set_name_ds(name_regimes)
        self.name_w = USER.set_name_w(name_w, w)
        self.n = n
        self.y = y
        name_x = USER.set_name_x(name_x, x)
        if summ:
            # Only name things here when we produce the summary ourselves
            # (a wrapping caller may pass pre-built names with summ=False).
            name_yend = USER.set_name_yend(name_yend, yend)
            self.name_y = USER.set_name_y(name_y)
            name_q = USER.set_name_q(name_q, q)
        self.name_x_r = name_x + name_yend
        cols2regi = REGI.check_cols2regi(
            constant_regi, cols2regi, x, yend=yend)
        self.regimes_set = REGI._get_regimes_set(regimes)
        self.regimes = regimes
        USER.check_regimes(self.regimes_set, self.n, x.shape[1])
        self.regime_err_sep = regime_err_sep
        if regime_err_sep == True:
            # One fully separate regression per regime; only valid when
            # every coefficient varies by regime.
            if set(cols2regi) == set([True]):
                self._endog_error_regimes_multi(y, x, regimes, w, yend, q, cores,
                                                cols2regi, vm, name_x, name_yend, name_q, add_lag)
            else:
                raise Exception, "All coefficients must vary accross regimes if regime_err_sep = True."
        else:
            # Single joint regression with regime-interacted variables.
            x_constant = USER.check_constant(x)
            q, name_q = REGI.Regimes_Frame.__init__(self, q,
                                                    regimes, constant_regi=None, cols2regi='all', names=name_q)
            x, name_x = REGI.Regimes_Frame.__init__(self, x_constant,
                                                    regimes, constant_regi=None, cols2regi=cols2regi,
                                                    names=name_x)
            yend2, name_yend = REGI.Regimes_Frame.__init__(self, yend,
                                                           regimes, constant_regi=None,
                                                           cols2regi=cols2regi, yend=True, names=name_yend)
            # Step 1: 2SLS residuals feed the GM moments for lambda.
            tsls = BaseTSLS(y=y, x=x, yend=yend2, q=q)
            self.k = tsls.z.shape[1]
            self.x = tsls.x
            self.yend, self.z = tsls.yend, tsls.z
            moments = _momentsGM_Error(w, tsls.u)
            lambda1 = optim_moments(moments)
            # Step 2: spatially filter y, X and the endogenous variables
            # with lambda, then re-run 2SLS on the filtered data using the
            # first stage's instrument matrix h.
            xs = get_spFilter(w, lambda1, x_constant)
            xs = REGI.Regimes_Frame.__init__(self, xs,
                                             regimes, constant_regi=None, cols2regi=cols2regi)[0]
            ys = get_spFilter(w, lambda1, y)
            yend_s = get_spFilter(w, lambda1, yend)
            yend_s = REGI.Regimes_Frame.__init__(self, yend_s,
                                                 regimes, constant_regi=None, cols2regi=cols2regi,
                                                 yend=True)[0]
            tsls2 = BaseTSLS(ys, xs, yend_s, h=tsls.h)
            # Output
            # lambda is appended as a point estimate (no inference on it).
            self.betas = np.vstack((tsls2.betas, np.array([[lambda1]])))
            self.predy = spdot(tsls.z, tsls2.betas)
            self.u = y - self.predy
            self.sig2 = float(np.dot(tsls2.u.T, tsls2.u)) / self.n
            self.e_filtered = self.u - lambda1 * lag_spatial(w, self.u)
            self.vm = self.sig2 * tsls2.varb
            self.name_x = USER.set_name_x(name_x, x, constant=True)
            self.name_yend = USER.set_name_yend(name_yend, yend)
            self.name_z = self.name_x + self.name_yend
            self.name_z.append('lambda')
            self.name_q = USER.set_name_q(name_q, q)
            self.name_h = USER.set_name_h(self.name_x, self.name_q)
            self.kf += 1
            self.chow = REGI.Chow(self)
            self._cache = {}
            if summ:
                self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES - REGIMES"
                SUMMARY.GM_Endog_Error(reg=self, w=w, vm=vm, regimes=True)
def _endog_error_regimes_multi(self, y, x, regimes, w, yend, q, cores,
                               cols2regi, vm, name_x, name_yend, name_q, add_lag):
    """Estimate the endogenous spatial-error model separately per regime.

    One regression is run for each regime (optionally in parallel worker
    processes) and the per-regime results are stitched back into the
    attributes of ``self`` (betas, vm, residuals, variable names, ...).

    Parameters mirror those of ``__init__``; ``add_lag`` is either False
    or the pair ``[w_lags, lag_q]`` when a spatial lag of y is added.
    """
    # Map each regime label to the row indices of its observations.
    regi_ids = dict(
        (r, list(np.where(np.array(regimes) == r)[0])) for r in self.regimes_set)
    if add_lag != False:
        # The spatial-lag coefficient also varies by regime.
        self.cols2regi += [True]
        cols2regi += [True]
        self.predy_e = np.zeros((self.n, 1), float)
        self.e_pred = np.zeros((self.n, 1), float)
    results_p = {}
    # Create a single worker pool up front.  The previous version created a
    # new mp.Pool on every iteration and only closed/joined the last one,
    # leaking worker processes; it also contained a dead loop whose body was
    # just a leftover commented-out string.
    pool = mp.Pool(None) if cores else None
    for r in self.regimes_set:
        work_args = (y, x, yend, q, regi_ids, r, w, self.name_ds, self.name_y,
                     name_x, name_yend, name_q, self.name_w,
                     self.name_regimes, add_lag)
        if pool is not None:
            results_p[r] = pool.apply_async(_work_endog_error, args=work_args)
        else:
            results_p[r] = _work_endog_error(*work_args)
    self.kryd, self.kf = 0, 0
    self.kr = len(cols2regi)
    self.nr = len(self.regimes_set)
    # Pre-allocate the stacked (block-diagonal) result containers.
    self.vm = np.zeros((self.nr * self.kr, self.nr * self.kr), float)
    self.betas = np.zeros((self.nr * (self.kr + 1), 1), float)
    self.u = np.zeros((self.n, 1), float)
    self.predy = np.zeros((self.n, 1), float)
    self.e_filtered = np.zeros((self.n, 1), float)
    if pool is not None:
        pool.close()
        pool.join()
    results = {}
    self.name_y, self.name_x, self.name_yend, self.name_q, self.name_z, self.name_h = [
    ], [], [], [], [], []
    counter = 0
    for r in self.regimes_set:
        # Collect the (possibly asynchronous) result for this regime.
        if pool is None:
            results[r] = results_p[r]
        else:
            results[r] = results_p[r].get()
        # Copy this regime's results into the stacked containers.
        self.vm[(counter * self.kr):((counter + 1) * self.kr),
                (counter * self.kr):((counter + 1) * self.kr)] = results[r].vm
        self.betas[
            (counter * (self.kr + 1)):((counter + 1) * (self.kr + 1)), ] = results[r].betas
        self.u[regi_ids[r], ] = results[r].u
        self.predy[regi_ids[r], ] = results[r].predy
        self.e_filtered[regi_ids[r], ] = results[r].e_filtered
        self.name_y += results[r].name_y
        self.name_x += results[r].name_x
        self.name_yend += results[r].name_yend
        self.name_q += results[r].name_q
        self.name_z += results[r].name_z
        self.name_h += results[r].name_h
        if add_lag != False:
            self.predy_e[regi_ids[r], ] = results[r].predy_e
            self.e_pred[regi_ids[r], ] = results[r].e_pred
        counter += 1
    self.chow = REGI.Chow(self)
    self.multi = results
    if add_lag != False:
        SUMMARY.GM_Combo_multi(
            reg=self, multireg=self.multi, vm=vm, regimes=True)
    else:
        SUMMARY.GM_Endog_Error_multi(
            reg=self, multireg=self.multi, vm=vm, regimes=True)
class GM_Combo_Regimes(GM_Endog_Error_Regimes, REGI.Regimes_Frame):
    """
    GMM method for a spatial lag and error model with regimes and endogenous
    variables, with results and diagnostics; based on Kelejian and Prucha (1998,
    1999)[1]_[2]_.

    Parameters
    ----------
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, excluding the constant
    regimes : list
        List of n values with the mapping of each
        observation to a regime. Assumed to be aligned with 'x'.
    yend : array
        Two dimensional array with n rows and one column for each
        endogenous variable
    q : array
        Two dimensional array with n rows and one column for each
        external exogenous variable to use as instruments (note:
        this should not contain any variables from x)
    w : pysal W object
        Spatial weights object (always needed)
    constant_regi : ['one', 'many']
        Switcher controlling the constant term setup. It may take
        the following values:
        * 'one': a vector of ones is appended to x and held
          constant across regimes
        * 'many': a vector of ones is appended to x and considered
          different per regime (default)
    cols2regi : list, 'all'
        Argument indicating whether each
        column of x should be considered as different per regime
        or held constant across regimes (False).
        If a list, k booleans indicating for each variable the
        option (True if one per regime, False to be held constant).
        If 'all' (default), all the variables vary by regime.
    regime_err_sep : boolean
        If True, a separate regression is run for each regime.
    regime_lag_sep : boolean
        If True, the spatial parameter for spatial lag is also
        computed according to different regimes. If False (default),
        the spatial parameter is fixed across regimes.
    w_lags : integer
        Orders of W to include as instruments for the spatially
        lagged dependent variable. For example, w_lags=1, then
        instruments are WX; if w_lags=2, then WX, WWX; and so on.
    lag_q : boolean
        If True, then include spatial lags of the additional
        instruments (q).
    vm : boolean
        If True, include variance-covariance matrix in summary
        results
    cores : boolean
        Specifies if multiprocessing is to be used
        Default: no multiprocessing, cores = False
        Note: Multiprocessing may not work on all platforms.
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_yend : list of strings
        Names of endogenous variables for use in output
    name_q : list of strings
        Names of instruments for use in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output
    name_regimes : string
        Name of regime variable for use in the output

    Attributes
    ----------
    summary : string
        Summary of regression results and diagnostics (note: use in
        conjunction with the print command)
    betas : array
        kx1 array of estimated coefficients
    u : array
        nx1 array of residuals
    e_filtered : array
        nx1 array of spatially filtered residuals
    e_pred : array
        nx1 array of residuals (using reduced form)
    predy : array
        nx1 array of predicted y values
    predy_e : array
        nx1 array of predicted y values (using reduced form)
    n : integer
        Number of observations
    k : integer
        Number of variables for which coefficients are estimated
        (including the constant)
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    y : array
        nx1 array for dependent variable
    x : array
        Two dimensional array with n rows and one column for each
        independent (exogenous) variable, including the constant
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    yend : array
        Two dimensional array with n rows and one column for each
        endogenous variable
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    z : array
        nxk array of variables (combination of x and yend)
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    mean_y : float
        Mean of dependent variable
    std_y : float
        Standard deviation of dependent variable
    vm : array
        Variance covariance matrix (kxk)
    pr2 : float
        Pseudo R squared (squared correlation between y and ypred)
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    pr2_e : float
        Pseudo R squared (squared correlation between y and ypred_e
        (using reduced form))
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    sig2 : float
        Sigma squared used in computations (based on filtered
        residuals)
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    std_err : array
        1xk array of standard errors of the betas
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    z_stat : list of tuples
        z statistic; each tuple contains the pair (statistic,
        p-value), where each is a float
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    name_y : string
        Name of dependent variable for use in output
    name_x : list of strings
        Names of independent variables for use in output
    name_yend : list of strings
        Names of endogenous variables for use in output
    name_z : list of strings
        Names of exogenous and endogenous variables for use in
        output
    name_q : list of strings
        Names of external instruments
    name_h : list of strings
        Names of all instruments used in output
    name_w : string
        Name of weights matrix for use in output
    name_ds : string
        Name of dataset for use in output
    name_regimes : string
        Name of regimes variable for use in output
    title : string
        Name of the regression method used
        Only available in dictionary 'multi' when multiple regressions
        (see 'multi' below for details)
    regimes : list
        List of n values with the mapping of each
        observation to a regime. Assumed to be aligned with 'x'.
    constant_regi : ['one', 'many']
        Ignored if regimes=False. Constant option for regimes.
        Switcher controlling the constant term setup. It may take
        the following values:
        * 'one': a vector of ones is appended to x and held
          constant across regimes
        * 'many': a vector of ones is appended to x and considered
          different per regime
    cols2regi : list, 'all'
        Ignored if regimes=False. Argument indicating whether each
        column of x should be considered as different per regime
        or held constant across regimes (False).
        If a list, k booleans indicating for each variable the
        option (True if one per regime, False to be held constant).
        If 'all', all the variables vary by regime.
    regime_err_sep : boolean
        If True, a separate regression is run for each regime.
    regime_lag_sep : boolean
        If True, the spatial parameter for spatial lag is also
        computed according to different regimes. If False (default),
        the spatial parameter is fixed across regimes.
    kr : int
        Number of variables/columns to be "regimized" or subject
        to change by regime. These will result in one parameter
        estimate by regime for each variable (i.e. nr parameters per
        variable)
    kf : int
        Number of variables/columns to be considered fixed or
        global across regimes and hence only obtain one parameter
        estimate
    nr : int
        Number of different regimes in the 'regimes' list
    multi : dictionary
        Only available when multiple regressions are estimated,
        i.e. when regime_err_sep=True and no variable is fixed
        across regimes.
        Contains all attributes of each individual regression

    References
    ----------
    .. [1] Kelejian, H.R., Prucha, I.R. (1998) "A generalized spatial
    two-stage least squares procedure for estimating a spatial autoregressive
    model with autoregressive disturbances". The Journal of Real State
    Finance and Economics, 17, 1.

    .. [2] Kelejian, H.R., Prucha, I.R. (1999) "A Generalized Moments
    Estimator for the Autoregressive Parameter in a Spatial Model".
    International Economic Review, 40, 2.

    Examples
    --------
    We first need to import the needed modules, namely numpy to convert the
    data we read into arrays that ``spreg`` understands and ``pysal`` to
    perform all the analysis.

    >>> import numpy as np
    >>> import pysal

    Open data on NCOVR US County Homicides (3085 areas) using pysal.open().
    This is the DBF associated with the NAT shapefile. Note that
    pysal.open() also reads data in CSV format; since the actual class
    requires data to be passed in as numpy arrays, the user can read their
    data in using any method.

    >>> db = pysal.open(pysal.examples.get_path("NAT.dbf"),'r')

    Extract the HR90 column (homicide rates in 1990) from the DBF file and make it the
    dependent variable for the regression. Note that PySAL requires this to be
    a numpy array of shape (n, 1) as opposed to the also common shape of (n, )
    that other packages accept.

    >>> y_var = 'HR90'
    >>> y = np.array([db.by_col(y_var)]).reshape(3085,1)

    Extract UE90 (unemployment rate) and PS90 (population structure) vectors from
    the DBF to be used as independent variables in the regression. Other variables
    can be inserted by adding their names to x_var, such as x_var = ['Var1','Var2','...]
    Note that PySAL requires this to be an nxj numpy array, where j is the
    number of independent variables (not including a constant). By default
    this model adds a vector of ones to the independent variables passed in.

    >>> x_var = ['PS90','UE90']
    >>> x = np.array([db.by_col(name) for name in x_var]).T

    The different regimes in this data are given according to the North and
    South dummy (SOUTH).

    >>> r_var = 'SOUTH'
    >>> regimes = db.by_col(r_var)

    Since we want to run a spatial lag model, we need to specify
    the spatial weights matrix that includes the spatial configuration of the
    observations. To do that, we can open an already existing gal file or
    create a new one. In this case, we will create one from ``NAT.shp``.

    >>> w = pysal.rook_from_shapefile(pysal.examples.get_path("NAT.shp"))

    Unless there is a good reason not to do it, the weights have to be
    row-standardized so every row of the matrix sums to one. Among other
    things, this allows to interpret the spatial lag of a variable as the
    average value of the neighboring observations. In PySAL, this can be
    easily performed in the following way:

    >>> w.transform = 'r'

    The Combo class runs an SARAR model, that is a spatial lag+error model.
    In this case we will run a simple version of that, where we have the
    spatial effects as well as exogenous variables. Since it is a spatial
    model, we have to pass in the weights matrix. If we want to
    have the names of the variables printed in the output summary, we will
    have to pass them in as well, although this is optional.

    >>> model = GM_Combo_Regimes(y, x, regimes, w=w, name_y=y_var, name_x=x_var, name_regimes=r_var, name_ds='NAT')

    Once we have run the model, we can explore a little bit the output. The
    regression object we have created has many attributes so take your time to
    discover them. Note that because we are running the classical GMM error
    model from 1998/99, the spatial parameter is obtained as a point estimate, so
    although you get a value for it (there are four coefficients under
    model.betas), you cannot perform inference on it (there are only three
    values in model.se_betas). Also, this regression uses a two stage least
    squares estimation method that accounts for the endogeneity created by the
    spatial lag of the dependent variable. We can have a summary of the
    output by typing: model.summary
    Alternatively, we can check the betas:

    >>> print model.name_z
    ['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', '_Global_W_HR90', 'lambda']
    >>> print np.around(model.betas,4)
    [[ 1.4607]
     [ 0.958 ]
     [ 0.5658]
     [ 9.113 ]
     [ 1.1338]
     [ 0.6517]
     [-0.4583]
     [ 0.6136]]

    And lambda:

    >>> print 'lambda: ', np.around(model.betas[-1], 4)
    lambda:  [ 0.6136]

    This class also allows the user to run a spatial lag+error model with the
    extra feature of including non-spatial endogenous regressors. This means
    that, in addition to the spatial lag and error, we consider some of the
    variables on the right-hand side of the equation as endogenous and we
    instrument for this. In this case we consider RD90 (resource deprivation)
    as an endogenous regressor. We use FP89 (families below poverty)
    for this and hence put it in the instruments parameter, 'q'.

    >>> yd_var = ['RD90']
    >>> yd = np.array([db.by_col(name) for name in yd_var]).T
    >>> q_var = ['FP89']
    >>> q = np.array([db.by_col(name) for name in q_var]).T

    And then we can run and explore the model analogously to the previous combo:

    >>> model = GM_Combo_Regimes(y, x, regimes, yd, q, w=w, name_y=y_var, name_x=x_var, name_yend=yd_var, name_q=q_var, name_regimes=r_var, name_ds='NAT')
    >>> print model.name_z
    ['0_CONSTANT', '0_PS90', '0_UE90', '1_CONSTANT', '1_PS90', '1_UE90', '0_RD90', '1_RD90', '_Global_W_HR90', 'lambda']
    >>> print model.betas
    [[ 3.41963782]
     [ 1.04065841]
     [ 0.16634393]
     [ 8.86544628]
     [ 1.85120528]
     [-0.24908469]
     [ 2.43014046]
     [ 3.61645481]
     [ 0.03308671]
     [ 0.18684992]]
    >>> print np.sqrt(model.vm.diagonal())
    [ 0.53067577  0.13271426  0.06058025  0.76406411  0.17969783  0.07167421
      0.28943121  0.25308326  0.06126529]
    >>> print 'lambda: ', np.around(model.betas[-1], 4)
    lambda:  [ 0.1868]
    """

    def __init__(self, y, x, regimes, yend=None, q=None,
                 w=None, w_lags=1, lag_q=True, cores=False,
                 constant_regi='many', cols2regi='all',
                 regime_err_sep=False, regime_lag_sep=False,
                 vm=False, name_y=None, name_x=None,
                 name_yend=None, name_q=None,
                 name_w=None, name_ds=None, name_regimes=None):

        # Input sanity checks and default variable names.
        n = USER.check_arrays(y, x)
        USER.check_y(y, n)
        USER.check_weights(w, y, w_required=True)
        name_x = USER.set_name_x(name_x, x, constant=True)
        self.name_y = USER.set_name_y(name_y)
        name_yend = USER.set_name_yend(name_yend, yend)
        name_q = USER.set_name_q(name_q, q)
        # The combo model always adds spatially lagged instruments.
        name_q.extend(
            USER.set_name_q_sp(name_x, w_lags, name_q, lag_q, force_all=True))
        cols2regi = REGI.check_cols2regi(
            constant_regi, cols2regi, x, yend=yend, add_cons=False)
        self.regimes_set = REGI._get_regimes_set(regimes)
        self.regimes = regimes
        USER.check_regimes(self.regimes_set, n, x.shape[1])
        self.regime_err_sep = regime_err_sep
        self.regime_lag_sep = regime_lag_sep
        if regime_lag_sep == True:
            if regime_err_sep == False:
                # Parenthesized raise: valid in both Python 2 and Python 3
                # (was the Python-2-only `raise Exception, "..."` form).
                raise Exception("For spatial combo models, if spatial lag is set by regimes (regime_lag_sep=True), spatial error must also be set by regimes (regime_err_sep=True).")
            # Lag by regime: the spatial lag is added per regime inside the
            # multi-regression workers.
            add_lag = [w_lags, lag_q]
        else:
            if regime_err_sep == True:
                raise Exception("For spatial combo models, if spatial error is set by regimes (regime_err_sep=True), all coefficients including lambda (regime_lag_sep=True) must be set by regimes.")
            # Global lag coefficient: add the lag once here and mark its
            # column as fixed across regimes.
            cols2regi += [False]
            add_lag = False
            yend, q = set_endog(y, x, w, yend, q, w_lags, lag_q)
        name_yend.append(USER.set_name_yend_sp(self.name_y))
        GM_Endog_Error_Regimes.__init__(self, y=y, x=x, yend=yend,
                                        q=q, regimes=regimes, w=w, vm=vm, constant_regi=constant_regi,
                                        cols2regi=cols2regi, regime_err_sep=regime_err_sep, cores=cores,
                                        name_y=self.name_y, name_x=name_x,
                                        name_yend=name_yend, name_q=name_q, name_w=name_w,
                                        name_ds=name_ds, name_regimes=name_regimes, summ=False, add_lag=add_lag)
        if regime_err_sep != True:
            # Single (pooled) regression: recover rho and the reduced-form
            # predictions, then produce the summary here.
            self.rho = self.betas[-2]
            self.predy_e, self.e_pred, warn = sp_att(w, self.y,
                                                     self.predy, yend[:, -1].reshape(self.n, 1), self.rho)
            set_warn(self, warn)
            self.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES - REGIMES"
            SUMMARY.GM_Combo(reg=self, w=w, vm=vm, regimes=True)
def _work_error(y, x, regi_ids, r, w, name_ds, name_y, name_x, name_w, name_regimes):
    """Fit the spatially weighted least squares model on the observations
    belonging to regime ``r`` and return the fitted model object.

    Used as the per-regime worker by the multi-regression drivers (may run
    in a separate process).
    """
    idx = regi_ids[r]
    w_r, warn = REGI.w_regime(w, idx, r, transform=True)
    x_constant = USER.check_constant(x[idx])
    model = BaseGM_Error(y[idx], x_constant, w_r.sparse)
    set_warn(model, warn)
    # Attach metadata used when printing the per-regime summary.
    model.w = w_r
    model.title = "SPATIALLY WEIGHTED LEAST SQUARES ESTIMATION - REGIME %s" % r
    model.name_ds = name_ds
    prefix = str(r)
    model.name_y = '%s_%s' % (prefix, name_y)
    model.name_x = ['%s_%s' % (prefix, i) for i in name_x]
    model.name_w = name_w
    model.name_regimes = name_regimes
    return model
def _work_endog_error(y, x, yend, q, regi_ids, r, w, name_ds, name_y, name_x, name_yend, name_q, name_w, name_regimes, add_lag):
    """Fit the endogenous spatial-error model on the observations belonging
    to regime ``r`` and return the fitted model object.

    ``add_lag`` is either False or ``[w_lags, lag_q]``; in the latter case a
    spatial lag of y is added to the endogenous variables for this regime.
    Used as the per-regime worker by the multi-regression drivers.
    """
    idx = regi_ids[r]
    w_r, warn = REGI.w_regime(w, idx, r, transform=True)
    y_r = y[idx]
    x_r = x[idx]
    if yend is not None:
        yend_r = yend[idx]
        q_r = q[idx]
    else:
        yend_r, q_r = None, None
    if add_lag != False:
        # Append the regime-specific spatial lag of y (and lagged instruments).
        yend_r, q_r = set_endog(
            y_r, x_r, w_r, yend_r, q_r, add_lag[0], add_lag[1])
    x_constant = USER.check_constant(x_r)
    model = BaseGM_Endog_Error(y_r, x_constant, yend_r, q_r, w_r.sparse)
    set_warn(model, warn)
    if add_lag != False:
        # Recover rho and the reduced-form predictions for the lag model.
        model.rho = model.betas[-2]
        model.predy_e, model.e_pred, warn = sp_att(w_r, model.y,
                                                   model.predy, model.yend[:, -1].reshape(model.n, 1), model.rho)
        set_warn(model, warn)
    # Attach metadata used when printing the per-regime summary.
    model.w = w_r
    model.title = "SPATIALLY WEIGHTED TWO STAGE LEAST SQUARES - REGIME %s" % r
    model.name_ds = name_ds
    prefix = str(r)
    model.name_y = '%s_%s' % (prefix, name_y)
    model.name_x = ['%s_%s' % (prefix, i) for i in name_x]
    model.name_yend = ['%s_%s' % (prefix, i) for i in name_yend]
    model.name_z = model.name_x + model.name_yend + ['lambda']
    model.name_q = ['%s_%s' % (prefix, i) for i in name_q]
    model.name_h = model.name_x + model.name_q
    model.name_w = name_w
    model.name_regimes = name_regimes
    return model
def _test():
    """Run the module doctests with numpy's 'suppress' print option enabled.

    The previous implementation did not restore the caller's print options
    if a doctest raised; the try/finally now guarantees restoration.
    """
    import doctest
    start_suppress = np.get_printoptions()['suppress']
    np.set_printoptions(suppress=True)
    try:
        doctest.testmod()
    finally:
        np.set_printoptions(suppress=start_suppress)
if __name__ == '__main__':
    _test()

    # Small smoke test on the Columbus data set shipped with PySAL.
    import pysal
    import numpy as np
    dbf = pysal.open(pysal.examples.get_path('columbus.dbf'), 'r')
    y = np.array([dbf.by_col('CRIME')]).T
    names_to_extract = ['INC']
    x = np.array([dbf.by_col(name) for name in names_to_extract]).T
    # NOTE(review): yend/q are built but not passed to GM_Error_Regimes below.
    yd_var = ['HOVAL']
    yend = np.array([dbf.by_col(name) for name in yd_var]).T
    q_var = ['DISCBD']
    q = np.array([dbf.by_col(name) for name in q_var]).T
    # Fixed: was a duplicated assignment (`regimes = regimes = ...`).
    regimes = dbf.by_col('NSA')
    w = pysal.open(pysal.examples.get_path("columbus.gal"), 'r').read()
    w.transform = 'r'
    model = GM_Error_Regimes(y, x, regimes=regimes, w=w, name_y='crime', name_x=[
        'income'], name_regimes='nsa', name_ds='columbus', regime_err_sep=True)
    # print() form works under both Python 2 and Python 3.
    print(model.summary)
| dfolch/pysal | pysal/spreg/error_sp_regimes.py | Python | bsd-3-clause | 64,261 | [
"COLUMBUS"
] | 83826780b2e7ffe82e746565e82194d9a778e1804e592eba7af35aec39ec4edb |
from wheezy.http import WSGIApplication
from wheezy.web.middleware import bootstrap_defaults
from wheezy.web.middleware import path_routing_middleware_factory
from config import options
from urls import all_urls
# WSGI entry point: a wheezy.http application whose middleware stack maps
# request paths to the handlers declared in urls.all_urls.
main = WSGIApplication([
    bootstrap_defaults(url_mapping=all_urls),
    path_routing_middleware_factory
], options)


if __name__ == '__main__':
    # Development server: serve the app on port 8080 with wsgiref until
    # interrupted with Ctrl-C.
    from wsgiref.handlers import BaseHandler
    from wsgiref.simple_server import make_server
    try:
        print('Visit http://localhost:8080/')
        # Advertise HTTP/1.1 so keep-alive connections are used.
        BaseHandler.http_version = '1.1'
        make_server('', 8080, main).serve_forever()
    except KeyboardInterrupt:
        pass
    print('\nThanks!')
"VisIt"
] | 1c80f95dcc2f4d13239ae804f74d9a0f67c408b0e39cb5c15bd912068509c5c8 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
##############################################################################
Calculation of Molecular physical/chemical properties based on some special
type of approaches (6), including: LogP, LogP2, MR, TPSA, UI and Hy. You can
freely use and distribute it. If you have any problem, please contact
us at any time!
Authors: Zhijiang Yao and Dongsheng Cao.
Date: 2016.06.04
Email: gadsby@163.com and oriental-cds@163.com
##############################################################################
"""
# Core Library modules
import math
# Third party modules
from rdkit import Chem
from rdkit.Chem import Crippen
from rdkit.Chem import MolSurf as MS
Version = 1.0
##############################################################
def CalculateMolLogP(mol):
    """
    #################################################################
    Crippen-method LogP of a molecule, rounded to three decimals.
    ---->LogP
    Usage:
    result=CalculateMolLogP(mol)
    Input: mol is a molecule object.
    Output: result is a numeric value.
    #################################################################
    """
    logp = Crippen._pyMolLogP(mol)
    return round(logp, 3)
def CalculateMolLogP2(mol):
    """
    #################################################################
    Square of the Crippen-method LogP, rounded to three decimals.
    ---->LogP2
    Usage:
    result=CalculateMolLogP2(mol)
    Input: mol is a molecule object.
    Output: result is a numeric value.
    #################################################################
    """
    logp = Crippen._pyMolLogP(mol)
    squared = logp * logp
    return round(squared, 3)
def CalculateMolMR(mol):
    """
    #################################################################
    Crippen-method molar refractivity, rounded to three decimals.
    ---->MR
    Usage:
    result=CalculateMolMR(mol)
    Input: mol is a molecule object.
    Output: result is a numeric value.
    #################################################################
    """
    refractivity = Crippen._pyMolMR(mol)
    return round(refractivity, 3)
def CalculateTPSA(mol):
    """
    #################################################################
    Topological polar surface area from fragment contributions.
    Algorithm in:
    P. Ertl, B. Rohde, P. Selzer
    Fast Calculation of Molecular Polar Surface Area as a Sum of
    Fragment-based Contributions and Its Application to the Prediction
    of Drug Transport Properties, J.Med.Chem. 43, 3714-3717, 2000
    Implementation based on the Daylight contrib program tpsa.
    ---->TPSA
    Usage:
    result=CalculateTPSA(mol)
    Input: mol is a molecule object.
    Output: result is a numeric value.
    #################################################################
    """
    area = MS.TPSA(mol)
    return round(area, 3)
def _CalculateBondNumber(mol, bondtype="SINGLE"):
    """
    #################################################################
    **Internal used only*
    Count the bonds of a molecule whose type name equals ``bondtype``;
    it may be SINGLE, DOUBLE, TRIPLE or AROMATIC.
    #################################################################
    """
    return sum(1 for bond in mol.GetBonds()
               if bond.GetBondType().name == bondtype)
def CalculateUnsaturationIndex(mol):
    """
    #################################################################
    Unsaturation index: log2(1 + #double + #triple + #aromatic bonds).
    ---->UI
    Usage:
    result=CalculateUnsaturationIndex(mol)
    Input: mol is a molecule object.
    Output: result is a numeric value.
    #################################################################
    """
    unsaturated = sum(_CalculateBondNumber(mol, bondtype=bt)
                      for bt in ("DOUBLE", "TRIPLE", "AROMATIC"))
    return round(math.log(1 + unsaturated, 2), 3)
def CalculateHydrophilicityFactor(mol):
"""
#################################################################
Calculation of hydrophilicity factor. The hydrophilicity
index is described in more detail on page 225 of the
Handbook of Molecular Descriptors (Todeschini and Consonni 2000).
---->Hy
Usage:
result=CalculateHydrophilicityFactor(mol)
Input: mol is a molecule object.
Output: result is a numeric value.
#################################################################
"""
nheavy = mol.GetNumAtoms(onlyHeavy=1)
nc = 0
for atom in mol.GetAtoms():
if atom.GetAtomicNum() == 6:
nc = nc + 1
nhy = 0
for atom in mol.GetAtoms():
if (
atom.GetAtomicNum() == 7
or atom.GetAtomicNum() == 8
or atom.GetAtomicNum() == 16
):
atomn = atom.GetNeighbors()
for i in atomn:
if i.GetAtomicNum() == 1:
nhy = nhy + 1
res = (
(1 + nhy) * math.log((1 + nhy), 2)
+ nc * (1.0 / nheavy * math.log(1.0 / nheavy, 2))
+ math.sqrt((nhy + 0.0) / (nheavy ** 2))
)
return round(res, 3)
def CalculateXlogP(mol):
    """
    #################################################################
    Calculation of Wang octanol water partition coefficient.
    ---->XLogP
    Usage:
    result=CalculateXlogP(mol)
    Input: mol is a molecule object.
    Output: result is a numeric value.
    NOTE(review): not implemented -- the body is a placeholder and the
    function currently returns None.
    #################################################################
    """
    pass
def CalculateXlogP2(mol):
    """
    #################################################################
    Calculation of Wang octanol water partition coefficient (XLogP^2).
    ---->XLogP2
    Usage:
    result=CalculateXlogP2(mol)
    Input: mol is a molecule object.
    Output: result is a numeric value.
    NOTE(review): not implemented -- the body is a placeholder and the
    function currently returns None.
    #################################################################
    """
    pass
# Registry mapping each descriptor label to its calculator function;
# GetMolecularProperty iterates this dict to compute all six properties.
MolecularProperty = {
    "LogP": CalculateMolLogP,
    "LogP2": CalculateMolLogP2,
    "MR": CalculateMolMR,
    "TPSA": CalculateTPSA,
    "Hy": CalculateHydrophilicityFactor,
    "UI": CalculateUnsaturationIndex,
}
def GetMolecularProperty(mol):
    """
    #################################################################
    Compute all six molecular property descriptors for a molecule.
    Usage:
    result=GetMolecularProperty(mol)
    Input: mol is a molecule object.
    Output: result is a dict form containing 6 molecular properties.
    #################################################################
    """
    return {label: calc(mol) for label, calc in MolecularProperty.items()}
##########################################################
if __name__ == "__main__":

    # Quick manual check: print the six properties for a few SMILES strings.
    smis = ["CCCC", "CCCCC", "CCCCCC", "CC(N)C(=O)O", "CC(N)C(=O)[O-].[Na+]"]
    # NOTE(review): smi5 is defined but never used below.
    smi5 = ["CCCCCC", "CCC(C)CC", "CC(C)CCC", "CC(C)C(C)C", "CCCCCN", "c1ccccc1N"]
    for index, smi in enumerate(smis):
        m = Chem.MolFromSmiles(smi)
        print(index + 1)
        print(smi)
        print("\t", GetMolecularProperty(m))
    # f.close()
| gadsbyfly/PyBioMed | PyBioMed/PyMolecule/molproperty.py | Python | bsd-3-clause | 7,505 | [
"RDKit"
] | 5ff2dbf6ddb1d03f98a5e5520d17c264c05da7d1dc8503292efa3de77cce6fc7 |
"""
The B{0install} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _, logger
import os, sys
from optparse import OptionParser
import logging
from zeroinstall import SafeException, DryRun
# Sub-commands implemented under zeroinstall.cmd; the first non-option
# argument passed to main() must be one of these names.
valid_commands = ['config', 'slave']

# Raised by a command handler to request that the usage/help text be shown.
class UsageError(Exception): pass
def _ensure_standard_fds():
    """Ensure stdin, stdout and stderr FDs exist, to avoid confusion.

    Any missing descriptor among 0, 1 and 2 is pointed at os.devnull so a
    later open() cannot accidentally claim a standard FD number.
    """
    for std in (0, 1, 2):
        try:
            os.fstat(std)
        except OSError:
            # stdin must be readable; stdout/stderr must be writable
            # (the previous version opened all three O_RDONLY, so writes
            # to a replaced stdout/stderr would fail with EBADF).
            flags = os.O_RDONLY if std == 0 else os.O_WRONLY
            fd = os.open(os.devnull, flags)
            if fd != std:
                os.dup2(fd, std)
                os.close(fd)
class NoCommand(object):
    """Handle --help and --version"""

    def add_options(self, parser):
        # Only a single option: -V / --version.
        parser.add_option("-V", "--version", help=_("display version information"), action='store_true')

    def handle(self, config, options, args):
        # With no sub-command, only --version is meaningful; anything else
        # triggers the usage text.
        if not options.version:
            raise UsageError()
        import zeroinstall
        print("0install (zero-install) " + zeroinstall.version)
        print("Copyright (C) 2013 Thomas Leonard")
        print(_("This program comes with ABSOLUTELY NO WARRANTY,"
                "\nto the extent permitted by law."
                "\nYou may redistribute copies of this program"
                "\nunder the terms of the GNU Lesser General Public License."
                "\nFor more information about these matters, see the file named COPYING."))
        sys.exit(0)
def main(command_args, config = None):
    """Act as if 0install was run with the given arguments.
    @type command_args: [str]
    @type config: L{zeroinstall.injector.config.Config} | None
    @arg command_args: array of arguments (e.g. C{sys.argv[1:]})"""
    _ensure_standard_fds()

    if config is None:
        from zeroinstall.injector.config import load_config
        config = load_config()

    # The first non-option argument is the command name (or "help" if none is found).
    command = None
    for i, arg in enumerate(command_args):
        if not arg.startswith('-'):
            command = arg
            # Remove the command name from the argument list before parsing options.
            command_args = command_args[:i] + command_args[i + 1:]
            break
        elif arg == '--':
            # Everything after "--" is positional; stop scanning for a command.
            break

    verbose = False
    try:
        # Configure a parser for the given command
        my_name = os.path.basename(sys.argv[0])
        if my_name == '0install-python-fallback': my_name = '0install'  # Hack for python-fallback
        if command:
            if command not in valid_commands:
                raise SafeException(_("Unknown sub-command '%s': try --help") % command)

            # Import zeroinstall.cmd.<command>; '-' is not valid in module
            # names, so it is mapped to '_'.
            module_name = command.replace('-', '_')
            cmd = __import__('zeroinstall.cmd.' + module_name, globals(), locals(), [module_name], 0)
            parser = OptionParser(usage=_("usage: %s %s [OPTIONS] %s") % (my_name, command, cmd.syntax))
        else:
            # No command given: NoCommand handles --help / --version.
            cmd = NoCommand()
            parser = OptionParser(usage=_("usage: %s COMMAND\n\nTry --help with one of these:%s") %
                                  (my_name, "\n\n0install " + '\n0install '.join(valid_commands)))

        # Options shared by every sub-command.
        parser.add_option("-c", "--console", help=_("never use GUI"), action='store_false', dest='gui')
        parser.add_option("", "--dry-run", help=_("just print what would be executed"), action='store_true')
        parser.add_option("-g", "--gui", help=_("show graphical policy editor"), action='store_true')
        parser.add_option("-v", "--verbose", help=_("more verbose output"), action='count')
        parser.add_option("", "--with-store", help=_("add an implementation cache"), action='append', metavar='DIR')

        cmd.add_options(parser)
        (options, args) = parser.parse_args(command_args)
        verbose = options.verbose

        if options.verbose:
            # One -v gives INFO, two or more give DEBUG.
            if options.verbose == 1:
                logger.setLevel(logging.INFO)
            else:
                logger.setLevel(logging.DEBUG)
            import zeroinstall
            logger.info(_("Running 0install %(version)s %(args)s; Python %(python_version)s"), {'version': zeroinstall.version, 'args': repr(command_args), 'python_version': sys.version})

        config.handler.dry_run = bool(options.dry_run)
        if config.handler.dry_run:
            # A dry run cannot be shown in the GUI; force console mode.
            if options.gui is True:
                raise SafeException(_("Can't use --gui with --dry-run"))
            options.gui = False

        cmd.handle(config, options, args)
    except KeyboardInterrupt:
        logger.info("KeyboardInterrupt")
        sys.exit(1)
    except UsageError:
        parser.print_help()
        sys.exit(1)
    except DryRun as ex:
        print(_("[dry-run]"), ex)
    except SafeException as ex:
        # SafeExceptions are expected errors: show only the message, unless
        # verbose output was requested (then re-raise for a full traceback).
        if verbose: raise
        try:
            from zeroinstall.support import unicode
            print(unicode(ex), file=sys.stderr)
        # NOTE(review): bare except -- falls back to repr() if the unicode
        # helper is unavailable or conversion fails.
        except:
            print(repr(ex), file=sys.stderr)
        sys.exit(1)
    return
| afb/0install | zeroinstall/cmd/__init__.py | Python | lgpl-2.1 | 4,351 | [
"VisIt"
] | cc57ec418443f95f28b5262ed013e20f4f9d5fcfe8034e1b19bea1da0fd2371f |
from string import capwords
from django.db import models
CAPWORDS_ATTRS = ('name', 'firstname')
class PatientManager(models.Manager):
    """Custom Patient manager that normalizes name fields on create.

    Fields listed in ``attrs`` are passed through :func:`string.capwords`
    before the row is inserted, so e.g. ``'john doe'`` is stored as
    ``'John Doe'``.
    """

    # Fields whose values are capword-normalized on create.
    attrs = CAPWORDS_ATTRS

    def create(self, **kwargs):
        """Create a Patient, capwording the fields listed in ``attrs``.

        Fix: the original did ``kwargs[i] = capwords(kwargs[i])``
        unconditionally and raised ``KeyError`` whenever one of the attrs
        was not supplied (e.g. creation through a related manager).  Absent
        or non-string values are now skipped.
        """
        for field in self.attrs:
            value = kwargs.get(field)
            if isinstance(value, str):
                kwargs[field] = capwords(value)
        # Delegate the actual insert to the base manager.
        return super(PatientManager, self).create(**kwargs)
class Patient(models.Model):
    """
    Base class for a patient.
    Only 3 fields are required: name, firstname, birthdate.
    """
    # Fields normalized with capwords() in save() below.
    attrs = CAPWORDS_ATTRS

    # required fields
    name = models.CharField(max_length=50)
    firstname = models.CharField(max_length=50)
    birthdate = models.DateField()
    sexe = models.BooleanField(default=True)  # True if female, False otherwise

    # non-required fields
    street = models.CharField(blank=True, max_length=200, default="")
    postalcode = models.CharField(blank=True, max_length=5, default="")
    city = models.CharField(max_length=200, blank=True, default="")
    phonenumber = models.CharField(blank=True, max_length=20, default="")
    email = models.EmailField(blank=True, max_length=100, default="")
    alive = models.BooleanField(default=True)

    # Custom manager: capwords name fields on Patient.objects.create().
    objects = PatientManager()

    def __str__(self):
        """
        Nice printing: "Firstname Name".
        """
        return self.firstname + ' ' + self.name

    def save(self, *args, **kwargs):
        """
        Customized save method, adds:
        - capwords normalization of the fields listed in ``attrs``
          (name, firstname), mirroring PatientManager.create().
        """
        for i in self.attrs:
            setattr(self, i, capwords(getattr(self, i)))
        super(Patient, self).save(*args, **kwargs)
"""
champs à ajouter :
date de décès
décédé
médecin traitant déclaré
notes divers
"""
| jgirardet/unolog | unolog/patients/models.py | Python | gpl-3.0 | 2,704 | [
"ASE"
] | 684c5673d32b49f7625e678e938197ef2eb501a982a3c00573ec199503b2ba18 |
import numpy as np
import pytest
from sklearn.utils._testing import assert_allclose, assert_raises
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors._ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.exceptions import NotFittedError
import joblib
# XXX Duplicated in test_neighbors_tree, test_kde
def compute_kernel_slow(Y, X, kernel, h):
    """Brute-force reference kernel density evaluation.

    For every query point in Y, sums the kernel contributions of all
    training samples in X with bandwidth h.  Quadratic in the number of
    points; used only to validate the tree-based KernelDensity estimator.
    """
    # full pairwise Euclidean distance matrix, shape (len(Y), len(X))
    dist = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    scale = kernel_norm(h, X.shape[1], kernel) / X.shape[0]

    if kernel == 'gaussian':
        return scale * np.exp(-0.5 * (dist * dist) / (h * h)).sum(-1)
    if kernel == 'tophat':
        return scale * (dist < h).sum(-1)
    if kernel == 'epanechnikov':
        return scale * ((1.0 - (dist * dist) / (h * h)) * (dist < h)).sum(-1)
    if kernel == 'exponential':
        return scale * (np.exp(-dist / h)).sum(-1)
    if kernel == 'linear':
        return scale * ((1 - dist / h) * (dist < h)).sum(-1)
    if kernel == 'cosine':
        return scale * (np.cos(0.5 * np.pi * dist / h) * (dist < h)).sum(-1)
    raise ValueError('kernel not recognized')
def check_results(kernel, bandwidth, atol, rtol, X, Y, dens_true):
    """Fit a KernelDensity on X and compare its density at Y to dens_true."""
    estimator = KernelDensity(kernel=kernel, bandwidth=bandwidth,
                              atol=atol, rtol=rtol)
    log_density = estimator.fit(X).score_samples(Y)
    # never compare tighter than 1e-7 relative, even when rtol == 0
    effective_rtol = max(1E-7, rtol)
    assert_allclose(np.exp(log_density), dens_true,
                    atol=atol, rtol=effective_rtol)
    # total log-likelihood must match the product of the point densities
    assert_allclose(np.exp(estimator.score(Y)), np.prod(dens_true),
                    atol=atol, rtol=effective_rtol)
@pytest.mark.parametrize(
    'kernel',
    ['gaussian', 'tophat', 'epanechnikov',
     'exponential', 'linear', 'cosine'])
@pytest.mark.parametrize('bandwidth', [0.01, 0.1, 1])
def test_kernel_density(kernel, bandwidth):
    """Compare KernelDensity to the brute-force reference over a grid of
    (rtol, atol) tolerance settings."""
    n_samples, n_features = (100, 3)

    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    Y = rng.randn(n_samples, n_features)

    dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)

    # Fix: the former `for breadth_first in (True, False)` loop was dead
    # code — the flag was never forwarded to check_results, so every
    # combination was checked twice identically.  Run each once.
    for rtol in [0, 1E-5]:
        for atol in [1E-6, 1E-2]:
            check_results(kernel, bandwidth, atol, rtol,
                          X, Y, dens_true)
def test_kernel_density_sampling(n_samples=100, n_features=3):
    """Smoke-test KernelDensity.sample for supported and unsupported kernels."""
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)

    bandwidth = 0.2

    for kernel in ['gaussian', 'tophat']:
        # draw a sample and compare it against the training data
        kde = KernelDensity(bandwidth=bandwidth, kernel=kernel).fit(X)
        drawn = kde.sample(100)
        assert X.shape == drawn.shape

        # each drawn point must lie within the kernel's support around
        # some training point
        nn = NearestNeighbors(n_neighbors=1).fit(X)
        dist, ind = nn.kneighbors(X, return_distance=True)

        if kernel == 'tophat':
            # tophat support is exactly the bandwidth
            assert np.all(dist < bandwidth)
        elif kernel == 'gaussian':
            # 5 standard deviations is safe for 100 samples, but there's a
            # very small chance this test could fail.
            assert np.all(dist < 5 * bandwidth)

    # sampling is only implemented for gaussian and tophat kernels
    for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
        kde = KernelDensity(bandwidth=bandwidth, kernel=kernel).fit(X)
        assert_raises(NotImplementedError, kde.sample, 100)

    # non-regression test: used to return a scalar
    X = rng.randn(4, 1)
    kde = KernelDensity(kernel="gaussian").fit(X)
    assert kde.sample().shape == (1, 1)
@pytest.mark.parametrize('algorithm', ['auto', 'ball_tree', 'kd_tree'])
@pytest.mark.parametrize('metric',
                         ['euclidean', 'minkowski', 'manhattan',
                          'chebyshev', 'haversine'])
def test_kde_algorithm_metric_choice(algorithm, metric):
    """Smoke test: each algorithm/metric pair either works or is rejected."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)  # 2 features required for haversine dist.
    Y = rng.randn(10, 2)

    unsupported = (algorithm == 'kd_tree'
                   and metric not in KDTree.valid_metrics)
    if unsupported:
        # kd_tree rejects metrics it cannot handle
        assert_raises(ValueError, KernelDensity,
                      algorithm=algorithm, metric=metric)
        return

    density = KernelDensity(algorithm=algorithm, metric=metric)
    density.fit(X)
    y_dens = density.score_samples(Y)
    assert y_dens.shape == Y.shape[:1]
def test_kde_score(n_samples=100, n_features=3):
    # Placeholder: validating score() against a known reference is not
    # implemented yet; the sketched fixture below is kept for whoever
    # picks this up.
    pass
    # FIXME
    # rng = np.random.RandomState(0)
    # X = rng.random_sample((n_samples, n_features))
    # Y = rng.random_sample((n_samples, n_features))
def test_kde_badargs():
    """Invalid constructor and fit arguments must raise ValueError."""
    bad_ctor_kwargs = [
        dict(algorithm='blah'),
        dict(bandwidth=0),
        dict(kernel='blah'),
        dict(metric='blah'),
        dict(algorithm='kd_tree', metric='blah'),
    ]
    for kwargs in bad_ctor_kwargs:
        assert_raises(ValueError, KernelDensity, **kwargs)

    kde = KernelDensity()
    # sample_weight must be 1-D ...
    assert_raises(ValueError, kde.fit, np.random.random((200, 10)),
                  sample_weight=np.random.random((200, 10)))
    # ... and strictly positive
    assert_raises(ValueError, kde.fit, np.random.random((200, 10)),
                  sample_weight=-np.random.random(200))
def test_kde_pipeline_gridsearch():
    # KernelDensity must play nicely inside pipelines and grid-searches.
    X, _ = make_blobs(cluster_std=.1, random_state=1,
                      centers=[[0, 1], [1, 0], [0, 0]])
    pipeline = make_pipeline(
        StandardScaler(with_mean=False, with_std=False),
        KernelDensity(kernel="gaussian"))
    bandwidth_grid = dict(
        kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
    search = GridSearchCV(pipeline, param_grid=bandwidth_grid)
    search.fit(X)
    # the mid-range bandwidth should win for these tight blobs
    assert search.best_params_['kerneldensity__bandwidth'] == .1
def test_kde_sample_weights():
    """Check the sample_weight handling of KernelDensity:
    constant weights are a no-op, integer weights are equivalent to sample
    repetition, weights actually change the result, and the result is
    invariant under uniform scaling of the weights."""
    n_samples = 400
    size_test = 20
    # all-equal (non-unit) weights: must behave exactly like no weights
    weights_neutral = np.full(n_samples, 3.)
    for d in [1, 2, 10]:
        rng = np.random.RandomState(0)
        X = rng.rand(n_samples, d)
        # integer weights in [1, 11) so they can be emulated by repetition
        weights = 1 + (10 * X.sum(axis=1)).astype(np.int8)
        X_repetitions = np.repeat(X, weights, axis=0)
        n_samples_test = size_test // d
        test_points = rng.rand(n_samples_test, d)
        for algorithm in ['auto', 'ball_tree', 'kd_tree']:
            for metric in ['euclidean', 'minkowski', 'manhattan',
                           'chebyshev']:
                if algorithm != 'kd_tree' or metric in KDTree.valid_metrics:
                    kde = KernelDensity(algorithm=algorithm, metric=metric)

                    # Test that adding a constant sample weight has no effect
                    kde.fit(X, sample_weight=weights_neutral)
                    scores_const_weight = kde.score_samples(test_points)
                    sample_const_weight = kde.sample(random_state=1234)
                    kde.fit(X)
                    scores_no_weight = kde.score_samples(test_points)
                    sample_no_weight = kde.sample(random_state=1234)
                    assert_allclose(scores_const_weight, scores_no_weight)
                    assert_allclose(sample_const_weight, sample_no_weight)

                    # Test equivalence between sampling and (integer) weights
                    kde.fit(X, sample_weight=weights)
                    scores_weight = kde.score_samples(test_points)
                    sample_weight = kde.sample(random_state=1234)
                    kde.fit(X_repetitions)
                    scores_ref_sampling = kde.score_samples(test_points)
                    sample_ref_sampling = kde.sample(random_state=1234)
                    assert_allclose(scores_weight, scores_ref_sampling)
                    assert_allclose(sample_weight, sample_ref_sampling)

                    # Test that sample weights has a non-trivial effect
                    diff = np.max(np.abs(scores_no_weight - scores_weight))
                    assert diff > 0.001

                    # Test invariance with respect to arbitrary scaling
                    scale_factor = rng.rand()
                    kde.fit(X, sample_weight=(scale_factor * weights))
                    scores_scaled_weight = kde.score_samples(test_points)
                    assert_allclose(scores_scaled_weight, scores_weight)
def test_sample_weight_invalid():
    # Negative sample weights must be rejected at fit time.
    kde = KernelDensity()
    data = np.reshape([1., 2., 3.], (-1, 1))

    with pytest.raises(ValueError,
                       match="sample_weight must have positive values"):
        kde.fit(data, sample_weight=[0.1, -0.2, 0.3])
@pytest.mark.parametrize('sample_weight', [None, [0.1, 0.2, 0.3]])
def test_pickling(tmpdir, sample_weight):
    # Predictions must be identical before and after a joblib round-trip.
    # Non-regression test: sample_weight used to be dropped by pickling,
    # yielding a tree that was missing information.
    estimator = KernelDensity()
    train = np.reshape([1., 2., 3.], (-1, 1))
    estimator.fit(train, sample_weight=sample_weight)

    query = np.reshape([1.1, 2.1], (-1, 1))
    expected = estimator.score_samples(query)

    dump_path = str(tmpdir.join('dump.pkl'))
    joblib.dump(estimator, dump_path)
    restored = joblib.load(dump_path)

    assert_allclose(expected, restored.score_samples(query))
@pytest.mark.parametrize('method', ['score_samples', 'sample'])
def test_check_is_fitted(method):
    # Predict-like methods on an unfitted estimator must raise
    # NotFittedError rather than fail obscurely.
    X = np.random.RandomState(0).randn(10, 2)
    unfitted = KernelDensity()

    with pytest.raises(NotFittedError):
        getattr(unfitted, method)(X)
| xuewei4d/scikit-learn | sklearn/neighbors/tests/test_kde.py | Python | bsd-3-clause | 9,814 | [
"Gaussian"
] | 99017f14d73075160081728296849c533c0878a3ebec071cf6af972976caf4db |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Testmodule for the VTF file writing.
"""
import sys
import unittest as ut
import numpy as np
import espressomd
from espressomd import interactions
from espressomd.io.writer import vtf
import tempfile
npart = 50
class CommonTests(ut.TestCase):
    """
    Class that holds common test methods.

    NOTE: the class body below builds the shared simulation state once, at
    class-definition (import) time; subclasses only differ in which
    particle types they ask the VTF writers to emit.
    """

    system = espressomd.System(box_l=[1.0, 1.0, 1.0])
    # avoid particles to be set outside of the main box, otherwise particle
    # positions are folded in the core when writing out and we cannot directly
    # compare positions in the dataset and where particles were set. One would
    # need to unfold the positions of the hdf5 file.
    system.box_l = 3 * [npart]
    system.cell_system.skin = 0.4
    system.time_step = 0.01

    # filled in by the subclasses' setUpClass
    written_pos = None
    written_bonds = None
    written_atoms = None
    types_to_write = None

    # particles sit on the box diagonal (pos = (i, i, i)); type alternates
    # between 2 (even ids) and 0 (odd ids) via 1 + (-1)**i
    for i in range(npart):
        system.part.add(id=i, pos=np.array(3 * [i], dtype=float),
                        v=np.array([1.0, 2.0, 3.0]), type=1 + (-1)**i)
    system.bonded_inter.add(interactions.FeneBond(k=1., d_r_max=10.0))
    system.part[0].add_bond((0, 1))
    system.part[0].add_bond((0, 2))
    system.part[0].add_bond((0, 3))
    system.integrator.run(steps=0)

    def test_pos(self):
        """Test if positions have been written properly."""
        if self.types_to_write == 'all':
            simulation_pos = np.array(
                [((i), float(i), float(i), float(i)) for i in range(npart)])
        elif 2 in self.types_to_write:
            # only even-id particles have type 2 (see loop above)
            simulation_pos = np.array(
                [((i * 2), float(i * 2), float(i * 2), float(i * 2))
                 for i in range(npart // 2)])

        self.assertTrue(np.allclose(
            simulation_pos[:, 1:], self.written_pos[:, 1:]),
            msg="Positions not written correctly by writevcf!")

    def test_bonds(self):
        """Test if bonds have been written properly: just look at number of bonds"""
        if self.types_to_write == 'all':
            simulation_bonds = np.array([1, 2, 3])  # the two bonded particles
        elif 2 in self.types_to_write:
            simulation_bonds = np.array(2)  # only this one is type 2

        self.assertTrue(np.allclose(
            np.shape(simulation_bonds), np.shape(self.written_bonds)),
            msg="Bonds not written correctly by writevsf!")

    def test_atoms(self):
        """Test if atom declarations have been written properly."""
        if self.types_to_write == 'all':
            simulation_atoms = np.array(
                [((i), (1 + (-1)**i)) for i in range(npart)])
        elif 2 in self.types_to_write:
            simulation_atoms = np.array([((i * 2), 2)
                                         for i in range(npart // 2)])

        self.assertTrue(np.allclose(
            simulation_atoms[:, 1], self.written_atoms[:, 1]),
            msg="Atoms not written correctly by writevsf!")
class VCFTestAll(CommonTests):
    """
    Test the writing VTF files: all particle types.
    """

    @classmethod
    def setUpClass(cls):
        """Prepare a testsystem."""
        cls.types_to_write = 'all'

        # write the coordinate (VCF) file and parse the positions back in
        with tempfile.TemporaryFile(mode='w+t') as handle:
            vtf.writevcf(cls.system, handle, types=cls.types_to_write)
            handle.flush()
            handle.seek(0)
            cls.written_pos = np.loadtxt(handle, comments="t")

        # write the structure (VSF) file and parse bonds and atoms back in
        with tempfile.TemporaryFile(mode='w+t') as handle:
            vtf.writevsf(cls.system, handle, types=cls.types_to_write)
            handle.flush()
            handle.seek(0)
            # just the second bonded member of each "a:b" record
            cls.written_bonds = np.loadtxt(handle,
                                           skiprows=1,
                                           comments="a",
                                           delimiter=":",
                                           usecols=[1])
            handle.seek(0)
            # just the part_ID and type_ID columns
            cls.written_atoms = np.loadtxt(handle, skiprows=1, comments="b",
                                           usecols=[1, 7])
class VCFTestType(CommonTests):
    """
    Test the writing VTF files: only particle types 2 and 23.
    """

    @classmethod
    def setUpClass(cls):
        """Prepare a testsystem."""
        cls.types_to_write = [2, 23]

        # consistency fix: open in explicit text mode 'w+t' like VCFTestAll
        # ('w+' is equivalent, but the two fixtures should match)
        with tempfile.TemporaryFile(mode='w+t') as fp:
            vtf.writevcf(cls.system, fp, types=cls.types_to_write)
            fp.flush()
            fp.seek(0)
            cls.written_pos = np.loadtxt(fp, comments="t")

        with tempfile.TemporaryFile(mode='w+t') as fp:
            vtf.writevsf(cls.system, fp, types=cls.types_to_write)
            fp.flush()
            fp.seek(0)
            # just the second bonded member of each "a:b" record
            cls.written_bonds = np.loadtxt(fp,
                                           skiprows=1,
                                           comments="a",
                                           delimiter=":",
                                           usecols=[1])
            fp.seek(0)
            # just the part_ID and type_ID columns
            cls.written_atoms = np.loadtxt(fp, skiprows=1, comments="b",
                                           usecols=[1, 7])
if __name__ == "__main__":
suite = ut.TestLoader().loadTestsFromTestCase(VCFTestAll)
suite.addTests(ut.TestLoader().loadTestsFromTestCase(VCFTestType))
result = ut.TextTestRunner(verbosity=4).run(suite)
sys.exit(not result.wasSuccessful())
| psci2195/espresso-ffans | testsuite/python/writevtf.py | Python | gpl-3.0 | 5,876 | [
"ESPResSo"
] | 13f7e0dbfedbe3acee119dd83e9132cd8d28671c777640d76cabed02603c8fac |
#!/usr/bin/python
"""
This module contains an OpenSoundControl implementation (in Pure Python), based
(somewhat) on the good old 'SimpleOSC' implementation by Daniel Holth & Clinton
McChesney.
This implementation is intended to still be 'simple' to the user, but much more
complete (with OSCServer & OSCClient classes) and much more powerful (the
OSCMultiClient supports subscriptions & message-filtering, OSCMessage &
OSCBundle are now proper container-types)
===============================================================================
OpenSoundControl
===============================================================================
OpenSoundControl is a network-protocol for sending (small) packets of addressed
data over network sockets. This OSC-implementation supports the classical
UDP/IP protocol for sending and receiving packets but provides as well support
for TCP/IP streaming, whereas the message size is prepended as int32 (big
endian) before each message/packet.
OSC-packets come in two kinds:
- OSC-messages consist of an 'address'-string (not to be confused with a
(host:port) network-address!), followed by a string of 'typetags'
associated with the message's arguments (ie. 'payload'), and finally the
arguments themselves, encoded in an OSC-specific way. The OSCMessage class
makes it easy to create & manipulate OSC-messages of this kind in a
'pythonesque' way (that is, OSCMessage-objects behave a lot like lists)
- OSC-bundles are a special type of OSC-message containing only
OSC-messages as 'payload'. Recursively. (meaning; an OSC-bundle could
contain other OSC-bundles, containing OSC-bundles etc.)
OSC-bundles start with the special keyword '#bundle' and do not have an
OSC-address (but the OSC-messages a bundle contains will have OSC-addresses!).
Also, an OSC-bundle can have a timetag, essentially telling the receiving
server to 'hold' the bundle until the specified time. The OSCBundle class
allows easy creation & manipulation of OSC-bundles.
For further information see also http://opensoundcontrol.org/spec-1_0
-------------------------------------------------------------------------------
To send OSC-messages, you need an OSCClient, and to receive OSC-messages you
need an OSCServer.
The OSCClient uses an 'AF_INET / SOCK_DGRAM' type socket (see the 'socket'
module) to send binary representations of OSC-messages to a remote host:port
address.
The OSCServer listens on an 'AF_INET / SOCK_DGRAM' type socket bound to a local
port, and handles incoming requests. Either one-after-the-other (OSCServer) or
in a multi-threaded / multi-process fashion (ThreadingOSCServer/
ForkingOSCServer). If the Server has a callback-function (a.k.a. handler)
registered to 'deal with' (i.e. handle) the received message's OSC-address,
that function is called, passing it the (decoded) message.
The different OSCServers implemented here all support the (recursive) un-
bundling of OSC-bundles, and OSC-bundle timetags.
In fact, this implementation supports:
- OSC-messages with 'i' (int32), 'f' (float32), 'd' (double), 's' (string) and
'b' (blob / binary data) types
- OSC-bundles, including timetag-support
- OSC-address patterns including '*', '?', '{,}' and '[]' wildcards.
(please *do* read the OSC-spec! http://opensoundcontrol.org/spec-1_0 it
explains what these things mean.)
In addition, the OSCMultiClient supports:
- Sending a specific OSC-message to multiple remote servers
- Remote server subscription / unsubscription (through OSC-messages, of course)
- Message-address filtering.
-------------------------------------------------------------------------------
SimpleOSC:
Copyright (c) Daniel Holth & Clinton McChesney.
pyOSC:
Copyright (c) 2008-2010, Artem Baguinski <artm@v2.nl> et al., Stock, V2_Lab, Rotterdam, Netherlands.
Streaming support (OSC over TCP):
Copyright (c) 2010 Uli Franke <uli.franke@weiss.ch>, Weiss Engineering, Uster, Switzerland.
-------------------------------------------------------------------------------
Changelog:
-------------------------------------------------------------------------------
v0.3.0 - 27 Dec. 2007
Started out to extend the 'SimpleOSC' implementation (v0.2.3) by Daniel Holth & Clinton McChesney.
Rewrote OSCMessage
Added OSCBundle
v0.3.1 - 3 Jan. 2008
Added OSClient
Added OSCRequestHandler, loosely based on the original CallbackManager
Added OSCServer
Removed original CallbackManager
Adapted testing-script (the 'if __name__ == "__main__":' block at the end) to use new Server & Client
v0.3.2 - 5 Jan. 2008
Added 'container-type emulation' methods (getitem(), setitem(), __iter__() & friends) to OSCMessage
Added ThreadingOSCServer & ForkingOSCServer
- 6 Jan. 2008
Added OSCMultiClient
Added command-line options to testing-script (try 'python OSC.py --help')
v0.3.3 - 9 Jan. 2008
Added OSC-timetag support to OSCBundle & OSCRequestHandler
Added ThreadingOSCRequestHandler
v0.3.4 - 13 Jan. 2008
Added message-filtering to OSCMultiClient
Added subscription-handler to OSCServer
Added support for numpy/scipy int & float types. (these get converted to 'standard' 32-bit OSC ints / floats!)
Cleaned-up and added more Docstrings
v0.3.5 - 14 aug. 2008
Added OSCServer.reportErr(...) method
v0.3.6 - 19 April 2010
Added Streaming support (OSC over TCP)
Updated documentation
Moved pattern matching stuff into separate class (OSCAddressSpace) to
facilitate implementation of different server and client architectures.
Callbacks feature now a context (object oriented) but dynamic function
inspection keeps the code backward compatible
Moved testing code into separate testbench (testbench.py)
-----------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 Daniel Holth, Clinton McChesney
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
>
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
>
> For questions regarding this module contact Daniel Holth <dholth@stetson.edu>
> or visit http://www.stetson.edu/~ProctoLogic/
>
> Changelog:
> 15 Nov. 2001:
> Removed dependency on Python 2.0 features.
> - dwh
> 13 Feb. 2002:
> Added a generic callback handler.
> - dwh
"""
import math, re, socket, select, string, struct, sys, threading, time, types, array, errno, inspect
from SocketServer import UDPServer, DatagramRequestHandler, ForkingMixIn, ThreadingMixIn, StreamRequestHandler, TCPServer
from contextlib import closing
# NOTE: 'global' at module level is a no-op; kept as in the original source.
global version
version = ("0.3","6", "$Rev: 6382 $"[6:-2])

# Python types accepted as OSC float32 / int32 arguments; extended with
# numpy scalar types below when numpy is importable.
global FloatTypes
FloatTypes = [types.FloatType]

global IntTypes
IntTypes = [types.IntType]

# Offset (in seconds) of the NTP epoch (1 Jan 1900) relative to the Unix
# epoch; OSC timetags are expressed in NTP time.
global NTP_epoch
from calendar import timegm
NTP_epoch = timegm((1900,1,1,0,0,0)) # NTP time started in 1 Jan 1900
del timegm

# One NTP fractional-second unit is 1/2**32 s.
global NTP_units_per_second
NTP_units_per_second = 0x100000000 # about 232 picoseconds

##
# numpy/scipy support:
##

try:
    from numpy import typeDict

    # register numpy float scalar types as valid OSC 'float' arguments
    for ftype in ['float32', 'float64', 'float128']:
        try:
            FloatTypes.append(typeDict[ftype])
        except KeyError:
            pass

    # register numpy signed and unsigned int scalar types as OSC 'int's
    for itype in ['int8', 'int16', 'int32', 'int64']:
        try:
            IntTypes.append(typeDict[itype])
            IntTypes.append(typeDict['u' + itype])
        except KeyError:
            pass

    # thanks for those...
    del typeDict, ftype, itype

except ImportError:
    pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
""" Builds typetagged OSC messages.
OSCMessage objects are container objects for building OSC-messages.
On the 'front' end, they behave much like list-objects, and on the 'back' end
they generate a binary representation of the message, which can be sent over a network socket.
OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
and finally the arguments themselves, encoded in an OSC-specific way.
On the Python end, OSCMessage are lists of arguments, prepended by the message's address.
The message contents can be manipulated much like a list:
>>> msg = OSCMessage("/my/osc/address")
>>> msg.append('something')
>>> msg.insert(0, 'something else')
>>> msg[1] = 'entirely'
>>> msg.extend([1,2,3.])
>>> msg += [4, 5, 6.]
>>> del msg[3:6]
>>> msg.pop(-2)
5
>>> print msg
/my/osc/address ['something else', 'entirely', 1, 6.0]
OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
inherits its address from the left-hand operand. The right-hand operand's address is ignored.
To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!
Additional methods exist for retreiving typetags or manipulating items as (typetag, value) tuples.
"""
def __init__(self, address="", *args):
"""Instantiate a new OSCMessage.
The OSC-address can be specified with the 'address' argument.
The rest of the arguments are appended as data.
"""
self.clear(address)
if len(args)>0:
self.append(*args)
def setAddress(self, address):
"""Set or change the OSC-address
"""
self.address = address
def clear(self, address=""):
"""Clear (or set a new) OSC-address and clear any arguments appended so far
"""
self.address = address
self.clearData()
def clearData(self):
"""Clear any arguments appended so far
"""
self.typetags = ","
self.message = ""
def append(self, argument, typehint=None):
"""Appends data to the message, updating the typetags based on
the argument's type. If the argument is a blob (counted
string) pass in 'b' as typehint.
'argument' may also be a list or tuple, in which case its elements
will get appended one-by-one, all using the provided typehint
"""
if type(argument) == types.DictType:
argument = argument.items()
elif isinstance(argument, OSCMessage):
raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")
if hasattr(argument, '__iter__'):
for arg in argument:
self.append(arg, typehint)
return
if typehint == 'b':
binary = OSCBlob(argument)
tag = 'b'
elif typehint == 't':
binary = OSCTimeTag(argument)
tag = 't'
else:
tag, binary = OSCArgument(argument, typehint)
self.typetags += tag
self.message += binary
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString(self.address)
binary += OSCString(self.typetags)
binary += self.message
return binary
def __repr__(self):
"""Returns a string containing the decode Message
"""
return str(decodeOSC(self.getBinary()))
def __str__(self):
"""Returns the Message's address and contents as a string.
"""
return "%s %s" % (self.address, str(self.values()))
def __len__(self):
"""Returns the number of arguments appended so far
"""
return (len(self.typetags) - 1)
def __eq__(self, other):
"""Return True if two OSCMessages have the same address & content
"""
if not isinstance(other, self.__class__):
return False
return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)
def __ne__(self, other):
"""Return (not self.__eq__(other))
"""
return not self.__eq__(other)
def __add__(self, values):
"""Returns a copy of self, with the contents of 'values' appended
(see the 'extend()' method, below)
"""
msg = self.copy()
msg.extend(values)
return msg
def __iadd__(self, values):
"""Appends the contents of 'values'
(equivalent to 'extend()', below)
Returns self
"""
self.extend(values)
return self
def __radd__(self, values):
"""Appends the contents of this OSCMessage to 'values'
Returns the extended 'values' (list or tuple)
"""
out = list(values)
out.extend(self.values())
if type(values) == types.TupleType:
return tuple(out)
return out
def _reencode(self, items):
"""Erase & rebuild the OSCMessage contents from the given
list of (typehint, value) tuples"""
self.clearData()
for item in items:
self.append(item[1], item[0])
def values(self):
"""Returns a list of the arguments appended so far
"""
return decodeOSC(self.getBinary())[2:]
def tags(self):
"""Returns a list of typetags of the appended arguments
"""
return list(self.typetags.lstrip(','))
def items(self):
"""Returns a list of (typetag, value) tuples for
the arguments appended so far
"""
out = []
values = self.values()
typetags = self.tags()
for i in range(len(values)):
out.append((typetags[i], values[i]))
return out
def __contains__(self, val):
"""Test if the given value appears in the OSCMessage's arguments
"""
return (val in self.values())
def __getitem__(self, i):
"""Returns the indicated argument (or slice)
"""
return self.values()[i]
def __delitem__(self, i):
"""Removes the indicated argument (or slice)
"""
items = self.items()
del items[i]
self._reencode(items)
def _buildItemList(self, values, typehint=None):
if isinstance(values, OSCMessage):
items = values.items()
elif type(values) == types.ListType:
items = []
for val in values:
if type(val) == types.TupleType:
items.append(val[:2])
else:
items.append((typehint, val))
elif type(values) == types.TupleType:
items = [values[:2]]
else:
items = [(typehint, values)]
return items
def __setitem__(self, i, val):
"""Set indicatated argument (or slice) to a new value.
'val' can be a single int/float/string, or a (typehint, value) tuple.
Or, if 'i' is a slice, a list of these or another OSCMessage.
"""
items = self.items()
new_items = self._buildItemList(val)
if type(i) != types.SliceType:
if len(new_items) != 1:
raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")
new_items = new_items[0]
# finally...
items[i] = new_items
self._reencode(items)
def setItem(self, i, val, typehint=None):
"""Set indicated argument to a new value (with typehint)
"""
items = self.items()
items[i] = (typehint, val)
self._reencode(items)
def copy(self):
"""Returns a deep copy of this OSCMessage
"""
msg = self.__class__(self.address)
msg.typetags = self.typetags
msg.message = self.message
return msg
def count(self, val):
"""Returns the number of times the given value occurs in the OSCMessage's arguments
"""
return self.values().count(val)
def index(self, val):
"""Returns the index of the first occurence of the given value in the OSCMessage's arguments.
Raises ValueError if val isn't found
"""
return self.values().index(val)
def extend(self, values):
"""Append the contents of 'values' to this OSCMessage.
'values' can be another OSCMessage, or a list/tuple of ints/floats/strings
"""
items = self.items() + self._buildItemList(values)
self._reencode(items)
def insert(self, i, val, typehint = None):
"""Insert given value (with optional typehint) into the OSCMessage
at the given index.
"""
items = self.items()
for item in reversed(self._buildItemList(val)):
items.insert(i, item)
self._reencode(items)
def popitem(self, i):
"""Delete the indicated argument from the OSCMessage, and return it
as a (typetag, value) tuple.
"""
items = self.items()
item = items.pop(i)
self._reencode(items)
return item
def pop(self, i):
"""Delete the indicated argument from the OSCMessage, and return it.
"""
return self.popitem(i)[1]
def reverse(self):
"""Reverses the arguments of the OSCMessage (in place)
"""
items = self.items()
items.reverse()
self._reencode(items)
def remove(self, val):
    """Removes the first argument with the given value from the OSCMessage.
    Raises ValueError if val isn't found.
    """
    items = self.items()
    # Scan the decoded items directly: cheaper than calling
    # self.values().index(val) followed by self.items(), which would
    # decode the message twice.
    for i, (t, v) in enumerate(items):
        if v == val:
            del items[i]
            self._reencode(items)
            return
    # BUG FIX: the original raised ValueError("...%s..." % str(m)) where
    # 'm' is undefined, turning the intended ValueError into a NameError.
    raise ValueError("'%s' not in OSCMessage" % str(val))
def __iter__(self):
    """Returns an iterator of the OSCMessage's arguments
    """
    for value in self.values():
        yield value
def __reversed__(self):
    """Returns a reverse iterator of the OSCMessage's arguments
    """
    # iterate a reversed copy of the decoded value list
    return iter(self.values()[::-1])
def itervalues(self):
    """Returns an iterator of the OSCMessage's arguments
    """
    for value in self.values():
        yield value
def iteritems(self):
    """Returns an iterator of the OSCMessage's arguments as
    (typetag, value) tuples
    """
    for pair in self.items():
        yield pair
def itertags(self):
    """Returns an iterator of the OSCMessage's arguments' typetags
    """
    for tag in self.tags():
        yield tag
class OSCBundle(OSCMessage):
    """Builds a 'bundle' of OSC messages.
    OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
    An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
    (And yes, OSC-bundles may contain other OSC-bundles...)
    OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
    - if an item or items to be appended or inserted are not OSCMessage objects,
    OSCMessage objectss are created to encapsulate the item(s)
    - an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
    The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
    - OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
    The default timetag value (0) means 'immediately'
    """
    def __init__(self, address="", time=0):
        """Instantiate a new OSCBundle.
        The default OSC-address for newly created OSCMessages
        can be specified with the 'address' argument
        The bundle's timetag can be set with the 'time' argument
        """
        super(OSCBundle, self).__init__(address)
        # 'Python time': floating seconds since the Epoch; 0 == 'immediately'
        self.timetag = time
    def __str__(self):
        """Returns the Bundle's contents (and timetag, if nonzero) as a string.
        """
        if (self.timetag > 0.):
            out = "#bundle (%s) [" % self.getTimeTagStr()
        else:
            out = "#bundle ["
        if self.__len__():
            for val in self.values():
                out += "%s, " % str(val)
            out = out[:-2]  # strip trailing space and comma
        return out + "]"
    def setTimeTag(self, time):
        """Set or change the OSCBundle's TimeTag
        In 'Python Time', that's floating seconds since the Epoch
        """
        # negative timetags are silently ignored
        if time >= 0:
            self.timetag = time
    def getTimeTagStr(self):
        """Return the TimeTag as a human-readable string
        """
        fract, secs = math.modf(self.timetag)
        # HH:MM:SS slice of ctime(), plus the fractional seconds
        out = time.ctime(secs)[11:19]
        out += ("%.3f" % fract)[1:]
        return out
    def append(self, argument, typehint = None):
        """Appends data to the bundle, creating an OSCMessage to encapsulate
        the provided argument unless this is already an OSCMessage.
        Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
        If 'argument' is an iterable, its elements will be encapsuated by a single OSCMessage.
        Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
        - if 'addr' appears in the dict, its value overrides the OSCBundle's address
        - if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
        """
        if isinstance(argument, OSCMessage):
            binary = OSCBlob(argument.getBinary())
        else:
            msg = OSCMessage(self.address)
            if type(argument) == types.DictType:
                if 'addr' in argument:
                    msg.setAddress(argument['addr'])
                if 'args' in argument:
                    msg.append(argument['args'], typehint)
            else:
                msg.append(argument, typehint)
            binary = OSCBlob(msg.getBinary())
        # bundle elements are stored as size-prefixed blobs (typetag 'b')
        self.message += binary
        self.typetags += 'b'
    def getBinary(self):
        """Returns the binary representation of the message
        """
        # per the OSC spec: "#bundle" marker, timetag, then the elements
        binary = OSCString("#bundle")
        binary += OSCTimeTag(self.timetag)
        binary += self.message
        return binary
    def _reencapsulate(self, decoded):
        # Rebuild an OSCMessage (or nested OSCBundle) object from a
        # decodeOSC()-style list.
        if decoded[0] == "#bundle":
            msg = OSCBundle()
            msg.setTimeTag(decoded[1])
            for submsg in decoded[2:]:
                msg.append(self._reencapsulate(submsg))
        else:
            msg = OSCMessage(decoded[0])
            tags = decoded[1].lstrip(',')
            for i in range(len(tags)):
                msg.append(decoded[2+i], tags[i])
        return msg
    def values(self):
        """Returns a list of the OSCMessages appended so far
        """
        # decode our own binary form; skip the "#bundle" marker and timetag
        out = []
        for decoded in decodeOSC(self.getBinary())[2:]:
            out.append(self._reencapsulate(decoded))
        return out
    def __eq__(self, other):
        """Return True if two OSCBundles have the same timetag & content
        """
        if not isinstance(other, self.__class__):
            return False
        return (self.timetag == other.timetag) and (self.typetags == other.typetags) and (self.message == other.message)
    def copy(self):
        """Returns a deep copy of this OSCBundle
        """
        copy = super(OSCBundle, self).copy()
        copy.timetag = self.timetag
        return copy
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
    """Convert a string into a zero-padded OSC String.
    The length of the resulting string is always a multiple of 4 bytes.
    The string ends with 1 to 4 zero-bytes ('\x00')
    """
    # round (length + terminating NUL) up to the next multiple of 4;
    # struct's 's' format zero-pads the string out to that size
    paddedLength = (len(next) // 4 + 1) * 4
    return struct.pack(">%ds" % (paddedLength), str(next))
def OSCBlob(next):
    """Convert a string into an OSC Blob.
    An OSC-Blob is a binary encoded block of data, prepended by a 'size' (int32).
    The size is always a mutiple of 4 bytes.
    The blob ends with 0 to 3 zero-bytes ('\x00')
    """
    if type(next) not in types.StringTypes:
        # non-string data cannot be encoded; mirror the silent fallback
        return ""
    # pad the payload out to a multiple of 4 bytes
    paddedLength = int(math.ceil((len(next)) / 4.0) * 4)
    return struct.pack(">i%ds" % (paddedLength), paddedLength, next)
def OSCArgument(next, typehint=None):
    """ Convert some Python types to their
    OSC binary representations, returning a
    (typetag, data) tuple.
    """
    if not typehint:
        # no hint given: infer the OSC type from the Python type,
        # falling back to a string representation
        if type(next) in FloatTypes:
            return ('f', struct.pack(">f", float(next)))
        if type(next) in IntTypes:
            return ('i', struct.pack(">i", int(next)))
        return ('s', OSCString(next))
    # explicit hint: try the requested numeric encoding, falling back
    # to a string when the value cannot be converted (ValueError only,
    # matching the original behaviour)
    packers = {'d': (">d", float), 'f': (">f", float), 'i': (">i", int)}
    if typehint in packers:
        fmt, convert = packers[typehint]
        try:
            return (typehint, struct.pack(fmt, convert(next)))
        except ValueError:
            pass
    return ('s', OSCString(next))
def OSCTimeTag(time):
    """Convert a time in floating seconds to its
    OSC binary representation
    """
    if time <= 0:
        # (0, 1) is the special OSC timetag meaning 'immediately'
        return struct.pack('>LL', 0, 1)
    fract, secs = math.modf(time)
    # shift from the Unix epoch to the NTP epoch used by OSC timetags
    return struct.pack('>LL', long(secs - NTP_epoch), long(fract * NTP_units_per_second))
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
"""Reads the next (null-terminated) block of data
"""
length = string.find(data,"\0")
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length], data[nextData:])
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
    """Tries to interpret the next 4 bytes of the data
    as a 32-bit integer. """
    # Returns a (value, remaining-data) tuple, like the other _read* helpers.
    if(len(data)<4):
        # not enough bytes left: report the problem and return the
        # data untouched with a 0 value (best-effort, no exception)
        print "Error: too few bytes for int", data, len(data)
        rest = data
        integer = 0
    else:
        integer = struct.unpack(">i", data[0:4])[0]
        rest = data[4:]
    return (integer, rest)
def _readLong(data):
    """Tries to interpret the next 8 bytes of the data
    as a 64-bit signed integer.
    Returns a (value, remaining-data) tuple.
    """
    high, low = struct.unpack(">ll", data[0:8])
    # combine the two signed 32-bit halves into one 64-bit value
    # (Python 2 'long' avoids overflow on the shift)
    big = (long(high) << 32) + low
    rest = data[8:]
    return (big, rest)
def _readTimeTag(data):
    """Tries to interpret the next 8 bytes of the data
    as a TimeTag.
    Returns a (time, remaining-data) tuple, where 'time' is in
    floating seconds since the Unix epoch (0.0 means 'immediately').
    """
    high, low = struct.unpack(">LL", data[0:8])
    if (high == 0) and (low <= 1):
        # the special OSC 'immediately' timetag
        time = 0.0
    else:
        # BUG FIX: the original computed float(low / NTP_units_per_second),
        # which divides two integers BEFORE converting to float — under
        # Python 2 integer division that always yields 0, silently dropping
        # the fractional seconds. Convert first, then divide.
        time = int(NTP_epoch + high) + float(low) / NTP_units_per_second
    rest = data[8:]
    return (time, rest)
def _readFloat(data):
    """Tries to interpret the next 4 bytes of the data
    as a 32-bit float.
    Returns a (value, remaining-data) tuple.
    """
    if(len(data)<4):
        # not enough bytes left: report and return the data untouched
        print "Error: too few bytes for float", data, len(data)
        rest = data
        # NOTE: this local name shadows the builtin 'float' for the
        # remainder of the function
        float = 0
    else:
        float = struct.unpack(">f", data[0:4])[0]
        rest = data[4:]
    return (float, rest)
def _readDouble(data):
    """Tries to interpret the next 8 bytes of the data
    as a 64-bit float.
    Returns a (value, remaining-data) tuple.
    """
    if(len(data)<8):
        # not enough bytes left: report and return the data untouched
        print "Error: too few bytes for double", data, len(data)
        rest = data
        # NOTE: this local name shadows the builtin 'float'
        float = 0
    else:
        float = struct.unpack(">d", data[0:8])[0]
        rest = data[8:]
    return (float, rest)
def decodeOSC(data):
    """Converts a binary OSC message to a Python list.
    Returns [address, typetags, arg, arg, ...] for a plain message, or
    ["#bundle", timetag, decoded-element, ...] for a bundle
    (elements are decoded recursively).
    """
    # dispatch table: OSC typetag -> decoder function
    table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob, "d":_readDouble, "t":_readTimeTag}
    decoded = []
    address, rest = _readString(data)
    if address.startswith(","):
        # address-less message: what we just read was the typetag string
        typetags = address
        address = ""
    else:
        typetags = ""
    if address == "#bundle":
        # a bundle is a timetag followed by size-prefixed elements
        time, rest = _readTimeTag(rest)
        decoded.append(address)
        decoded.append(time)
        while len(rest)>0:
            length, rest = _readInt(rest)
            decoded.append(decodeOSC(rest[:length]))
            rest = rest[length:]
    elif len(rest)>0:
        if not len(typetags):
            typetags, rest = _readString(rest)
        decoded.append(address)
        decoded.append(typetags)
        if typetags.startswith(","):
            # decode each argument according to its typetag
            for tag in typetags[1:]:
                value, rest = table[tag](rest)
                decoded.append(value)
        else:
            raise OSCError("OSCMessage's typetag-string lacks the magic ','")
    return decoded
######
#
# Utility functions
#
######
def hexDump(bytes):
    """ Useful utility; prints the string in hexadecimal.
    """
    # NOTE: the parameter name shadows the builtin 'bytes'
    print "byte 0 1 2 3 4 5 6 7 8 9 A B C D E F"
    num = len(bytes)
    for i in range(num):
        if (i) % 16 == 0:
            # start a new output line every 16 bytes
            line = "%02X0 : " % (i/16)
        line += "%02X " % ord(bytes[i])
        if (i+1) % 16 == 0:
            print "%s: %s" % (line, repr(bytes[i-15:i+1]))
            line = ""
    # print any remainder that didn't fill a complete 16-byte line
    bytes_left = num % 16
    if bytes_left:
        print "%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:]))
def getUrlStr(*args):
    """Convert provided arguments to a string in 'host:port/prefix' format
    Args can be:
    - (host, port)
    - (host, port), prefix
    - host, port
    - host, port, prefix
    """
    if not len(args):
        return ""
    # normalise the argument forms: either a (host, port) tuple first,
    # or host and port as separate leading arguments
    if type(args[0]) == types.TupleType:
        host = args[0][0]
        port = args[0][1]
        args = args[1:]
    else:
        host = args[0]
        port = args[1]
        args = args[2:]
    if len(args):
        prefix = args[0]
    else:
        prefix = ""
    if len(host) and (host != '0.0.0.0'):
        # best-effort reverse lookup to show a hostname instead of an IP
        try:
            (host, _, _) = socket.gethostbyaddr(host)
        except socket.error:
            pass
    else:
        # empty or wildcard address: present it as 'localhost'
        host = 'localhost'
    if type(port) == types.IntType:
        return "%s:%d%s" % (host, port, prefix)
    else:
        # no (numeric) port known: leave it out of the string
        return host + prefix
def parseUrlStr(url):
    """Convert provided string in 'host:port/prefix' format to it's components
    Returns ((host, port), prefix)
    """
    if not (type(url) in types.StringTypes and len(url)):
        return (None, '')
    # strip any 'scheme://' part (e.g. 'osc://')
    i = url.find("://")
    if i > -1:
        url = url[i+3:]
    # split into host and the 'port/prefix' tail at the first ':'
    i = url.find(':')
    if i > -1:
        host = url[:i].strip()
        tail = url[i+1:].strip()
    else:
        host = ''
        tail = url
    # scan the leading digits of the tail: that's the port number.
    # (for...else: if the whole tail is digits, include the last one too)
    for i in range(len(tail)):
        if not tail[i].isdigit():
            break
    else:
        i += 1
    portstr = tail[:i].strip()
    tail = tail[i:].strip()
    # the prefix ends at the first filter-operator character, if any
    found = len(tail)
    for c in ('/', '+', '-', '*'):
        i = tail.find(c)
        if (i > -1) and (i < found):
            found = i
    head = tail[:found].strip()
    prefix = tail[found:].strip()
    prefix = prefix.strip('/')
    if len(prefix) and prefix[0] not in ('+', '-', '*'):
        prefix = '/' + prefix
    if len(head) and not len(host):
        # no ':' was present: what looked like a prefix head is the host
        host = head
    if len(host):
        # best-effort resolution of the hostname to an IP address
        try:
            host = socket.gethostbyname(host)
        except socket.error:
            pass
    try:
        port = int(portstr)
    except ValueError:
        port = None
    return ((host, port), prefix)
######
#
# OSCClient class
#
######
class OSCClient(object):
    """Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
    """
    # set outgoing socket buffer size
    sndbuf_size = 4096 * 8
    def __init__(self, server=None):
        """Construct an OSC Client.
        - server: Local OSCServer-instance this client will use the socket of for transmissions.
        If none is supplied, a socket will be created.
        """
        self.socket = None
        self.setServer(server)
        # remote (host, port) once connect() has been called, else None
        self.client_address = None
    def _setSocket(self, skt):
        """Set and configure client socket"""
        if self.socket != None:
            self.close()
        self.socket = skt
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
        # cached file descriptor, used with select() when sending
        self._fd = self.socket.fileno()
    def _ensureConnected(self, address):
        """Make sure client has a socket connected to address"""
        if not self.socket:
            # a 4-tuple address means IPv6, a 2-tuple IPv4
            if len(address) == 4:
                address_family = socket.AF_INET6
            else:
                address_family = socket.AF_INET
            self._setSocket(socket.socket(address_family, socket.SOCK_DGRAM))
        self.socket.connect(address)
    def setServer(self, server):
        """Associate this Client with given server.
        The Client will send from the Server's socket.
        The Server will use this Client instance to send replies.
        """
        if server == None:
            # detach from the current server (if any); note this closes
            # our own socket, since self.server.client is self
            if hasattr(self,'server') and self.server:
                if self.server.client != self:
                    raise OSCClientError("Internal inconsistency")
                self.server.client.close()
                self.server.client = None
            self.server = None
            return
        if not isinstance(server, OSCServer):
            raise ValueError("'server' argument is not a valid OSCServer object")
        # share the server's socket (dup'd) and register ourselves as
        # the server's reply-client, displacing any previous one
        self._setSocket(server.socket.dup())
        self.server = server
        if self.server.client != None:
            self.server.client.close()
        self.server.client = self
    def close(self):
        """Disconnect & close the Client's socket
        """
        if self.socket != None:
            self.socket.close()
            self.socket = None
    def __str__(self):
        """Returns a string containing this Client's Class-name, software-version
        and the remote-address it is connected to (if any)
        """
        out = self.__class__.__name__
        out += " v%s.%s-%s" % version
        addr = self.address()
        if addr:
            out += " connected to osc://%s" % getUrlStr(addr)
        else:
            out += " (unconnected)"
        return out
    def __eq__(self, other):
        """Compare function.
        """
        if not isinstance(other, self.__class__):
            return False
        # equal iff both wrap the same underlying socket and server
        if self.socket and other.socket:
            sockEqual = cmp(self.socket._sock, other.socket._sock)
        else:
            sockEqual = (self.socket == None and other.socket == None)
        if not sockEqual:
            return False
        if self.server and other.server:
            return cmp(self.server, other.server)
        else:
            return self.server == None and other.server == None
    def __ne__(self, other):
        """Compare function.
        """
        return not self.__eq__(other)
    def address(self):
        """Returns a (host,port) tuple of the remote server this client is
        connected to or None if not connected to any server.
        """
        try:
            if self.socket:
                return self.socket.getpeername()
            else:
                return None
        except socket.error:
            return None
    def connect(self, address):
        """Bind to a specific OSC server:
        the 'address' argument is a (host, port) tuple
        - host: hostname of the remote OSC server,
        - port: UDP-port the remote OSC server listens to.
        """
        try:
            self._ensureConnected(address)
            self.client_address = address
        except socket.error, e:
            self.client_address = None
            raise OSCClientError("SocketError: %s" % str(e))
        if self.server != None:
            # tell the associated server where replies should go
            self.server.return_port = address[1]
    def sendto(self, msg, address, timeout=None):
        """Send the given OSCMessage to the specified address.
        - msg: OSCMessage (or OSCBundle) to be sent
        - address: (host, port) tuple specifing remote server to send the message to
        - timeout: A timeout value for attempting to send. If timeout == None,
        this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket.
        """
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        # wait (up to 'timeout') until the socket is writable
        ret = select.select([],[self._fd], [], timeout)
        try:
            ret[1].index(self._fd)
        except:
            # for the very rare case this might happen
            raise OSCClientError("Timed out waiting for file descriptor")
        try:
            self._ensureConnected(address)
            self.socket.sendall(msg.getBinary())
            if self.client_address:
                # re-connect to the original peer after the one-off send
                self.socket.connect(self.client_address)
        except socket.error, e:
            if e[0] in (7, 65):  # 7 = 'no address associated with nodename', 65 = 'no route to host'
                raise e
            else:
                raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
    def send(self, msg, timeout=None):
        """Send the given OSCMessage.
        The Client must be already connected.
        - msg: OSCMessage (or OSCBundle) to be sent
        - timeout: A timeout value for attempting to send. If timeout == None,
        this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket,
        or when the Client isn't connected to a remote server.
        """
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        if not self.socket:
            raise OSCClientError("Called send() on non-connected client")
        # wait (up to 'timeout') until the socket is writable
        ret = select.select([],[self._fd], [], timeout)
        try:
            ret[1].index(self._fd)
        except:
            # for the very rare case this might happen
            raise OSCClientError("Timed out waiting for file descriptor")
        try:
            self.socket.sendall(msg.getBinary())
        except socket.error, e:
            if e[0] in (7, 65):  # 7 = 'no address associated with nodename', 65 = 'no route to host'
                raise e
            else:
                raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
    """Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
    { '<addr>':True, '<addr>':False, ... }
    Returns a list: ['<prefix>', filters]
    """
    out = {}
    if type(args) in types.StringTypes:
        args = [args]
    prefix = None
    for arg in args:
        head = None
        # split on '+' first; each piece may itself contain '-' filters
        for plus in arg.split('+'):
            minus = plus.split('-')
            plusfs = minus.pop(0).strip()
            if len(plusfs):
                plusfs = '/' + plusfs.strip('/')
            if (head == None) and (plusfs != "/*"):
                # the first non-wildcard token is the prefix, not a filter
                head = plusfs
            elif len(plusfs):
                if plusfs == '/*':
                    out = { '/*':True }  # reset all previous filters
                else:
                    out[plusfs] = True
            for minusfs in minus:
                minusfs = minusfs.strip()
                if len(minusfs):
                    minusfs = '/' + minusfs.strip('/')
                    if minusfs == '/*':
                        out = { '/*':False }  # reset all previous filters
                    else:
                        out[minusfs] = False
        if prefix == None:
            prefix = head
    return [prefix, out]
def getFilterStr(filters):
    """Return the given 'filters' dict as a list of
    '+<addr>' | '-<addr>' filter-strings
    The list starts with an explicit '+/*' or '-/*' default, followed by
    one entry per specific address.
    """
    if not len(filters):
        return []
    # choose the leading catch-all: explicit '/*' entry wins, otherwise
    # '+/*' iff at least one address is excluded
    if '/*' in filters:
        if filters['/*']:
            out = ["+/*"]
        else:
            out = ["-/*"]
    else:
        if False in filters.values():
            out = ["+/*"]
        else:
            out = ["-/*"]
    # 'include' renames the original loop variable, which shadowed the
    # builtin 'bool'
    for (addr, include) in filters.items():
        if addr == '/*':
            continue
        if include:
            out.append("+%s" % addr)
        else:
            out.append("-%s" % addr)
    return out
# A translation-table for mapping OSC-address expressions to Python 're' expressions:
# '{' -> '(' and '}' -> ')' and ',' -> '|' (alternation), '?' -> '.' (any single char).
# (string.maketrans is the Python 2 spelling; Python 3 would use str.maketrans.)
OSCtrans = string.maketrans("{,}?","(|).")
def getRegEx(pattern):
    """Compiles and returns a 'regular expression' object for the given address-pattern.
    """
    # Escape regex metacharacters first, then map OSC wildcards onto their
    # 're' equivalents. Order matters: '.' must be escaped before '*'
    # introduces '.*', and '('/')' must be escaped before the translation
    # table turns '{'/'}' into '('/')'.
    for plain, escaped in ((".", r"\."), ("(", r"\("), (")", r"\)")):
        pattern = pattern.replace(plain, escaped)
    pattern = pattern.replace("*", r".*")
    return re.compile(pattern.translate(OSCtrans))
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
    """'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
    This client keeps a dict of 'OSCTargets'. and sends each OSCMessage to each OSCTarget
    The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
    the OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
    """
    def __init__(self, server=None):
        """Construct a "Multi" OSC Client.
        - server: Local OSCServer-instance this client will use the socket of for transmissions.
        If none is supplied, a socket will be created.
        """
        super(OSCMultiClient, self).__init__(server)
        # maps (host, port) -> [prefix, filters-dict]
        self.targets = {}
    def _searchHostAddr(self, host):
        """Search the subscribed OSCTargets for (the first occurence of) given host.
        Returns a (host, port) tuple
        """
        try:
            host = socket.gethostbyname(host)
        except socket.error:
            pass
        for addr in self.targets.keys():
            if host == addr[0]:
                return addr
        raise NotSubscribedError((host, None))
    def _updateFilters(self, dst, src):
        """Update a 'filters' dict with values form another 'filters' dict:
        - src[a] == True and dst[a] == False: del dst[a]
        - src[a] == False and dst[a] == True: del dst[a]
        - a not in dst: dst[a] == src[a]
        """
        if '/*' in src.keys():  # reset filters
            dst.clear()  # 'match everything' == no filters
            if not src.pop('/*'):
                dst['/*'] = False  # 'match nothing'
        for (addr, bool) in src.items():
            # NOTE: the loop variable shadows the builtin 'bool'
            if (addr in dst.keys()) and (dst[addr] != bool):
                # opposite setting already present: they cancel out
                del dst[addr]
            else:
                dst[addr] = bool
    def _setTarget(self, address, prefix=None, filters=None):
        """Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
        - address ((host, port) tuple): IP-address & UDP-port
        - prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
        sent to this OSCTarget (optional)
        """
        if address not in self.targets.keys():
            self.targets[address] = ["",{}]
        if prefix != None:
            if len(prefix):
                # make sure prefix starts with ONE '/', and does not end with '/'
                prefix = '/' + prefix.strip('/')
            self.targets[address][0] = prefix
        if filters != None:
            if type(filters) in types.StringTypes:
                (_, filters) = parseFilterStr(filters)
            elif type(filters) != types.DictType:
                raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
            self._updateFilters(self.targets[address][1], filters)
    def setOSCTarget(self, address, prefix=None, filters=None):
        """Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
        the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
        or a 'host' (string) : The host will be looked-up
        - prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
        sent to this OSCTarget (optional)
        """
        if type(address) in types.StringTypes:
            address = self._searchHostAddr(address)
        elif (type(address) == types.TupleType):
            # normalise the hostname to an IP address where possible
            (host, port) = address[:2]
            try:
                host = socket.gethostbyname(host)
            except:
                pass
            address = (host, port)
        else:
            raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")
        self._setTarget(address, prefix, filters)
    def setOSCTargetFromStr(self, url):
        """Adds or modifies a subscribed OSCTarget from the given string, which should be in the
        '<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
        """
        (addr, tail) = parseUrlStr(url)
        (prefix, filters) = parseFilterStr(tail)
        self._setTarget(addr, prefix, filters)
    def _delTarget(self, address, prefix=None):
        """Delete the specified OSCTarget from the Client's dict.
        the 'address' argument must be a (host, port) tuple.
        If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
        """
        try:
            if prefix == None:
                del self.targets[address]
            elif prefix == self.targets[address][0]:
                del self.targets[address]
        except KeyError:
            raise NotSubscribedError(address, prefix)
    def delOSCTarget(self, address, prefix=None):
        """Delete the specified OSCTarget from the Client's dict.
        the 'address' argument can be a ((host, port) tuple), or a hostname.
        If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
        """
        if type(address) in types.StringTypes:
            address = self._searchHostAddr(address)
        if type(address) == types.TupleType:
            (host, port) = address[:2]
            try:
                host = socket.gethostbyname(host)
            except socket.error:
                pass
            address = (host, port)
            self._delTarget(address, prefix)
    def hasOSCTarget(self, address, prefix=None):
        """Return True if the given OSCTarget exists in the Client's dict.
        the 'address' argument can be a ((host, port) tuple), or a hostname.
        If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
        """
        if type(address) in types.StringTypes:
            address = self._searchHostAddr(address)
        if type(address) == types.TupleType:
            (host, port) = address[:2]
            try:
                host = socket.gethostbyname(host)
            except socket.error:
                pass
            address = (host, port)
            if address in self.targets.keys():
                if prefix == None:
                    return True
                elif prefix == self.targets[address][0]:
                    return True
        return False
    def getOSCTargets(self):
        """Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
        """
        out = {}
        for ((host, port), pf) in self.targets.items():
            # best-effort reverse lookup so keys show hostnames
            try:
                (host, _, _) = socket.gethostbyaddr(host)
            except socket.error:
                pass
            out[(host, port)] = pf
        return out
    def getOSCTarget(self, address):
        """Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
        'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
        Returns (None, ['',{}]) if address not found.
        """
        if type(address) in types.StringTypes:
            address = self._searchHostAddr(address)
        if (type(address) == types.TupleType):
            (host, port) = address[:2]
            try:
                host = socket.gethostbyname(host)
            except socket.error:
                pass
            address = (host, port)
            if (address in self.targets.keys()):
                try:
                    (host, _, _) = socket.gethostbyaddr(host)
                except socket.error:
                    pass
                return ((host, port), self.targets[address])
        return (None, ['',{}])
    def clearOSCTargets(self):
        """Erases all OSCTargets from the Client's dict
        """
        self.targets = {}
    def updateOSCTargets(self, dict):
        """Update the Client's OSCTargets dict with the contents of 'dict'
        The given dict's items MUST be of the form
        { (host, port):[prefix, filters], ... }
        """
        # NOTE: the parameter name shadows the builtin 'dict'
        for ((host, port), (prefix, filters)) in dict.items():
            val = [prefix, {}]
            self._updateFilters(val[1], filters)
            try:
                host = socket.gethostbyname(host)
            except socket.error:
                pass
            self.targets[(host, port)] = val
    def getOSCTargetStr(self, address):
        """Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
        'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
        Returns (None, []) if address not found.
        """
        (addr, (prefix, filters)) = self.getOSCTarget(address)
        if addr == None:
            return (None, [])
        return ("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))
    def getOSCTargetStrings(self):
        """Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
        """
        out = []
        for (addr, (prefix, filters)) in self.targets.items():
            out.append(("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters)))
        return out
    def connect(self, address):
        """The OSCMultiClient isn't allowed to connect to any specific
        address.
        """
        return NotImplemented
    def sendto(self, msg, address, timeout=None):
        """Send the given OSCMessage.
        The specified address is ignored. Instead this method calls send() to
        send the message to all subscribed clients.
        - msg: OSCMessage (or OSCBundle) to be sent
        - address: (host, port) tuple specifing remote server to send the message to
        - timeout: A timeout value for attempting to send. If timeout == None,
        this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket.
        """
        self.send(msg, timeout)
    def _filterMessage(self, filters, msg):
        """Checks the given OSCMessge against the given filters.
        'filters' is a dict containing OSC-address:bool pairs.
        If 'msg' is an OSCBundle, recursively filters its constituents.
        Returns None if the message is to be filtered, else returns the message.
        or
        Returns a copy of the OSCBundle with the filtered messages removed.
        """
        if isinstance(msg, OSCBundle):
            out = msg.copy()
            msgs = out.values()
            out.clearData()
            for m in msgs:
                m = self._filterMessage(filters, m)
                if m:  # this catches 'None' and empty bundles.
                    out.append(m)
        elif isinstance(msg, OSCMessage):
            # establish the default verdict from the '/*' entry (if any),
            # else from whether any exclusion exists
            if '/*' in filters.keys():
                if filters['/*']:
                    out = msg
                else:
                    out = None
            elif False in filters.values():
                out = msg
            else:
                out = None
        else:
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        # a specific filter matching the message's address overrides the default
        expr = getRegEx(msg.address)
        for addr in filters.keys():
            if addr == '/*':
                continue
            match = expr.match(addr)
            if match and (match.end() == len(addr)):
                if filters[addr]:
                    out = msg
                else:
                    out = None
                break
        return out
    def _prefixAddress(self, prefix, msg):
        """Makes a copy of the given OSCMessage, then prepends the given prefix to
        The message's OSC-address.
        If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
        """
        out = msg.copy()
        if isinstance(msg, OSCBundle):
            msgs = out.values()
            out.clearData()
            for m in msgs:
                out.append(self._prefixAddress(prefix, m))
        elif isinstance(msg, OSCMessage):
            out.setAddress(prefix + out.address)
        else:
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
        return out
    def send(self, msg, timeout=None):
        """Send the given OSCMessage to all subscribed OSCTargets
        - msg: OSCMessage (or OSCBundle) to be sent
        - timeout: A timeout value for attempting to send. If timeout == None,
        this call blocks until socket is available for writing.
        Raises OSCClientError when timing out while waiting for the socket.
        """
        for (address, (prefix, filters)) in self.targets.items():
            if len(filters):
                out = self._filterMessage(filters, msg)
                if not out:  # this catches 'None' and empty bundles.
                    continue
            else:
                out = msg
            if len(prefix):
                # NOTE(review): this prefixes the original 'msg', not the
                # filtered 'out' — looks like filtering is discarded when a
                # prefix is set; verify intended behaviour before changing
                out = self._prefixAddress(prefix, msg)
            binary = out.getBinary()
            # wait (up to 'timeout') until the socket is writable
            ret = select.select([],[self._fd], [], timeout)
            try:
                ret[1].index(self._fd)
            except:
                # for the very rare case this might happen
                raise OSCClientError("Timed out waiting for file descriptor")
            try:
                while len(binary):
                    sent = self.socket.sendto(binary, address)
                    binary = binary[sent:]
            except socket.error, e:
                if e[0] in (7, 65):  # 7 = 'no address associated with nodename', 65 = 'no route to host'
                    raise e
                else:
                    raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
class OSCAddressSpace:
    """Registry mapping OSC address-strings to callback functions, plus a
    dispatcher that matches incoming address-patterns against the registry.
    """
    def __init__(self):
        # maps OSC-address (string, or the special key 'default') -> callback
        self.callbacks = {}
    def addMsgHandler(self, address, callback):
        """Register a handler for an OSC-address
        - 'address' is the OSC address-string.
        the address-string should start with '/' and may not contain '*'
        - 'callback' is the function called for incoming OSCMessages that match 'address'.
        The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
        """
        for chk in '*?,[]{}# ':
            if chk in address:
                raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")
        if type(callback) not in (types.FunctionType, types.MethodType):
            raise OSCServerError("Message callback '%s' is not callable" % repr(callback))
        if address != 'default':
            # normalise to exactly one leading '/' and no trailing '/'
            address = '/' + address.strip('/')
        self.callbacks[address] = callback
    def delMsgHandler(self, address):
        """Remove the registered handler for the given OSC-address
        """
        del self.callbacks[address]
    def getOSCAddressSpace(self):
        """Returns a list containing all OSC-addresses registerd with this Server.
        """
        return self.callbacks.keys()
    def dispatchMessage(self, pattern, tags, data, client_address):
        """Attmept to match the given OSC-address pattern, which may contain '*',
        against all callbacks registered with the OSCServer.
        Calls the matching callback and returns whatever it returns.
        If no match is found, and a 'default' callback is registered, it calls that one,
        or raises NoCallbackError if a 'default' callback is not registered.
        - pattern (string): The OSC-address of the receied message
        - tags (string): The OSC-typetags of the receied message's arguments, without ','
        - data (list): The message arguments
        """
        if len(tags) != len(data):
            raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))
        expr = getRegEx(pattern)
        replies = []
        matched = 0
        for addr in self.callbacks.keys():
            match = expr.match(addr)
            # require the pattern to match the WHOLE registered address
            if match and (match.end() == len(addr)):
                reply = self.callbacks[addr](pattern, tags, data, client_address)
                matched += 1
                if isinstance(reply, OSCMessage):
                    replies.append(reply)
                elif reply != None:
                    # BUG FIX: the original referenced self.server.callbacks here,
                    # but this class has no 'server' attribute — raising the
                    # intended TypeError would itself crash with AttributeError.
                    raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.callbacks[addr], type(reply)))
        if matched == 0:
            if 'default' in self.callbacks:
                reply = self.callbacks['default'](pattern, tags, data, client_address)
                if isinstance(reply, OSCMessage):
                    replies.append(reply)
                elif reply != None:
                    # BUG FIX: same self.server.callbacks -> self.callbacks correction
                    raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.callbacks['default'], type(reply)))
            else:
                raise NoCallbackError(pattern)
        return replies
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
    """RequestHandler class for the OSCServer:
    decodes one incoming UDP datagram, dispatches it through the server's
    address space, and sends any replies back to the originating client.
    """
    def setup(self):
        """Prepare RequestHandler.
        Unpacks request as (packet, source socket address)
        Creates an empty list for replies.
        """
        # NOTE(review): this overrides DatagramRequestHandler.setup() without
        # calling it, and rebinds self.socket to the wfile-side socket pair —
        # relies on SocketServer passing the raw (data, socket) request tuple.
        (self.packet, self.socket) = self.request
        self.replies = []

    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function"""
        if decoded[0] != "#bundle":
            # plain message: decoded[1] is the typetag string, whose leading
            # ',' is stripped before dispatch; decoded[2:] are the arguments
            self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
            return

        now = time.time()
        timetag = decoded[1]
        # honour the bundle's timetag: wait until the scheduled time.
        # NOTE(review): timetag is compared to time.time(); assumes the decoder
        # already converted the OSC (NTP-epoch) timetag to unix time — confirm.
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)

        for msg in decoded[2:]:
            self._unbundle(msg)

    def handle(self):
        """Handle incoming OSCMessage
        """
        decoded = decodeOSC(self.packet)
        if not len(decoded):
            return

        self._unbundle(decoded)

    def finish(self):
        """Finish handling OSCMessage.
        Send any reply returned by the callback(s) back to the originating client
        as an OSCMessage or OSCBundle
        """
        # a configured return_port overrides the port the request came from
        if self.server.return_port:
            self.client_address = (self.client_address[0], self.server.return_port)

        # multiple replies are wrapped into a single OSCBundle
        if len(self.replies) > 1:
            msg = OSCBundle()
            for reply in self.replies:
                msg.append(reply)
        elif len(self.replies) == 1:
            msg = self.replies[0]
        else:
            return

        self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
    """Multi-threaded OSCRequestHandler;
    Starts a new RequestHandler thread for each unbundled OSCMessage
    """
    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function
        This version starts a new thread for each sub-Bundle found in the Bundle,
        then waits for all its children to finish.
          - decoded (list): a decoded OSC-packet; either a message
            ([address, typetags, args...]) or a bundle ("#bundle", timetag, elements...)
        """
        if decoded[0] != "#bundle":
            # plain message: dispatch in the current thread
            self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
            return

        now = time.time()
        timetag = decoded[1]
        # honour the bundle's timetag before unpacking its elements.
        # NOTE(review): assumes the decoder already converted the OSC
        # (NTP-epoch) timetag to unix time — confirm.
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)
        # (the original re-read time.time() into 'now' here, but the value was
        # never used again — dead assignment removed)

        children = []

        for msg in decoded[2:]:
            t = threading.Thread(target = self._unbundle, args = (msg,))
            t.start()
            children.append(t)

        # wait for all children to terminate
        for t in children:
            t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer, OSCAddressSpace):
"""A Synchronous OSCServer
Serves one request at-a-time, until the OSCServer is closed.
The OSC address-pattern is matched against a set of OSC-adresses
that have been registered to the server with a callback-function.
If the adress-pattern of the message machtes the registered address of a callback,
that function is called.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = OSCRequestHandler
# define a socket timeout, so the serve_forever loop can actually exit.
socket_timeout = 1
# DEBUG: print error-tracebacks (to stderr)?
print_tracebacks = False
def __init__(self, server_address, client=None, return_port=0):
"""Instantiate an OSCServer.
- server_address ((host, port) tuple): the local host & UDP-port
the server listens on
- client (OSCClient instance): The OSCClient used to send replies from this server.
If none is supplied (default) an OSCClient will be created.
- return_port (int): if supplied, sets the default UDP destination-port
for replies coming from this server.
"""
UDPServer.__init__(self, server_address, self.RequestHandlerClass)
OSCAddressSpace.__init__(self)
self.setReturnPort(return_port)
self.error_prefix = ""
self.info_prefix = "/info"
self.socket.settimeout(self.socket_timeout)
self.running = False
self.client = None
if client == None:
self.client = OSCClient(server=self)
else:
self.setClient(client)
def setClient(self, client):
"""Associate this Server with a new local Client instance, closing the Client this Server is currently using.
"""
if not isinstance(client, OSCClient):
raise ValueError("'client' argument is not a valid OSCClient object")
if client.server != None:
raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))
# Server socket is already listening at this point, so we can't use the client's socket.
# we'll have to force our socket on the client...
client_address = client.address() # client may be already connected
client.close() # shut-down that socket
# force our socket upon the client
client.setServer(self)
if client_address:
client.connect(client_address)
if not self.return_port:
self.return_port = client_address[1]
def serve_forever(self):
"""Handle one request at a time until server is closed."""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def close(self):
"""Stops serving requests, closes server (socket), closes used client
"""
self.running = False
self.client.close()
self.server_close()
def __str__(self):
"""Returns a string containing this Server's Class-name, software-version and local bound address (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " listening on osc://%s" % getUrlStr(addr)
else:
out += " (unbound)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
return cmp(self.socket._sock, other.socket._sock)
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the local address this server is bound to,
or None if not bound to any address.
"""
try:
return self.socket.getsockname()
except socket.error:
return None
def setReturnPort(self, port):
"""Set the destination UDP-port for replies returning from this server to the remote client
"""
if (port > 1024) and (port < 65536):
self.return_port = port
else:
self.return_port = None
def setSrvInfoPrefix(self, pattern):
"""Set the first part of OSC-address (pattern) this server will use to reply to server-info requests.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.info_prefix = pattern
def setSrvErrorPrefix(self, pattern=""):
"""Set the OSC-address (pattern) this server will use to report errors occuring during
received message handling to the remote client.
If pattern is empty (default), server-errors are not reported back to the client.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.error_prefix = pattern
def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
"""Register a default set of OSC-address handlers with this Server:
- 'default' -> noCallback_handler
the given prefix is prepended to all other callbacks registered by this method:
- '<prefix><info_prefix' -> serverInfo_handler
- '<prefix><error_prefix> -> msgPrinter_handler
- '<prefix>/print' -> msgPrinter_handler
and, if the used Client supports it;
- '<prefix>/subscribe' -> subscription_handler
- '<prefix>/unsubscribe' -> subscription_handler
Note: the given 'error_prefix' argument is also set as default 'error_prefix' for error-messages
*sent from* this server. This is ok, because error-messages generally do not elicit a reply from the receiver.
To do this with the serverInfo-prefixes would be a bad idea, because if a request received on '/info' (for example)
would send replies to '/info', this could potentially cause a never-ending loop of messages!
Do *not* set the 'info_prefix' here (for incoming serverinfo requests) to the same value as given to
the setSrvInfoPrefix() method (for *replies* to incoming serverinfo requests).
For example, use '/info' for incoming requests, and '/inforeply' or '/serverinfo' or even just '/print' as the
info-reply prefix.
"""
self.error_prefix = error_prefix
self.addMsgHandler('default', self.noCallback_handler)
self.addMsgHandler(prefix + info_prefix, self.serverInfo_handler)
self.addMsgHandler(prefix + error_prefix, self.msgPrinter_handler)
self.addMsgHandler(prefix + '/print', self.msgPrinter_handler)
if isinstance(self.client, OSCMultiClient):
self.addMsgHandler(prefix + '/subscribe', self.subscription_handler)
self.addMsgHandler(prefix + '/unsubscribe', self.subscription_handler)
def printErr(self, txt):
"""Writes 'OSCServer: txt' to sys.stderr
"""
sys.stderr.write("OSCServer: %s\n" % txt)
def sendOSCerror(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'error_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.error_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.error_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
def reportErr(self, txt, client_address):
"""Writes 'OSCServer: txt' to sys.stderr
If self.error_prefix is defined, sends 'txt' as an OSC error-message to the client(s)
(see printErr() and sendOSCerror())
"""
self.printErr(txt)
if len(self.error_prefix):
self.sendOSCerror(txt, client_address)
def sendOSCinfo(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'info_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.info_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.info_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
###
# Message-Handler callback functions
###
def handle_error(self, request, client_address):
"""Handle an exception in the Server's callbacks gracefully.
Writes the error to sys.stderr and, if the error_prefix (see setSrvErrorPrefix()) is set,
sends the error-message as reply to the client
"""
(e_type, e) = sys.exc_info()[:2]
self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e)))
if self.print_tracebacks:
import traceback
traceback.print_exc() # XXX But this goes to stderr!
if len(self.error_prefix):
self.sendOSCerror("%s: %s" % (e_type.__name__, str(e)), client_address)
def noCallback_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registerd handlers must accept these three arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registerd OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints a "No callback registered to handle ..." message.
Returns None
"""
self.reportErr("No callback registered to handle OSC-address '%s'" % addr, client_address)
def msgPrinter_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registerd handlers must accept these three arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registerd OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints the received message.
Returns None
"""
txt = "OSCMessage '%s' from %s: " % (addr, getUrlStr(client_address))
txt += str(data)
self.printErr(txt) # strip trailing comma & space
def serverInfo_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registerd handlers must accept these three arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registerd OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler returns a reply to the client, which can contain various bits of information
about this server, depending on the first argument of the received OSC-message:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'address <string>' messages, listing the server's
OSC address-space.
- 'clients' | 'targets' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
"""
if len(data) == 0:
return None
cmd = data.pop(0)
reply = None
if cmd in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('info_command', "ls | list : list OSC address-space"))
reply.append(('info_command', "clients | targets : list subscribed clients"))
elif cmd in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for addr in self.callbacks.keys():
reply.append(('address', addr))
elif cmd in ('clients', 'targets'):
if hasattr(self.client, 'getOSCTargetStrings'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
else:
cli_addr = self.client.address()
if cli_addr:
reply = OSCMessage(self.info_prefix)
reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
else:
self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)
return reply
def _subscribe(self, data, client_address):
"""Handle the actual subscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>] [<filter>] [...]' string, which is then passed to
parseUrlStr() & parseFilterStr() to actually retreive <host>, <port>, etc.
This 'long way 'round' approach (almost) guarantees that the subscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == types.IntType) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) in types.StringTypes:
url += item
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
if addr != None:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
port = client_address[1]
addr = (host, port)
else:
addr = client_address
self.client._setTarget(addr, prefix, filters)
trg = self.client.getOSCTargetStr(addr)
if trg[0] != None:
reply = OSCMessage(self.info_prefix)
reply.append(('target',) + trg)
return reply
def _unsubscribe(self, data, client_address):
"""Handle the actual unsubscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>]' string, which is then passed to
parseUrlStr() to actually retreive <host>, <port> & <prefix>.
This 'long way 'round' approach (almost) guarantees that the unsubscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == types.IntType) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) in types.StringTypes:
url += item
(addr, _) = parseUrlStr(url)
if addr == None:
addr = client_address
else:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
try:
(host, port) = self.client._searchHostAddr(host)
except NotSubscribedError:
port = client_address[1]
addr = (host, port)
try:
self.client._delTarget(addr)
except NotSubscribedError, e:
txt = "%s: %s" % (e.__class__.__name__, str(e))
self.printErr(txt)
reply = OSCMessage(self.error_prefix)
reply.append(txt)
return reply
def subscription_handler(self, addr, tags, data, client_address):
"""Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
if the local Client supports this (i.e. OSCMultiClient).
Supported commands:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
- '[subscribe | listen | sendto | target] <url> [<filter> ...] : Subscribe remote client/server at <url>,
and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
- '[unsubscribe | silence | nosend | deltarget] <url> : Unsubscribe remote client/server at <url>
If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)
The <url> given to the subscribe/unsubscribe handler should be of the form:
'[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
If <host> is not specified, the IP-address of the message's source is used.
If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
the associated port is used.
If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
sent to the subscribed host.
If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
port and prefix all match the subscription.
If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
match the subscription.
"""
if not isinstance(self.client, OSCMultiClient):
raise OSCServerError("Local %s does not support subsctiptions or message-filtering" % self.client.__class__.__name__)
addr_cmd = addr.split('/')[-1]
if len(data):
if data[0] in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('subscribe_command', "ls | list : list subscribed targets"))
reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
return reply
if data[0] in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
return reply
if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data[1:], client_address)
if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data[1:], client_address)
if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data, client_address)
if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data, client_address)
class ForkingOSCServer(ForkingMixIn, OSCServer):
    """An Asynchronous OSCServer.
    This server forks a new process to handle each incoming request.
    """
    # override OSCServer's default with the threading request handler, so each
    # unbundled OSCMessage is dispatched in its own thread (within the forked child).
    # NOTE(review): using the *Threading* handler in a *Forking* server looks
    # intentional, but confirm against upstream pyOSC.
    RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
    """An Asynchronous OSCServer.
    This server starts a new thread to handle each incoming request.
    """
    # override OSCServer's default with the threading request handler, so each
    # unbundled OSCMessage is dispatched in its own thread
    RequestHandlerClass = ThreadingOSCRequestHandler
######
#
# OSCError classes
#
######
class OSCError(Exception):
    """Base Class for all OSC-related errors
    """
    def __init__(self, message):
        # Also pass the message to Exception.__init__ so that e.args is
        # populated and repr(e) is meaningful (the original left args empty).
        Exception.__init__(self, message)
        self.message = message

    def __str__(self):
        return self.message
class OSCClientError(OSCError):
    """Class for all OSCClient errors
    """
    pass
class OSCServerError(OSCError):
    """Class for all OSCServer errors
    """
    pass
class NoCallbackError(OSCServerError):
    """This error is raised (by an OSCServer) when an OSCMessage with an 'unmatched' address-pattern
    is received, and no 'default' handler is registered.
    """
    def __init__(self, pattern):
        """The specified 'pattern' should be the OSC-address of the 'unmatched' message causing the error to be raised.
        """
        # NOTE: does not chain to OSCServerError.__init__; sets .message directly
        self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
    """Raised (by an OSCMultiClient) when an attempt is made to unsubscribe
    a host that isn't subscribed.
    """
    def __init__(self, addr, prefix=None):
        """Build the error message from the target's (host, port) tuple and
        its optional address-prefix.
        """
        url = getUrlStr(addr, prefix if prefix else '')
        self.message = "Target osc://%s is not subscribed" % url
######
#
# OSC over streaming transport layers (usually TCP)
#
# Note from the OSC 1.0 specifications about streaming protocols:
#
# The underlying network that delivers an OSC packet is responsible for
# delivering both the contents and the size to the OSC application. An OSC
# packet can be naturally represented by a datagram by a network protocol such
# as UDP. In a stream-based protocol such as TCP, the stream should begin with
# an int32 giving the size of the first packet, followed by the contents of the
# first packet, followed by the size of the second packet, etc.
#
# The contents of an OSC packet must be either an OSC Message or an OSC Bundle.
# The first byte of the packet's contents unambiguously distinguishes between
# these two alternatives.
#
######
class OSCStreamRequestHandler(StreamRequestHandler, OSCAddressSpace):
    """ This is the central class of a streaming OSC server. If a client
    connects to the server, the server instantiates a OSCStreamRequestHandler
    for each new connection. This is fundamentally different to a packet
    oriented server which has a single address space for all connections.
    This connection based (streaming) OSC server maintains an address space
    for each single connection, because usually tcp server spawn a new thread
    or process for each new connection. This would generate severe
    multithreading synchronization problems when each thread would operate on
    the same address space object. Therefore: To implement a streaming/TCP OSC
    server a custom handler must be implemented which implements the
    setupAddressSpace member in which it creates its own address space for this
    very connection. This has been done within the testbench and can serve as
    inspiration.
    """
    def __init__(self, request, client_address, server):
        """ Initialize all base classes. The address space must be initialized
        before the stream request handler because the initialization function
        of the stream request handler calls the setup member which again
        requires an already initialized address space.
        """
        # serializes writes to the connection socket (replies vs. sendOSC)
        self._txMutex = threading.Lock()

        OSCAddressSpace.__init__(self)
        StreamRequestHandler.__init__(self, request, client_address, server)

    def _unbundle(self, decoded):
        """Recursive bundle-unpacking function"""
        if decoded[0] != "#bundle":
            # plain message: decoded[1][1:] strips the leading ',' of the typetags
            self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
            return

        now = time.time()
        timetag = decoded[1]
        # NOTE(review): timetag is compared to time.time(); assumes the decoder
        # already converted the OSC (NTP-epoch) timetag to unix time — confirm
        if (timetag > 0.) and (timetag > now):
            time.sleep(timetag - now)

        for msg in decoded[2:]:
            self._unbundle(msg)

    def setup(self):
        StreamRequestHandler.setup(self)
        print "SERVER: New client connection."
        # give subclasses the chance to populate this connection's address space
        self.setupAddressSpace()
        self.server._clientRegister(self)

    def setupAddressSpace(self):
        """ Override this function to customize your address space. """
        pass

    def finish(self):
        StreamRequestHandler.finish(self)
        self.server._clientUnregister(self)
        print "SERVER: Client connection handled."

    def _transmit(self, data):
        # send until all bytes are out; returns False if the peer closed
        sent = 0
        while sent < len(data):
            tmp = self.connection.send(data[sent:])
            if tmp == 0:
                return False
            sent += tmp
        return True

    def _transmitMsg(self, msg):
        """Send an OSC message over a streaming socket. Raises exception if it
        should fail. If everything is transmitted properly, True is returned. If
        socket has been closed, False.
        """
        if not isinstance(msg, OSCMessage):
            raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")

        try:
            binary = msg.getBinary()
            length = len(binary)
            # prepend length of packet before the actual message (big endian)
            len_big_endian = array.array('c', '\0' * 4)
            struct.pack_into(">L", len_big_endian, 0, length)
            len_big_endian = len_big_endian.tostring()
            if self._transmit(len_big_endian) and self._transmit(binary):
                return True
            return False
        except socket.error, e:
            if e[0] == errno.EPIPE:  # broken pipe
                return False
            raise e

    def _receive(self, count):
        """ Receive a certain amount of data from the socket and return it. If the
        remote end should be closed in the meanwhile None is returned.
        """
        chunk = self.connection.recv(count)
        if not chunk or len(chunk) == 0:
            return None
        # keep reading until exactly 'count' bytes have arrived
        while len(chunk) < count:
            tmp = self.connection.recv(count - len(chunk))
            if not tmp or len(tmp) == 0:
                return None
            chunk = chunk + tmp
        return chunk

    def _receiveMsg(self):
        """ Receive OSC message from a socket and decode.
        If an error occurs, None is returned, else the message.
        """
        # get OSC packet size from stream which is prepended each transmission
        chunk = self._receive(4)
        if chunk == None:
            print "SERVER: Socket has been closed."
            return None
        # extract message length from big endian unsigned long (32 bit)
        slen = struct.unpack(">L", chunk)[0]
        # receive the actual message
        chunk = self._receive(slen)
        if chunk == None:
            print "SERVER: Socket has been closed."
            return None
        # decode OSC data and dispatch
        msg = decodeOSC(chunk)
        if msg == None:
            raise OSCError("SERVER: Message decoding failed.")
        return msg

    def handle(self):
        """
        Handle a connection: receive length-prefixed OSC packets in a loop,
        dispatch them, and send collected replies back over the same socket.
        """
        # set socket blocking to avoid "resource currently not available"
        # exceptions, because the connection socket inherits the settings
        # from the listening socket and this times out from time to time
        # in order to provide a way to shut the server down. But we want
        # clean and blocking behaviour here
        self.connection.settimeout(None)

        print "SERVER: Entered server loop"
        try:
            while True:
                decoded = self._receiveMsg()
                if decoded == None:
                    return
                elif len(decoded) <= 0:
                    # if message decoding fails we try to stay in sync but print a message
                    print "OSC stream server: Spurious message received."
                    continue

                self.replies = []
                self._unbundle(decoded)

                # bundle multiple replies, send a single one as-is
                if len(self.replies) > 1:
                    msg = OSCBundle()
                    for reply in self.replies:
                        msg.append(reply)
                elif len(self.replies) == 1:
                    msg = self.replies[0]
                else:
                    # no replies, continue receiving
                    continue
                self._txMutex.acquire()
                txOk = self._transmitMsg(msg)
                self._txMutex.release()
                if not txOk:
                    break

        except socket.error, e:
            if e[0] == errno.ECONNRESET:
                # if connection has been reset by client, we do not care much
                # about it, we just assume our duty fullfilled
                print "SERVER: Connection has been reset by peer."
            else:
                raise e

    def sendOSC(self, oscData):
        """ This member can be used to transmit OSC messages or OSC bundles
        over the client/server connection. It is thread safe.
        """
        self._txMutex.acquire()
        result = self._transmitMsg(oscData)
        self._txMutex.release()
        return result
""" TODO Note on threaded unbundling for streaming (connection oriented)
transport:
Threaded unbundling as implemented in ThreadingOSCServer must be implemented in
a different way for the streaming variant, because contrary to the datagram
version the streaming handler is instantiated only once per connection. This
leads to the problem (if threaded unbundling is implemented as in OSCServer)
that all further message reception is blocked until all (previously received)
pending messages are processed.
Each StreamRequestHandler should provide a so called processing queue in which
all pending messages or subbundles are inserted to be processed in the future.
When a subbundle or message gets queued, a mechanism must be provided that
those messages get invoked when time asks for them. There are the following
opportunities:
- a timer is started which checks at regular intervals for messages in the
queue (polling - requires CPU resources)
- a dedicated timer is started for each message (requires timer resources)
"""
class OSCStreamingServer(TCPServer):
    """ A connection oriented (TCP/IP) OSC server.
    """
    # define a socket timeout, so the serve_forever loop can actually exit.
    # with 2.6 and server.shutdown this wouldn't be necessary
    socket_timeout = 1

    # this is the class which handles a new connection. Override this for a
    # useful customized server. See the testbench for an example
    RequestHandlerClass = OSCStreamRequestHandler

    def __init__(self, address):
        """Instantiate an OSCStreamingServer.
          - address ((host, port) tuple): the local host & TCP-port
          the server listens on for new connections.
        """
        self._clientList = []
        self._clientListMutex = threading.Lock()
        TCPServer.__init__(self, address, self.RequestHandlerClass)
        # timeout so serve_forever() can periodically check self.running
        self.socket.settimeout(self.socket_timeout)

    def serve_forever(self):
        """Handle one request at a time until server is closed.
        Had to add this since 2.5 does not support server.shutdown()
        """
        self.running = True
        while self.running:
            self.handle_request()   # this times-out when no data arrives.

    def start(self):
        """ Start the server thread. """
        self._server_thread = threading.Thread(target=self.serve_forever)
        self._server_thread.setDaemon(True)
        self._server_thread.start()

    def stop(self):
        """ Stop the server thread and close the socket. """
        self.running = False
        self._server_thread.join()
        self.server_close()
        # 2.6 only
        #self.shutdown()

    def _clientRegister(self, client):
        """ Gets called by each request/connection handler when connection is
        established to add itself to the client list
        """
        self._clientListMutex.acquire()
        self._clientList.append(client)
        self._clientListMutex.release()

    def _clientUnregister(self, client):
        """ Gets called by each request/connection handler when connection is
        lost to remove itself from the client list
        """
        self._clientListMutex.acquire()
        self._clientList.remove(client)
        self._clientListMutex.release()

    def broadcastToClients(self, oscData):
        """ Send OSC message or bundle to all connected clients.
        Returns True only if transmission succeeded for every client.
        """
        # NOTE(review): _clientList is iterated without _clientListMutex here —
        # confirm whether concurrent (un)register during broadcast is possible.
        result = True
        for client in self._clientList:
            # Bugfix: send first, then fold into the result. The original
            # 'result = result and client.sendOSC(...)' short-circuited, so
            # after one failure no later client received the message at all.
            result = client.sendOSC(oscData) and result
        return result
class OSCStreamingServerThreading(ThreadingMixIn, OSCStreamingServer):
    """ Implements a server which spawns a separate thread for each incoming
    connection. Care must be taken since the OSC address space is for all
    the same.
    """
    # Bugfix: the docstring above was originally placed *after* 'pass', which
    # made it a dead string expression instead of the class docstring.
    pass
class OSCStreamingClient(OSCAddressSpace):
""" OSC streaming client.
A streaming client establishes a connection to a streaming server but must
be able to handle replies by the server as well. To accomplish this the
receiving takes place in a secondary thread, because no one knows if we
have to expect a reply or not, i.e. synchronous architecture doesn't make
much sense.
Replies will be matched against the local address space. If message
handlers access code of the main thread (where the client messages are sent
to the server) care must be taken e.g. by installing sychronization
mechanisms or by using an event dispatcher which can handle events
originating from other threads.
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
rcvbuf_size = 4096 * 8
def __init__(self):
    """Set up the streaming client: address space, TCP socket and transmit lock."""
    # serializes writes to the socket (main thread vs. receiving thread)
    self._txMutex = threading.Lock()
    OSCAddressSpace.__init__(self)
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
    self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.rcvbuf_size)
    # 1-second timeout so the receiving thread can notice termination requests
    self.socket.settimeout(1.0)
    self._running = False
def _receiveWithTimeout(self, count):
chunk = str()
while len(chunk) < count:
try:
tmp = self.socket.recv(count - len(chunk))
except socket.timeout:
if not self._running:
print "CLIENT: Socket timed out and termination requested."
return None
else:
continue
except socket.error, e:
if e[0] == errno.ECONNRESET:
print "CLIENT: Connection reset by peer."
return None
else:
raise e
if not tmp or len(tmp) == 0:
print "CLIENT: Socket has been closed."
return None
chunk = chunk + tmp
return chunk
def _receiveMsgWithTimeout(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get OSC packet size from stream which is prepended each transmission
chunk = self._receiveWithTimeout(4)
if not chunk:
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receiveWithTimeout(slen)
if not chunk:
return None
# decode OSC content
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("CLIENT: Message decoding failed.")
return msg
def _receiving_thread_entry(self):
print "CLIENT: Entered receiving thread."
self._running = True
while self._running:
decoded = self._receiveMsgWithTimeout()
if not decoded:
break
elif len(decoded) <= 0:
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
continue
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
if not txOk:
break
print "CLIENT: Receiving thread terminated."
def _unbundle(self, decoded):
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.socket.getpeername())
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def connect(self, address):
self.socket.connect(address)
self.receiving_thread = threading.Thread(target=self._receiving_thread_entry)
self.receiving_thread.start()
def close(self):
# let socket time out
self._running = False
self.receiving_thread.join()
self.socket.close()
def _transmitWithTimeout(self, data):
sent = 0
while sent < len(data):
try:
tmp = self.socket.send(data[sent:])
except socket.timeout:
if not self._running:
print "CLIENT: Socket timed out and termination requested."
return False
else:
continue
except socket.error, e:
if e[0] == errno.ECONNRESET:
print "CLIENT: Connection reset by peer."
return False
else:
raise e
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsgWithTimeout(self, msg):
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
binary = msg.getBinary()
length = len(binary)
# prepend length of packet before the actual message (big endian)
len_big_endian = array.array('c', '\0' * 4)
struct.pack_into(">L", len_big_endian, 0, length)
len_big_endian = len_big_endian.tostring()
if self._transmitWithTimeout(len_big_endian) and self._transmitWithTimeout(binary):
return True
else:
return False
def sendOSC(self, msg):
"""Send an OSC message or bundle to the server. Returns True on success.
"""
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
return txOk
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.socket.getpeername()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
isequal = cmp(self.socket._sock, other.socket._sock)
if isequal and self.server and other.server:
return cmp(self.server, other.server)
return isequal
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
# vim:noexpandtab
| jsudano/WaveFront | python/OSC.py | Python | gpl-3.0 | 89,922 | [
"VisIt"
] | db7d6f2052a13ccfa55664ca499e2f2427d61a12a8f3e07b72d380c085062d41 |
## INFO ########################################################################
## ##
## COUBLET ##
## ======= ##
## ##
## Cross-platform desktop client to follow posts from COUB ##
## Version: 0.6.93.172 (20140814) ##
## ##
## File: widgets/media.py ##
## ##
## Designed and written by Peter Varo. Copyright (c) 2014 ##
## License agreement is provided in the LICENSE file ##
## For more info visit: https://github.com/petervaro/coub ##
## ##
## Copyright (c) 2014 Coub Ltd and/or its suppliers and licensors, ##
## 5 Themistokli Dervi Street, Elenion Building, 1066 Nicosia, Cyprus. ##
## All rights reserved. COUB (TM) is a trademark of Coub Ltd. ##
## http://coub.com ##
## ##
######################################################################## INFO ##
# Import python modules
from textwrap import fill
# Import PyQt5 modules
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import Qt, QUrl
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QLabel
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
#------------------------------------------------------------------------------#
class CoubletMediaPlayerWidget(QWidget):
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def __init__(self, width, height, thumb_file, video_file, audio_file=None,
loop_video=False, loop_audio=False, error_font=None,
error_color=None, error_background=None, parent=None):
super().__init__(parent)
# Restrict size
self.setFixedSize(width, height)
# Store static values
self._error_font = error_font
self._error_color = error_color
self._error_background = error_background
# Create thumbnail preview
self._thumb = thumb = QLabel(self)
thumb.setPixmap(QPixmap(thumb_file).scaled(width, height))
# Create video player and its content
self._video = video = QVideoWidget(self)
video.setFixedSize(width, height)
# Set video player file
self._video_player = video_player = QMediaPlayer(None, QMediaPlayer.VideoSurface)
video_player.setVideoOutput(video)
video_player.error.connect(lambda: self.set_error(self.get_error()))
video_player.setMedia(QMediaContent(QUrl.fromLocalFile(video_file)))
# Set looping for video
if loop_video:
self._loop_video = False
video_player.stateChanged.connect(self.on_video_player_state_changed)
# Set separate playe for audio file if any
if audio_file:
self._audio_player = audio_player = QMediaPlayer(None, QMediaPlayer.StreamPlayback)
audio_player.error.connect(lambda: self.set_error(self.get_error()))
# Store MediaContent, otherwise it will be GC'd after stop()
self._audio = QMediaContent(QUrl(audio_file))
audio_player.setMedia(self._audio)
# Ste looping for audio
if loop_audio:
self._loop_audio = False
audio_player.stateChanged.connect(self.on_audio_player_state_changed)
# Make sure all flags are set and
# only the proper widgets are visible
self.stop()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_video_player_state_changed(self, event):
# If playing => has to be looped => start over!
if self._loop_video:
self._video_player.play()
# If paused
else:
# Reset looping
self._loop_video = True
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def on_audio_player_state_changed(self, event):
# If playing => has to be looped => start over!
if self._loop_audio:
self._audio_player.play()
# If paused
else:
# Reset looping
self._loop_audio = True
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def set_error(self, message):
try:
self._loop_audio = False
except AttributeError:
pass
self._loop_video = False
self._video.hide()
self._thumb.hide()
layout = QVBoxLayout()
error_msg = self._video_player.errorString()
error_label = QLabel(fill('ERROR: {}'.format(message.upper()), width=32))
if self._error_font:
error_label.setFont(self._error_font)
if self._error_color:
error_label.setPalette(self._error_color)
if self._error_background:
self.setPalette(self._error_background)
self.setAutoFillBackground(True)
layout.addWidget(error_label, alignment=Qt.AlignHCenter)
self.setLayout(layout)
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def get_error(self):
message = self._video_player.errorString()
if message:
return '{!r} @video'.format(message)
try:
message = self._audio_player.errorString()
if message:
return '{!r} @audio'.format(message)
except AttributeError:
pass
return 'unknown'
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def play(self):
if self._stopped:
self._stopped = False
self._video.show()
self._thumb.hide()
try:
self._loop_audio = True
self._audio_player.play()
except AttributeError:
pass
self._loop_video = True
self._video_player.play()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def pause(self):
try:
self._loop_audio = False
self._audio_player.pause()
except AttributeError:
pass
self._loop_video = False
self._video_player.pause()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def stop(self):
self._stopped = True
self._thumb.show()
self._video.hide()
try:
self._loop_audio = False
self._audio_player.stop()
except AttributeError:
pass
self._loop_video = False
self._video_player.stop()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def state(self):
return self._video_player.state()
#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
def set_volume(self, volume):
try:
self._audio_player.setVolume(volume)
except AttributeError:
pass
self._video_player.setVolume(volume)
| petervaro/coublet | widgets/media.py | Python | mit | 7,680 | [
"VisIt"
] | b9e6388ce663bae00149f8a9812286ca17f097ddf556b1989d365d8cfc254735 |
# Copyright 2016 Uri Laserson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gzip
import json
import os
import pathlib
import re
import sys
from collections import Counter, OrderedDict
from functools import reduce
from glob import glob
from os import path as osp
from os.path import join as pjoin
from subprocess import PIPE, Popen
import numpy as np
import pandas as pd
from click import Choice, Path, command, group, option
from tqdm import tqdm
from phip.utils import (
DEFAULT_FDR,
DEFAULT_REFERENCE_QUANTILE,
compute_size_factors,
readfq)
# handle gzipped or uncompressed files
def open_maybe_compressed(*args, **kwargs):
if args[0].endswith(".gz"):
# gzip modes are different from default open modes
if len(args[1]) == 1:
args = (args[0], args[1] + "t") + args[2:]
compresslevel = kwargs.pop("compresslevel", 6)
return gzip.open(*args, **kwargs, compresslevel=compresslevel)
else:
return open(*args, **kwargs)
@group(context_settings={"help_option_names": ["-h", "--help"]})
def cli():
"""phip -- PhIP-seq analysis tools"""
pass
@cli.command(name="truncate-fasta")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input fasta",
)
@option("-o", "--output", required=True, type=Path(exists=False), help="output fasta")
@option(
"-k",
"--length",
required=True,
type=int,
help="length of starting subsequence to extract",
)
def truncate_fasta(input, output, length):
"""Truncate each sequence of a fasta file."""
with open(input, "r") as ip, open(output, "w") as op:
for (n, s, q) in readfq(ip):
print(f">{n}\n{s[:length]}", file=op)
@cli.command(name="merge-kallisto-tpm")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, file_okay=False),
help="input dir containing kallisto results",
)
@option("-o", "--output", required=True, type=Path(exists=False), help="output path")
def merge_kallisto_tpm(input, output):
"""Merge kallisto abundance results.
Input directory should contain sample-named subdirectories, each containing
an abundance.tsv file. This command will generate a single tab-delim
output file with each column containing the tpm values for that sample.
"""
samples = os.listdir(input)
iterators = [open(pjoin(input, s, "abundance.tsv"), "r") for s in samples]
with open(output, "w") as op:
it = zip(*iterators)
# burn headers of input files and write header of output file
_ = next(it)
print("id\t{}".format("\t".join(samples)), file=op)
for lines in it:
fields_array = [line.split("\t") for line in lines]
# check that join column is the same
assert all([fields[0] == fields_array[0][0] for fields in fields_array])
merged_fields = [fields_array[0][0]] + [f[4].strip() for f in fields_array]
print("\t".join(merged_fields), file=op)
@cli.command(name="gamma-poisson-model")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input counts file (tab-delim)",
)
@option(
"-o", "--output", required=True, type=Path(exists=False), help="output directory"
)
@option(
"-t",
"--trim-percentile",
default=99.9,
help="lower percent of data to keep for model fitting",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
def gamma_poisson_model(input, output, trim_percentile, index_cols):
"""Fit a gamma-poisson model.
Compute -log10(pval) for each (possibly-normalized) count.
"""
from phip.gampois import gamma_poisson_model as model
counts = pd.read_csv(input, sep="\t", header=0, index_col=list(range(index_cols)))
os.makedirs(output, exist_ok=True)
alpha, beta, rates, mlxp = model(counts, trim_percentile)
with open(pjoin(output, "parameters.json"), "w") as op:
json.dump(
{
"alpha": alpha,
"beta": beta,
"trim_percentile": trim_percentile,
"background_rates": list(rates),
},
op,
)
mlxp.to_csv(pjoin(output, "mlxp.tsv"), sep="\t", float_format="%.2f")
@cli.command(name="clipped-factorization-model")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input counts file (tab-delim)",
)
@option(
"-o",
"--output",
required=False,
type=Path(exists=False),
help="output file or directory. If ends in .tsv, will be treated as file",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
@option("--rank", default=3, show_default=True, help="matrix rank")
@option(
"--clip-percentile",
default=99.9,
show_default=True,
help="percentile thershold to clip at",
)
@option(
"--learning-rate",
default=1.0,
show_default=True,
help="learning rate for Adam optimizer",
)
@option(
"--minibatch-size", default=1024 * 32, show_default=True, help="rows per minibatch"
)
@option(
"--patience",
default=5,
show_default=True,
help="number of epochs of no improvement to wait before early stopping",
)
@option("--max-epochs", default=1000, show_default=True, help="maximum epochs")
@option(
"--discard-sample-reads-fraction",
default=0.01,
show_default=True,
help="Discard samples with fewer than X * m reads, where m is the median "
"number of reads across samples",
)
@option(
"--no-normalize-to-reads-per-million",
is_flag=True,
help="Work directly on read counts, not counts divided by sample totals",
)
@option(
"--log-every-seconds",
default=1,
show_default=True,
help="write progress no more often than every N seconds",
)
def clipped_factorization_model(
input,
output,
index_cols,
rank,
clip_percentile,
learning_rate,
minibatch_size,
patience,
max_epochs,
discard_sample_reads_fraction,
no_normalize_to_reads_per_million,
log_every_seconds,
):
"""Fit matrix factorization model.
Computes residuals from a matrix factorization model. Specifically, attempt
to detect and correct for clone and sample batch effects by subtracting off
a learned low-rank reconstruction of the given counts matrix.
The result is the (clones x samples) matrix of residuals after correcting for
batch effects. A few additional rows and columns (named _background_0,
_background_1, ...) giving the learned effects are also included.
"""
from phip.clipped_factorization import do_clipped_factorization
counts = pd.read_csv(input, sep="\t", header=0, index_col=list(range(index_cols)))
total_reads = counts.sum()
expected_reads = total_reads.median()
for sample in counts.columns:
if total_reads[sample] / expected_reads < discard_sample_reads_fraction:
print(
"[!!] EXCLUDING SAMPLE %s DUE TO INSUFFICIENT READS "
"(%d vs. sample median %d)"
% (sample, total_reads[sample], expected_reads)
)
del counts[sample]
result_df = do_clipped_factorization(
counts,
rank=rank,
clip_percentile=clip_percentile,
learning_rate=learning_rate,
minibatch_size=minibatch_size,
patience=patience,
max_epochs=max_epochs,
normalize_to_reads_per_million=not no_normalize_to_reads_per_million,
log_every_seconds=log_every_seconds,
)
if output.endswith(".tsv"):
output_path = output
else:
os.makedirs(output)
output_path = pjoin(output, "mixture.tsv")
result_df.to_csv(output_path, sep="\t", float_format="%.2f")
print("Wrote: %s" % output_path)
@cli.command(name="call-hits")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input counts file (tab-delim)",
)
@option(
"-o",
"--output",
required=False,
type=Path(exists=False),
help="output file or directory. If ends in .tsv, will be treated as file",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
@option(
"--beads-regex",
default=".*beads.*",
show_default=True,
help="samples with names matching this regex are considered beads-only",
)
@option(
"--ignore-columns-regex",
default="^_background.*",
show_default=True,
help="ignore columns matching the given regex (evaluated in case-insensitive"
" mode.) Ignored columns are passed through to output without processing.",
)
@option(
"--ignore-rows-regex",
default="^_background.*",
show_default=True,
help="ignore rows matching the given regex (evaluated in case-insensitive "
"mode). Ignored rows are passed through to output without processing.",
)
@option(
"--fdr", default=DEFAULT_FDR, show_default=True, help="target false discovery rate"
)
@option(
"--reference-quantile",
default=DEFAULT_REFERENCE_QUANTILE,
show_default=True,
help="Percentile to take of each clone's beads-only samples."
)
@option(
"--discard-sample-reads-fraction",
default=0.01,
show_default=True,
help="Discard samples with fewer than X * m reads, where m is the median "
"number of reads across samples",
)
@option(
"--normalize-to-reads-per-million",
type=Choice(["always", "never", "guess"]),
default="guess",
show_default=True,
help="Divide counts by totals per sample. Recommended "
"when running directly on raw read counts (as opposed to matrix "
'factorization residuals). If set to "guess" then the counts matrix '
"will be left as-is if it contains negative entries, and otherwise "
"will be normalized.",
)
@option(
"--verbosity",
default=2,
show_default=True,
help="verbosity: no output (0), result summary only (1), or progress (2)",
)
def call_hits(
input,
output,
index_cols,
beads_regex,
ignore_columns_regex,
ignore_rows_regex,
fdr,
reference_quantile,
discard_sample_reads_fraction,
normalize_to_reads_per_million,
verbosity,
):
"""Call hits at specified FDR using a heuristic.
Either raw read counts or the result of the clipped-factorization-model
sub-command can be provided.
The result is a matrix of shape (clones x samples). Entries above 1.0 in
this matrix indicate hits. Higher values indicate more evidence for a
hit, but there is no simple interpretation of these values beyond whether
they are below/above 1.0.
See the documentation for `hit_calling.do_hit_calling()` for details on
the implementation.
"""
from phip.hit_calling import do_hit_calling
original_counts = pd.read_csv(
input, sep="\t", header=0, index_col=list(range(index_cols))
)
counts = original_counts
print("Read input matrix: %d clones x %d samples." % counts.shape)
print("Columns: %s" % " ".join(counts.columns))
columns_to_ignore = [
s
for s in counts.columns
if ignore_columns_regex
and re.match(ignore_columns_regex, s, flags=re.IGNORECASE)
]
if columns_to_ignore:
print(
"Ignoring %d columns matching regex '%s': %s"
% (
len(columns_to_ignore),
ignore_columns_regex,
" ".join(columns_to_ignore),
)
)
counts = counts[[c for c in counts.columns if c not in columns_to_ignore]]
rows_to_ignore = [
s
for s in counts.index
if ignore_rows_regex
and index_cols == 1
and re.match(ignore_rows_regex, s, flags=re.IGNORECASE)
]
if rows_to_ignore:
print(
"Ignoring %d rows matching regex '%s': %s"
% (len(rows_to_ignore), ignore_rows_regex, " ".join(rows_to_ignore))
)
counts = counts.loc[~counts.index.isin(rows_to_ignore)]
total_reads = counts.sum()
expected_reads = total_reads.median()
if (counts > 0).all().all():
for sample in counts.columns:
if total_reads[sample] / expected_reads < discard_sample_reads_fraction:
print(
"[!!] EXCLUDING SAMPLE %s DUE TO INSUFFICIENT READS "
"(%d vs. sample median %d)"
% (sample, total_reads[sample], expected_reads)
)
del counts[sample]
beads_only_samples = [
s for s in counts.columns if re.match(beads_regex, s, flags=re.IGNORECASE)
]
print(
"Beads-only regex '%s' matched %d samples: %s"
% (beads_regex, len(beads_only_samples), " ".join(beads_only_samples))
)
result_df = do_hit_calling(
counts,
beads_only_samples=beads_only_samples,
reference_quantile=reference_quantile,
fdr=fdr,
normalize_to_reads_per_million={"always": True, "never": False, "guess": None}[
normalize_to_reads_per_million
],
verbosity=verbosity,
)
full_result_df = original_counts.copy()
for column in result_df.columns:
full_result_df.loc[result_df.index, column] = result_df[column]
if output.endswith(".tsv"):
output_path = output
else:
os.makedirs(output)
output_path = pjoin(output, "hits.tsv")
full_result_df.to_csv(output_path, sep="\t", float_format="%.4f")
print("Wrote: %s" % output_path)
# TOOLS THAT SHOULD BE USED RARELY
@cli.command(name="zip-reads-and-barcodes")
@option(
"-i",
"--input",
type=Path(exists=True, dir_okay=False),
required=True,
help="reads fastq file",
)
@option(
"-b",
"--barcodes",
type=Path(exists=True, dir_okay=False),
required=True,
help="indexes/barcodes fastq file",
)
@option(
"-m",
"--mapping",
type=Path(exists=True, dir_okay=False),
required=True,
help="barcode to sample mapping (tab-delim, no header line)",
)
@option(
"-o", "--output", type=Path(exists=False), required=True, help="output directory"
)
@option(
"-z", "--compress-output", is_flag=True, help="gzip-compress output fastq files"
)
@option(
"-n",
"--no-wrap",
is_flag=True,
help="fastq inputs are not wrapped (i.e., 4 lines per record)",
)
def zip_reads_barcodes(input, barcodes, mapping, output, compress_output, no_wrap):
"""Zip reads with barcodes and split into files.
Some older versions of the Illumina pipeline would not annotate the reads
with their corresponding barcodes, but would leave the barcode reads in a
separate fastq file. This tool will take both fastq files and will modify
the main fastq record to add the barcode to the header line (in the same
place Illumina would put it). It will the write one file per sample as
provided in the mapping.
This should only be necessary on older data files. Newer pipelines that use
bcl2fastq2 or the "generate fastq" pipeline in Basespace (starting 9/2016)
should not require this.
This tool requires that the reads are presented in the same order in the
two input files (which should be the case).
This tool should be used very rarely.
"""
from .utils import load_mapping, edit1_mapping
if no_wrap:
from .utils import read_fastq_nowrap as fastq_parser
else:
from .utils import readfq as fastq_parser
os.makedirs(output, mode=0o755)
input = osp.abspath(input)
barcodes = osp.abspath(barcodes)
# generate all possible edit-1 BCs
bc2sample = edit1_mapping(load_mapping(mapping))
with open_maybe_compressed(input, "r") as r_h, open_maybe_compressed(
barcodes, "r"
) as b_h:
# open file handles for each sample
ext = "fastq.gz" if compress_output else "fastq"
output_handles = {
s: open_maybe_compressed(
pjoin(output, "{s}.{ext}".format(s=s, ext=ext)), "w"
)
for s in set(bc2sample.values())
}
try:
for (r_n, r_s, r_q), (b_n, b_s, b_q) in zip(
tqdm(fastq_parser(r_h)), fastq_parser(b_h)
):
assert r_n.split(maxsplit=1)[0] == b_n.split(maxsplit=1)[0]
try:
print(
"@{r_n}\n{r_s}\n+\n{r_q}".format(r_n=r_n, r_s=r_s, r_q=r_q),
file=output_handles[bc2sample[b_s]],
)
except KeyError:
continue
finally:
for h in output_handles.values():
h.close()
@cli.command(name="merge-columns")
@option(
"-i", "--input", required=True, help="input path (directory of tab-delim files)"
)
@option("-o", "--output", required=True, help="output path")
@option(
"-m",
"--method",
type=Choice(["iter", "outer"]),
default="iter",
help="merge/join method",
)
@option(
"-p",
"--position",
type=int,
default=1,
help="the field position to merge (0-indexed)",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
def merge_columns(input, output, method, position, index_cols):
"""Merge tab-delimited files.
input must be a directory containing `.tsv` files to merge.
method: iter -- concurrently iterate over lines of all files; assumes
row-keys are identical in each file
method: outer -- bona fide outer join of data in each file; loads all files
into memory and joins using pandas
method: prealloc -- preallocate an array to hold all values; then read each
file into the array
"""
def load(path):
icols = list(range(index_cols))
ucols = icols + [position]
return pd.read_csv(
path, sep="\t", header=0, dtype=str, index_col=icols, usecols=ucols
)
input_dir = os.path.abspath(input)
output_file = os.path.abspath(output)
input_files = glob(pjoin(input_dir, "*.tsv"))
if method == "iter":
file_iterators = [open(f, "r") for f in input_files]
file_headers = [osp.splitext(osp.basename(f))[0] for f in input_files]
with open(output_file, "w") as op:
# iterate through lines
for lines in zip(*file_iterators):
fields_array = [
[field.strip() for field in line.split("\t")] for line in lines
]
# check that join column is the same
for fields in fields_array[1:]:
assert fields_array[0][:index_cols] == fields[:index_cols]
merged_fields = fields_array[0][:index_cols] + [
f[position] for f in fields_array
]
print("\t".join(merged_fields), file=op)
elif method == "outer":
dfs = [load(path) for path in input_files]
merge = lambda l, r: pd.merge(
l, r, how="outer", left_index=True, right_index=True
)
df = reduce(merge, dfs).fillna(0)
df.to_csv(output, sep="\t", float_format="%.2f")
elif method == "prealloc":
# iterate through just the first file to generate the row names
with open(input_files[0], "r") as ip:
row_names = ["\t".join(line.split("\t")[:index_cols]) for line in ip]
data = np.zeros((len(row_names), len(input_files)))
column_names = []
def load_into(path, i):
df = load(path)
column_names.append(df.columns[0])
data[:, i] = df.iloc[:, 0]
for i, path in enumerate(input_files):
load_into(path, i)
df = pd.DataFrame(data, index=row_names, columns=column_names, copy=False)
df.to_csv(output, sep="\t", float_format="%.2f")
@cli.command(name="normalize-counts")
@option("-i", "--input", required=True, help="input counts (tab-delim)")
@option("-o", "--output", required=True, help="output path")
@option(
"-m",
"--method",
type=Choice(["col-sum", "size-factors"]),
default="size-factors",
help="normalization method",
)
@option(
"-d", "--index-cols", default=1, help="number of columns to use as index/row-key"
)
def normalize_counts(input, output, method, index_cols):
"""Normalize count matrix.
Two methods for normalizing are available:
* Size factors from Anders and Huber 2010 (similar to TMM)
* Normalize to constant column-sum of 1e6
"""
df = pd.read_csv(input, sep="\t", header=0, index_col=list(range(index_cols)))
if method == "col-sum":
normalized = df / (df.sum() / 1e6)
elif method == "size-factors":
factors = compute_size_factors(df.values)
normalized = df / factors
normalized.to_csv(output, sep="\t", float_format="%.2f")
@cli.command(name="count-exact-matches")
@option(
"-i",
"--input",
required=True,
type=Path(exists=True, dir_okay=False),
help="input fastq (gzipped ok)",
)
@option(
"-o",
"--output",
required=True,
type=Path(exists=False, dir_okay=False),
help="output tsv",
)
@option(
"-r",
"--reference",
required=True,
type=Path(exists=True, dir_okay=False),
help="path to reference (input) counts file (tab-delim)",
)
@option(
"-l",
"--read-length",
required=True,
type=int,
help="read length (or, number of bases to use for matching)",
metavar="<read-length>",
)
@option("--sample", type=str, help="sample name [default: filename stem]")
def count_exact_matches(input, output, reference, read_length, sample):
"""Match reads to reference exactly.
Takes the first <read-length> bases of each read and attempt to match it
exactly to the reference sequences. Computes the number of matches for each
reference.
"""
# load reference
seq_to_ref = OrderedDict()
with open(reference, "r") as ip:
for (ref_name, seq, _) in readfq(ip):
seq_to_ref[seq[:read_length]] = ref_name
num_reads = 0
num_matched = 0
counts = Counter()
with open_maybe_compressed(input, "r") as ip:
for (name, seq, _) in tqdm(readfq(ip)):
num_reads += 1
refname = seq_to_ref.get(seq[:read_length])
if refname is not None:
num_matched += 1
counts[refname] += 1
print(
"num_reads: {}\nnum_matched: {}\nfrac_matched: {}".format(
num_reads, num_matched, num_matched / num_reads
),
file=sys.stderr,
)
if not sample:
sample = pathlib.Path(input).stem
with open(output, "w") as op:
print(f"id\t{sample}", file=op)
for (_, refname) in seq_to_ref.items():
print(f"{refname}\t{counts[refname]}", file=op)
# DEPRECATED TOOLS
@cli.command(name="split-fastq", deprecated=True)
@option("-i", "--input", required=True, help="input path (fastq file)")
@option("-o", "--output", required=True, help="output path (directory)")
@option("-n", "--chunk-size", type=int, required=True, help="number of reads per chunk")
def split_fastq(input, output, chunk_size):
"""Split fastq files into smaller chunks."""
input_file = osp.abspath(input)
output_dir = osp.abspath(output)
os.makedirs(output_dir, mode=0o755)
# convenience functions
output_file = lambda i: pjoin(output_dir, "part.{0}.fastq".format(i))
with open_maybe_compressed(input_file, "r") as input_handle:
num_processed = 0
file_num = 1
for (name, seq, qual) in readfq(input_handle):
if num_processed == 0:
op = open_maybe_compressed(output_file(file_num), "w")
print(f"@{name}\n{seq}\n+\n{qual}", file=op)
num_processed += 1
if num_processed == chunk_size:
op.close()
num_processed = 0
file_num += 1
if not op.closed:
op.close()
@cli.command(name="align-parts", deprecated=True)
@option("-i", "--input", required=True, help="input path (directory of fastq parts)")
@option("-o", "--output", required=True, help="output path (directory)")
@option(
    "-x", "--index", required=True, help="bowtie index (e.g., as specified to bowtie2)"
)
@option(
    "-b",
    "--batch-submit",
    default="",
    help="batch submit command to prefix bowtie command invocation",
)
@option(
    "-p",
    "--threads",
    default=1,
    help="Number of threads to specify for each invocation of bowtie",
)
@option(
    "-3",
    "--trim3",
    default=0,
    help="Number of bases to trim off of 3-end (passed to bowtie)",
)
@option("-d", "--dry-run", is_flag=True, help="Dry run; print out commands to execute")
def align_parts(input, output, index, batch_submit, threads, trim3, dry_run):
    """Align fastq files to peptide reference."""
    # Resolve paths; only create the output directory when actually running.
    input_dir = osp.abspath(input)
    output_dir = osp.abspath(output)
    if not dry_run:
        os.makedirs(output_dir, mode=0o755)
    bowtie_cmd_template = (
        "bowtie -n 3 -l 100 --best --nomaqround --norc -k 1 -p {threads} "
        "-3 {trim3} --quiet {index} {input} {output}"
    )
    # One bowtie invocation per fastq part, each optionally wrapped in the
    # batch-submit prefix.
    for fastq_path in glob(pjoin(input_dir, "*.fastq")):
        part_stem = osp.splitext(osp.basename(fastq_path))[0]
        aln_path = pjoin(output_dir, part_stem + ".aln")
        bowtie_cmd = bowtie_cmd_template.format(
            index=index,
            input=fastq_path,
            output=aln_path,
            threads=threads,
            trim3=trim3,
        )
        submit_cmd = "{batch_cmd} {app_cmd}".format(
            batch_cmd=batch_submit, app_cmd=bowtie_cmd
        ).strip()
        if dry_run:
            print(submit_cmd)
            continue
        proc = Popen(submit_cmd, shell=True, stdout=PIPE, universal_newlines=True)
        print(proc.communicate()[0])
@cli.command(name="compute-counts", deprecated=True)
@option("-i", "--input", required=True, help="input path (directory of aln files)")
@option("-o", "--output", required=True, help="output path (directory)")
@option(
    "-r",
    "--reference",
    required=True,
    help="path to reference (input) counts file (tab-delim)",
)
def compute_counts(input, output, reference):
    """Compute counts from aligned bam file."""
    input_dir = osp.abspath(input)
    output_dir = osp.abspath(output)
    os.makedirs(output_dir, mode=0o755)
    # load reference (i.e., input) counts
    ref_names = []
    ref_counts = []
    with open(reference, "r") as ref_handle:
        next(ref_handle)  # discard the header row
        for row in ref_handle:
            columns = row.split("\t")
            ref_names.append(columns[0].strip())
            ref_counts.append(round(float(columns[1])))
    # compute count dicts
    for aln_path in glob(pjoin(input_dir, "*.aln")):
        print(aln_path)
        sys.stdout.flush()
        clone_counts = {}
        sample = osp.splitext(osp.basename(aln_path))[0]
        # accumulate counts: column 3 of each aln row names the aligned clone
        with open(aln_path, "r") as aln_handle:
            for row in aln_handle:
                clone = row.split("\t")[2].strip()
                clone_counts[clone] = clone_counts.get(clone, 0) + 1
    # write counts
        out_path = pjoin(output_dir, sample + ".tsv")
        with open(out_path, "w") as out_handle:
            print("id\tinput\t{0}".format(sample), file=out_handle)
            for (ref_name, ref_count) in zip(ref_names, ref_counts):
                record = "{0}\t{1}\t{2}".format(
                    ref_name, ref_count, clone_counts.get(ref_name, 0)
                )
                print(record, file=out_handle)
@cli.command(name="gen-covariates", deprecated=True)
@option("-i", "--input", required=True, help="input path to merged count file")
@option(
    "-s", "--substring", required=True, help="substring to match against column names"
)
@option("-o", "--output", required=True, help="output file (recommend .tsv extension)")
def gen_covariates(input, substring, output):
    """Compute covariates for input to stat model.

    The input (`-i`) should be the merged counts file. Each column name is
    matched against the given substring. The median coverage-normalized value
    of each row from the matching columns will be output into a tab-delim file.
    This file can be used as the "reference" values for computing p-values.
    """
    src_path = osp.abspath(input)
    dst_path = osp.abspath(output)
    merged = pd.read_csv(src_path, sep="\t", header=0, index_col=0)
    # Columns whose name contains the substring form the covariate set.
    selected = [name for name in merged.columns if substring in name]
    # Scale every selected column to the median column total, then take the
    # per-row median of the scaled values.
    column_totals = merged[selected].sum()
    scaled = merged[selected] / column_totals * column_totals.median()
    row_medians = scaled.median(axis=1)
    row_medians.name = "input"
    row_medians.to_csv(dst_path, sep="\t", header=True, index_label="id")
@cli.command(name="compute-pvals", deprecated=True)
@option("-i", "--input", required=True, help="input path")
@option("-o", "--output", required=True, help="output path")
@option(
    "-b",
    "--batch-submit",
    help="batch submit command to prefix pval command invocation",
)
@option(
    "-d",
    "--dry-run",
    is_flag=True,
    help="Dry run; print out commands to execute for batch submit",
)
def compute_pvals(input, output, batch_submit, dry_run):
    """Compute p-values from counts.

    With ``--batch-submit``, fans out over every ``*.tsv`` in the input
    directory by shelling out to ``phip compute-pvals`` once per file;
    otherwise a single counts file is processed in-process using a
    generalized-Poisson model fit per input count.
    """
    from .genpois import (
        estimate_GP_distributions,
        lambda_theta_regression,
        precompute_pvals,
    )
    if batch_submit is not None:
        # run compute-pvals on each file using batch submit command
        input_dir = osp.abspath(input)
        output_dir = osp.abspath(output)
        if not dry_run:
            os.makedirs(output_dir, mode=0o755)
        pval_cmd_template = "phip compute-pvals -i {input} -o {output}"
        for input_file in glob(pjoin(input_dir, "*.tsv")):
            sample = osp.splitext(osp.basename(input_file))[0]
            output_file = pjoin(output_dir, "{0}.pvals.tsv".format(sample))
            pval_cmd = pval_cmd_template.format(input=input_file, output=output_file)
            submit_cmd = "{batch_cmd} {app_cmd}".format(
                batch_cmd=batch_submit, app_cmd=pval_cmd
            )
            if dry_run:
                print(submit_cmd.strip())
            else:
                # NOTE(review): shell=True with interpolated paths — assumes
                # trusted, shell-safe filenames; confirm before reuse.
                p = Popen(
                    submit_cmd.strip(), shell=True, stdout=PIPE, universal_newlines=True
                )
                print(p.communicate()[0])
    else:
        # actually compute p-vals on single file
        input_file = osp.abspath(input)
        output_file = osp.abspath(output)
        clones = []
        samples = None
        input_counts = []
        output_counts = []
        # Input format: id <tab> input_count <tab> sample1 <tab> sample2 ...
        with open(input_file, "r") as ip:
            header_fields = next(ip).split("\t")
            samples = [f.strip() for f in header_fields[2:]]
            for line in tqdm(ip, desc="Loading data"):
                fields = line.split("\t")
                clones.append(fields[0].strip())
                input_counts.append(int(fields[1]))
                output_counts.append(np.int_(fields[2:]))
        input_counts = np.asarray(input_counts)
        # pseudocounts to combat negative regressed theta:
        output_counts = np.asarray(output_counts) + 1
        uniq_input_values = list(set(input_counts))
        # Estimate generalized Poisson distributions for every input count
        (lambdas, thetas, idxs) = estimate_GP_distributions(
            input_counts, output_counts, uniq_input_values
        )
        # Regression on all of the theta and lambda values computed
        (lambda_fits, theta_fits) = lambda_theta_regression(lambdas, thetas, idxs)
        # Precompute CDF for possible input-output combinations
        uniq_combos = []
        for i in range(output_counts.shape[1]):
            uniq_combos.append(set(zip(input_counts, output_counts[:, i])))
        log10pval_hash = precompute_pvals(lambda_fits, theta_fits, uniq_combos)
        # Compute p-values for each clone using regressed GP parameters
        with open(output_file, "w") as op:
            header = "\t".join(["id"] + samples)
            print(header, file=op)
            for (clone, ic, ocs) in zip(
                tqdm(clones, desc="Writing scores"), input_counts, output_counts
            ):
                fields = [clone]
                for (i, oc) in enumerate(ocs):
                    fields.append("{:.2f}".format(log10pval_hash[(i, ic, oc)]))
                print("\t".join(fields), file=op)
| lasersonlab/phip-stat | phip/cli.py | Python | apache-2.0 | 33,134 | [
"Bowtie"
] | 13b03c00bd01d5fb586e79e6b6948994c514fef421abb4f3c6647c91558f6761 |
import argparse
from collections import defaultdict
import numpy
import pysam
def Parser():
    """Define the command-line interface and return the parsed arguments."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--input', action="store", type=str, help="bam alignment file")
    add('--minquery', type=int, help="Minimum readsize of query reads (nt) - must be an integer")
    add('--maxquery', type=int, help="Maximum readsize of query reads (nt) - must be an integer")
    add('--mintarget', type=int, help="Minimum readsize of target reads (nt) - must be an integer")
    add('--maxtarget', type=int, help="Maximum readsize of target reads (nt) - must be an integer")
    add('--minscope', type=int, help="Minimum overlap analyzed (nt) - must be an integer")
    add('--maxscope', type=int, help="Maximum overlap analyzed (nt) - must be an integer")
    add('--output_h', action="store", type=str, help="h-signature dataframe")
    add('--output_z', action="store", type=str, help="z-signature dataframe")
    return parser.parse_args()
class Map:
    """Build per-chromosome small-RNA overlap signatures from a BAM file.

    On construction this reads all alignments, computes the z-signature
    (pair counts per overlap) and the h-signature (overlap probabilities)
    and writes them to ``output_z`` and ``output_h`` respectively.
    """

    def __init__(self, bam_file, minquery=23, maxquery=29, mintarget=23,
                 maxtarget=29, minscope=1, maxscope=19, output_h='',
                 output_z=''):
        # Read-size windows for "query" and "target" reads and the range of
        # 5'-overlap lengths to analyze (all ranges are inclusive of the max).
        self.bam_object = pysam.AlignmentFile(bam_file, 'rb')
        self.query_range = range(minquery, maxquery + 1)
        self.target_range = range(mintarget, maxtarget + 1)
        self.scope = range(minscope, maxscope + 1)
        self.H = open(output_h, 'w')
        self.Z = open(output_z, 'w')
        self.chromosomes = dict(zip(self.bam_object.references,
                                    self.bam_object.lengths))
        # Everything is computed eagerly in the constructor and written out.
        self.map_dict = self.create_map(self.bam_object)
        self.query_positions = self.compute_query_positions()
        self.Z.write(self.compute_signature_pairs())
        self.H.write(self.compute_signature_h())
        self.H.close()
        self.Z.close()

    def create_map(self, bam_object):
        '''
        Returns a map_dictionary {(chromosome,read_position,polarity):
        [read_length, ...]}
        '''
        map_dictionary = defaultdict(list)
        # get empty value for start and end of each chromosome
        for chrom in self.chromosomes:
            map_dictionary[(chrom, 1, 'F')] = []
            map_dictionary[(chrom, self.chromosomes[chrom], 'F')] = []
        for chrom in self.chromosomes:
            for read in bam_object.fetch(chrom):
                # Keyed by the 5' end: reference_end for reverse reads,
                # 1-based reference_start for forward reads.
                if read.is_reverse:
                    map_dictionary[(chrom, read.reference_end,
                                    'R')].append(read.query_alignment_length)
                else:
                    map_dictionary[(chrom, read.reference_start+1,
                                    'F')].append(read.query_alignment_length)
        return map_dictionary

    def compute_query_positions(self):
        ''' this method does not filter on read size, just forward reads
        that overlap reverse reads in the overlap range'''
        all_query_positions = defaultdict(list)
        for genomicKey in self.map_dict.keys():
            chrom, coord, pol = genomicKey
            for i in self.scope:
                # A forward position qualifies as soon as any reverse read
                # starts within the analyzed overlap window.
                if pol == 'F' and len(self.map_dict[chrom,
                                                    coord+i-1,
                                                    'R']) > 0:
                    all_query_positions[chrom].append(coord)
                    break
        for chrom in all_query_positions:
            # Deduplicate and sort coordinates per chromosome.
            all_query_positions[chrom] = sorted(
                list(set(all_query_positions[chrom])))
        return all_query_positions

    def countpairs(self, uppers, lowers):
        """Count one-to-one query/target pairings between forward (uppers)
        and reverse (lowers) read-size lists; each lower read is consumed
        at most once."""
        query_range = self.query_range
        target_range = self.target_range
        # Keep only reads whose size falls in either analyzed window.
        uppers = [size for size in uppers if size in query_range or size in
                  target_range]
        lowers = [size for size in lowers if size in query_range or size in
                  target_range]
        paired = []
        for upread in uppers:
            for downread in lowers:
                if (upread in query_range and downread in target_range) or (
                        upread in target_range and downread in query_range):
                    paired.append(upread)
                    lowers.remove(downread)  # consume this partner
                    break
        return len(paired)

    def compute_signature_pairs(self):
        """Tabulate pair counts per chromosome and overlap length
        (the z-signature) and return the formatted table string."""
        frequency_table = defaultdict(dict)
        scope = self.scope
        for chrom in self.chromosomes:
            for overlap in scope:
                frequency_table[chrom][overlap] = 0
        for chrom in self.query_positions:
            for coord in self.query_positions[chrom]:
                for overlap in scope:
                    uppers = self.map_dict[chrom, coord, 'F']
                    lowers = self.map_dict[chrom, coord+overlap-1, 'R']
                    frequency_table[chrom][overlap] += self.countpairs(uppers,
                                                                       lowers)
        # compute overlaps for all chromosomes merged
        for overlap in scope:
            accumulator = []
            for chrom in frequency_table:
                if chrom != 'all_chromosomes':
                    accumulator.append(frequency_table[chrom][overlap])
            frequency_table['all_chromosomes'][overlap] = sum(accumulator)
        return self.stringify_table(frequency_table)

    def signature_tables(self):
        """Build {chrom: {signed 5'-coordinate: read count}} tables for query-
        and target-sized reads; reverse-strand coordinates are negated."""
        query_range = self.query_range
        target_range = self.target_range
        Query_table = defaultdict(dict)
        Target_table = defaultdict(dict)
        for key in self.map_dict:
            for size in self.map_dict[key]:
                if size in query_range or size in target_range:
                    if key[2] == 'F':
                        coordinate = key[1]
                    else:
                        coordinate = -key[1]
                    if size in query_range:
                        Query_table[key[0]][coordinate] = Query_table[key[0]].get(
                            coordinate, 0) + 1
                    if size in target_range:
                        Target_table[key[0]][coordinate] = \
                            Target_table[key[0]].get(coordinate, 0) + 1
        return Query_table, Target_table

    def compute_signature_h(self):
        """Compute overlap probabilities (h-signature) per chromosome and
        return the formatted table string."""
        scope = self.scope
        Query_table, Target_table = self.signature_tables()
        frequency_table = defaultdict(dict)
        for chrom in self.chromosomes:
            for overlap in scope:
                frequency_table[chrom][overlap] = 0
        for chrom in Query_table:
            Total_Query_Numb = 0
            for coord in Query_table[chrom]:
                Total_Query_Numb += Query_table[chrom][coord]
            for coord in Query_table[chrom]:
                local_table = dict([(overlap, 0) for overlap in scope])
                number_of_targets = 0
                for overlap in scope:
                    # Target 5' end for this overlap sits at -coord-overlap+1
                    # on the opposite strand (signed-coordinate convention).
                    local_table[overlap] += Query_table[chrom][coord] * \
                        Target_table[chrom].get(-coord - overlap + 1, 0)
                    number_of_targets += Target_table[chrom].get(
                        -coord - overlap + 1, 0)
                for overlap in scope:
                    try:
                        frequency_table[chrom][overlap] += \
                            local_table[overlap] / number_of_targets \
                            / float(Total_Query_Numb)
                    except ZeroDivisionError:
                        continue
        # compute overlap probabilities for all chromosomes merged
        general_frequency_table = dict([(overlap, 0) for overlap in scope])
        total_aligned_reads = 0
        # NOTE(review): this double loop adds count(chrom) once per overlap,
        # so total_aligned_reads carries an extra factor of len(scope); it
        # cancels only if intended as a normalization constant — confirm.
        for chrom in frequency_table:
            for overlap in frequency_table[chrom]:
                total_aligned_reads += self.bam_object.count(chrom)
        for chrom in frequency_table:
            for overlap in frequency_table[chrom]:
                try:
                    general_frequency_table[overlap] += \
                        frequency_table[chrom][overlap] / total_aligned_reads \
                        * self.bam_object.count(chrom)
                except ZeroDivisionError:
                    continue
        for overlap in general_frequency_table:
            frequency_table['all_chromosomes'][overlap] = \
                general_frequency_table[overlap]
        return self.stringify_table(frequency_table)

    def stringify_table(self, frequency_table):
        '''
        method both to compute z-score and to return a writable string
        '''
        tablestring = []
        for chrom in sorted(frequency_table):
            # z-score each chromosome's values against its own mean/std.
            accumulator = []
            for overlap in frequency_table[chrom]:
                accumulator.append(frequency_table[chrom][overlap])
            z_mean = numpy.mean(accumulator)
            z_std = numpy.std(accumulator)
            if z_std == 0:
                # Degenerate case: constant values get a z-score of 0.
                for overlap in sorted(frequency_table[chrom]):
                    tablestring.append('%s\t%s\t%s\t%s\n' % (
                        chrom, str(overlap),
                        str(frequency_table[chrom][overlap]), str(0)))
            else:
                for overlap in sorted(frequency_table[chrom]):
                    tablestring.append('%s\t%s\t%s\t%s\n' % (
                        chrom, str(overlap),
                        str(frequency_table[chrom][overlap]),
                        str((frequency_table[chrom][overlap] - z_mean)/z_std)))
        return ''.join(tablestring)
if __name__ == "__main__":
    # Parse CLI arguments and run the full pipeline; Map's constructor
    # performs all computation and writes both output files.
    args = Parser()
    mapobj = Map(args.input, args.minquery, args.maxquery, args.mintarget,
                 args.maxtarget, args.minscope, args.maxscope, args.output_h,
                 args.output_z)
| drosofff/tools-artbio | tools/small_rna_signatures/signature.py | Python | mit | 10,056 | [
"pysam"
] | 808be14d90dc010dd12b045f12fa644fe12a918f47bc75445dc2410563d52993 |
# Boil.py
# Aaron Taylor
# Moose Abumeeiz
#
# This is the class for the red boil that appears as an enemy
# and shoots in random directions
#
from pygame import *
from func import *
from const import *
from Enemy import *
from Animation import *
from time import time as cTime
from random import choice
from Tear import *
class Boil(Enemy):
    """Stationary enemy that grows to full size and then periodically
    shoots tears in a random cardinal direction."""
    # Distance (in grid units) within which the boil damages the character.
    hurtDistance = 0.6
    # Starting hit points; mirrored by the grow animation's frame index.
    health = 10

    def __init__(self, xy, sounds, textures):
        self.x, self.y = xy
        self.sounds = sounds
        # Split textures
        # 10 frames laid out on a 4-column sprite sheet, reversed so the
        # animation plays from small to full size.
        self.frames = [textures["enemies"]["boil"].subsurface(i*64-(i//4)*(64*4), (i//4)*64, 64, 64) for i in range(10)][::-1]
        # Record sound and textures for tears
        self.tearTextures = textures["tears"]
        self.tearSounds = sounds["tear"]
        # How long it takes to grow + current size
        self.size = 0
        self.advanceTime = 0.5
        # When the animation was started
        self.start = cTime()
        # Animation for grow
        self.animation = Animation(self.frames, 10, shouldLoop=False)
        # How long the boil has been full
        self.sinceFull = -1
        # How many tears the boil has active
        self.tears = []

    def hurt(self, ammount):
        # Take damage and reduce size
        # Shrinking the animation keeps health and sprite in sync (render
        # re-reads health from the animation frame index each tick).
        if not self.dead:
            self.health -= ammount
            self.animation.setFrame(self.animation.currentIndex-ammount)
            if self.health <= 0:
                self.die()

    def render(self, surface, time, character, nodes, paths, bounds, obsticals):
        # Set current health to match animation
        self.health = self.animation.currentIndex
        if not self.dead:
            self.checkHurt(character, time)  # Check for any damage
            if self.animation.currentIndex == self.animation.frameCount-1:
                # The boil is full size
                if time-self.sinceFull >= 1:
                    # Ensure the boil doesnt shoot too often
                    # Fire one tear in a random cardinal direction.
                    self.tears.append(Tear(choice([(1,0),(-1,0),(0,-1),(0,1)]), (GRIDX+GRATIO*self.x, GRIDY+GRATIO*self.y), (0, 0), 1, 1, 1, False, self.tearTextures, self.tearSounds))
                    self.sinceFull = time
            for tear in self.tears[:]:
                # Render the boil's tear
                # Iterate over a copy so spent tears can be removed safely.
                if not tear.render(surface, time, bounds, obsticals):
                    self.tears.remove(tear)
            surface.blit(self.animation.render(time), (GRIDX+GRATIO*self.x-16, GRIDY+GRATIO*self.y-32))
        # False (dead) tells the caller to drop this enemy.
        return not self.dead
| ExPHAT/binding-of-isaac | Boil.py | Python | mit | 2,158 | [
"MOOSE"
] | 394db5b3a57649a4daf4aad0d011a4bdf128d3de906a96fe0f02e70fd9ccbb45 |
"""Module defining ``Eigensolver`` classes."""
import numpy as np
from gpaw.utilities.blas import axpy, gemm, rk, r2k
from gpaw.utilities.lapack import general_diagonalize
from gpaw.utilities import unpack
from gpaw.eigensolvers.eigensolver import Eigensolver
class RMM_DIIS(Eigensolver):
    """RMM-DIIS eigensolver

    It is expected that the trial wave functions are orthonormal
    and the integrals of projector functions and wave functions
    ``nucleus.P_uni`` are already calculated

    Solution steps are:

    * Subspace diagonalization
    * Calculation of residuals
    * Improvement of wave functions: psi' = psi + lambda PR + lambda PR'
    * Orthonormalization"""

    def __init__(self, keep_htpsit=True, blocksize=10, niter=3, rtol=1e-16,
                 limit_lambda=False, use_rayleigh=False, trial_step=0.1):
        """Initialize RMM-DIIS eigensolver.

        Parameters:

        limit_lambda: dictionary
            determines if step length should be limited
            supported keys:
            'absolute':True/False limit the absolute value
            'upper':float upper limit for lambda
            'lower':float lower limit for lambda
        """
        Eigensolver.__init__(self, keep_htpsit, blocksize)
        self.niter = niter
        self.rtol = rtol
        self.limit_lambda = limit_lambda
        self.use_rayleigh = use_rayleigh
        if use_rayleigh:
            # Rayleigh-quotient minimization is implemented band-by-band only.
            self.blocksize = 1
        self.trial_step = trial_step
        self.first = True

    def iterate_one_k_point(self, hamiltonian, wfs, kpt):
        """Do a single RMM-DIIS iteration for the kpoint

        Returns the accumulated (weighted) residual error and the
        subspace-diagonalized wave functions.
        """
        psit_nG, R_nG = self.subspace_diagonalize(hamiltonian, wfs, kpt)
        self.timer.start('RMM-DIIS')
        self.timer.start('Calculate residuals')
        if self.keep_htpsit:
            self.calculate_residuals(kpt, wfs, hamiltonian, psit_nG,
                                     kpt.P_ani, kpt.eps_n, R_nG)
        self.timer.stop('Calculate residuals')

        def integrate(a_G, b_G):
            # Real part of the grid inner product; summed over the domain
            # communicator later via comm.sum.
            return np.real(wfs.integrate(a_G, b_G, global_integral=False))

        comm = wfs.gd.comm
        B = self.blocksize
        dR_xG = wfs.empty(B, q=kpt.q)
        P_axi = wfs.pt.dict(B)
        errors_x = np.zeros(B)
        # NOTE(review): state_done is reset per block but never set to True,
        # so the `continue` in the DIIS loop appears to be dead code — confirm.
        state_done = np.zeros(B, dtype=bool)
        errors_n = np.zeros(wfs.bd.mynbands)

        # Arrays needed for DIIS step
        if self.niter > 1:
            psit_diis_nxG = wfs.empty(B * self.niter, q=kpt.q)
            R_diis_nxG = wfs.empty(B * self.niter, q=kpt.q)
            # P_diis_anxi = wfs.pt.dict(B * self.niter)
        eig_n = np.zeros(self.niter)  # eigenvalues for diagonalization
        # not needed in any step

        error = 0.0
        # Process the bands in blocks of (at most) B.
        for n1 in range(0, wfs.bd.mynbands, B):
            state_done[:] = False
            n2 = n1 + B
            if n2 > wfs.bd.mynbands:
                # Last, partial block: shrink the work arrays accordingly.
                n2 = wfs.bd.mynbands
                B = n2 - n1
                P_axi = dict((a, P_xi[:B]) for a, P_xi in P_axi.items())
                dR_xG = dR_xG[:B]
            n_x = range(n1, n2)
            psit_xG = psit_nG[n1:n2]

            self.timer.start('Calculate residuals')
            if self.keep_htpsit:
                R_xG = R_nG[n1:n2]
            else:
                R_xG = wfs.empty(B, q=kpt.q)
                wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, psit_xG, R_xG)
                wfs.pt.integrate(psit_xG, P_axi, kpt.q)
                self.calculate_residuals(kpt, wfs, hamiltonian, psit_xG,
                                         P_axi, kpt.eps_n[n_x], R_xG, n_x)
            self.timer.stop('Calculate residuals')
            errors_x[:] = 0.0
            for n in range(n1, n2):
                # Residual norms are weighted by occupation (or k-point
                # weight when converging a fixed number of bands).
                if kpt.f_n is None:
                    weight = kpt.weight
                else:
                    weight = kpt.f_n[n]
                if self.nbands_converge != 'occupied':
                    if wfs.bd.global_index(n) < self.nbands_converge:
                        weight = kpt.weight
                    else:
                        weight = 0.0
                errors_x[n - n1] = weight * integrate(R_xG[n - n1],
                                                      R_xG[n - n1])
                errors_n[n] = errors_x[n - n1]
            comm.sum(errors_x)
            error += np.sum(errors_x)

            # Insert first vectors and residuals for DIIS step
            if self.niter > 1:
                # Save the previous vectors contiguously for each band
                # in the block
                psit_diis_nxG[:B * self.niter:self.niter] = psit_xG
                R_diis_nxG[:B * self.niter:self.niter] = R_xG

            # Precondition the residual:
            self.timer.start('precondition')
            # ekin_x = self.preconditioner.calculate_kinetic_energy(
            #     R_xG, kpt)
            ekin_x = self.preconditioner.calculate_kinetic_energy(
                psit_xG, kpt)
            dpsit_xG = self.preconditioner(R_xG, kpt, ekin_x)
            self.timer.stop('precondition')

            # Calculate the residual of dpsit_G, dR_G = (H - e S) dpsit_G:
            # self.timer.start('Apply Hamiltonian')
            wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, dpsit_xG, dR_xG)
            # self.timer.stop('Apply Hamiltonian')
            self.timer.start('projections')
            wfs.pt.integrate(dpsit_xG, P_axi, kpt.q)
            self.timer.stop('projections')

            if self.use_rayleigh:
                # Choose the step length by minimizing the Rayleigh quotient
                # (blocksize is forced to 1 in this mode).
                self.timer.start('Minimize Rayleigh')
                i1 = wfs.integrate(psit_xG, dR_xG,
                                   global_integral=False).item()
                i2 = wfs.integrate(dpsit_xG, dR_xG,
                                   global_integral=False).item()
                i3 = wfs.integrate(dpsit_xG, psit_xG,
                                   global_integral=False).item()
                i4 = wfs.integrate(dpsit_xG, dpsit_xG,
                                   global_integral=False).item()
                # Add the PAW atomic corrections to the four integrals.
                for a, dP_xi in P_axi.items():
                    P_i = kpt.P_ani[a][n1]
                    dP_i = dP_xi[0]
                    dH_ii = unpack(hamiltonian.dH_asp[a][kpt.s])
                    dO_ii = wfs.setups[a].dO_ii
                    i1 += np.dot(P_i,
                                 np.dot(dH_ii, dP_i.conj())).item()
                    i2 += np.dot(dP_i,
                                 np.dot(dH_ii, dP_i.conj())).item()
                    i3 += np.dot(dP_i,
                                 np.dot(dO_ii, P_i.conj())).item()
                    i4 += np.dot(dP_i,
                                 np.dot(dO_ii, dP_i.conj())).item()
                i1 = comm.sum(i1)
                i2 = comm.sum(i2)
                i3 = comm.sum(i3)
                i4 = comm.sum(i4)
                # Roots of the quadratic from d/dlam of the Rayleigh quotient.
                a = np.real(i2 * i3 - i1 * i4)
                b = np.real(i2 - kpt.eps_n[n1] * i4)
                c = np.real(i1 - kpt.eps_n[n1] * i3)
                # print "A,B,C", a,b,c
                lam_x = np.array((-2.0 * c / (b + np.sqrt(b**2 - 4.0 * a * c)),))
                self.timer.stop('Minimize Rayleigh')
                self.timer.start('Calculate residuals')
                self.calculate_residuals(kpt, wfs, hamiltonian, dpsit_xG,
                                         P_axi, kpt.eps_n[n_x], dR_xG, n_x,
                                         calculate_change=True)
                self.timer.stop('Calculate residuals')
            else:
                self.timer.start('Calculate residuals')
                self.calculate_residuals(kpt, wfs, hamiltonian, dpsit_xG,
                                         P_axi, kpt.eps_n[n_x], dR_xG, n_x,
                                         calculate_change=True)
                self.timer.stop('Calculate residuals')

                # Find lam that minimizes the norm of R'_G = R_G + lam dR_G
                self.timer.start('Find lambda')
                RdR_x = np.array([integrate(dR_G, R_G)
                                  for R_G, dR_G in zip(R_xG, dR_xG)])
                dRdR_x = np.array([integrate(dR_G, dR_G) for dR_G in dR_xG])
                comm.sum(RdR_x)
                comm.sum(dRdR_x)
                lam_x = -RdR_x / dRdR_x
                self.timer.stop('Find lambda')
                # print "Lam_x:", lam_x
                # Limit abs(lam) to [0.15, 1.0]
                if self.limit_lambda:
                    upper = self.limit_lambda['upper']
                    lower = self.limit_lambda['lower']
                    if self.limit_lambda.get('absolute', False):
                        # Clamp |lam| while preserving its sign.
                        lam_x = np.where(np.abs(lam_x) < lower,
                                         lower * np.sign(lam_x), lam_x)
                        lam_x = np.where(np.abs(lam_x) > upper,
                                         upper * np.sign(lam_x), lam_x)
                    else:
                        lam_x = np.where(lam_x < lower, lower, lam_x)
                        lam_x = np.where(lam_x > upper, upper, lam_x)
                # lam_x[:] = 0.1

            # New trial wavefunction and residual
            self.timer.start('Update psi')
            for lam, psit_G, dpsit_G, R_G, dR_G in zip(lam_x, psit_xG,
                                                       dpsit_xG, R_xG, dR_xG):
                axpy(lam, dpsit_G, psit_G)  # psit_G += lam * dpsit_G
                axpy(lam, dR_G, R_G)  # R_G += lam** dR_G
            self.timer.stop('Update psi')

            self.timer.start('DIIS step')
            # DIIS step
            for nit in range(1, self.niter):
                # Do not perform DIIS if error is small
                # if abs(error_block / B) < self.rtol:
                #     break

                # Update the subspace
                psit_diis_nxG[nit:B * self.niter:self.niter] = psit_xG
                R_diis_nxG[nit:B * self.niter:self.niter] = R_xG
                # XXX Only integrals of nit old psits would be needed
                # self.timer.start('projections')
                # wfs.pt.integrate(psit_diis_nxG, P_diis_anxi, kpt.q)
                # self.timer.stop('projections')
                if nit > 1 or self.limit_lambda:
                    for ib in range(B):
                        if state_done[ib]:
                            continue
                        istart = ib * self.niter
                        iend = istart + nit + 1
                        # Residual matrix
                        self.timer.start('Construct matrix')
                        R_nn = wfs.integrate(R_diis_nxG[istart:iend],
                                             R_diis_nxG[istart:iend],
                                             global_integral=True)
                        # Full matrix
                        # Bordered DIIS system: residual overlaps plus the
                        # Lagrange-multiplier row/column enforcing sum(alpha)=1.
                        A_nn = -np.ones((nit + 2, nit + 2), wfs.dtype)
                        A_nn[:nit+1, :nit+1] = R_nn[:]
                        A_nn[-1,-1] = 0.0
                        x_n = np.zeros(nit + 2, wfs.dtype)
                        x_n[-1] = -1.0
                        self.timer.stop('Construct matrix')
                        self.timer.start('Linear solve')
                        alpha_i = np.linalg.solve(A_nn, x_n)[:-1]
                        self.timer.stop('Linear solve')
                        self.timer.start('Update trial vectors')
                        # Rebuild psi and R as the alpha-weighted combination
                        # of the stored history vectors.
                        psit_xG[ib] = alpha_i[nit] * psit_diis_nxG[istart + nit]
                        R_xG[ib] = alpha_i[nit] * R_diis_nxG[istart + nit]
                        for i in range(nit):
                            # axpy(alpha_i[i], psit_diis_nxG[istart + i],
                            #      psit_diis_nxG[istart + nit])
                            # axpy(alpha_i[i], R_diis_nxG[istart + i],
                            #      R_diis_nxG[istart + nit])
                            axpy(alpha_i[i], psit_diis_nxG[istart + i],
                                 psit_xG[ib])
                            axpy(alpha_i[i], R_diis_nxG[istart + i],
                                 R_xG[ib])
                        self.timer.stop('Update trial vectors')

                if nit < self.niter - 1:
                    self.timer.start('precondition')
                    # ekin_x = self.preconditioner.calculate_kinetic_energy(
                    #     R_xG, kpt)
                    dpsit_xG = self.preconditioner(R_xG, kpt, ekin_x)
                    self.timer.stop('precondition')
                    for psit_G, lam, dpsit_G in zip(psit_xG, lam_x, dpsit_xG):
                        axpy(lam, dpsit_G, psit_G)

                    # Calculate the new residuals
                    self.timer.start('Calculate residuals')
                    wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, psit_xG,
                                                 R_xG)
                    wfs.pt.integrate(psit_xG, P_axi, kpt.q)
                    self.calculate_residuals(kpt, wfs, hamiltonian, psit_xG,
                                             P_axi, kpt.eps_n[n_x], R_xG, n_x,
                                             calculate_change=True)
                    self.timer.stop('Calculate residuals')
                    self.timer.start('Calculate errors')
                    errors_new_x = np.zeros(B)
                    # errors_x[:] = 0.0
                    for n in range(n1, n2):
                        if kpt.f_n is None:
                            weight = kpt.weight
                        else:
                            weight = kpt.f_n[n]
                        if self.nbands_converge != 'occupied':
                            if wfs.bd.global_index(n) < self.nbands_converge:
                                weight = kpt.weight
                            else:
                                weight = 0.0
                        errors_new_x[n-n1] += weight * integrate(R_xG[n - n1],
                                                                 R_xG[n - n1])
                    # NOTE(review): errors_new_x is computed above but never
                    # used, and errors_x (already summed once) is summed again
                    # here — this looks like a bug; verify against upstream.
                    comm.sum(errors_x)
                    self.timer.stop('Calculate errors')

            self.timer.stop('DIIS step')
            # Final trial step
            self.timer.start('precondition')
            # ekin_x = self.preconditioner.calculate_kinetic_energy(
            #     R_xG, kpt)
            dpsit_xG = self.preconditioner(R_xG, kpt, ekin_x)
            self.timer.stop('precondition')
            self.timer.start('Update psi')
            if self.trial_step is not None:
                # Override the per-band lambdas with a fixed trial step.
                lam_x[:] = self.trial_step
            for lam, psit_G, dpsit_G in zip(lam_x, psit_xG, dpsit_xG):
                axpy(lam, dpsit_G, psit_G)  # psit_G += lam * dpsit_G
            self.timer.stop('Update psi')
            # norm = wfs.integrate(psit_xG[0], psit_xG[0])
            # wfs.pt.integrate(psit_xG, P_axi, kpt.q)
            # for a, P_xi in P_axi.items():
            #     dO_ii = wfs.setups[a].dO_ii
            #     norm += np.vdot(P_xi[0], np.inner(dO_ii, P_xi[0]))
            # norm = comm.sum(np.real(norm).item())
            # psit_xG /= np.sqrt(norm)

        self.timer.stop('RMM-DIIS')
        return error, psit_nG

    def __repr__(self):
        repr_string = 'RMM-DIIS eigensolver\n'
        repr_string += ' keep_htpsit: %s\n' % self.keep_htpsit
        repr_string += ' Block size: %d\n' % self.blocksize
        repr_string += ' DIIS iterations: %d\n' % self.niter
        repr_string += ' Threshold for DIIS: %5.1e\n' % self.rtol
        repr_string += ' Limit lambda: %s\n' % self.limit_lambda
        repr_string += ' use_rayleigh: %s\n' % self.use_rayleigh
        repr_string += ' trial_step: %s' % self.trial_step
        return repr_string
| robwarm/gpaw-symm | gpaw/eigensolvers/rmm_diis.py | Python | gpl-3.0 | 15,823 | [
"GPAW"
] | 69066fc0a321b5a802b63aebb92cbda126d05064da1708ae0c5eb7ce1ebf1d88 |
# suppyrFRB.py ---
#
# Filename: suppyrFRB.py
# Description:
# Author: subhasis ray
# Maintainer:
# Created: Mon Sep 21 01:45:00 2009 (+0530)
# Version:
# Last-Updated: Fri Oct 21 17:10:23 2011 (+0530)
# By: Subhasis Ray
# Update #: 129
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import string
from datetime import datetime
import config
import trbutil
import moose
from cell import *
from capool import CaPool
class SupPyrFRB(TraubCell):
    """Superficial pyramidal FRB (fast rhythmic bursting) cell of the
    Traub 2005 thalamocortical network model (Python 2 / MOOSE)."""
    # Reversal potentials (V) and Ca decay time constant (s) applied to the
    # channel prototypes of this cell type.
    chan_params = {
        'ENa': 50e-3,
        'EK': -95e-3,
        'EAR': -35e-3,
        'ECa': 125e-3,
        'EGABA': -81e-3,
        'TauCa': 20e-3
    }
    ca_dep_chans = ['KAHP', 'KC']  # channels gated by [Ca2+]
    num_comp = 74  # number of compartments in the prototype morphology
    presyn = 72    # index of the presynaptic (recording) compartment
    proto_file = 'SupPyrFRB.p'
    # level maps level number to the set of compartments belonging to it
    level = None
    # depth stores a map between level number and the depth of the compartments.
    depth = {
        1: 850 * 1e-6,
        2: 885 * 1e-6,
        3: 920 * 1e-6,
        4: 955 * 1e-6,
        5: 825 * 1e-6,
        6: 775 * 1e-6,
        7: 725 * 1e-6,
        8: 690 * 1e-6,
        9: 655 * 1e-6,
        10: 620 * 1e-6,
        11: 585 * 1e-6,
        12: 550 * 1e-6,
    }
    # Shared prototype cell read once at class-definition time.
    prototype = TraubCell.read_proto(proto_file, "SupPyrFRB", level_dict=level, depth_dict=depth, params=chan_params)

    def __init__(self, *args):
        # Build from the prototype, then slow the somatic Ca pool down
        # (100 ms) relative to the 20 ms used elsewhere in the cell.
        TraubCell.__init__(self, *args)
        soma_ca_pool = moose.CaConc(self.soma.path + '/CaPool')
        soma_ca_pool.tau = 100e-3

    def _topology(self):
        # Deprecated: topology now comes from the .p prototype file.
        # The statements after the raise are unreachable, kept for reference.
        raise Exception, 'Deprecated'
        self.presyn = 72

    def _setup_passive(self):
        # Deprecated: passive properties now come from the prototype.
        raise Exception, 'Deprecated'
        for comp in self.comp[1:]:
            comp.initVm = -70e-3

    def _setup_channels(self):
        # Deprecated: channel setup now comes from the prototype.
        # Unreachable body retained to document the original wiring of
        # Ca channels -> CaPool -> Ca-dependent K channels.
        raise Exception, 'Deprecated'
        for comp in self.comp[1:]:
            ca_pool = None
            ca_dep_chans = []
            ca_chans = []
            for child in comp.children():
                obj = moose.Neutral(child)
                if obj.name == 'CaPool':
                    ca_pool = moose.CaConc(child)
                    ca_pool.tau = 20e-3
                else:
                    obj_class = obj.className
                    if obj_class == 'HHChannel':
                        obj = moose.HHChannel(child)
                        # if not obj.name in self.chan_list:
                        #     obj.Gbar = 0.0
                        pyclass = eval(obj.name)
                        if issubclass(pyclass, KChannel):
                            obj.Ek = -95e-3
                            if issubclass(pyclass, KCaChannel):
                                ca_dep_chans.append(obj)
                        elif issubclass(pyclass, NaChannel):
                            obj.Ek = 50e-3
                        elif issubclass(pyclass, CaChannel):
                            obj.Ek = 125e-3
                            if issubclass(pyclass, CaL):
                                ca_chans.append(obj)
                        elif issubclass(pyclass, AR):
                            obj.Ek = -35e-3
            if ca_pool:
                for channel in ca_chans:
                    channel.connect('IkSrc', ca_pool, 'current')
                    config.LOGGER.debug(comp.name + ':' + channel.name + ' connected to ' + ca_pool.name)
                for channel in ca_dep_chans:
                    channel.useConcentration = 1
                    ca_pool.connect("concSrc", channel, "concen")
                    config.LOGGER.debug(comp.name + ': ' + ca_pool.name + ' connected to ' + channel.name)
        obj = moose.CaConc(self.soma.path + '/CaPool')
        obj.tau = 100e-3

    @classmethod
    def test_single_cell(cls):
        """Simulates a single superficial pyramidal FRB cell and plots
        the Vm and [Ca2+]"""
        config.LOGGER.info("/**************************************************************************")
        config.LOGGER.info(" *")
        config.LOGGER.info(" * Simulating a single cell: %s" % (cls.__name__))
        config.LOGGER.info(" *")
        config.LOGGER.info(" **************************************************************************/")
        sim = Simulation(cls.__name__)
        mycell = SupPyrFRB(SupPyrFRB.prototype, sim.model.path + "/SupPyrFRB")
        print 'Created cell:', mycell.path
        # Record Vm at the presynaptic compartment and, if present,
        # somatic [Ca2+] and the KC conductance.
        vm_table = mycell.comp[mycell.presyn].insertRecorder('Vm_suppyrFRB', 'Vm', sim.data)
        ca_conc_path = mycell.soma.path + '/CaPool'
        ca_table = None
        if config.context.exists(ca_conc_path):
            ca_conc = moose.CaConc(ca_conc_path)
            ca_table = moose.Table('Ca_suppyrFRB', sim.data)
            ca_table.stepMode = 3
            ca_conc.connect('Ca', ca_table, 'inputRequest')
        kc_path = mycell.soma.path + '/KC'
        gk_table = None
        if config.context.exists(kc_path):
            gk_table = moose.Table('gkc', sim.data)
            gk_table.stepMode = 3
            kc = moose.HHChannel(kc_path)
            kc.connect('Gk', gk_table, 'inputRequest')
        pymoose.showmsg(ca_conc)
        # Current injection: 0.3 nA pulse, 50 ms after 50 ms delay.
        pulsegen = mycell.soma.insertPulseGen('pulsegen', sim.model, firstLevel=3e-10, firstDelay=50e-3, firstWidth=50e-3)
        # pulsegen1 = mycell.soma.insertPulseGen('pulsegen1', sim.model, firstLevel=3e-7, firstDelay=150e-3, firstWidth=10e-3)
        sim.schedule()
        if mycell.has_cycle():
            print "WARNING!! CYCLE PRESENT IN CICRUIT."
        t1 = datetime.now()
        sim.run(200e-3)
        t2 = datetime.now()
        delta = t2 - t1
        print 'simulation time: ', delta.seconds + 1e-6 * delta.microseconds
        sim.dump_data('data')
        if config.has_pylab:
            # Compare against NEURON reference data when available.
            mus_vm = config.pylab.array(vm_table) * 1e3
            mus_t = linspace(0, sim.simtime * 1e3, len(mus_vm))
            try:
                # NOTE(review): reference file refers to Vm_deepLTS, not
                # suppyrFRB — possibly copied from another cell's test;
                # confirm the intended comparison data.
                nrn_vm = config.pylab.loadtxt('../nrn/mydata/Vm_deepLTS.plot')
                nrn_t = nrn_vm[:, 0]
                nrn_vm = nrn_vm[:, 1]
                config.pylab.plot(nrn_t, nrn_vm, 'y-', label='nrn vm')
            except IOError:
                print 'NEURON Data not available.'
            config.pylab.plot(mus_t, mus_vm, 'g-.', label='mus vm')
            config.pylab.legend()
            config.pylab.show()
# test main --
from simulation import Simulation
import pylab
from subprocess import call
if __name__ == "__main__":
SupPyrFRB.test_single_cell()
#
# suppyrFRB.py ends here
| BhallaLab/moose-thalamocortical | DEMOS/pymoose/traub2005/py/suppyrFRB.py | Python | lgpl-2.1 | 6,656 | [
"MOOSE",
"NEURON"
] | ddc7ca8d6bde6c8d1dfa8e5ef9841142784ece83e5767361b0c4065f523047ff |
'''Module containing a DensityFunc abstract class, with common probability densities
@since: Jan 10, 2013
@author: kroon
'''
from __future__ import division
import numpy as np
class Gaussian(object):
'''
Class for representing a multi-dimensional Gaussian distribution of dimension d,
given mean and covariance.
The covariance matrix has to be positive definite and non-singular.
Parameters
----------
mean : (d,) ndarray
mean of the distribution
cov : (d,d) ndarray
Covariance matrix.
Methods
-------
f
Returns the value of the density function
logf
Returns the log of the density function
likelihood
Returns the likelihood of the data
loglik
Reurns the log-likelihood of the data
sample
Returns samples drawn from the normal distribution with the given
mean and covariance
Example
-------
>>> from density import Gaussian
>>> # Scalar example
>>> mean = [10.]
>>> cov = [[1.]]
>>> ga = Gaussian(mean,cov)
>>> ga.f([10.])
0.398942280401
>>> x = np.array([[10.,10.,10.]])
>>> ga.likelihood(x)
0.0634936359342
>>> # Multivariate example
>>> mean = [10.0, 10.0]
>>> cov = [[ 1. 0.],[ 0. 10.]]
>>> ga = Gaussian(mean,cov)
>>> ga.f(np.array([10.,10.])
0.050329212104487035
>>> x = np.array([[10.,10.,10.,10.],[10.,10.,10.,10.]])
>>> ga.likelihood(x)
6.4162389091777101e-06
'''
def __init__(self, mean=[0.,0.], cov=[[1.,0.],[0.,1.]]):
mean = np.array(mean); cov = np.array(cov)
d,n = cov.shape
self._dim = d
self._mean = mean.flatten()
self._cov = cov
self._covdet = np.linalg.det(2.0*np.pi*cov)
if self._covdet < 10e-12:
raise ValueError('The covariance matrix is singular.')
def f(self, x):
'''
Calculate the value of the normal distributions at x
Parameters
----------
x : (d,) ndarray
Evaluate a single d-dimensional samples x
Returns
-------
val : scalar
The value of the normal distribution at x.
'''
return np.exp(self.logf(x))
def logf(self, x):
'''
Calculate the log-density at x
Parameters
----------
x : (d,) ndarray
Evaluate the log-normal distribution at a single d-dimensional
sample x
Returns
-------
val : scalar
The value of the log of the normal distribution at x.
'''
#x = x[:,np.newaxis]
trans = x - self._mean
mal = -trans.dot(np.linalg.solve(self._cov,trans))/2.
return -0.5*np.log(self._covdet) + mal
def likelihood(self, x):
'''
Calculates the likelihood of the data set x for the normal
distribution.
Parameters
----------
x : (d,n) ndarray
Calculate the likelihood of n, d-dimensional samples
Returns
-------
val : scalar
The likelihood value
'''
return np.exp(self.loglik(x))
def loglik(self, x):
'''
Calculates the log-likelihood of the data set x for the normal
distribution.
Parameters
----------
x : (d,n) ndarray
Calculate the likelihood of n, d-dimensional samples
Returns
-------
val : scalar
The log-likelihood value
'''
return np.sum(np.apply_along_axis(self.logf, 0, x))
def sample(self, n=1):
'''
Calculates n independent points sampled from the normal distribution
Parameters
----------
n : int
The number of samples
Returns
-------
samples : (d,n) ndarray
n, d-dimensional samples
'''
return np.random.multivariate_normal(self._mean, self._cov, n).T
| scjrobertson/xRange | kalman/gaussian.py | Python | gpl-3.0 | 4,271 | [
"Gaussian"
] | 628de464465455801ed54f015bba294970140620be98a11325418d3fc923aeee |
# we should be able to load in a plugin multiple times without issues,
# as long as it's the same plugin.
import sys
import paraview.simple
paraview.simple.LoadDistributedPlugin("EyeDomeLightingView", True, globals())
print 'loaded the first time successfully'
paraview.simple.LoadDistributedPlugin("EyeDomeLightingView", True, globals())
print 'loaded the second time successfully'
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/Plugins/EyeDomeLighting/ParaViewPlugin/Testing/ReloadPlugin.py | Python | gpl-3.0 | 385 | [
"ParaView"
] | afbd33b1115f51f1dd386c5d667708b38d719ab65d4033010ba24edf88e0f8df |
# coding=utf-8
import os, datetime
from decimal import Decimal
from django.test import TestCase
from tao import workflow, time
from tao.forms import FormsGraph
from tao.output_format_form import OutputFormatForm
from tao.record_filter_form import RecordFilterForm
from tao.xml_util import xml_print, xml_parse
from taoui_light_cone.forms import Form as LightConeForm
from taoui_sed.forms import Form as SEDForm
from tao.tests.support import stripped_joined_lines, UtcPlusTen
from tao.tests.support.xml import light_cone_xml
from tao.tests.support.factories import UserFactory, StellarModelFactory, SnapshotFactory, DataSetFactory, SimulationFactory, GalaxyModelFactory, DataSetPropertyFactory, BandPassFilterFactory, DustModelFactory, GlobalParameterFactory
from tao.tests.support.xml import XmlDiffMixin
from tao.tests.helper import MockUIHolder, make_form
class WorkflowTests(TestCase, XmlDiffMixin):
def setUp(self):
super(WorkflowTests, self).setUp()
OUTPUT_FORMATS = [
{'value':'csv', 'text':'CSV (Text2)', 'extension':'csv'},
{'value':'hdf5', 'text':'HDF5', 'extension':'hdf5'},
{'value': 'fits', 'text': 'FITS', 'extension': 'fits'},
{'value': 'votable', 'text': 'VOTable', 'extension': 'xml'}
]
self.output_formats = GlobalParameterFactory.create(parameter_name='output_formats', parameter_value=OUTPUT_FORMATS)
# "2012-12-13T13:55:36+10:00"
time.frozen_time = datetime.datetime(2012, 12, 20, 13, 55, 36, 0, UtcPlusTen())
self.user = UserFactory.create()
self.common_parameters = [
{'attrs': {'name': 'database-type'}, 'value': 'postgresql'},
{'attrs': {'name': 'database-host'}, 'value': 'tao02.hpc.swin.edu.au'},
{'attrs': {'name': 'database-name'}, 'value': 'millennium_full'},
{'attrs': {'name': 'database-port'}, 'value': '3306'},
{'attrs': {'name': 'database-user'}, 'value': ''},
{'attrs': {'name': 'database-pass'}, 'value': ''},
{
'attrs': {
'name': 'schema-version'
},
'value': '2.0',
},
]
self.simulation = SimulationFactory.create(box_size=500)
self.galaxy_model = GalaxyModelFactory.create()
self.dataset = DataSetFactory.create(simulation=self.simulation, galaxy_model=self.galaxy_model)
self.filter = DataSetPropertyFactory.create(name='CentralMvir rf', units="Msun/h", dataset=self.dataset)
self.computed_filter = DataSetPropertyFactory.create(name='Computed Filter', dataset=self.dataset, is_computed = True)
self.output_prop = DataSetPropertyFactory.create(name='Central op', dataset=self.dataset, is_filter=False)
self.snapshot = SnapshotFactory.create(dataset=self.dataset, redshift="0.1234567891")
self.stellar_model = StellarModelFactory.create(name='Stella')
self.band_pass_filter = BandPassFilterFactory.create(label='bandpass')
self.dust_model = DustModelFactory.create()
self.sed_parameters = {'apply_sed': True, 'single_stellar_population_model': self.stellar_model.id,
'band_pass_filters': [str(self.band_pass_filter.id) + '_apparent'], 'apply_dust': True,
'select_dust_model': self.dust_model.id}
self.sed_disabled = {'apply_sed': False}
self.sed_parameters_no_dust = {'apply_sed': True, 'single_stellar_population_model': self.stellar_model.id,
'band_pass_filters': [str(self.band_pass_filter.id) + '_absolute']}
# from code import interact
# interact(local=locals())
self.output_format = OUTPUT_FORMATS[0]['value']
self.output_format_parameters = {'supported_formats': self.output_format}
def tearDown(self):
super(WorkflowTests, self).tearDown()
time.frozen_time = None
from tao.models import Simulation
for sim in Simulation.objects.all():
sim.delete()
def test_unique_cone(self):
form_parameters = {
'catalogue_geometry': 'light-cone',
'dark_matter_simulation': self.simulation.id,
'galaxy_model': self.galaxy_model.id,
'redshift_min': 0.0,
'redshift_max': 0.2,
'ra_opening_angle': 12.001,
'dec_opening_angle': 10.003,
'output_properties' : [self.filter.id, self.output_prop.id, self.computed_filter.id],
'light_cone_type': 'unique',
'number_of_light_cones': 8,
}
xml_parameters = form_parameters.copy()
xml_parameters.update({
'username' : self.user.username,
'dark_matter_simulation': self.simulation.name,
'galaxy_model': self.galaxy_model.name,
'output_properties_1_name' : self.filter.name,
'output_properties_1_label' : self.filter.label,
'output_properties_1_units' : self.filter.units,
'output_properties_1_description' : self.filter.description,
'output_properties_2_name' : self.output_prop.name,
'output_properties_2_label' : self.output_prop.label,
'output_properties_2_description' : self.output_prop.description,
'output_properties_3_name' : self.computed_filter.name,
'output_properties_3_label' : self.computed_filter.label,
'output_properties_3_description' : self.computed_filter.description,
})
xml_parameters.update({
'filter': self.filter.name,
'filter_min' : '1000000',
'filter_max' : 'None',
})
xml_parameters.update({
'apply_sed': False,
})
xml_parameters.update({
'ssp_encoding': self.stellar_model.encoding,
'band_pass_filter_label': self.band_pass_filter.label,
'band_pass_filter_id': self.band_pass_filter.filter_id,
'band_pass_filter_name': self.band_pass_filter.filter_id,
'dust_model_name': self.dust_model.name,
})
xml_parameters.update({
'light_cone_id': FormsGraph.LIGHT_CONE_ID,
'csv_dump_id': FormsGraph.OUTPUT_ID,
'bandpass_filter_id': FormsGraph.BANDPASS_FILTER_ID,
'sed_id': FormsGraph.SED_ID,
'dust_id': FormsGraph.DUST_ID,
})
# TODO: there are commented out elements which are not implemented yet
# comments are ignored by assertXmlEqual
expected_parameter_xml = light_cone_xml(xml_parameters)
mock_ui_holder = MockUIHolder()
light_cone_form = make_form({}, LightConeForm, form_parameters, ui_holder=mock_ui_holder, prefix='light_cone')
sed_form = make_form({}, SEDForm, self.sed_parameters, ui_holder=mock_ui_holder, prefix='sed')
output_form = make_form({}, OutputFormatForm, {'supported_formats': 'csv'}, ui_holder=mock_ui_holder,
prefix='output_format')
mock_ui_holder.update(light_cone = light_cone_form, sed = sed_form, output_format = output_form)
record_filter_form = make_form({}, RecordFilterForm, {'filter':'D-'+str(self.filter.id),'min':str(1000000)},
ui_holder=mock_ui_holder, prefix='record_filter')
self.assertEqual({}, light_cone_form.errors)
self.assertEqual({}, sed_form.errors)
self.assertEqual({}, record_filter_form.errors)
self.assertEqual({}, output_form.errors)
mock_ui_holder.update(record_filter = record_filter_form)
mock_ui_holder.dataset = self.dataset
job = workflow.save(self.user, mock_ui_holder)
actual_parameter_xml = job.parameters
self.assertEqual(self.dataset.database, job.database)
self.assertXmlEqual(expected_parameter_xml, actual_parameter_xml)
def test_random_cone(self):
form_parameters = {
'catalogue_geometry': 'light-cone',
'dark_matter_simulation': self.simulation.id,
'galaxy_model': self.galaxy_model.id,
'redshift_min': 0.2,
'redshift_max': 0.3,
'ra_opening_angle': 71.565,
'dec_opening_angle': 41.811,
'output_properties' : [self.filter.id, self.output_prop.id, self.computed_filter.id],
'light_cone_type': 'random',
'number_of_light_cones': 10,
}
xml_parameters = form_parameters.copy()
xml_parameters.update({
'username' : self.user.username,
'dark_matter_simulation': self.simulation.name,
'galaxy_model': self.galaxy_model.name,
'output_properties_1_name' : self.filter.name,
'output_properties_1_label' : self.filter.label,
'output_properties_1_units' : self.filter.units,
'output_properties_1_description' : self.filter.description,
'output_properties_2_name' : self.output_prop.name,
'output_properties_2_label' : self.output_prop.label,
'output_properties_2_description' : self.output_prop.description,
'output_properties_3_name' : self.computed_filter.name,
'output_properties_3_label' : self.computed_filter.label,
'output_properties_3_description' : self.computed_filter.description,
})
xml_parameters.update({
'filter': self.filter.name,
'filter_min' : '1000000',
'filter_max' : 'None',
})
xml_parameters.update({
'ssp_encoding': self.stellar_model.encoding,
'band_pass_filter_label': self.band_pass_filter.label,
'band_pass_filter_id': self.band_pass_filter.filter_id,
'band_pass_filter_name': self.band_pass_filter.filter_id,
'dust_model_name': self.dust_model.name,
})
xml_parameters.update({
'light_cone_id': FormsGraph.LIGHT_CONE_ID,
'csv_dump_id': FormsGraph.OUTPUT_ID,
'bandpass_filter_id': FormsGraph.BANDPASS_FILTER_ID,
'sed_id': FormsGraph.SED_ID,
'dust_id': FormsGraph.DUST_ID,
})
# TODO: there are commented out elements which are not implemented yet
# comments are ignored by assertXmlEqual
expected_parameter_xml = light_cone_xml(xml_parameters)
mock_ui_holder = MockUIHolder()
light_cone_form = make_form({}, LightConeForm, form_parameters, ui_holder=mock_ui_holder, prefix='light_cone')
sed_form = make_form({}, SEDForm, self.sed_parameters, ui_holder=mock_ui_holder, prefix='sed')
output_form = make_form({}, OutputFormatForm, {'supported_formats': 'csv'}, ui_holder=mock_ui_holder,
prefix='output_format')
mock_ui_holder.update(light_cone = light_cone_form, sed = sed_form, output_format = output_form)
mock_ui_holder.dataset = self.dataset
record_filter_form = make_form({}, RecordFilterForm, {'filter':'D-'+str(self.filter.id),'min':str(1000000)},
ui_holder=mock_ui_holder, prefix='record_filter')
self.assertEqual({}, light_cone_form.errors)
self.assertEqual({}, sed_form.errors)
self.assertEqual({}, record_filter_form.errors)
self.assertEqual({}, output_form.errors)
mock_ui_holder.update(record_filter = record_filter_form)
job = workflow.save(self.user, mock_ui_holder)
actual_parameter_xml = job.parameters
self.assertXmlEqual(expected_parameter_xml, actual_parameter_xml)
self.assertEqual(self.dataset.database, job.database)
def test_box(self):
form_parameters = {
'catalogue_geometry': 'box',
'dark_matter_simulation': self.simulation.id,
'galaxy_model': self.dataset.id,
'output_properties' : [self.filter.id],
'snapshot': self.snapshot.id,
'box_size': 20,
'rng_seed': 12345678901234567890
}
xml_parameters = form_parameters.copy()
xml_parameters.update({
'username' : self.user.username,
'dark_matter_simulation': self.simulation.name,
'galaxy_model': self.galaxy_model.name,
'output_properties_1_name' : self.filter.name,
'output_properties_1_label' : self.filter.label,
'output_properties_1_units' : self.filter.units,
'output_properties_1_description' : self.filter.description,
'redshift' : float(self.snapshot.redshift),
})
xml_parameters.update({
'filter': self.filter.name,
'filter_min' : 'None',
'filter_max' : '1000000',
})
# TODO: there are commented out elements which are not implemented yet
xml_parameters.update({
'ssp_encoding': self.stellar_model.encoding,
'band_pass_filter_label': self.band_pass_filter.label,
'band_pass_filter_id': self.band_pass_filter.filter_id,
'band_pass_filter_description': self.band_pass_filter.description,
'band_pass_extension': 'apparent',
'dust_model_name': self.dust_model.name,
})
xml_parameters.update({
'light_cone_id': FormsGraph.LIGHT_CONE_ID,
'csv_dump_id': FormsGraph.OUTPUT_ID,
'bandpass_filter_id': FormsGraph.BANDPASS_FILTER_ID,
'sed_id': FormsGraph.SED_ID,
'dust_id': FormsGraph.DUST_ID,
})
# comments are ignored by assertXmlEqual
expected_parameter_xml = stripped_joined_lines("""<?xml version="1.0"?>
<!-- Using the XML namespace provides a version for future modifiability. The timestamp allows
a researcher to know when this parameter file was generated. -->
<tao xmlns="http://tao.asvo.org.au/schema/module-parameters-v1" timestamp="2012-12-20T13:55:36+10:00">
<!-- Username submitting the job -->
<username>%(username)s</username>
<!-- Workflow name identifies which workflow is to be executed.
This is currently a placeholder, the name is ignored. -->
<workflow name="alpha-light-cone-image">
<!-- Global Configuration Parameters -->
<schema-version>2.0</schema-version>
<!-- Light-cone module parameters -->
<light-cone id="%(light_cone_id)s">
<!-- Module Version Number -->
<module-version>1</module-version>
<!-- Is the query a light-cone or box? -->
<geometry>box</geometry>
<!-- Selected Simuation -->
<simulation>%(dark_matter_simulation)s</simulation>
<!-- Selected Galaxy Model -->
<galaxy-model>%(galaxy_model)s</galaxy-model>
<!-- The number of light-cones to generate
<box-repetition>unique | random</box-repetition>
<num-cones>1</num-cones> -->
<!-- The min and max redshifts to filter by -->
<redshift>%(redshift).10f</redshift>
<!-- Size of box to return -->
<query-box-size units="Mpc">%(box_size)d</query-box-size>
<!-- RNG Seed -->
<!-- This will be added by the workflow after the job has been completed
to enable the job to be repeated.
The information stored may change, the intent is to store whatever is
required to re-run the job and obtain the same results. -->
<rng-seed>12345678901234567890</rng-seed>
<!-- List of fields to be included in the output file -->
<output-fields>
<item description="%(output_properties_1_description)s" label="%(output_properties_1_label)s" units="%(output_properties_1_units)s">%(output_properties_1_name)s</item>
</output-fields>
</light-cone>
<!-- File output module -->
<csv id="%(csv_dump_id)s">
<fields>
<item label="%(output_properties_1_label)s" units="%(output_properties_1_units)s">%(output_properties_1_name)s</item>
<!-- <item label="bandpass (Absolute)">Band_pass_filter_000_absolute</item> -->
<item label="bandpass (Apparent)">%(band_pass_filter_id)s_%(band_pass_extension)s</item>
</fields>
<!-- Module Version Number -->
<module-version>1</module-version>
<!-- Output file format -->
<filename>tao.output.csv</filename>
<parents>
<item>%(bandpass_filter_id)s</item>
</parents>
</csv>
<!-- Optional: Spectral Energy Distribution parameters -->
<sed id="%(sed_id)s">
<!-- Module Version Number -->
<module-version>1</module-version>
<parents>
<item>%(light_cone_id)s</item>
</parents>
%(ssp_encoding)s
</sed>
<filter id="%(bandpass_filter_id)s">
<!-- Module Version Number -->
<module-version>1</module-version>
<parents>
<item>%(dust_id)s</item>
</parents>
<!-- Bandpass Filters) -->
<bandpass-filters>
<item description="%(band_pass_filter_description)s" label="%(band_pass_filter_label)s" selected="%(band_pass_extension)s">%(band_pass_filter_id)s</item>
</bandpass-filters>
</filter>
<dust id="%(dust_id)s">
<module-version>1</module-version>
<parents>
<item>%(sed_id)s</item>
</parents>
<model>%(dust_model_name)s</model>
</dust>
<!-- Record Filter -->
<record-filter>
<!-- Module Version Number -->
<module-version>1</module-version>
<!-- Note that the units are for readability,
no unit conversion is supported. The consumer of the
parameter file should check that the expected units are provided. -->
<filter>
<filter-attribute>%(filter)s</filter-attribute>
<filter-min units="Msun/h">%(filter_min)s</filter-min>
<filter-max units="Msun/h">%(filter_max)s</filter-max>
</filter>
</record-filter>
<!-- Image generation module parameters
<image-generator>
<!- Module Version Number ->
<module-version>1</module-version>
<!- Image size parameters ->
<image-width units="px">1024</image-width>
<image-height units="px">1024</image-height>
<!- Focal scale parameters ->
<focalx units="??">1024</focalx>
<focaly units="??">1024</focaly>
<!- Image offset parameters ->
<image-offsetx units="??">512</image-offsetx>
<image-offsety units="??">0</image-offsety>
</image-generator> -->
</workflow>
<!-- The signature is automatically generated and is intended to be used when running
old versions of the science modules (to remove the need for the UI to parse and check
every version. -->
<signature>base64encodedsignature</signature>
</tao>
""") % xml_parameters
mock_ui_holder = MockUIHolder()
light_cone_form = make_form({}, LightConeForm, form_parameters, ui_holder=mock_ui_holder, prefix='light_cone')
output_form = make_form({}, OutputFormatForm, {'supported_formats': 'csv'}, prefix='output_format')
sed_form = make_form({}, SEDForm, self.sed_parameters, ui_holder=mock_ui_holder, prefix='sed')
mock_ui_holder.update(light_cone = light_cone_form, sed = sed_form, output_format = output_form)
mock_ui_holder.dataset = self.dataset
record_filter_form = make_form({}, RecordFilterForm, {'filter':'D-'+str(self.filter.id),'max':str(1000000)},
ui_holder=mock_ui_holder, prefix='record_filter')
self.assertEqual({}, light_cone_form.errors)
self.assertEqual({}, sed_form.errors)
self.assertEqual({}, record_filter_form.errors)
self.assertEqual({}, output_form.errors)
mock_ui_holder.update(record_filter = record_filter_form)
job = workflow.save(self.user, mock_ui_holder)
actual_parameter_xml = job.parameters
self.assertXmlEqual(expected_parameter_xml, actual_parameter_xml)
self.assertEqual(self.dataset.database, job.database)
def test_no_sed(self):
form_parameters = {
'catalogue_geometry': 'box',
'dark_matter_simulation': self.simulation.id,
'galaxy_model': self.dataset.id,
'output_properties' : [self.filter.id],
'snapshot': self.snapshot.id,
'box_size': 20,
'rng_seed': 12345678901234567890
}
xml_parameters = form_parameters.copy()
xml_parameters.update({
'username' : self.user.username,
'dark_matter_simulation': self.simulation.name,
'galaxy_model': self.galaxy_model.name,
'output_properties_1_name' : self.filter.name,
'output_properties_1_label' : self.filter.label,
'output_properties_1_units' : self.filter.units,
'output_properties_1_description' : self.filter.description,
'output_properties_2_name' : self.output_prop.name,
'output_properties_2_label' : self.output_prop.label,
'output_properties_2_description' : self.output_prop.description,
'redshift' : float(self.snapshot.redshift),
})
xml_parameters.update({
'filter': self.filter.name,
'filter_min' : 'None',
'filter_max' : '1000000',
'band_pass_filter_name': self.band_pass_filter.filter_id,
})
xml_parameters.update({
'ssp_encoding': self.stellar_model.encoding,
'band_pass_filter_label': self.band_pass_filter.label,
'band_pass_filter_id': self.band_pass_filter.filter_id,
'band_pass_filter_name': self.band_pass_filter.filter_id,
'dust_model_name': self.dust_model.name,
})
xml_parameters.update({
'light_cone_id': FormsGraph.LIGHT_CONE_ID,
'csv_dump_id': FormsGraph.OUTPUT_ID,
'bandpass_filter_id': FormsGraph.BANDPASS_FILTER_ID,
'sed_id': FormsGraph.SED_ID,
'dust_id': FormsGraph.DUST_ID,
})
expected_parameter_xml = stripped_joined_lines("""<?xml version="1.0" encoding="UTF-8"?>
<!-- Using the XML namespace provides a version for future modifiability. The timestamp allows
a researcher to know when this parameter file was generated. -->
<tao xmlns="http://tao.asvo.org.au/schema/module-parameters-v1" timestamp="2012-12-20T13:55:36+10:00">
<!-- Username submitting the job -->
<username>%(username)s</username>
<!-- Workflow name identifies which workflow is to be executed.
This is currently a placeholder, the name is ignored. -->
<workflow name="alpha-light-cone-image">
<!-- Global Configuration Parameters -->
<schema-version>2.0</schema-version>
<!-- Light-cone module parameters -->
<light-cone id="%(light_cone_id)s">
<!-- Module Version Number -->
<module-version>1</module-version>
<!-- Is the query a light-cone or box? -->
<geometry>box</geometry>
<!-- Selected Simuation -->
<simulation>%(dark_matter_simulation)s</simulation>
<!-- Selected Galaxy Model -->
<galaxy-model>%(galaxy_model)s</galaxy-model>
<!-- The number of light-cones to generate
<box-repetition>unique | random</box-repetition>
<num-cones>1</num-cones> -->
<!-- The min and max redshifts to filter by -->
<redshift>%(redshift).10f</redshift>
<!-- Size of box to return -->
<query-box-size units="Mpc">%(box_size)d</query-box-size>
<!-- RNG Seed -->
<!-- This will be added by the workflow after the job has been completed
to enable the job to be repeated.
The information stored may change, the intent is to store whatever is
required to re-run the job and obtain the same results. -->
<rng-seed>12345678901234567890</rng-seed>
<!-- List of fields to be included in the output file -->
<output-fields>
<item description="%(output_properties_1_description)s" label="%(output_properties_1_label)s" units="%(output_properties_1_units)s">%(output_properties_1_name)s</item>
</output-fields>
</light-cone>
<!-- File output module -->
<csv id="%(csv_dump_id)s">
<fields>
<item label="%(output_properties_1_label)s" units="%(output_properties_1_units)s">%(output_properties_1_name)s</item>
</fields>
<!-- Module Version Number -->
<module-version>1</module-version>
<!-- Output file format -->
<filename>tao.output.csv</filename>
<parents>
<item>%(light_cone_id)s</item>
</parents>
</csv>
<!-- Record Filter -->
<record-filter>
<!-- Module Version Number -->
<module-version>1</module-version>
<!-- Note that the units are for readability,
no unit conversion is supported. The consumer of the
parameter file should check that the expected units are provided. -->
<filter>
<filter-attribute>%(filter)s</filter-attribute>
<filter-min units="Msun/h">%(filter_min)s</filter-min>
<filter-max units="Msun/h">%(filter_max)s</filter-max>
</filter>
</record-filter>
<!-- Image generation module parameters
<image-generator>
<!- Module Version Number ->
<module-version>1</module-version>
<!- Image size parameters ->
<image-width units="px">1024</image-width>
<image-height units="px">1024</image-height>
<!- Focal scale parameters ->
<focalx units="??">1024</focalx>
<focaly units="??">1024</focaly>
<!- Image offset parameters ->
<image-offsetx units="??">512</image-offsetx>
<image-offsety units="??">0</image-offsety>
</image-generator> -->
</workflow>
<!-- The signature is automatically generated and is intended to be used when running
old versions of the science modules (to remove the need for the UI to parse and check
every version. -->
<signature>base64encodedsignature</signature>
</tao>
""") % xml_parameters
mock_ui_holder = MockUIHolder()
light_cone_form = make_form({}, LightConeForm, form_parameters, ui_holder=mock_ui_holder, prefix='light_cone')
sed_form = make_form({}, SEDForm, self.sed_disabled, ui_holder=mock_ui_holder, prefix='sed')
output_form = make_form({}, OutputFormatForm, {'supported_formats': 'csv'}, ui_holder=mock_ui_holder, prefix='output_format')
mock_ui_holder.update(light_cone = light_cone_form, sed = sed_form, output_format = output_form)
mock_ui_holder.dataset = self.dataset
record_filter_form = make_form({}, RecordFilterForm, {'filter':'D-'+str(self.filter.id),'max':str(1000000)},
ui_holder=mock_ui_holder, prefix='record_filter')
self.assertEqual({}, light_cone_form.errors)
self.assertEqual({}, sed_form.errors)
self.assertEqual({}, record_filter_form.errors)
self.assertEqual({}, output_form.errors)
mock_ui_holder.update(record_filter = record_filter_form)
job = workflow.save(self.user, mock_ui_holder)
actual_parameter_xml = job.parameters
self.assertXmlEqual(expected_parameter_xml, actual_parameter_xml)
self.assertEqual(self.dataset.database, job.database)
def test_no_dust(self):
form_parameters = {
'catalogue_geometry': 'box',
'dark_matter_simulation': self.simulation.id,
'galaxy_model': self.dataset.id,
'output_properties' : [self.filter.id],
'snapshot': self.snapshot.id,
'box_size': 20,
'rng_seed': 12345678901234567890
}
xml_parameters = form_parameters.copy()
xml_parameters.update({
'username' : self.user.username,
'dark_matter_simulation': self.simulation.name,
'galaxy_model': self.galaxy_model.name,
'output_properties_1_name' : self.filter.name,
'output_properties_1_label' : self.filter.label,
'output_properties_1_units' : self.filter.units,
'output_properties_1_description' : self.filter.description,
'redshift' : float(self.snapshot.redshift),
})
xml_parameters.update({
'filter': self.filter.name,
'filter_min' : 'None',
'filter_max' : '1000000',
})
# TODO: there are commented out elements which are not implemented yet
xml_parameters.update({
'ssp_encoding': self.stellar_model.encoding,
'band_pass_filter_label': self.band_pass_filter.label,
'band_pass_filter_id': self.band_pass_filter.filter_id,
'band_pass_filter_name': self.band_pass_filter.filter_id,
'band_pass_filter_description': self.band_pass_filter.description,
'band_pass_extension': 'absolute',
})
xml_parameters.update({
'light_cone_id': FormsGraph.LIGHT_CONE_ID,
'csv_dump_id': FormsGraph.OUTPUT_ID,
'bandpass_filter_id': FormsGraph.BANDPASS_FILTER_ID,
'sed_id': FormsGraph.SED_ID,
'dust_id': FormsGraph.DUST_ID,
})
# comments are ignored by assertXmlEqual
expected_parameter_xml = stripped_joined_lines("""<?xml version="1.0"?>
<!-- Using the XML namespace provides a version for future modifiability. The timestamp allows
a researcher to know when this parameter file was generated. -->
<tao xmlns="http://tao.asvo.org.au/schema/module-parameters-v1" timestamp="2012-12-20T13:55:36+10:00">
<!-- Username submitting the job -->
<username>%(username)s</username>
<!-- Workflow name identifies which workflow is to be executed.
This is currently a placeholder, the name is ignored. -->
<workflow name="alpha-light-cone-image">
<!-- Global Configuration Parameters -->
<schema-version>2.0</schema-version>
<!-- Light-cone module parameters -->
<light-cone id="%(light_cone_id)s">
<!-- Module Version Number -->
<module-version>1</module-version>
<!-- Is the query a light-cone or box? -->
<geometry>box</geometry>
<!-- Selected Simuation -->
<simulation>%(dark_matter_simulation)s</simulation>
<!-- Selected Galaxy Model -->
<galaxy-model>%(galaxy_model)s</galaxy-model>
<!-- The number of light-cones to generate
<box-repetition>unique | random</box-repetition>
<num-cones>1</num-cones> -->
<!-- The min and max redshifts to filter by -->
<redshift>%(redshift).10f</redshift>
<!-- Size of box to return -->
<query-box-size units="Mpc">%(box_size)d</query-box-size>
<!-- RNG Seed -->
<!-- This will be added by the workflow after the job has been completed
to enable the job to be repeated.
The information stored may change, the intent is to store whatever is
required to re-run the job and obtain the same results.-->
<rng-seed>12345678901234567890</rng-seed>
<!-- List of fields to be included in the output file -->
<output-fields>
<item description="%(output_properties_1_description)s" label="%(output_properties_1_label)s" units="%(output_properties_1_units)s">%(output_properties_1_name)s</item>
</output-fields>
</light-cone>
<!-- File output module -->
<csv id="%(csv_dump_id)s">
<fields>
<item label="%(output_properties_1_label)s" units="%(output_properties_1_units)s">%(output_properties_1_name)s</item>
<item label="bandpass (Absolute)">%(band_pass_filter_name)s_absolute</item>
<!-- <item label="bandpass (Apparent)">%(band_pass_filter_name)s_apparent</item> -->
</fields>
<!-- Module Version Number -->
<module-version>1</module-version>
<!-- Output file format -->
<filename>tao.output.csv</filename>
<parents>
<item>%(bandpass_filter_id)s</item>
</parents>
</csv>
<!-- Optional: Spectral Energy Distribution parameters -->
<sed id="%(sed_id)s">
<!-- Module Version Number -->
<module-version>1</module-version>
<parents>
<item>%(light_cone_id)s</item>
</parents>
%(ssp_encoding)s
</sed>
<filter id="%(bandpass_filter_id)s">
<module-version>1</module-version>
<parents>
<item>%(sed_id)s</item>
</parents>
<!-- Bandpass Filters) -->
<bandpass-filters>
<item description="%(band_pass_filter_description)s" label="%(band_pass_filter_label)s" selected="%(band_pass_extension)s">%(band_pass_filter_id)s</item>
</bandpass-filters>
</filter>
<!-- Record Filter -->
<record-filter>
<!-- Module Version Number -->
<module-version>1</module-version>
<!-- Note that the units are for readability,
no unit conversion is supported. The consumer of the
parameter file should check that the expected units are provided. -->
<filter>
<filter-attribute>%(filter)s</filter-attribute>
<filter-min units="Msun/h">%(filter_min)s</filter-min>
<filter-max units="Msun/h">%(filter_max)s</filter-max>
</filter>
</record-filter>
<!-- Image generation module parameters
<image-generator>
<!- Module Version Number ->
<module-version>1</module-version>
<!- Image size parameters ->
<image-width units="px">1024</image-width>
<image-height units="px">1024</image-height>
<!- Focal scale parameters ->
<focalx units="??">1024</focalx>
<focaly units="??">1024</focaly>
<!- Image offset parameters ->
<image-offsetx units="??">512</image-offsetx>
<image-offsety units="??">0</image-offsety>
</image-generator> -->
</workflow>
<!-- The signature is automatically generated and is intended to be used when running
old versions of the science modules (to remove the need for the UI to parse and check
every version. -->
<signature>base64encodedsignature</signature>
</tao>
""") % xml_parameters
mock_ui_holder = MockUIHolder()
light_cone_form = make_form({}, LightConeForm, form_parameters, ui_holder=mock_ui_holder, prefix='light_cone')
sed_form = make_form({}, SEDForm, self.sed_parameters_no_dust, ui_holder=mock_ui_holder, prefix='sed')
output_form = make_form({}, OutputFormatForm, {'supported_formats': 'csv'}, ui_holder=mock_ui_holder, prefix='output_format')
mock_ui_holder.update(light_cone = light_cone_form, sed = sed_form, output_format = output_form)
mock_ui_holder.dataset = self.dataset
record_filter_form = make_form({}, RecordFilterForm, {'filter':'D-'+str(self.filter.id),'max':str(1000000)},
ui_holder=mock_ui_holder, prefix='record_filter')
self.assertEqual({}, light_cone_form.errors)
self.assertEqual({}, sed_form.errors)
self.assertEqual({}, record_filter_form.errors)
self.assertEqual({}, output_form.errors)
mock_ui_holder.update(record_filter = record_filter_form)
job = workflow.save(self.user, mock_ui_holder)
actual_parameter_xml = job.parameters
self.assertXmlEqual(expected_parameter_xml, actual_parameter_xml)
self.assertEqual(self.dataset.database, job.database)
| IntersectAustralia/asvo-tao | web/tao/tests/workflow_tests.py | Python | gpl-3.0 | 40,664 | [
"Galaxy"
] | 277fc7ff3efb7437b336558bc65747e127e8b10ea367dda9ea43ace71af0a56b |
import numpy as np
from numpy.testing import (
assert_array_equal,
)
from MDAnalysis.topology.base import squash_by, change_squash
class TestSquash(object):
    """Checks squash_by: collapsing per-atom arrays to per-residue arrays."""

    # Per-atom resids are deliberately unsorted to exercise the sorting.
    atom_resids = np.array([2, 2, 1, 1, 5, 5, 4, 4])
    atom_resnames = np.array(['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'],
                             dtype=object)

    def test_squash(self):
        """squash_by yields sorted unique ids plus a per-atom index map."""
        residx, unique_ids, (unique_names,) = squash_by(
            self.atom_resids, self.atom_resnames)

        assert_array_equal(residx, np.array([1, 1, 0, 0, 3, 3, 2, 2]))
        assert_array_equal(unique_ids, np.array([1, 2, 4, 5]))
        assert_array_equal(unique_names, np.array(['B', 'A', 'D', 'C']))
class TestChangeSquash(object):
    """Checks change_squash: a new entry starts wherever the key changes."""

    def test_resid_squash(self):
        """Repeated resid 2 must be detected as two distinct residues."""
        # System of 6 atoms in 3 residues.
        # Residues 1 & 2 are Segid A, Residue 3 is Segid B.
        resids = np.array([2, 2, 3, 3, 2, 2])
        resnames = np.array(['RsA', 'RsA', 'RsB', 'RsB', 'RsC', 'RsC'])
        segids = np.array(['A', 'A', 'A', 'A', 'B', 'B'])

        residx, (new_ids, new_names, new_segs) = change_squash(
            (resids,), (resids, resnames, segids))

        assert_array_equal(residx, np.array([0, 0, 1, 1, 2, 2]))
        assert_array_equal(new_ids, np.array([2, 3, 2]))
        assert_array_equal(new_names, np.array(['RsA', 'RsB', 'RsC']))
        assert_array_equal(new_segs, np.array(['A', 'A', 'B']))

    def test_segid_squash(self):
        """Consecutive identical segids collapse into one segment."""
        segids = np.array(['A', 'A', 'B'])

        segidx, (new_segs,) = change_squash((segids,), (segids,))

        assert_array_equal(segidx, np.array([0, 0, 1]))
        assert_array_equal(new_segs, np.array(['A', 'B']))
| alejob/mdanalysis | testsuite/MDAnalysisTests/topology/test_topology_base.py | Python | gpl-2.0 | 1,744 | [
"MDAnalysis"
] | abff8ad4474d7a9b6af9bf143b6234fe047cf506e6f6280159ec938066215e64 |
#!/usr/bin/python
# -*- coding:UTF-8 -*-
################################################################################
#
# Copyright 2010-2014 Carlos Ramisch, Vitor De Araujo, Silvio Ricardo Cordeiro,
# Sandra Castellanos
#
# XMLFormatter.py is part of mwetoolkit
#
# mwetoolkit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mwetoolkit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mwetoolkit. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
from EitherPattern import *
from SequencePattern import *
from WordPattern import *
from Formatter import *
class XMLFormatter(Formatter):
    """Formatter that serializes a pattern tree to mwetoolkit XML.

    Visits WordPattern, SequencePattern and EitherPattern nodes and appends
    their XML representation to ``self.content`` (inherited from Formatter).
    """

    def __init__(self):
        '''Create a XMLFormatter object.'''
        Formatter.__init__(self)

    def visit(self, pattern):
        '''Append the XML serialization of ``pattern`` to ``self.content``.

        Dispatches on the concrete pattern type; sequence and either
        patterns recurse into their children via the visitor protocol
        (``component.accept(self)``).
        '''
        if isinstance(pattern, WordPattern):
            # Bug fix: the original opened with an if/else whose two branches
            # were identical ('<w' in both) -- collapsed to a single append.
            self.content += '<w'
            if pattern.id is not None:
                self.content += ' id="%s"' %pattern.id
            # For now, the positive dictionary can't have multiple values for the same attribute
            # So, the first element of the list is taken
            for key in pattern.positive:
                self.content += ' %s="%s"' %(key, pattern.positive[key][0])
            if pattern.negative != {}:
                self.content += '>'
                # Create a negation tag for each value
                for key in pattern.negative:
                    for value in pattern.negative[key]:
                        self.content += '<neg'
                        self.content += ' %s="%s"' %(key, value)
                        self.content += ' />'
                self.content += '</w>'
            else:
                # No negations: emit a self-closing <w /> element.
                self.content += ' />'
        elif isinstance(pattern, SequencePattern):
            self.content += '<pat'
            if pattern.repeat is not None:
                self.content += ' repeat="%s"' %pattern.repeat
            if pattern.ignore:
                self.content += ' ignore="true"'
            self.content += '>'
            # Loop through its children
            for component in pattern.components:
                component.accept(self)
            self.content += '</pat>'
        elif isinstance(pattern, EitherPattern):
            self.content += '<either>'
            # Loop through its children
            for component in pattern.components:
                component.accept(self)
            self.content += '</either>'
"VisIt"
] | 16ba3549b5e1162d4558a4597510ddf06a893209c76f6be347b5a24d860caa7a |
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
"""
Wrap the Amber tleap program.
"""
import tempfile
import os.path as P
from biskit.exe import Executor, TemplateError
import biskit.tools as T
class LeapError( Exception ):
    """Raised when tleap input preparation fails, e.g. when no matching
    leaprc forcefield file can be located."""
    pass
class AmberLeap( Executor ):
    """
    Low-level wrapper for Amber tleap.
    AmberLeap expects a template for the leap input script which is then
    completed and passed to tleap. The manual command would like this::
        tleap -f leap.input
    The existence of a parm output file is used as success criterium.
    Usage::
        x = AmberLeap( 'leap_in.template' )
        x.run()
    However, typical leap scripts also load a PDB file and write
    top and crd files. Your template will thatswhy probably have placeholders
    for these. As usual, you can provide values for arbitrary place holders
    directly to the constructor::
        x = AmberLeap( 'leap_in.template',
                       leaprc='ff03',
                       out_parm='top.parm',
                       out_crd='0.crd',
                       in_pdb='1HPT_cleaned.pdb')
        x.run()
    Three special options are noteworthy:
    -leaprc   -- the forcefield to be used,
                 will be completed to an existing leaprc file.
    -out_parm -- the name of the resulting topology file (default: top.parm)
    -out_crd  -- the name of the resulting coordinate file (default: 0.crd)
    AmberParmBuilder uses AmberLeap and implements the gory details of PDB
    cleanup, S-S bond patching, chain break capping,
    and so on. It would be better to move this functionality directly
    into AmberLeap.
    .. seealso:: `biskit.Executor`, `biskit.AmberParmBuilder`
    """

    LEAPRC = 'leaprc.'            #: default leaprc file name
    LEAPRC_PATH = 'dat/leap/cmd/' #: default leaprc location within $AMBERHOME

    def __init__( self,
                  template,
                  leaprc=None,
                  **kw ):
        """
        :param template: template for leap input file (file or string)
        :type template: str
        :param leaprc: forcefield code (leaprc file ending, e.g. 'ff99')
                       OR leaprc file name (e.g, 'leaprc.f99')
                       OR leaprc path witin $AMBERHOME
                       OR leaprc path
                       default: take value from exe_tleap.dat
        :type leaprc: str
        :param f_in: complete leap input file -- existing or not, to be kept
                     (default: create from template and then discard)
        :type f_in: str
        :param f_out: file for leap log output (default: discard)
        :type f_out: str
        :param out_parm: parm output file name (default: 'top.parm')
        :type out_parm: str
        :param out_crd : coordinate output file name (default '0.crd' )
        :type out_crd : str
        :param kw: additional key=value parameters for Executor:
        :type kw: key=value pairs
        ::
          debug    - 0|1, keep all temporary files (default: 0)
          verbose  - 0|1, print progress messages to log (log != STDOUT)
          node     - str, host for calculation (None->local) NOT TESTED
                          (default: None)
          nice     - int, nice level (default: 0)
          log      - biskit.LogFile, program log (None->STOUT) (default: None)
        """
        # override some Executor defaults unless they are freshly given
        kw['catchout'] = kw.get('catchout', 0)

        Executor.__init__( self, 'tleap', template=template, **kw )

        self.args = '-f %s' % self.f_in

        self.leaprc = self.findLeaprc( leaprc or self.exe.leaprc )

        ## set some defaults that may or may not have been specified
        self.out_parm = kw.get('out_parm', 'top.parm' )
        self.out_crd  = kw.get('out_crd', '0.crd')

    def findLeaprc( self, ff ):
        """
        Guess full path to an existing leaprc file name or fragment of it.
        We first take the file as is, then look in AMBERHOME, then look
        in AMBERHOME/dat/leap/cmd (and its 'oldff' sub-folder).
        :param ff: forcefield code (leaprc file ending, e.g. 'ff99', 'ff01')
                   OR leaprc file name (e.g, 'leaprc.f99')
                   OR leaprc path witin $AMBERHOME
                   OR leaprc path
        :type ff: str
        :return: full path to an existing leaprc file
        :rtype: str
        :raise: LeapError, if no existing file can be found
        """
        if P.exists( T.absfile(ff) ):
            return T.absfile(ff)

        amberhome = self.exe.env['AMBERHOME']

        ## interpretations of `ff`, tried in the original precedence order:
        ## 1) file ending ('ff99'), 2) same in 'oldff', 3) file name
        ## ('leaprc.ff99'), 4) same in 'oldff', 5) path within AMBERHOME
        candidates = [
            P.join( amberhome, self.LEAPRC_PATH, self.LEAPRC + ff ),
            P.join( amberhome, self.LEAPRC_PATH, 'oldff', self.LEAPRC + ff ),
            P.join( amberhome, self.LEAPRC_PATH, ff ),
            P.join( amberhome, self.LEAPRC_PATH, 'oldff', ff ),
            P.join( amberhome, ff ),
        ]

        for r in candidates:
            if P.exists( r ):
                return r

        raise LeapError('Could not find Forcefield definition %s. ' % ff +\
              'Check exe_tleap.dat or provide an explicit leaprc parameter!')

    def isfailed( self ):
        """Success criterium: the parm output file must have been created.

        :return: True if the expected parm file does not exist
        :rtype: bool
        """
        # bug fix: this module imports `os.path as P` only -- the original
        # `os.path.exists(...)` raised NameError because `os` was unbound.
        return not P.exists( self.out_parm )

    def cleanup(self):
        """Remove the tleap 'leap.log' side product unless in debug mode."""
        if not self.debug:
            T.tryRemove( P.join(self.cwd or '', 'leap.log'))
        super().cleanup()
#############
## TESTING ##
import biskit.test as BT
import tempfile
import biskit.tools as T
class Test( BT.BiskitTest ):
    """Test AmberLeap wrapper (requires a local Amber installation)."""

    TAGS = [ BT.EXE ]

    def prepare(self):
        """Set up input PDB plus scratch names for parm topology and leap log."""
        self.fpdb = T.testRoot() + '/amber/1HPT_0.pdb'
        self.fparm = tempfile.mktemp('.parm', 'top_')
        self.flog = tempfile.mktemp('.log', 'leap_')
        self.template = T.dataRoot() + '/amber/leap/solvate_box.leap'

    def cleanUp(self):
        """Remove scratch files created by prepare()."""
        T.tryRemove( self.fparm )
        T.tryRemove( self.flog )

    def test_AmberLeap_findLeaprc(self):
        """AmberLeap.findLeaprc test"""
        self.x = AmberLeap(self.template)
        target = P.join(self.x.exe.env['AMBERHOME'], 'dat/leap/cmd/oldff/leaprc.ff10')
        if self.local:
            self.log.add( '\ntarget leaprc: %s' % target )
        # all four ways of specifying the forcefield resolve to the same file
        self.assertEqual( self.x.findLeaprc('ff10'), target )
        self.assertEqual( self.x.findLeaprc('leaprc.ff10'), target )
        self.assertEqual( self.x.findLeaprc('dat/leap/cmd/oldff/leaprc.ff10'),target)
        self.assertEqual( self.x.findLeaprc( target ), target )

    def test_AmberLeap_run( self ):
        """AmberLeap.run test"""
        self.x = AmberLeap( self.template,
                            leaprc='protein.ff03.r1',
                            leap_out=self.flog,
                            fmod='', fprep='', ss_bonds='', box=12.5,
                            in_pdb=self.fpdb,
                            debug=self.DEBUG)
        # bug fix: original called self.run() which re-invokes the TestCase
        # itself; the intent is to execute the tleap wrapper.
        self.x.run()
if __name__ == '__main__':
BT.localTest()
| graik/biskit | biskit/md/amberLeap.py | Python | gpl-3.0 | 8,351 | [
"Amber"
] | eacada5d9dabc6d70322dd723d312146f5f343d2172beacc3582a0034065f359 |
'''
Created on Nov 11, 2014
@author: alucard
'''
import vtk
from SceneObject import SceneObject
from Text import Text
class MenuItem(SceneObject):
    """A textured, pickable plane that acts as one entry of a 3D VTK menu.

    A MenuItem owns a plane actor (with selected/unselected textures), a
    Text child showing its name, an optional click callback, and child
    MenuItems that are shown when this item is opened.
    """

    def __init__(self, renderManager, width, height, parent, name, vtkTextureSelected, vtkTextureUnselected, callbackFunction = None, callbackObject = None):
        '''
        Initialize the MenuItem.
        '''
        # Initialize all the variables so that they're unique
        self.__renderManager = renderManager
        # Call the parent constructor
        super(MenuItem, self).__init__(self.__renderManager.renderers)
        # Set selected and unselected textures
        self.__selectedTexture = vtkTextureSelected
        self.__unselectedTexture = vtkTextureUnselected
        # Padding is [left, top, right, bottom]; read publicly by Get* methods.
        self.menuItemPadding = [0.0, 0.0, 0.0, 0.0]
        self.parent = parent
        self.name = name
        # Default a menu item to be hidden and collapsed.
        self.__isOpen = False
        self.__isVisible = False
        # Build the textured plane: unit plane scaled to (width, height).
        self.__planeSource = vtk.vtkPlaneSource()
        self.__transform = vtk.vtkTransform()
        self.__transform.Scale(width, height, 1)
        self.__transformFilter = vtk.vtkTransformPolyDataFilter()
        self.__transformFilter.SetInputConnection(self.__planeSource.GetOutputPort())
        self.__transformFilter.SetTransform(self.__transform)
        self.__planeMapper = vtk.vtkPolyDataMapper()
        self.__planeMapper.SetInputConnection(self.__transformFilter.GetOutputPort())
        self.vtkActor.SetMapper(self.__planeMapper)
        self.vtkActor.SetTexture(self.__unselectedTexture)
        self.vtkActor.VisibilityOff()
        self.vtkActor.DragableOff()
        self.parent.AddMenuItem(self)
        self.SetCallback(callbackFunction, callbackObject)
        bounds = self.vtkActor.GetBounds()
        self.__menuItemWidth = bounds[1] - bounds[0]
        self.__menuItemHeight = bounds[3] - bounds[2]
        # Add the text such that it is centred and at the right x position for the icon on the right of the SAOish texture
        # (root nodes do not have text, so this is a special case for leaf nodes with a known texture)
        # Below is a HACK
        planeWidth = self.GetWidth()
        textureWidth = 258.0
        textPos = 83.0
        # Special calc = left edge [-width / 2] + pixel position of text [83]
        # / total pixel width of texture [258] * total surface width [width]
        textOffset = -planeWidth / 2.0 + textPos / textureWidth * planeWidth
        self.__isHighlighted = False
        self.__textItem = Text(self.__renderManager.renderers, self, name, height/4.0, [textOffset, -height * 1.0 / 8.0, 0.01])
        # Turn off picking for the text.
        self.__textItem.vtkActor.PickableOff()
        # Add it to the children
        self.childrenObjects.append(self.__textItem)

    def __str__(self):
        '''
        Print the MenuItem
        '''
        s = "Name : " + self.GetName() + "\n"
        s += "Open : " + str(self.GetOpen()) + "\n"
        s += "Visible : " + str(self.GetVisible()) + "\n"
        s += "Width : " + str(self.GetWidth()) + "\n"
        s += "Height : " + str(self.GetHeight()) + "\n"
        s += "Padding : " + str(self.GetPadding()) + "\n"
        s += "Child Menu Item Count : " + str(self.GetChildCount()) + "\n"
        s += "Call Back Function : " + str(self.__calbackFunction) + "\n"
        s += "Call Back Object : " + str(self.__callbackObject) + "\n"
        return s

    def GetOpen(self):
        return self.__isOpen

    def GetVisible(self):
        return self.__isVisible

    def GetWidth(self):
        # Width is returned after adding MenuItem Padding (left and right)
        return self.menuItemPadding[0] + self.__menuItemWidth + self.menuItemPadding[2]

    def GetHeight(self):
        # Height is returned after adding MenuItem Padding (top and bottom)
        return self.menuItemPadding[1] + self.__menuItemHeight + self.menuItemPadding[3]

    def GetPadding(self):
        return self.menuItemPadding

    def GetParent(self):
        return self.parent

    def GetChildCount(self):
        return len(self.childrenObjects)

    def GetChildren(self):
        '''
        Create a list of all MenuItem Children (the Text child is excluded).
        '''
        return [child for child in self.childrenObjects if type(child) is MenuItem]

    def GetName(self):
        return self.name

    def SetOpen(self, status):
        self.__isOpen = status

    def SetVisible(self, status):
        '''
        Set the MenuItem visibility, set textItem visibility
        '''
        self.__isVisible = status
        # Set the MenuItems Visibility and the accompanying text visibility
        if self.__isVisible == True:
            self.vtkActor.VisibilityOn()
            self.__textItem.vtkActor.VisibilityOn()
        if self.__isVisible == False:
            self.vtkActor.VisibilityOff()
            self.__textItem.vtkActor.VisibilityOff()

    def SetPadding(self, padding):
        # Bug fix: original assigned self.__menuItemPadding, but every reader
        # (GetPadding/GetWidth/GetHeight) uses self.menuItemPadding, making
        # the setter a silent no-op.
        self.menuItemPadding = padding

    def SetPosition(self, position):
        self.SetSceneObjectPosition(position)

    def SetName(self, name):
        # Bug fix: original assigned self.__name, but GetName/__str__ read
        # self.name, so the rename never took effect.
        self.name = name

    def SetCallback(self, callbackFunction, callbackObject):
        '''
        Set the callback for this MenuItem
        '''
        self.__callbackObject = callbackObject
        self.__calbackFunction = callbackFunction

    def AddMenuItem(self, menuItem):
        self.childrenObjects.append(menuItem)

    def OpenMenuItem(self):
        '''
        Open the MenuItem, set the visibility and set the texture, set visibility for each child MenuItem
        '''
        self.SetOpen(True)
        self.SetVisible(True)
        self.vtkActor.SetTexture(self.__selectedTexture)
        if self.GetChildCount() > 0:
            for item in self.childrenObjects:
                # Make sure we are only setting MenuItem
                if type(item) is MenuItem:
                    item.SetVisible(True)

    def SetHighlightOn(self):
        '''
        Tell the object to highlight: nudge it forward and swap the texture.
        '''
        if not self.__isHighlighted:
            origPos = self.GetSceneObjectPosition()
            self.SetSceneObjectPosition([origPos[0], origPos[1], origPos[2]+0.15])
            self.vtkActor.SetTexture(self.__selectedTexture)
            self.__isHighlighted = True

    def SetHighlightOff(self):
        '''
        Turn highlighting off: undo the nudge and restore the texture.
        '''
        if self.__isHighlighted:
            origPos = self.GetSceneObjectPosition()
            self.SetSceneObjectPosition([origPos[0], origPos[1], origPos[2]-0.15])
            self.vtkActor.SetTexture(self.__unselectedTexture)
            self.__isHighlighted = False

    def CloseMenuItem(self):
        '''
        Close the MenuItem, set the visibility and set the texture, recursive function
        '''
        self.SetOpen(False)
        self.SetVisible(False)
        self.vtkActor.SetTexture(self.__unselectedTexture)
        if self.GetChildCount() > 0:
            for item in self.childrenObjects:
                # Make sure we are only setting MenuItem
                if type(item) is MenuItem:
                    item.CloseMenuItem()

    def CheckActor(self, actor):
        '''
        Check if this MenuItem is picked, return true if it is picked, also fire the MenuItem's callback if it exists.
        '''
        if self.vtkActor is actor:  # If the actor is the picked one
            # Click the button.
            if self.__calbackFunction is not None:
                self.__calbackFunction(self.__callbackObject)
            return True
        return False

    def GlobalMenuClose(self):
        '''
        Close and remove all references to the menu items.
        '''
        if self.GetChildCount() > 0:
            for item in self.childrenObjects:
                if type(item) is MenuItem:
                    item.GlobalMenuClose()
        self.RemoveSceneObject()
"VTK"
] | 0c045cc127bf2ff8fbfb9a070c110ef05013704ab5eb96cfe3539ecc8ad4eff3 |
#
# This file is a part of Siesta Help Scripts
#
# (c) Andrey Sobolev, 2013
#
import per_atom
import per_evol
import per_type
import per_step
class Data(object):
    """Registry of plottable data classes discovered in the data modules.

    Classes are grouped by category ('Function', 'Histogram',
    'Time evolution') and also indexed by their lower-cased class name.
    """

    _types = ['Function', 'Histogram', 'Time evolution']

    def __init__(self):
        import inspect
        # dirty hack; I don't know how to populate the list of modules
        # imported within current file
        modules = [per_atom, per_evol, per_type, per_step]
        is_concrete = lambda m: inspect.isclass(m) and not inspect.isabstract(m)
        discovered = []
        for module in modules:
            discovered.extend(inspect.getmembers(module, is_concrete))
        # one registry dict per category, plus a flat name -> class index
        self._classes = dict(zip(self._types, [{} for _ in self._types]))
        self._name2class = dict()
        for _, klass in discovered:
            if klass.isFunction:
                self._classes['Function'][klass.shortDoc()] = klass
            if klass.isHistogram:
                self._classes['Histogram'][klass.shortDoc()] = klass
            if klass.isTimeEvol:
                self._classes['Time evolution'][klass.shortDoc()] = klass
            self._name2class[klass.__name__.lower()] = klass

    def types(self):
        """Sorted list of supported data categories."""
        return sorted(self._types)

    def classes(self, t):
        """Sorted short-doc labels of all classes registered under type `t`."""
        assert t in self._types
        return sorted(self._classes[t].keys())

    def dataClass(self, t, c):
        """The class registered under category `t` with short-doc label `c`."""
        assert t in self._types
        assert c in self._classes[t].keys()
        return self._classes[t][c]

    def names(self):
        """All registered class names (lower-cased)."""
        return self._name2class.keys()

    def get_type_by_name(self, name):
        """Case-insensitive lookup of a data class by its class name."""
        name = name.lower()
        if name not in self._name2class:
            raise ValueError("%s is not a possible data type name" % (name, ))
        return self._name2class[name]
| ansobolev/shs | shs/data/__init__.py | Python | mit | 1,696 | [
"SIESTA"
] | ee0c18106dbb93e53a65e211f3f329e05d7248bf5fb9a6721e01a96ba3f1c532 |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import numpy as np
import espressomd
# Dihedral interaction needs more rigorous tests.
# The geometry checked here is rather simple and special.
# I also found that as the dihedral angle approaches to 0, the simulation
# values deviate from the analytic values by roughly 10%.
def rotate_vector(v, k, phi):
    """Rotate vector ``v`` around the unit axis ``k`` by angle ``phi``.

    Implements Rodrigues' rotation formula:
    v' = v cos(phi) + (k x v) sin(phi) + k (k . v)(1 - cos(phi))
    """
    cos_phi = np.cos(phi)
    sin_phi = np.sin(phi)
    return (v * cos_phi
            + np.cross(k, v) * sin_phi
            + k * np.dot(k, v) * (1.0 - cos_phi))
def dihedral_potential(k, phi, n, phase):
    """Dihedral energy ``k * (1 - cos(n * phi - phase))``.

    ``phi == -1`` is the sentinel for an undefined dihedral angle (see
    ``dihedral_angle``) and yields zero energy.
    """
    if phi == -1:
        return 0
    return k * (1 - np.cos(n * phi - phase))
def dihedral_force(k, n, phase, p1, p2, p3, p4):
    """Analytic dihedral forces on particles 1, 2 and 3.

    Returns the tuple ``(force1, force2, force3)``; when the dihedral
    angle is undefined (three consecutive particles nearly collinear)
    all forces are zero and ``(0, 0, 0)`` is returned.
    """
    b12 = p2 - p1
    b23 = p3 - p2
    b34 = p4 - p3
    normal123 = np.cross(b12, b23)
    normal234 = np.cross(b23, b34)
    len123 = np.linalg.norm(normal123)
    len234 = np.linalg.norm(normal234)
    # if dihedral angle is not defined, no forces
    if len123 <= 1e-8 or len234 <= 1e-8:
        return 0, 0, 0
    cosphi = np.abs(np.dot(normal123, normal234)) / (len123 * len234)
    phi = np.arccos(cosphi)
    f1 = (normal234 - cosphi * normal123) / len123
    f4 = (normal123 - cosphi * normal234) / len234
    coeff = -k * n * np.sin(n * phi - phase) / np.sin(phi)
    force1 = coeff * np.cross(b23, f1)
    force2 = coeff * (np.cross(b34, f4) - np.cross(b12, f1) - np.cross(b23, f1))
    force3 = coeff * (np.cross(b12, f1) - np.cross(b23, f4) - np.cross(b34, f4))
    return force1, force2, force3
class InteractionsBondedTest(ut.TestCase):
    """Compare ESPResSo's dihedral and tabulated-dihedral bonded
    interactions against the analytic reference functions above."""

    system = espressomd.System(box_l=[1.0, 1.0, 1.0])
    np.random.seed(seed=42)

    box_l = 10.
    start_pos = [5., 5., 5.]
    # rotation axis used to sweep the dihedral angle (normalized below)
    axis = np.array([1., 0., 0.])
    axis /= np.linalg.norm(axis)
    # offsets of the outer particles relative to the two central ones
    rel_pos_1 = np.array([0., 1., 0.])
    rel_pos_2 = np.array([0., 0., 1.])

    def setUp(self):
        """Create four particles; tests position them per geometry."""
        self.system.box_l = [self.box_l] * 3
        self.system.cell_system.skin = 0.4
        self.system.time_step = .1

        self.system.part.add(id=0, pos=self.start_pos, type=0)
        self.system.part.add(id=1, pos=self.start_pos, type=0)
        self.system.part.add(id=2, pos=self.start_pos, type=0)
        self.system.part.add(id=3, pos=self.start_pos, type=0)

    def tearDown(self):
        """Remove all particles so each test starts from a clean system."""
        self.system.part.clear()

    # Analytical Expression
    def dihedral_angle(self, p1, p2, p3, p4):
        """
        Calculate the dihedral angle phi based on particles' position p1, p2, p3, p4.
        """
        v12 = p2 - p1
        v23 = p3 - p2
        v34 = p4 - p3

        v12Xv23 = np.cross(v12, v23)
        l_v12Xv23 = np.linalg.norm(v12Xv23)
        v23Xv34 = np.cross(v23, v34)
        l_v23Xv34 = np.linalg.norm(v23Xv34)

        # if dihedral angle is not defined, phi := -1.
        if l_v12Xv23 <= 1e-8 or l_v23Xv34 <= 1e-8:
            return -1
        else:
            cosphi = np.abs(np.dot(v12Xv23, v23Xv34)) / (
                l_v12Xv23 * l_v23Xv34)
            return np.arccos(cosphi)

    # Test Dihedral Angle
    def test_dihedral(self):
        """Sweep the dihedral angle; compare energy and central force
        against the analytic dihedral_potential / dihedral_force."""
        dh_k = 1
        dh_phase = np.pi / 6
        dh_n = 1
        dh = espressomd.interactions.Dihedral(
            bend=dh_k, mult=dh_n, phase=dh_phase)
        self.system.bonded_inter.add(dh)
        self.system.part[1].add_bond((dh, 0, 2, 3))
        self.system.part[2].pos = self.system.part[1].pos + [1, 0, 0]

        N = 111
        d_phi = np.pi / (N * 4)
        for i in range(N):
            # rotate the outer particles in opposite directions to open
            # the dihedral angle step by step
            self.system.part[0].pos = self.system.part[1].pos + \
                rotate_vector(self.rel_pos_1, self.axis, i * d_phi)
            self.system.part[3].pos = self.system.part[2].pos + \
                rotate_vector(self.rel_pos_2, self.axis, -i * d_phi)
            self.system.integrator.run(recalc_forces=True, steps=0)

            # Calculate energies
            E_sim = self.system.analysis.energy()["bonded"]
            phi = self.dihedral_angle(self.system.part[0].pos,
                                      self.system.part[1].pos,
                                      self.system.part[2].pos,
                                      self.system.part[3].pos)
            E_ref = dihedral_potential(dh_k, phi, dh_n, dh_phase)

            # Calculate forces
            f2_sim = self.system.part[1].f
            _, f2_ref, _ = dihedral_force(dh_k, dh_n, dh_phase,
                                          self.system.part[0].pos,
                                          self.system.part[1].pos,
                                          self.system.part[2].pos,
                                          self.system.part[3].pos)

            # Check that energies match, ...
            np.testing.assert_almost_equal(E_sim, E_ref)
            # and has correct value.
            f2_sim_copy = np.copy(f2_sim)
            np.testing.assert_almost_equal(f2_sim_copy, f2_ref)

    # Test Tabulated Dihedral Angle
    def test_tabulated_dihedral(self):
        """Check stored tabulated parameters and linear interpolation of
        the tabulated energy at half the table's angular resolution."""
        N = 111
        d_phi = 2 * np.pi / N
        # tabulated values for the range [0, 2*pi]
        tab_energy = [np.cos(i * d_phi) for i in range(N + 1)]
        tab_force = [np.cos(i * d_phi) for i in range(N + 1)]
        dihedral_tabulated = espressomd.interactions.TabulatedDihedral(
            energy=tab_energy, force=tab_force)
        self.system.bonded_inter.add(dihedral_tabulated)
        self.system.part[1].add_bond((dihedral_tabulated, 0, 2, 3))
        self.system.part[2].pos = self.system.part[1].pos + [1, 0, 0]

        # check stored parameters
        interaction_id = len(self.system.bonded_inter) - 1
        tabulated = self.system.bonded_inter[interaction_id]
        np.testing.assert_allclose(tabulated.params['force'], tab_force)
        np.testing.assert_allclose(tabulated.params['energy'], tab_energy)
        np.testing.assert_almost_equal(tabulated.params['min'], 0.)
        np.testing.assert_almost_equal(tabulated.params['max'], 2 * np.pi)

        # measure at half the angular resolution to observe interpolation
        for i in range(2 * N - 1):
            # increase dihedral angle by d_phi (phi ~ 0 at i = 0)
            self.system.part[0].pos = self.system.part[1].pos + \
                rotate_vector(self.rel_pos_1, self.axis, -i * d_phi / 4)
            self.system.part[3].pos = self.system.part[2].pos + \
                rotate_vector(self.rel_pos_1, self.axis, i * d_phi / 4)
            self.system.integrator.run(recalc_forces=True, steps=0)

            # Calculate energies
            E_sim = self.system.analysis.energy()["bonded"]

            # Get tabulated values
            j = i // 2
            if i % 2 == 0:
                E_ref = tab_energy[j]
            else:
                # midpoint between two table entries: linear interpolation
                E_ref = (tab_energy[j] + tab_energy[j + 1]) / 2.0

            # Check that energies match, ...
            np.testing.assert_almost_equal(E_sim, E_ref)
if __name__ == '__main__':
ut.main()
| KaiSzuttor/espresso | testsuite/python/interactions_dihedral.py | Python | gpl-3.0 | 7,742 | [
"ESPResSo"
] | 9e17e8de37915aabc9afa123cd7ba318e0066672e1b0a2a0d727bb5dcd549829 |
# $HeadURL: $
""" ResourceManagementClient
Client to interact with the ResourceManagementDB.
"""
from DIRAC import gLogger, S_ERROR
from DIRAC.Core.DISET.RPCClient import RPCClient
__RCSID__ = '$Id: $'
class ResourceManagementClient( object ):
"""
The :class:`ResourceManagementClient` class exposes the :mod:`DIRAC.ResourceManagement`
API. All functions you need are on this client.
It has the 'direct-db-access' functions, the ones of the type:
- insert
- update
- select
- delete
that return parts of the RSSConfiguration stored on the CS, and used everywhere
on the RSS module. Finally, and probably more interesting, it exposes a set
of functions, badly called 'boosters'. They are 'home made' functions using the
basic database functions that are interesting enough to be exposed.
The client will ALWAYS try to connect to the DB, and in case of failure, to the
XML-RPC server ( namely :class:`ResourceManagementDB` and
:class:`ResourceManagementHancler` ).
You can use this client on this way
>>> from DIRAC.ResourceManagementSystem.Client.ResourceManagementClient import ResourceManagementClient
>>> rsClient = ResourceManagementClient()
All functions calling methods exposed on the database or on the booster are
making use of some syntactic sugar, in this case a decorator that simplifies
the client considerably.
"""
def __init__( self , serviceIn = None ):
'''
The client tries to connect to :class:ResourceManagementDB by default. If it
fails, then tries to connect to the Service :class:ResourceManagementHandler.
'''
if not serviceIn:
self.gate = RPCClient( "ResourceStatus/ResourceManagement" )
else:
self.gate = serviceIn
# AccountingCache Methods ....................................................
def selectAccountingCache( self, name = None, plotType = None, plotName = None,
result = None, dateEffective = None,
lastCheckTime = None, meta = None ):
'''
Gets from PolicyResult all rows that match the parameters given.
:Parameters:
**name** - `[, string, list]`
name of an individual of the grid topology
**plotType** - `[, string, list]`
the plotType name (e.g. 'Pilot')
**plotName** - `[, string, list]`
the plot name
**result** - `[, string, list]`
command result
**dateEffective** - `[, datetime, list]`
time-stamp from which the result is effective
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'select', 'AccountingCache', locals() )
def addOrModifyAccountingCache( self, name = None, plotType = None,
plotName = None, result = None,
dateEffective = None, lastCheckTime = None,
meta = None ):
'''
Adds or updates-if-duplicated to AccountingCache. Using `name`, `plotType`
and `plotName` to query the database, decides whether to insert or update the
table.
:Parameters:
**name** - `string`
name of an individual of the grid topology
**plotType** - `string`
the plotType name (e.g. 'Pilot')
**plotName** - `string`
the plot name
**result** - `string`
command result
**dateEffective** - `datetime`
time-stamp from which the result is effective
**lastCheckTime** - `datetime`
time-stamp setting last time the result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'AccountingCache', locals() )
# GGUSTicketsCache Methods ...................................................
#FIXME: only one method
def selectGGUSTicketsCache( self, gocSite = None, link = None, openTickets = None,
tickets = None, lastCheckTime = None, meta = None ):
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'select', 'GGUSTicketsCache', locals() )
def deleteGGUSTicketsCache( self, gocSite = None, link = None, openTickets = None,
tickets = None, lastCheckTime = None, meta = None ):
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'delete', 'GGUSTicketsCache', locals() )
def addOrModifyGGUSTicketsCache( self, gocSite = None, link = None,
openTickets = None, tickets = None,
lastCheckTime = None, meta = None ):
# Unused argument
# pylint: disable-msg=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'GGUSTicketsCache', locals() )
# DowntimeCache Methods ......................................................
def selectDowntimeCache( self, downtimeID = None, element = None, name = None,
startDate = None, endDate = None, severity = None,
description = None, link = None, dateEffective = None,
lastCheckTime = None, gocdbServiceType = None, meta = None ):
'''
Gets from DowntimeCache all rows that match the parameters given.
:Parameters:
**downtimeID** - [, `string`, `list`]
unique id for the downtime
**element** - [, `string`, `list`]
valid element in the topology ( Site, Resource, Node )
**name** - [, `string`, `list`]
name of the element where the downtime applies
**startDate** - [, `datetime`, `list`]
starting time for the downtime
**endDate** - [, `datetime`, `list`]
ending time for the downtime
**severity** - [, `string`, `list`]
severity assigned by the gocdb
**description** - [, `string`, `list`]
brief description of the downtime
**link** - [, `string`, `list`]
url to the details
**dateEffective** - [, `datetime`, `list`]
time when the entry was created in this database
**lastCheckTime** - [, `datetime`, `list`]
time-stamp setting last time the result was checked
**gocdbServiceType** - `string`
service type assigned by gocdb
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'select', 'DowntimeCache', locals() )
def deleteDowntimeCache( self, downtimeID = None, element = None, name = None,
startDate = None, endDate = None, severity = None,
description = None, link = None, dateEffective = None,
lastCheckTime = None, gocdbServiceType = None, meta = None ):
'''
Deletes from DowntimeCache all rows that match the parameters given.
:Parameters:
**downtimeID** - [, `string`, `list`]
unique id for the downtime
**element** - [, `string`, `list`]
valid element in the topology ( Site, Resource, Node )
**name** - [, `string`, `list`]
name of the element where the downtime applies
**startDate** - [, `datetime`, `list`]
starting time for the downtime
**endDate** - [, `datetime`, `list`]
ending time for the downtime
**severity** - [, `string`, `list`]
severity assigned by the gocdb
**description** - [, `string`, `list`]
brief description of the downtime
**link** - [, `string`, `list`]
url to the details
**dateEffective** - [, `datetime`, `list`]
time when the entry was created in this database
**lastCheckTime** - [, `datetime`, `list`]
time-stamp setting last time the result was checked
**gocdbServiceType** - `string`
service type assigned by gocdb
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'delete', 'DowntimeCache', locals() )
def addOrModifyDowntimeCache( self, downtimeID = None, element = None, name = None,
startDate = None, endDate = None, severity = None,
description = None, link = None, dateEffective = None,
lastCheckTime = None, gocdbServiceType = None, meta = None ):
'''
Adds or updates-if-duplicated to DowntimeCache. Using `downtimeID` to query
the database, decides whether to insert or update the table.
:Parameters:
**downtimeID** - `string`
unique id for the downtime
**element** - `string`
valid element in the topology ( Site, Resource, Node )
**name** - `string`
name of the element where the downtime applies
**startDate** - `datetime`
starting time for the downtime
**endDate** - `datetime`
ending time for the downtime
**severity** - `string`
severity assigned by the gocdb
**description** - `string`
brief description of the downtime
**link** - `string`
url to the details
**dateEffective** - `datetime`
time when the entry was created in this database
**lastCheckTime** - `datetime`
time-stamp setting last time the result was checked
**gocdbServiceType** - `string`
service type assigned by gocdb
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'DowntimeCache', locals() )
# JobCache Methods ...........................................................
def selectJobCache( self, site = None, maskStatus = None, efficiency = None,
status = None, lastCheckTime = None, meta = None ):
'''
Gets from JobCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site element
**maskStatus** - `[, string, list ]`
maskStatus for the site
**efficiency** - `[, float, list ]`
job efficiency ( successful / total )
**status** - `[, string, list ]`
status for the site computed
**lastCheckTime** - `[, datetime, list ]`
measure calculated
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'select', 'JobCache', locals() )
def deleteJobCache( self, site = None, maskStatus = None, efficiency = None,
status = None, lastCheckTime = None, meta = None ):
'''
Deletes from JobCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site element
**maskStatus** - `[, string, list ]`
maskStatus for the site
**efficiency** - `[, float, list ]`
job efficiency ( successful / total )
**status** - `[, string, list ]`
status for the site computed
**lastCheckTime** - `[, datetime, list ]`
measure calculated
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'delete', 'JobCache', locals() )
def addOrModifyJobCache( self, site = None, maskStatus = None, efficiency = None,
status = None, lastCheckTime = None, meta = None ):
'''
Adds or updates-if-duplicated to JobCache. Using `site` to query
the database, decides whether to insert or update the table.
:Parameters:
**site** - `[, string, list ]`
name of the site element
**maskStatus** - `[, string, list ]`
maskStatus for the site
**efficiency** - `[, float, list ]`
job efficiency ( successful / total )
**status** - `[, string, list ]`
status for the site computed
**lastCheckTime** - `[, datetime, list ]`
measure calculated
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'JobCache', locals() )
# TransferCache Methods ......................................................
def selectTransferCache( self, sourceName = None, destinationName = None, metric = None,
value = None, lastCheckTime = None, meta = None ):
'''
# Gets from TransferCache all rows that match the parameters given.
#
# :Parameters:
# **elementName** - `[, string, list ]`
# name of the element
# **direction** - `[, string, list ]`
# the element taken as Source or Destination of the transfer
# **metric** - `[, string, list ]`
# measured quality of failed transfers
# **value** - `[, float, list ]`
# percentage
# **lastCheckTime** - `[, float, list ]`
# time-stamp setting last time the result was checked
# **meta** - `[, dict]`
# meta-data for the MySQL query. It will be filled automatically with the\
# `table` key and the proper table name.
#
# :return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'select', 'TransferCache', locals() )
def deleteTransferCache( self, sourceName = None, destinationName = None, metric = None,
value = None, lastCheckTime = None, meta = None ):
'''
# Deletes from TransferCache all rows that match the parameters given.
#
# :Parameters:
# **elementName** - `[, string, list ]`
# name of the element
# **direction** - `[, string, list ]`
# the element taken as Source or Destination of the transfer
# **metric** - `[, string, list ]`
# measured quality of failed transfers
# **value** - `[, float, list ]`
# percentage
# **lastCheckTime** - `[, float, list ]`
# time-stamp setting last time the result was checked
# **meta** - `[, dict]`
# meta-data for the MySQL query. It will be filled automatically with the\
# `table` key and the proper table name.
#
# :return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'delete', 'TransferCache', locals() )
def addOrModifyTransferCache( self, sourceName = None, destinationName = None,
metric = None, value = None, lastCheckTime = None,
meta = None ):
'''
# Adds or updates-if-duplicated to TransferCache. Using `elementName`, `direction`
# and `metric` to query the database, decides whether to insert or update the table.
#
# :Parameters:
# **elementName** - `string`
# name of the element
# **direction** - `string`
# the element taken as Source or Destination of the transfer
# **metric** - `string`
# measured quality of failed transfers
# **value** - `float`
# percentage
# **lastCheckTime** - `datetime`
# time-stamp setting last time the result was checked
# **meta** - `[, dict]`
# meta-data for the MySQL query. It will be filled automatically with the\
# `table` key and the proper table name.
#
# :return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'TransferCache', locals() )
# PilotCache Methods .........................................................
def selectPilotCache( self, site = None, cE = None, pilotsPerJob = None,
pilotJobEff = None, status = None, lastCheckTime = None,
meta = None ):
'''
Gets from TransferCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site
**cE** - `[, string, list ]`
name of the CE of 'Multiple' if all site CEs are considered
**pilotsPerJob** - `[, float, list ]`
measure calculated
**pilotJobEff** - `[, float, list ]`
percentage
**status** - `[, float, list ]`
status of the CE / Site
**lastCheckTime** - `[, datetime, list ]`
measure calculated
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'select', 'PilotCache', locals() )
def deletePilotCache( self, site = None, cE = None, pilotsPerJob = None,
pilotJobEff = None, status = None, lastCheckTime = None,
meta = None ):
'''
Deletes from TransferCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site
**cE** - `[, string, list ]`
name of the CE of 'Multiple' if all site CEs are considered
**pilotsPerJob** - `[, float, list ]`
measure calculated
**pilotJobEff** - `[, float, list ]`
percentage
**status** - `[, float, list ]`
status of the CE / Site
**lastCheckTime** - `[, datetime, list ]`
measure calculated
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'delete', 'PilotCache', locals() )
def addOrModifyPilotCache( self, site = None, cE = None, pilotsPerJob = None,
pilotJobEff = None, status = None, lastCheckTime = None,
meta = None ):
'''
Adds or updates-if-duplicated to PilotCache. Using `cE` and `timespan`
to query the database, decides whether to insert or update the table.
:Parameters:
**site** - `string`
name of the site
**cE** - `string`
name of the CE of 'Multiple' if all site CEs are considered
**pilotsPerJob** - `float`
measure calculated
**pilotJobEff** - `float`
percentage
**status** - `string`
status of the CE / Site
**lastCheckTime** - `datetime`
measure calculated
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'PilotCache', locals() )
# PolicyResult Methods .......................................................
def selectPolicyResult( self, element = None, name = None, policyName = None,
statusType = None, status = None, reason = None,
lastCheckTime = None, meta = None ):
'''
Gets from PolicyResult all rows that match the parameters given.
:Parameters:
**granularity** - `[, string, list]`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `[, string, list]`
name of the element
**policyName** - `[, string, list]`
name of the policy
**statusType** - `[, string, list]`
it has to be a valid status type for the given granularity
**status** - `[, string, list]`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `[, string, list]`
decision that triggered the assigned status
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the policy result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'select', 'PolicyResult', locals() )
def deletePolicyResult( self, element = None, name = None,
policyName = None, statusType = None, status = None,
reason = None, lastCheckTime = None, meta = None ):
'''
Deletes from PolicyResult all rows that match the parameters given.
:Parameters:
**granularity** - `[, string, list]`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `[, string, list]`
name of the element
**policyName** - `[, string, list]`
name of the policy
**statusType** - `[, string, list]`
it has to be a valid status type for the given granularity
**status** - `[, string, list]`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `[, string, list]`
decision that triggered the assigned status
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the policy result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'delete', 'PolicyResult', locals() )
def addOrModifyPolicyResult( self, element = None, name = None,
policyName = None, statusType = None,
status = None, reason = None, dateEffective = None,
lastCheckTime = None, meta = None ):
'''
Adds or updates-if-duplicated to PolicyResult. Using `name`, `policyName` and
`statusType` to query the database, decides whether to insert or update the table.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidElement ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `string`
name of the element
**policyName** - `string`
name of the policy
**statusType** - `string`
it has to be a valid status type for the given element
**status** - `string`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `string`
decision that triggered the assigned status
**dateEffective** - `datetime`
time-stamp from which the policy result is effective
**lastCheckTime** - `datetime`
time-stamp setting last time the policy result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'PolicyResult', locals() )
# PolicyResultLog Methods ....................................................
def selectPolicyResultLog( self, element = None, name = None,
policyName = None, statusType = None, status = None,
reason = None, lastCheckTime = None, meta = None ):
'''
Gets from PolicyResultLog all rows that match the parameters given.
:Parameters:
**element** - `[, string, list]`
it has to be a valid element ( ValidRes ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `[, string, list]`
name of the element
**policyName** - `[, string, list]`
name of the policy
**statusType** - `[, string, list]`
it has to be a valid status type for the given element
**status** - `[, string, list]`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `[, string, list]`
decision that triggered the assigned status
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the policy result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'select', 'PolicyResultLog', locals() )
def deletePolicyResultLog( self, element = None, name = None,
policyName = None, statusType = None, status = None,
reason = None, lastCheckTime = None, meta = None ):
'''
Deletes from PolicyResult all rows that match the parameters given.
:Parameters:
**element** - `[, string, list]`
it has to be a valid element ( ValidRes ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `[, string, list]`
name of the element
**policyName** - `[, string, list]`
name of the policy
**statusType** - `[, string, list]`
it has to be a valid status type for the given element
**status** - `[, string, list]`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `[, string, list]`
decision that triggered the assigned status
**lastCheckTime** - `[, datetime, list]`
time-stamp setting last time the policy result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'delete', 'PolicyResultLog', locals() )
def addOrModifyPolicyResultLog( self, element = None, name = None,
policyName = None, statusType = None,
status = None, reason = None, lastCheckTime = None,
meta = None ):
'''
Adds or updates-if-duplicated to PolicyResultLog. Using `name`, `policyName`,
'statusType` to query the database, decides whether to insert or update the table.
:Parameters:
**element** - `string`
it has to be a valid element ( ValidRes ), any of the defaults: `Site` \
| `Service` | `Resource` | `StorageElement`
**name** - `string`
name of the element
**policyName** - `string`
name of the policy
**statusType** - `string`
it has to be a valid status type for the given element
**status** - `string`
it has to be a valid status, any of the defaults: `Active` | `Degraded` | \
`Probing` | `Banned`
**reason** - `string`
decision that triggered the assigned status
**lastCheckTime** - `datetime`
time-stamp setting last time the policy result was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'PolicyResultLog', locals() )
# SpaceTokenOccupancyCache Methods ...........................................
def selectSpaceTokenOccupancyCache( self, endpoint = None, token = None,
total = None, guaranteed = None, free = None,
lastCheckTime = None, meta = None ):
'''
Gets from SpaceTokenOccupancyCache all rows that match the parameters given.
:Parameters:
**endpoint** - `[, string, list]`
srm endpoint
**token** - `[, string, list]`
name of the token
**total** - `[, integer, list]`
total terabytes
**guaranteed** - `[, integer, list]`
guaranteed terabytes
**free** - `[, integer, list]`
free terabytes
**lastCheckTime** - `[, datetime, list]`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'select', 'SpaceTokenOccupancyCache', locals() )
def deleteSpaceTokenOccupancyCache( self, endpoint = None, token = None,
total = None, guaranteed = None, free = None,
lastCheckTime = None, meta = None ):
'''
Deletes from SpaceTokenOccupancyCache all rows that match the parameters given.
:Parameters:
**endpoint** - `[, string, list]`
srm endpoint
**token** - `[, string, list]`
name of the token
**total** - `[, integer, list]`
total terabytes
**guaranteed** - `[, integer, list]`
guaranteed terabytes
**free** - `[, integer, list]`
free terabytes
**lastCheckTime** - `[, datetime, list]`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'delete', 'SpaceTokenOccupancyCache', locals() )
def addOrModifySpaceTokenOccupancyCache( self, endpoint = None, token = None,
total = None, guaranteed = None,
free = None, lastCheckTime = None,
meta = None ):
'''
Adds or updates-if-duplicated to SpaceTokenOccupancyCache. Using `site` and `token`
to query the database, decides whether to insert or update the table.
:Parameters:
**endpoint** - `[, string, list]`
srm endpoint
**token** - `string`
name of the token
**total** - `integer`
total terabytes
**guaranteed** - `integer`
guaranteed terabytes
**free** - `integer`
free terabytes
**lastCheckTime** - `datetime`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'SpaceTokenOccupancyCache', locals() )
# UserRegistryCache Methods ..................................................
def selectUserRegistryCache( self, login = None, name = None, email = None,
lastCheckTime = None, meta = None ):
'''
Gets from UserRegistryCache all rows that match the parameters given.
:Parameters:
**login** - `[, string, list]`
user's login ID
**name** - `[, string, list]`
user's name
**email** - `[, string, list]`
user's email
**lastCheckTime** - `[, datetime, list]`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'select', 'UserRegistryCache', locals() )
def deleteUserRegistryCache( self, login = None, name = None, email = None,
lastCheckTime = None, meta = None ):
'''
Deletes from UserRegistryCache all rows that match the parameters given.
:Parameters:
**login** - `[, string, list]`
user's login ID
**name** - `[, string, list]`
user's name
**email** - `[, string, list]`
user's email
**lastCheckTime** - `[, datetime, list]`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'delete', 'UserRegistryCache', locals() )
def addOrModifyUserRegistryCache( self, login = None, name = None,
email = None, lastCheckTime = None, meta = None ):
'''
Adds or updates-if-duplicated to UserRegistryCache. Using `login` to query
the database, decides whether to insert or update the table.
:Parameters:
**login** - `string`
user's login ID
**name** - `string`
user's name
**email** - `string`
user's email
**lastCheckTime** - `datetime`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'UserRegistryCache', locals() )
# VOBOXCache Methods ........................................................
def selectVOBOXCache( self, site = None, system = None, serviceUp = None,
machineUp = None, lastCheckTime = None, meta = None ):
'''
Gets from VOBOXCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site hosting the VOBOX
**system** - `[, string, list ]`
DIRAC system ( e.g. ConfigurationService )
**serviceUp** - `[, integer, list]`
seconds the system has been up
**machineUp** - `[, integer, list]`
seconds the machine has been up
**lastCheckTime** - `[, datetime, list]`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
return self._query( 'select', 'VOBOXCache', locals() )
def deleteVOBOXCache( self, site = None, system = None, serviceUp = None,
machineUp = None, lastCheckTime = None, meta = None ):
'''
Deletes from VOBOXCache all rows that match the parameters given.
:Parameters:
**site** - `[, string, list ]`
name of the site hosting the VOBOX
**system** - `[, string, list ]`
DIRAC system ( e.g. ConfigurationService )
**serviceUp** - `[, integer, list]`
seconds the system has been up
**machineUp** - `[, integer, list]`
seconds the machine has been up
**lastCheckTime** - `[, datetime, list]`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'delete', 'VOBOXCache', locals() )
def addOrModifyVOBOXCache( self, site = None, system = None, serviceUp = None,
machineUp = None, lastCheckTime = None, meta = None ):
'''
Adds or updates-if-duplicated to VOBOXCache. Using `site` and `system` to query
the database, decides whether to insert or update the table.
:Parameters:
**site** - `string`
name of the site hosting the VOBOX
**system** - `string`
DIRAC system ( e.g. ConfigurationService )
**serviceUp** - `integer`
seconds the system has been up
**machineUp** - `integer`
seconds the machine has been up
**lastCheckTime** - `datetime`
time-stamp from which the result is effective
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
'''
# Unused argument
# pylint: disable=W0613
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'VOBOXCache', locals() )
# ErrorReportBuffer Methods ..................................................
def insertErrorReportBuffer( self, name = None, elementType = None, reporter = None,
errorMessage = None, operation = None, arguments = None,
dateEffective = None, meta = None ):
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'insert', 'ErrorReportBuffer', locals() )
def selectErrorReportBuffer( self, name = None, elementType = None, reporter = None,
errorMessage = None, operation = None, arguments = None,
dateEffective = None, meta = None ):
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'select', 'ErrorReportBuffer', locals() )
def deleteErrorReportBuffer( self, name = None, elementType = None, reporter = None,
errorMessage = None, operation = None, arguments = None,
dateEffective = None, meta = None ):
# Unused argument
# pylint: disable-msg=W0613
return self._query( 'delete', 'ErrorReportBuffer', locals() )
# Protected methods ..........................................................
def _query( self, queryType, tableName, parameters ):
'''
It is a simple helper, this way inheriting classes can use it.
'''
return self.__query( queryType, tableName, parameters )
def __query( self, queryType, tableName, parameters ):
'''
This method is a rather important one. It will format the input for the DB
queries, instead of doing it on a decorator. Two dictionaries must be passed
to the DB. First one contains 'columnName' : value pairs, being the key
lower camel case. The second one must have, at lease, a key named 'table'
with the right table name.
'''
# Functions we can call, just a light safety measure.
_gateFunctions = [ 'insert', 'update', 'select', 'delete', 'addOrModify', 'addIfNotThere' ]
if not queryType in _gateFunctions:
return S_ERROR( '"%s" is not a proper gate call' % queryType )
gateFunction = getattr( self.gate, queryType )
# If meta is None, we set it to {}
meta = ( True and parameters.pop( 'meta' ) ) or {}
# params = parameters
# Remove self, added by locals()
del parameters[ 'self' ]
meta[ 'table' ] = tableName
gLogger.debug( 'Calling %s, with \n params %s \n meta %s' % ( queryType, parameters, meta ) )
return gateFunction( parameters, meta )
#...............................................................................
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| Sbalbp/DIRAC | ResourceStatusSystem/Client/ResourceManagementClient.py | Python | gpl-3.0 | 41,110 | [
"DIRAC"
] | 827d5c9546966c6b0d9c7e70576d54aa40385c399e5b0feedd4c7231fc1958f4 |
# modified mexican hat wavelet test.py
# spectral analysis for RADAR and WRF patterns
import os, shutil
import time
import pickle
import numpy as np
from scipy import signal, ndimage
import matplotlib.pyplot as plt
from armor import defaultParameters as dp
from armor import pattern
from armor import objects4 as ob
#from armor import misc as ms
dbz = pattern.DBZ
testScriptsFolder = dp.root + 'python/armor/tests/'
testName = "modifiedMexicanHatTest8"
timeString = str(int(time.time()))
outputFolder = dp.root + 'labLogs/%d-%d-%d-%s/' % \
(time.localtime().tm_year, time.localtime().tm_mon, time.localtime().tm_mday, testName)
if not os.path.exists(outputFolder):
os.makedirs(outputFolder)
shutil.copyfile(testScriptsFolder+testName+".py", outputFolder+ timeString + testName+".py")
kongreywrf = ob.kongreywrf
kongreywrf.fix()
kongrey = ob.kongrey
monsoon = ob.monsoon
monsoon.list= [v for v in monsoon.list if '20120612' in v.dataTime]
march2014 = ob.march2014
march2014wrf11 = ob.march2014wrf11
march2014wrf12 = ob.march2014wrf12
sigmas = [1, 2, 4, 5, 8 ,10 ,16, 20, 32, 40, 64, 80, 128, 160, 256, 320,]
march2014.list = [v for v in march2014 if v.dataTime>"20140312.1330"] #hack 2014-05-05
dbzstreams = [march2014]
summaryFile = open(outputFolder + timeString + "summary.txt", 'a')
for ds in dbzstreams:
summaryFile.write("\n===============================================================\n\n\n")
streamMean = 0.
dbzCount = 0
for a in ds:
print "-------------------------------------------------"
print testName
print
print a.name
a.load()
a.setThreshold(0)
a.saveImage(imagePath=outputFolder+a.name+".png")
L = []
a.responseImages = [] #2014-05-02
#for sigma in [1, 2, 4, 8 ,16, 32, 64, 128, 256, 512]:
for sigma in sigmas:
print "sigma:", sigma
a.load()
a.setThreshold(0)
arr0 = a.matrix
#arr1 = signal.convolve2d(arr0, mask_i, mode='same', boundary='fill')
arr1 = ndimage.filters.gaussian_laplace(arr0, sigma=sigma, mode="constant", cval=0.0) * sigma**2 #2014-04-29
a1 = dbz(matrix=arr1.real, name=a.name + "_" + testName + "_sigma" + str(sigma))
L.append({ 'sigma' : sigma,
'a1' : a1,
'abssum1': abs(a1.matrix).sum(),
'sum1' : a1.matrix.sum(),
})
print "abs sum", abs(a1.matrix.sum())
#a1.show()
#a2.show()
plt.close()
a1.histogram(display=False, outputPath=outputFolder+a1.name+"_histogram.png")
###############################################################################
# computing the spectrum, i.e. sigma for which the LOG has max response
# 2014-05-02
a.responseImages.append({'sigma' : sigma,
'matrix' : arr1 * sigma**2,
})
a_LOGspec = a.copy()
a_LOGspec.name= a.name + "Laplacian-of-Gaussian spectrum"
a_LOGspec.imagePath=outputFolder+a.name+"_LOGspec.png"
a_LOGspec.outputPath = outputFolder+a.name+"_LOGspec.dat"
a_LOGspec.cmap = 'jet'
a.responseImages = np.dstack([v['matrix'] for v in a.responseImages])
print 'shape:', a.responseImages.shape #debug
a.responseMax = a.responseImages.max(axis=2) # the deepest dimension
a_LOGspec.matrix = np.zeros(a.matrix.shape)
for count, sigma in enumerate(sigmas):
a_LOGspec.matrix += sigma * (a.responseMax == a.responseImages[:,:,count])
a_LOGspec.vmin = a_LOGspec.matrix.min()
a_LOGspec.vmax = a_LOGspec.matrix.max()
a_LOGspec.saveImage()
a_LOGspec.saveMatrix()
a_LOGspec.histogram(display=False, outputPath=outputFolder+a1.name+"_LOGspec_histogram.png") #2014-05-05
# end computing the sigma for which the LOG has max response
# 2014-05-02
##############################################################################
#pickle.dump(L, open(outputFolder+ a.name +'_test_results.pydump','w')) # no need to dump if test is easy
x = [v['sigma'] for v in L]
y1 = [v['abssum1'] for v in L]
plt.close()
plt.plot(x,y1)
plt.title(a1.name+ '\n absolute values against sigma')
plt.savefig(outputFolder+a1.name+"-spectrum-histogram.png")
plt.close()
# now update the mean
streamMeanUpdate = np.array([v['abssum1'] for v in L])
dbzCount += 1
streamMean = 1.* ((streamMean*(dbzCount -1)) + streamMeanUpdate ) / dbzCount
sigmas =[v['sigma'] for v in L]
print "Stream Count and Mean so far:", dbzCount, streamMean
# now save the mean and the plot
summaryText = '\n---------------------------------------\n'
summaryText += str(int(time.time())) + '\n'
summaryText += "dbzStream Name:" + ds.name + '\n'
summaryText += "dbzCount:\t" + str(dbzCount) + '\n'
summaryText +="sigma:\t\t" + str(sigmas) + '\n'
summaryText += "streamMean:\t" + str(streamMean.tolist()) +'\n'
print summaryText
print "saving..."
# release the memory
a.matrix = np.array([0])
summaryFile.write(summaryText)
plt.close()
plt.plot(sigmas, streamMean)
plt.title(ds.name + '- average laplacian-of-gaussian spectrum for ' +str(dbzCount) + ' DBZ patterns')
plt.savefig(outputFolder + ds.name + "_average_LoG_spectrum.png")
plt.close()
summaryFile.close()
| yaukwankiu/armor | tests/modifiedMexicanHatTest8.py | Python | cc0-1.0 | 5,809 | [
"Gaussian"
] | cb8cdff0f9023c1bfcc787b1402653229d386042600f62ff53850d01ab71940f |
# -*- coding: utf-8 -*-
## Sid Meier's Civilization 4
## Copyright Firaxis Games 2005
#import string
#from time import time
#from copy import deepcopy
from threading import Timer
import re
from CvPythonExtensions import *
import CvUtil
from CvWBDesc import CvPlayerDesc
import Popup as PyPopup
import ScreenInput
import CvScreenEnums
import CvPediaScreen # base class
import CvScreensInterface
# For overriding
import CvGameInterface
import CvGameUtils
import CvEventInterface
import ModUpdater
# Delay of first drawing call of mod window after startup
# Values < 0 disable delayed drawing.
DELAYED_MS = 5000
# globals
gc = CyGlobalContext()
ArtFileMgr = CyArtFileMgr()
localText = CyTranslator()
"""
Add all required changes to Civ4 classes/modules
"""
def integrate():
    """Wire the mod-updater screen into the stock Civ4 screen machinery.

    Creates the singleton CvModUpdaterScreen, registers it in
    CvScreensInterface's input/navigation maps, and monkey-patches the
    pediaShowHistorical and delayedPythonCall hooks so updater events are
    routed to this screen. Must be called once at mod startup.
    """
    # Attention, shifting this definition from CvScreenEnums into
    # integrate() could lead to application crashs. Always define value
    # directly in CvScreenEnums.
    #CvScreenEnums.MODUPDATER_SCREEN = 2000
    CvScreensInterface.modUpdaterScreen = CvModUpdaterScreen()

    def _showModUpdaterScreen():
        # Only show in the main menu (no active player) and never on a
        # pitboss host.
        if CyGame().getActivePlayer() == -1 and not CyGame().isPitbossHost():
            CvScreensInterface.modUpdaterScreen.showScreen(True)

    def _pediaShowHistorical(argsList):
        # Switch between Pedia and ModUpdater screen
        if argsList[0] >= CvScreensInterface.modUpdaterScreen.ID_OFFSET:
            val1 = argsList[0] - CvScreensInterface.modUpdaterScreen.ID_OFFSET
            val2 = argsList[1]
            # Just return here because event is already handled
            return
            # deprecated
            # CvScreensInterface.modUpdaterScreen.handleClick(val1, val2)
        else:
            iEntryId = CvScreensInterface.pediaMainScreen.pediaHistorical.getIdFromEntryInfo(argsList[0], argsList[1])
            CvScreensInterface.pediaMainScreen.pediaJump(CvScreenEnums.PEDIA_HISTORY, iEntryId, True)
        return

    CvScreensInterface.showModUpdaterScreen = _showModUpdaterScreen
    CvScreensInterface.pediaShowHistorical = _pediaShowHistorical
    CvScreensInterface.HandleInputMap[CvScreenEnums.MODUPDATER_SCREEN] = \
        CvScreensInterface.modUpdaterScreen
    CvScreensInterface.HandleNavigationMap[CvScreenEnums.MODUPDATER_SCREEN] = \
        CvScreensInterface.modUpdaterScreen

    def _delayedPythonCall(argsList):
        # Forward the engine callback to the (possibly overridden) game utils.
        return CvGameInterface.gameUtils().delayedPythonCall(argsList)

    def _delayedPythonCallUtil(_self, argsList):
        iArg1, iArg2 = argsList
        #print("delayedPythonCall triggerd with %i %i" % (iArg1, iArg2))
        if iArg1 == 1 and iArg2 == 0:
            # To avoid nested redrawing of two threads (leads to CtD)
            # try to win the battle by periodical requests if getMousePos()
            # returns a valid value.
            #(If yes, drawing will not causes an 'unidentifiable C++ exception'
            # in fullscreen mode.)
            iRepeat = 1000 # Milliseconds till next check
            pt = CyInterface().getMousePos()
            #print("Mouse position (%i, %i)" % (int(pt.x), int(pt.y)))
            if pt.x == 0 and pt.y == 0:
                print("(ModUpdaterScreen) Hey, window not ready for drawing."
                      "Wait %s milliseconds..." % (iRepeat,))
                return iRepeat
            else:
                # Draw the updater screen exactly once.
                if not CvScreensInterface.modUpdaterScreen.FIRST_DRAWN:
                    CvScreensInterface.showModUpdaterScreen()
                return 0
        # Unhandled argument combination... Should not be reached.
        return 0

    CvGameInterface.delayedPythonCall = _delayedPythonCall
    CvGameUtils.CvGameUtils.delayedPythonCall = _delayedPythonCallUtil
    print("Integration of CvModUpaterScreen finished")
def mehrzahl(text, val):
    """Resolve ``{singular:plural}`` markers inside *text*.

    Each marker is replaced by its singular part when ``val`` equals 1
    and by its plural part otherwise. ``val`` may be anything ``int()``
    accepts. Text without markers is returned unchanged.
    """
    replacement = r"\1" if int(val) == 1 else r"\2"
    return re.sub("{([^:]*):([^}]*)}", replacement, text)
class CvModUpdaterScreen(CvPediaScreen.CvPediaScreen):
def __init__(self):
    """Set up static configuration only; heavy setup runs lazily in initScreen()."""
    # No super() required
    self.bInit = False
    # Current UI state; one of "start", "info_avail_updates",
    # "info_up_to_date", "info_none", "info_fail", "updating".
    self.mode = "start"
    self.MOD_UPDATER_SCREEN_NAME = "ModUpdaterScreen"
    self.INTERFACE_ART_INFO = "SCREEN_BG_OPAQUE"
    self.WIDGET_ID = "ModUpdaterWidget"
    self.BG_DDS_NAME = "ModUpdaterBG"
    self.PANEL_NAME = "ModUpdaterPanel"
    self.BORDER = 60
    # self.HEADLINE_HEIGHT = 55 # Main menu height
    # self.Y_TITLE = 8
    self.HEADLINE_HEIGHT = 40
    self.Y_TITLE = 4
    self.X_EXIT = 994
    self.Y_EXIT = 730
    # Number of widgets created via getNextWidgetName() so far.
    self.nWidgetCount = 0
    # All widget Data1 values of this screen live in [ID_OFFSET, ID_OFFSET+100).
    self.ID_OFFSET = 22222
    # Symbolic names for the click events dispatched in _handleClick_().
    self.events = {
        "start": 0,
        "set_startup_search": 1,
        "search": 3,
        "update": 4,
        "exit": 10,
    }
    # Pixel height of the text body below the headline, per mode.
    self.mode_background_heights = {
        "start": 3 * 30,
        "info_none": 2 * 30,
        "info_fail": 3 * 30,
        "info_up_to_date": 1 * 30,
    }
    self.DRAWING_COUNTER = 0
    self.FIRST_DRAWING = True # True until showScreen is called once
    self.FIRST_DRAWN = False # True after menu was drawn.
    # Time : 0 FIRST_DRAWING FIRST_DRAWN
    # chart: [Game Start] [onWindowActivation called] ...wait... [Timer or second onWindowActivation]
    self.BULLET = "-"
    # Made pylint happy... (real values filled in by initScreen())
    self.UPDATER_AVAIL = u""
    self.MOD_NAME = u""
    self.UPDATER_SEARCH2 = u""
    self.UPDATER_RUN = u""
    self.UPDATER_NONE = u""
    self.UPDATER_NO_INFO = u""
    self.UPDATER_FAIL = u""
    self.UPDATER_START = u""
    self.UPDATER_STARTUP_DISABLE = u""
    self.UPDATER_STARTUP_ENABLE = u""
    self.SCREEN_RES = [1920, 1080] # Real values set in setCommonWidgets()
    # Dimensions of the main menu area that can not be covered.
    self.MAIN_MENU_RES = [700, 400]
    self.MOD_MENU_DIM = [] # Real values set in setCommonWidgets()
    self.updater = None
def getScreen(self):
    """Return the engine screen object backing this mod-updater window."""
    return CyGInterfaceScreen(self.MOD_UPDATER_SCREEN_NAME, CvScreenEnums.MODUPDATER_SCREEN)
def showScreen(self, bForce=False):
    """Construct and display the screen for the current mode.

    The very first call does not draw: it schedules a delayed redraw via
    the engine (DELAYED_MS > 0) or aborts entirely (DELAYED_MS < 0),
    because drawing this early would end up behind the main menu.

    :param bForce: rebuild widgets even if the screen is already active.
    """
    # Screen construction function
    # Note: Do not call getScreen in this function during first call.
    # This fails in fullscreen mode.
    self.initScreen()
    if self.FIRST_DRAWING:
        print("First Drawing...")
        if DELAYED_MS < 0:
            # Negative delay disables the updater window completely.
            print("Omit display of Updater Screen.")
            self.FIRST_DRAWING = False
            return
    if self.FIRST_DRAWING:
        self.FIRST_DRAWING = False
        # First call of showScreen will draw the menu behind(!) the main menu.
        if DELAYED_MS > 0:
            CyGame().delayedPythonCall(DELAYED_MS, 1, self.DRAWING_COUNTER) # Not too early...
            self.DRAWING_COUNTER += 1
            return
    else:
        self.DRAWING_COUNTER += 1
    self.FIRST_DRAWN = True
    screen = self.getScreen()
    self.deleteAllWidgets()
    bNotActive = (not screen.isActive())
    if bNotActive or bForce:
        self.setCommonWidgets()
def initScreen(self):
    """Lazy one-time initialisation: updater backend, delay, UI strings.

    On the second drawing pass it additionally runs the optional
    startup update check and sets self.mode accordingly.
    """
    if self.bInit and self.DRAWING_COUNTER == 1:
        # Use second initialisation because first would blockade startup(?)
        if 0 != int(self.updater.get_config().get("check_at_startup", 0)):
            if self.updater.check_for_updates():
                if self.updater.has_pending_updates():
                    self.mode = "info_avail_updates"
                else:
                    self.mode = "info_up_to_date"
            else:
                self.mode = "info_none"
    if self.bInit:
        return
    self.bInit = True
    self.updater = ModUpdater.ModUpdater()
    # The configured startup delay overrides the module-level default.
    global DELAYED_MS
    DELAYED_MS = self.updater.get_delayed_startup_seconds() * 1000
    # Pre-render all translated UI strings once.
    self.MOD_NAME = u"<font=3>" + self.updater.get_mod_name() + "</font>"
    self.UPDATER_SEARCH2 = u"<font=3>" + localText.getText("TXT_KEY_UPDATER_SEARCH2", ()) + "</font>"
    self.UPDATER_RUN = u"<font=3>" + localText.getText("TXT_KEY_UPDATER_RUN", ()).upper() + "</font>"
    self.UPDATER_NONE = u"<font=3>" + localText.getText("TXT_KEY_UPDATER_NONE", ()).upper() + "</font>"
    self.UPDATER_NO_INFO = u"<font=2>" + localText.getText("TXT_KEY_UPDATER_NO_INFO", ()).upper() + "</font>"
    self.UPDATER_FAIL = u"<font=3>" + localText.getText("TXT_KEY_UPDATER_FAIL", ()).upper() + "</font>"
    self.UPDATER_START = u"<font=2>" + localText.getText("TXT_KEY_UPDATER_START", ()).upper() + "</font>"
    self.UPDATER_STARTUP_DISABLE = u"<font=2>" + localText.getText("TXT_KEY_UPDATER_STARTUP_DISABLE", ()).upper() + "</font>"
    self.UPDATER_STARTUP_ENABLE = u"<font=2>" + localText.getText("TXT_KEY_UPDATER_STARTUP_ENABLE", ()).upper() + "</font>"
    # Optional. Allow http-Links as pedia link targets
    self.wrap_pedia_method()
def setCommonWidgets(self):
    """Build all widgets (background, headline, body) for the current mode.

    Assumes a clean widget set; callers clear/hide old widgets first.
    """
    screen = self.getScreen()
    #screen = CyGInterfaceScreen( "MainInterface", CvScreenEnums.MAIN_INTERFACE )
    # Screen resolution
    self.SCREEN_RES = [screen.getXResolution(), screen.getYResolution()]
    # Rectangle for the menu in the form [X, Y, W, H]
    self.MOD_MENU_DIM = [
        self.SCREEN_RES[0]-250,
        25,
        250,
        self.SCREEN_RES[1] - 200
    ]
    nU = len(self.updater.PendingUpdates)
    print("Num of available updates: %d" % (nU,))
    if self.FIRST_DRAWING:
        # Placeholder; translated strings are not usable this early.
        self.UPDATER_AVAIL = u"Hey"
    else:
        # This fails (C++ Exception) at early initialisation stages of Civ4!
        # CyTranslator.getText can not handle variables until the main menu is shown...
        #self.UPDATER_AVAIL = u"<font=3>" + localText.getText("TXT_KEY_UPDATER_AVAIL", (nU,)).upper() + "</font>"
        # As workaround, create string by hand
        avail_txt = localText.getText("TXT_KEY_UPDATER_AVAIL_WORKAROUND", ())
        self.UPDATER_AVAIL = u"<font=3>%d %s</font>" % (nU, mehrzahl(avail_txt, nU).upper(),)
    # Create a new screen
    screen = self.getScreen()
    screen.setRenderInterfaceOnly(False)
    screen.setScreenGroup(0) # ?
    # Similar to CvPediaMain.py
    #screen.setRenderInterfaceOnly(True)
    #screen.setPersistent(True) # nix
    #screen.showWindowBackground( False )
    screen.showScreen(PopupStates.POPUPSTATE_IMMEDIATE, False)
    # Background height of the text below the headline.
    body_height = 0 * 30
    if self.mode == "info_avail_updates":
        # One line per pending update plus one for the "start" button.
        body_height = (nU + 1) * 30
    elif self.mode in self.mode_background_heights:
        body_height = self.mode_background_heights[self.mode]
    bg_height = body_height + self.HEADLINE_HEIGHT
    screen.addDDSGFC(self.BG_DDS_NAME, ArtFileMgr.getInterfaceArtInfo("SCREEN_BG_OPAQUE").getPath(),
                     self.MOD_MENU_DIM[0], self.MOD_MENU_DIM[1], self.MOD_MENU_DIM[2], bg_height,
                     WidgetTypes.WIDGET_PEDIA_DESCRIPTION_NO_HELP, -1, -1)
    # Background of the headline
    screen.addPanel(self.PANEL_NAME, u"", u"", True, False,
                    self.MOD_MENU_DIM[0], self.MOD_MENU_DIM[1], self.MOD_MENU_DIM[2], self.HEADLINE_HEIGHT,
                    PanelStyles.PANEL_STYLE_TOPBAR)
    if self.mode == "start":
        # Headline doubles as the "search for updates" button.
        screen.setText(
            self.getNextWidgetName(), "Background", self.MOD_NAME, CvUtil.FONT_CENTER_JUSTIFY,
            self.MOD_MENU_DIM[0] + self.MOD_MENU_DIM[2]/2, self.MOD_MENU_DIM[1] + self.Y_TITLE, 0, FontTypes.TITLE_FONT,
            WidgetTypes.WIDGET_GENERAL, self.ID_OFFSET+self.events["search"], -1)
        textPos = [self.MOD_MENU_DIM[0] + 20, self.MOD_MENU_DIM[1] + self.HEADLINE_HEIGHT + 0*20]
        multiHeight = 2 * 30 # == body_height - 30
        screen.addMultilineText(
            self.getNextWidgetName(), self.UPDATER_SEARCH2,
            textPos[0], textPos[1], self.MOD_MENU_DIM[2] - 40, multiHeight,
            WidgetTypes.WIDGET_PEDIA_DESCRIPTION_NO_HELP,
            self.ID_OFFSET+self.events["start"], -1, CvUtil.FONT_LEFT_JUSTIFY)
        textPos[1] += multiHeight
        # Toggle link for the "check at startup" option; Data2 carries
        # the new option value (0 = disable, 1 = enable).
        if int(self.updater.get_config().get("check_at_startup", 0)) != 0:
            screen.setText(
                self.getNextWidgetName(), "Background", self.UPDATER_STARTUP_DISABLE, CvUtil.FONT_LEFT_JUSTIFY,
                textPos[0], textPos[1], 0, FontTypes.TITLE_FONT,
                WidgetTypes.WIDGET_PEDIA_DESCRIPTION_NO_HELP, self.ID_OFFSET+self.events["set_startup_search"], 0)
        else:
            screen.setText(
                self.getNextWidgetName(), "Background", self.UPDATER_STARTUP_ENABLE, CvUtil.FONT_LEFT_JUSTIFY,
                textPos[0], textPos[1], 0, FontTypes.TITLE_FONT,
                WidgetTypes.WIDGET_PEDIA_DESCRIPTION_NO_HELP, self.ID_OFFSET+self.events["set_startup_search"], 1)
    if self.mode == "info_avail_updates":
        screen.setText(
            self.getNextWidgetName(), "Background", self.UPDATER_AVAIL, CvUtil.FONT_CENTER_JUSTIFY,
            self.MOD_MENU_DIM[0] + self.MOD_MENU_DIM[2]/2, self.MOD_MENU_DIM[1] + self.Y_TITLE, 0, FontTypes.TITLE_FONT,
            WidgetTypes.WIDGET_PEDIA_DESCRIPTION_NO_HELP, self.ID_OFFSET+self.events["start"], -1)
        textPos = [self.MOD_MENU_DIM[0] + 20, self.MOD_MENU_DIM[1] + self.HEADLINE_HEIGHT + 0*20]
        # One bullet line per pending update.
        for u in self.updater.PendingUpdates:
            u_text = u"<font=3>%s %s</font>" % (self.BULLET, u["name"])
            screen.setLabel(self.getNextWidgetName(), "Background", u_text, CvUtil.FONT_LEFT_JUSTIFY,
                            textPos[0], textPos[1], 0, FontTypes.TITLE_FONT,
                            WidgetTypes.WIDGET_GENERAL, -1, -1)
            textPos[1] += 30
        screen.setText(
            self.getNextWidgetName(), "Background", self.UPDATER_START, CvUtil.FONT_CENTER_JUSTIFY,
            self.MOD_MENU_DIM[0] + self.MOD_MENU_DIM[2]/2, textPos[1], 0, FontTypes.TITLE_FONT,
            WidgetTypes.WIDGET_PEDIA_DESCRIPTION_NO_HELP, self.ID_OFFSET+self.events["update"], 0)
    if self.mode == "info_up_to_date":
        screen.setText(
            self.getNextWidgetName(), "Background", self.MOD_NAME, CvUtil.FONT_CENTER_JUSTIFY,
            self.MOD_MENU_DIM[0] + self.MOD_MENU_DIM[2]/2, self.MOD_MENU_DIM[1] + self.Y_TITLE, 0, FontTypes.TITLE_FONT,
            WidgetTypes.WIDGET_PEDIA_DESCRIPTION_NO_HELP, self.ID_OFFSET+self.events["exit"], 0)
        textPos = [self.MOD_MENU_DIM[0] + 20, self.MOD_MENU_DIM[1] + self.HEADLINE_HEIGHT + 0*20]
        screen.addMultilineText(
            self.getNextWidgetName(), self.UPDATER_NONE,
            textPos[0], textPos[1], self.MOD_MENU_DIM[2] - 40, body_height,
            WidgetTypes.WIDGET_PEDIA_DESCRIPTION_NO_HELP,
            self.ID_OFFSET+self.events["start"], -1, CvUtil.FONT_LEFT_JUSTIFY)
    if self.mode == "info_none":
        screen.setText(
            self.getNextWidgetName(), "Background", self.MOD_NAME, CvUtil.FONT_CENTER_JUSTIFY,
            self.MOD_MENU_DIM[0] + self.MOD_MENU_DIM[2]/2, self.MOD_MENU_DIM[1] + self.Y_TITLE, 0, FontTypes.TITLE_FONT,
            WidgetTypes.WIDGET_PEDIA_DESCRIPTION_NO_HELP, self.ID_OFFSET+self.events["exit"], 0)
        textPos = [self.MOD_MENU_DIM[0] + 20, self.MOD_MENU_DIM[1] + self.HEADLINE_HEIGHT + 0*20]
        screen.addMultilineText(
            self.getNextWidgetName(), self.UPDATER_NO_INFO,
            textPos[0], textPos[1], self.MOD_MENU_DIM[2] - 40, body_height,
            WidgetTypes.WIDGET_PEDIA_DESCRIPTION_NO_HELP,
            self.ID_OFFSET+self.events["start"], -1, CvUtil.FONT_LEFT_JUSTIFY)
    if self.mode == "info_fail":
        screen.setText(
            self.getNextWidgetName(), "Background", self.MOD_NAME, CvUtil.FONT_CENTER_JUSTIFY,
            self.MOD_MENU_DIM[0] + self.MOD_MENU_DIM[2]/2, self.MOD_MENU_DIM[1] + self.Y_TITLE, 0, FontTypes.TITLE_FONT,
            WidgetTypes.WIDGET_PEDIA_DESCRIPTION_NO_HELP, self.ID_OFFSET+self.events["start"], 0)
        textPos = [self.MOD_MENU_DIM[0] + 20, self.MOD_MENU_DIM[1] + self.HEADLINE_HEIGHT + 0*20]
        visit_url = self.updater.get_config().get("visit_url", "")
        failText = localText.getText("TXT_KEY_UPDATER_FAILED", (visit_url,))
        screen.addMultilineText(
            self.getNextWidgetName(), failText,
            textPos[0], textPos[1], self.MOD_MENU_DIM[2] - 40, body_height,
            WidgetTypes.WIDGET_PEDIA_DESCRIPTION_NO_HELP,
            self.ID_OFFSET+self.events["start"], -1, CvUtil.FONT_LEFT_JUSTIFY)
    if self.mode == "updating":
        screen.setText(
            self.getNextWidgetName(), "Background", self.UPDATER_RUN, CvUtil.FONT_CENTER_JUSTIFY,
            self.MOD_MENU_DIM[0] + self.MOD_MENU_DIM[2]/2, self.MOD_MENU_DIM[1] + self.Y_TITLE, 0, FontTypes.TITLE_FONT,
            WidgetTypes.WIDGET_PEDIA_DESCRIPTION_NO_HELP, self.ID_OFFSET+self.events["exit"], 0)
# returns unique ID for a widget in this screen
def getNextWidgetName(self):
    """Return a fresh widget identifier and advance the widget counter."""
    name = "%s%d" % (self.WIDGET_ID, self.nWidgetCount)
    self.nWidgetCount += 1
    return name
def deleteAllWidgets(self, startid=-1):
    """Delete all widgets with index >= *startid*.

    The default (-1) performs a full wipe, which also removes the two
    statically named widgets (background image and headline panel).
    Leaves self.nWidgetCount at *startid* (0 for a full wipe).
    """
    screen = self.getScreen()
    lastIdx = self.nWidgetCount
    if startid == -1:
        screen.deleteWidget(self.BG_DDS_NAME)
        screen.deleteWidget(self.PANEL_NAME)
        startid = 0
    # Widget names are WIDGET_ID + running index (see getNextWidgetName).
    for idx in range(startid, lastIdx):
        screen.deleteWidget(self.WIDGET_ID + str(idx))
    self.nWidgetCount = startid
def hideAllWidgets(self):
    """Hide every widget of this screen without deleting it.

    The widget counter keeps its value so a later show()/redraw can
    re-use the same widget names.
    """
    screen = self.getScreen()
    screen.hide(self.BG_DDS_NAME)
    screen.hide(self.PANEL_NAME)
    for idx in range(self.nWidgetCount):
        screen.hide(self.WIDGET_ID + str(idx))
def redraw(self):
    """Re-display the screen for the current self.mode.

    Old widgets are hidden (not deleted) and the widget set is rebuilt
    from index 0, so identically named widgets are simply re-shown.
    """
    self.initScreen()
    self.hideAllWidgets()
    screen = self.getScreen()
    screen.show(self.BG_DDS_NAME)
    screen.show(self.PANEL_NAME)
    nWidgetCount_Back = self.nWidgetCount
    self.nWidgetCount = 0
    self.setCommonWidgets()
    if nWidgetCount_Back > self.nWidgetCount:
        # Fewer widgets were recreated than existed before: stale hidden
        # widgets stay registered with the engine.
        print("CvModUpdaterScreen: Old widgets still active... Could cause drawing issues.")
        #self.deleteAllWidgets(self.nWidgetCount)
# Called from PediaScreen
def handleClick(self, val1, val2):
    """Pedia-screen click hook; intentionally a no-op.

    Input is routed through handleInput()/_handleClick_() instead.
    """
    return
def _handleClick_(self, val1, val2):
    """Dispatch a decoded click event.

    :param val1: event id (see self.events).
    :param val2: event payload; only used by "set_startup_search", where
                 it carries the new option value (0 or 1).
    """
    screen = self.getScreen()
    if val1 == self.events["start"]:
        # Back to the initial menu.
        self.mode = "start"
        self.deleteAllWidgets()
        self.redraw()
        return
    if val1 == self.events["exit"]:
        self.mode = "start"
        screen.hideScreen()
        self.deleteAllWidgets() # Clear because mode was changed
        return
    elif val1 == self.events["search"]:
        self.updater.Config = None # Force re-read of json-file
        if self.updater.check_for_updates():
            if self.updater.has_pending_updates():
                self.mode = "info_avail_updates"
            else:
                self.mode = "info_up_to_date"
        else:
            # Check itself failed (no update info available).
            self.mode = "info_none"
        self.deleteAllWidgets() # Clear because mode was changed
        self.redraw()
    elif val1 == self.events["update"]:
        self.mode = "updating"
        status = self.updater.start_update()
        if status["successful"]:
            self.mode = "info_up_to_date"
            screen.hideScreen()
            self.deleteAllWidgets() # Clear because mode was changed
            show_popup(localText.getText("TXT_KEY_UPDATER_SUCCESSFUL", ()))
            self.show_info_popups(status)
        else:
            self.mode = "info_fail"
            self.deleteAllWidgets() # Clear because mode was changed
            self.redraw()
            show_popup(localText.getText("TXT_KEY_UPDATER_FAILED_POPUP", ()))
    elif val1 == self.events["set_startup_search"]:
        # Persist the "check at startup" option and refresh the toggle label.
        self.updater.get_config()["check_at_startup"] = val2
        self.updater.write_config()
        self.redraw()
# Will handle the input for this screen...
# Attention: for WIDGET_GENERAL this does NOT work before a game is loaded(?!)
# Use handleClick and the detour via WIDGET_PEDIA_DESCRIPTION_NO_HELP instead.
def handleInput(self, inputClass):
    """Consume click events aimed at this screen.

    Clicks whose Data1 value lies in [ID_OFFSET, ID_OFFSET + 100) are
    translated to internal event ids and dispatched to _handleClick_().

    :return: 1 when the event was consumed, None otherwise.
    """
    if inputClass.getNotifyCode() == NotifyCode.NOTIFY_CLICKED:
        # Query the widget data once instead of three separate accessor calls.
        iData1 = inputClass.getData1()
        if self.ID_OFFSET <= iData1 < self.ID_OFFSET + 100:
            self._handleClick_(iData1 - self.ID_OFFSET, inputClass.getData2())
            return 1
def show_info_popups(self, update_status):
    """Show one popup per applied update description, last update first."""
    for update in reversed(update_status["updates"]):
        desc = update["info"].get("desc")
        if desc:
            # desc is a unicode string; Popup.py can not handle encoded bytes.
            show_popup(desc)
def wrap_pedia_method(self):
    """Monkey-patch CvPediaMain.link so http(s) targets open in a browser.

    Non-http links fall through to the original (saved) implementation.
    """
    # Wrap CvPediaMain.link method with own code to detect html links
    CvScreensInterface.pediaMainScreen.link_orig = CvScreensInterface.pediaMainScreen.link
    def __link__(szLink):
        if szLink.strip().startswith("http"):
            CvUtil.pyPrint("Visit link: " + str(szLink))
            import webbrowser
            webbrowser.open(szLink)
            return
        return CvScreensInterface.pediaMainScreen.link_orig(szLink)
    CvScreensInterface.pediaMainScreen.link = __link__
def show_popup(text):
    """Display *text* in a simple modal popup titled 'Mod Updater'."""
    dialog = PyPopup.PyPopup()
    dialog.setHeaderString("Mod Updater")
    dialog.setBodyString(text)
    dialog.launch()
| YggdrasiI/PBStats | tests/Updater/Mods/Updater/Assets/Python/Screens/CvModUpdaterScreen.py | Python | gpl-2.0 | 22,672 | [
"VisIt"
] | 1e48d5a09b9010a6a5b702abf066788e815abfcf7f53c8304c7f88c79e37d0ae |
#####################################################################
# $HeadURL $
# File: ReqManagerHandler.py
########################################################################
"""
:mod: ReqManagerHandler
.. module: ReqManagerHandler
:synopsis: Implementation of the RequestDB service in the DISET framework
"""
__RCSID__ = "$Id$"
# # imports
from types import DictType, IntType, LongType, ListType, StringTypes, NoneType
# # from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
# # from RMS
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
from DIRAC.RequestManagementSystem.DB.RequestDB import RequestDB
class ReqManagerHandler( RequestHandler ):
"""
.. class:: ReqManagerHandler
RequestDB interface in the DISET framework.
"""
# # request validator
__validator = None
# # request DB instance
__requestDB = None
@classmethod
def initializeHandler( cls, serviceInfoDict ):
  """ initialize handler

  Instantiates the shared RequestDB and creates any tables that are
  missing (fresh/empty database).
  """
  try:
    cls.__requestDB = RequestDB()
  except RuntimeError, error:  # Python 2 except syntax (file is py2)
    gLogger.exception( error )
    return S_ERROR( error )
  # # create tables for empty db
  getTables = cls.__requestDB.getTables()
  if not getTables["OK"]:
    gLogger.error( getTables["Message"] )
    return getTables
  getTables = getTables["Value"]
  # Only create tables that do not exist yet.
  toCreate = [ tab for tab in cls.__requestDB.getTableMeta().keys() if tab not in getTables ]
  return cls.__requestDB.createTables( toCreate )
# # helper functions
@classmethod
def validate( cls, request ):
  """ request validation

  The shared RequestValidator is created lazily on first use.
  """
  if not cls.__validator:
    cls.__validator = RequestValidator()
  return cls.__validator.validate( request )
@classmethod
def __getRequestID( cls, requestName ):
  """ get requestID for given :requestName:

  A numeric argument is passed through unchanged; a string is resolved
  via the RequestDB. Returns S_OK( requestID ) or the DB error.
  """
  requestID = requestName
  if type( requestName ) in StringTypes:
    result = cls.__requestDB.getRequestProperties( requestName, [ "RequestID" ] )
    if not result["OK"]:
      return result
    requestID = result["Value"]
  return S_OK( requestID )
types_cancelRequest = [ StringTypes ]
@classmethod
def export_cancelRequest( cls , requestName ):
  """ Cancel a request

  :param str requestName: name of the request to cancel
  :return: S_OK/S_ERROR from the database layer
  """
  return cls.__requestDB.cancelRequest( requestName )
types_putRequest = [ DictType ]
@classmethod
def export_putRequest( cls, requestJSON ):
  """ put a new request into RequestDB

  The request is optimized first; if optimization produced a brand new
  request (RequestID reset to 0), the stored original is deleted before
  the optimized one is validated and inserted.

  :param cls: class ref
  :param dict requestJSON: request serialized to JSON format
  """
  requestName = requestJSON.get( "RequestName", "***UNKNOWN***" )
  request = Request( requestJSON )
  requestID = request.RequestID
  optimized = request.optimize()
  if optimized.get( "Value", False ):
    if request.RequestID == 0 and requestID != 0:
      # A new request has been created, delete the old one
      delete = cls.__requestDB.deleteRequest( request.RequestName )
      if not delete['OK']:
        return delete
      gLogger.debug( "putRequest: request was optimized and removed for a new insertion" )
    else:
      gLogger.debug( "putRequest: request was optimized" )
  else:
    gLogger.debug( "putRequest: request unchanged", optimized.get( "Message", "Nothing could be optimized" ) )
  valid = cls.validate( request )
  if not valid["OK"]:
    gLogger.error( "putRequest: request %s not valid: %s" % ( requestName, valid["Message"] ) )
    return valid
  # The request name may have changed during optimization.
  requestName = request.RequestName
  gLogger.info( "putRequest: Attempting to set request '%s'" % requestName )
  return cls.__requestDB.putRequest( request )
types_getScheduledRequest = [ ( IntType, LongType ) ]
@classmethod
def export_getScheduledRequest( cls , operationID ):
  """ read scheduled request given operationID

  :return: S_OK() when nothing is scheduled, otherwise the request
           serialized to JSON (or the error that occurred).
  """
  scheduled = cls.__requestDB.getScheduledRequest( operationID )
  if not scheduled["OK"]:
    gLogger.error( "getScheduledRequest: %s" % scheduled["Message"] )
    return scheduled
  if not scheduled["Value"]:
    # No request scheduled for this operation.
    return S_OK()
  requestJSON = scheduled["Value"].toJSON()
  if not requestJSON["OK"]:
    gLogger.error( "getScheduledRequest: %s" % requestJSON["Message"] )
  return requestJSON
types_getDBSummary = []
@classmethod
def export_getDBSummary( cls ):
  """ Get the summary of requests in the Request DB

  :return: S_OK/S_ERROR straight from the database layer
  """
  return cls.__requestDB.getDBSummary()
types_getRequest = [ list( StringTypes ) + [NoneType] ]
@classmethod
def export_getRequest( cls, requestName = "" ):
  """ Get a request of given type from the database

  :return: request serialized to JSON, S_OK() when no request was found,
           or the DB error.
  """
  getRequest = cls.__requestDB.getRequest( requestName )
  if not getRequest["OK"]:
    gLogger.error( "getRequest: %s" % getRequest["Message"] )
    return getRequest
  if getRequest["Value"]:
    getRequest = getRequest["Value"]
    toJSON = getRequest.toJSON()
    if not toJSON["OK"]:
      gLogger.error( toJSON["Message"] )
    return toJSON
  return S_OK()
types_getBulkRequests = [ IntType ]
@classmethod
def export_getBulkRequests( cls, numberOfRequest = 10 ):
  """ Get a request of given type from the database

  :param numberOfRequest : size of the bulk (default 10)
  :return S_OK( {Failed : message, Successful : list of Request.toJSON()} )
  """
  getRequests = cls.__requestDB.getBulkRequests( numberOfRequest )
  if not getRequests["OK"]:
    gLogger.error( "getRequests: %s" % getRequests["Message"] )
    return getRequests
  if getRequests["Value"]:
    getRequests = getRequests["Value"]
    toJSONDict = {"Successful" : {}, "Failed" : {}}
    # Serialize each request individually; failures do not abort the bulk.
    for rId in getRequests:
      toJSON = getRequests[rId].toJSON()
      if not toJSON["OK"]:
        gLogger.error( toJSON["Message"] )
        toJSONDict["Failed"][rId] = toJSON["Message"]
      else:
        toJSONDict["Successful"][rId] = toJSON["Value"]
    return S_OK( toJSONDict )
  return S_OK()
types_peekRequest = [ StringTypes ]
@classmethod
def export_peekRequest( cls, requestName = "" ):
  """ peek request given its name

  Unlike getRequest, peeking does not change the request state.
  """
  peekRequest = cls.__requestDB.peekRequest( requestName )
  if not peekRequest["OK"]:
    gLogger.error( "peekRequest: %s" % peekRequest["Message"] )
    return peekRequest
  if peekRequest["Value"]:
    peekRequest = peekRequest["Value"].toJSON()
    if not peekRequest["OK"]:
      gLogger.error( peekRequest["Message"] )
  return peekRequest
types_getRequestSummaryWeb = [ DictType, ListType, IntType, IntType ]
@classmethod
def export_getRequestSummaryWeb( cls, selectDict, sortList, startItem, maxItems ):
  """ Get summary of the request/operations info in the standard form for the web

  :param dict selectDict: selection dict
  :param list sortList: sort specification passed through to the DB layer
  :param int startItem: start item
  :param int maxItems: max items
  """
  return cls.__requestDB.getRequestSummaryWeb( selectDict, sortList, startItem, maxItems )
types_getDistinctValues = [ StringTypes ]
@classmethod
def export_getDistinctValues( cls, attribute ):
  """ Get distinct values for a given (sub)request attribute

  Operation attributes ('Type', 'Status') are looked up in the
  Operation table, request attributes ('OwnerDN', 'OwnerGroup') in the
  Request table; anything else is an error.
  """
  tableForAttribute = { 'Type' : 'Operation',
                        'Status' : 'Operation',
                        'OwnerDN' : 'Request',
                        'OwnerGroup' : 'Request' }
  table = tableForAttribute.get( attribute )
  if table is None:
    return S_ERROR( 'Invalid attribute %s' % attribute )
  return cls.__requestDB.getDistinctAttributeValues( table, attribute )
types_deleteRequest = [ StringTypes ]
@classmethod
def export_deleteRequest( cls, requestName ):
  """ Delete the request with the supplied name

  :return: S_OK/S_ERROR from the database layer
  """
  return cls.__requestDB.deleteRequest( requestName )
types_getRequestNamesList = [ ListType, IntType, StringTypes ]
@classmethod
def export_getRequestNamesList( cls, statusList = None, limit = None, since = None, until = None ):
  """ get requests' names with status in :statusList:

  Defaults: final states, at most 100 names, no time window.
  """
  statusList = statusList if statusList else list( Request.FINAL_STATES )
  limit = limit if limit else 100
  since = since if since else ""
  until = until if until else ""
  reqNamesList = cls.__requestDB.getRequestNamesList( statusList, limit, since = since, until = until )
  if not reqNamesList["OK"]:
    gLogger.error( "getRequestNamesList: %s" % reqNamesList["Message"] )
  return reqNamesList
types_getRequestNamesForJobs = [ ListType ]
@classmethod
def export_getRequestNamesForJobs( cls, jobIDs ):
  """ Select the request names for supplied jobIDs

  :param list jobIDs: job IDs to look up
  """
  return cls.__requestDB.getRequestNamesForJobs( jobIDs )
types_readRequestsForJobs = [ ListType ]
@classmethod
def export_readRequestsForJobs( cls, jobIDs ):
  """ read requests for jobs given list of jobIDs

  Successful entries are serialized to JSON in place before returning.
  """
  requests = cls.__requestDB.readRequestsForJobs( jobIDs )
  if not requests["OK"]:
    gLogger.error( "readRequestsForJobs: %s" % requests["Message"] )
    return requests
  # Replace Request objects by their JSON representation for transport.
  for jobID, request in requests["Value"]["Successful"].items():
    requests["Value"]["Successful"][jobID] = request.toJSON()["Value"]
  return requests
types_getDigest = [ StringTypes ]
@classmethod
def export_getDigest( cls, requestName ):
  """ get digest for a request given its name

  :param str requestName: request's name
  :return: S_OK( json_str )
  """
  return cls.__requestDB.getDigest( requestName )
types_getRequestStatus = [ StringTypes ]
@classmethod
def export_getRequestStatus( cls, requestName ):
  """ get request status given its name

  Errors are logged before being passed back to the caller.
  """
  status = cls.__requestDB.getRequestStatus( requestName )
  if not status["OK"]:
    gLogger.error( "getRequestStatus: %s" % status["Message"] )
  return status
types_getRequestFileStatus = [ list( StringTypes ) + [ IntType, LongType ], list( StringTypes ) + [ListType] ]
@classmethod
def export_getRequestFileStatus( cls, requestName, lfnList ):
  """ get request file status for a given LFNs list and requestID/Name

  :param requestName: request name or ID
  :param lfnList: a single LFN (str or unicode) or a list of LFNs
  """
  # The DISET signature above accepts any string type, so wrap str AND
  # unicode: the previous `type( lfnList ) == str` check missed unicode
  # LFNs and would pass a bare string downstream, where it would be
  # iterated character by character.
  if isinstance( lfnList, StringTypes ):
    lfnList = [ lfnList ]
  res = cls.__requestDB.getRequestFileStatus( requestName, lfnList )
  if not res["OK"]:
    gLogger.error( "getRequestFileStatus: %s" % res["Message"] )
  return res
types_getRequestName = [ ( IntType, LongType ) ]
@classmethod
def export_getRequestName( cls, requestID ):
  """ get request name for a given requestID

  Errors are logged before being passed back to the caller.
  """
  requestName = cls.__requestDB.getRequestName( requestID )
  if not requestName["OK"]:
    gLogger.error( "getRequestName: %s" % requestName["Message"] )
  return requestName
types_getRequestInfo = [ list( StringTypes ) + [ IntType, LongType ] ]
@classmethod
def export_getRequestInfo( cls, requestName ):
  """ get request info for a given requestID/Name

  Errors are logged before being passed back to the caller.
  """
  requestInfo = cls.__requestDB.getRequestInfo( requestName )
  if not requestInfo["OK"]:
    gLogger.error( "getRequestInfo: %s" % requestInfo["Message"] )
  return requestInfo
| calancha/DIRAC | RequestManagementSystem/Service/ReqManagerHandler.py | Python | gpl-3.0 | 10,989 | [
"DIRAC"
] | cb906bc2500176393afc6350a76ab637798023014e34201ab555326c1cfbf6be |
#!/usr/bin/env python
from math import pi, cos, sin, sqrt
import vtk
LEVEL = 6
def as_polyline(points, level):
    """
    Return the Koch snowflake outline after *level* subdivision steps,
    as a vtkPolyData containing one closed vtkPolyLine.

    There is an assumption that ``points`` traces the initial closed
    polygon counterclockwise; clockwise input makes the bumps face
    inward instead of outward.
    """
    # Use the points from the previous iteration to create the points of the next
    # level. Loop variable renamed from 'i': it was shadowed by the inner loop.
    for _ in range(level):
        temp = vtk.vtkPoints()
        # The first point of the previous vtkPoints is the first point of the next vtkPoints.
        temp.InsertNextPoint(*points.GetPoint(0))
        # Iterate over "edges" in the vtkPoints
        for i in range(1, points.GetNumberOfPoints()):
            # z is constant (0.0) throughout, so only x and y are needed.
            x0, y0 = points.GetPoint(i - 1)[:2]
            x1, y1 = points.GetPoint(i)[:2]
            t = sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)  # edge length
            nx = (x1 - x0) / t  # x-component of edge unit tangent
            ny = (y1 - y0) / t  # y-component of edge unit tangent
            # the four points describing the Koch snowflake bump on this edge
            temp.InsertNextPoint(x0 + nx * t / 3, y0 + ny * t / 3, 0.)
            temp.InsertNextPoint(x0 + nx * t / 2 + ny * t * sqrt(3) / 6, y0 + ny * t / 2 - nx * t * sqrt(3) / 6, 0.)
            temp.InsertNextPoint(x0 + nx * 2 * t / 3, y0 + ny * 2 * t / 3, 0.)
            temp.InsertNextPoint(x0 + nx * t, y0 + ny * t, 0.)
        points = temp
    # draw the outline as a single polyline through all points, in order
    lines = vtk.vtkCellArray()
    pl = vtk.vtkPolyLine()
    pl.GetPointIds().SetNumberOfIds(points.GetNumberOfPoints())
    for i in range(points.GetNumberOfPoints()):
        pl.GetPointIds().SetId(i, i)
    lines.InsertNextCell(pl)
    # complete the polydata
    polydata = vtk.vtkPolyData()
    polydata.SetLines(lines)
    polydata.SetPoints(points)
    return polydata
def as_triangles(indices, cellarray, level, data):
    """
    Koch Snowflake as a collection of vtkTriangles.

    Recursively splits *indices* (point ids along one snowflake edge)
    into quarters, emits the middle triangle into *cellarray*, and tags
    it with the recursion *level* in *data*. Recursion stops when fewer
    than 3 indices remain. NOTE: mutates *indices* (appends one id).
    """
    if len(indices) >= 3:
        stride = len(indices) // 4
        # Extend by one id so the [3*stride:-1] quarter keeps its last point.
        indices.append(indices[-1] + 1)
        triangle = vtk.vtkTriangle()
        triangle.GetPointIds().SetId(0, indices[stride])
        triangle.GetPointIds().SetId(1, indices[2 * stride])
        triangle.GetPointIds().SetId(2, indices[3 * stride])
        cellarray.InsertNextCell(triangle)
        data.InsertNextValue(level)
        # Recurse into the four sub-edges.
        as_triangles(indices[0: stride], cellarray, level + 1, data)
        as_triangles(indices[stride: 2 * stride], cellarray, level + 1, data)
        as_triangles(indices[2 * stride: 3 * stride], cellarray, level + 1, data)
        as_triangles(indices[3 * stride: -1], cellarray, level + 1, data)
def main():
    """Build the Koch snowflake and render it: the outline polyline in the
    left viewport and the level-coloured triangle fill in the right."""
    colors = vtk.vtkNamedColors()
    # Initially, set up the points to be an equilateral triangle. Note that the
    # first point is the same as the last point to make this a closed curve when
    # I create the vtkPolyLine.
    points = vtk.vtkPoints()
    for i in range(4):
        points.InsertNextPoint(cos(2.0 * pi * i / 3), sin(2 * pi * i / 3.0), 0.0)
    outline_pd = as_polyline(points, LEVEL)
    # You have already gone through the trouble of putting the points in the
    # right places - so "all" you need to do now is to create polygons from the
    # points that are in the vtkPoints.
    # The points that are passed in, have an overlap of the beginning and the
    # end. For this next trick, I will need a list of the indices in the
    # vtkPoints. They're consecutive, so that's pretty straightforward.
    indices = [i for i in range(outline_pd.GetPoints().GetNumberOfPoints() + 1)]
    triangles = vtk.vtkCellArray()
    # Set this up for each of the initial sides, then call the recursive function.
    stride = (len(indices) - 1) // 3
    # The cell data will allow us to color the triangles based on the level of
    # the iteration of the Koch snowflake.
    data = vtk.vtkIntArray()
    # NOTE(review): 0 components is unusual for a vtkIntArray (1 is the norm);
    # confirm this is intended.
    data.SetNumberOfComponents(0)
    data.SetName("Iteration Level")
    # This is the starting triangle.
    t = vtk.vtkTriangle()
    t.GetPointIds().SetId(0, 0)
    t.GetPointIds().SetId(1, stride)
    t.GetPointIds().SetId(2, 2 * stride)
    triangles.InsertNextCell(t)
    data.InsertNextValue(0)
    # Recurse into each of the three initial sides.
    as_triangles(indices[0: stride + 1], triangles, 1, data)
    as_triangles(indices[stride: 2 * stride + 1], triangles, 1, data)
    as_triangles(indices[2 * stride: -1], triangles, 1, data)
    triangle_pd = vtk.vtkPolyData()
    triangle_pd.SetPoints(outline_pd.GetPoints())
    triangle_pd.SetPolys(triangles)
    triangle_pd.GetCellData().SetScalars(data)
    # ---------------- #
    # rendering stuff #
    # ---------------- #
    outline_mapper = vtk.vtkPolyDataMapper()
    outline_mapper.SetInputData(outline_pd)
    # Blue-hued lookup table; saturation encodes the iteration level.
    lut = vtk.vtkLookupTable()
    lut.SetNumberOfTableValues(256)
    lut.SetHueRange(0.6, 0.6)
    lut.SetSaturationRange(0.0, 1.0)
    lut.Build()
    triangle_mapper = vtk.vtkPolyDataMapper()
    triangle_mapper.SetInputData(triangle_pd)
    triangle_mapper.SetScalarRange(0.0, LEVEL)
    triangle_mapper.SetLookupTable(lut)
    outline_actor = vtk.vtkActor()
    outline_actor.SetMapper(outline_mapper)
    triangle_actor = vtk.vtkActor()
    triangle_actor.SetMapper(triangle_mapper)
    # Two side-by-side viewports sharing a single camera.
    outline_ren = vtk.vtkRenderer()
    outline_ren.AddActor(outline_actor)
    outline_ren.SetViewport(0.0, 0.0, 0.5, 1.0)
    triangle_ren = vtk.vtkRenderer()
    triangle_ren.AddActor(triangle_actor)
    triangle_ren.SetViewport(0.5, 0.0, 1.0, 1.0)
    triangle_ren.SetActiveCamera(outline_ren.GetActiveCamera())
    renw = vtk.vtkRenderWindow()
    renw.AddRenderer(outline_ren)
    renw.AddRenderer(triangle_ren)
    renw.SetSize(800, 400)
    outline_ren.SetBackground(colors.GetColor3d("Maroon"))
    triangle_ren.SetBackground(colors.GetColor3d("Maroon"))
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renw)
    outline_ren.ResetCamera()
    renw.Render()
    iren.Start()


if __name__ == '__main__':
    main()
| lorensen/VTKExamples | src/Python/Visualization/KochSnowflake.py | Python | apache-2.0 | 6,018 | [
"VTK"
] | e2c2a70b0ad287161e144bb719849b09adc320904bbdf78d1cde2fc26077790c |
#!/usr/bin/env python
# encoding: utf-8
import MySQLdb
import os
import re
import scrapy
from scrapy.loader import ItemLoader
from ForumSpider.items import BbsComment,BbsAsk,Person,ForumItem,ForumVisit,TopicPage
from scrapy.spiders import Spider
from scrapy.http import Request, FormRequest
from scrapy.selector import Selector
class BbsSpider(scrapy.Spider):
    """Crawl the CSDN forum map, yielding one ForumItem per forum-list table
    row and one ForumVisit per visited list page."""
    handle_httpstatus_list = [403]
    name = 'forum_list_spider'
    allowed_domains = ['bbs.csdn.net', 'my.csdn.net']
    start_urls = ['http://bbs.csdn.net/map']

    def parse(self, response):
        """Follow every forum on the site map in each of its three views
        (recommend, closed, follow), tagging requests with class/type meta."""
        forums_list_urls_end = response.xpath('//div[@class="map"]/descendant::a/@href').extract()
        for forums_url_end in forums_list_urls_end:
            forums_list_url = 'http://bbs.csdn.net' + forums_url_end
            # Strip the leading '/forums/' prefix to get the forum class.
            forum_class = forums_url_end[8:]
            yield Request(forums_list_url + '/recommend',
                          meta={'forum_class': forum_class, 'forum_type': 'recommend'},
                          callback=self.parse_forums_list)
            yield Request(forums_list_url + '/closed',
                          meta={'forum_class': forum_class, 'forum_type': 'closed'},
                          callback=self.parse_forums_list)
            yield Request(forums_list_url,
                          meta={'forum_class': forum_class, 'forum_type': 'follow'},
                          callback=self.parse_forums_list)

    def parse_forums_list(self, response):
        """Extract forum rows and a page-visit record, then follow pagination
        while propagating the forum class/type meta to the next page."""
        for row in response.xpath('//tr'):
            forum_item = ForumItem()
            forum_item['forum_class'] = response.meta['forum_class']
            forum_item['forum_type'] = response.meta['forum_type']
            forum_item['forum_title'] = row.xpath('td[1]/a/@title').extract()
            forum_item['forum_id'] = re.sub(r'\D', "", ''.join(row.xpath('td[1]/a/@href').extract()))
            forum_item['forum_url'] = "http://bbs.csdn.net/topics/" + forum_item['forum_id']
            forum_item['forum_point'] = re.sub(r'\D', "", ''.join(row.xpath('td[2]/text()').extract()))
            forum_item['forum_question_user'] = re.sub('http://my.csdn.net/', "", ''.join(row.xpath('td[3]/a/@href').extract()))
            forum_item['forum_question_time'] = row.xpath('td[3]/span[@class = "time"]/text()').extract()
            forum_item['forum_answer_number'] = re.sub(r'\D', "", ''.join(row.xpath('td[4]/text()').extract()))
            forum_item['forum_update_user'] = re.sub('http://my.csdn.net/', "", ''.join(row.xpath('td[5]/a/@href').extract()))
            forum_item['forum_update_time'] = row.xpath('td[5]/span[@class = "time"]/text()').extract()
            yield forum_item

        visit_item = ForumVisit()
        visit_item['visit_topic'] = re.findall(r'/forums/(\w+)', response.url)
        T_type = re.findall(r'(closed)|(recommend)', response.url)
        if not T_type:
            visit_item['visit_type'] = "follow"
        elif T_type[0][0] == "closed":
            visit_item['visit_type'] = "closed"
        elif T_type[0][1] == "recommend":
            visit_item['visit_type'] = "recommend"
        else:
            print ("Error: Visit Type Wrong!")
        visit_item['visit_page'] = re.findall(r'page=(\d+)', response.url)
        yield visit_item

        # Bug fix: the original read the leaked loop variable 'sel' (NameError
        # on pages with no <tr> rows) and dropped the request meta, so
        # response.meta['forum_class'] raised KeyError on subsequent pages.
        next_page = response.xpath('//a[@class="next"]/@href').extract()
        if next_page:
            next_url = 'http://bbs.csdn.net' + next_page[0]
            yield Request(next_url,
                          meta={'forum_class': response.meta['forum_class'],
                                'forum_type': response.meta['forum_type']},
                          callback=self.parse_forums_list)
| zhouqilin1993/IntelliDE | crawler/ForumSpider/ForumSpider/spiders/forum_list_spider.py | Python | gpl-3.0 | 3,160 | [
"VisIt"
] | c96bc8f569cb1b506952c2411e8200c873d5179fefb92afd0cf4da9bf839c5eb |
"""
Tests the classification functions
"""
import unittest
import os
from mollib.core import Molecule, load_settings
import mollib.core.settings
from mollib.hbonds import classify_residues, find_hbond_partners, settings
def convert_dssp(string):
    """Translate a DSSP-like classification string into a dict mapping residue
    numbers (starting at 1) to classification labels.

    Parameters
    ----------
    string: str
        The DSSP (like) string, one character per residue.
    """
    code_map = {' ': '',
                'E': settings.major_beta,
                'H': settings.major_alpha,
                'G': settings.major_310,
                'I': settings.major_pi,
                'a': settings.major_beta_turnI,
                'b': settings.major_beta_turnIp,
                'c': settings.major_beta_turnII,
                'd': settings.major_beta_turnIIp,
                }
    classifications = {}
    for residue_number, code in enumerate(string, start=1):
        classifications[residue_number] = code_map[code]
    return classifications
class TestHbondClassify(unittest.TestCase):
    """Integration tests for hydrogen-bond based residue classification.

    Structures (2KXA, 1UBQ) are loaded by identifier, so the corresponding
    structure files must be retrievable when these tests run.
    """

    def setUp(self):
        """Load the settings."""
        load_settings()

    def test_classify_residues(self):
        "Tests the residue classification from hbonds for specific molecule"
        # Expected classifications encoded as DSSP-like strings; convert_dssp
        # maps them to {residue_number: classification}.
        answer_key = {
            # Hemagglutinin fusion peptide
            '2KXA': convert_dssp(' HHHHHHHHHHH HHHHHHHHH '),
            # Ubiquitin crystal structure
            '1UBQ': convert_dssp(' EEEEEEaa EEEEEE aa HHHHHHHHHHHH aaa'
                                 'EEEEEbb EEEaa GGGG ccEEEEEEE ')
        }
        msg = "Residue {} is assigned as '{}', but the test has '{}' assigned"
        for identifier, class_dict in answer_key.items():
            mol = Molecule(identifier)
            classify_residues(mol)
            for residue in mol.residues:
                # Skip HETATM molecules
                if '*' in residue.chain.id:
                    continue
                test_classification = class_dict[residue.number]
                actual_classification = residue.classification[0]
                residue_msg = msg.format(residue, actual_classification,
                                         test_classification)
                self.assertEqual(actual_classification, test_classification,
                                 msg=residue_msg)

    def test_energy_hbond(self):
        """Test the assignment of hydrogen bond energies."""
        # Load the molecule
        mol = Molecule('2KXA')
        # Find hydrogen bonds and classify them
        hbonds = find_hbond_partners(mol)
        # Assert that all of the hbonds have an energy associated and that these
        # are float numbers
        for hbond in hbonds:
            msg = ("The hydrogen bond '{}' does not have an 'energy_hbond' "
                   "assignment.")
            self.assertTrue(hasattr(hbond, 'energy_hbond'),
                            msg=msg.format(hbond.short_repr()))
            self.assertTrue(isinstance(hbond.energy_hbond, float))

    def test_energy_ramachandran(self):
        """Test the 'energy_ramachandran' property of residues, set by
        classify_residues.
        """
        # Load the molecule
        mol = Molecule('2KXA')
        # First confuse the path for the ramachandran_dataset_path so that the
        # datasets cannot be found.
        correct_path = mollib.core.settings.ramachandran_dataset_path
        mollib.core.settings.ramachandran_dataset_path = ''
        # Try classify_residues. All of the 'energy_ramachandran' attributes
        # should not be assigned because the datasets could not be found.
        # NOTE(review): classify_residues is not actually called before this
        # loop, despite the comment above — confirm whether a call is missing.
        for residue in mol.residues:
            self.assertFalse(hasattr(residue, 'energy_ramachandran'))
        # With the correct path, the datasets should be found and the energies
        # are correctly set
        # NOTE(review): this mutates module-global settings and does not
        # restore the original path on failure; consider addCleanup/tearDown.
        correct_path = os.path.join('../..', correct_path)
        mollib.core.settings.ramachandran_dataset_path = correct_path
        classify_residues(mol)
        # The 'energy_ramachandran' attributes should now be assigned and
        # have float values
        for residue in mol.residues:
            msg = ("Residue '{}' does not have an 'energy_ramachandran' "
                   "assignment")
            self.assertTrue(hasattr(residue, 'energy_ramachandran'),
                            msg=msg.format(residue))
            self.assertIsInstance(residue.energy_ramachandran, float)
| jlorieau/mollib | tests/hbonds/test_classify.py | Python | gpl-3.0 | 4,408 | [
"CRYSTAL"
] | 4db9a3736c2e9ad72abb642eb866fb9c4c08acc5f6ac255faaa66263954d4b93 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
****************************
espressopp.analysis.Pressure
****************************
.. function:: espressopp.analysis.Pressure(system)
:param system:
:type system:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.analysis.Observable import *
from _espressopp import analysis_Pressure
class PressureLocal(ObservableLocal, analysis_Pressure):
    """Worker-side pressure observable wrapping the C++ analysis_Pressure."""

    def __init__(self, system):
        # Only construct the C++ backend on ranks that participate in the PMI
        # CPU group, or on every rank when no PMI communicator is active.
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, analysis_Pressure, system)

if pmi.isController :
    class Pressure(Observable, metaclass=pmi.Proxy):
        """Controller-side PMI proxy delegating to PressureLocal on workers."""
        pmiproxydefs = dict(
            cls = 'espressopp.analysis.PressureLocal'
        )
| espressopp/espressopp | src/analysis/Pressure.py | Python | gpl-3.0 | 1,656 | [
"ESPResSo"
] | 3991c597c09fe0bbcc7a8d17616875c6e3ff68ac9592634cc907489ac5ecdca4 |
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import collections
from PyQt5 import QtWidgets
import mooseutils
from .MooseWidget import MooseWidget
from .Plugin import Plugin
from .PreferenceWidget import PreferenceWidget
class PluginManager(MooseWidget):
    """
    A MooseWidget for managing plugins.

    A manager creates plugins and places them in a layout automatically. It also
    connects signals and slots automatically between the plugins. If a signal is
    named 'mySignal' it will automatically connect to a slot named 'onMySignal'.
    A plugin will not connect to itself, to help minimize cyclic dependencies.

    A plugin is added to a layout by name, as specified by the Plugin object (see
    Plugin.py). Thus, when creating child PluginManager the layout member names
    (e.g., 'MainLayout' or 'LeftLayout') must be specified in the Plugin object.
    When a plugin is added, it is also added as a member variable according to the
    class name; if multiple plugins are added of the same type the member variable
    will be a list of the plugins rather than the plugin itself.

    This is a MooseWidget, thus the setup() method must be called by derived classes.

    Args:
        plugins[list]: A list of Plugin object classes (not instances) to be
            managed by this object. @see Plugin.py
        plugin_base[type]: The base class managed plugins must derive from.
    """
    def __init__(self, plugins=None, plugin_base=Plugin):
        super(PluginManager, self).__init__()

        # Check that self is a QWidget
        # NOTE(review): MooseException is constructed but not raised here
        # (matching the original); confirm whether construction itself raises.
        if not isinstance(self, QtWidgets.QWidget):
            mooseutils.MooseException("{} objects must also be a QWidget.".format(self.__class__.__name__))

        # A list of plugin classes (see setup()); private because child classes
        # shouldn't be messing with the classes. A None sentinel is used to
        # avoid the shared-mutable-default-argument pitfall.
        self._plugin_classes = plugins if plugins is not None else []

        # The base class that this manager is allowed to manage
        self._plugin_base = plugin_base

        # An OrderedDict (to maintain the order plugins are added) for storing plugin objects
        self._plugins = collections.OrderedDict()

        # The tab index for this plugin
        self._index = None

        # Flat list of every plugin instance, used for signal connection
        self._all_plugins = []

        # Lazily created preferences widget (see preferencesWidget())
        self._pref_widget = None

    def __contains__(self, item):
        """
        Provide "in" access into the list of plugins.
        """
        return item in self._plugins

    def __getitem__(self, item):
        """
        Provide operator[] access to plugins.
        """
        return self._plugins[item]

    def addObject(self, widget):
        """
        Method for adding a widget to a layout.

        Args:
            widget[QWidget]: The widget to add.

        NOTE: This method exists so that derived classes can customize how items are added.
        """
        if not hasattr(self, widget.mainLayoutName()):
            mooseutils.mooseError("Unknown layout name '{}' returned when adding plugin '{}', the plugin is being skipped.".format(widget.mainLayoutName(), widget.__class__.__name__))
        else:
            layout = getattr(self, widget.mainLayoutName())
            layout.addWidget(widget)

    def setTabIndex(self, index, signal=None):
        """
        Set the Peacock tab index on this manager and every plugin.
        """
        self._index = index
        for plugin in self._all_plugins:
            plugin.setTabIndex(index, signal=signal)

    def setup(self):
        """
        Call widget setup methods and connect signals and slots from plugins.
        """
        super(PluginManager, self).setup()

        # Create the plugins
        for plugin_class in self._plugin_classes:

            # Create the widget instance
            widget = plugin_class()

            # Check the type
            if not isinstance(widget, self._plugin_base):
                mooseutils.MooseException("The supplied widget is of type '{}' but must be a direct child of a '{}'".format(widget.__class__.__name__, self._plugin_base.__name__))

            # Define the widget name
            name = widget.__class__.__name__

            # Store widget in a list if more than one exist
            if name in self._plugins:
                if not isinstance(self._plugins[name], list):
                    self._plugins[name] = [self._plugins[name]]
                self._plugins[name].append(widget)

            # Store widget directly if only one
            else:
                self._plugins[name] = widget

            # Set the parent
            widget.setParent(self)

            # Add the widget
            self.addObject(widget)

            # Set the class attribute based on plugin name
            setattr(self, name, self._plugins[name])
            mooseutils.mooseDebug('Adding plugin as member: {}'.format(name))

            # Store in the flat list used for signal/slot connection
            self._all_plugins.append(widget)

        # Connect signal/slots of plugins
        for plugin0 in self._all_plugins:
            plugin0._plugin_manager = self
            for plugin1 in self._all_plugins:
                plugin0.connect(plugin1)

    def write(self, filename):
        """
        Write the python script reproducing the current plugin state.
        """
        with open(filename, 'w') as fid:
            string = '"""\npython {}\n"""\n'.format(filename)
            string += self.repr()
            fid.write(string)

    def repr(self):
        """
        Return a string containing a script to reproduce the plugins.
        """
        return ''

    def call(self, method, *args, **kwargs):
        """
        If a method is present on the plugin call it with the given arguments.
        see ExodusViewer.onJobStart/onInputFileChanged
        """
        for plugin in self._all_plugins:
            if hasattr(plugin, method):
                attr = getattr(plugin, method)
                attr(*args, **kwargs)

    def preferencesWidget(self):
        """
        Returns an instance of a widget to set preferences for all the plugins.
        """
        if not self._pref_widget:
            self._pref_widget = PreferenceWidget(self._all_plugins)
        return self._pref_widget

    def onPreferencesSaved(self):
        """
        This will be called when the preferences have been saved.
        """
        for p in self._all_plugins:
            p.onPreferencesSaved()

    def fixLayoutWidth(self, layout):
        """
        Give every plugin in the named layout the same (maximum) fixed width so
        that the VTK window gets the remaining horizontal space.
        """
        # Bug fix: iterate the flat instance list; self._plugins values may be
        # *lists* when several plugins of the same class were added, and a list
        # has no mainLayoutName().
        width = 0
        for child in self._all_plugins:
            if child.mainLayoutName() == layout:
                width = max(child.sizeHint().width(), width)
        for child in self._all_plugins:
            if child.mainLayoutName() == layout:
                child.setFixedWidth(width)
| harterj/moose | python/peacock/base/PluginManager.py | Python | lgpl-2.1 | 6,979 | [
"MOOSE",
"VTK"
] | d96b71b20f8a962db2f6ee53e480baa411349f2d9f0b38a273f498de981cbc64 |
import threading
try:
from Queue import Queue
except ImportError:
from queue import Queue
from os import getenv
from .client import JobClient
from .client import InputCachingJobClient
from .client import MessageJobClient
from .client import MessageCLIJobClient
from .interface import HttpPulsarInterface
from .interface import LocalPulsarInterface
from .object_client import ObjectStoreClient
from .transport import get_transport
from .util import TransferEventManager
from .destination import url_to_destination_params
from .amqp_exchange_factory import get_exchange
from logging import getLogger
log = getLogger(__name__)
DEFAULT_TRANSFER_THREADS = 2
def build_client_manager(**kwargs):
    """Select a client-manager flavour from the supplied configuration.

    An explicit ``job_manager`` forces the in-process ClientManager; otherwise
    a truthy ``amqp_url`` selects the message-queue variant, and anything else
    falls back to the plain ClientManager.
    """
    # TODO: Consider more separation here.
    use_message_queue = ('job_manager' not in kwargs) and kwargs.get('amqp_url', None)
    manager_class = MessageQueueClientManager if use_message_queue else ClientManager
    return manager_class(**kwargs)
class ClientManager(object):
    """
    Factory to create Pulsar clients, used to manage potential shared
    state between multiple client connections.
    """

    def __init__(self, **kwds):
        # Pick the job-manager interface: in-process when a job_manager is
        # supplied, HTTP otherwise.
        if 'job_manager' in kwds:
            self.job_manager_interface_class = LocalPulsarInterface
            self.job_manager_interface_args = dict(
                job_manager=kwds['job_manager'],
                file_cache=kwds['file_cache'],
            )
        else:
            self.job_manager_interface_class = HttpPulsarInterface
            self.job_manager_interface_args = dict(
                transport=get_transport(kwds.get('transport', None)),
            )
        # Caching can be requested explicitly or via the environment.
        cache = kwds.get('cache', None)
        if cache is None:
            cache = _environ_default_int('PULSAR_CACHE_TRANSFERS')
        if cache:
            log.info("Setting Pulsar client class to caching variant.")
            self.client_cacher = ClientCacher(**kwds)
            self.client_class = InputCachingJobClient
            self.extra_client_kwds = {"client_cacher": self.client_cacher}
        else:
            log.info("Setting Pulsar client class to standard, non-caching variant.")
            self.client_class = JobClient
            self.extra_client_kwds = {}

    def get_client(self, destination_params, job_id, **kwargs):
        """Build a job client for the given destination and job id."""
        params = _parse_destination_params(destination_params)
        params.update(**kwargs)
        interface = self.job_manager_interface_class(
            destination_params=params, **self.job_manager_interface_args)
        return self.client_class(params, job_id, interface, **self.extra_client_kwds)

    def shutdown(self):
        """Nothing to tear down for this manager."""
        pass
try:
from galaxy.jobs.runners.util.cli import factory as cli_factory
except ImportError:
from pulsar.managers.util.cli import factory as cli_factory
class MessageQueueClientManager(object):
    """Client manager that talks to remote Pulsar servers over an AMQP
    exchange; job status updates arrive asynchronously on a consumer thread."""

    def __init__(self, **kwds):
        self.url = kwds.get('amqp_url')
        self.manager_name = kwds.get("manager", None) or "_default_"
        self.exchange = get_exchange(self.url, self.manager_name, kwds)
        # Last known status body per job id, maintained by the consumer thread.
        self.status_cache = {}
        self.callback_lock = threading.Lock()
        self.callback_thread = None
        self.active = True

    def ensure_has_status_update_callback(self, callback):
        """Start the status-update consumer thread (at most once) and route
        incoming status bodies to *callback*."""
        with self.callback_lock:
            if self.callback_thread is not None:
                return

            def callback_wrapper(body, message):
                try:
                    if "job_id" in body:
                        job_id = body["job_id"]
                        self.status_cache[job_id] = body
                    log.debug("Handling asynchronous status update from remote Pulsar.")
                    callback(body)
                except Exception:
                    log.exception("Failure processing job status update message.")
                except BaseException as e:
                    log.exception("Failure processing job status update message - BaseException type %s" % type(e))
                finally:
                    # Always ack so a poison message cannot wedge the queue.
                    message.ack()

            def run():
                self.exchange.consume("status_update", callback_wrapper, check=self)
                log.debug("Leaving Pulsar client status update thread, no additional Pulsar updates will be processed.")

            thread = threading.Thread(
                name="pulsar_client_%s_status_update_callback" % self.manager_name,
                target=run
            )
            thread.daemon = False  # Lets not interrupt processing of this.
            thread.start()
            self.callback_thread = thread

    def shutdown(self):
        """Mark the manager inactive; the consume loop's check observes this."""
        self.active = False

    def __nonzero__(self):
        # The exchange's consume loop evaluates truthiness of ``check=self``.
        return self.active

    # Bug fix: Python 3 looks up __bool__, not __nonzero__. Without this alias
    # the manager was always truthy on Python 3, so consume() never noticed
    # shutdown(). Aliasing keeps Python 2 behavior intact.
    __bool__ = __nonzero__

    def get_client(self, destination_params, job_id, **kwargs):
        """Build a message-queue job client; the CLI variant is used when a
        shell plugin is configured for the destination."""
        if job_id is None:
            raise Exception("Cannot generate Pulsar client for empty job_id.")
        destination_params = _parse_destination_params(destination_params)
        destination_params.update(**kwargs)
        if 'shell_plugin' in destination_params:
            shell = cli_factory.get_shell(destination_params)
            return MessageCLIJobClient(destination_params, job_id, self, shell)
        else:
            return MessageJobClient(destination_params, job_id, self)
class ObjectStoreClientManager(object):
    """Builds ObjectStoreClient instances over a local or HTTP Pulsar interface."""

    def __init__(self, **kwds):
        # Local interface when an object store is supplied, HTTP otherwise.
        if 'object_store' in kwds:
            self.interface_class = LocalPulsarInterface
            self.interface_args = dict(object_store=kwds['object_store'])
        else:
            self.interface_class = HttpPulsarInterface
            self.interface_args = dict(
                transport=get_transport(kwds.get('transport', None)),
            )
        self.extra_client_kwds = {}

    def get_client(self, client_params):
        """Create an object-store client for the given destination parameters."""
        interface = self.interface_class(
            destination_params=client_params, **self.interface_args)
        return ObjectStoreClient(interface)
class ClientCacher(object):
    """Performs client cache-insert transfers on a pool of daemon threads.

    Transfers are queued via queue_transfer(); TransferEventManager events let
    interested callers wait for (and observe the outcome of) a given path's
    transfer.
    """

    def __init__(self, **kwds):
        self.event_manager = TransferEventManager()
        # Pool size: explicit 'transfer_threads' kwarg wins, then the
        # PULSAR_CACHE_THREADS environment variable, then the module default.
        default_transfer_threads = _environ_default_int('PULSAR_CACHE_THREADS', DEFAULT_TRANSFER_THREADS)
        num_transfer_threads = int(kwds.get('transfer_threads', default_transfer_threads))
        self.__init_transfer_threads(num_transfer_threads)

    def queue_transfer(self, client, path):
        # Enqueue (client, path); an idle worker thread picks it up.
        self.transfer_queue.put((client, path))

    def acquire_event(self, input_path):
        # Hand out the transfer event for this path so callers can wait on it.
        return self.event_manager.acquire_event(input_path)

    def _transfer_worker(self):
        # Worker loop: runs forever on a daemon thread. Failures are logged and
        # swallowed, and task_done() is always called so queue joiners are not
        # blocked by a failed transfer.
        while True:
            transfer_info = self.transfer_queue.get()
            try:
                self.__perform_transfer(transfer_info)
            except BaseException as e:
                log.warn("Transfer failed.")
                log.exception(e)
                pass
            self.transfer_queue.task_done()

    def __perform_transfer(self, transfer_info):
        (client, path) = transfer_info
        event_holder = self.event_manager.acquire_event(path, force_clear=True)
        failed = True
        try:
            client.cache_insert(path)
            failed = False
        finally:
            # Record the outcome before releasing so waiters observe it.
            event_holder.failed = failed
            event_holder.release()

    def __init_transfer_threads(self, num_transfer_threads):
        self.num_transfer_threads = num_transfer_threads
        self.transfer_queue = Queue()
        for i in range(num_transfer_threads):
            t = threading.Thread(target=self._transfer_worker)
            # Daemon threads: the worker pool must not keep the process alive.
            t.daemon = True
            t.start()
def _parse_destination_params(destination_params):
try:
unicode_type = unicode
except NameError:
unicode_type = str
if isinstance(destination_params, str) or isinstance(destination_params, unicode_type):
destination_params = url_to_destination_params(destination_params)
return destination_params
def _environ_default_int(variable, default="0"):
val = getenv(variable, default)
int_val = int(default)
if str(val).isdigit():
int_val = int(val)
return int_val
# Bug fix: __all__ entries must be strings; listing the objects themselves
# makes ``from pulsar.client.manager import *`` raise a TypeError.
__all__ = ['ClientManager', 'ObjectStoreClientManager', 'HttpPulsarInterface']
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/pulsar/client/manager.py | Python | gpl-3.0 | 8,432 | [
"Galaxy"
] | 17465becd73a8eef7d8714e48f999a0eb5a72228fb27060aec03cba77770829d |
""" Some general standard classifier routines for astronomical data. """
import pickle
import gc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
from pandas import DataFrame, MultiIndex
from IPython.display import display
from sklearn.preprocessing import PolynomialFeatures
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
from .photometry import optimise_sdss_features
from .performance import balanced_accuracy_expected
from .preprocessing import balanced_train_test_split
from .viz import (plot_validation_accuracy_heatmap, reshape_grid_socres, plot_hex_map,
plot_recall_maps)
def train_classifier(data, feature_names, class_name, train_size, test_size, output='',
    random_state=None, coords=True, recall_maps=True, classifier=None, correct_baseline=None,
    balanced=True, returns=('correct_boolean', 'confusion_test'), report=True,
    pickle_path=None, fig_dir=''):
    """ Standard classifier routine.

    Parameters
    ----------
    data : DataFrame
        The DataFrame containing all the data.
    feature_names : array
        A list of column names in data that are used as features.
    class_name : str
        The column name of the target.
    train_size : int
        The size of the training set.
    test_size : int
        The size of the test set.
    output : str
        The name that will be attached to the path of the saved plots.
    random_state : int
        The value of the random state (used for reproducibility).
    coords : bool
        Whether coordinates are the first two feature columns (they are split
        off before training and used only for mapping).
    recall_maps : bool
        Whether to make a map of recall scores.
    classifier : Classifier object
        An initialised scikit-learn Classifier object; defaults to a
        300-tree random forest.
    correct_baseline : array
        If we want to compare our results to some baseline, supply the default
        predicted data here.
    balanced : bool
        Whether to make the training and test set balanced.
    returns : sequence of str
        Which of 'classifier', 'correct_boolean', 'confusion_test' to return
        (in that order). An immutable tuple default avoids the shared
        mutable-default-argument pitfall.
    report : bool
        Whether to print out the classification report.
    pickle_path : str
        If a pickle path is supplied, the classifier will be saved in the
        specified location.
    fig_dir : str
        Directory where recall-map figures are saved.

    Returns
    -------
    results : list
        The requested subset of [classifier, correct_boolean, confusion_test].
    """
    # Split the data, optionally enforcing balanced class counts.
    if balanced:
        X_train, X_test, y_train, y_test = balanced_train_test_split(
            data[feature_names], data[class_name], train_size=train_size, test_size=test_size,
            random_state=random_state)
    else:
        X_train, X_test, y_train, y_test = train_test_split(np.array(data[feature_names]),
            np.array(data[class_name]), train_size=train_size, test_size=test_size,
            random_state=random_state)

    if not classifier:
        classifier = RandomForestClassifier(
            n_estimators=300, n_jobs=-1, class_weight='subsample', random_state=random_state)

    # When coordinates lead the feature matrix, strip them off for training and
    # keep the test coordinates for plotting recall maps.
    coords_test = None
    if coords:
        coords_test = X_test[:, 0:2]
        X_train = X_train[:, 2:]
        X_test = X_test[:, 2:]

    correct_boolean, confusion_test = print_classification_result(X_train, X_test, y_train,
        y_test, report, recall_maps, classifier, correct_baseline, coords_test, output, fig_dir)

    if pickle_path:
        with open(pickle_path, 'wb') as f:
            pickle.dump(classifier, f, protocol=4)

    results = []
    if 'classifier' in returns:
        results.append(classifier)
    if 'correct_boolean' in returns:
        results.append(correct_boolean)
    if 'confusion_test' in returns:
        results.append(confusion_test)
    return results
def print_classification_result(X_train, X_test, y_train, y_test, report=True,
    recall_maps=True, classifier=None, correct_baseline=None, coords_test=None, output='',
    fig_dir='', trained=False):
    """ Train the specified classifier and print out the results.

    Parameters
    ----------
    X_train : array
        The feature vectors (stored as columns) in the training set.
    X_test : array
        The feature vectors (stored as columns) in the test set.
    y_train : array
        The target vector in the training set.
    y_test : array
        The target vector in the test set.
    report : bool
        Whether to print out the classification report.
    recall_maps : bool
        Whether to make a map of recall scores.
    classifier : Classifier object
        A classifier object that will be used to train and test the data.
        It should have the same interface as scikit-learn classifiers.
    correct_baseline : array
        If we want to compare our results to some baseline, supply the default
        predicted data here.
    coords_test : array
        The coordinates of the test examples used in mapping.
    output : str
        The name that will be attached to the path of the saved plots.
    fig_dir : str
        Directory where recall-map figures are saved.
    trained : bool
        If True, the classifier is assumed to already be fitted.

    Returns
    -------
    correct_boolean : array
        The boolean array indicating which test examples were correctly predicted.
    confusion_test : array
        The confusion matrix on the test examples.
    """
    # train and test
    if not trained:
        classifier.fit(X_train, y_train)
    y_pred_test = classifier.predict(X_test)
    confusion_test = metrics.confusion_matrix(y_test, y_pred_test)
    balanced_accuracy = balanced_accuracy_expected(confusion_test)
    # put confusion matrix in a DataFrame
    classes = ['Galaxy', 'Quasar', 'Star']
    pred_index = MultiIndex.from_tuples(list(zip(['Predicted'] * 3, classes)))
    act_index = MultiIndex.from_tuples(list(zip(['Actual'] * 3, classes)))
    confusion_features_df = DataFrame(confusion_test, columns=pred_index, index=act_index)
    # display results
    # NOTE(review): 'classes' above is ['Galaxy', 'Quasar', 'Star'] while
    # 'class_names' below is ['Galaxy', 'Star', 'Quasar']; confirm both match
    # the label ordering produced by the classifier.
    class_names = ['Galaxy', 'Star', 'Quasar']
    print('Here\'s the confusion matrix:')
    display(confusion_features_df)
    print('The balanced accuracy rate is {:.2%}.'.format(balanced_accuracy))
    if report:
        print('Classification report:')
        print(classification_report(y_test, y_pred_test, class_names, digits=4))
    correct_boolean = y_test == y_pred_test
    # plot the recall maps
    if recall_maps:
        if correct_baseline is None:
            print('Recall Maps of Galaxies, Stars, and Quasars, respectively:')
            plot_recall_maps(coords_test, y_test, y_pred_test, class_names, output,
                correct_boolean, vmin=0.7, vmax=1, mincnt=None, cmap=plt.cm.YlGn, fig_dir=fig_dir)
        else:
            print('Recall Improvement Maps of Galaxies, Stars, and Quasars, respectively:')
            # Plot the per-cell difference against the supplied baseline.
            correct_diff = correct_boolean.astype(int) - correct_baseline.astype(int)
            plot_recall_maps(coords_test, y_test, y_pred_test, class_names, output,
                correct_diff, vmin=-0.1, vmax=+0.1, mincnt=20, cmap=plt.cm.RdBu, fig_dir=fig_dir)
    return correct_boolean, confusion_test
def learning_curve(classifier, X, y, cv, sample_sizes,
    degree=1, pickle_path=None, verbose=True):
    """ Compute a learning curve for each cross-validation fold.

    Parameters
    ----------
    classifier : Classifier object
        A scikit-learn-style classifier (fit/predict interface).
    X : array
        The feature matrix (rows are examples).
    y : array
        The target vector.
    cv : iterable
        Yields (train_index, test_index) pairs, one per fold.
    sample_sizes : array
        Training-set sizes at which the balanced accuracy is evaluated.
    degree : int
        If greater than 1, features are polynomially expanded first.
    pickle_path : str
        If supplied, the learning curves are pickled to this path.
    verbose : bool
        Whether to print the fold index as each fold completes.

    Returns
    -------
    learning_curves : list of list of float
        One balanced-accuracy curve (one value per sample size) per fold.
    """
    learning_curves = []
    for i, (train_index, test_index) in enumerate(cv):
        X_train = X[train_index]
        X_test = X[test_index]
        y_train = y[train_index]
        y_test = y[test_index]

        if degree > 1:
            poly = PolynomialFeatures(degree=degree, interaction_only=False, include_bias=True)
            X_train = poly.fit_transform(X_train)
            X_test = poly.transform(X_test)

        lc = []
        for sample in sample_sizes:
            classifier.fit(X_train[:sample], y_train[:sample])
            # apply classifier on test set
            y_pred = classifier.predict(X_test)
            confusion = metrics.confusion_matrix(y_test, y_pred)
            lc.append(balanced_accuracy_expected(confusion))
        learning_curves.append(lc)
        if verbose:
            print(i, end=' ')

    # pickle learning curve
    if pickle_path:
        with open(pickle_path, 'wb') as f:
            pickle.dump(learning_curves, f, protocol=4)

    if verbose:
        print()
    # Bug fix: the curves were computed (and optionally pickled) but never
    # returned; callers now receive them directly.
    return learning_curves
def learning_curve_old(data, feature_cols, target_col, classifier, train_sizes, test_sizes=200000,
                       random_state=None, balanced=True, normalise=True, degree=1, pickle_path=None):
    """ Compute the learning curve of a classifier.

        Parameters
        ----------
        data : DataFrame
            The DataFrame containing all the data.

        feature_cols : array
            A list of column names in data that are used as features.

        target_col : str
            The column name of the target.

        classifier : Classifier object
            A classifier object that will be used to train and test the data.
            It should have the same interface as scikit-learn classifiers.

        train_sizes : array
            The list of the sample sizes that the classifier will be trained on.

        test_sizes : int or list of ints
            The sizes of the test set. An int is broadcast to every train size.

        random_state : int
            The value of the Random State (used for reproducibility).

        balanced : boolean
            Whether to draw a class-balanced train/test split.

        normalise : boolean
            Whether we should first normalise the data to zero mean and unit variance.

        degree : int
            If greater than 1, the data will first be polynomially transformed
            with the given degree.

        pickle_path : str
            The path where the values of the learning curve will be saved.

        Returns
        -------
        lc_accuracy_test : array
            The list of balanced accuracy scores for the given sample sizes.
    """
    lc_accuracy_test = []
    if type(test_sizes) is int:
        test_sizes = [test_sizes] * len(train_sizes)

    for i, j in zip(train_sizes, test_sizes):
        gc.collect()
        # split data into test set and training set
        if balanced:
            # BUG FIX: this branch referenced undefined names
            # `feature_names`/`class_name`; use the actual parameters.
            X_train, X_test, y_train, y_test = balanced_train_test_split(
                data[feature_cols], data[target_col], train_size=i, test_size=j,
                random_state=random_state)
        else:
            X_train, X_test, y_train, y_test = train_test_split(
                np.array(data[feature_cols]), np.array(data[target_col]),
                train_size=i, test_size=j, random_state=random_state)

        # derive distinct but reproducible seeds for the two shuffles;
        # BUG FIX: `random_state * 2` raised TypeError when random_state
        # was left at its default of None.
        seed_train = None if random_state is None else random_state * 2
        seed_test = None if random_state is None else random_state * 3
        X_train, y_train = shuffle(X_train, y_train, random_state=seed_train)
        X_test, y_test = shuffle(X_test, y_test, random_state=seed_test)

        if normalise:
            scaler = StandardScaler()
            X_train = scaler.fit_transform(X_train)
            X_test = scaler.transform(X_test)

        if degree > 1:
            poly_features = PolynomialFeatures(degree=degree, interaction_only=False, include_bias=True)
            X_train = poly_features.fit_transform(X_train)
            X_test = poly_features.transform(X_test)

        # train the classifier
        classifier.fit(X_train, y_train)
        # apply classifier on test set
        y_pred_test = classifier.predict(X_test)
        confusion_test = metrics.confusion_matrix(y_test, y_pred_test)
        lc_accuracy_test.append(balanced_accuracy_expected(confusion_test))

    # pickle learning curve
    if pickle_path:
        with open(pickle_path, 'wb') as f:
            pickle.dump(lc_accuracy_test, f, protocol=4)

    return lc_accuracy_test
def compute_all_learning_curves(data, feature_cols, target_col):
    """ Compute the learning curves with the most popular classifiers.

        The curves are not returned; each run pickles its scores to a path
        under ``pickle/04_learning_curves/`` as a side effect.

        Parameters
        ----------
        data : DataFrame
            The DataFrame containing all the features and target.

        feature_cols : array
            The list of column names of the features.

        target_col : array
            The name of the target column in the DataFrame.
    """
    # define the range of the sample sizes
    sample_sizes = np.concatenate((np.arange(100, 1000, 100), np.arange(1000, 10000, 1000),
                                   np.arange(10000, 100001, 10000), [200000, 300000]))

    # initialise the classifiers
    svm_rbf = SVC(kernel='rbf', gamma=0.01, C=100, cache_size=2000)
    svm_poly = LinearSVC(C=0.1, loss='squared_hinge', penalty='l1', dual=False, multi_class='ovr',
                         fit_intercept=True, random_state=21)
    logistic = LogisticRegression(penalty='l1', dual=False, C=1, multi_class='ovr',
                                  solver='liblinear', random_state=21)
    # NOTE(review): class_weight='auto' is the deprecated spelling of
    # 'balanced' in newer scikit-learn — confirm the pinned version.
    forest = RandomForestClassifier(n_estimators=100, n_jobs=-1, class_weight='auto', random_state=21)

    # BUG FIX: these calls passed (data, feature_cols, target_col, ...) — the
    # signature of learning_curve_old — but invoked learning_curve, whose
    # signature is (classifier, X, y, cv, sample_sizes, ...). Route them to
    # learning_curve_old, which matches the arguments. The unused
    # svm_sigmoid instance was also removed.
    # train SVM with RBF kernel (this will take a few hours)
    learning_curve_old(data, feature_cols, target_col, svm_rbf, sample_sizes, random_state=2,
                       normalise=True, pickle_path='pickle/04_learning_curves/lc_svm_rbf.pickle')
    # train SVM with polynomial kernel of degree 2
    learning_curve_old(data, feature_cols, target_col, svm_poly, sample_sizes, degree=2,
                       random_state=2, normalise=True,
                       pickle_path='pickle/04_learning_curves/lc_svm_poly_2.pickle')
    # train SVM with polynomial kernel of degree 3
    learning_curve_old(data, feature_cols, target_col, svm_poly, sample_sizes, degree=3,
                       random_state=2, normalise=True,
                       pickle_path='pickle/04_learning_curves/lc_svm_poly_3.pickle')
    # train logistic regression with polynomial kernel of degree 2
    learning_curve_old(data, feature_cols, target_col, logistic, sample_sizes, degree=2,
                       random_state=2, normalise=True,
                       pickle_path='pickle/04_learning_curves/lc_logistic_2.pickle')
    # train logistic regression with polynomial kernel of degree 3
    learning_curve_old(data, feature_cols, target_col, logistic, sample_sizes, degree=3,
                       random_state=2, normalise=True,
                       pickle_path='pickle/04_learning_curves/lc_logistic_3.pickle')
    # train a random forest
    learning_curve_old(data, feature_cols, target_col, forest, sample_sizes,
                       random_state=2, normalise=True,
                       pickle_path='pickle/04_learning_curves/lc_forest.pickle')
def grid_search(X, y, classifier, param_grid, train_size=300, test_size=300, clf_name=None,
                report=True):
    """ A general grid search routine.

        Parameters
        ----------
        X : array
            The feature matrix of the data.

        y : array
            The target column.

        classifier : Classifier object
            A classifier object that will be used to train and test the data.
            It should have the same interface as scikit-learn classifiers.

        param_grid : dict
            Dictionary containing the names of the hyperparameters and their
            associated values which the classifier will be trained with.

        train_size : int
            The size of the training set in each iteration.

        test_size : int
            The size of the test set in each iteration.

        clf_name : str
            The name of the classifier (used for printing of the results).
            Defaults to the classifier's class when not given.

        report : boolean
            Whether the results (the best hyperparameters) will be printed out.

        Returns
        -------
        grid : GridSearchCV
            The fitted grid-search object.
    """
    # five stratified shuffle splits, seeded for reproducibility
    splitter = StratifiedShuffleSplit(y, n_iter=5, train_size=train_size,
                                      test_size=test_size, random_state=17)
    searcher = GridSearchCV(classifier, param_grid=param_grid, cv=splitter)
    searcher.fit(X, y)

    name = clf_name if clf_name else str(classifier.__class__)
    if report:
        print("The best parameters for {} are {} with a score of {:.2%}.".format(
            name, searcher.best_params_, searcher.best_score_))
    return searcher
def grid_search_svm_rbf(X, y, train_size=300, test_size=300, fig_path=None,
                        C_range=np.logspace(-2, 10, 13), gamma_range=np.logspace(-9, 3, 13),
                        pickle_path=None):
    """ Do a grid search on SVM with an RBF kernel.

        Parameters
        ----------
        X : array
            The feature matrix of the data.

        y : array
            The target column.

        train_size : int
            The size of the training set in each iteration.

        test_size : int
            The size of the test set in each iteration.

        fig_path : str
            The path where the heat map plot can be saved.

        C_range, gamma_range : array
            The grids of C and gamma values to search over.

        pickle_path : str
            The path where the pickled scores can be saved.
    """
    # define search domain
    param_grid_svm = dict(gamma=gamma_range, C=C_range)

    # run grid search
    classifier = SVC(kernel='rbf')
    grid = grid_search(X, y, classifier, param_grid_svm,
                       train_size=train_size, test_size=test_size, clf_name='SVM RBF')
    scores = reshape_grid_socres(grid.grid_scores_, len(C_range), len(gamma_range))

    # BUG FIX: fig_path was accepted and documented but never used; plot the
    # heat map like the sigmoid variant does.
    if fig_path:
        fig = plt.figure(figsize=(10, 5))
        ax = plot_validation_accuracy_heatmap(scores, x_range=gamma_range,
                                              y_range=C_range, y_label='$C$',
                                              x_label=r'$\gamma$', power10='both')
        fig.savefig(fig_path, bbox_inches='tight')

    # pickle scores
    if pickle_path:
        with open(pickle_path, 'wb') as f:
            pickle.dump(scores, f, protocol=4)
def grid_search_svm_sigmoid(X, y, train_size=300, test_size=300, fig_path=None, pickle_path=None):
    """ Do a grid search on SVM with a sigmoid kernel.

        Parameters
        ----------
        X : array
            The feature matrix of the data.

        y : array
            The target column.

        train_size : int
            The size of the training set in each iteration.

        test_size : int
            The size of the test set in each iteration.

        fig_path : str
            The path where the heat map plot can be saved.

        pickle_path : str
            The path where the pickled scores can be saved.
    """
    # define search domain
    C_range = np.logspace(-2, 10, 13)
    gamma_range = np.logspace(-9, 3, 13)
    param_grid_svm = dict(gamma=gamma_range, C=C_range)

    # run grid search
    classifier = SVC(kernel='sigmoid')
    grid = grid_search(X, y, classifier, param_grid_svm,
                       train_size=train_size, test_size=test_size, clf_name='SVM Sigmoid')
    scores = reshape_grid_socres(grid.grid_scores_, len(C_range), len(gamma_range))

    # plot scores in a heat map
    fig = plt.figure(figsize=(10, 5))
    # raw string: '\g' is an invalid escape sequence in a plain literal
    ax = plot_validation_accuracy_heatmap(scores, x_range=gamma_range,
                                          y_range=C_range, y_label='$C$',
                                          x_label=r'$\gamma$', power10='both')
    if fig_path:
        fig.savefig(fig_path, bbox_inches='tight')

    # pickle scores
    if pickle_path:
        with open(pickle_path, 'wb') as f:
            pickle.dump(scores, f, protocol=4)
def grid_search_svm_poly_degree(X, y, param_grid, degree=2, train_size=300, test_size=300):
    """ Do a grid search on a Linear SVM given the specified polynomial transformation.

        Four LinearSVC variants are searched: OVR with squared hinge and
        L1/L2 penalties, OVR with plain hinge, and Crammer-Singer.

        Parameters
        ----------
        X : array
            The feature matrix of the data.

        y : array
            The target column.

        param_grid : dict
            Dictionary containing the names of the hyperparameters and their
            associated values which the classifier will be trained with.

        degree : int
            The degree of the polynomial feature transformation.

        train_size : int
            The size of the training set in each iteration.

        test_size : int
            The size of the test set in each iteration.

        Returns
        -------
        scores_flat : array
            List of scores of all possible combinations of the hyperparameters.
    """
    # transform features to polynomial space
    transformer = PolynomialFeatures(degree=degree, interaction_only=False, include_bias=True)
    X_poly = transformer.fit_transform(X)

    # the four LinearSVC configurations to search, in reporting order
    configurations = [
        dict(dual=False, fit_intercept=True, multi_class='ovr',
             loss='squared_hinge', penalty='l1', random_state=13),
        dict(dual=False, fit_intercept=True, multi_class='ovr',
             loss='squared_hinge', penalty='l2', random_state=13),
        dict(dual=True, fit_intercept=True, multi_class='ovr',
             loss='hinge', penalty='l2', random_state=13),
        dict(fit_intercept=True, multi_class='crammer_singer', random_state=13),
    ]

    # run grid search on each configuration and concatenate the scores
    scores_flat = []
    for kwargs in configurations:
        fitted = grid_search(X_poly, y, LinearSVC(**kwargs), param_grid,
                             train_size=train_size, test_size=test_size, report=False)
        scores_flat = scores_flat + fitted.grid_scores_
    return scores_flat
def grid_search_svm_poly(X, y, train_size=300, test_size=300, fig_path=None, pickle_path=None):
    """ Do a grid search on SVM with polynomial transformation of the features.

        Runs grid_search_svm_poly_degree for degrees 1-3 and collates the
        twelve resulting score rows into one heat map.

        Parameters
        ----------
        X : array
            The feature matrix of the data.

        y : array
            The target column.

        train_size : int
            The size of the training set in each iteration.

        test_size : int
            The size of the test set in each iteration.

        fig_path : str
            The path where the heat map plot can be saved.

        pickle_path : str
            The path where the pickled scores can be saved.
    """
    # define search domain
    C_range = np.logspace(-6, 6, 13)
    param_grid = dict(C=C_range)

    # run the search once per polynomial degree and concatenate the scores
    scores = []
    for deg in (1, 2, 3):
        scores = scores + grid_search_svm_poly_degree(
            X, y, param_grid, degree=deg, train_size=train_size, test_size=test_size)
    scores = reshape_grid_socres(scores, 12, len(C_range))

    if fig_path:
        # row labels: four LinearSVC variants per degree, same order as the scores
        variants = ['OVR, Squared Hinge, L1-norm',
                    'OVR, Squared Hinge, L2-norm',
                    'OVR, Hinge, L2-norm',
                    'Crammer-Singer']
        ylabels = ['Degree {}, {}'.format(deg, variant)
                   for deg in (1, 2, 3) for variant in variants]

        # plot scores on heat map
        fig = plt.figure(figsize=(10, 5))
        ax = plot_validation_accuracy_heatmap(scores, x_range=C_range, x_label='$C$', power10='x')
        plt.yticks(np.arange(0, 12), ylabels)
        fig.savefig(fig_path, bbox_inches='tight')

    # pickle scores
    if pickle_path:
        with open(pickle_path, 'wb') as f:
            pickle.dump(scores, f, protocol=4)
def grid_search_logistic_degree(X, y, param_grid, degree=2, train_size=300, test_size=300):
    """ Do a grid search on Logistic Regression given the specified polynomial transformation.

        Parameters
        ----------
        X : array
            The feature matrix of the data.

        y : array
            The target column.

        param_grid : dict
            Dictionary containing the names of the hyperparameters and their
            associated values which the classifier will be trained with.

        degree : int
            The degree of the polynomial feature transformation.

        train_size : int
            The size of the training set in each iteration.

        test_size : int
            The size of the test set in each iteration.

        Returns
        -------
        scores_flat : array
            List of scores of all possible combinations of the hyperparameters.
    """
    # transform features to polynomial space
    poly_features = PolynomialFeatures(degree=degree, interaction_only=False, include_bias=True)
    X_poly = poly_features.fit_transform(X)

    # BUG FIX: train_size/test_size were accepted but never forwarded to
    # grid_search (unlike the SVM counterpart), so the defaults were always
    # used. Forward them on every call.
    classifier = LogisticRegression(fit_intercept=True, dual=False, solver='liblinear',
                                    multi_class='ovr', penalty='l1', random_state=51)
    grid1 = grid_search(X_poly, y, classifier, param_grid,
                        train_size=train_size, test_size=test_size, report=False)

    classifier = LogisticRegression(fit_intercept=True, dual=False, solver='liblinear',
                                    multi_class='ovr', penalty='l2', random_state=51)
    grid2 = grid_search(X_poly, y, classifier, param_grid,
                        train_size=train_size, test_size=test_size, report=False)

    classifier = LogisticRegression(fit_intercept=True, dual=False, solver='lbfgs',
                                    multi_class='multinomial', penalty='l2', random_state=51)
    grid3 = grid_search(X_poly, y, classifier, param_grid,
                        train_size=train_size, test_size=test_size, report=False)

    # construct the scores
    scores_flat = grid1.grid_scores_ + grid2.grid_scores_ + grid3.grid_scores_
    return scores_flat
def grid_search_logistic(X, y, train_size=300, test_size=300, fig_path=None, pickle_path=None):
    """ Do a grid search on Logistic Regression.

        Runs grid_search_logistic_degree for degrees 1-3 and collates the
        nine resulting score rows into one heat map.

        Parameters
        ----------
        X : array
            The feature matrix of the data.

        y : array
            The target column.

        train_size : int
            The size of the training set in each iteration.

        test_size : int
            The size of the test set in each iteration.

        fig_path : str
            The path where the heat map plot can be saved.

        pickle_path : str
            The path where the pickled scores can be saved.
    """
    # define search domain
    C_range = np.logspace(-6, 6, 13)
    param_grid = dict(C=C_range)

    # run the search once per polynomial degree and concatenate the scores
    scores = []
    for deg in (1, 2, 3):
        scores = scores + grid_search_logistic_degree(
            X, y, param_grid, degree=deg, train_size=train_size, test_size=test_size)
    scores = reshape_grid_socres(scores, 9, len(C_range))

    if fig_path:
        # row labels: three logistic-regression variants per degree
        variants = ['OVR, L1-norm', 'OVR, L2-norm', 'Multinomial, L2-norm']
        ylabels = ['Degree {}, {}'.format(deg, variant)
                   for deg in (1, 2, 3) for variant in variants]

        # plot scores on heat map
        fig = plt.figure(figsize=(10, 5))
        ax = plot_validation_accuracy_heatmap(scores, x_range=C_range, x_label='$C$', power10='x')
        plt.yticks(np.arange(0, 9), ylabels)
        fig.savefig(fig_path, bbox_inches='tight')

    # pickle scores
    if pickle_path:
        with open(pickle_path, 'wb') as f:
            pickle.dump(scores, f, protocol=4)
def predict_unlabelled_objects(file_path, table, classifier,
                               data_cols, feature_cols, chunksize, pickle_paths,
                               scaler_path, verbose=True):
    """ Predict the classes of unlabelled objects given a classifier.

        The HDF5 table is streamed in chunks; per-class counts are
        accumulated onto 3600x3600 ra-dec grids (0.1-degree resolution)
        and pickled to the three paths in ``pickle_paths``
        (galaxy, quasar, star — in that order).

        Parameters
        ----------
        file_path : str
            The path of the HDF5 table that contains the feature matrix.

        table : str
            The name of the table inside the HDF5 file.

        classifier : Classifier object
            A fitted scikit-learn style classifier.

        data_cols : array
            The columns to read from the HDF5 table.

        feature_cols : array
            The feature columns fed to the classifier.

        chunksize : int
            Number of rows per streamed chunk.

        pickle_paths : list of str
            Output paths for the galaxy, quasar, and star count maps.

        scaler_path : str
            Path passed to optimise_sdss_features for feature scaling.

        verbose : boolean
            Whether to print progress as millions of rows processed.
    """
    sdss_chunks = pd.read_hdf(file_path, table, columns=data_cols, chunksize=chunksize)

    galaxy_map = np.zeros((3600, 3600), dtype=int)
    quasar_map = np.zeros((3600, 3600), dtype=int)
    star_map = np.zeros((3600, 3600), dtype=int)
    object_maps = [galaxy_map, quasar_map, star_map]

    for i, chunk in enumerate(sdss_chunks):
        # apply reddening correction and compute key colours
        optimise_sdss_features(chunk, scaler_path)
        chunk['prediction'] = classifier.predict(chunk[feature_cols])

        # map ra/dec to 0.1-degree grid cells, wrapping negatives into [0, 3600)
        chunk['ra'] = np.remainder(np.round(chunk['ra'] * 10) + 3600, 3600)
        chunk['dec'] = np.remainder(np.round(chunk['dec'] * 10) + 3600, 3600)

        for index, row in chunk.iterrows():
            # BUG FIX: np.remainder/round yield floats, and float array
            # indices are rejected by modern NumPy — cast to int explicitly.
            ra_idx = int(row['ra'])
            dec_idx = int(row['dec'])
            if row['prediction'] == 'Galaxy':
                galaxy_map[ra_idx][dec_idx] += 1
            elif row['prediction'] == 'Quasar':
                quasar_map[ra_idx][dec_idx] += 1
            elif row['prediction'] == 'Star':
                star_map[ra_idx][dec_idx] += 1
            else:
                print('Invalid prediction.')

        current_line = i * chunksize
        # progress marker every million rows (assumes chunksize divides 1e6)
        if verbose and current_line % 1000000 == 0:
            print(current_line // 1000000, end=' ')

    if verbose:
        print()

    # save our predictions
    for object_map, pickle_path in zip(object_maps, pickle_paths):
        with open(pickle_path, 'wb') as f:
            pickle.dump(object_map, f, protocol=4)
def map_unlabelled_objects(galaxy_map, quasar_map, star_map, fig_paths):
    """ Print class totals and plot predicted object counts on ra-dec sky maps.

        Parameters
        ----------
        galaxy_map, quasar_map, star_map : array
            3600x3600 grids of per-cell prediction counts at 0.1-degree
            resolution, as produced by predict_unlabelled_objects.

        fig_paths : iterable of str
            Output paths, paired in order with the combined map and the
            galaxy, quasar, and star maps.
    """
    # print out results
    whole_map = galaxy_map + star_map + quasar_map
    total_galaxies = np.sum(galaxy_map)
    total_quasars = np.sum(quasar_map)
    total_stars = np.sum(star_map)
    total = np.sum(whole_map)
    print('Total number of objects:', total)
    print('Number of predicted as galaxies: {:,} ({:.1%})'.format(total_galaxies, total_galaxies/total))
    print('Number of predicted as quasars: {:,} ({:.1%})'.format(total_quasars, total_quasars/total))
    print('Number of predicted as stars: {:,} ({:.1%})'.format(total_stars, total_stars/total))

    # construct ra-dec coordinates for every grid cell
    # NOTE(review): dec spans 0-360 here to match the 3600-cell wrapped grid
    # built in predict_unlabelled_objects — confirm this is intended rather
    # than the usual -90..90 declination range.
    ra = np.arange(0, 360, 0.1)
    dec = np.arange(0, 360, 0.1)
    decs, ras = np.meshgrid(dec, ra)
    decs = decs.flatten()
    dec
    ras = ras.flatten()

    # plot prediction on ra-dec maps, one figure per map/path pair
    object_maps = [whole_map, galaxy_map, quasar_map, star_map]
    for obj_map, fig_path in zip(object_maps, fig_paths):
        fig = plt.figure(figsize=(10,5))
        ax = plot_hex_map(ras, decs, C=obj_map.flatten(), gridsize=360,
                          reduce_C_function=np.sum, vmin=0, vmax=50000, origin=180,
                          milky_way=True)
fig.savefig(fig_path, bbox_inches='tight', dpi=300) | yen223/mclass-sky | mclearn/classifier.py | Python | bsd-3-clause | 30,619 | [
"Galaxy"
] | ee35471fb53c0e1df8e1d58000387da8e8288b223e1cbf697e36d8bbb78da80e |
from frontpage.models import Article, Profile, Media, ArticleMedia, MediaUpload, Post, Settings
from django.contrib.auth.models import User
# This function assumes that the create superuser command has already been run.
def make_testing_db():
    """Populate the database with one of each model for manual testing.

    Assumes the Django ``createsuperuser`` command has already been run,
    since the first User object is attached to the test profile.
    """
    m = Media()
    m.headline = "Most ugly image"
    m.lowResFile = "https://example.com/image.jpg"
    m.highResFile = "https://example.com/image.jpg"
    m.save()
    print("media created")

    u = Profile()
    u.authuser = User.objects.all()[0]
    u.active = True
    u.dect = 5234
    u.displayName = "Test Profile 01"
    u.rights = 0
    u.avatarMedia = m
    u.notes = "<center>This is to test html insertion</center>"
    u.save()
    print("Profile created")

    a = Article()
    a.cachedText = "<h2>This is a dummy article due to testing purposes</h2>"
    a.description = "Test article"
    a.price = "$15.00"
    a.quantity = 1000
    a.size = "XXL"
    a.type = 1
    a.visible = True
    a.addedByUser = u
    a.save()
    print("Article created")

    am = ArticleMedia()
    am.AID = a
    am.MID = m
    am.save()
    print("Article media link created")

    mu = MediaUpload()
    mu.MID = m
    mu.UID = u
    mu.save()
    print("Media user link created")

    p = Post()
    p.title = "Test post 01"
    p.cacheText = "<p>this is a test post<br/>generated by tools.make_testing_db()</p>"
    p.createdByUser = u
    p.visibleLevel = 0
    # BUG FIX: the post was populated but never persisted.
    p.save()
    print("Post created")

    s = Settings()
    s.changedByUser = u
    s.property = '''[{
"type":"link",
"href":"example.com","text":"Visit example.com"
},{"type":"link","text":"Visit the top level website",
"href":".."}]'''
    s.SName = 'frontpage.ui.navbar.content'
    s.requiredLevel = 0
    s.save()
    print("NavBar setting created")
| Technikradio/C3FOCSite | c3shop/test/tools.py | Python | bsd-3-clause | 1,768 | [
"VisIt"
] | 978d2e7c81dc142227b314c95f0085cf12ddf0cf463c80ea134ad94bfa05ea7d |
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
# Visit https://docs.mycroft.ai/skill.creation for more detailed information
# on the structure of this skill and its containing folder, as well as
# instructions for designing your own skill based on this template.
# Import statements: the list of outside modules you'll be using in your
# skills, whether from other files in mycroft-core or from external libraries
from os.path import dirname
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill
from mycroft.util.log import getLogger
import json
import requests
__author__ = 'seymour-bootay'
# Logger: used for debug lines, like "LOGGER.debug(xyz)". These
# statements will show up in the command line when running Mycroft.
LOGGER = getLogger(__name__)
class PresenceDetectionSkill(MycroftSkill):
    """Mycroft skill that reports the location of a named BLE beacon,
    as tracked by a configured presence-detection system."""

    # The constructor of the skill, which calls MycroftSkill's constructor
    def __init__(self):
        super(PresenceDetectionSkill, self).__init__(name="PresenceDetectionSkill")
        # which backend tracks the beacons (currently only 'happy-bubbles')
        self.presence_system = self.config['presence_system']
        # HTTP endpoint of that backend
        self.presence_url = self.config['presence_url']

    # This method loads the files needed for the skill's functioning, and
    # creates and registers each intent that the skill uses
    def initialize(self):
        self.load_data_files(dirname(__file__))

        presence_detection_intent = IntentBuilder("PresenceDetectionIntent"). \
            require("LocateBeaconKeyword").require("Beacon").build()
        self.register_intent(presence_detection_intent,
                             self.handle_presence_detection_intent)

    def handle_presence_detection_intent(self, message):
        """Answer a 'where is beacon X' utterance."""
        beacon_name = message.data.get("Beacon").lower()
        where = self.get_location(beacon_name)
        data = {
            "beacon_name": beacon_name,
            "where": where
        }
        if where is not None:
            self.speak_dialog("located", data)
        else:
            self.speak_dialog("could.not.locate", data)

    def process_beacons_happy_bubbles(self):
        """Fetch the Happy Bubbles presence API and return a
        {beacon_name: location} dict."""
        beacon_dict = {}
        response = requests.get(self.presence_url)
        json_data = json.loads(response.text)
        # NOTE: dict.iteritems() is Python 2 only, matching the era of this
        # skill; use .items() if ported to Python 3.
        for key, value in json_data['beacons'].iteritems():
            beacon_name = str(value['name']).lower()
            location = value['incoming_json']['hostname']
            beacon_dict[beacon_name] = location
        return beacon_dict

    def get_location(self, beacon_name):
        """Return the beacon's location, or None if it (or the presence
        system) is unavailable."""
        if self.presence_system is None:
            self.speak_dialog("presence.system.not.configured")
            # BUG FIX: previously fell through to `return beacon_dict.get(...)`
            # with beacon_dict unbound, raising NameError.
            return None
        elif self.presence_system == 'happy-bubbles':
            beacon_dict = self.process_beacons_happy_bubbles()
            return beacon_dict.get(beacon_name)
        else:
            # the configured presence system is not supported.
            data = {
                "presence_system": self.presence_system
            }
            self.speak_dialog("presence.system.not.supported", data)
            return None

    # The "stop" method defines what Mycroft does when told to stop during
    # the skill's execution. In this case, since the skill's functionality
    # is extremely simple, the method just contains the keyword "pass", which
    # does nothing.
    def stop(self):
        pass
# The "create_skill()" method is used to create an instance of the skill.
# Note that it's outside the class itself.
def create_skill():
    """Entry point used by the Mycroft skill loader: return a fresh skill instance."""
    return PresenceDetectionSkill()
| seymour-bootay/my-mycroft-skills | presence-detection/__init__.py | Python | gpl-3.0 | 4,111 | [
"VisIt"
] | bc422e19c789ff5860cb436c100c9ea7a365e0f5f5f5e4e622c73d784f652099 |
# (c) 2015, Brian Coca <briancoca+dev@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError, AnsibleAction, _AnsibleActionDone, AnsibleActionFail
from ansible.module_utils._text import to_native
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    """Action plugin for the ``patch`` module: copies a local patch file to
    the target host (unless ``remote_src``) before executing the module."""

    # Tells the executor this action stages files on the remote host.
    TRANSFERS_FILES = True

    def run(self, tmp=None, task_vars=None):
        """Transfer ``src`` to the remote temp dir and run the patch module.

        Raises (internally caught) AnsibleActionFail when ``src`` is missing
        or cannot be found; short-circuits via _AnsibleActionDone when
        ``remote_src`` is set, since no transfer is needed in that case.
        """
        if task_vars is None:
            task_vars = dict()

        result = super(ActionModule, self).run(tmp, task_vars)
        # tmp argument is superseded by the connection shell's tempdir
        tmp = self._connection._shell.tempdir

        src = self._task.args.get('src', None)
        remote_src = boolean(self._task.args.get('remote_src', 'no'), strict=False)

        try:
            if src is None:
                raise AnsibleActionFail("src is required")
            elif remote_src:
                # everything is remote, so we just execute the module
                # without changing any of the module arguments
                raise _AnsibleActionDone(result=self._execute_module(task_vars=task_vars))

            try:
                # resolve src against the play's files/ search paths
                src = self._find_needle('files', src)
            except AnsibleError as e:
                raise AnsibleActionFail(to_native(e))

            # copy the patch file to the remote tempdir and fix permissions
            # so the become-user (if any) can read it
            tmp_src = self._connection._shell.join_path(tmp, os.path.basename(src))
            self._transfer_file(src, tmp_src)
            self._fixup_perms2((tmp_src,))

            # re-point the module's src at the transferred copy
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    src=tmp_src,
                )
            )

            result.update(self._execute_module('patch', module_args=new_module_args, task_vars=task_vars))
        except AnsibleAction as e:
            # both the failure and the remote_src "done" path land here
            result.update(e.result)
        finally:
            self._remove_tmp_path(tmp)
        return result
| jnerin/ansible | lib/ansible/plugins/action/patch.py | Python | gpl-3.0 | 2,558 | [
"Brian"
] | 3f66e4e87d9578f458e36bf73f61f284ea30d78e962fc7d6af5ad85be6f3ef51 |
# -*- coding: utf-8 -*-
"""Entry points for PyKEEN.
PyKEEN is a machine learning library for knowledge graph embeddings that supports node clustering,
link prediction, entity disambiguation, question/answering, and other tasks with knowledge graphs.
It provides an interface for registering plugins using Python's entrypoints under the
``pykeen.triples.extension_importer`` and ``pykeen.triples.prefix_importer`` groups. More specific
information about how the PyBEL plugins are loaded into PyKEEN can be found in PyBEL's
`setup.cfg <https://github.com/pybel/pybel/blob/master/setup.cfg>`_ under the ``[options.entry_points]``
header.
The following example shows how you can parse/load the triples from a BEL document with the `*.bel` extension.
.. code-block:: python
from urllib.request import urlretrieve
url = 'https://raw.githubusercontent.com/cthoyt/selventa-knowledge/master/selventa_knowledge/small_corpus.bel'
urlretrieve(url, 'small_corpus.bel')
# Example 1A: Make triples factory
from pykeen.triples import TriplesFactory
tf = TriplesFactory(path='small_corpus.bel')
# Example 1B: Use directly in the pipeline, which automatically invokes training/testing set stratification
from pykeen.pipeline import pipeline
results = pipeline(
dataset='small_corpus.bel',
model='TransE',
)
The same is true for precompiled BEL documents in the node-link format with the `*.bel.nodelink.json` extension and
the pickle format with the `*.bel.pickle` extension.
The following example shows how you can load/parse the triples from a BEL document stored in BEL Commons using the
``bel-commons`` prefix in combination with the network's identifier.
.. code-block:: python
# Example 2A: Make a triples factory
from pykeen.triples import TriplesFactory
# the network's identifier is 528
tf = TriplesFactory(path='bel-commons:528')
# Example 1B: Use directly in the pipeline, which automatically invokes training/testing set stratification
from pykeen.pipeline import pipeline
results = pipeline(
dataset='bel-commons:528',
model='TransR',
)
Currently, this relies on the default BEL Commons service provider at https://bel-commons-dev.scai.fraunhofer.de,
whose location might change in the future.
"""
import numpy as np
from .bel_commons_client import from_bel_commons
from .gpickle import from_pickle
from .nodelink import from_nodelink_file
from .triples import to_triples
__all__ = [
"get_triples_from_bel",
"get_triples_from_bel_nodelink",
"get_triples_from_bel_pickle",
"get_triples_from_bel_commons",
]
def get_triples_from_bel(path: str) -> np.ndarray:
    """Get triples from a BEL file by wrapping :func:`pybel.io.tsv.api.get_triples`.

    :param path: the file path to a BEL Script
    :return: A three column array with head, relation, and tail in each row
    """
    # local import — presumably to break an import cycle with pybel's
    # top-level package; confirm before hoisting to module level
    from pybel import from_bel_script
    return _from_bel(path, from_bel_script)
def get_triples_from_bel_nodelink(path: str) -> np.ndarray:
    """Get triples from a BEL Node-link JSON file by wrapping :func:`pybel.io.tsv.api.get_triples`.

    :param path: the file path to a BEL Node-link JSON file
    :return: A three column array with head, relation, and tail in each row
    """
    # delegates to the generic loader with the node-link importer
    return _from_bel(path, from_nodelink_file)
def get_triples_from_bel_pickle(path: str) -> np.ndarray:
    """Get triples from a BEL pickle file by wrapping :func:`pybel.io.tsv.api.get_triples`.

    :param path: the file path to a BEL pickle file
    :return: A three column array with head, relation, and tail in each row
    """
    # delegates to the generic loader with the pickle importer
    return _from_bel(path, from_pickle)
def get_triples_from_bel_commons(network_id: str) -> np.ndarray:
    """Load a BEL document from BEL Commons by wrapping :func:`pybel.io.tsv.api.get_triples`.

    :param network_id: The network identifier for a graph in BEL Commons
    :return: A three column array with head, relation, and tail in each row
    """
    # str() tolerates integer identifiers being passed in
    return _from_bel(str(network_id), from_bel_commons)
def _from_bel(path, bel_importer) -> np.ndarray:
    """Load a BEL graph with the given importer and convert it to a triples array.

    :param path: importer-specific locator (file path or network identifier)
    :param bel_importer: callable that turns the locator into a BEL graph
    :return: A three column array with head, relation, and tail in each row
    """
    return np.array(to_triples(bel_importer(path)))
| pybel/pybel | src/pybel/io/pykeen.py | Python | mit | 4,191 | [
"Pybel"
] | 3e271917e185b580bb4046b869947bb1141722d00191448b133f36c1987d16ab |
# Copyright (c) 2012, 2013 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""
Routines for working with gridworlds.
Note (24 June 2012): Several pieces of source code are taken or
derived from btsynth; see http://scottman.net/2012/btsynth
"""
import itertools
import time
import copy
import numpy as np
import matplotlib.patches as mpl_patches
import matplotlib.pyplot as plt
import matplotlib.animation as anim
import matplotlib.cm as mpl_cm
import polytope as pc
from prop2part import prop2part2, PropPreservingPartition
from spec import GRSpec
class GridWorld:
def __init__(self, gw_desc=None, prefix="Y"):
"""Load gridworld described in given string, or make empty instance.
@param gw_desc: String containing a gridworld description, or
None to create an empty instance.
@param prefix: String to be used as prefix for naming
gridworld cell variables.
"""
if gw_desc is not None:
self.loads(gw_desc)
else:
self.W = None
self.init_list = []
self.goal_list = []
self.prefix = prefix
self.offset = (0, 0)
def __eq__(self, other):
"""Test for equality.
Does not compare prefixes of cell variable names.
"""
if self.W is None and other.W is None:
return True
if self.W is None or other.W is None:
return False # Only one of the two is undefined.
if self.size() != other.size():
return False
if np.all(self.W != other.W):
return False
if self.goal_list != other.goal_list:
return False
if self.init_list != other.init_list:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
    def __str__(self):
        """Render as an ASCII picture with grid lines and labels shown."""
        return self.pretty(show_grid=True)
    def __getitem__(self, key, next=False, nonbool=True):
        """Return variable name corresponding to this cell.

        Supports negative wrapping, e.g., if Y is an instance of
        GridWorld, then Y[-1,-1] will return the variable name of the
        cell in the bottom-right corner, Y[0,-1] the name of the
        top-right corner cell, etc. As usual in Python, you can only
        wrap around once.

        @param next: Use the primed (i.e., state at next time step)
                 form of the variable.
        @param nonbool: If True, then use gr1c support for nonboolean
                 variable domains.
        """
        if self.W is None:
            raise ValueError("Gridworld is empty; no names available.")
        if len(key) != len(self.W.shape):
            raise ValueError("malformed gridworld key.")
        if key[0] < -self.W.shape[0] or key[1] < -self.W.shape[1] or key[0] >= self.W.shape[0] or key[1] >= self.W.shape[1]:
            raise ValueError("gridworld key is out of bounds.")
        # Wrap negative indices (at most once) into the valid range.
        if key[0] < 0:
            key = (self.W.shape[0]+key[0], key[1])
        if key[1] < 0:
            key = (key[0], self.W.shape[1]+key[1])
        if nonbool:
            # Cell is a conjunction of the row and column variables,
            # e.g. ((Y_r = 2) & (Y_c = 3)); an apostrophe marks the
            # primed (next time step) form.
            if next:
                return "(("+str(self.prefix)+"_r' = "+str(key[0] + self.offset[0])+") & ("+str(self.prefix)+"_c' = "+str(key[1] + self.offset[1])+"))"
            else:
                return "(("+str(self.prefix)+"_r = "+str(key[0] + self.offset[0])+") & ("+str(self.prefix)+"_c = "+str(key[1] + self.offset[1])+"))"
        else:
            # Boolean encoding: one variable per cell, e.g. Y_2_3.
            out = str(self.prefix)+"_"+str(key[0] + self.offset[0])+"_"+str(key[1] + self.offset[1])
            if next:
                return out+"'"
            else:
                return out
    def __copy__(self):
        """Deep-copy by round-tripping through the description string."""
        return GridWorld(self.dumps(), prefix=self.prefix)
    def copy(self):
        """Return an independent copy of this gridworld; see L{__copy__}."""
        return self.__copy__()
    def state(self, key, offset=(0, 0), nonbool=True):
        """Return dictionary form of state with keys of variable names.

        Supports negative indices for key, e.g., as in __getitem__.

        The offset argument is motivated by the use-case of multiple
        agents whose moves are governed by separate "gridworlds" but
        who interact in a shared space; with an offset, we can make
        "sub-gridworlds" and enforce rules like mutual exclusion.

        @param nonbool: If True, then use gr1c support for nonboolean
                 variable domains.
        """
        if self.W is None:
            raise ValueError("Gridworld is empty; no cells exist.")
        if len(key) != len(self.W.shape):
            raise ValueError("malformed gridworld key.")
        if key[0] < -self.W.shape[0] or key[1] < -self.W.shape[1] or key[0] >= self.W.shape[0] or key[1] >= self.W.shape[1]:
            raise ValueError("gridworld key is out of bounds.")
        # Wrap negative indices (at most once) into the valid range.
        if key[0] < 0:
            key = (self.W.shape[0]+key[0], key[1])
        if key[1] < 0:
            key = (key[0], self.W.shape[1]+key[1])
        output = dict()
        if nonbool:
            # Two integer-valued variables: row and column coordinates.
            output[self.prefix+"_r"] = key[0]+offset[0]
            output[self.prefix+"_c"] = key[1]+offset[1]
        else:
            # One boolean variable per cell: 1 at the given cell, 0 elsewhere.
            for i in range(self.W.shape[0]):
                for j in range(self.W.shape[1]):
                    output[self.prefix+"_"+str(i+offset[0])+"_"+str(j+offset[1])] = 0
            output[self.prefix+"_"+str(key[0]+offset[0])+"_"+str(key[1]+offset[1])] = 1
        return output
def isEmpty(self, coord, extend=False):
"""Is cell at coord empty?
@param coord: (row, column) pair; supports negative indices.
@param extend: If True, then do not wrap indices and treat any
cell outside the grid as being occupied.
"""
if self.W is None:
raise ValueError("Gridworld is empty; no cells exist.")
if len(coord) != len(self.W.shape):
raise ValueError("malformed gridworld coord.")
if extend and (coord[0] < 0 or coord[1] < 0
or coord[0] > self.W.shape[0]-1
or coord[1] > self.W.shape[1]-1):
return False
if self.W[coord[0]][coord[1]] == 0:
return True
else:
return False
    def setOccupied(self, coord):
        """Mark cell at coord as statically (permanently) occupied."""
        if self.W is None:
            raise ValueError("Gridworld is empty; no cells exist.")
        # 1 is the internal encoding for a static obstacle.
        self.W[coord[0]][coord[1]] = 1
    def setEmpty(self, coord):
        """Mark cell at coord as empty."""
        if self.W is None:
            raise ValueError("Gridworld is empty; no cells exist.")
        # 0 is the internal encoding for an empty cell.
        self.W[coord[0]][coord[1]] = 0
def isReachable(self, start, stop):
"""Decide whether there is a path from start cell to stop.
Assume the gridworld is 4-connected.
@param start: (row, column) pair; supports negative indices.
@param stop: same as start argument.
@return: True if there is a path, False otherwise.
"""
# Check input values and handle negative coordinates
if self.W is None:
raise ValueError("Gridworld is empty; no names available.")
if len(start) != len(self.W.shape):
raise ValueError("malformed gridworld start coordinate.")
if start[0] < -self.W.shape[0] or start[1] < -self.W.shape[1] or start[0] >= self.W.shape[0] or start[1] >= self.W.shape[1]:
raise ValueError("gridworld start coordinate is out of bounds.")
if start[0] < 0:
start = (self.W.shape[0]+start[0], start[1])
if start[1] < 0:
start = (start[0], self.W.shape[1]+start[1])
if len(stop) != len(self.W.shape):
raise ValueError("malformed gridworld stop coordinate.")
if stop[0] < -self.W.shape[0] or stop[1] < -self.W.shape[1] or stop[0] >= self.W.shape[0] or stop[1] >= self.W.shape[1]:
raise ValueError("gridworld stop coordinate is out of bounds.")
if stop[0] < 0:
stop = (self.W.shape[0]+stop[0], stop[1])
if stop[1] < 0:
stop = (stop[0], self.W.shape[1]+stop[1])
# Quick sanity check
if not (self.isEmpty(start) and self.isEmpty(stop)):
return False
# Similar to depth-first search
OPEN = [start]
CLOSED = []
while len(OPEN) > 0:
current = OPEN.pop()
if current == stop:
return True
for (i,j) in [(1,0), (-1,0), (0,1), (0,-1)]:
if (current[0]+i < 0 or current[0]+i >= self.W.shape[0]
or current[1]+j < 0 or current[1]+j >= self.W.shape[1]):
continue
if self.isEmpty((current[0]+i, current[1]+j)) and (current[0]+i, current[1]+j) not in CLOSED and (current[0]+i, current[1]+j) not in OPEN:
OPEN.append((current[0]+i, current[1]+j))
CLOSED.append(current)
return False
    def plot(self, font_pt=18, show_grid=False, grid_width=2, troll_list=[]):
        """Draw figure depicting this gridworld.

        Figure legend (symbolic form in parenthesis):
          - "I" ('m+') : possible initial position;
          - "G" ('r*') : goal;
          - "E" ('gx') : goal of a troll; its extent is indicated by gray cells

        @param font_pt: size (in points) for rendering text in the
                 figure. If 0, then use symbols instead (see legend above).
        @param troll_list: ...same as the argument with the same name
                 given to L{add_trolls}.
        """
        W = self.W.copy()
        W = np.ones(shape=W.shape) - W  # Invert so empty=1 (white), wall=0 (black).
        fig = plt.figure()
        ax = plt.subplot(111)
        plt.imshow(W, cmap=mpl_cm.gray, aspect="equal", interpolation="nearest",
                   vmin=0., vmax=1.)
        xmin, xmax, ymin, ymax = plt.axis()
        # Pixel coordinates of the boundaries between cells.
        x_steps = np.linspace(xmin, xmax, W.shape[1]+1)
        y_steps = np.linspace(ymin, ymax, W.shape[0]+1)
        if show_grid:
            for k in x_steps:
                plt.plot([k, k], [ymin, ymax], 'k-', linewidth=grid_width)
            for k in y_steps:
                plt.plot([xmin, xmax], [k, k], 'k-', linewidth=grid_width)
            plt.axis([xmin, xmax, ymin, ymax])
        for p in self.init_list:
            if font_pt > 0:
                plt.text(p[1], p[0], "I", size=font_pt)
            else:
                plt.plot(p[1], p[0], 'm+')
        for p in self.goal_list:
            if font_pt > 0:
                plt.text(p[1], p[0], "G", size=font_pt)
            else:
                plt.plot(p[1], p[0], 'r*')
        for (center, radius) in troll_list:
            if font_pt > 0:
                plt.text(center[1], center[0], "E", size=font_pt)
            else:
                plt.plot(center[1], center[0], 'gx')
            if center[0] >= W.shape[0] or center[0] < 0 or center[1] >= W.shape[1] or center[1] < 0:
                raise ValueError("troll center is outside of gridworld")
            # Clip the troll's reachable square (center +/- radius) to the grid.
            t_offset = (max(0, center[0]-radius), max(0, center[1]-radius))
            t_size = [center[0]-t_offset[0]+radius+1, center[1]-t_offset[1]+radius+1]
            if t_offset[0]+t_size[0] >= W.shape[0]:
                t_size[0] = W.shape[0]-t_offset[0]
            if t_offset[1]+t_size[1] >= W.shape[1]:
                t_size[1] = W.shape[1]-t_offset[1]
            t_size = (t_size[0], t_size[1])
            # Shade every empty cell within the troll's extent in light gray.
            for i in range(t_size[0]):
                for j in range(t_size[1]):
                    if self.W[i+t_offset[0]][j+t_offset[1]] == 0:
                        ax.add_patch(mpl_patches.Rectangle((x_steps[j+t_offset[1]], y_steps[W.shape[0]-(i+t_offset[0])]),1,1, color=(.8,.8,.8)))
        plt.axis([xmin, xmax, ymin, ymax])
    def pretty(self, show_grid=False, line_prefix="", path=[], goal_order=False, troll_list=[]):
        """Return pretty-for-printing string.

        @param show_grid: If True, then grid the pretty world and show
                 row and column labels along the outer edges.
        @param line_prefix: prefix each line with this string.
        @param troll_list: ...same as the argument with the same name
                 given to L{add_trolls}.
        """
        # Drop consecutive duplicate cells from a path (currently unused below).
        compress = lambda p: [ p[n] for n in range(len(p)-1) if p[n] != p[n+1] ]
        # See comments in code for the method loads regarding values in W
        if self.W is None:
            return ""
        # LEGEND:
        #   * - wall (as used in original world matrix definition);
        #   G - goal location;
        #   I - possible initial location.
        #   E - goal of a troll (if troll_list is nonempty);
        #       its extent is indicated by "+"
        out_str = line_prefix
        def direct(c1, c2):
            # Arrow character for the move from cell c1 to adjacent cell c2.
            (y1, x1) = c1
            (y2, x2) = c2
            if x1 > x2:
                return "<"
            elif x1 < x2:
                return ">"
            elif y1 > y2:
                return "^"
            elif y1 < y2:
                return "v"
            else: # c1 == c2
                return "."
        # Temporarily augment world map W to indicate troll positions:
        # -1 marks a troll base cell, -2 the empty cells in its extent.
        for (center, radius) in troll_list:
            if self.W[center[0]][center[1]] == 0:
                self.W[center[0]][center[1]] = -1
            if center[0] >= self.W.shape[0] or center[0] < 0 or center[1] >= self.W.shape[1] or center[1] < 0:
                raise ValueError("troll center is outside of gridworld")
            t_offset = (max(0, center[0]-radius), max(0, center[1]-radius))
            t_size = [center[0]-t_offset[0]+radius+1, center[1]-t_offset[1]+radius+1]
            if t_offset[0]+t_size[0] >= self.W.shape[0]:
                t_size[0] = self.W.shape[0]-t_offset[0]
            if t_offset[1]+t_size[1] >= self.W.shape[1]:
                t_size[1] = self.W.shape[1]-t_offset[1]
            t_size = (t_size[0], t_size[1])
            for i in range(t_size[0]):
                for j in range(t_size[1]):
                    if self.W[i+t_offset[0]][j+t_offset[1]] == 0:
                        self.W[i+t_offset[0]][j+t_offset[1]] = -2
        if show_grid:
            # Column labels along the top edge.
            out_str += " " + "".join([str(k).rjust(2) for k in range(self.W.shape[1])]) + "\n"
        else:
            out_str += "-"*(self.W.shape[1]+2) + "\n"
        #if path:
        # path = compress(path)
        for i in range(self.W.shape[0]):
            out_str += line_prefix
            if show_grid:
                # Horizontal rule plus row label.
                out_str += " " + "-"*(self.W.shape[1]*2+1) + "\n"
                out_str += line_prefix
                out_str += str(i).rjust(2)
            else:
                out_str += "|"
            for j in range(self.W.shape[1]):
                if show_grid:
                    out_str += "|"
                if self.W[i][j] == 0:
                    if (i,j) in self.init_list:
                        out_str += "I"
                    elif (i,j) in self.goal_list:
                        if goal_order:
                            out_str += str(self.goal_list.index((i,j)))
                        else:
                            out_str += "G"
                    elif (i,j) in path:
                        # Draw an arrow toward the successor of this cell on
                        # the (cyclic) path; "." means successor == cell.
                        indices = (n for (n,c) in enumerate(path) if c == (i,j))
                        for x in indices:
                            d = direct((i,j), path[(x+1) % len(path)])
                            if d != ".":
                                break
                        out_str += d
                    else:
                        out_str += " "
                elif self.W[i][j] == 1:
                    out_str += "*"
                elif self.W[i][j] == -1:
                    out_str += "E"
                elif self.W[i][j] == -2:
                    out_str += "+"
                else:
                    raise ValueError("Unrecognized internal world W encoding.")
            out_str += "|\n"
        out_str += line_prefix
        if show_grid:
            out_str += " " + "-"*(self.W.shape[1]*2+1) + "\n"
        else:
            out_str += "-"*(self.W.shape[1]+2) + "\n"
        # Delete temporary mark-up to world map W
        self.W[self.W == -1] = 0
        self.W[self.W == -2] = 0
        return out_str
def size(self):
"""Return size of gridworld as a tuple in row-major order."""
if self.W is None:
return (0, 0)
else:
return self.W.shape
    def loads(self, gw_desc):
        """Reincarnate using given gridworld description string.

        @param gw_desc: String containing a gridworld description.

        In a gridworld description, any line beginning with # is
        ignored (regarded as a comment). The first non-blank and
        non-comment line must give the grid size as two positive
        integers separated by whitespace, with the first being the
        number of rows and the second the number of columns.

        Each line after the size line is used to construct a row of
        the gridworld. These are read in order with maximum number of
        lines being the number of rows in the gridworld. A row
        definition is whitespace-sensitive up to the number of columns
        (any characters beyond the column count are ignored, so in
        particular trailing whitespace is allowed) and can include the
        following symbols:

          - C{ } : an empty cell,
          - C{*} : a statically occupied cell,
          - C{I} : possible initial cell,
          - C{G} : goal cell (must be visited infinitely often).

        If the end of file is reached before all rows have been
        constructed, then the remaining rows are assumed to be empty.
        After all rows have been constructed, the remainder of the
        file is ignored.
        """
        ###################################################
        # Internal format notes:
        #
        # W is a matrix of integers with the same shape as the
        # gridworld.  Each element has value indicating properties of
        # the corresponding cell, according the following key.
        #
        # 0 - empty,
        # 1 - statically (permanently) occupied.
        ###################################################
        W = None
        init_list = []
        goal_list = []
        row_index = -1  # -1 until the size line has been parsed.
        for line in gw_desc.splitlines():
            if row_index != -1:
                # Size has been read, so we are processing row definitions
                if row_index >= W.shape[0]:
                    break
                for j in range(min(len(line), W.shape[1])):
                    if line[j] == " ":
                        W[row_index][j] = 0
                    elif line[j] == "*":
                        W[row_index][j] = 1
                    elif line[j] == "I":
                        # Cell stays 0 (empty); only its role is recorded.
                        init_list.append((row_index, j))
                    elif line[j] == "G":
                        goal_list.append((row_index, j))
                    else:
                        raise ValueError("unrecognized row symbol \""+str(line[j])+"\".")
                row_index += 1
            else:
                # Still looking for gridworld size in the given string
                if len(line.strip()) == 0 or line.lstrip()[0] == "#":
                    continue # Ignore blank and comment lines
                line_el = line.split()
                W = np.zeros((int(line_el[0]), int(line_el[1])),
                             dtype=np.int32)
                row_index = 0
        if W is None:
            raise ValueError("malformed gridworld description.")
        # Arrived here without errors, so actually reincarnate
        self.W = W
        self.init_list = init_list
        self.goal_list = goal_list
def load(self, gw_file):
"""Read description from given file.
Merely a convenience wrapper for the L{loads} method.
"""
with open(gw_file, "r") as f:
self.loads(f.read())
def dumps(self, line_prefix=""):
"""Dump gridworld description string.
@param line_prefix: prefix each line with this string.
"""
if self.W is None:
raise ValueError("Gridworld does not exist.")
out_str = line_prefix+" ".join([str(i) for i in self.W.shape])+"\n"
for i in range(self.W.shape[0]):
out_str += line_prefix
for j in range(self.W.shape[1]):
if self.W[i][j] == 0:
if (i,j) in self.init_list:
out_str += "I"
elif (i,j) in self.goal_list:
out_str += "G"
else:
out_str += " "
elif self.W[i][j] == 1:
out_str += "*"
else:
raise ValueError("Unrecognized internal world W encoding.")
out_str += "\n"
return out_str
    def dumpsubworld(self, size, offset=(0, 0), prefix="Y", extend=False):
        """Generate new GridWorld instance from part of current one.

        Does not perform automatic truncation (to make desired
        subworld fit); instead a ValueError exception is raised.
        However, the "extend" argument can be used to achieve
        something similar.

        Possible initial positions and goals are not included in the
        returned GridWorld instance.

        @param size: (height, width)
        @param prefix: String to be used as prefix for naming
                 subgridworld cell variables.
        @param extend: If True, then any size and offset is permitted,
                 where any positions outside the actual gridworld are
                 assumed to be occupied.

        @rtype: L{GridWorld}
        """
        if self.W is None:
            raise ValueError("Gridworld does not exist.")
        if len(size) != len(self.W.shape) or len(offset) != len(self.W.shape):
            raise ValueError("malformed size or offset.")
        if not extend:
            # Strict mode: the requested window must lie entirely inside W.
            if offset[0] < 0 or offset[0] >= self.W.shape[0] or offset[1] < 0 or offset[1] >= self.W.shape[1]:
                raise ValueError("offset is out of bounds.")
            if size[0] < 1 or size[1] < 1 or offset[0]+size[0] > self.W.shape[0] or offset[1]+size[1] > self.W.shape[1]:
                raise ValueError("unworkable subworld size, given offset.")
            sub = GridWorld(prefix=prefix)
            sub.W = self.W[offset[0]:(offset[0]+size[0]), offset[1]:(offset[1]+size[1])].copy()
        else:
            # Extend mode: start from an all-occupied subworld, then copy in
            # whatever part of W overlaps the requested window.
            sub = GridWorld(prefix=prefix)
            sub.W = np.ones(size)
            self_offset = (max(offset[0],0), max(offset[1],0))
            self_offset = (min(self_offset[0],self.W.shape[0]-1), min(self_offset[1],self.W.shape[1]-1))
            sub_offset = (max(-offset[0],0), max(-offset[1],0))
            sub_offset = (min(sub_offset[0], sub.W.shape[0]-1), min(sub_offset[1], sub.W.shape[1]-1))
            actual_size = (min(size[0], self.W.shape[0]-self_offset[0], sub.W.shape[0]-sub_offset[0]),
                           min(size[1], self.W.shape[1]-self_offset[1], sub.W.shape[1]-sub_offset[1]))
            sub.W[sub_offset[0]:(sub_offset[0]+actual_size[0]), sub_offset[1]:(sub_offset[1]+actual_size[1])] = self.W[self_offset[0]:(self_offset[0]+actual_size[0]), self_offset[1]:(self_offset[1]+actual_size[1])]
        return sub
def dumpPPartition(self, side_lengths=(1., 1.), offset=(0., 0.), nonbool=True):
"""Return proposition-preserving partition from this gridworld.
In setting the initial transition matrix, we assume the
gridworld is 4-connected.
@param side_lengths: pair (W, H) giving width and height of
each cell, assumed to be the same across the grid.
@param offset: 2-dimensional coordinate declaring where the
bottom-left corner of the gridworld should be placed
in the continuous space; default places it at the origin.
@rtype: L{PropPreservingPartition<prop2part.PropPreservingPartition>}
"""
if self.W is None:
raise ValueError("Gridworld does not exist.")
domain = pc.Polytope(A=np.array([[0,-1],
[0,1],
[-1,0],
[1,0]], dtype=np.float64),
b=np.array([-offset[1],
offset[1]+self.W.shape[0]*side_lengths[1],
-offset[0],
offset[0]+self.W.shape[1]*side_lengths[0]],
dtype=np.float64))
cells = {}
for i in range(self.W.shape[0]):
for j in range(self.W.shape[1]):
if nonbool:
cell_var = self.__getitem__((i,j))
else:
cell_var = self.prefix+"_"+str(i)+"_"+str(j)
cells[cell_var] \
= pc.Polytope(A=np.array([[0,-1],
[0,1],
[-1,0],
[1,0]], dtype=np.float64),
b=np.array([-offset[1]-(self.W.shape[0]-i-1)*side_lengths[1],
offset[1]+(self.W.shape[0]-i)*side_lengths[1],
-offset[0]-j*side_lengths[0],
offset[0]+(j+1)*side_lengths[0]],
dtype=np.float64))
part = prop2part2(domain, cells)
adjacency = np.zeros((self.W.shape[0]*self.W.shape[1], self.W.shape[0]*self.W.shape[1]), dtype=np.int8)
for this_ind in range(len(part.list_region)):
(prefix, i, j) = extract_coord(part.list_prop_symbol[part.list_region[this_ind].list_prop.index(1)],
nonbool=nonbool)
if self.W[i][j] != 0:
continue # Static obstacle cells are not traversable
adjacency[this_ind, this_ind] = 1
if i > 0 and self.W[i-1][j] == 0:
row_index = i-1
col_index = j
if j > 0 and self.W[i][j-1] == 0:
row_index = i
col_index = j-1
if i < self.W.shape[0]-1 and self.W[i+1][j] == 0:
row_index = i+1
col_index = j
if j < self.W.shape[1]-1 and self.W[i][j+1] == 0:
row_index = i
col_index = j+1
if nonbool:
symbol_ind = part.list_prop_symbol.index(self.__getitem__((row_index, col_index)))
else:
symbol_ind = part.list_prop_symbol.index(prefix+"_"+str(row_index)+"_"+str(col_index))
ind = 0
while part.list_region[ind].list_prop[symbol_ind] == 0:
ind += 1
adjacency[ind, this_ind] = 1
part.adj = adjacency
return part
    def discreteTransitionSystem(self, nonbool=True):
        """ Write a discrete transition system suitable for synthesis.

        Unlike dumpPPartition, this does not create polytopes; it is
        nonetheless useful and computationally less expensive.

        @param nonbool: If True, then use gr1c support for nonboolean
                 variable domains.  In particular this affects region
                 naming, as achieved with L{__getitem__}.

        @rtype: L{PropPreservingPartition<prop2part.PropPreservingPartition>}
        """
        disc_dynamics = PropPreservingPartition(list_region=[],
                                                list_prop_symbol=[], trans=[])
        num_cells = self.W.shape[0] * self.W.shape[1]
        for i in range(self.W.shape[0]):
            for j in range(self.W.shape[1]):
                # Map a (row, col) cell to its flat row-major index.
                flat = lambda x, y: x*self.W.shape[1] + y
                # Proposition
                prop = self.__getitem__((i,j), nonbool=nonbool)
                disc_dynamics.list_prop_symbol.append(prop)
                # Region: indicator vector with a 1 at this cell only.
                r = [ 0 for x in range(0, num_cells) ]
                r[flat(i,j)] = 1
                disc_dynamics.list_region.append(pc.Region("R_" + prop, r))
                # Transitions
                # trans[p][q] if q -> p
                t = [ 0 for x in range(0, num_cells) ]
                t[flat(i,j)] = 1
                if self.W[i][j] == 0:
                    # An empty cell can also be entered from each of its
                    # (up to four) in-bounds neighbors.
                    if i > 0: t[flat(i-1,j)] = 1
                    if j > 0: t[flat(i,j-1)] = 1
                    if i < self.W.shape[0]-1: t[flat(i+1,j)] = 1
                    if j < self.W.shape[1]-1: t[flat(i,j+1)] = 1
                disc_dynamics.trans.append(t)
        disc_dynamics.num_prop = len(disc_dynamics.list_prop_symbol)
        disc_dynamics.num_regions = len(disc_dynamics.list_region)
        return disc_dynamics
def deterministicMovingObstacle(self, path):
trans = []
num_cells = self.W.shape[0] * self.W.shape[1]
for i in range(self.W.shape[0]):
for j in range(self.W.shape[1]):
flat = lambda x, y: x*self.W.shape[1] + y
t = [ 0 for x in range(0, num_cells) ]
if (i,j) in path:
n = path.index((i,j))
# path[n-1] -> path[n], path[L-1] -> path[0]
t[flat(*path[(n-1)%len(path)])] = 1
trans.append(t)
return trans
    def spec(self, offset=(0, 0), controlled_dyn=True, nonbool=True):
        """Return GRSpec instance describing this gridworld.

        The offset argument is motivated by the use-case of multiple
        agents whose moves are governed by separate "gridworlds" but
        who interact in a shared space; with an offset, we can make
        "sub-gridworlds" and enforce rules like mutual exclusion.

        Syntax is that of gr1c; in particular, "next" variables are
        primed. For example, x' refers to the variable x at the next
        time step.

        If nonbool is False, then variables are named according to
        prefix_R_C, where prefix is given (attribute of this GridWorld
        object), R is the row, and C is the column of the cell
        (0-indexed).  If nonbool is True (default), cells are
        identified with subformulae of the form::

          ((prefix_r = R) & (prefix_c = C))

        L{GridWorld.__getitem__} and L{extract_coord} provide
        reference implementations.

        For incorporating this gridworld into an existing
        specification (e.g., respecting external references to cell
        variable names), see the method L{GRSpec.importGridWorld}.

        @param offset: index offset to apply when generating the
                 specification; e.g., given prefix of "Y",
                 offset=(2,1) would cause the variable for the cell at
                 (0,3) to be named Y_2_4.
        @param controlled_dyn: whether to treat this gridworld as
                 describing controlled ("system") or uncontrolled
                 ("environment") variables.
        @param nonbool: If True, then use gr1c support for nonboolean
                 variable domains.

        @rtype: L{GRSpec}
        """
        if self.W is None:
            raise ValueError("Gridworld does not exist.")
        row_low = 0
        row_high = self.W.shape[0]
        col_low = 0
        col_high = self.W.shape[1]
        spec_trans = []
        # Temporarily override self.offset so __getitem__ emits names with
        # the requested offset; restored before returning.
        orig_offset = copy.copy(self.offset)
        if nonbool:
            self.offset = (0,0)
        else:
            self.offset = offset
        # Safety, transitions
        for i in range(row_low, row_high):
            for j in range(col_low, col_high):
                if self.W[i][j] == 1:
                    continue # Cannot start from an occupied cell.
                spec_trans.append(self.__getitem__((i,j), nonbool=nonbool)+" -> (")
                # Normal transitions:
                spec_trans[-1] += self.__getitem__((i,j), next=True, nonbool=nonbool)
                if i > row_low and self.W[i-1][j] == 0:
                    spec_trans[-1] += " | " + self.__getitem__((i-1,j), next=True, nonbool=nonbool)
                if j > col_low and self.W[i][j-1] == 0:
                    spec_trans[-1] += " | " + self.__getitem__((i,j-1), next=True, nonbool=nonbool)
                if i < row_high-1 and self.W[i+1][j] == 0:
                    spec_trans[-1] += " | " + self.__getitem__((i+1,j), next=True, nonbool=nonbool)
                if j < col_high-1 and self.W[i][j+1] == 0:
                    spec_trans[-1] += " | " + self.__getitem__((i,j+1), next=True, nonbool=nonbool)
                spec_trans[-1] += ")"
        # Safety, static
        for i in range(row_low, row_high):
            for j in range(col_low, col_high):
                if self.W[i][j] == 1:
                    spec_trans.append("!(" + self.__getitem__((i,j), next=True, nonbool=nonbool) + ")")
        # Safety, mutex; only needed when using boolean variables for cells
        if not nonbool:
            pos_indices = [k for k in itertools.product(range(row_low, row_high), range(col_low, col_high))]
            disj = []
            for outer_ind in pos_indices:
                conj = []
                if outer_ind != (-1, -1) and self.W[outer_ind[0]][outer_ind[1]] == 1:
                    continue
                if outer_ind == (-1, -1):
                    conj.append(self.prefix+"_n_n'")
                else:
                    conj.append(self.__getitem__((outer_ind[0], outer_ind[1]), next=True, nonbool=nonbool))
                # Exactly one cell variable may be true at the next step.
                for inner_ind in pos_indices:
                    if ((inner_ind != (-1, -1) and self.W[inner_ind[0]][inner_ind[1]] == 1)
                        or outer_ind == inner_ind):
                        continue
                    if inner_ind == (-1, -1):
                        conj.append("(!" + self.prefix+"_n_n')")
                    else:
                        conj.append("(!" + self.__getitem__((inner_ind[0], inner_ind[1]), next=True, nonbool=nonbool)+")")
                disj.append("(" + " & ".join(conj) + ")")
            spec_trans.append("\n| ".join(disj))
        if nonbool:
            sys_vars = [self.prefix+"_r", self.prefix+"_c"]
            sys_domains = [(0, self.W.shape[0]-1), (0, self.W.shape[1]-1)]
        else:
            sys_vars = []
            for i in range(row_low, row_high):
                for j in range(col_low, col_high):
                    sys_vars.append(self.__getitem__((i,j), nonbool=nonbool))
            sys_domains = None # Default to boolean
        if nonbool:
            initspec = [self.__getitem__(loc, nonbool=nonbool) for loc in self.init_list]
        else:
            initspec = []
            for loc in self.init_list:
                # Initial condition with mutex over all cell variables.
                mutex = [self.__getitem__((loc[0],loc[1]), nonbool=nonbool)]
                mutex.extend(["!"+ovar for ovar in sys_vars if ovar != self.__getitem__(loc, nonbool=nonbool)])
                initspec.append("(" + " & ".join(mutex) + ")")
        init_str = " | ".join(initspec)
        spec_goal = []
        for loc in self.goal_list:
            spec_goal.append(self.__getitem__(loc, nonbool=nonbool))
        self.offset = orig_offset
        if controlled_dyn:
            return GRSpec(sys_vars=sys_vars, sys_domains=sys_domains,
                          sys_init=init_str, sys_safety=spec_trans, sys_prog=spec_goal)
        else:
            return GRSpec(env_vars=sys_vars, env_domains=sys_domains,
                          env_init=init_str, env_safety=spec_trans, env_prog=spec_goal)
def scale(self, xf=1, yf=1):
"""Return a new gridworld equivalent to this but scaled by integer
factor (xf, yf). In the new world, obstacles are increased in size but
initials and goals change their position only. If this world is of size
(h, w) then the returned world will be of size (h*yf, w*xf).
@param xf: integer scaling factor for rows
@param yf: integer scaling factor for columns
@rtype: L{GridWorld}
"""
shape_scaled = (self.W.shape[0]*yf, self.W.shape[1]*xf)
scaleW = np.zeros(shape_scaled, dtype=np.int32)
scale_goal = []
scale_init = []
for row in range(shape_scaled[0]):
for col in range(shape_scaled[1]):
(y,x) = (row/yf, col/xf)
(yr, xr) = (row % yf, col % xf)
if self.W[y,x] == 1:
scaleW[row, col] = 1
if (yr, xr) == (0, 0):
if (y,x) in self.goal_list:
scale_goal.append((row,col))
if (y,x) in self.init_list:
scale_init.append((row,col))
scale_gw = GridWorld(prefix=self.prefix)
scale_gw.W = scaleW
scale_gw.goal_list = scale_goal
scale_gw.init_list = scale_init
return scale_gw
def place_features(W, n):
    """Choose n distinct empty indices of 1D array W at random.

    @param W: 1-d array; entries equal to 0 are considered empty.
    @param n: number of features to place.

    @return: array of n indices into W, drawn uniformly at random
             without replacement from the empty positions.

    @raises ValueError: if fewer than n empty positions exist.  (The
        original caught IndexError, but slicing an array shorter than n
        never raises it, so impossible requests were silently truncated
        instead of reported.)
    """
    avail_inds = np.arange(W.size)[W==0]
    if len(avail_inds) < n:
        raise ValueError("Unable to place features: no empty space left")
    np.random.shuffle(avail_inds)
    return avail_inds[:n]
def world_from_1D(W, size, goal_list, init_list, prefix="Y"):
    """Assemble a GridWorld from a flattened (row-major) occupancy array.

    @param W: 1-d array of cell values.
    @param size: (num_rows, num_cols) target shape.
    @param goal_list: goal cells, as flat row-major indices into W.
    @param init_list: initial cells, as flat row-major indices into W.
    @param prefix: String to be used as prefix for naming gridworld
             cell variables.

    @rtype: L{GridWorld}
    """
    W = W.reshape(size)
    # Floor division: the original "/" relied on Python 2 integer division
    # and would produce float coordinates under Python 3.
    row_col = lambda k: (k//size[1], k % size[1])
    goal_list = [row_col(k) for k in goal_list]
    init_list = [row_col(k) for k in init_list]
    gw = GridWorld(prefix=prefix)
    gw.W = W
    gw.goal_list = goal_list
    gw.init_list = init_list
    return gw
class MGridWorld(GridWorld):
    """Gridworld with support for models of moving obstacles.
    """
    def __init__(self, gw_desc=None, prefix="Y"):
        """(See documentation for L{GridWorld.__init__}.)

        The first argument can be an instance of GridWorld from which
        a new instance of MGridWorld should be built.  In this case,
        the prefix argument is ignored.
        """
        if isinstance(gw_desc, GridWorld):
            # Round-trip through the description string; since self is an
            # MGridWorld, GridWorld.__init__ ends up invoking our loads(),
            # which also initializes troll_list.
            GridWorld.__init__(self, gw_desc=gw_desc.dumps(), prefix=gw_desc.prefix)
        else:
            GridWorld.__init__(self, gw_desc=None, prefix=prefix)
            self.troll_list = []
            if gw_desc is not None:
                self.loads(gw_desc)
    def __eq__(self, other):
        """Test for equality.

        Does not compare prefixes of cell variable names.
        """
        if not GridWorld.__eq__(self, other) or self.troll_list != other.troll_list:
            return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def __str__(self):
        return self.pretty(show_grid=True)
    def pretty(self, show_grid=False, line_prefix="", path=[], goal_order=False):
        """Wrap L{GridWorld.pretty}, using troll_list of this object.
        """
        return GridWorld.pretty(self, show_grid=show_grid, line_prefix=line_prefix, path=path, goal_order=goal_order, troll_list=self.troll_list)
    def plot(self, font_pt=18, show_grid=False, grid_width=2):
        """Wrap L{GridWorld.plot}, using troll_list of this object.
        """
        return GridWorld.plot(self, font_pt=font_pt, show_grid=show_grid, grid_width=grid_width, troll_list=self.troll_list)
    def loads(self, gw_desc):
        """Reincarnate using obstacle-annotated gridworld description string.

        Cf. L{GridWorld.loads}.  The core description string format is
        extended to support the following:

          - C{E} : a base cell to which a troll must always return;
                   default radius is 1.
        """
        W = None
        init_list = []
        goal_list = []
        troll_list = []
        row_index = -1  # -1 until the size line has been parsed.
        for line in gw_desc.splitlines():
            if row_index != -1:
                # Size has been read, so we are processing row definitions
                if row_index >= W.shape[0]:
                    break
                for j in range(min(len(line), W.shape[1])):
                    if line[j] == " ":
                        W[row_index][j] = 0
                    elif line[j] == "*":
                        W[row_index][j] = 1
                    elif line[j] == "I":
                        init_list.append((row_index, j))
                    elif line[j] == "G":
                        goal_list.append((row_index, j))
                    elif line[j] == "E":
                        # Troll base cell, recorded with the default radius 1.
                        troll_list.append(((row_index, j), 1))
                    else:
                        raise ValueError("unrecognized row symbol \""+str(line[j])+"\".")
                row_index += 1
            else:
                # Still looking for gridworld size in the given string
                if len(line.strip()) == 0 or line.lstrip()[0] == "#":
                    continue # Ignore blank and comment lines
                line_el = line.split()
                W = np.zeros((int(line_el[0]), int(line_el[1])),
                             dtype=np.int32)
                row_index = 0
        if W is None:
            raise ValueError("malformed gridworld description.")
        # Arrived here without errors, so actually reincarnate
        self.W = W
        self.init_list = init_list
        self.goal_list = goal_list
        self.troll_list = troll_list
    def dumps(self, line_prefix=""):
        """Dump obstacle-annotated gridworld description string

        Cf. L{loads} of this class and L{GridWorld.dumps}.
        """
        if self.W is None:
            raise ValueError("Gridworld does not exist.")
        out_str = line_prefix+" ".join([str(i) for i in self.W.shape])+"\n"
        for i in range(self.W.shape[0]):
            out_str += line_prefix
            for j in range(self.W.shape[1]):
                if self.W[i][j] == 0:
                    if (i,j) in self.init_list:
                        out_str += "I"
                    elif (i,j) in self.goal_list:
                        out_str += "G"
                    elif ((i,j),1) in self.troll_list:
                        # NOTE(review): only radius-1 trolls match this test,
                        # so trolls with other radii are silently omitted
                        # from the dumped description.
                        out_str += "E"
                    else:
                        out_str += " "
                elif self.W[i][j] == 1:
                    out_str += "*"
                else:
                    raise ValueError("Unrecognized internal world W encoding.")
            out_str += "\n"
        return out_str
    def mspec(self, troll_prefix="X"):
        """Specification covering this gridworld and its trolls.

        Cf. L{GridWorld.spec} and L{add_trolls}.
        """
        return add_trolls(self, self.troll_list, get_moves_lists=False, prefix=troll_prefix)
class CGridWorld(GridWorld):
    """Gridworld with intrinsic relation to continuous space partition.
    """
    def __init__(self, gw_desc=None, prefix="Y",
                 side_lengths=(1., 1.), offset=(0., 0.)):
        """(See documentation for L{GridWorld.__init__} and L{GridWorld.dumpPPartition}.)"""
        GridWorld.__init__(self, gw_desc=gw_desc, prefix=prefix)
        # The associated partition always uses boolean cell naming here.
        self.part = self.dumpPPartition(side_lengths=side_lengths, offset=offset, nonbool=False)
        self.side_lengths = copy.copy(side_lengths)
        # Replaces the (0, 0) tuple set by GridWorld.__init__ with a float
        # array giving the grid's position in continuous space.
        self.offset = np.array(offset, dtype=np.float64)
    def __copy__(self):
        return self.copy()
    def copy(self):
        # Copy the discrete part via GridWorld, then duplicate the partition.
        Y = GridWorld.copy(self)
        Y.part = self.part.copy()
        return Y
    def remap(self, side_lengths=(1., 1.), offset=(0., 0.)):
        """Change associated continuous space partition.
        """
        self.part = self.dumpPPartition(side_lengths=side_lengths, offset=offset, nonbool=False)
    def get_cell(self, x):
        """Return discrete coordinate (i,j) of cell that contains x.

        ...or None if x is outside the gridworld.
        """
        if not (isinstance(x, np.ndarray) and
                len(x.shape) == 1 and x.shape[0] == 2):
            raise TypeError("continuous state must be 2-d vector.")
        for r in self.part.list_region:
            # Assume there is only one polytope per region
            if pc.is_inside(r.list_poly[0], x):
                # ...and only one symbol associated with it
                (prefix, i, j) = extract_coord(self.part.list_prop_symbol[r.list_prop.index(1)], nonbool=False)
                return (i,j)
        return None
    def get_bbox(self, coord):
        """Return bounding box for cell with given discrete coordinate.

        @param coord: (row, column) pair; supports negative indices.

        @return: L{numpy.ndarray} (size 2 by 2), first row is the
                 lower-left point, second row is the upper-right point.
        """
        if self.W is None:
            raise ValueError("Gridworld is empty; no cells exist.")
        if len(coord) != len(self.W.shape):
            raise ValueError("malformed gridworld coord.")
        # lower-left; row 0 is at the top, hence the shape[0]-... flip.
        coord = (coord[0]%self.W.shape[0], coord[1]%self.W.shape[1])
        ll = np.array([float(coord[1])*self.side_lengths[0],
                       float(self.W.shape[0]-(coord[0]+1))*self.side_lengths[1]])
        # upper-right
        ur = np.array([float(coord[1]+1)*self.side_lengths[0],
                       float(self.W.shape[0]-coord[0])*self.side_lengths[1]])
        return np.array([self.offset+ll, self.offset+ur])
    def get_ccenter(self, coord):
        """Get continuous position center for cell from discrete coordinate.

        ...merely a convenience wrapper using get_bbox()
        """
        return np.mean(self.get_bbox(coord), axis=0)
def random_world(size, wall_density=.2, num_init=1, num_goals=2, prefix="Y",
                 ensure_feasible=False, timeout=None,
                 num_trolls=0):
    """Generate random gridworld of given size.

    While an instance of GridWorld is returned, other views of the
    result are possible; e.g., to obtain a description string, use
    L{GridWorld.dumps}.

    @param size: a pair, indicating number of rows and columns.
    @param wall_density: the ratio of walls to total number of cells.
    @param num_init: number of possible initial positions.
    @param num_goals: number of positions to be visited infinitely often.
    @param prefix: string to be used as prefix for naming gridworld
             cell variables.
    @param num_trolls: number of random trolls to generate, each
             occupies an area of radius 1.  If nonzero, then an
             instance of MGridWorld will be returned.
    @param ensure_feasible: guarantee that all goals and initial
             positions are mutually reachable, assuming a 4-connected
             grid.  This method may not be complete, i.e., may fail to
             return a feasible random gridworld with the given
             parameters.  Note that "feasibility" does not account for
             nondeterminism (in particular, nonzero num_trolls
             argument has no effect.)
    @param timeout: if ensure_feasible, then quit if no correct random
             world is found before timeout seconds.  If timeout is
             None (default), then do not impose time constraints.

    @rtype: L{GridWorld}, or None if timeout occurs.
    """
    if ensure_feasible and timeout is not None:
        st = time.time()
    num_cells = size[0]*size[1]
    goal_list = []
    init_list = []
    troll_list = []
    # Work on a flattened world; cells are indexed 0..num_cells-1 and
    # converted back to (row, col) coordinates at the end.
    W = np.zeros(num_cells, dtype=np.int32)
    num_blocks = int(np.round(wall_density*num_cells))
    # Draw goals, then initial positions, then troll centers, each from
    # the still-empty cells and without colliding with earlier draws.
    for i in range(num_goals):
        avail_inds = np.array(range(num_cells))[W==0]
        avail_inds = [k for k in avail_inds if k not in goal_list]
        goal_list.append(avail_inds[np.random.randint(low=0, high=len(avail_inds))])
    for i in range(num_init):
        avail_inds = np.array(range(num_cells))[W==0]
        avail_inds = [k for k in avail_inds if k not in goal_list and k not in init_list]
        init_list.append(avail_inds[np.random.randint(low=0, high=len(avail_inds))])
    for i in range(num_trolls):
        avail_inds = np.array(range(num_cells))[W==0]
        avail_inds = [k for k in avail_inds if k not in goal_list and k not in init_list and k not in troll_list]
        troll_list.append(avail_inds[np.random.randint(low=0, high=len(avail_inds))])
    bcounter = 0
    while bcounter < num_blocks:  # Add blocks (or "wall cells")
        avail_inds = np.array(range(num_cells))[W==0]
        avail_inds = [k for k in avail_inds if k not in goal_list and k not in init_list and k not in troll_list]
        changed_index = np.random.randint(low=0, high=len(avail_inds))
        W[avail_inds[changed_index]] = 1
        bcounter += 1
        if ensure_feasible:
            if (timeout is not None) and (time.time()-st > timeout):
                return None
            # If feasibility must be guaranteed, then check whether
            # the newly unreachable cell is permissible.
            # NB: use explicit floor division so cell coordinates stay
            # integers under both Python 2 and Python 3.
            W_tmp = W.reshape(size)
            goal_list_tmp = [(k//size[1], k%size[1]) for k in goal_list]
            init_list_tmp = [(k//size[1], k%size[1]) for k in init_list]
            troll_list_tmp = [(k//size[1], k%size[1]) for k in troll_list]
            world = GridWorld(prefix=prefix)
            world.W = W_tmp
            # Every init and goal must be mutually reachable: walk the
            # cyclic chain init -> ... -> goal -> ... -> init.
            chain_of_points = init_list_tmp[:]
            chain_of_points.extend(goal_list_tmp)
            is_feasible = True
            for i in range(len(chain_of_points)):
                if not world.isReachable(chain_of_points[i], chain_of_points[(i+1)%len(chain_of_points)]):
                    is_feasible = False
                    break
            if not is_feasible:
                # Undo the last wall and try again.
                W[avail_inds[changed_index]] = 0
                bcounter -= 1
    # Reshape the gridworld to final form; build and return the result.
    W = W.reshape(size)
    goal_list = [(k//size[1], k%size[1]) for k in goal_list]
    init_list = [(k//size[1], k%size[1]) for k in init_list]
    troll_list = [((k//size[1], k%size[1]), 1) for k in troll_list]
    world = GridWorld(prefix=prefix)
    world.W = W
    world.goal_list = goal_list
    world.init_list = init_list
    if num_trolls > 0:
        world = MGridWorld(world)
        world.troll_list = troll_list
    return world
def narrow_passage(size, passage_width=1, num_init=1, num_goals=2,
                   passage_length=0.4, ptop=None, prefix="Y"):
    """Generate a narrow-passage world: this is a world containing
    two zones (initial, final) with a tube connecting them.

    @param size: a pair, indicating number of rows and columns.
    @param passage_width: the width of the connecting passage in cells.
    @param passage_length: the length of the passage as a proportion of the
                           width of the world.
    @param num_init: number of possible initial positions.
    @param num_goals: number of positions to be visited infinitely often.
    @param ptop: row number of top of passage, default (None) is random
    @param prefix: string to be used as prefix for naming gridworld
                   cell variables.

    @rtype: L{GridWorld}
    @raise ValueError: if the world is smaller than 3x3, or if a zone is
        too small to hold the requested initial/goal positions.
    """
    (w, h) = size
    if w < 3 or h < 3:
        raise ValueError("Gridworld too small: minimum dimension 3")
    Z = unoccupied(size, prefix)
    # Zone width is 30% of world width by default
    zone_width = ((1.0-passage_length)/2.0)*size[1]
    izone = int(max(1, zone_width))  # boundary of left zone
    gzone = size[1] - int(max(1, zone_width))  # boundary of right zone
    # Both zones span izone columns (left: 0..izone-1, right: gzone..size[1]-1),
    # so capacity for goals is also izone * rows.  Previously this check used
    # gzone, which over-estimated the goal zone and let random.sample fail
    # later with a less informative error.
    if izone * size[0] < num_init or izone * size[0] < num_goals:
        raise ValueError("Too many initials/goals for grid size")
    if ptop is None:
        ptop = np.random.randint(0, size[0]-passage_width)
    passage = range(ptop, ptop+passage_width)
    # Fill everything between the two zones with walls, except the passage rows.
    for y in range(0, size[0]):
        if y not in passage:
            for x in range(izone, gzone):
                Z.W[y][x] = 1
    avail_cells = [(y,x) for y in range(size[0]) for x in range(izone)]
    Z.init_list = random.sample(avail_cells, num_init)
    avail_cells = [(y,x) for y in range(size[0]) for x in range(gzone, size[1])]
    Z.goal_list = random.sample(avail_cells, num_goals)
    return Z
def add_trolls(Y, troll_list, prefix="X", start_anywhere=False, nonbool=True,
               get_moves_lists=True):
    """Create GR(1) specification with troll-like obstacles.

    Trolls are introduced into the specification with names derived
    from the given prefix and a number (matching the order in
    troll_list). Note that mutual exclusion is only between the
    controlled "Y gridworld" position and each troll, but not
    between trolls.

    @type Y: L{GridWorld}
    @param Y: The controlled gridworld, describing in particular
             static obstacles that must be respected by the trolls.
    @param troll_list: List of pairs of center position, to which the
             troll must always eventually return, and radius defining
             the extent of the trollspace.  The radius is measured
             using infinity-norm.
    @param start_anywhere: If True, then initial troll position can be
             anywhere in its trollspace.  Else (default), the troll is
             assumed to begin each game at its center position.
    @param nonbool: If True, then use gr1c support for nonboolean
             variable domains.
    @param get_moves_lists: Consult returned value description below.

    @rtype: (L{GRSpec}, list)
    @return: If get_moves_lists is True, then returns (spec, moves_N)
             where spec is the specification incorporating all of the
             trolls, and moves_N is a list of lists of states (where
             "state" is given as a dictionary with keys of variable
             names), where the length of moves_N is equal to the
             number of trolls, and each element of moves_N is a list
             of possible states of that the corresponding troll
             (dynamic obstacle).  If get_moves_lists is False, then
             moves_N is not returned and not computed.
    """
    # X holds (offset, subworld) pairs, one per troll.
    X = []
    X_ID = -1
    if get_moves_lists:
        moves_N = []
    (num_rows, num_cols) = Y.size()
    for (center, radius) in troll_list:
        if center[0] >= num_rows or center[0] < 0 or center[1] >= num_cols or center[1] < 0:
            raise ValueError("troll center is outside of gridworld")
        # Clip the troll's square neighborhood (infinity-norm ball of the
        # given radius) to the boundaries of the world.
        t_offset = (max(0, center[0]-radius), max(0, center[1]-radius))
        t_size = [center[0]-t_offset[0]+radius+1, center[1]-t_offset[1]+radius+1]
        if t_offset[0]+t_size[0] >= num_rows:
            t_size[0] = num_rows-t_offset[0]
        if t_offset[1]+t_size[1] >= num_cols:
            t_size[1] = num_cols-t_offset[1]
        t_size = (t_size[0], t_size[1])
        X_ID += 1
        # The troll lives in a subworld extracted from Y, named
        # "<prefix>_<index>"; coordinates are local to that subworld.
        X.append((t_offset, Y.dumpsubworld(t_size, offset=t_offset, prefix=prefix+"_"+str(X_ID))))
        # The troll must always eventually return to its center.
        X[-1][1].goal_list = [(center[0]-t_offset[0], center[1]-t_offset[1])]
        if start_anywhere:
            # Any empty cell of the trollspace is a legal start.
            X[-1][1].init_list = []
            for i in range(X[-1][1].size()[0]):
                for j in range(X[-1][1].size()[1]):
                    if X[-1][1].isEmpty((i,j)):
                        X[-1][1].init_list.append((i,j))
        else:
            X[-1][1].init_list = [(center[0]-t_offset[0], center[1]-t_offset[1])]
        if get_moves_lists:
            # Enumerate every state the troll can occupy (global coords).
            moves_N.append([])
            for i in range(t_size[0]):
                for j in range(t_size[1]):
                    moves_N[-1].append(X[-1][1].state((i,j), offset=t_offset, nonbool=nonbool))
    spec = GRSpec()
    spec.importGridWorld(Y, controlled_dyn=True, nonbool=nonbool)
    for Xi in X:
        spec.importGridWorld(Xi[1], offset=(-Xi[0][0], -Xi[0][1]), controlled_dyn=False, nonbool=nonbool)
    # Mutual exclusion
    for i in range(Y.size()[0]):
        for j in range(Y.size()[1]):
            for Xi in X:
                # Only cells inside this troll's subworld need a constraint.
                if i >= Xi[0][0] and i < Xi[0][0]+Xi[1].size()[0] and j >= Xi[0][1] and j < Xi[0][1]+Xi[1].size()[1]:
                    if nonbool:
                        Xivar = "(("+Xi[1].prefix+"_r' = "+str(i-Xi[0][0])+") & ("+Xi[1].prefix+"_c' = "+str(j-Xi[0][1])+"))"
                    else:
                        Xivar = Xi[1].prefix+"_"+str(i)+"_"+str(j)+"'"
                    # Forbid controlled robot and troll from occupying the
                    # same cell in the next time step.
                    spec.sys_safety.append("!("+Y.__getitem__((i,j), nonbool=nonbool, next=True)+" & "+Xivar+")")
    if get_moves_lists:
        return (spec, moves_N)
    return spec
def unoccupied(size, prefix="Y"):
    """Generate entirely unoccupied gridworld of given size.

    @param size: a pair, indicating number of rows and columns.
    @param prefix: String to be used as prefix for naming gridworld
             cell variables.

    @rtype: L{GridWorld}
    @raise TypeError: if size does not have at least two elements.
    """
    if len(size) < 2:
        raise TypeError("invalid gridworld size.")
    # Pass the caller's prefix through; previously prefix="Y" was
    # hard-coded here, silently ignoring the prefix argument.
    return GridWorld(str(size[0])+" "+str(size[1]), prefix=prefix)
def extract_coord(subf, nonbool=True):
    """Extract (prefix,row,column) tuple from given subformula.

    If nonbool is False, then assume prefix_R_C format.  prefix is of
    type string; row and column are integers.  The "nowhere" coordinate
    has form prefix_n_n. To indicate this, (-1, -1) is returned as the
    row, column position.

    If nonbool is True (default), then assume
    C{((prefix_r = R) & (prefix_c = C))} format.

    Also consult L{GridWorld.__getitem__} and L{GridWorld.spec}.

    If error, return None or throw exception.
    """
    if not isinstance(subf, str):
        raise TypeError("extract_coord: invalid argument type; must be string.")
    if nonbool:
        # Split on "=" and peel parentheses/whitespace off each fragment.
        frags = [piece.strip().strip(")(").strip() for piece in subf.split("=")]
        if len(frags) != 3:
            return None
        row_first = frags[0].endswith("_r") and frags[1].endswith("_c")
        col_first = frags[0].endswith("_c") and frags[1].endswith("_r")
        if not (row_first or col_first):
            return None
        prefix = frags[0][:frags[0].rfind("_")]
        # The middle fragment carries the first value followed by the
        # second variable name; cut it at the closing parenthesis.
        first_val = int(frags[1][:frags[1].find(")")])
        second_val = int(frags[2])
        if row_first:
            return (prefix, first_val, second_val)
        return (prefix, second_val, first_val)
    else:
        parts = subf.split("_")
        if len(parts) < 3:
            return None
        base = "_".join(parts[:-2])
        if parts[-2:] == ["n", "n"]:
            # Special "nowhere" case
            return (base, -1, -1)
        try:
            return (base, int(parts[-2]), int(parts[-1]))
        except ValueError:
            return None
def prefix_filt(d, prefix):
    """Return all items in dictionary d whose (string) key starts with prefix."""
    return dict((key, value) for key, value in d.items()
                if isinstance(key, str) and key.startswith(prefix))
def extract_path(aut, prefix=None):
    """Extract a path from a gridworld automaton

    Starting from the node with ID 0, follow the first successor of each
    node until a dead end or a previously visited node (loop) is found,
    collecting the (row, column) coordinate encoded in each node's true
    state variables.  Leading None entries (steps before the first
    decoded coordinate) are back-filled with the first real coordinate.

    @param aut: automaton whose nodes carry a "state" dict mapping
        variable names to truth values.  NOTE(review): successors are
        accessed as C{aut.successors(n)[0]} -- this assumes the API
        returns an indexable list (networkx < 2.0 style); confirm.
    @param prefix: if given, only variables starting with this prefix
        are considered.

    @rtype: list of (row, column) pairs; empty list if no coordinate
        could be decoded at all.
    """
    n = 0 # Node with ID of 0
    last = None
    path = []
    visited = [0]
    while 1:
        updated = False
        for p in aut.node[n]["state"]:
            if (not prefix or p.startswith(prefix)) and aut.node[n]["state"][p]:
                # NOTE(review): bare except silently drops any failure in
                # extract_coord (it raises TypeError for non-strings);
                # consider narrowing to except (TypeError, ValueError).
                try:
                    c = extract_coord(p, nonbool=False)
                    if c:
                        # Drop the prefix; keep only (row, column).
                        path.append(c[1:])
                        last = c[1:]
                        updated = True
                except:
                    pass
        if not updated:
            # Robot has not moved, even out path lengths
            path.append(last)
        # next state
        if len(aut.successors(n)) > 0:
            if aut.successors(n)[0] in visited:
                # loop detected
                break
            visited.append(aut.successors(n)[0])
            n = aut.successors(n)[0]
        else:
            # dead-end, return
            break
    # Back-fill leading None entries with the first decoded coordinate.
    try:
        first = [ x for x in path if x ][0]
    except IndexError:
        return []
    for i in range(len(path)):
        if path[i] is None:
            path[i] = first
        else:
            break
    return path
def verify_path(W, path, seq=False):
    """Check that path visits all goals of gridworld W and avoids obstacles.

    @param W: gridworld providing C{goal_list} and C{isEmpty}.
    @param path: list of (row, column) coordinates.
    @param seq: if True, goals must be visited in the order given by
        C{W.goal_list}; touching a later goal out of order is a failure.

    @rtype: bool
    """
    goals = W.goal_list[:]
    if seq:
        # Check if path visits all goals in gridworld W in the correct order
        for p in path:
            if not goals: break
            if goals[0] == p:
                del(goals[0])
            elif p in goals:
                # Visited a goal out of order.
                return False
        if goals:
            return False
    else:
        # Check if path visits all goals (in any order).
        for g in goals:
            if not g in path:
                print("Path does not visit goal " + str(g))
                return False
    # Ensure that path does not intersect an obstacle
    for p in path:
        if not W.isEmpty(p):
            print("Path intersects obstacle at " + str(p))
            return False
    return True
def verify_mutex(paths):
    """Check that a set of equal-length paths is mutually exclusive.

    At every time step, no two paths may occupy the same coordinate.

    @param paths: list of paths, each a list of coordinates; all paths
        must have the same length.  An empty list is trivially valid.

    @rtype: bool
    """
    if not paths:
        # No paths: nothing can collide.
        return True
    # sanity check - all paths same length
    if not all(len(p) == len(paths[0]) for p in paths):
        # Previously this message was assigned to a local and never shown;
        # print it for parity with verify_path's diagnostics.
        print("Paths are different lengths")
        return False
    for t in zip(*paths):
        # Coordinates in each time-step tuple must be unique.
        if len(set(t)) != len(t):
            print("Non-unique coordinates in tuple " + str(t))
            return False
    return True
def animate_paths(Z, paths, jitter=0.0, save_prefix=None):
    """Animate a list of paths simultaneously in world Z using matplotlib.

    @param Z: Gridworld for which paths were generated.
    @param paths: List of paths to animate (one per robot).
    @param jitter: Random jitter added to each coordinate value in animation.
                   Makes the robot's path more visible by avoiding overlap.
    @param save_prefix: If not None, do not show an animation but produce a
                        series of images "<save_prefix>nnn.png" which can be
                        compiled into an animated GIF.
    """
    # One color per robot; NOTE(review): more than 7 paths overruns this
    # palette and raises IndexError -- confirm callers never exceed it.
    colors = 'rgbcmyk'
    fig = plt.figure()
    ax = fig.add_subplot(111)
    Z.plot(font_pt=min(288/Z.W.shape[1], 48), show_grid=True)
    def update_line(num, dlist, lines):
        # Frame callback: marker at the current position, trail up to it.
        for (p,t), d in zip(lines, dlist):
            t.set_data(d[...,:num+1])
            p.set_data(d[...,num])
        if save_prefix:
            fig.savefig(save_prefix + "%03d.png" % num)
        return lines,
    data = []
    lines = []
    for n,path in enumerate(paths):
        # Paths are (row, col); swap to (x, y) for plotting.
        arr = np.array([[x,y] for (y,x) in path]).transpose()
        arr = np.add(arr, jitter*(np.random.rand(*arr.shape) - 0.5))
        data.append(arr)
        # Marker for the robot (zorder above its trail line).
        l, = ax.plot([], [], 'o', color=colors[n], markersize=10.0, zorder=2)
        l_trail, = ax.plot([], [], '-', color=colors[n], zorder=1)
        lines.append((l, l_trail))
    if not save_prefix:
        # Interactive mode: one frame every 500 ms.
        ani = anim.FuncAnimation(fig, update_line, len(paths[0]), fargs=(data,lines),
                                 interval=500)
        plt.show()
    else:
        # Batch mode: render every frame to a numbered PNG.
        print "Writing %s000.png - %s%03d.png" % (save_prefix, save_prefix, len(paths[0]))
        for n in range(len(paths[0])):
            update_line(n, data, lines)
def compress_paths(paths):
    """Remove insignificant path-element tuples from a path list

    Given a list of paths [[p11, p12, ..., p1n], [p21, p22, ..., p2n], ...]
    a path-element tuple (p1k, p2k, ...) is insignificant if p1k = p1(k+1),
    p2k = p2(k+1), ...; (p1n, p2n, ...) is always significant.

    @param paths: A list of paths, where each path is a list of tuples, each
                  representing a coordinate in the world.

    @rtype: list of lists of (x,y) tuples
    """
    # Materialize the transposed view: under Python 3, zip() returns an
    # iterator, so the original `pzip == []`, len(pzip) and pzip[n] all
    # misbehaved; list() is a no-op change under Python 2.
    pzip = list(zip(*paths))
    if pzip == []: return []
    acc = []
    # Keep a time-step tuple only if it differs from its successor.
    for n in range(len(pzip)-1):
        if not pzip[n] == pzip[n+1]:
            acc.append(pzip[n])
    # The final tuple is always significant.
    acc.append(pzip[-1])
    return list(zip(*acc))
| pombredanne/nTLP | tulip/gridworld.py | Python | bsd-3-clause | 65,320 | [
"VisIt"
] | aef20d08bca1a7c738ea87461649160d63e3140b358dad9f301348ee878a453d |
""" Runs through the whole range of PWM Duty Cycles
For the complete modbus map, visit the Modbus support page:
http://labjack.com/support/Modbus
Note: Low-level commands that are commented out work only for U6/U3. UE9 is a
little more complicated.
"""
import u3, u6, ue9
from time import sleep
# Open the LabJack. Comment out all but one of these:
d = ue9.UE9()
#d = u3.U3()
#d = u6.U6()
if d.devType == 9:
# Set the timer clock to be the system clock with a given divisor
d.writeRegister(7000, 1)
d.writeRegister(7002, 15)
else:
# Set the timer clock to be 4 MHz with a given divisor
#d.configTimerClock( TimerClockBase = 4, TimerClockDivisor = 15)
d.writeRegister(7000, 4)
d.writeRegister(7002, 15)
# Enable the timer
#d.configIO( NumberTimersEnabled = 1 )
d.writeRegister(50501, 1)
# Configure the timer for PWM, starting with a duty cycle of 0.0015%.
baseValue = 65535
#d.getFeedback( u6.Timer0Config(TimerMode = 0, Value = baseValue) )
d.writeRegister(7100, [0, baseValue])
# Loop, updating the duty cycle every time.
for i in range(65):
currentValue = baseValue - (i * 1000)
dutyCycle = ( float(65536 - currentValue) / 65535 ) * 100
print "Duty Cycle = %s%%" % dutyCycle
#d.getFeedback( u6.Timer0( Value = currentValue, UpdateReset = True ) )
d.writeRegister(7200, currentValue)
sleep(0.3)
print "Duty Cycle = 100%"
#d.getFeedback( u6.Timer0( Value = 0, UpdateReset = True ) )
d.writeRegister(7200, 0)
# Close the device.
d.close | bmazin/SDR | DataReadout/ReadoutControls/lib/LabJackPython-8-26-2011/Examples/PWM-looping.py | Python | gpl-2.0 | 1,507 | [
"VisIt"
] | 29ab1d33cd6d416b7c7ba4350bf690373eee3eebf96291b9fada3bf4d3ecc1ec |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Tools for the submission of Tasks."""
from __future__ import unicode_literals, division, print_function
import os
import time
import ruamel.yaml as yaml
import pickle
from collections import deque
from datetime import timedelta
from six.moves import cStringIO
from monty.io import get_open_fds
from monty.string import boxed, is_string
from monty.os.path import which
from monty.collections import AttrDict, dict2namedtuple
from monty.termcolor import cprint
from .utils import as_bool, File, Directory
from . import qutils as qu
from pymatgen.util.io_utils import ask_yesno
try:
import apscheduler
has_apscheduler = True
has_sched_v3 = apscheduler.version >= "3.0.0"
except ImportError:
has_apscheduler = False
import logging
logger = logging.getLogger(__name__)
__all__ = [
"ScriptEditor",
"PyLauncher",
"PyFlowScheduler",
]
def straceback():
    """Return a string with the traceback of the exception being handled."""
    from traceback import format_exc
    return format_exc()
class ScriptEditor(object):
    """Simple editor that simplifies the writing of shell scripts"""
    _shell = '/bin/bash'

    def __init__(self):
        # Buffer holding the lines of the script, in insertion order.
        self._lines = []

    @property
    def shell(self):
        """Shell interpreter used in the shebang line."""
        return self._shell

    def _add(self, text, pre=""):
        # Accept either a single string or a list of strings,
        # prefixing every added line with `pre`.
        if is_string(text):
            self._lines.append(pre + text)
        else:
            self._lines.extend([pre + t for t in text])

    def reset(self):
        """Reset the editor, discarding all lines added so far."""
        # Re-initialize the buffer instead of deleting the attribute:
        # the previous `del self._lines` left the editor unusable because
        # any subsequent call to _add raised AttributeError.
        self._lines = []

    def shebang(self):
        """Adds the shebang line."""
        self._lines.append('#!' + self.shell)

    def declare_var(self, key, val):
        """Declare a env variable. If val is None the variable is unset."""
        if val is not None:
            line = "export " + key + '=' + str(val)
        else:
            line = "unset " + key

        self._add(line)

    def declare_vars(self, d):
        """Declare the variables defined in the dictionary d."""
        for k, v in d.items():
            self.declare_var(k, v)

    def export_envar(self, key, val):
        """Export an environment variable."""
        line = "export " + key + "=" + str(val)
        self._add(line)

    def export_envars(self, env):
        """Export the environment variables contained in the dict env."""
        for k, v in env.items():
            self.export_envar(k, v)

    def add_emptyline(self):
        """Add an empty line."""
        self._add("", pre="")

    def add_comment(self, comment):
        """Add a comment"""
        self._add(comment, pre="# ")

    def load_modules(self, modules):
        """Load the list of specified modules."""
        for module in modules:
            self.load_module(module)

    def load_module(self, module):
        """Load a single module; stderr is appended to mods.err."""
        self._add('module load ' + module + " 2>> mods.err")

    def add_line(self, line):
        """Add a single line to the script."""
        self._add(line)

    def add_lines(self, lines):
        """Add a list of lines to the script."""
        self._add(lines)

    def get_script_str(self, reset=True):
        """Returns a string with the script and reset the editor if reset is True"""
        s = "\n".join(l for l in self._lines)
        if reset:
            self.reset()
        return s
class PyLauncherError(Exception):
    """Exceptions raised by :class:`PyLauncher`."""
class PyLauncher(object):
    """This object handle the submission of the tasks contained in a :class:`Flow`"""
    Error = PyLauncherError

    def __init__(self, flow, **kwargs):
        """
        Initialize the object

        Args:
            flow: :class:`Flow` object
            max_njobs_inqueue: The launcher will stop submitting jobs when the
                              number of jobs in the queue is >= Max number of jobs
        """
        self.flow = flow
        self.max_njobs_inqueue = kwargs.get("max_njobs_inqueue", 200)

        #self.flow.check_pid_file()

    def single_shot(self):
        """
        Run the first :class:`Task` than is ready for execution.

        Returns:
            Number of jobs launched.
        """
        num_launched = 0

        # Get the tasks that can be executed in each workflow.
        tasks = []
        for work in self.flow:
            try:
                task = work.fetch_task_to_run()

                if task is not None:
                    tasks.append(task)
                else:
                    # No task found, this usually happens when we have dependencies.
                    # Beware of possible deadlocks here!
                    logger.debug("No task to run! Possible deadlock")

            except StopIteration:
                logger.info("All tasks completed.")

        # Submit the tasks and update the database.
        # Note: only the first ready task is started (single shot).
        if tasks:
            tasks[0].start()
            num_launched += 1

            self.flow.pickle_dump()

        return num_launched

    def rapidfire(self, max_nlaunch=-1, max_loops=1, sleep_time=5):
        """
        Keeps submitting `Tasks` until we are out of jobs or no job is ready to run.

        Args:
            max_nlaunch: Maximum number of launches. default: no limit.
            max_loops: Maximum number of loops
            sleep_time: seconds to sleep between rapidfire loop iterations

        Returns:
            The number of tasks launched.
        """
        num_launched, do_exit, launched = 0, False, []

        for count in range(max_loops):
            if do_exit:
                break
            if count > 0:
                time.sleep(sleep_time)

            tasks = self.fetch_tasks_to_run()

            # I don't know why but we receive duplicated tasks.
            if any(task in launched for task in tasks):
                logger.critical("numtasks %d already in launched list:\n%s" % (len(tasks), launched))

            # Preventive test.
            tasks = [t for t in tasks if t not in launched]

            if not tasks:
                continue

            for task in tasks:
                fired = task.start()
                if fired:
                    launched.append(task)
                    num_launched += 1

                # Chained comparison: exit only when a positive limit is reached.
                if num_launched >= max_nlaunch > 0:
                    logger.info('num_launched >= max_nlaunch, going back to sleep')
                    do_exit = True
                    break

        # Update the database.
        self.flow.pickle_dump()

        return num_launched

    def fetch_tasks_to_run(self):
        """
        Return the list of tasks that can be submitted.
        Empty list if no task has been found.
        """
        tasks_to_run = []

        for work in self.flow:
            tasks_to_run.extend(work.fetch_alltasks_to_run())

        return tasks_to_run
class PyFlowSchedulerError(Exception):
    """Exceptions raised by :class:`PyFlowScheduler`."""
class PyFlowScheduler(object):
"""
This object schedules the submission of the tasks in a :class:`Flow`.
There are two types of errors that might occur during the execution of the jobs:
#. Python exceptions
#. Errors in the ab-initio code
Python exceptions are easy to detect and are usually due to a bug in the python code or random errors such as IOError.
The set of errors in the ab-initio is much much broader. It includes wrong input data, segmentation
faults, problems with the resource manager, etc. The flow tries to handle the most common cases
but there's still a lot of room for improvement.
Note, in particular, that `PyFlowScheduler` will shutdown automatically in the following cases:
#. The number of python exceptions is > max_num_pyexcs
#. The number of task errors (i.e. the number of tasks whose status is S_ERROR) is > max_num_abierrs
#. The number of jobs launched becomes greater than (`safety_ratio` * total_number_of_tasks).
#. The scheduler will send an email to the user (specified by `mailto`) every `remindme_s` seconds.
If the mail cannot be sent, the scheduler will shutdown automatically.
This check prevents the scheduler from being trapped in an infinite loop.
"""
# Configuration file.
YAML_FILE = "scheduler.yml"
USER_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")
Error = PyFlowSchedulerError
    @classmethod
    def autodoc(cls):
        """Return the option documentation extracted from __init__'s docstring."""
        i = cls.__init__.__doc__.index("Args:")
        # Skip the "Args:" marker itself (5 characters).
        return cls.__init__.__doc__[i+5:]
    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait (DEFAULT: 0).
            days: number of days to wait (DEFAULT: 0).
            hours: number of hours to wait (DEFAULT: 0).
            minutes: number of minutes to wait (DEFAULT: 0).
            seconds: number of seconds to wait (DEFAULT: 0).
            mailto: The scheduler will send an email to `mailto` every `remindme_s` seconds.
                (DEFAULT: None i.e. not used).
            verbose: (int) verbosity level. (DEFAULT: 0)
            use_dynamic_manager: "yes" if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. (DEFAULT: "no")
            max_njobs_inqueue: Limit on the number of jobs that can be present in the queue. (DEFAULT: 200)
            remindme_s: The scheduler will send an email to the user specified by `mailto` every `remindme_s` seconds.
                (int, DEFAULT: 1 day).
            max_num_pyexcs: The scheduler will exit if the number of python exceptions is > max_num_pyexcs
                (int, DEFAULT: 0)
            max_num_abierrs: The scheduler will exit if the number of errored tasks is > max_num_abierrs
                (int, DEFAULT: 0)
            safety_ratio: The scheduler will exits if the number of jobs launched becomes greater than
               `safety_ratio` * total_number_of_tasks_in_flow. (int, DEFAULT: 5)
            max_nlaunches: Maximum number of tasks launched in a single iteration of the scheduler.
                (DEFAULT: -1 i.e. no limit)
            debug: Debug level. Use 0 for production (int, DEFAULT: 0)
            fix_qcritical: "yes" if the launcher should try to fix QCritical Errors (DEFAULT: "yes")
            rmflow: If "yes", the scheduler will remove the flow directory if the calculation
                completed successfully. (DEFAULT: "no")
            killjobs_if_errors: "yes" if the scheduler should try to kill all the runnnig jobs
                before exiting due to an error. (DEFAULT: "yes")
        """
        # Options passed to the scheduler.
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            #start_date=kwargs.pop("start_date", None),
        )
        # At least one non-zero time interval must be supplied.
        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = as_bool(kwargs.pop("use_dynamic_manager", False))
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
        self.max_ncores_used = kwargs.pop("max_ncores_used", None)
        self.contact_resource_manager = as_bool(kwargs.pop("contact_resource_manager", False))

        self.remindme_s = float(kwargs.pop("remindme_s", 1 * 24 * 3600))
        self.max_num_pyexcs = int(kwargs.pop("max_num_pyexcs", 0))
        self.max_num_abierrs = int(kwargs.pop("max_num_abierrs", 0))
        self.safety_ratio = int(kwargs.pop("safety_ratio", 5))
        #self.max_etime_s = kwargs.pop("max_etime_s", )
        self.max_nlaunches = kwargs.pop("max_nlaunches", -1)
        self.debug = kwargs.pop("debug", 0)
        self.fix_qcritical = as_bool(kwargs.pop("fix_qcritical", True))
        self.rmflow = as_bool(kwargs.pop("rmflow", False))
        self.killjobs_if_errors = as_bool(kwargs.pop("killjobs_if_errors", True))

        self.customer_service_dir = kwargs.pop("customer_service_dir", None)
        if self.customer_service_dir is not None:
            self.customer_service_dir = Directory(self.customer_service_dir)
            self._validate_customer_service()

        # Any leftover keyword is an unknown option: fail loudly.
        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        if not has_apscheduler:
            raise RuntimeError("Install apscheduler with pip")

        # apscheduler changed its API between v2 and v3.
        if has_sched_v3:
            logger.warning("Using scheduler v>=3.0.0")
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler
            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = deque(maxlen=self.max_num_pyexcs + 10)

        # Used to push additional info during the execution.
        self.history = deque(maxlen=100)
    @classmethod
    def from_file(cls, filepath):
        """Read the configuration parameters from a Yaml file."""
        # Keys of the YAML document map directly to __init__ keyword arguments.
        with open(filepath, "rt") as fh:
            return cls(**yaml.safe_load(fh))
    @classmethod
    def from_string(cls, s):
        """Create an instance from string s containing a YAML dictionary."""
        stream = cStringIO(s)
        stream.seek(0)
        return cls(**yaml.safe_load(stream))
    @classmethod
    def from_user_config(cls):
        """
        Initialize the :class:`PyFlowScheduler` from the YAML file 'scheduler.yml'.
        Search first in the working directory and then in the configuration directory of abipy.

        Raises:
            `RuntimeError` if file is not found.
        """
        # Try in the current directory.
        path = os.path.join(os.getcwd(), cls.YAML_FILE)
        if os.path.exists(path):
            return cls.from_file(path)

        # Try in the configuration directory.
        path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)
        if os.path.exists(path):
            return cls.from_file(path)

        raise cls.Error("Cannot locate %s neither in current directory nor in %s" % (cls.YAML_FILE, path))
    def __str__(self):
        """String representation."""
        lines = [self.__class__.__name__ + ", Pid: %d" % self.pid]
        app = lines.append
        app("Scheduler options: %s" % str(self.sched_options))

        # Include the flow summary only after add_flow has been called.
        if self.flow is not None:
            app(80 * "=")
            app(str(self.flow))

        return "\n".join(lines)
    @property
    def pid(self):
        """The pid of the process associated to the scheduler."""
        try:
            return self._pid
        except AttributeError:
            # Cache the pid on first access.
            self._pid = os.getpid()
            return self._pid
    @property
    def pid_file(self):
        """
        Absolute path of the file with the pid.
        The file is located in the workdir of the flow
        """
        # Set by add_flow(); accessing it before that raises AttributeError.
        return self._pid_file
    @property
    def flow(self):
        """`Flow`, or None if no flow has been added yet (see add_flow)."""
        try:
            return self._flow
        except AttributeError:
            return None
    @property
    def num_excs(self):
        """Number of exceptions raised so far."""
        return len(self.exceptions)
    def get_delta_etime(self):
        """Return a `timedelta` object representing the elapsed time since start()."""
        return timedelta(seconds=(time.time() - self.start_time))
    def add_flow(self, flow):
        """
        Add an :class:`Flow` flow to the scheduler.

        Only one flow may be added; a second call raises self.Error.
        """
        if hasattr(self, "_flow"):
            raise self.Error("Only one flow can be added to the scheduler.")

        # Check if we are already using a scheduler to run this flow
        flow.check_pid_file()
        flow.set_spectator_mode(False)

        # Build dirs and files (if not yet done)
        flow.build()

        # Record our pid so other schedulers see this flow as taken.
        with open(flow.pid_file, "wt") as fh:
            fh.write(str(self.pid))

        self._pid_file = flow.pid_file
        self._flow = flow
    def _validate_customer_service(self):
        """
        Validate input parameters if customer service is on then
        create directory for tarball files with correct permissions for user and group.
        """
        direc = self.customer_service_dir
        if not direc.exists:
            # rwxr-x--- : owner full access, group read/execute, others none.
            mode = 0o750
            print("Creating customer_service_dir %s with mode %s" % (direc, mode))
            direc.makedirs()
            os.chmod(direc.path, mode)

        # Customer service implies reports are emailed to somebody.
        if self.mailto is None:
            raise RuntimeError("customer_service_dir requires mailto option in scheduler.yml")
def _do_customer_service(self):
    """
    Called before the shutdown of the scheduler.
    If customer_service is on, a lightweight tarball file with inputs and
    the most important output files is created in customer_service_dir.
    No-op if customer service is not activated.
    """
    if self.customer_service_dir is None: return
    # NOTE(review): the original computed
    #     doit = self.exceptions or not self.flow.all_ok
    # and then unconditionally overwrote it with True, so the tarball is
    # always generated. The dead store has been removed; restore the
    # condition above if tarballs should only be produced on failure.
    prefix = os.path.basename(self.flow.workdir) + "_"
    import tempfile, datetime
    suffix = str(datetime.datetime.now()).replace(" ", "-")
    # Remove milliseconds. BUGFIX(review): use find (returns -1 when no dot,
    # which the check below expects); index would raise ValueError instead.
    i = suffix.find(".")
    if i != -1: suffix = suffix[:i]
    suffix += ".tar.gz"
    _, tmpname = tempfile.mkstemp(suffix="_" + suffix, prefix=prefix,
                                  dir=self.customer_service_dir.path, text=False)
    print("Dear customer,\n We are about to generate a tarball in\n %s" % tmpname)
    self.flow.make_light_tarfile(name=tmpname)
def start(self):
"""
Starts the scheduler in a new thread. Returns 0 if success, non-zero on error.
In standalone mode, this method will block until there are no more scheduled jobs.
"""
self.history.append("Started on %s" % time.asctime())
self.start_time = time.time()
if not has_apscheduler:
raise RuntimeError("Install apscheduler with pip")
# apscheduler >= 3 renamed add_interval_job to add_job(..., "interval").
if has_sched_v3:
self.sched.add_job(self.callback, "interval", **self.sched_options)
else:
self.sched.add_interval_job(self.callback, **self.sched_options)
# Quick sanity check of the flow before entering the scheduler loop.
errors = self.flow.look_before_you_leap()
if errors:
self.exceptions.append(errors)
return 1
# Try to run the job immediately. If something goes wrong return without initializing the scheduler.
self._runem_all()
if self.exceptions:
self.cleanup()
self.send_email(msg="Error while trying to run the flow for the first time!\n %s" % self.exceptions)
return 1
try:
# Blocks until the scheduler thread is shut down (standalone mode).
self.sched.start()
return 0
except KeyboardInterrupt:
self.shutdown(msg="KeyboardInterrupt from user")
if ask_yesno("Do you want to cancel all the jobs in the queue? [Y/n]"):
print("Number of jobs cancelled:", self.flow.cancel())
self.flow.pickle_dump()
return -1
def _runem_all(self):
"""
This function checks the status of all tasks,
tries to fix tasks that went unconverged, abicritical, or queuecritical
and tries to run all the tasks that can be submitted.
Exceptions raised while doing so are accumulated in self.exceptions.
"""
excs = []
flow = self.flow
# Allow to change the manager at run-time
if self.use_dynamic_manager:
from pymatgen.io.abinit.tasks import TaskManager
new_manager = TaskManager.from_user_config()
for work in flow:
work.set_manager(new_manager)
nqjobs = 0
if self.contact_resource_manager: # and flow.TaskManager.qadapter.QTYPE == "shell":
# This call is expensive and therefore it's optional (must be activated in manager.yml)
nqjobs = flow.get_njobs_in_queue()
if nqjobs is None:
nqjobs = 0
if flow.manager.has_queue:
logger.warning('Cannot get njobs_inqueue')
else:
# Here we just count the number of tasks in the flow who are running.
# This logic breaks down if there are multiple schedulers running
# but it's easy to implement without having to contact the resource manager.
nqjobs = (len(list(flow.iflat_tasks(status=flow.S_RUN))) +
len(list(flow.iflat_tasks(status=flow.S_SUB))))
if nqjobs >= self.max_njobs_inqueue:
print("Too many jobs in the queue: %s. No job will be submitted." % nqjobs)
flow.check_status(show=False)
return
# Compute how many new launches this round is allowed to do.
if self.max_nlaunches == -1:
max_nlaunch = self.max_njobs_inqueue - nqjobs
else:
max_nlaunch = min(self.max_njobs_inqueue - nqjobs, self.max_nlaunches)
# check status.
flow.check_status(show=False)
# This check is not perfect, we should make a list of tasks to submit
# and select only the subset so that we don't exceed max_ncores_used
# Many sections of this code should be rewritten.
#if self.max_ncores_used is not None and flow.ncores_used > self.max_ncores_used:
if self.max_ncores_used is not None and flow.ncores_allocated > self.max_ncores_used:
print("Cannot exceed max_ncores_used %s" % self.max_ncores_used)
return
# Try to restart the unconverged tasks
# TODO: do not fire here but prepare for firing in rapidfire
for task in self.flow.unconverged_tasks:
try:
logger.info("Flow will try restart task %s" % task)
fired = task.restart()
if fired:
self.nlaunch += 1
max_nlaunch -= 1
if max_nlaunch == 0:
logger.info("Restart: too many jobs in the queue, returning")
flow.pickle_dump()
return
except task.RestartError:
excs.append(straceback())
# Temporarily disabled by MG because I don't know if fix_critical works after the
# introduction of the new qadapters
# reenabled by MsS disable things that do not work at low level
# fix only prepares for restarting, and sets to ready
if self.fix_qcritical:
nfixed = flow.fix_queue_critical()
if nfixed: print("Fixed %d QCritical error(s)" % nfixed)
nfixed = flow.fix_abicritical()
if nfixed: print("Fixed %d AbiCritical error(s)" % nfixed)
# update database
flow.pickle_dump()
# Submit the tasks that are ready.
try:
nlaunch = PyLauncher(flow).rapidfire(max_nlaunch=max_nlaunch, sleep_time=10)
self.nlaunch += nlaunch
if nlaunch:
cprint("[%s] Number of launches: %d" % (time.asctime(), nlaunch), "yellow")
except Exception:
excs.append(straceback())
# check status.
flow.show_status()
if excs:
logger.critical("*** Scheduler exceptions:\n *** %s" % "\n".join(excs))
self.exceptions.extend(excs)
def callback(self):
    """
    The function executed periodically by the scheduler (apscheduler job).
    Any exception raised by the real callback triggers the shutdown of the
    scheduler; the traceback is recorded in self.exceptions.
    """
    try:
        return self._callback()
    except Exception:
        # BUGFIX(review): was a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to Exception.
        # All exceptions raised here trigger the shutdown!
        s = straceback()
        self.exceptions.append(s)
        self.shutdown(msg="Exception raised in callback!\n" + s)
def _callback(self):
"""The actual callback: run/fix tasks, then decide whether the scheduler should shut down."""
if self.debug:
# Show the number of open file descriptors
print(">>>>> _callback: Number of open file descriptors: %s" % get_open_fds())
self._runem_all()
# Mission accomplished. Shutdown the scheduler.
all_ok = self.flow.all_ok
if all_ok:
return self.shutdown(msg="All tasks have reached S_OK. Will shutdown the scheduler and exit")
# Handle failures. Each fatal condition appends a message to err_lines;
# a non-empty err_lines at the end triggers the shutdown.
err_lines = []
# Shall we send a reminder to the user?
delta_etime = self.get_delta_etime()
if delta_etime.total_seconds() > self.num_reminders * self.remindme_s:
self.num_reminders += 1
msg = ("Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s " %
(self.pid, self.flow, delta_etime))
retcode = self.send_email(msg, tag="[REMINDER]")
if retcode:
# Cannot send mail, shutdown now!
msg += ("\nThe scheduler tried to send an e-mail to remind the user\n" +
" but send_email returned %d. Aborting now" % retcode)
err_lines.append(msg)
#if delta_etime.total_seconds() > self.max_etime_s:
# err_lines.append("\nExceeded max_etime_s %s. Will shutdown the scheduler and exit" % self.max_etime_s)
# Too many exceptions. Shutdown the scheduler.
if self.num_excs > self.max_num_pyexcs:
msg = "Number of exceptions %s > %s. Will shutdown the scheduler and exit" % (
self.num_excs, self.max_num_pyexcs)
err_lines.append(boxed(msg))
# Paranoid check: disable the scheduler if we have submitted
# too many jobs (it might be due to some bug or other external reasons
# such as race conditions between different callbacks!)
if self.nlaunch > self.safety_ratio * self.flow.num_tasks:
msg = "Too many jobs launched %d. Total number of tasks = %s, Will shutdown the scheduler and exit" % (
self.nlaunch, self.flow.num_tasks)
err_lines.append(boxed(msg))
# Count the number of tasks with status == S_ERROR.
if self.flow.num_errored_tasks > self.max_num_abierrs:
msg = "Number of tasks with ERROR status %s > %s. Will shutdown the scheduler and exit" % (
self.flow.num_errored_tasks, self.max_num_abierrs)
err_lines.append(boxed(msg))
# Test on the presence of deadlocks.
g = self.flow.find_deadlocks()
if g.deadlocked:
# Check the flow again so that status are updated.
self.flow.check_status()
g = self.flow.find_deadlocks()
print("deadlocked:\n", g.deadlocked, "\nrunnables:\n", g.runnables, "\nrunning\n", g.running)
if g.deadlocked and not g.runnables and not g.running:
err_lines.append("No runnable job with deadlocked tasks:\n%s." % str(g.deadlocked))
if not g.runnables and not g.running:
# Check the flow again so that status are updated.
self.flow.check_status()
g = self.flow.find_deadlocks()
if not g.runnables and not g.running:
err_lines.append("No task is running and cannot find other tasks to submit.")
# Something wrong. Quit
if err_lines:
# Cancel all jobs.
if self.killjobs_if_errors:
cprint("killjobs_if_errors set to 'yes' in scheduler file. Will kill jobs before exiting.", "yellow")
try:
num_cancelled = 0
for task in self.flow.iflat_tasks():
num_cancelled += task.cancel()
cprint("Killed %d tasks" % num_cancelled, "yellow")
except Exception as exc:
cprint("Exception while trying to kill jobs:\n%s" % str(exc), "red")
self.shutdown("\n".join(err_lines))
return len(self.exceptions)
def cleanup(self):
    """Cleanup routine: remove the pid file and save the pickle database."""
    try:
        os.remove(self.pid_file)
    except OSError as exc:
        # Not fatal: the scheduler can still dump the final status.
        logger.critical("Could not remove pid_file: %s", exc)

    # Save the final status of the flow.
    self.flow.pickle_dump()
def shutdown(self, msg):
"""Shutdown the scheduler: cleanup, final e-mail and report, then stop the sched thread."""
try:
self.cleanup()
self.history.append("Completed on: %s" % time.asctime())
self.history.append("Elapsed time: %s" % self.get_delta_etime())
if self.debug:
print(">>>>> shutdown: Number of open file descriptors: %s" % get_open_fds())
retcode = self.send_email(msg)
if self.debug:
print("send_mail retcode", retcode)
# Write file with the list of exceptions:
if self.exceptions:
dump_file = os.path.join(self.flow.workdir, "_exceptions")
with open(dump_file, "wt") as fh:
fh.writelines(self.exceptions)
fh.write("Shutdown message:\n%s" % msg)
lines = []
app = lines.append
app("Submitted on: %s" % time.ctime(self.start_time))
app("Completed on: %s" % time.asctime())
app("Elapsed time: %s" % str(self.get_delta_etime()))
if self.flow.all_ok:
app("Flow completed successfully")
else:
app("Flow %s didn't complete successfully" % repr(self.flow.workdir))
app("use `abirun.py FLOWDIR debug` to analyze the problem.")
app("Shutdown message:\n%s" % msg)
print("")
print("\n".join(lines))
print("")
self._do_customer_service()
if self.flow.all_ok:
print("Calling flow.finalize()...")
self.flow.finalize()
#print("finalized:", self.flow.finalized)
if self.rmflow:
# NOTE(review): this message is appended to `lines` after the list has
# already been printed above, so it is never shown to the user.
app("Flow directory will be removed...")
try:
self.flow.rmtree()
except Exception:
logger.warning("Ignoring exception while trying to remove flow dir.")
finally:
# Shutdown the scheduler thus allowing the process to exit.
logger.debug('This should be the shutdown of the scheduler')
# Unschedule all the jobs before calling shutdown
#self.sched.print_jobs()
if not has_sched_v3:
for job in self.sched.get_jobs():
self.sched.unschedule_job(job)
#self.sched.print_jobs()
self.sched.shutdown()
# Uncomment the line below if shutdown does not work!
#os.system("kill -9 %d" % os.getpid())
def send_email(self, msg, tag=None):
    """
    Send an e-mail before completing the shutdown.
    Returns 0 if success, non-zero otherwise (-2 if an exception was raised).
    """
    try:
        return self._send_email(msg, tag)
    except Exception:
        # BUGFIX(review): was a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt into a -2 return value.
        self.exceptions.append(straceback())
        return -2
def _send_email(self, msg, tag):
    """Build the report (header + flow status + exceptions) and mail it to self.mailto."""
    if self.mailto is None:
        return -1

    report = msg.splitlines()
    report.append("Submitted on: %s" % time.ctime(self.start_time))
    report.append("Completed on: %s" % time.asctime())
    report.append("Elapsed time: %s" % str(self.get_delta_etime()))
    report.append("Number of errored tasks: %d" % self.flow.num_errored_tasks)
    report.append("Number of unconverged tasks: %d" % self.flow.num_unconverged_tasks)

    strio = cStringIO()
    strio.writelines("\n".join(report) + 4 * "\n")

    # Add the status of the flow.
    self.flow.show_status(stream=strio)

    if self.exceptions:
        # Report the list of exceptions.
        strio.writelines(self.exceptions)

    if tag is None:
        tag = " [ALL OK]" if self.flow.all_ok else " [WARNING]"

    return sendmail(subject=self.flow.name + tag, text=strio.getvalue(), mailto=self.mailto)
def sendmail(subject, text, mailto, sender=None):
    """
    Sends an e-mail with unix sendmail.

    Args:
        subject: String with the subject of the mail.
        text: String with the body of the mail.
        mailto: String or list of strings with the recipients.
        sender: String with the sender address.
            If sender is None, username@hostname is used.

    Returns:
        Exit status (0 on success; -1 if the sendmail executable is missing).
    """
    if sender is None:
        try:
            from socket import gethostname
            sender = os.getlogin() + "@" + gethostname()
        except OSError:
            # getlogin can fail e.g. when there is no controlling terminal.
            sender = 'abipyscheduler@youknowwhere'

    if is_string(mailto): mailto = [mailto]

    # Body of the message.
    from email.mime.text import MIMEText
    mail = MIMEText(text)
    mail["Subject"] = subject
    mail["From"] = sender
    mail["To"] = ", ".join(mailto)
    msg = mail.as_string()

    # sendmail works much better than the python interface.
    # Note that sendmail is available only on Unix-like OS.
    sendmail_exe = which("sendmail")
    if sendmail_exe is None: return -1

    from subprocess import Popen, PIPE
    import sys
    if sys.version_info[0] < 3:
        proc = Popen([sendmail_exe, "-t"], stdin=PIPE, stderr=PIPE)
    else:
        # msg is string not bytes so must use universal_newlines
        proc = Popen([sendmail_exe, "-t"], stdin=PIPE, stderr=PIPE, universal_newlines=True)

    _, errdata = proc.communicate(msg)
    # Empty stderr is treated as success.
    return len(errdata)
def __test_sendmail():
    """Smoke test: send a dummy mail and check that sendmail reports success."""
    rc = sendmail("sendmail_test", text="hello\nworld", mailto="nobody@nowhere.com")
    print("Retcode", rc)
    assert rc == 0
class BatchLauncherError(Exception):
"""Base class for the exceptions raised by :class:`BatchLauncher`."""
class BatchLauncher(object):
"""
This object automates the execution of multiple flows. It generates a job script
that uses abirun.py to run each flow stored in self with a scheduler.
The execution of the flows is done in sequential but each scheduler will start
to submit the tasks of the flow in autoparal mode.
The `BatchLauncher` is pickleable, hence one can reload it, check if all flows are completed
and rerun only those that are not completed due to the timelimit.
"""
# Basename of the pickle file used to save/restore the state of the launcher.
PICKLE_FNAME = "__BatchLauncher__.pickle"
# Exception class raised by this object.
Error = BatchLauncherError
@classmethod
def from_dir(cls, top, workdir=None, name=None, manager=None, max_depth=2):
    """
    Find all flows located within the directory `top` and build the `BatchLauncher`.

    Args:
        top: Top level directory or list of directories.
        workdir: Batch workdir.
        name: Name assigned to the launcher.
        manager: :class:`TaskManager` object. If None, the manager is read from `manager.yml`.
            In this case the YAML file must provide the entry `batch_manager` that defines
            the queue adapter used to submit the batch script.
        max_depth: Search in directory only if it is N or fewer levels below top.
    """
    from .flows import Flow

    def find_pickles(dirtop):
        # Walk the directory tree below dirtop and collect the flow pickle databases.
        found = []
        for dirpath, dirnames, filenames in os.walk(dirtop):
            found.extend(os.path.join(dirpath, f)
                         for f in filenames if f == Flow.PICKLE_FNAME)
        return found

    if is_string(top):
        pickle_paths = find_pickles(top)
    else:
        # `top` is a list of directories.
        pickle_paths = []
        for dirname in top:
            pickle_paths.extend(find_pickles(dirname))

    #workdir = os.path.join(top, "batch") if workdir is None else workdir
    workdir = "batch" if workdir is None else workdir
    new = cls(workdir, name=name, manager=manager)

    for path in pickle_paths:
        new.add_flow(path)

    return new
@classmethod
def pickle_load(cls, filepath):
"""
Loads the object from a pickle file.
Args:
filepath: Filename or directory name. If filepath is a directory, we
scan the directory tree starting from filepath and we
read the first pickle database. Raise RuntimeError if multiple
databases are found.
"""
if os.path.isdir(filepath):
# Walk through each directory inside path and find the pickle database.
for dirpath, dirnames, filenames in os.walk(filepath):
fnames = [f for f in filenames if f == cls.PICKLE_FNAME]
if fnames:
if len(fnames) == 1:
filepath = os.path.join(dirpath, fnames[0])
break # Exit os.walk
else:
err_msg = "Found multiple databases:\n %s" % str(fnames)
raise RuntimeError(err_msg)
else:
# for/else: executed only if the walk completed without `break`,
# i.e. no pickle database was found anywhere below filepath.
err_msg = "Cannot find %s inside directory %s" % (cls.PICKLE_FNAME, filepath)
raise ValueError(err_msg)
with open(filepath, "rb") as fh:
new = pickle.load(fh)
# new.flows is a list of strings with the workdir of the flows (see __getstate__).
# Here we read the Flow from the pickle file so that we have
# an up-to-date version and we set the flow in visitor_mode
from .flows import Flow
flow_workdirs, new.flows = new.flows, []
for flow in map(Flow.pickle_load, flow_workdirs):
new.add_flow(flow)
return new
def pickle_dump(self):
    """Save the status of the object in pickle format."""
    filepath = os.path.join(self.workdir, self.PICKLE_FNAME)
    with open(filepath, mode="wb") as fh:
        pickle.dump(self, fh)
def __getstate__(self):
    """
    Return the state to be pickled.
    The Flow objects are replaced by their workdir strings because we are
    observing the flows and we want the updated version when we reload the
    `BatchLauncher` from pickle (see `pickle_load`).
    """
    state = dict(self.__dict__)
    state["flows"] = [flow.workdir for flow in self.flows]
    return state
def __init__(self, workdir, name=None, flows=None, manager=None, timelimit=None):
    """
    Args:
        workdir: Working directory.
        name: Name assigned to the `BatchLauncher`.
        flows: List of `Flow` objects.
        manager: :class:`TaskManager` object responsible for the submission of the jobs.
            If manager is None, the object is initialized from the yaml file
            located either in the working directory or in the user configuration dir.
        timelimit: Time limit (int with seconds or string with time given with
            the slurm convention: "days-hours:minutes:seconds").
            If timelimit is None, the default value specified in the `batch_adapter` is taken.
    """
    self.workdir = os.path.abspath(workdir)
    if not os.path.exists(self.workdir):
        os.makedirs(self.workdir)

    self.name = os.path.basename(self.workdir) if name is None else name
    self.script_file = File(os.path.join(self.workdir, "run.sh"))
    self.qerr_file = File(os.path.join(self.workdir, "queue.qerr"))
    self.qout_file = File(os.path.join(self.workdir, "queue.qout"))
    self.log_file = File(os.path.join(self.workdir, "run.log"))
    self.batch_pidfile = File(os.path.join(self.workdir, "batch.pid"))

    from .tasks import TaskManager
    manager = TaskManager.as_manager(manager)

    # Extract the qadapter to be used for the batch script.
    try:
        self.qadapter = qad = manager.batch_adapter
    except AttributeError:
        raise RuntimeError("Your manager.yml file does not define an entry for the batch_adapter")
    if qad is None:
        raise RuntimeError("Your manager.yml file does not define an entry for the batch_adapter")

    # Set mpi_procs to 1 just to be on the safe side.
    qad.set_mpi_procs(1)
    # Allow the user to change the timelimit via __init__.
    if timelimit is not None:
        self.set_timelimit(timelimit)
    # BUGFIX(review): removed the unconditional `self.set_timelimit(36000)`
    # (marked "FIXME: Remove me!") that silently overrode any user-supplied
    # timelimit with 10 hours.

    # Initialize list of flows.
    if flows is None: flows = []
    if not isinstance(flows, (list, tuple)): flows = [flows]
    self.flows = flows
def set_timelimit(self, timelimit):
    """
    Set the timelimit of the batch launcher.

    Args:
        timelimit: Time limit (int with seconds or string with time given
            with the slurm convention: "days-hours:minutes:seconds").
    """
    # BUGFIX(review): the original called self.qad.set_timelimit(...), but the
    # adapter is stored as self.qadapter in __init__; self.qad never exists,
    # so every call raised AttributeError.
    self.qadapter.set_timelimit(qu.timelimit_parser(timelimit))
def to_string(self, **kwargs):
    """Return a string with the batch qadapter followed by one line per flow."""
    lines = list(str(self.qadapter).splitlines())
    lines.extend("Flow [%d] " % i + str(flow) for i, flow in enumerate(self.flows))
    return "\n".join(lines)
def __str__(self):
    """String representation: delegates to `to_string`."""
    return self.to_string()
def add_flow(self, flow):
"""
Add a flow. Accept filepath or :class:`Flow` object. Return 1 if flow was added else 0.
Raises:
Error: if the flow is already registered or two flows map to the same workdir.
"""
from .flows import Flow
flow = Flow.as_flow(flow)
if flow in self.flows:
raise self.Error("Cannot add same flow twice!")
if not flow.allocated:
# Set the workdir of the flow here. Create a dir in self.workdir with name flow.name
flow_workdir = os.path.join(self.workdir, os.path.basename(flow.name))
if flow_workdir in (flow.workdir for flow in self.flows):
raise self.Error("Two flows have the same name and hence the same workdir!")
flow.allocate(workdir=flow_workdir)
# Check if we are already using a scheduler to run this flow
flow.check_pid_file()
flow.set_spectator_mode(False)
flow.check_status(show=False)
#if flow.all_ok:
# print("flow.all_ok: Ignoring %s" % flow)
# return 0
self.flows.append(flow)
#print("Flow %s added to the BatchLauncher" % flow)
return 1
def submit(self, **kwargs):
"""
Submit a job script that will run the schedulers with `abirun.py`.
Args:
verbose: Verbosity level
dry_run: Don't submit the script if dry_run. Default: False
Returns:
namedtuple with attributes:
retcode: Return code as returned by the submission script.
qjob: :class:`QueueJob` object.
num_flows_inbatch: Number of flows executed by the batch script
Return code of the job script submission.
"""
verbose, dry_run = kwargs.pop("verbose", 0), kwargs.pop("dry_run", False)
if not self.flows:
print("Cannot submit an empty list of flows!")
return 0
if hasattr(self, "qjob"):
# This usually happens when we have loaded the object from pickle
# and we have already submitted the batch script to the queue.
# At this point we need to understand if the previous batch job
# is still running before trying to submit it again. There are three cases:
#
# 1) The batch script has completed within timelimit and therefore
# the pid_file has been removed by the script. In this case, we
# should not try to submit it again.
# 2) The batch script has been killed due to timelimit (other reasons are possible
# but we neglect them). In this case the pid_file exists but there's no job with
# this pid running and we can resubmit it again.
# 3) The batch script is still running.
print("BatchLauncher has qjob %s" % self.qjob)
# NOTE(review): the attribute created in __init__ is `batch_pidfile`;
# `batch_pid_file` does not exist, so this line raises AttributeError.
if not self.batch_pid_file.exists:
print("It seems that the batch script reached the end. Wont' try to submit it again")
return 0
msg = ("Here I have to understand if qjob is in the queue."
" but I need an abstract API that can retrieve info from the queue id")
raise RuntimeError(msg)
# TODO: Temptative API
# NOTE(review): everything below the unconditional `raise` above is unreachable.
if self.qjob.in_status("Running|Queued"):
print("Job is still running. Cannot submit")
else:
del self.qjob
script, num_flows_inbatch = self._get_script_nflows()
if num_flows_inbatch == 0:
print("All flows have reached all_ok! Batch script won't be submitted")
return 0
if verbose:
print("*** submission script ***")
print(script)
# Write the script.
self.script_file.write(script)
self.script_file.chmod(0o740)
# Build the flow.
for flow in self.flows:
flow.build_and_pickle_dump()
# Submit the task and save the queue id.
if dry_run: return -1
print("Will submit %s flows in batch script" % len(self.flows))
self.qjob, process = self.qadapter.submit_to_queue(self.script_file.path)
# Save the queue id in the pid file
# The file will be removed by the job script if execution is completed.
self.batch_pidfile.write(str(self.qjob.qid))
self.pickle_dump()
process.wait()
return dict2namedtuple(retcode=process.returncode, qjob=self.qjob,
num_flows_inbatch=num_flows_inbatch)
def _get_script_nflows(self):
    """
    Build the submission script. Return (script, num_flows_in_batch).
    Flows that have already reached all_ok are excluded from the batch.
    """
    flows_torun = [f for f in self.flows if not f.all_ok]
    if not flows_torun:
        return "", 0

    executable = [
        'export _LOG=%s' % self.log_file.path,
        'date1=$(date +"%s")',
        'echo Running abirun.py in batch mode > ${_LOG}',
        " ",
    ]
    app = executable.append

    # Build list of abirun commands and save the name of the log files.
    self.sched_logs, num_flows = [], len(flows_torun)
    for i, flow in enumerate(flows_torun):
        logfile = os.path.join(self.workdir, "log_" + os.path.basename(flow.workdir))
        # BUGFIX(review): was `>> ${LOG}`; the variable exported above is _LOG,
        # so this echo used to write to a file literally named "" or fail.
        app("echo Starting flow %d/%d on: `date` >> ${_LOG}" % (i+1, num_flows))
        app("\nabirun.py %s scheduler > %s" % (flow.workdir, logfile))
        app("echo Returning from abirun on `date` with retcode $? >> ${_LOG}")
        assert logfile not in self.sched_logs
        self.sched_logs.append(logfile)

    # Remove the batch pid_file and compute elapsed time.
    executable.extend([
        " ",
        "# Remove batch pid file",
        'rm %s' % self.batch_pidfile.path,
        " ",
        "# Compute elapsed time",
        'date2=$(date +"%s")',
        'diff=$(($date2-$date1))',
        'echo $(($diff / 60)) minutes and $(($diff % 60)) seconds elapsed. >> ${_LOG}'
    ])

    return self.qadapter.get_script_str(
        job_name=self.name,
        launch_dir=self.workdir,
        executable=executable,
        qout_path=self.qout_file.path,
        qerr_path=self.qerr_file.path,
    ), num_flows
def show_summary(self, **kwargs):
    """Show a summary with the status of each flow."""
    for flow in self.flows:
        flow.show_summary()
def show_status(self, **kwargs):
    """
    Report the status of the flows.

    Args:
        stream: File-like object. Default: sys.stdout.
        verbose: Verbosity level (default 0). > 0 to show only the works that are not finalized.
    """
    for flow in self.flows:
        flow.show_status(**kwargs)
| czhengsci/pymatgen | pymatgen/io/abinit/launcher.py | Python | mit | 48,636 | [
"ABINIT",
"pymatgen"
] | f7fb8880c68bbaabf45c2eb5136ee292f6db6ceb908d306ea2840eb608cf9755 |
#----------Import-Modules-START-----------------------------
import os
import posixpath
import pyexiv2
import urllib.parse
import threading
from bs4 import BeautifulSoup
#----------Import-Modules-END-------------------------------
#----------Global-Variables-START---------------------------
__std_links_to_visit = [".html"]
__std_links_to_download = [".jpg", ".png", ".jpeg", ".mp4", ".wmv", ".avi"]
__std_chunk_size = 1024
__visited_links = set()
__file_lock = threading.Lock()
#----------Global-Variables-END-----------------------------
#----------Utility-functions-START--------------------------
def format_previous_directory(url):
    """
    Collapse ".." segments in the path of *url* (a trailing slash is preserved).

    >>> format_previous_directory('http://www.example.com/foo/bar/../../baz/bux/')
    'http://www.example.com/baz/bux/'
    >>> format_previous_directory('http://www.example.com/some/path/../file.ext')
    'http://www.example.com/some/file.ext'
    >>> format_previous_directory('http://www.example.com/some/../path/../file.ext')
    'http://www.example.com/file.ext'
    """
    parts = urllib.parse.urlparse(url)
    normalized = posixpath.normpath(parts.path)
    # normpath drops a trailing "/", so restore it when the original had one.
    if parts.path.endswith('/'):
        normalized += '/'
    return parts._replace(path=normalized).geturl()
def format_url(url, base_url):
    '''
    Resolve *url* against *base_url*. Assumes url and base_url are well formed.
    Returns None for an empty url.

    >>> format_url("http://rand.com/random", "http://rand.com")
    'http://rand.com/random'
    >>> format_url("http://rand.com/", "http://rand.com")
    'http://rand.com/'
    >>> format_url("", "http://rand.com") is None
    True
    >>> format_url("//rand.com", "http://rand.com")
    'http://rand.com'
    >>> format_url("//rand.com/random", "http://rand.com")
    'http://rand.com/random'
    >>> format_url("/random", "http://rand.com")
    'http://rand.com/random'
    >>> format_url("/random/ram", "http://rand.com")
    'http://rand.com/random/ram'
    >>> format_url("./random", "http://rand.com/ram")
    'http://rand.com/random'
    >>> format_url("./random", "http://rand.com/ram/")
    'http://rand.com/ram/random'
    >>> format_url("random/rand", "http://rand.com/ram")
    'http://rand.com/random/rand'
    >>> format_url("/random/rand", "http://rand.com/ram")
    'http://rand.com/random/rand'
    '''
    if url == '':
        return None

    parts = urllib.parse.urlsplit(url)
    if parts.netloc != '':
        # Already absolute; prepend a scheme for protocol-relative URLs ("//host/...").
        return url if parts.scheme != '' else "http:" + url

    # No netloc: the URL is a path resolved against base_url.
    path = parts.path
    if path.startswith('/'):
        # Host-relative path: join with scheme://netloc of the base.
        base = urllib.parse.urlsplit(base_url)
        resolved = posixpath.join(base.scheme + "://" + base.netloc, path[1:])
    else:
        # Document-relative path: join with the "directory" of the base URL.
        if path.startswith("./"):
            path = path[2:]
        resolved = posixpath.join(os.path.dirname(base_url), path)

    if parts.query != '':
        resolved += '?' + parts.query
    return resolved
def format_url_with_resolution(url, base_url):
    """Resolve *url* against *base_url*, then collapse any ".." path segments."""
    resolved = format_url(url, base_url)
    if resolved is None:
        return None
    return format_previous_directory(resolved)
#----------Utility-functions-END----------------------------
#----------Template-functions-START-------------------------
def std_preprocess(root_jobs):
    """Mark the URLs of the root jobs as visited so they are not queued twice."""
    for url, _task, _id in root_jobs:
        __visited_links.add(url)
def std_visit(request, base_url, id, jobs):
    """
    Default visit handler: delegate to the template with the standard link lists.

    NOTE(review): base_url is ignored and request.url is passed instead —
    presumably to resolve links against the final URL after redirects; confirm intent.
    """
    return std_visit_template(request, request.url, id, jobs,
                              __std_links_to_visit, __std_links_to_download)
def std_visit_template(request, base_url, id, jobs, links_to_visit, links_to_download):
"""
Parse the HTML in request.text, collect links from <a>/<img>/<source>/<video>
tags, and repopulate `jobs` with (url, "visit", 0) or (url, "download", 0)
entries for links whose extension is in links_to_visit / links_to_download.
Newly seen URLs are added to the module-wide visited set. Returns True.
"""
new_urls = []
#--------std_process-Utilities-START-------------
def find_and_add_url(tag, attr, soup):
# Resolve the given attribute of every `tag` against base_url and queue unseen URLs.
# NOTE(review): div.get(attr) may be None for tags lacking the attribute;
# format_url would then raise — confirm upstream guarantees the attribute.
for div in soup.find_all(tag):
new_url = format_url_with_resolution(div.get(attr), base_url)
if new_url is not None and new_url not in __visited_links:
__visited_links.add(new_url)
new_urls.append(new_url)
#--------std_process-Utilities-END---------------
soup = BeautifulSoup(request.text.replace('\n', '').replace('\r', ''), "html.parser")
find_and_add_url("a", "href", soup)
find_and_add_url("img", "src", soup)
find_and_add_url("source", "src", soup)
find_and_add_url("video", "src", soup)
# Replace the contents of `jobs` in place with the newly discovered work items.
jobs.clear()
for new_url in new_urls:
_, ext = os.path.splitext(new_url)
# "both" is not used here
if ext in links_to_visit:
# id is not used here
jobs.append((new_url, "visit", 0))
elif ext in links_to_download:
jobs.append((new_url, "download", 0))
return True
def std_download(request, url, id, iptc_tags):
    """
    Stream the response body to "scrapper2_download/<host>/<path>" (skipping
    files that already exist) and write the given IPTC tags into the file.

    Returns True.
    """
    url_info = urllib.parse.urlsplit(url)
    path = url_info.path
    if path != '' and path[0] == '/':
        path = path[1:]
    filename = os.path.join(url_info.netloc, path)
    filename = os.path.join("scrapper2_download", filename)
    dirname = os.path.dirname(filename)

    # Synchronize file operations.
    # BUGFIX(review): the lock is now held via a context manager so it is
    # released even if the download raises; previously an exception between
    # acquire() and release() left the lock held forever, deadlocking all
    # other worker threads. The file handle is likewise closed via `with`.
    with __file_lock:
        if dirname != "" and not os.path.exists(dirname):
            os.makedirs(dirname)
        if not os.path.isfile(filename):
            with open(filename, 'wb') as hfile:
                for chunk in request.iter_content(chunk_size=__std_chunk_size):
                    if chunk:
                        hfile.write(chunk)

    # Write metadata (outside the lock, as before).
    meta = pyexiv2.ImageMetadata(filename)
    meta.read()
    for tag in iptc_tags:
        meta[tag] = pyexiv2.IptcTag(tag, iptc_tags[tag])
    meta.write()
    return True
def std_modify_header(header, url, task, id):
"""Template hook for customizing the request `header`; the default implementation does nothing."""
pass
def std_report_header(header, success, url, task, id):
"""Template hook receiving a header and a success flag (presumably called after each request — confirm in scrapper2 core); the default implementation does nothing."""
pass
def std_nok(status_code, url, task, id):
    """Template hook for non-OK responses; the default implementation always returns False."""
    return False
#----------Template-functions-END---------------------------
#----------Main-START---------------------------------------
if __name__ == "__main__":
# Initialize colorama so colored terminal output also works on Windows.
import colorama.initialise; colorama.initialise.init()
from scrapper2_utils import *
post_info("Running doctests...")
# Run the doctests embedded in this module; testmod()[0] is the failure count.
import doctest
if doctest.testmod()[0] == 0:
post_success("All tests passed")
"VisIt"
] | 13015d19e8194b028c307a6d19be3b4a6a742c595fe147c7080a06c2b19d1605 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
from collections import OrderedDict
from io import StringIO
import itertools
import re
import warnings
import numpy as np
import pandas as pd
from monty.json import MSONable
from ruamel.yaml import YAML
from six import string_types
from pymatgen.util.io_utils import clean_lines
from pymatgen.core.structure import SiteCollection
from pymatgen import Molecule, Element, Lattice, Structure
"""
This module implements a core class LammpsData for generating/parsing
LAMMPS data file, and other bridging classes to build LammpsData from
molecules.
Only point particle styles are supported for now (atom_style in angle,
atomic, bond, charge, full and molecular only). See the pages below for
more info.
http://lammps.sandia.gov/doc/atom_style.html
http://lammps.sandia.gov/doc/read_data.html
"""
__author__ = "Kiran Mathew, Zhi Deng"
__email__ = "kmathew@lbl.gov, z4deng@eng.ucsd.edu"
__credits__ = "Brandon Wood"

# Canonical section keywords of a LAMMPS data file, grouped by category
# (see http://lammps.sandia.gov/doc/read_data.html).
SECTION_KEYWORDS = {"atom": ["Atoms", "Velocities", "Masses",
                             "Ellipsoids", "Lines", "Triangles", "Bodies"],
                    "topology": ["Bonds", "Angles", "Dihedrals", "Impropers"],
                    "ff": ["Pair Coeffs", "PairIJ Coeffs", "Bond Coeffs",
                           "Angle Coeffs", "Dihedral Coeffs",
                           "Improper Coeffs"],
                    "class2": ["BondBond Coeffs", "BondAngle Coeffs",
                               "MiddleBondTorsion Coeffs",
                               "EndBondTorsion Coeffs", "AngleTorsion Coeffs",
                               "AngleAngleTorsion Coeffs",
                               "BondBond13 Coeffs", "AngleAngle Coeffs"]}

# Class 2 force field cross-term sections, keyed by the parent section
# they belong with.
CLASS2_KEYWORDS = {"Angle Coeffs": ["BondBond Coeffs", "BondAngle Coeffs"],
                   "Dihedral Coeffs": ["MiddleBondTorsion Coeffs",
                                       "EndBondTorsion Coeffs",
                                       "AngleTorsion Coeffs",
                                       "AngleAngleTorsion Coeffs",
                                       "BondBond13 Coeffs"],
                   "Improper Coeffs": ["AngleAngle Coeffs"]}

# Column names (after the leading id column) for fixed-format sections.
SECTION_HEADERS = {"Masses": ["mass"],
                   "Velocities": ["vx", "vy", "vz"],
                   "Bonds": ["type", "atom1", "atom2"],
                   "Angles": ["type", "atom1", "atom2", "atom3"],
                   "Dihedrals": ["type", "atom1", "atom2", "atom3", "atom4"],
                   "Impropers": ["type", "atom1", "atom2", "atom3", "atom4"]}

# Column names (after the leading id column) of the Atoms section for each
# supported atom_style.
ATOMS_HEADERS = {"angle": ["molecule-ID", "type", "x", "y", "z"],
                 "atomic": ["type", "x", "y", "z"],
                 "bond": ["molecule-ID", "type", "x", "y", "z"],
                 "charge": ["type", "q", "x", "y", "z"],
                 "full": ["molecule-ID", "type", "q", "x", "y", "z"],
                 "molecular": ["molecule-ID", "type", "x", "y", "z"]}
class LammpsData(MSONable):
    """
    Object for representing the data in a LAMMPS data file.
    """

    def __init__(self, masses, atoms, box_bounds, box_tilt=None,
                 velocities=None, force_field=None, topology=None,
                 atom_style="full"):
        """
        This is a low level constructor designed to work with parsed
        data or other bridging objects (ForceField and Topology). Not
        recommended to use directly.

        Args:
            masses (pandas.DataFrame): DataFrame with one column
                ["mass"] for Masses section.
            atoms (pandas.DataFrame): DataFrame with multiple columns
                for Atoms section. Column names vary with atom_style.
            box_bounds: A (3, 2) array/list of floats setting the
                boundaries of simulation box.
            box_tilt: A (3,) array/list of floats setting the tilt of
                simulation box. Default to None, i.e., use an
                orthogonal box.
            velocities (pandas.DataFrame): DataFrame with three columns
                ["vx", "vy", "vz"] for Velocities section. Optional
                with default to None. If not None, its index should be
                consistent with atoms.
            force_field (dict): Data for force field sections. Optional
                with default to None. Only keywords in force field and
                class 2 force field are valid keys, and each value is a
                DataFrame.
            topology (dict): Data for topology sections. Optional with
                default to None. Only keywords in topology are valid
                keys, and each value is a DataFrame.
            atom_style (str): Output atom_style. Default to "full".
        """
        # Validate box geometry early: LAMMPS requires a (3, 2) set of
        # lo/hi bounds and, optionally, three tilt factors (xy, xz, yz).
        bounds_arr = np.array(box_bounds)
        bounds_shape = bounds_arr.shape
        assert bounds_shape == (3, 2), \
            "Expecting a (3, 2) array for box_bounds," \
            " got {}".format(bounds_shape)
        box_bounds = bounds_arr.tolist()
        if box_tilt is not None:
            tilt_arr = np.array(box_tilt)
            tilt_shape = tilt_arr.shape
            assert tilt_shape == (3,),\
                "Expecting a (3,) array for box_tilt," \
                " got {}".format(tilt_shape)
            box_tilt = tilt_arr.tolist()
        if velocities is not None:
            assert len(velocities) == len(atoms),\
                "Inconsistency found between atoms and velocities"
        # Silently drop unrecognized section keywords so only valid force
        # field / topology sections are kept.
        if force_field:
            all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
            force_field = {k: v for k, v in force_field.items()
                           if k in all_ff_kws}
        if topology:
            topology = {k: v for k, v in topology.items()
                        if k in SECTION_KEYWORDS["topology"]}
        self.masses = masses
        self.atoms = atoms
        self.box_bounds = box_bounds
        self.box_tilt = box_tilt
        self.velocities = velocities
        self.force_field = force_field
        self.topology = topology
        self.atom_style = atom_style

    def __str__(self):
        # The informal string representation is the full data file content.
        return self.get_string()

    @property
    def structure(self):
        """
        Export a periodic structure object representing the simulation box.

        Return:
            A pymatgen structure object
        """
        masses = self.masses
        atoms = self.atoms.copy()
        # Treat the whole box as a single "molecule" so disassemble()
        # yields one Topology covering every site.
        atoms["molecule-ID"] = 1
        box_bounds = np.array(self.box_bounds)
        box_tilt = self.box_tilt if self.box_tilt else [0.0] * 3
        ld_copy = self.__class__(masses, atoms, box_bounds, box_tilt)
        _, topologies = ld_copy.disassemble()
        molecule = topologies[0].sites
        # Shift coordinates so the box origin is at (0, 0, 0).
        coords = molecule.cart_coords - box_bounds[:, 0]
        species = molecule.species
        # Build the (possibly triclinic) lattice from bounds and tilts.
        matrix = np.diag(box_bounds[:, 1] - box_bounds[:, 0])
        matrix[1, 0] = box_tilt[0]
        matrix[2, 0] = box_tilt[1]
        matrix[2, 1] = box_tilt[2]
        latt = Lattice(matrix)
        site_properties = None if self.velocities is None \
            else {"velocities": self.velocities.values}
        return Structure(latt, species, coords, coords_are_cartesian=True,
                         site_properties=site_properties)

    def get_string(self, distance=6, velocity=8, charge=3):
        """
        Returns the string representation of LammpsData, essentially
        the string to be written to a file.

        Args:
            distance (int): No. of significant figures to output for
                box settings (bounds and tilt) and atomic coordinates.
                Default to 6.
            velocity (int): No. of significant figures to output for
                velocities. Default to 8.
            charge (int): No. of significant figures to output for
                charges. Default to 3.

        Returns:
            String representation
        """
        file_template = """Generated by pymatgen.io.lammps.data.LammpsData
{stats}
{box}
{body}
"""
        # Format box bounds (and tilt, if any) with the requested precision.
        box_ph = "{:.%df}" % distance
        box_lines = []
        for bound, d in zip(self.box_bounds, "xyz"):
            fillers = bound + [d] * 2
            bound_format = " ".join([box_ph] * 2 + [" {}lo {}hi"])
            box_lines.append(bound_format.format(*fillers))
        if self.box_tilt:
            tilt_format = " ".join([box_ph] * 3 + [" xy xz yz"])
            box_lines.append(tilt_format.format(*self.box_tilt))
        box = "\n".join(box_lines)
        # Assemble body sections in the order LAMMPS expects, collecting
        # type/count statistics for the header as we go.
        body_dict = OrderedDict()
        body_dict["Masses"] = self.masses
        types = OrderedDict()
        types["atom"] = len(self.masses)
        if self.force_field:
            all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]
            ff_kws = [k for k in all_ff_kws if k in self.force_field]
            for kw in ff_kws:
                body_dict[kw] = self.force_field[kw]
                if kw in SECTION_KEYWORDS["ff"][2:]:
                    # e.g. "Bond Coeffs" -> "bond" types count.
                    types[kw.lower()[:-7]] = len(self.force_field[kw])
        body_dict["Atoms"] = self.atoms
        counts = OrderedDict()
        counts["atoms"] = len(self.atoms)
        if self.velocities is not None:
            body_dict["Velocities"] = self.velocities
        if self.topology:
            for kw in SECTION_KEYWORDS["topology"]:
                if kw in self.topology:
                    body_dict[kw] = self.topology[kw]
                    counts[kw.lower()] = len(self.topology[kw])
        all_stats = list(counts.values()) + list(types.values())
        # Right-align all numbers to the width of the largest statistic.
        stats_template = "{:>%d} {}" % len(str(max(all_stats)))
        count_lines = [stats_template.format(v, k) for k, v in counts.items()]
        type_lines = [stats_template.format(v, k + " types")
                      for k, v in types.items()]
        stats = "\n".join(count_lines + [""] + type_lines)
        # Per-column float formatters with the requested precisions.
        map_coords = lambda q: ("{:.%df}" % distance).format(q)
        map_velos = lambda q: ("{:.%df}" % velocity).format(q)
        map_charges = lambda q: ("{:.%df}" % charge).format(q)
        formatters = {"x": map_coords, "y": map_coords, "z": map_coords,
                      "vx": map_velos, "vy": map_velos, "vz": map_velos,
                      "q": map_charges}
        section_template = "{kw}\n\n{df}\n"
        parts = []
        for k, v in body_dict.items():
            # PairIJ Coeffs carries explicit id1/id2 columns, so its plain
            # row-number index is not written out.
            index = True if k != "PairIJ Coeffs" else False
            df_string = v.to_string(header=False, formatters=formatters,
                                    index_names=False, index=index)
            parts.append(section_template.format(kw=k, df=df_string))
        body = "\n".join(parts)
        return file_template.format(stats=stats, box=box, body=body)

    def write_file(self, filename, distance=6, velocity=8, charge=3):
        """
        Writes LammpsData to file.

        Args:
            filename (str): Filename.
            distance (int): No. of significant figures to output for
                box settings (bounds and tilt) and atomic coordinates.
                Default to 6.
            velocity (int): No. of significant figures to output for
                velocities. Default to 8.
            charge (int): No. of significant figures to output for
                charges. Default to 3.
        """
        with open(filename, "w") as f:
            f.write(self.get_string(distance=distance, velocity=velocity,
                                    charge=charge))

    def disassemble(self, atom_labels=None, guess_element=True,
                    ff_label="ff_map"):
        """
        Breaks down LammpsData to ForceField and a series of Topology.

        RESTRICTIONS APPLIED:
        1. No complex force field defined not just on atom
            types, where the same type or equivalent types of topology
            may have more than one set of coefficients.
        2. No intermolecular topologies (with atoms from different
            molecule-ID) since a Topology object includes data for ONE
            molecule or structure only.

        Args:
            atom_labels ([str]): List of strings (must be different
                from one another) for labelling each atom type found in
                Masses section. Default to None, where the labels are
                automaticaly added based on either element guess or
                dummy specie assignment.
            guess_element (bool): Whether to guess the element based on
                its atomic mass. Default to True, otherwise dummy
                species "Qa", "Qb", ... will be assigned to various
                atom types. The guessed or assigned elements will be
                reflected on atom labels if atom_labels is None, as
                well as on the species of molecule in each Topology.
            ff_label (str): Site property key for labeling atoms of
                different types. Default to "ff_map".

        Returns:
            ForceField, [Topology]
        """
        atoms_df = self.atoms.copy()
        # Unwrap periodic image flags (nx, ny, nz) into real coordinates.
        if "nx" in atoms_df.columns:
            box_dim = np.ptp(self.box_bounds, axis=1)
            atoms_df[["x", "y", "z"]] += atoms_df[["nx", "ny", "nz"]].values \
                * box_dim
        atoms_df = pd.concat([atoms_df, self.velocities], axis=1)
        # Partition atoms by molecule-ID (single molecule if absent).
        mids = atoms_df.get("molecule-ID")
        if mids is None:
            unique_mids = [1]
            data_by_mols = {1: {"Atoms": atoms_df}}
        else:
            unique_mids = np.unique(mids)
            data_by_mols = {}
            for k in unique_mids:
                df = atoms_df[atoms_df["molecule-ID"] == k]
                data_by_mols[k] = {"Atoms": df}
        masses = self.masses.copy()
        masses["label"] = atom_labels
        unique_masses = np.unique(masses["mass"])
        # Assign an element (nearest atomic mass) or a dummy "Qa"/"Qb"/...
        # specie to every distinct mass.
        if guess_element:
            ref_masses = sorted([el.atomic_mass.real for el in Element])
            diff = np.abs(np.array(ref_masses) - unique_masses[:, None])
            atomic_numbers = np.argmin(diff, axis=1) + 1
            symbols = [Element.from_Z(an).symbol for an in atomic_numbers]
        else:
            symbols = ["Q%s" % a for a in
                       map(chr, range(97, 97 + len(unique_masses)))]
        for um, s in zip(unique_masses, symbols):
            masses.loc[masses["mass"] == um, "element"] = s
        if atom_labels is None:  # add unique labels based on elements
            for el, vc in masses["element"].value_counts().iteritems():
                masses.loc[masses["element"] == el, "label"] = \
                    ["%s%d" % (el, c) for c in range(1, vc + 1)]
        assert masses["label"].nunique(dropna=False) == len(masses), \
            "Expecting unique atom label for each type"
        mass_info = [tuple([r["label"], r["mass"]])
                     for _, r in masses.iterrows()]
        nonbond_coeffs, topo_coeffs = None, None
        if self.force_field:
            if "PairIJ Coeffs" in self.force_field:
                nbc = self.force_field["PairIJ Coeffs"]
                nbc = nbc.sort_values(["id1", "id2"]).drop(["id1", "id2"], axis=1)
                nonbond_coeffs = [list(t) for t in nbc.itertuples(False, None)]
            elif "Pair Coeffs" in self.force_field:
                nbc = self.force_field["Pair Coeffs"].sort_index()
                nonbond_coeffs = [list(t) for t in nbc.itertuples(False, None)]
            # Rebuild topo_coeffs, attaching any class 2 cross terms to
            # their parent keyword.
            topo_coeffs = {k: [] for k in SECTION_KEYWORDS["ff"][2:]
                           if k in self.force_field}
            for kw in topo_coeffs.keys():
                class2_coeffs = {k: list(v.itertuples(False, None))
                                 for k, v in self.force_field.items()
                                 if k in CLASS2_KEYWORDS.get(kw, [])}
                ff_df = self.force_field[kw]
                for t in ff_df.itertuples(True, None):
                    d = {"coeffs": list(t[1:]), "types": []}
                    if class2_coeffs:
                        d.update({k: list(v[t[0] - 1])
                                  for k, v in class2_coeffs.items()})
                    topo_coeffs[kw].append(d)
        if self.topology:
            # Label a topology entry by the atom-type labels of its members.
            label_topo = lambda t: tuple(masses.loc[atoms_df.loc[t, "type"],
                                                    "label"])
            for k, v in self.topology.items():
                ff_kw = k[:-1] + " Coeffs"
                for topo in v.itertuples(False, None):
                    topo_idx = topo[0] - 1
                    indices = topo[1:]
                    mids = atoms_df.loc[indices, "molecule-ID"].unique()
                    assert len(mids) == 1, \
                        "Do not support intermolecular topology formed " \
                        "by atoms with different molecule-IDs"
                    label = label_topo(indices)
                    topo_coeffs[ff_kw][topo_idx]["types"].append(label)
                    if data_by_mols[mids[0]].get(k):
                        data_by_mols[mids[0]][k].append(indices)
                    else:
                        data_by_mols[mids[0]][k] = [indices]
        if topo_coeffs:
            # Deduplicate the type tuples collected per coefficient set.
            for v in topo_coeffs.values():
                for d in v:
                    d["types"] = list(set(d["types"]))
        ff = ForceField(mass_info=mass_info, nonbond_coeffs=nonbond_coeffs,
                        topo_coeffs=topo_coeffs)
        topo_list = []
        # Build one Topology per molecule, shifting atom indices to 0-based.
        for mid in unique_mids:
            data = data_by_mols[mid]
            atoms = data["Atoms"]
            shift = min(atoms.index)
            type_ids = atoms["type"]
            species = masses.loc[type_ids, "element"]
            labels = masses.loc[type_ids, "label"]
            coords = atoms[["x", "y", "z"]]
            m = Molecule(species.values, coords.values,
                         site_properties={ff_label: labels.values})
            charges = atoms.get("q")
            velocities = atoms[["vx", "vy", "vz"]] if "vx" in atoms.columns \
                else None
            topologies = {}
            for kw in SECTION_KEYWORDS["topology"]:
                if data.get(kw):
                    topologies[kw] = (np.array(data[kw]) - shift).tolist()
            topologies = None if not topologies else topologies
            topo_list.append(Topology(sites=m, ff_label=ff_label,
                                      charges=charges, velocities=velocities,
                                      topologies=topologies))
        return ff, topo_list

    @classmethod
    def from_file(cls, filename, atom_style="full", sort_id=False):
        """
        Constructor that parses a file.

        Args:
            filename (str): Filename to read.
            atom_style (str): Associated atom_style. Default to "full".
            sort_id (bool): Whether sort each section by id. Default to
                False.
        """
        with open(filename) as f:
            lines = f.readlines()
        # Split the file at every line containing a known section keyword.
        kw_pattern = r"|".join(itertools.chain(*SECTION_KEYWORDS.values()))
        section_marks = [i for i, l in enumerate(lines)
                         if re.search(kw_pattern, l)]
        parts = np.split(lines, section_marks)

        float_group = r"([0-9eE.+-]+)"
        header_pattern = dict()
        header_pattern["counts"] = r"^\s*(\d+)\s+([a-zA-Z]+)$"
        header_pattern["types"] = r"^\s*(\d+)\s+([a-zA-Z]+)\s+types$"
        header_pattern["bounds"] = r"^\s*{}$".format(r"\s+".join(
            [float_group] * 2 + [r"([xyz])lo \3hi"]))
        header_pattern["tilt"] = r"^\s*{}$".format(r"\s+".join(
            [float_group] * 3 + ["xy xz yz"]))

        # Parse the header for counts, types, bounds and tilt factors.
        header = {"counts": {}, "types": {}}
        bounds = {}
        for l in clean_lines(parts[0][1:]):  # skip the 1st line
            match = None
            for k, v in header_pattern.items():
                match = re.match(v, l)
                if match:
                    break
                else:
                    continue
            if match and k in ["counts", "types"]:
                header[k][match.group(2)] = int(match.group(1))
            elif match and k == "bounds":
                g = match.groups()
                bounds[g[2]] = [float(i) for i in g[:2]]
            elif match and k == "tilt":
                header["tilt"] = [float(i) for i in match.groups()]
        # Missing bounds fall back to LAMMPS' default [-0.5, 0.5].
        header["bounds"] = [bounds.get(i, [-0.5, 0.5]) for i in "xyz"]

        def parse_section(sec_lines):
            # Identify the section by its title line and load the tabular
            # content into a DataFrame with the appropriate column names.
            title_info = sec_lines[0].split("#", 1)
            kw = title_info[0].strip()
            sio = StringIO("".join(sec_lines[2:]))  # skip the 2nd line
            df = pd.read_csv(sio, header=None, comment="#",
                             delim_whitespace=True)
            if kw.endswith("Coeffs") and not kw.startswith("PairIJ"):
                names = ["id"] + ["coeff%d" % i
                                  for i in range(1, df.shape[1])]
            elif kw == "PairIJ Coeffs":
                names = ["id1", "id2"] + ["coeff%d" % i
                                          for i in range(1, df.shape[1] - 1)]
                df.index.name = None
            elif kw in SECTION_HEADERS:
                names = ["id"] + SECTION_HEADERS[kw]
            elif kw == "Atoms":
                names = ["id"] + ATOMS_HEADERS[atom_style]
                if df.shape[1] == len(names):
                    pass
                elif df.shape[1] == len(names) + 3:
                    # Three trailing columns are periodic image flags.
                    names += ["nx", "ny", "nz"]
                else:
                    raise ValueError("Format in Atoms section inconsistent"
                                     " with atom_style %s" % atom_style)
            else:
                raise NotImplementedError("Parser for %s section"
                                          " not implemented" % kw)
            df.columns = names
            if sort_id:
                sort_by = "id" if kw != "PairIJ Coeffs" else ["id1", "id2"]
                df.sort_values(sort_by, inplace=True)
            if "id" in df.columns:
                df.set_index("id", drop=True, inplace=True)
                df.index.name = None
            return kw, df

        err_msg = "Bad LAMMPS data format where "
        body = {}
        seen_atoms = False
        for part in parts[1:]:
            name, section = parse_section(part)
            if name == "Atoms":
                seen_atoms = True
            if name in ["Velocities"] + SECTION_KEYWORDS["topology"] and \
                    not seen_atoms:  # Atoms must appear earlier than these
                raise RuntimeError(err_msg + "%s section appears before"
                                   " Atoms section" % name)
            body.update({name: section})

        # Cross-check section lengths against the header counts.
        err_msg += "Nos. of {} do not match between header and {} section"
        assert len(body["Masses"]) == header["types"]["atom"], \
            err_msg.format("atom types", "Masses")
        atom_sections = ["Atoms", "Velocities"] \
            if "Velocities" in body else ["Atoms"]
        for s in atom_sections:
            assert len(body[s]) == header["counts"]["atoms"], \
                err_msg.format("atoms", s)
        for s in SECTION_KEYWORDS["topology"]:
            if header["counts"].get(s.lower(), 0) > 0:
                assert len(body[s]) == header["counts"][s.lower()], \
                    err_msg.format(s.lower(), s)

        items = {k.lower(): body[k] for k in ["Masses", "Atoms"]}
        items["box_bounds"] = header["bounds"]
        items["box_tilt"] = header.get("tilt")
        items["velocities"] = body.get("Velocities")
        ff_kws = [k for k in body if k
                  in SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]]
        items["force_field"] = {k: body[k] for k in ff_kws} if ff_kws \
            else None
        topo_kws = [k for k in body if k in SECTION_KEYWORDS["topology"]]
        items["topology"] = {k: body[k] for k in topo_kws} \
            if topo_kws else None
        items["atom_style"] = atom_style
        return cls(**items)

    @classmethod
    def from_ff_and_topologies(cls, ff, topologies, box_bounds, box_tilt=None,
                               atom_style="full"):
        """
        Constructor building LammpsData from a ForceField object and a
        list of Topology objects. Do not support intermolecular
        topologies since a Topology object includes data for ONE
        molecule or structure only.

        Args:
            ff (ForceField): ForceField object with data for Masses and
                force field sections.
            topologies ([Topology]): List of Topology objects with data
                for Atoms, Velocities and topology sections.
            box_bounds: A (3, 2) array/list of floats setting the
                boundaries of simulation box.
            box_tilt: A (3,) array/list of floats setting the tilt of
                simulation box. Default to None, i.e., use an
                orthogonal box.
            atom_style (str): Output atom_style. Default to "full".
        """
        atom_types = set.union(*[t.species for t in topologies])
        assert atom_types.issubset(ff.maps["Atoms"].keys()),\
            "Unknown atom type found in topologies"
        items = dict(box_bounds=box_bounds, box_tilt=box_tilt,
                     atom_style=atom_style, masses=ff.masses,
                     force_field=ff.force_field)
        mol_ids, charges, coords, labels = [], [], [], []
        v_collector = [] if topologies[0].velocities else None
        topo_collector = {"Bonds": [], "Angles": [], "Dihedrals": [],
                          "Impropers": []}
        topo_labels = {"Bonds": [], "Angles": [], "Dihedrals": [],
                       "Impropers": []}
        # Concatenate per-molecule data; topology indices are shifted by
        # the number of atoms already collected (+1 for 1-based LAMMPS ids).
        for i, topo in enumerate(topologies):
            if topo.topologies:
                shift = len(labels)
                for k, v in topo.topologies.items():
                    topo_collector[k].append(np.array(v) + shift + 1)
                    topo_labels[k].extend([tuple([topo.type_by_sites[j]
                                                  for j in t]) for t in v])
            if isinstance(v_collector, list):
                v_collector.append(topo.velocities)
            mol_ids.extend([i + 1] * len(topo.sites))
            labels.extend(topo.type_by_sites)
            coords.append(topo.sites.cart_coords)
            q = [0.0] * len(topo.sites) if not topo.charges else topo.charges
            charges.extend(q)
        atoms = pd.DataFrame(np.concatenate(coords), columns=["x", "y", "z"])
        atoms["molecule-ID"] = mol_ids
        atoms["q"] = charges
        atoms["type"] = list(map(ff.maps["Atoms"].get, labels))
        atoms.index += 1
        atoms = atoms[ATOMS_HEADERS[atom_style]]
        velocities = None
        if v_collector:
            velocities = pd.DataFrame(np.concatenate(v_collector),
                                      columns=SECTION_HEADERS["Velocities"])
            velocities.index += 1
        topology = {k: None for k, v in topo_labels.items() if len(v) > 0}
        for k in topology:
            df = pd.DataFrame(np.concatenate(topo_collector[k]),
                              columns=SECTION_HEADERS[k][1:])
            df["type"] = list(map(ff.maps[k].get, topo_labels[k]))
            if any(pd.isnull(df["type"])):  # Throw away undefined topologies
                warnings.warn("Undefined %s detected and removed" % k.lower())
                df.dropna(subset=["type"], inplace=True)
                df.reset_index(drop=True, inplace=True)
            df.index += 1
            topology[k] = df[SECTION_HEADERS[k]]
        topology = {k: v for k, v in topology.items() if not v.empty}
        items.update({"atoms": atoms, "velocities": velocities,
                      "topology": topology})
        return cls(**items)

    @classmethod
    def from_dict(cls, d):
        """Constructor from a MSON dict produced by as_dict()."""
        # DataFrames are round-tripped through JSON with orient="split".
        decode_df = lambda s: pd.read_json(s, orient="split")
        items = dict()
        items["masses"] = decode_df(d["masses"])
        items["atoms"] = decode_df(d["atoms"])
        items["box_bounds"] = d["box_bounds"]
        items["box_tilt"] = d["box_tilt"]
        items["atom_style"] = d["atom_style"]
        velocities = d["velocities"]
        if velocities:
            velocities = decode_df(velocities)
        items["velocities"] = velocities
        force_field = d["force_field"]
        if force_field:
            force_field = {k: decode_df(v) for k, v in force_field.items()}
        items["force_field"] = force_field
        topology = d["topology"]
        if topology:
            topology = {k: decode_df(v) for k, v in topology.items()}
        items["topology"] = topology
        return cls(**items)

    def as_dict(self):
        """Returns a MSON-serializable dict representation."""
        encode_df = lambda df: df.to_json(orient="split")
        d = dict()
        d["@module"] = self.__class__.__module__
        d["class"] = self.__class__.__name__
        d["masses"] = encode_df(self.masses)
        d["atoms"] = encode_df(self.atoms)
        d["box_bounds"] = self.box_bounds
        d["box_tilt"] = self.box_tilt
        d["atom_style"] = self.atom_style
        d["velocities"] = None if self.velocities is None \
            else encode_df(self.velocities)
        d["force_field"] = None if not self.force_field \
            else {k: encode_df(v) for k, v in self.force_field.items()}
        d["topology"] = None if not self.topology \
            else {k: encode_df(v) for k, v in self.topology.items()}
        return d
class Topology(MSONable):
    """
    Class carrying most data in Atoms, Velocities and molecular
    topology sections for ONE SINGLE Molecule or Structure
    object, or a plain list of Sites.
    """

    def __init__(self, sites, ff_label=None, charges=None, velocities=None,
                 topologies=None):
        """
        Args:
            sites ([Site] or SiteCollection): A group of sites in a
                list or as a Molecule/Structure.
            ff_label (str): Site property key for labeling atoms of
                different types. Default to None, i.e., use
                site.species_string.
            charges ([q, ...]): Charge of each site in a (n,)
                array/list, where n is the No. of sites. Default to
                None, i.e., search site property for charges.
            velocities ([[vx, vy, vz], ...]): Velocity of each site in
                a (n, 3) array/list, where n is the No. of sites.
                Default to None, i.e., search site property for
                velocities.
            topologies (dict): Bonds, angles, dihedrals and improper
                dihedrals defined by site indices. Default to None,
                i.e., no additional topology. All four valid keys
                listed below are optional.
                {
                    "Bonds": [[i, j], ...],
                    "Angles": [[i, j, k], ...],
                    "Dihedrals": [[i, j, k, l], ...],
                    "Impropers": [[i, j, k, l], ...]
                }
        """
        if not isinstance(sites, SiteCollection):
            sites = Molecule.from_sites(sites)
        # Atom-type labels come from the chosen site property, falling
        # back to the species string of each site.
        if ff_label:
            type_by_sites = sites.site_properties.get(ff_label)
        else:
            type_by_sites = [site.species_string for site in sites]
        # search for site property if not override
        if charges is None:
            charges = sites.site_properties.get("charge")
        if velocities is None:
            velocities = sites.site_properties.get("velocities")
        # validate shape
        if charges is not None:
            charge_arr = np.array(charges)
            assert charge_arr.shape == (len(sites),),\
                "Wrong format for charges"
            charges = charge_arr.tolist()
        if velocities is not None:
            velocities_arr = np.array(velocities)
            assert velocities_arr.shape == (len(sites), 3), \
                "Wrong format for velocities"
            velocities = velocities_arr.tolist()
        if topologies:
            # Keep only recognized topology keywords.
            topologies = {k: v for k, v in topologies.items()
                          if k in SECTION_KEYWORDS["topology"]}
        self.sites = sites
        self.ff_label = ff_label
        self.charges = charges
        self.velocities = velocities
        self.topologies = topologies
        self.type_by_sites = type_by_sites
        # Distinct atom-type labels present in this molecule.
        self.species = set(type_by_sites)

    @classmethod
    def from_bonding(cls, molecule, bond=True, angle=True, dihedral=True,
                     ff_label=None, charges=None, velocities=None, tol=0.1):
        """
        Another constructor that creates an instance from a molecule.
        Covalent bonds and other bond-based topologies (angles and
        dihedrals) can be automatically determined. Cannot be used for
        non bond-based topologies, e.g., improper dihedrals.

        Args:
            molecule (Molecule): Input molecule.
            bond (bool): Whether find bonds. If set to False, angle and
                dihedral searching will be skipped. Default to True.
            angle (bool): Whether find angles. Default to True.
            dihedral (bool): Whether find dihedrals. Default to True.
            ff_label (str): Site property key for labeling atoms of
                different types. Default to None, i.e., use
                site.species_string.
            charges ([q, ...]): Charge of each site in a (n,)
                array/list, where n is the No. of sites. Default to
                None, i.e., search site property for charges.
            velocities ([[vx, vy, vz], ...]): Velocity of each site in
                a (n, 3) array/list, where n is the No. of sites.
                Default to None, i.e., search site property for
                velocities.
            tol (float): Bond distance tolerance. Default to 0.1.
                Not recommended to alter.
        """
        real_bonds = molecule.get_covalent_bonds(tol=tol)
        bond_list = [list(map(molecule.index, [b.site1, b.site2]))
                     for b in real_bonds]
        if not all((bond, bond_list)):
            # do not search for others if not searching for bonds or no bonds
            return cls(sites=molecule, ff_label=ff_label, charges=charges,
                       velocities=velocities)
        else:
            angle_list, dihedral_list = [], []
            # "Hubs" are atoms participating in more than one bond; only
            # they can be the center of an angle or inner atoms of a
            # dihedral.
            dests, freq = np.unique(bond_list, return_counts=True)
            hubs = dests[np.where(freq > 1)]
            bond_arr = np.array(bond_list)
            if len(hubs) > 0:
                # Map each hub to the atoms bonded to it ("spokes").
                hub_spokes = {}
                for hub in hubs:
                    ix = np.any(np.isin(bond_arr, hub), axis=1)
                    bonds = list(np.unique(bond_arr[ix]))
                    bonds.remove(hub)
                    hub_spokes[hub] = bonds
            # skip angle or dihedral searching if too few bonds or hubs
            dihedral = False if len(bond_list) < 3 or len(hubs) < 2 \
                else dihedral
            angle = False if len(bond_list) < 2 or len(hubs) < 1 else angle
            if angle:
                # Every pair of spokes around a hub forms an angle.
                for k, v in hub_spokes.items():
                    angle_list.extend([[i, k, j] for i, j in
                                       itertools.combinations(v, 2)])
            if dihedral:
                # A bond between two hubs is the central bond of dihedrals.
                hub_cons = bond_arr[np.all(np.isin(bond_arr, hubs), axis=1)]
                for i, j in hub_cons:
                    ks = [k for k in hub_spokes[i] if k != j]
                    ls = [l for l in hub_spokes[j] if l != i]
                    dihedral_list.extend([[k, i, j, l] for k, l in
                                          itertools.product(ks, ls)
                                          if k != l])
            topologies = {k: v for k, v
                          in zip(SECTION_KEYWORDS["topology"][:3],
                                 [bond_list, angle_list, dihedral_list])
                          if len(v) > 0}
            topologies = None if len(topologies) == 0 else topologies
            return cls(sites=molecule, ff_label=ff_label, charges=charges,
                       velocities=velocities, topologies=topologies)
class ForceField(MSONable):
    """
    Class carrying most data in Masses and force field sections.

    Attributes:
        masses (pandas.DataFrame): DataFrame for Masses section.
        force_field (dict): Force field section keywords (keys) and
            data (values) as DataFrames.
        maps (dict): Dict for labeling atoms and topologies.
    """

    # A DataFrame is valid when it contains no NaN, i.e., all input rows
    # had the same length.
    _is_valid = lambda self, df: not pd.isnull(df).values.any()

    def __init__(self, mass_info, nonbond_coeffs=None, topo_coeffs=None):
        """
        Args:
            mass_info (list): List of atomic mass info. Elements,
                strings (symbols) and floats are all acceptable for the
                values, with the first two converted to the atomic mass
                of an element. It is recommended to use
                OrderedDict.items() to prevent key duplications.
                [("C", 12.01), ("H", Element("H")), ("O", "O"), ...]
            nonbond_coeffs [coeffs]: List of pair or pairij
                coefficients, of which the sequence must be sorted
                according to the species in mass_dict. Pair or PairIJ
                determined by the length of list. Optional with default
                to None.
            topo_coeffs (dict): Dict with force field coefficients for
                molecular topologies. Optional with default
                to None. All four valid keys listed below are optional.
                Each value is a list of dicts with non optional keys
                "coeffs" and "types", and related class2 force field
                keywords as optional keys.
                {
                    "Bond Coeffs":
                        [{"coeffs": [coeff],
                          "types": [("C", "C"), ...]}, ...],
                    "Angle Coeffs":
                        [{"coeffs": [coeff],
                          "BondBond Coeffs": [coeff],
                          "types": [("H", "C", "H"), ...]}, ...],
                    "Dihedral Coeffs":
                        [{"coeffs": [coeff],
                          "BondBond13 Coeffs": [coeff],
                          "types": [("H", "C", "C", "H"), ...]}, ...],
                    "Improper Coeffs":
                        [{"coeffs": [coeff],
                          "AngleAngle Coeffs": [coeff],
                          "types": [("H", "C", "C", "H"), ...]}, ...],
                }
                Topology of same type or equivalent types (e.g.,
                ("C", "H") and ("H", "C") bonds) are NOT ALLOWED to
                be defined MORE THAN ONCE with DIFFERENT coefficients.
        """
        # Resolve each mass entry to a float: Element or symbol string
        # becomes its atomic mass, numbers pass through unchanged.
        map_mass = lambda v: v.atomic_mass.real if isinstance(v, Element) \
            else Element(v).atomic_mass.real if isinstance(v, string_types) \
            else v
        index, masses, self.mass_info, atoms_map = [], [], [], {}
        for i, m in enumerate(mass_info):
            index.append(i + 1)
            mass = map_mass(m[1])
            masses.append(mass)
            self.mass_info.append((m[0], mass))
            # 1-based atom type id for each label.
            atoms_map[m[0]] = i + 1
        self.masses = pd.DataFrame({"mass": masses}, index=index)
        self.maps = {"Atoms": atoms_map}
        ff_dfs = {}
        self.nonbond_coeffs = nonbond_coeffs
        if self.nonbond_coeffs:
            ff_dfs.update(self._process_nonbond())
        self.topo_coeffs = topo_coeffs
        if self.topo_coeffs:
            self.topo_coeffs = {k: v for k, v in self.topo_coeffs.items()
                                if k in SECTION_KEYWORDS["ff"][2:]}
            for k in self.topo_coeffs.keys():
                coeffs, mapper = self._process_topo(k)
                ff_dfs.update(coeffs)
                self.maps.update(mapper)
        self.force_field = None if len(ff_dfs) == 0 else ff_dfs

    def _process_nonbond(self):
        # Decide between "Pair Coeffs" (one row per type) and
        # "PairIJ Coeffs" (one row per unordered type pair) from the
        # number of coefficient rows supplied.
        pair_df = pd.DataFrame(self.nonbond_coeffs)
        assert self._is_valid(pair_df), \
            "Invalid nonbond coefficients with rows varying in length"
        npair, ncoeff = pair_df.shape
        pair_df.columns = ["coeff%d" % i for i in range(1, ncoeff + 1)]
        nm = len(self.mass_info)
        ncomb = int(nm * (nm + 1) / 2)
        if npair == nm:
            kw = "Pair Coeffs"
            pair_df.index = range(1, nm + 1)
        elif npair == ncomb:
            kw = "PairIJ Coeffs"
            ids = list(itertools.
                       combinations_with_replacement(range(1, nm + 1), 2))
            id_df = pd.DataFrame(ids, columns=["id1", "id2"])
            pair_df = pd.concat([id_df, pair_df], axis=1)
        else:
            raise ValueError("Expecting {} Pair Coeffs or "
                             "{} PairIJ Coeffs for {} atom types,"
                             " got {}".format(nm, ncomb, nm, npair))
        return {kw: pair_df}

    def _process_topo(self, kw):

        def find_eq_types(label, section):
            # All label orderings that denote the same physical topology:
            # impropers have four equivalent permutations, other sections
            # just the label and its reverse.
            if section.startswith("Improper"):
                label_arr = np.array(label)
                seqs = [[0, 1, 2, 3], [0, 2, 1, 3],
                        [3, 1, 2, 0], [3, 2, 1, 0]]
                return [tuple(label_arr[s]) for s in seqs]
            else:
                return [label] + [label[::-1]]

        main_data, distinct_types = [], []
        class2_data = {k: [] for k in self.topo_coeffs[kw][0].keys()
                       if k in CLASS2_KEYWORDS.get(kw, [])}
        for i, d in enumerate(self.topo_coeffs[kw]):
            main_data.append(d["coeffs"])
            distinct_types.append(d["types"])
            for k in class2_data.keys():
                class2_data[k].append(d[k])
        # Expand every declared type to its equivalent orderings and make
        # sure no type appears under two different coefficient sets.
        distinct_types = [set(itertools.
                              chain(*[find_eq_types(t, kw)
                                      for t in dt])) for dt in distinct_types]
        type_counts = sum([len(dt) for dt in distinct_types])
        type_union = set.union(*distinct_types)
        assert len(type_union) == type_counts, "Duplicated items found " \
            "under different coefficients in %s" % kw
        atoms = set(np.ravel(list(itertools.chain(*distinct_types))))
        assert atoms.issubset(self.maps["Atoms"].keys()), \
            "Undefined atom type found in %s" % kw
        mapper = {}
        for i, dt in enumerate(distinct_types):
            for t in dt:
                mapper[t] = i + 1

        def process_data(data):
            df = pd.DataFrame(data)
            assert self._is_valid(df),\
                "Invalid coefficients with rows varying in length"
            n, c = df.shape
            df.columns = ["coeff%d" % i for i in range(1, c + 1)]
            df.index = range(1, n + 1)
            return df

        all_data = {kw: process_data(main_data)}
        if class2_data:
            all_data.update({k: process_data(v) for k, v
                             in class2_data.items()})
        # e.g. "Bond Coeffs" -> "Bonds" mapper key.
        return all_data, {kw[:-7] + "s": mapper}

    def to_file(self, filename):
        """
        Saves object to a file in YAML format.

        Args:
            filename (str): Filename.
        """
        d = {"mass_info": self.mass_info,
             "nonbond_coeffs": self.nonbond_coeffs,
             "topo_coeffs": self.topo_coeffs}
        yaml = YAML(typ="safe")
        with open(filename, "w") as f:
            yaml.dump(d, f)

    @classmethod
    def from_file(cls, filename):
        """
        Constructor that reads in a file in YAML format.

        Args:
            filename (str): Filename.
        """
        yaml = YAML(typ="safe")
        with open(filename, "r") as f:
            d = yaml.load(f)
        return cls.from_dict(d)

    @classmethod
    def from_dict(cls, d):
        # YAML/JSON round trips turn tuples into lists; restore tuples so
        # type labels stay hashable.
        d["mass_info"] = [tuple(m) for m in d["mass_info"]]
        if d.get("topo_coeffs"):
            for v in d["topo_coeffs"].values():
                for c in v:
                    c["types"] = [tuple(t) for t in c["types"]]
        return cls(d["mass_info"], d["nonbond_coeffs"], d["topo_coeffs"])
def structure_2_lmpdata(structure, ff_elements=None, atom_style="charge"):
    """
    Converts a structure to a LammpsData object with no force field
    parameters and topologies.

    Args:
        structure (Structure): Input structure.
        ff_elements ([str]): List of strings of elements that must be
            present due to force field settings but not necessarily in
            the structure. Default to None.
        atom_style (str): Choose between "atomic" (neutral) and
            "charge" (charged). Default to "charge".

    Returns:
        LammpsData
    """
    s = structure.get_sorted_structure()
    # Rebuild the lattice in the LAMMPS convention: vector a along x,
    # b in the xy plane, with tilt factors xy, xz, yz for triclinic cells.
    a, b, c = s.lattice.abc
    m = s.lattice.matrix
    xhi = a
    xy = np.dot(m[1], m[0] / xhi)
    yhi = np.sqrt(b ** 2 - xy ** 2)
    xz = np.dot(m[2], m[0] / xhi)
    yz = (np.dot(m[1], m[2]) - xy * xz) / yhi
    zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2)
    box_bounds = [[0.0, xhi], [0.0, yhi], [0.0, zhi]]
    box_tilt = [xy, xz, yz]
    # An orthogonal box (all tilt factors zero) is signalled by None.
    box_tilt = None if not any(box_tilt) else box_tilt
    new_latt = Lattice([[xhi, 0, 0], [xy, yhi, 0], [xz, yz, zhi]])
    s.modify_lattice(new_latt)
    symbols = list(s.symbol_set)
    if ff_elements:
        symbols.extend(ff_elements)
    # Sort elements so that atom-type numbering is deterministic.
    elements = sorted(Element(el) for el in set(symbols))
    mass_info = [tuple([i.symbol] * 2) for i in elements]
    ff = ForceField(mass_info)
    topo = Topology(s)
    return LammpsData.from_ff_and_topologies(ff=ff, topologies=[topo],
                                             box_bounds=box_bounds,
                                             box_tilt=box_tilt,
                                             atom_style=atom_style)
| czhengsci/pymatgen | pymatgen/io/lammps/data.py | Python | mit | 45,838 | [
"LAMMPS",
"pymatgen"
] | a8b74c45d89b1351e813cb4976af9122d26a44d09dfacb7aa728a0d2491133fb |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from operator import or_
from copy import deepcopy
from itertools import combinations
from functools import reduce
from collections import defaultdict
import numpy as np
from scipy.stats import pearsonr
from future.builtins import zip
import six
from skbio._base import SkbioObject
from skbio.stats.distance import DistanceMatrix
from ._exception import (NoLengthError, DuplicateNodeError, NoParentError,
MissingNodeError, TreeError)
from skbio.util._decorator import experimental
def distance_from_r(m1, m2):
    r"""Estimates distance as (1-r)/2: neg correl = max distance

    Parameters
    ----------
    m1 : DistanceMatrix
        a distance matrix to compare
    m2 : DistanceMatrix
        a distance matrix to compare

    Returns
    -------
    float
        The distance between m1 and m2
    """
    # Map Pearson's r from [-1, 1] onto [1, 0]: perfect correlation -> 0,
    # perfect anticorrelation -> 1.
    r_value, _ = pearsonr(m1.data.flat, m2.data.flat)
    return (1 - r_value) / 2
class TreeNode(SkbioObject):
r"""Representation of a node within a tree
A `TreeNode` instance stores links to its parent and optional children
nodes. In addition, the `TreeNode` can represent a `length` (e.g., a
branch length) between itself and its parent. Within this object, the use
of "children" and "descendants" is frequent in the documentation. A child
is a direct descendant of a node, while descendants are all nodes that are
below a given node (e.g., grand-children, etc).
Parameters
----------
name : str or None
A node can have a name. It is common for tips in particular to have
names, for instance, in a phylogenetic tree where the tips correspond
to species.
length : float, int, or None
Distances between nodes can be used to represent evolutionary
distances, time, etc.
parent : TreeNode or None
Connect this node to a parent
children : list of TreeNode or None
Connect this node to existing children
Attributes
----------
name
length
parent
children
id
"""
default_write_format = 'newick'
_exclude_from_copy = set(['parent', 'children', '_tip_cache',
'_non_tip_cache'])
@experimental(as_of="0.4.0")
def __init__(self, name=None, length=None, parent=None, children=None):
self.name = name
self.length = length
self.parent = parent
self._tip_cache = {}
self._non_tip_cache = {}
self._registered_caches = set()
self.children = []
self.id = None
if children is not None:
self.extend(children)
@experimental(as_of="0.4.0")
def __repr__(self):
r"""Returns summary of the tree
Returns
-------
str
A summary of this node and all descendants
Notes
-----
This method returns the name of the node and a count of tips and the
number of internal nodes in the tree
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read([u"((a,b)c, d)root;"])
>>> repr(tree)
'<TreeNode, name: root, internal node count: 1, tips count: 3>'
"""
nodes = [n for n in self.traverse(include_self=False)]
n_tips = sum([n.is_tip() for n in nodes])
n_nontips = len(nodes) - n_tips
classname = self.__class__.__name__
name = self.name if self.name is not None else "unnamed"
return "<%s, name: %s, internal node count: %d, tips count: %d>" % \
(classname, name, n_nontips, n_tips)
    @experimental(as_of="0.4.0")
    def __str__(self):
        r"""Returns string version of self, with names and distances

        Returns
        -------
        str
            Returns a Newick representation of the tree

        See Also
        --------
        read
        write

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read([u"((a,b)c);"])
        >>> str(tree)
        '((a,b)c);\n'
        """
        # ``write`` (skbio I/O registry, default format: newick) fills the
        # passed list with text fragments, which are then joined.
        return str(''.join(self.write([])))
    @experimental(as_of="0.4.0")
    def __iter__(self):
        r"""Iterate over direct `children` only (not all descendants)."""
        return iter(self.children)
    @experimental(as_of="0.4.0")
    def __len__(self):
        r"""Return the number of direct children."""
        return len(self.children)
    @experimental(as_of="0.4.0")
    def __getitem__(self, i):
        r"""Node delegates slicing to `children`.

        `i` may be an int or a slice; a slice yields a plain list of
        children, as with any list indexing.
        """
        return self.children[i]
    @experimental(as_of="0.4.0")
    def _adopt(self, node):
        r"""Update `parent` references but does NOT update `children`."""
        # Any structural change invalidates the name-lookup caches.
        self.invalidate_caches()
        # Detach from a previous parent, if any, before re-homing the node.
        if node.parent is not None:
            node.parent.remove(node)
        node.parent = self
        return node
@experimental(as_of="0.4.0")
def append(self, node):
r"""Appends a node to `children`, in-place, cleaning up refs
`append` will invalidate any node lookup caches, remove an existing
parent on `node` if one exists, set the parent of `node` to self
and add the `node` to `self` `children`.
Parameters
----------
node : TreeNode
An existing TreeNode object
See Also
--------
extend
Examples
--------
>>> from skbio import TreeNode
>>> root = TreeNode(name="root")
>>> child1 = TreeNode(name="child1")
>>> child2 = TreeNode(name="child2")
>>> root.append(child1)
>>> root.append(child2)
>>> print(root)
(child1,child2)root;
<BLANKLINE>
"""
self.children.append(self._adopt(node))
@experimental(as_of="0.4.0")
def extend(self, nodes):
r"""Append a `list` of `TreeNode` to `self`.
`extend` will invalidate any node lookup caches, remove existing
parents of the `nodes` if they have any, set their parents to self
and add the nodes to `self` `children`.
Parameters
----------
nodes : list of TreeNode
A list of TreeNode objects
See Also
--------
append
Examples
--------
>>> from skbio import TreeNode
>>> root = TreeNode(name="root")
>>> root.extend([TreeNode(name="child1"), TreeNode(name="child2")])
>>> print(root)
(child1,child2)root;
<BLANKLINE>
"""
self.children.extend([self._adopt(n) for n in nodes[:]])
    @experimental(as_of="0.4.0")
    def pop(self, index=-1):
        r"""Remove a `TreeNode` from `self`.

        Remove a child node by its index position. All node lookup caches
        are invalidated, and the parent reference for the popped node will be
        set to `None`.

        Parameters
        ----------
        index : int
            The index position in `children` to pop (default: last child)

        Returns
        -------
        TreeNode
            The popped child

        See Also
        --------
        remove
        remove_deleted

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read([u"(a,b)c;"])
        >>> print(tree.pop(0))
        a;
        <BLANKLINE>
        """
        # All removal paths funnel through _remove_node.
        return self._remove_node(index)
    def _remove_node(self, idx):
        r"""The actual (and only) method that performs node removal"""
        # Invalidate lookup caches before mutating the topology.
        self.invalidate_caches()
        node = self.children.pop(idx)
        # Orphan the removed subtree.
        node.parent = None
        return node
@experimental(as_of="0.4.0")
def remove(self, node):
r"""Remove a node from self
Remove a `node` from `self` by identity of the node.
Parameters
----------
node : TreeNode
The node to remove from self's children
Returns
-------
bool
`True` if the node was removed, `False` otherwise
See Also
--------
pop
remove_deleted
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read([u"(a,b)c;"])
>>> tree.remove(tree.children[0])
True
"""
for (i, curr_node) in enumerate(self.children):
if curr_node is node:
self._remove_node(i)
return True
return False
    @experimental(as_of="0.4.0")
    def remove_deleted(self, func):
        r"""Delete nodes in which `func(node)` evaluates `True`.

        Remove all descendants from `self` that evaluate `True` from `func`.
        This has the potential to drop clades.

        Parameters
        ----------
        func : a function
            A function that evaluates `True` when a node should be deleted

        See Also
        --------
        pop
        remove

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read([u"(a,b)c;"])
        >>> tree.remove_deleted(lambda x: x.name == 'b')
        >>> print(tree)
        (a)c;
        <BLANKLINE>
        """
        # Nodes are removed while the (preorder) traversal generator is
        # running; a removed node's children have already been pushed onto
        # the traversal stack, so the whole subtree is still visited.
        for node in self.traverse(include_self=False):
            if func(node):
                node.parent.remove(node)
@experimental(as_of="0.4.0")
def prune(self):
r"""Reconstructs correct topology after nodes have been removed.
Internal nodes with only one child will be removed and new connections
will be made to reflect change. This method is useful to call
following node removals as it will clean up nodes with singular
children.
Names and properties of singular children will override the names and
properties of their parents following the prune.
Node lookup caches are invalidated.
See Also
--------
shear
remove
pop
remove_deleted
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
>>> to_delete = tree.find('b')
>>> tree.remove_deleted(lambda x: x == to_delete)
>>> print(tree)
((a)c,(d,e)f)root;
<BLANKLINE>
>>> tree.prune()
>>> print(tree)
((d,e)f,a)root;
<BLANKLINE>
"""
# build up the list of nodes to remove so the topology is not altered
# while traversing
nodes_to_remove = []
for node in self.traverse(include_self=False):
if len(node.children) == 1:
nodes_to_remove.append(node)
# clean up the single children nodes
for node in nodes_to_remove:
child = node.children[0]
if child.length is None or node.length is None:
child.length = child.length or node.length
else:
child.length += node.length
node.parent.append(child)
node.parent.remove(node)
    @experimental(as_of="0.4.0")
    def shear(self, names):
        """Lop off tips until the tree just has the desired tip names.

        Parameters
        ----------
        names : Iterable of str
            The tip names on the tree to keep

        Returns
        -------
        TreeNode
            The resulting tree (a deep copy; self is not modified)

        Raises
        ------
        ValueError
            If the names do not exist in the tree

        See Also
        --------
        prune
        remove
        pop
        remove_deleted

        Examples
        --------
        >>> from skbio import TreeNode
        >>> t = TreeNode.read([u'((H:1,G:1):2,(R:0.5,M:0.7):3);'])
        >>> sheared = t.shear(['G', 'M'])
        >>> print(sheared)
        (G:3.0,M:3.7);
        <BLANKLINE>
        """
        tcopy = self.deepcopy()
        all_tips = {n.name for n in tcopy.tips()}
        ids = set(names)
        if not ids.issubset(all_tips):
            raise ValueError("ids are not a subset of the tree!")
        # Removing a tip can expose its parent as a new (unwanted) tip, so
        # repeat until only the requested tips remain.
        while len(list(tcopy.tips())) != len(ids):
            for n in list(tcopy.tips()):
                if n.name not in ids:
                    n.parent.remove(n)
        # Collapse the single-child internal nodes left behind.
        tcopy.prune()
        return tcopy
    @experimental(as_of="0.4.0")
    def copy(self):
        r"""Returns a copy of self using an iterative approach

        Perform an iterative deepcopy of self. It is not assured that the copy
        of node attributes will be performed iteratively as that depends on
        the copy method of the types being copied

        Returns
        -------
        TreeNode
            A new copy of self

        See Also
        --------
        unrooted_deepcopy
        unrooted_copy

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f)root;"])
        >>> tree_copy = tree.copy()
        >>> tree_nodes = set([id(n) for n in tree.traverse()])
        >>> tree_copy_nodes = set([id(n) for n in tree_copy.traverse()])
        >>> print(len(tree_nodes.intersection(tree_copy_nodes)))
        0
        """
        def __copy_node(node_to_copy):
            r"""Helper method to copy a node"""
            # this is _possibly_ dangerous, we're assuming the node to copy is
            # of the same class as self, and has the same exclusion criteria.
            # however, it is potentially dangerous to mix TreeNode subclasses
            # within a tree, so...
            result = self.__class__()
            # Copy every attribute except linkage and caches
            # (_exclude_from_copy); those are rebuilt during traversal.
            efc = self._exclude_from_copy
            for key in node_to_copy.__dict__:
                if key not in efc:
                    result.__dict__[key] = deepcopy(node_to_copy.__dict__[key])
            return result

        # Explicit stack of [new_node, old_node, children_left_to_visit]
        # avoids hitting the recursion limit on deep trees.
        root = __copy_node(self)
        nodes_stack = [[root, self, len(self.children)]]
        while nodes_stack:
            # check the top node, any children left unvisited?
            top = nodes_stack[-1]
            new_top_node, old_top_node, unvisited_children = top
            if unvisited_children:
                top[2] -= 1
                # Children are visited left-to-right via the negative index.
                old_child = old_top_node.children[-unvisited_children]
                new_child = __copy_node(old_child)
                new_top_node.append(new_child)
                nodes_stack.append([new_child, old_child,
                                    len(old_child.children)])
            else:  # no unvisited children
                nodes_stack.pop()
        return root

    # copy is a full deep copy, so it backs both protocols.
    __copy__ = copy
    __deepcopy__ = deepcopy = copy
@experimental(as_of="0.4.0")
def unrooted_deepcopy(self, parent=None):
r"""Walks the tree unrooted-style and returns a new copy
Perform a deepcopy of self and return a new copy of the tree as an
unrooted copy. This is useful for defining new roots of the tree as
the `TreeNode`.
This method calls `TreeNode.unrooted_copy` which is recursive.
Parameters
----------
parent : TreeNode or None
Used to avoid infinite loops when performing the unrooted traverse
Returns
-------
TreeNode
A new copy of the tree
See Also
--------
copy
unrooted_copy
root_at
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read([u"((a,(b,c)d)e,(f,g)h)i;"])
>>> new_tree = tree.find('d').unrooted_deepcopy()
>>> print(new_tree)
(b,c,(a,((f,g)h)e)d)root;
<BLANKLINE>
"""
root = self.root()
root.assign_ids()
new_tree = root.copy()
new_tree.assign_ids()
new_tree_self = new_tree.find_by_id(self.id)
return new_tree_self.unrooted_copy(parent)
    @experimental(as_of="0.4.0")
    def unrooted_copy(self, parent=None):
        r"""Walks the tree unrooted-style and returns a copy

        Perform a copy of self and return a new copy of the tree as an
        unrooted copy. This is useful for defining new roots of the tree as
        the `TreeNode`.

        This method is recursive.

        Warning, this is _NOT_ a deepcopy

        Parameters
        ----------
        parent : TreeNode or None
            Used to avoid infinite loops when performing the unrooted
            traverse

        Returns
        -------
        TreeNode
            A new copy of the tree

        See Also
        --------
        copy
        unrooted_deepcopy
        root_at

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read([u"((a,(b,c)d)e,(f,g)h)i;"])
        >>> new_tree = tree.find('d').unrooted_copy()
        >>> print(new_tree)
        (b,c,(a,((f,g)h)e)d)root;
        <BLANKLINE>
        """
        # All neighbors except the one we arrived from become children,
        # which is how the walk can move "up" through the old parent.
        neighbors = self.neighbors(ignore=parent)
        children = [c.unrooted_copy(parent=self) for c in neighbors]
        # we might be walking UP the tree, so:
        if parent is None:
            # base edge
            edgename = None
            length = None
        elif parent.parent is self:
            # self's parent is becoming self's child; the edge between them
            # keeps the old parent's name/length.
            edgename = parent.name
            length = parent.length
        else:
            # normal downward walk: the edge keeps self's name/length.
            assert parent is self.parent
            edgename = self.name
            length = self.length
        result = self.__class__(name=edgename, children=children,
                                length=length)
        if parent is None:
            result.name = "root"
        return result
@experimental(as_of="0.4.0")
def count(self, tips=False):
"""Get the count of nodes in the tree
Parameters
----------
tips : bool
If `True`, only return the count of the number of tips
Returns
-------
int
The number of nodes or tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read([u"((a,(b,c)d)e,(f,g)h)i;"])
>>> print(tree.count())
9
>>> print(tree.count(tips=True))
5
"""
if tips:
return len(list(self.tips()))
else:
return len(list(self.traverse(include_self=True)))
    @experimental(as_of="0.4.0")
    def subtree(self, tip_list=None):
        r"""Make a copy of the subtree"""
        # Placeholder: not implemented in this version.
        raise NotImplementedError()
@experimental(as_of="0.4.0")
def subset(self):
r"""Returns set of names that descend from specified node
Get the set of `name` on tips that descend from this node.
Returns
-------
frozenset
The set of names at the tips of the clade that descends from self
See Also
--------
subsets
compare_subsets
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read([u"((a,(b,c)d)e,(f,g)h)i;"])
>>> sorted(tree.subset())
[u'a', u'b', u'c', u'f', u'g']
"""
return frozenset({i.name for i in self.tips()})
    @experimental(as_of="0.4.0")
    def subsets(self):
        r"""Return all sets of names that come from self and its descendants

        Compute all subsets of tip names over `self`, or, represent a tree as
        a set of nested sets.

        Returns
        -------
        frozenset
            A frozenset of frozensets of str

        See Also
        --------
        subset
        compare_subsets

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read([u"(((a,b)c,(d,e)f)h)root;"])
        >>> for s in sorted(tree.subsets()):
        ...     print(sorted(s))
        [u'a', u'b']
        [u'd', u'e']
        [u'a', u'b', u'd', u'e']
        """
        sets = []
        # Bottom-up accumulation: each node's leaf set is the union of its
        # children's. The double-underscore attribute is name-mangled to
        # _TreeNode__leaf_set; it is scratch state left on the nodes.
        for i in self.postorder(include_self=False):
            if not i.children:
                i.__leaf_set = frozenset([i.name])
            else:
                leaf_set = reduce(or_, [c.__leaf_set for c in i.children])
                # Singleton sets (single tips) are not interesting subsets.
                if len(leaf_set) > 1:
                    sets.append(leaf_set)
                i.__leaf_set = leaf_set
        return frozenset(sets)
    @experimental(as_of="0.4.0")
    def root_at(self, node):
        r"""Return a new tree rooted at the provided node.

        This can be useful for drawing unrooted trees with an orientation
        that reflects knowledge of the true root location.

        Parameters
        ----------
        node : TreeNode or str
            The node to root at; a string is looked up with ``find``.

        Returns
        -------
        TreeNode
            A new copy of the tree

        Raises
        ------
        TreeError
            Raises a `TreeError` if a tip is specified as the new root

        See Also
        --------
        root_at_midpoint
        unrooted_deepcopy

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read([u"(((a,b)c,(d,e)f)g,h)i;"])
        >>> print(tree.root_at('c'))
        (a,b,((d,e)f,(h)g)c)root;
        <BLANKLINE>
        """
        if isinstance(node, six.string_types):
            node = self.find(node)
        # A tip cannot serve as a root: it has no children to re-orient.
        if not node.children:
            raise TreeError("Can't use a tip (%s) as the root" %
                            repr(node.name))
        return node.unrooted_deepcopy()
    @experimental(as_of="0.4.0")
    def root_at_midpoint(self):
        r"""Return a new tree rooted at midpoint of the two tips farthest
        apart

        This method doesn't preserve the internal node naming or structure,
        but does keep tip to tip distances correct. Uses `unrooted_copy` but
        operates on a full copy of the tree.

        Raises
        ------
        TreeError
            If a tip ends up being the mid point

        Returns
        -------
        TreeNode
            A tree rooted at its midpoint
        LengthError
            Midpoint rooting requires `length` and will raise (indirectly)
            if evaluated nodes don't have length.

        See Also
        --------
        root_at
        unrooted_deepcopy

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read([u"(((d:1,e:1,(g:1)f:1)c:1)b:1,h:1)a:1;"])
        >>> print(tree.root_at_midpoint())
        ((d:1.0,e:1.0,(g:1.0)f:1.0)c:0.5,((h:1.0)b:1.0):0.5)root;
        <BLANKLINE>
        """
        tree = self.copy()
        max_dist, tips = tree.get_max_distance()
        half_max_dist = max_dist / 2.0
        if max_dist == 0.0:  # only pathological cases with no lengths
            return tree
        tip1 = tree.find(tips[0])
        tip2 = tree.find(tips[1])
        lca = tree.lowest_common_ancestor([tip1, tip2])
        # Climb from whichever tip is on the longer side of the LCA; the
        # midpoint must lie on that side.
        if tip1.accumulate_to_ancestor(lca) > half_max_dist:
            climb_node = tip1
        else:
            climb_node = tip2
        dist_climbed = 0.0
        while dist_climbed + climb_node.length < half_max_dist:
            dist_climbed += climb_node.length
            climb_node = climb_node.parent
        # now midpt is either at on the branch to climb_node's parent
        # or midpt is at climb_node's parent
        # NOTE(review): exact float equality below — the "midpoint exactly
        # at an existing node" branch may be missed to rounding; confirm
        # whether a tolerance is intended.
        if dist_climbed + climb_node.length == half_max_dist:
            # climb to midpoint spot
            climb_node = climb_node.parent
            if climb_node.is_tip():
                raise TreeError('error trying to root tree at tip')
            else:
                return climb_node.unrooted_copy()
        else:
            # make a new node on climb_node's branch to its parent
            old_br_len = climb_node.length
            new_root = tree.__class__()
            climb_node.parent.append(new_root)
            new_root.append(climb_node)
            # Split the branch so that tip-to-tip distances are preserved.
            climb_node.length = half_max_dist - dist_climbed
            new_root.length = old_br_len - climb_node.length
            return new_root.unrooted_copy()
@experimental(as_of="0.4.0")
def is_tip(self):
r"""Returns `True` if the current node has no `children`.
Returns
-------
bool
`True` if the node is a tip
See Also
--------
is_root
has_children
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read([u"((a,b)c);"])
>>> print(tree.is_tip())
False
>>> print(tree.find('a').is_tip())
True
"""
return not self.children
    @experimental(as_of="0.4.0")
    def is_root(self):
        r"""Returns `True` if the current is a root, i.e. has no `parent`.

        Returns
        -------
        bool
            `True` if the node is the root

        See Also
        --------
        is_tip
        has_children

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read([u"((a,b)c);"])
        >>> print(tree.is_root())
        True
        >>> print(tree.find('a').is_root())
        False
        """
        # Identity check against None: a parent node must never be
        # truth-tested (TreeNode defines __len__, so an empty node is falsy).
        return self.parent is None
@experimental(as_of="0.4.0")
def has_children(self):
r"""Returns `True` if the node has `children`.
Returns
-------
bool
`True` if the node has children.
See Also
--------
is_tip
is_root
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read([u"((a,b)c);"])
>>> print(tree.has_children())
True
>>> print(tree.find('a').has_children())
False
"""
return not self.is_tip()
@experimental(as_of="0.4.0")
def traverse(self, self_before=True, self_after=False, include_self=True):
r"""Returns iterator over descendants
This is a depth-first traversal. Since the trees are not binary,
preorder and postorder traversals are possible, but inorder traversals
would depend on the data in the tree and are not handled here.
Parameters
----------
self_before : bool
includes each node before its descendants if True
self_after : bool
includes each node after its descendants if True
include_self : bool
include the initial node if True
`self_before` and `self_after` are independent. If neither is `True`,
only terminal nodes will be returned.
Note that if self is terminal, it will only be included once even if
`self_before` and `self_after` are both `True`.
Yields
------
TreeNode
Traversed node.
See Also
--------
preorder
postorder
pre_and_postorder
levelorder
tips
non_tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read([u"((a,b)c);"])
>>> for node in tree.traverse():
... print(node.name)
None
c
a
b
"""
if self_before:
if self_after:
return self.pre_and_postorder(include_self=include_self)
else:
return self.preorder(include_self=include_self)
else:
if self_after:
return self.postorder(include_self=include_self)
else:
return self.tips(include_self=include_self)
@experimental(as_of="0.4.0")
def preorder(self, include_self=True):
r"""Performs preorder iteration over tree
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
pre_and_postorder
levelorder
tips
non_tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read([u"((a,b)c);"])
>>> for node in tree.preorder():
... print(node.name)
None
c
a
b
"""
stack = [self]
while stack:
curr = stack.pop()
if include_self or (curr is not self):
yield curr
if curr.children:
stack.extend(curr.children[::-1])
    @experimental(as_of="0.4.0")
    def postorder(self, include_self=True):
        r"""Performs postorder iteration over tree.

        This is somewhat inelegant compared to saving the node and its index
        on the stack, but is 30% faster in the average case and 3x faster in
        the worst case (for a comb tree).

        Parameters
        ----------
        include_self : bool
            include the initial node if True

        Yields
        ------
        TreeNode
            Traversed node.

        See Also
        --------
        traverse
        preorder
        pre_and_postorder
        levelorder
        tips
        non_tips

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read([u"((a,b)c);"])
        >>> for node in tree.postorder():
        ...     print(node.name)
        a
        b
        c
        None
        """
        # Iterative postorder: only child indices are kept on the stack;
        # the current node is tracked separately and the walk moves back up
        # through parent pointers.
        child_index_stack = [0]
        curr = self
        curr_children = self.children
        curr_children_len = len(curr_children)
        while 1:
            curr_index = child_index_stack[-1]
            # if there are children left, process them
            if curr_index < curr_children_len:
                curr_child = curr_children[curr_index]
                # if the current child has children, go there
                if curr_child.children:
                    child_index_stack.append(0)
                    curr = curr_child
                    curr_children = curr.children
                    curr_children_len = len(curr_children)
                    curr_index = 0
                # otherwise, yield that child
                else:
                    yield curr_child
                    child_index_stack[-1] += 1
            # if there are no children left, return self, and move to
            # self's parent
            else:
                if include_self or (curr is not self):
                    yield curr
                if curr is self:
                    break
                curr = curr.parent
                curr_children = curr.children
                curr_children_len = len(curr_children)
                child_index_stack.pop()
                child_index_stack[-1] += 1
@experimental(as_of="0.4.0")
def pre_and_postorder(self, include_self=True):
r"""Performs iteration over tree, visiting node before and after
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
preorder
levelorder
tips
non_tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read([u"((a,b)c);"])
>>> for node in tree.pre_and_postorder():
... print(node.name)
None
c
a
b
c
None
"""
# handle simple case first
if not self.children:
if include_self:
yield self
raise StopIteration
child_index_stack = [0]
curr = self
curr_children = self.children
while 1:
curr_index = child_index_stack[-1]
if not curr_index:
if include_self or (curr is not self):
yield curr
# if there are children left, process them
if curr_index < len(curr_children):
curr_child = curr_children[curr_index]
# if the current child has children, go there
if curr_child.children:
child_index_stack.append(0)
curr = curr_child
curr_children = curr.children
curr_index = 0
# otherwise, yield that child
else:
yield curr_child
child_index_stack[-1] += 1
# if there are no children left, return self, and move to
# self's parent
else:
if include_self or (curr is not self):
yield curr
if curr is self:
break
curr = curr.parent
curr_children = curr.children
child_index_stack.pop()
child_index_stack[-1] += 1
@experimental(as_of="0.4.0")
def levelorder(self, include_self=True):
r"""Performs levelorder iteration over tree
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
preorder
pre_and_postorder
tips
non_tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
>>> for node in tree.levelorder():
... print(node.name)
None
c
f
a
b
d
e
"""
queue = [self]
while queue:
curr = queue.pop(0)
if include_self or (curr is not self):
yield curr
if curr.children:
queue.extend(curr.children)
@experimental(as_of="0.4.0")
def tips(self, include_self=False):
r"""Iterates over tips descended from `self`.
Node order is consistent between calls and is ordered by a
postorder traversal of the tree.
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
preorder
pre_and_postorder
levelorder
non_tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
>>> for node in tree.tips():
... print(node.name)
a
b
d
e
"""
for n in self.postorder(include_self=False):
if n.is_tip():
yield n
@experimental(as_of="0.4.0")
def non_tips(self, include_self=False):
r"""Iterates over nontips descended from self
`include_self`, if `True` (default is False), will return the current
node as part of non_tips if it is a non_tip. Node order is consistent
between calls and is ordered by a postorder traversal of the tree.
Parameters
----------
include_self : bool
include the initial node if True
Yields
------
TreeNode
Traversed node.
See Also
--------
traverse
postorder
preorder
pre_and_postorder
levelorder
tips
Examples
--------
>>> from skbio import TreeNode
>>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
>>> for node in tree.non_tips():
... print(node.name)
c
f
"""
for n in self.postorder(include_self):
if not n.is_tip():
yield n
@experimental(as_of="0.4.0")
def invalidate_caches(self, attr=True):
r"""Delete lookup and attribute caches
Parameters
----------
attr : bool, optional
If ``True``, invalidate attribute caches created by
`TreeNode.cache_attr`.
See Also
--------
create_caches
cache_attr
find
"""
if not self.is_root():
self.root().invalidate_caches()
else:
self._tip_cache = {}
self._non_tip_cache = {}
if self._registered_caches and attr:
for n in self.traverse():
for cache in self._registered_caches:
if hasattr(n, cache):
delattr(n, cache)
@experimental(as_of="0.4.0")
def create_caches(self):
r"""Construct an internal lookups to facilitate searching by name
This method will not cache nodes in which the .name is None. This
method will raise `DuplicateNodeError` if a name conflict in the tips
is discovered, but will not raise if on internal nodes. This is
because, in practice, the tips of a tree are required to be unique
while no such requirement holds for internal nodes.
Raises
------
DuplicateNodeError
The tip cache requires that names are unique (with the exception of
names that are None)
See Also
--------
invalidate_caches
cache_attr
find
"""
if not self.is_root():
self.root().create_caches()
else:
if self._tip_cache and self._non_tip_cache:
return
self.invalidate_caches(attr=False)
tip_cache = {}
non_tip_cache = defaultdict(list)
for node in self.postorder():
name = node.name
if name is None:
continue
if node.is_tip():
if name in tip_cache:
raise DuplicateNodeError("Tip with name '%s' already "
"exists!" % name)
tip_cache[name] = node
else:
non_tip_cache[name].append(node)
self._tip_cache = tip_cache
self._non_tip_cache = non_tip_cache
@experimental(as_of="0.4.0")
def find_all(self, name):
r"""Find all nodes that match `name`
The first call to `find_all` will cache all nodes in the tree on the
assumption that additional calls to `find_all` will be made.
Parameters
----------
name : TreeNode or str
The name or node to find. If `name` is `TreeNode` then all other
nodes with the same name will be returned.
Raises
------
MissingNodeError
Raises if the node to be searched for is not found
Returns
-------
list of TreeNode
The nodes found
See Also
--------
find
find_by_id
find_by_func
Examples
--------
>>> from skbio.tree import TreeNode
>>> tree = TreeNode.read([u"((a,b)c,(d,e)d,(f,g)c);"])
>>> for node in tree.find_all('c'):
... print(node.name, node.children[0].name, node.children[1].name)
c a b
c f g
>>> for node in tree.find_all('d'):
... print(node.name, str(node))
d (d,e)d;
<BLANKLINE>
d d;
<BLANKLINE>
"""
root = self.root()
# if what is being passed in looks like a node, just return it
if isinstance(name, root.__class__):
return [name]
root.create_caches()
tip = root._tip_cache.get(name, None)
nodes = root._non_tip_cache.get(name, [])
nodes.append(tip) if tip is not None else None
if not nodes:
raise MissingNodeError("Node %s is not in self" % name)
else:
return nodes
    @experimental(as_of="0.4.0")
    def find(self, name):
        r"""Find a node by `name`.

        The first call to `find` will cache all nodes in the tree on the
        assumption that additional calls to `find` will be made.

        `find` will first attempt to find the node in the tips. If it cannot
        find a corresponding tip, then it will search through the internal
        nodes of the tree. In practice, phylogenetic trees and other common
        trees in biology do not have unique internal node names. As a
        result, this find method will only return the first occurance of an
        internal node encountered on a postorder traversal of the tree.

        Parameters
        ----------
        name : TreeNode or str
            The name or node to find. If `name` is `TreeNode` then it is
            simply returned

        Raises
        ------
        MissingNodeError
            Raises if the node to be searched for is not found

        Returns
        -------
        TreeNode
            The found node

        See Also
        --------
        find_all
        find_by_id
        find_by_func

        Examples
        --------
        >>> from skbio import TreeNode
        >>> tree = TreeNode.read([u"((a,b)c,(d,e)f);"])
        >>> print(tree.find('c').name)
        c
        """
        root = self.root()
        # if what is being passed in looks like a node, just return it
        if isinstance(name, root.__class__):
            return name
        root.create_caches()
        # Tips take precedence over internal nodes with the same name.
        node = root._tip_cache.get(name, None)
        if node is None:
            node = root._non_tip_cache.get(name, [None])[0]
        if node is None:
            raise MissingNodeError("Node %s is not in self" % name)
        else:
            return node
@experimental(as_of="0.4.0")
def find_by_id(self, node_id):
    r"""Find a node by its id.

    The search is rooted: ids are (re)assigned from the root before
    scanning. No id cache is kept, so every call traverses the subtree
    of ``self`` in full.

    Parameters
    ----------
    node_id : int
        The id of the node to locate.

    Returns
    -------
    TreeNode
        The node whose ``id`` equals `node_id`.

    Raises
    ------
    MissingNodeError
        If no node in the subtree has the requested id.

    See Also
    --------
    find
    find_all
    find_by_func
    """
    # ids must be refreshed from the root so they are globally
    # consistent for this tree
    self.root().assign_ids()

    match = next((n for n in self.traverse(include_self=True)
                  if n.id == node_id), None)
    if match is None:
        raise MissingNodeError("ID %d is not in self" % node_id)
    return match
@experimental(as_of="0.4.0")
def find_by_func(self, func):
    r"""Yield nodes for which ``func`` returns True.

    The search covers the subtree rooted at ``self`` (including
    ``self``), in traversal order.

    Parameters
    ----------
    func : callable
        Predicate that accepts a TreeNode and returns a bool;
        True means the node is yielded.

    Yields
    ------
    TreeNode
        Each node satisfying ``func``.

    See Also
    --------
    find
    find_all
    find_by_id
    """
    for candidate in self.traverse(include_self=True):
        if not func(candidate):
            continue
        yield candidate
@experimental(as_of="0.4.0")
def ancestors(self):
    r"""Return the chain of parents from ``self`` up to the root.

    ``self`` itself is excluded; the root is the last element.

    Returns
    -------
    list of TreeNode
        Parents of ``self``, ordered from immediate parent to root.
        Empty if ``self`` is the root.
    """
    path = []
    node = self
    while not node.is_root():
        node = node.parent
        path.append(node)
    return path
@experimental(as_of="0.4.0")
def root(self):
    r"""Return the root of the tree containing ``self``.

    Returns
    -------
    TreeNode
        The topmost ancestor, i.e. the node whose ``is_root()`` is
        True (``self`` when it is already the root).
    """
    node = self
    while True:
        if node.is_root():
            return node
        node = node.parent
@experimental(as_of="0.4.0")
def siblings(self):
    r"""Return the other children of ``self``'s parent.

    ``self`` is excluded from the result; the root has no siblings.

    Returns
    -------
    list of TreeNode
        All children of the parent except ``self``, in child order.

    See Also
    --------
    neighbors
    """
    if self.is_root():
        return []
    # filter by identity so exactly this node is dropped
    return [child for child in self.parent.children if child is not self]
@experimental(as_of="0.4.0")
def neighbors(self, ignore=None):
    r"""Return every node directly connected to ``self``.

    That is, all children plus the parent (when one exists); ``self``
    itself is never included.

    Parameters
    ----------
    ignore : TreeNode, optional
        A connected node to omit from the result.

    Returns
    -------
    list of TreeNode
        The adjacent nodes: children first, then the parent.
    """
    adjacent = [n for n in self.children if n is not None]
    if self.parent is not None:
        adjacent.append(self.parent)
    if ignore is None:
        return adjacent
    return [n for n in adjacent if n is not ignore]
@experimental(as_of="0.4.0")
def lowest_common_ancestor(self, tipnames):
    r"""Return the lowest common ancestor of a list of tips.

    Parameters
    ----------
    tipnames : list of TreeNode or str
        The nodes (or node names) of interest. A single-element list
        degenerates to a plain ``find``.

    Returns
    -------
    TreeNode
        The lowest common ancestor of the passed-in nodes.

    Raises
    ------
    ValueError
        If no tips could be found in the tree.
    """
    if len(tipnames) == 1:
        return self.find(tipnames[0])
    tips = [self.find(name) for name in tipnames]
    if len(tips) == 0:
        raise ValueError("No tips found!")
    # Walk each tip's path toward the root, painting the nodes on the
    # way with a temporary 'black' attribute. The first walker marks
    # its whole path; later walkers stop at the first already-marked
    # node and merely register their arrival branch there.
    nodes_to_scrub = []
    for t in tips:
        if t.is_root():
            # has to be the LCA...
            return t
        prev = t
        curr = t.parent
        while curr and not hasattr(curr, 'black'):
            setattr(curr, 'black', [prev])
            nodes_to_scrub.append(curr)
            prev = curr
            curr = curr.parent
        # increase black count, multiple children lead to here
        if curr:
            curr.black.append(prev)
    # Descend from self: while only a single marked branch leads down,
    # we are still above the LCA; the first node holding more than one
    # mark is the LCA itself.
    curr = self
    while len(curr.black) == 1:
        curr = curr.black[0]
    # clean up tree: strip the temporary marks again
    for n in nodes_to_scrub:
        delattr(n, 'black')
    return curr

lca = lowest_common_ancestor  # for convenience
@classmethod
@experimental(as_of="0.4.0")
def from_taxonomy(cls, lineage_map):
    """Construct a tree from a taxonomy.

    Parameters
    ----------
    lineage_map : iterable of tuple
        ``(id, lineage)`` pairs, where ``lineage`` is an iterable of
        taxonomy names ordered from most general to most specific.
        Each id becomes a tip appended under its deepest lineage name.

    Returns
    -------
    TreeNode
        The constructed taxonomy tree (rooted at an unnamed node).

    Notes
    -----
    All nodes are created with ``cls`` rather than a hard-coded
    ``TreeNode`` (the original mixed the two), so subclasses calling
    ``Sub.from_taxonomy(...)`` get a tree built entirely of ``Sub``
    nodes. Behavior for ``TreeNode`` itself is unchanged.
    """
    root = cls(name=None)
    root._lookup = {}

    for id_, lineage in lineage_map:
        cur_node = root

        # Walk (and extend where missing) the path of named nodes for
        # this lineage.
        for name in lineage:
            if name in cur_node._lookup:
                cur_node = cur_node._lookup[name]
            else:
                new_node = cls(name=name)
                new_node._lookup = {}
                cur_node._lookup[name] = new_node
                cur_node.append(new_node)
                cur_node = new_node

        cur_node.append(cls(name=id_))

    # Remove the temporary per-node lookup tables.
    for node in root.non_tips(include_self=True):
        del node._lookup

    return root
def _balanced_distance_to_tip(self):
"""Return the distance to tip from this node.
The distance to every tip from this node must be equal for this to
return a correct result.
Returns
-------
int
The distance to tip of a length-balanced tree
"""
node = self
distance = 0
while node.has_children():
distance += node.children[0].length
node = node.children[0]
return distance
@classmethod
@experimental(as_of="0.4.0")
def from_linkage_matrix(cls, linkage_matrix, id_list):
    """Return a tree constructed from a SciPy linkage matrix.

    Parameters
    ----------
    linkage_matrix : ndarray
        A SciPy linkage matrix as returned by
        `scipy.cluster.hierarchy.linkage`; each row describes one
        merge as ``(index_a, index_b, distance, count)``.
    id_list : list
        Tip names; indices below ``len(id_list)`` in the linkage
        matrix refer to these tips.

    Returns
    -------
    TreeNode
        An unrooted bifurcated tree.

    Notes
    -----
    Nodes are created with ``cls`` rather than a hard-coded
    ``TreeNode`` so that subclasses round-trip correctly. The lookup
    table is a plain list (the previous object-dtype ``np.empty``
    served only as an indexable container).

    See Also
    --------
    scipy.cluster.hierarchy.linkage
    """
    tip_width = len(id_list)
    cluster_count = len(linkage_matrix)
    lookup_len = cluster_count + tip_width

    # Slots [0, tip_width) hold the named tips; the remaining slots
    # hold the internal cluster nodes produced by successive merges.
    node_lookup = [cls(name=name) for name in id_list]
    node_lookup.extend(cls() for _ in range(tip_width, lookup_len))

    # A full linkage of N tips performs N - 1 merges, so the first
    # fresh cluster slot sits at index N == cluster_count + 1.
    newest_cluster_index = cluster_count + 1

    for link in linkage_matrix:
        child_a = node_lookup[int(link[0])]
        child_b = node_lookup[int(link[1])]

        # Halve the cophenetic distance and subtract the depth already
        # accumulated below each child to get its branch length.
        path_length = link[2] / 2
        child_a.length = path_length - child_a._balanced_distance_to_tip()
        child_b.length = path_length - child_b._balanced_distance_to_tip()

        new_cluster = node_lookup[newest_cluster_index]
        new_cluster.append(child_a)
        new_cluster.append(child_b)
        newest_cluster_index += 1

    # The final merge produced the root.
    return node_lookup[-1]
@experimental(as_of="0.4.0")
def to_taxonomy(self, allow_empty=False, filter_f=None):
    """Yield ``(tip, lineage)`` pairs forming a taxonomy view of self.

    Parameters
    ----------
    allow_empty : bool, optional
        Allow gaps in the taxonomy (i.e. keep unnamed internal nodes
        on the lineage stack instead of skipping them).
    filter_f : function, optional
        Called as ``filter_f(tip_node, lineage_list)``; the pair is
        yielded only when it returns True. Defaults to accepting
        every tip.

    Yields
    ------
    tuple
        ``(tip, [lineage])`` where ``[lineage]`` is the list of named
        ancestors from root to tip; ``None`` and empty names are
        omitted.

    Notes
    -----
    If ``allow_empty`` is ``True`` and the root node has no name, the
    root is still skipped: it is common to have multiple domains in
    one taxonomy, which yields a nameless root that is not meaningful
    in the output.
    """
    if filter_f is None:
        # default: accept every tip
        def filter_f(a, b):
            return True

    self.assign_ids()
    seen = set()
    lineage = []

    # visit internal nodes while traversing out to the tips, and on the
    # way back up; `lineage` acts as a stack of named ancestors and
    # `seen` (by node id) distinguishes the pre- from the post-visit
    for node in self.traverse(self_before=True, self_after=True):
        if node.is_tip():
            if filter_f(node, lineage):
                # yield a copy so callers may retain the list
                yield (node, lineage[:])
        else:
            if allow_empty:
                if node.is_root() and not node.name:
                    continue
            else:
                if not node.name:
                    continue

            if node.id in seen:
                # second (post-order) visit: leaving this clade
                lineage.pop(-1)
            else:
                # first (pre-order) visit: entering this clade
                lineage.append(node.name)
                seen.add(node.id)
@experimental(as_of="0.4.0")
def to_array(self, attrs=None):
    """Return an array representation of self.

    Parameters
    ----------
    attrs : list of tuple or None
        The attributes and types to export, as
        ``[(attribute_name, dtype)]``. If `None`, then `name`
        (object), `length` (float) and `id` (int) are exported.

    Returns
    -------
    dict of array
        {id_index: {id: TreeNode},
         child_index: [(node_id, left_child_id, right_child_id)],
         attr_1: array(...),
         ...
         attr_N: array(...)}

    Raises
    ------
    AttributeError
        If a requested attribute does not exist on ``self``.

    Notes
    -----
    Attribute arrays are in id order, so ``TreeNode.id`` can be used
    as an index into them.
    If `length` is an attribute, this will also record the length off
    the root, which is `nan`. Take care when summing.
    """
    if attrs is None:
        attrs = [('name', object), ('length', float), ('id', int)]
    else:
        # validate against self only; remaining nodes are assumed to
        # carry the same attributes
        for attr, dtype in attrs:
            if not hasattr(self, attr):
                raise AttributeError("Invalid attribute '%s'." % attr)

    # index_tree refreshes ids, so self.id is the maximum id below
    id_index, child_index = self.index_tree()
    n = self.id + 1  # assign_ids starts at 0

    # one flat array per requested attribute, indexed by node id
    tmp = [np.zeros(n, dtype=dtype) for attr, dtype in attrs]

    for node in self.traverse(include_self=True):
        n_id = node.id
        for idx, (attr, dtype) in enumerate(attrs):
            tmp[idx][n_id] = getattr(node, attr)

    results = {'id_index': id_index, 'child_index': child_index}
    results.update({attr: arr for (attr, dtype), arr in zip(attrs, tmp)})
    return results
def _ascii_art(self, char1='-', show_internal=True, compact=False):
LEN = 10
PAD = ' ' * LEN
PA = ' ' * (LEN - 1)
namestr = self.name or '' # prevents name of NoneType
if self.children:
mids = []
result = []
for c in self.children:
if c is self.children[0]:
char2 = '/'
elif c is self.children[-1]:
char2 = '\\'
else:
char2 = '-'
(clines, mid) = c._ascii_art(char2, show_internal, compact)
mids.append(mid + len(result))
result.extend(clines)
if not compact:
result.append('')
if not compact:
result.pop()
(lo, hi, end) = (mids[0], mids[-1], len(result))
prefixes = [PAD] * (lo + 1) + [PA + '|'] * \
(hi - lo - 1) + [PAD] * (end - hi)
mid = np.int(np.trunc((lo + hi) / 2))
prefixes[mid] = char1 + '-' * (LEN - 2) + prefixes[mid][-1]
result = [p + l for (p, l) in zip(prefixes, result)]
if show_internal:
stem = result[mid]
result[mid] = stem[0] + namestr + stem[len(namestr) + 1:]
return (result, mid)
else:
return ([char1 + '-' + namestr], 0)
@experimental(as_of="0.4.0")
def ascii_art(self, show_internal=True, compact=False):
    r"""Render the tree as an ASCII-art string.

    This delegates to a private recursive helper, so very deep trees
    may hit the recursion limit.

    Parameters
    ----------
    show_internal : bool
        Include internal edge (node) names in the drawing.
    compact : bool
        Use exactly one line per tip (no spacer lines).

    Returns
    -------
    str
        An ASCII formatted version of the tree.
    """
    lines, _ = self._ascii_art(show_internal=show_internal,
                               compact=compact)
    return '\n'.join(lines)
@experimental(as_of="0.4.0")
def accumulate_to_ancestor(self, ancestor):
    r"""Sum the branch lengths from ``self`` up to ``ancestor``.

    Parameters
    ----------
    ancestor : TreeNode
        The ancestor to accumulate distance to.

    Returns
    -------
    float
        Sum of branch lengths on the path from ``self`` (inclusive)
        up to, but excluding, ``ancestor``.

    Raises
    ------
    NoParentError
        If ``ancestor`` is not actually an ancestor of ``self``.
    NoLengthError
        If any node on the path (including ``self``) lacks a
        ``length``.

    See Also
    --------
    distance
    """
    accum = 0.0
    curr = self
    while curr is not ancestor:
        if curr.is_root():
            raise NoParentError("Provided ancestor is not in the path")

        if curr.length is None:
            # BUG FIX: '%' binds tighter than 'or', so the original
            # ("..." % curr.name or "unnamed") could never fall back
            # to "unnamed" for nameless nodes.
            raise NoLengthError("No length on node %s found!" %
                                (curr.name or "unnamed"))

        accum += curr.length
        curr = curr.parent

    return accum
@experimental(as_of="0.4.0")
def distance(self, other):
    """Return the branch-length distance between ``self`` and ``other``.

    Convenient for a single pair of nodes; for many pairwise tip
    distances prefer ``tip_tip_distances``.

    Parameters
    ----------
    other : TreeNode
        The node to measure to.

    Returns
    -------
    float
        The sum of branch lengths along the path connecting the two
        nodes (0.0 when they are the same node).

    Raises
    ------
    NoLengthError
        If a node on the path lacks a ``length``.

    See Also
    --------
    tip_tip_distances
    accumulate_to_ancestor
    compare_tip_distances
    get_max_distance
    """
    if self is other:
        return 0.0

    # The path between two nodes passes through their lowest common
    # ancestor; sum the two half-paths.
    ancestor = self.root().lowest_common_ancestor([self, other])
    return (self.accumulate_to_ancestor(ancestor) +
            other.accumulate_to_ancestor(ancestor))
def _set_max_distance(self):
"""Propagate tip distance information up the tree
This method was originally implemented by Julia Goodrich with the
intent of being able to determine max tip to tip distances between
nodes on large trees efficiently. The code has been modified to track
the specific tips the distance is between
"""
for n in self.postorder():
if n.is_tip():
n.MaxDistTips = [[0.0, n], [0.0, n]]
else:
if len(n.children) == 1:
raise TreeError("No support for single descedent nodes")
else:
tip_info = [(max(c.MaxDistTips), c) for c in n.children]
dists = [i[0][0] for i in tip_info]
best_idx = np.argsort(dists)[-2:]
tip_a, child_a = tip_info[best_idx[0]]
tip_b, child_b = tip_info[best_idx[1]]
tip_a[0] += child_a.length or 0.0
tip_b[0] += child_b.length or 0.0
n.MaxDistTips = [tip_a, tip_b]
def _get_max_distance_singledesc(self):
"""returns the max distance between any pair of tips
Also returns the tip names that it is between as a tuple"""
distmtx = self.tip_tip_distances()
idx_max = divmod(distmtx.data.argmax(), distmtx.shape[1])
max_pair = (distmtx.ids[idx_max[0]], distmtx.ids[idx_max[1]])
return distmtx[idx_max], max_pair
@experimental(as_of="0.4.0")
def get_max_distance(self):
    """Return the largest tip-to-tip distance and the two tips involved.

    Returns
    -------
    float
        The distance between the two most distant tips in the tree.
    list of TreeNode
        The two most distant tips themselves.

    Raises
    ------
    NoLengthError
        If a node without ``length`` is encountered.

    See Also
    --------
    distance
    tip_tip_distances
    compare_tip_distances
    """
    if not hasattr(self, 'MaxDistTips'):
        try:
            # fast path: annotate every node with its two deepest
            # [distance, tip] pairs
            self._set_max_distance()
        except TreeError:
            # single-descendant nodes break the fast path; fall back
            # to the full distance matrix
            return self._get_max_distance_singledesc()

    best_dist = 0.0
    best_tips = [None, None]
    for node in self.non_tips(include_self=True):
        first, second = node.MaxDistTips
        candidate = first[0] + second[0]
        if candidate > best_dist:
            best_dist = candidate
            best_tips = [first[1], second[1]]
    return best_dist, best_tips
@experimental(as_of="0.4.0")
def tip_tip_distances(self, endpoints=None):
    """Return the matrix of pairwise distances between tips.

    By default, all pairwise distances are calculated in the tree. If
    `endpoints` are specified, then only the distances between those
    tips are computed.

    Parameters
    ----------
    endpoints : list of TreeNode or str, or None
        A list of TreeNode objects or names of TreeNode objects

    Returns
    -------
    DistanceMatrix
        The distance matrix, with rows/columns ordered as
        ``endpoints`` (or postorder tip order when `endpoints` is
        None).

    Raises
    ------
    ValueError
        If any of the specified `endpoints` are not tips
    NoLengthError
        If a node without length is encountered

    See Also
    --------
    distance
    compare_tip_distances
    """
    all_tips = list(self.tips())
    if endpoints is None:
        tip_order = all_tips
    else:
        tip_order = [self.find(n) for n in endpoints]
        for n in tip_order:
            if not n.is_tip():
                raise ValueError("Node with name '%s' is not a tip." %
                                 n.name)
    # linearize all tips in postorder
    # .__start, .__stop compose the half-open slice of the postorder
    # tip sequence that each node's subtree covers
    for i, node in enumerate(all_tips):
        node.__start, node.__stop = i, i + 1
    # the result map provides index in the result matrix, keyed by a
    # tip's postorder position
    result_map = {n.__start: i for i, n in enumerate(tip_order)}
    num_all_tips = len(all_tips)  # total number of tips
    num_tips = len(tip_order)  # total number of tips in result
    result = np.zeros((num_tips, num_tips), float)  # tip by tip matrix
    distances = np.zeros((num_all_tips), float)  # dist from each tip up to the current node

    def update_result():
        # NOTE: closes over `node` from the loop below. Records the
        # distance between every pair of requested tips that sit under
        # *different* children of `node` (their paths join here).
        for child1, child2 in combinations(node.children, 2):
            for tip1 in range(child1.__start, child1.__stop):
                if tip1 not in result_map:
                    continue
                t1idx = result_map[tip1]
                for tip2 in range(child2.__start, child2.__stop):
                    if tip2 not in result_map:
                        continue
                    t2idx = result_map[tip2]
                    result[t1idx, t2idx] = distances[
                        tip1] + distances[tip2]

    for node in self.postorder():
        if not node.children:
            continue
        # subtree with solved child wedges
        starts, stops = [], []  # to calc ._start and ._stop for curr node
        for child in node.children:
            if child.length is None:
                raise NoLengthError("Node with name '%s' doesn't have a "
                                    "length." % child.name)
            # every tip below `child` moves child.length further away
            distances[child.__start:child.__stop] += child.length
            starts.append(child.__start)
            stops.append(child.__stop)
        node.__start, node.__stop = min(starts), max(stops)
        if len(node.children) > 1:
            update_result()
    # only one triangle was filled; symmetrize on the way out
    return DistanceMatrix(result + result.T, [n.name for n in tip_order])
@experimental(as_of="0.4.0")
def compare_rfd(self, other, proportion=False):
    """Compute the Robinson-Foulds symmetric difference to ``other``.

    When the two trees carry different tip-name sets, the tree with
    the larger set is sheared down to the smaller before comparing.

    Parameters
    ----------
    other : TreeNode
        A tree to compare against.
    proportion : bool
        If True, return the difference as a fraction of the total
        number of subsets in both trees.

    Returns
    -------
    float
        The distance between the trees.

    See Also
    --------
    compare_subsets
    compare_tip_distances

    References
    ----------
    .. [1] Comparison of phylogenetic trees. Robinson and Foulds.
       Mathematical Biosciences. 1981. 53:131-141
    """
    self_names = {n.name for n in self.tips()}
    other_names = {n.name for n in other.tips()}

    first, second = self, other
    if self_names != other_names:
        # shear the larger tip set down to the smaller one
        if self_names < other_names:
            second = other.shear(self_names)
        else:
            first = self.shear(other_names)

    first_sets = first.subsets()
    second_sets = second.subsets()

    # subsets present in exactly one of the two trees
    dist = float(len(first_sets ^ second_sets))
    if proportion:
        dist = dist / (len(first_sets) + len(second_sets))
    return dist
@experimental(as_of="0.4.0")
def compare_subsets(self, other, exclude_absent_taxa=False):
    """Return the fraction of overlapping subsets in which trees differ.

    Tip names present in only one of the trees count as mismatches;
    if that is not wanted, strip the non-matching tips first or use
    ``exclude_absent_taxa``.

    Parameters
    ----------
    other : TreeNode
        The tree to compare.
    exclude_absent_taxa : bool
        If True, restrict every subset to the tip names shared by both
        trees, dropping subsets that shrink below two names.

    Returns
    -------
    float
        Fraction of differing subsets; 1 when nothing is comparable.

    See Also
    --------
    compare_rfd
    compare_tip_distances
    subsets
    """
    mine = self.subsets()
    theirs = other.subsets()

    if exclude_absent_taxa:
        shared = self.subset() & other.subset()
        # restrict to shared names; singletons/empties carry no
        # topology information, so drop them
        mine = frozenset(
            reduced for reduced in (s & shared for s in mine)
            if len(reduced) > 1)
        theirs = frozenset(
            reduced for reduced in (s & shared for s in theirs)
            if len(reduced) > 1)

    total = len(mine) + len(theirs)
    if not total:  # no common subsets after filtering, so max dist
        return 1
    agreeing = len(mine & theirs)
    return 1 - (2 * agreeing / float(total))
@experimental(as_of="0.4.0")
def compare_tip_distances(self, other, sample=None, dist_f=distance_from_r,
                          shuffle_f=np.random.shuffle):
    """Compare ``self`` to ``other`` via tip-to-tip distance matrices.

    The value returned is ``dist_f(m1, m2)`` for the two matrices. The
    default maps a Pearson correlation of +1 to distance 0 and -1 to
    distance +1 (the maximum); use ``distance_from_r_squared`` instead
    if correlations of +1 and -1 should both count as identical.

    Tip names that do not occur in both trees are stripped
    automatically (distances involving them are undefined), and both
    matrices are built in the same shared-name order.

    Parameters
    ----------
    other : TreeNode
        The tree to compare.
    sample : int or None
        If given, randomly subsample this many shared tips before
        comparing; useful for very large trees.
    dist_f : function
        The distance function applied to the two matrices.
    shuffle_f : function
        The shuffling function used when `sample` is not None.

    Returns
    -------
    float
        The distance between the trees.

    Raises
    ------
    ValueError
        If the trees share no tip names.

    See Also
    --------
    compare_subsets
    compare_rfd
    """
    mine_by_name = {t.name: t for t in self.tips()}
    theirs_by_name = {t.name: t for t in other.tips()}
    shared = list(frozenset(mine_by_name) & frozenset(theirs_by_name))

    if not shared:
        raise ValueError("No tip names in common between the two trees.")
    if len(shared) <= 2:
        # with two or fewer shared tips the trees trivially agree
        return 1

    if sample is not None:
        shuffle_f(shared)
        shared = shared[:sample]

    matrix_a = self.tip_tip_distances(
        endpoints=[mine_by_name[name] for name in shared])
    matrix_b = other.tip_tip_distances(
        endpoints=[theirs_by_name[name] for name in shared])
    return dist_f(matrix_a, matrix_b)
@experimental(as_of="0.4.0")
def index_tree(self):
    """Index the tree for rapid lookups within a tree array.

    Ids are (re)assigned via ``assign_ids`` before indexing.

    Returns
    -------
    dict
        A mapping ``{node_id: TreeNode}`` covering every node.
    list of tuple of (int, int, int)
        One entry per internal node, in postorder: the node's id, its
        first child's id and its last child's id.
    """
    self.assign_ids()

    id_index = {}
    child_index = []

    for parent in self.postorder():
        for child in parent.children:
            id_index[child.id] = child
            # only internal nodes (truthiness == "has children")
            # contribute a (self, first-child, last-child) triple
            if child:
                child_index.append((child.id,
                                    child.children[0].id,
                                    child.children[-1].id))

    # the loop above only ever visits nodes as children, so the root
    # (self) must be recorded explicitly
    id_index[self.id] = self
    if self.children:
        child_index.append((self.id,
                            self.children[0].id,
                            self.children[-1].id))
    return id_index, child_index
@experimental(as_of="0.4.0")
def assign_ids(self):
    """Assign topologically stable, unique ids to every node.

    Ids are handed out over each node's children in postorder, so tips
    get low ids and the root (``self``) always receives the highest.
    """
    next_id = 0
    for child in (c for node in self.postorder() for c in node.children):
        child.id = next_id
        next_id += 1
    # the root is nobody's child; give it the final id
    self.id = next_id
@experimental(as_of="0.4.0")
def descending_branch_length(self, tip_subset=None):
    """Find total descending branch length from self or a subset of tips.

    Parameters
    ----------
    tip_subset : iterable of str, or None
        If None, the total descending branch length for all tips in
        the tree is returned. If a list of tip names is provided, only
        the branch length on the paths from those tips up to their
        lowest common ancestor is summed.

    Returns
    -------
    float
        The total descending branch length for the specified tips.

    Raises
    ------
    ValueError
        If ``tip_subset`` contains names that are not tip names.

    Notes
    -----
    This replicates cogent's totalDescendingBranchLength method and
    extends it to subsets of tips. Nodes without a length contribute
    0; the root's length (if any) is ignored.
    """
    self.assign_ids()
    if tip_subset is not None:
        all_tips = self.subset()
        if not set(tip_subset).issubset(all_tips):
            # (message typo fixed: "arent" -> "aren't")
            raise ValueError("tip_subset contains ids that aren't tip "
                             "names.")

        lca = self.lowest_common_ancestor(tip_subset)
        ancestors = {}
        for tip in tip_subset:
            curr = self.find(tip)
            # walk up to (but excluding) the LCA, keying each edge by
            # node id so shared path segments are counted only once
            while curr is not lca:
                ancestors[curr.id] = \
                    curr.length if curr.length is not None else 0.0
                curr = curr.parent
        return sum(ancestors.values())
    else:
        return sum(n.length for n in self.postorder(include_self=True) if
                   n.length is not None)
@experimental(as_of="0.4.0")
def cache_attr(self, func, cache_attrname, cache_type=list):
    """Cache derived attributes on every node of the subtree.

    For each node (postorder), the cached value combines all of the
    node's children's caches with ``cache_type(func(node))`` for the
    node itself: lists are concatenated, sets/frozensets unioned.
    Useful when attributes that would normally require a traversal
    must be looked up frequently.

    Parameters
    ----------
    func : function
        Given the node being evaluated, returns a list of items (or
        an empty list) to cache for that node.
    cache_attrname : str
        Name of the attribute to set on each node with its cache.
    cache_type : {set, frozenset, list}
        The container type of the cache.

    Raises
    ------
    TypeError
        If ``cache_type`` is not ``set``, ``frozenset`` or ``list``.

    Notes
    -----
    WARNING: any cache created by this method will be invalidated if
    the topology of the tree changes (e.g., if
    ``TreeNode.invalidate_caches`` is called).
    """
    if cache_type in [set, frozenset]:
        def merge(accum, extra):
            return accum | extra
    elif cache_type == list:
        def merge(accum, extra):
            return accum + extra
    else:
        raise TypeError("Only list, set and frozenset are supported!")

    for node in self.postorder(include_self=True):
        node._registered_caches.add(cache_attrname)

        # children were visited first (postorder), so their caches
        # already exist; the node's own contribution goes last
        pieces = [getattr(child, cache_attrname)
                  for child in node.children]
        pieces.append(cache_type(func(node)))

        combined = pieces[0]
        for extra in pieces[1:]:
            combined = merge(combined, extra)
        setattr(node, cache_attrname, combined)
@experimental(as_of="0.4.0")
def shuffle(self, k=None, names=None, shuffle_f=np.random.shuffle, n=1):
    """Yield trees with shuffled tip names.

    Parameters
    ----------
    k : int, optional
        The number of tips to shuffle. If k is not `None`, k tips are
        randomly selected, and only those names will be shuffled.
    names : list, optional
        The specific tip names to shuffle. k and names cannot be specified
        at the same time.
    shuffle_f : func
        Shuffle method, this function must accept a list and modify
        inplace.
    n : int, optional
        The number of iterations to perform. Value must be > 0 and `np.inf`
        can be specified for an infinite number of iterations.

    Notes
    -----
    Tip names are shuffled inplace. If neither `k` nor `names` are
    provided, all tips are shuffled.

    Yields
    ------
    TreeNode
        Tree with shuffled tip names.

    Raises
    ------
    ValueError
        If `k` is < 2
        If `n` is < 1
    ValueError
        If both `k` and `names` are specified
    MissingNodeError
        If `names` is specified but one of the names cannot be found

    Examples
    --------
    Alternate the names on two of the tips, 'a', and 'b', and do this 5
    times.

    >>> from skbio import TreeNode
    >>> tree = TreeNode.read([u"((a,b),(c,d));"])
    >>> rev = lambda items: items.reverse()
    >>> shuffler = tree.shuffle(names=['a', 'b'], shuffle_f=rev, n=5)
    >>> for shuffled_tree in shuffler:
    ...     print(shuffled_tree)
    ((b,a),(c,d));
    <BLANKLINE>
    ((a,b),(c,d));
    <BLANKLINE>
    ((b,a),(c,d));
    <BLANKLINE>
    ((a,b),(c,d));
    <BLANKLINE>
    ((b,a),(c,d));
    <BLANKLINE>
    """
    if k is not None and k < 2:
        raise ValueError("k must be None or >= 2")
    if k is not None and names is not None:
        # Fixed: the guard tests `k` and `names`, but the old message
        # reported "n and names ... sametime".
        raise ValueError("k and names cannot be specified at the same time")
    if n < 1:
        raise ValueError("n must be > 0")

    self.assign_ids()

    if names is None:
        all_tips = list(self.tips())

        # NOTE(review): this branch only triggers when the caller passes
        # n=None explicitly; with the documented int/np.inf values it is
        # dead. Preserved as-is to avoid a behavior change.
        if n is None:
            n = len(all_tips)

        shuffle_f(all_tips)
        # k=None slices to the full list, i.e. shuffle every tip.
        names = [tip.name for tip in all_tips[:k]]

    nodes = [self.find(name) for name in names]

    # Since the names are being shuffled, the association between ID and
    # name is no longer reliable
    self.invalidate_caches()

    counter = 0
    while counter < n:
        shuffle_f(names)
        for node, name in zip(nodes, names):
            node.name = name
        yield self
        counter += 1
| wdwvt1/scikit-bio | skbio/tree/_tree.py | Python | bsd-3-clause | 88,326 | [
"VisIt",
"scikit-bio"
] | 8068bacedc1e96be2db0fbb96f6adfd12be1f692ffbe536f485e96d0b951ec49 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes used to enumerate surface sites
and to find adsorption sites on slabs
"""
import numpy as np
from pymatgen import Structure, Lattice, vis
import itertools
import os
from monty.serialization import loadfn
from scipy.spatial import Delaunay
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.coord import in_coord_list_pbc
from pymatgen.analysis.local_env import VoronoiNN
from pymatgen.core.surface import generate_all_slabs
from pymatgen.analysis.structure_matcher import StructureMatcher
from matplotlib import patches
from matplotlib.path import Path
__author__ = "Joseph Montoya"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Joseph Montoya"
__credits__ = "Richard Tran"
__email__ = "montoyjh@lbl.gov"
__status__ = "Development"
__date__ = "December 2, 2015"
class AdsorbateSiteFinder:
"""
This class finds adsorbate sites on slabs and generates
adsorbate structures according to user-defined criteria.
The algorithm for finding sites is essentially as follows:
1. Determine "surface sites" by finding those within
a height threshold along the miller index of the
highest site
2. Create a network of surface sites using the Delaunay
triangulation of the surface sites
3. Assign on-top, bridge, and hollow adsorption sites
at the nodes, edges, and face centers of the Del.
Triangulation
4. Generate structures from a molecule positioned at
these sites
"""
def __init__(self, slab, selective_dynamics=False,
             height=0.9, mi_vec=None):
    """
    Create an AdsorbateSiteFinder object.

    Args:
        slab (Slab): slab object for which to find adsorbate sites
        selective_dynamics (bool): flag for whether to assign
            non-surface sites as fixed for selective dynamics
        height (float): height criteria for selection of surface sites
        mi_vec (3-D array-like): vector corresponding to the vector
            concurrent with the miller index, this enables use with
            slabs that have been reoriented, but the miller vector
            must be supplied manually
        top_surface (bool): Which surface to adsorb, True for the surface
            above the center of mass, False for the surface below
            center of mass
            NOTE(review): documented but not accepted by this
            constructor — confirm against the intended signature.
    """
    # get surface normal from miller index
    if mi_vec:
        self.mvec = mi_vec
    else:
        # Derive the normal from the slab lattice's a x b cross product.
        self.mvec = get_mi_vec(slab)
    # Tag every site as 'surface'/'subsurface' before storing the slab.
    slab = self.assign_site_properties(slab, height)
    if selective_dynamics:
        slab = self.assign_selective_dynamics(slab)
    self.slab = slab
@classmethod
def from_bulk_and_miller(cls, structure, miller_index, min_slab_size=8.0,
                         min_vacuum_size=10.0, max_normal_search=None,
                         center_slab=True, selective_dynamics=False,
                         undercoord_threshold=0.09):
    """
    This method constructs the adsorbate site finder from a bulk
    structure and a miller index, which allows the surface sites
    to be determined from the difference in bulk and slab coordination,
    as opposed to the height threshold.

    Args:
        structure (Structure): structure from which slab
            input to the ASF is constructed
        miller_index (3-tuple or list): miller index to be used
        min_slab_size (float): min slab size for slab generation
        min_vacuum_size (float): min vacuum size for slab generation
        max_normal_search (int): max normal search for slab generation
        center_slab (bool): whether to center slab in slab generation
        selective dynamics (bool): whether to assign surface sites
            to selective dynamics
        undercoord_threshold (float): threshold of "undercoordation"
            to use for the assignment of surface sites. Default is
            0.1, for which surface sites will be designated if they
            are 10% less coordinated than their bulk counterpart

    Returns:
        AdsorbateSiteFinder built on the generated slab, with sites
        tagged via the coordination-deficit criterion.
    """
    # TODO: for some reason this works poorly with primitive cells
    # may want to switch the coordination algorithm eventually

    # Bulk coordination numbers serve as the per-site reference.
    vnn_bulk = VoronoiNN(tol=0.05)
    bulk_coords = [len(vnn_bulk.get_nn(structure, n))
                   for n in range(len(structure))]
    struct = structure.copy(site_properties={'bulk_coordinations': bulk_coords})
    slabs = generate_all_slabs(struct, max_index=max(miller_index),
                               min_slab_size=min_slab_size,
                               min_vacuum_size=min_vacuum_size,
                               max_normal_search=max_normal_search,
                               center_slab=center_slab)
    slab_dict = {slab.miller_index: slab for slab in slabs}

    if miller_index not in slab_dict:
        raise ValueError("Miller index not in slab dict")

    this_slab = slab_dict[miller_index]

    vnn_surface = VoronoiNN(tol=0.05, allow_pathological=True)

    surf_props, undercoords = [], []
    this_mi_vec = get_mi_vec(this_slab)
    # Projection of each site onto the miller direction; the average
    # separates the top half of the slab from the bottom.
    mi_mags = [np.dot(this_mi_vec, site.coords) for site in this_slab]
    average_mi_mag = np.average(mi_mags)
    for n, site in enumerate(this_slab):
        bulk_coord = this_slab.site_properties['bulk_coordinations'][n]
        slab_coord = len(vnn_surface.get_nn(this_slab, n))
        mi_mag = np.dot(this_mi_vec, site.coords)
        # Fractional coordination deficit relative to the bulk.
        undercoord = (bulk_coord - slab_coord) / bulk_coord
        undercoords += [undercoord]
        # Surface = sufficiently undercoordinated AND in the top half.
        if undercoord > undercoord_threshold and mi_mag > average_mi_mag:
            surf_props += ['surface']
        else:
            surf_props += ['subsurface']
    new_site_properties = {'surface_properties': surf_props,
                           'undercoords': undercoords}
    new_slab = this_slab.copy(site_properties=new_site_properties)
    return cls(new_slab, selective_dynamics)
def find_surface_sites_by_height(self, slab, height=0.9, xy_tol=0.05):
    """
    This method finds surface sites by determining which sites are within
    a threshold value in height from the topmost site in a list of sites

    Args:
        site_list (list): list of sites from which to select surface sites
        height (float): threshold in angstroms of distance from topmost
            site in slab along the slab c-vector to include in surface
            site determination
        xy_tol (float): if supplied, will remove any sites which are
            within a certain distance in the miller plane.

    Returns:
        list of sites selected to be within a threshold of the highest
    """
    # Get projection of coordinates along the miller index
    m_projs = np.array([np.dot(site.coords, self.mvec)
                        for site in slab.sites])

    # Mask based on window threshold along the miller index.
    mask = (m_projs - np.amax(m_projs)) >= -height
    surf_sites = [slab.sites[n] for n in np.where(mask)[0]]
    if xy_tol:
        # sort surface sites by height
        surf_sites = [s for (h, s) in zip(m_projs[mask], surf_sites)]
        # NOTE(review): the zip above pairs but doesn't sort; reverse()
        # inverts the existing order — confirm ordering intent.
        surf_sites.reverse()
        unique_sites, unique_perp_fracs = [], []
        for site in surf_sites:
            # Component of the position perpendicular to the miller
            # direction, used for in-plane duplicate detection.
            this_perp = site.coords - np.dot(site.coords, self.mvec)
            this_perp_frac = slab.lattice.get_fractional_coords(this_perp)
            if not in_coord_list_pbc(unique_perp_fracs, this_perp_frac):
                unique_sites.append(site)
                unique_perp_fracs.append(this_perp_frac)
        surf_sites = unique_sites

    return surf_sites
def assign_site_properties(self, slab, height=0.9):
    """
    Assigns site properties.

    Tags every site in the slab as 'surface' or 'subsurface' under the
    'surface_properties' key; if the slab is already tagged it is
    returned unchanged.
    """
    if 'surface_properties' in slab.site_properties.keys():
        # Already annotated (e.g. by from_bulk_and_miller) — keep as-is.
        return slab
    else:
        surf_sites = self.find_surface_sites_by_height(slab, height)
        surf_props = ['surface' if site in surf_sites
                      else 'subsurface' for site in slab.sites]
        return slab.copy(
            site_properties={'surface_properties': surf_props})
def get_extended_surface_mesh(self, repeat=(5, 5, 1)):
    """
    Gets an extended surface mesh for to use for adsorption
    site finding by constructing supercell of surface sites

    Args:
        repeat (3-tuple): repeat for getting extended surface mesh

    Returns:
        Structure containing only the surface sites, tiled as a
        supercell so Delaunay triangulation sees periodic neighbors.
    """
    surf_str = Structure.from_sites(self.surface_sites)
    surf_str.make_supercell(repeat)
    return surf_str
@property
def surface_sites(self):
    """
    convenience method to return a list of surface sites

    Selects the slab sites previously tagged 'surface' under the
    'surface_properties' site property.
    """
    return [site for site in self.slab.sites
            if site.properties['surface_properties'] == 'surface']
def subsurface_sites(self):
"""
convenience method to return list of subsurface sites
"""
return [site for site in self.slab.sites
if site.properties['surface_properties'] == 'subsurface']
def find_adsorption_sites(self, distance=2.0, put_inside=True,
                          symm_reduce=1e-2, near_reduce=1e-2,
                          positions=['ontop', 'bridge', 'hollow'],
                          no_obtuse_hollow=True):
    """
    Finds surface sites according to the above algorithm. Returns
    a list of corresponding cartesian coordinates.

    Args:
        distance (float): distance from the coordinating ensemble
            of atoms along the miller index for the site (i. e.
            the distance from the slab itself)
        put_inside (bool): whether to put the site inside the cell
        symm_reduce (float): symm reduction threshold
        near_reduce (float): near reduction threshold
        positions (list): which positions to include in the site finding
            "ontop": sites on top of surface sites
            "bridge": sites at edges between surface sites in Delaunay
                triangulation of surface sites in the miller plane
            "hollow": sites at centers of Delaunay triangulation faces
            "subsurface": subsurface positions projected into miller plane
        no_obtuse_hollow (bool): flag to indicate whether to include
            obtuse triangular ensembles in hollow sites

    Returns:
        dict mapping each requested position kind (plus 'all') to a
        list of cartesian coordinates.
    """
    ads_sites = {k: [] for k in positions}
    if 'ontop' in positions:
        ads_sites['ontop'] = [s.coords for s in self.surface_sites]
    if 'subsurface' in positions:
        # Get highest site
        ref = self.slab.sites[np.argmax(self.slab.cart_coords[:, 2])]
        # Project diff between highest site and subs site into miller
        ss_sites = [self.mvec * np.dot(ref.coords - s.coords, self.mvec)
                    + s.coords for s in self.subsurface_sites()]
        ads_sites['subsurface'] = ss_sites
    if 'bridge' in positions or 'hollow' in positions:
        # Triangulate surface sites in the (rotated) miller plane.
        mesh = self.get_extended_surface_mesh()
        sop = get_rot(self.slab)
        dt = Delaunay([sop.operate(m.coords)[:2] for m in mesh])
        # TODO: refactor below to properly account for >3-fold
        for v in dt.simplices:
            if -1 not in v:
                dots = []
                # For each triangle corner, compute the dot product of the
                # two edge unit vectors leaving it (cosine of the angle).
                for i_corner, i_opp in zip(range(3), ((1, 2), (0, 2), (0, 1))):
                    corner, opp = v[i_corner], [v[o] for o in i_opp]
                    vecs = [mesh[d].coords - mesh[corner].coords for d in opp]
                    vecs = [vec / np.linalg.norm(vec) for vec in vecs]
                    dots.append(np.dot(*vecs))
                    # Add bridge sites at midpoints of edges of D. Tri
                    if 'bridge' in positions:
                        ads_sites["bridge"].append(
                            self.ensemble_center(mesh, opp))
                # Prevent addition of hollow sites in obtuse triangles
                obtuse = no_obtuse_hollow and (np.array(dots) < 1e-5).any()
                # Add hollow sites at centers of D. Tri faces
                if 'hollow' in positions and not obtuse:
                    ads_sites['hollow'].append(
                        self.ensemble_center(mesh, v))
    ads_sites['all'] = sum(ads_sites.values(), [])
    for key, sites in ads_sites.items():
        # Pare off outer sites for bridge/hollow
        if key in ['bridge', 'hollow']:
            # Keep only sites in the interior of the 5x5 extended mesh,
            # i.e. fractional coords between 1 and 4 cell lengths.
            frac_coords = [self.slab.lattice.get_fractional_coords(ads_site)
                           for ads_site in sites]
            frac_coords = [frac_coord for frac_coord in frac_coords
                           if (frac_coord[0] > 1 and frac_coord[0] < 4
                               and frac_coord[1] > 1 and frac_coord[1] < 4)]
            sites = [self.slab.lattice.get_cartesian_coords(frac_coord)
                     for frac_coord in frac_coords]
        if near_reduce:
            sites = self.near_reduce(sites, threshold=near_reduce)
        if put_inside:
            sites = [put_coord_inside(self.slab.lattice, coord)
                     for coord in sites]
        if symm_reduce:
            sites = self.symm_reduce(sites, threshold=symm_reduce)
        # Finally offset each site along the surface normal.
        sites = [site + distance * self.mvec for site in sites]
        ads_sites[key] = sites
    return ads_sites
def symm_reduce(self, coords_set, threshold=1e-6):
    """
    Reduces the set of adsorbate sites by finding removing
    symmetrically equivalent duplicates

    Args:
        coords_set: coordinate set in cartesian coordinates
        threshold: tolerance for distance equivalence, used
            as input to in_coord_list_pbc for dupl. checking

    Returns:
        list of symmetry-unique coordinates, in cartesian coordinates.
    """
    surf_sg = SpacegroupAnalyzer(self.slab, 0.1)
    symm_ops = surf_sg.get_symmetry_operations()
    unique_coords = []
    # Convert to fractional
    coords_set = [self.slab.lattice.get_fractional_coords(coords)
                  for coords in coords_set]
    for coords in coords_set:
        incoord = False
        # A coordinate is a duplicate if any symmetry image of it matches
        # an already-accepted coordinate (within the PBC tolerance).
        for op in symm_ops:
            if in_coord_list_pbc(unique_coords, op.operate(coords),
                                 atol=threshold):
                incoord = True
                break
        if not incoord:
            unique_coords += [coords]
    # convert back to cartesian
    return [self.slab.lattice.get_cartesian_coords(coords)
            for coords in unique_coords]
def near_reduce(self, coords_set, threshold=1e-4):
    """
    Prunes coordinate set for coordinates that are within
    threshold

    Args:
        coords_set (Nx3 array-like): list or array of coordinates
        threshold (float): threshold value for distance

    Returns:
        list of cartesian coordinates with near-duplicates removed.
    """
    unique_coords = []
    # Compare in fractional space so the PBC distance test is valid.
    coords_set = [self.slab.lattice.get_fractional_coords(coords)
                  for coords in coords_set]
    for coord in coords_set:
        if not in_coord_list_pbc(unique_coords, coord, threshold):
            unique_coords += [coord]
    return [self.slab.lattice.get_cartesian_coords(coords)
            for coords in unique_coords]
def ensemble_center(self, site_list, indices, cartesian=True):
"""
Finds the center of an ensemble of sites selected from
a list of sites. Helper method for the find_adsorption_sites
algorithm.
Args:
site_list (list of sites): list of sites
indices (list of ints): list of ints from which to select
sites from site list
cartesian (bool): whether to get average fractional or
cartesian coordinate
"""
if cartesian:
return np.average([site_list[i].coords for i in indices],
axis=0)
else:
return np.average([site_list[i].frac_coords for i in indices],
axis=0)
def add_adsorbate(self, molecule, ads_coord, repeat=None, translate=True,
                  reorient=True):
    """
    Adds an adsorbate at a particular coordinate. Adsorbate
    represented by a Molecule object and is translated to (0, 0, 0) if
    translate is True, or positioned relative to the input adsorbate
    coordinate if translate is False.

    Args:
        molecule (Molecule): molecule object representing the adsorbate
        ads_coord (array): coordinate of adsorbate position
        repeat (3-tuple or list): input for making a supercell of slab
            prior to placing the adsorbate
        translate (bool): flag on whether to translate the molecule so
            that its CoM is at the origin prior to adding it to the surface
        reorient (bool): flag on whether to reorient the molecule to
            have its z-axis concurrent with miller index

    Returns:
        copy of the slab with the adsorbate appended.
    """
    molecule = molecule.copy()
    if translate:
        # Translate the molecule so that the center of mass of the atoms
        # that have the most negative z coordinate is at (0, 0, 0)
        front_atoms = molecule.copy()
        # Keep only the atoms in the lowest z-plane of the molecule.
        front_atoms._sites = [s for s in molecule.sites
                              if s.coords[2] == min([s.coords[2]
                                                     for s in molecule.sites])]
        x, y, z = front_atoms.center_of_mass
        molecule.translate_sites(vector=[-x, -y, -z])
    if reorient:
        # Reorient the molecule along slab m_index
        sop = get_rot(self.slab)
        molecule.apply_operation(sop.inverse)
    struct = self.slab.copy()
    if repeat:
        struct.make_supercell(repeat)
    # Propagate the slab's per-site bookkeeping onto the adsorbate atoms
    # so the combined structure has consistent site properties.
    if 'surface_properties' in struct.site_properties.keys():
        molecule.add_site_property("surface_properties",
                                   ["adsorbate"] * molecule.num_sites)
    if 'selective_dynamics' in struct.site_properties.keys():
        molecule.add_site_property("selective_dynamics",
                                   [[True, True, True]] * molecule.num_sites)
    for site in molecule:
        struct.append(site.specie, ads_coord + site.coords, coords_are_cartesian=True,
                      properties=site.properties)
    return struct
def assign_selective_dynamics(self, slab):
"""
Helper function to assign selective dynamics site_properties
based on surface, subsurface site properties
Args:
slab (Slab): slab for which to assign selective dynamics
"""
sd_list = []
sd_list = [[False, False, False] if site.properties['surface_properties'] == 'subsurface'
else [True, True, True] for site in slab.sites]
new_sp = slab.site_properties
new_sp['selective_dynamics'] = sd_list
return slab.copy(site_properties=new_sp)
def generate_adsorption_structures(self, molecule, repeat=None, min_lw=5.0,
                                   translate=True, reorient=True, find_args={}):
    """
    Function that generates all adsorption structures for a given
    molecular adsorbate. Can take repeat argument or minimum
    length/width of precursor slab as an input

    Args:
        molecule (Molecule): molecule corresponding to adsorbate
        repeat (3-tuple or list): repeat argument for supercell generation
        min_lw (float): minimum length and width of the slab, only used
            if repeat is None
        translate (bool): whether to translate the molecule's lowest
            atoms to the origin before placement
        reorient (bool): flag on whether or not to reorient adsorbate
            along the miller index
        find_args (dict): dictionary of arguments to be passed to the
            call to self.find_adsorption_sites, e.g. {"distance":2.0}

    Returns:
        list of structures, one per adsorption site found.
    """
    if repeat is None:
        # Choose in-plane repeats so the supercell is at least min_lw wide
        # in both a and b directions.
        xrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[0]))
        yrep = np.ceil(min_lw / np.linalg.norm(self.slab.lattice.matrix[1]))
        repeat = [xrep, yrep, 1]
    structs = []
    for coords in self.find_adsorption_sites(**find_args)['all']:
        structs.append(self.add_adsorbate(molecule, coords,
                                          repeat=repeat, translate=translate, reorient=reorient))
    return structs
def adsorb_both_surfaces(self, molecule, repeat=None, min_lw=5.0,
                         translate=True, reorient=True, find_args={}):
    """
    Function that generates all adsorption structures for a given
    molecular adsorbate on both surfaces of a slab. This is useful
    for calculating surface energy where both surfaces need to be
    equivalent or if we want to calculate nonpolar systems.

    Args:
        molecule (Molecule): molecule corresponding to adsorbate
        repeat (3-tuple or list): repeat argument for supercell generation
        min_lw (float): minimum length and width of the slab, only used
            if repeat is None
        translate (bool): whether to translate the molecule's lowest
            atoms to the origin before placement
        reorient (bool): flag on whether or not to reorient adsorbate
            along the miller index
        find_args (dict): dictionary of arguments to be passed to the
            call to self.find_adsorption_sites, e.g. {"distance":2.0}

    Returns:
        list of slabs with the adsorbate mirrored onto both surfaces.
    """
    # Get the adsorbed surfaces first
    adslabs = self.generate_adsorption_structures(molecule, repeat=repeat,
                                                  min_lw=min_lw,
                                                  translate=translate,
                                                  reorient=reorient,
                                                  find_args=find_args)

    new_adslabs = []
    for adslab in adslabs:
        # Find the adsorbate sites and indices in each slab
        symmetric, adsorbates, indices = False, [], []
        for i, site in enumerate(adslab.sites):
            if site.surface_properties == "adsorbate":
                adsorbates.append(site)
                indices.append(i)

        # Start with the clean slab
        adslab.remove_sites(indices)
        slab = adslab.copy()

        # For each site, we add it back to the slab along with a
        # symmetrically equivalent position on the other side of
        # the slab using symmetry operations
        for adsorbate in adsorbates:
            p2 = adslab.get_symmetric_site(adsorbate.frac_coords)
            slab.append(adsorbate.specie, p2,
                        properties={"surface_properties": "adsorbate"})
            slab.append(adsorbate.specie, adsorbate.frac_coords,
                        properties={"surface_properties": "adsorbate"})
        new_adslabs.append(slab)

    return new_adslabs
def generate_substitution_structures(self, atom, target_species=[],
                                     sub_both_sides=False, range_tol=1e-2,
                                     dist_from_surf=0):
    """
    Function that performs substitution-type doping on the surface and
    returns all possible configurations where one dopant is substituted
    per surface. Can substitute one surface or both.

    Args:
        atom (str): atom corresponding to substitutional dopant
        sub_both_sides (bool): If true, substitute an equivalent
            site on the other surface
        target_species (list): List of specific species to substitute
        range_tol (float): Find viable substitution sites at a specific
            distance from the surface +- this tolerance
        dist_from_surf (float): Distance from the surface to find viable
            substitution sites, defaults to 0 to substitute at the surface

    Returns:
        list of symmetry-distinct substituted slabs.
    """
    # Get symmetrized structure in case we want to substitue both sides
    sym_slab = SpacegroupAnalyzer(self.slab).get_symmetrized_structure()

    # Define a function for substituting a site
    def substitute(site, i):
        # Build one doped slab with the dopant at index i (and, if
        # requested, at the mirror-equivalent site on the other side).
        slab = self.slab.copy()
        props = self.slab.site_properties
        if sub_both_sides:
            # Find an equivalent site on the other surface
            eq_indices = [indices for indices in
                          sym_slab.equivalent_indices if i in indices][0]
            for ii in eq_indices:
                # Equivalent site must be at a different z to count as
                # the "other" surface (compare at 6-decimal precision).
                if "%.6f" % (sym_slab[ii].frac_coords[2]) != \
                        "%.6f" % (site.frac_coords[2]):
                    props["surface_properties"][ii] = "substitute"
                    slab.replace(ii, atom)
                    break

        props["surface_properties"][i] = "substitute"
        slab.replace(i, atom)
        slab.add_site_property("surface_properties",
                               props["surface_properties"])
        return slab

    # Get all possible substitution sites
    substituted_slabs = []
    # Sort sites so that we can define a range relative to the position of the
    # surface atoms, i.e. search for sites above (below) the bottom (top) surface
    sorted_sites = sorted(sym_slab, key=lambda site: site.frac_coords[2])
    if sorted_sites[0].surface_properties == "surface":
        d = sorted_sites[0].frac_coords[2] + dist_from_surf
    else:
        d = sorted_sites[-1].frac_coords[2] - dist_from_surf

    for i, site in enumerate(sym_slab):
        if d - range_tol < site.frac_coords[2] < d + range_tol:
            if target_species and site.species_string in target_species:
                substituted_slabs.append(substitute(site, i))
            elif not target_species:
                substituted_slabs.append(substitute(site, i))

    # Collapse symmetry-equivalent doped structures to one representative.
    matcher = StructureMatcher()
    return [s[0] for s in matcher.group_structures(substituted_slabs)]
def get_mi_vec(slab):
    """
    Convenience function which returns the unit vector aligned
    with the miller index.
    """
    a_vec = slab.lattice.matrix[0]
    b_vec = slab.lattice.matrix[1]
    surface_normal = np.cross(a_vec, b_vec)
    return surface_normal / np.linalg.norm(surface_normal)
def get_rot(slab):
    """
    Gets the transformation to rotate the z axis into the miller index

    Returns:
        SymmOp whose rotation maps the cartesian frame onto the frame
        (new_x, new_y, new_z) where new_z is the miller-index normal.
    """
    new_z = get_mi_vec(slab)
    a, b, c = slab.lattice.matrix
    new_x = a / np.linalg.norm(a)
    # Complete a right-handed orthonormal basis.
    new_y = np.cross(new_z, new_x)
    x, y, z = np.eye(3)
    # Rotation matrix entries are dot products between the old and new
    # basis vectors.
    rot_matrix = np.array([np.dot(*el) for el in
                           itertools.product([x, y, z],
                                             [new_x, new_y, new_z])]).reshape(3, 3)
    rot_matrix = np.transpose(rot_matrix)
    sop = SymmOp.from_rotation_and_translation(rot_matrix)
    return sop
def put_coord_inside(lattice, cart_coordinate):
    """
    Wrap a cartesian coordinate back into the unit cell.

    Converts to fractional coordinates, drops the integer part of each
    component, and converts back to cartesian.
    """
    frac = lattice.get_fractional_coords(cart_coordinate)
    wrapped = [component - np.floor(component) for component in frac]
    return lattice.get_cartesian_coords(wrapped)
def reorient_z(structure):
    """
    reorients a structure such that the z axis is concurrent with the
    normal to the A-B plane

    Returns a rotated copy; the input structure is not modified.
    """
    struct = structure.copy()
    sop = get_rot(struct)
    struct.apply_operation(sop)
    return struct
# Get color dictionary
# Element colors come from the Jmol scheme bundled with pymatgen's vis
# package; the 0-255 RGB ints are normalized to floats for matplotlib
# (dividing by 256.001 keeps every channel strictly below 1.0).
colors = loadfn(os.path.join(os.path.dirname(vis.__file__),
                             "ElementColorSchemes.yaml"))
color_dict = {el: [j / 256.001 for j in colors["Jmol"][el]]
              for el in colors["Jmol"].keys()}
def plot_slab(slab, ax, scale=0.8, repeat=5, window=1.5,
              draw_unit_cell=True, decay=0.2, adsorption_sites=True):
    """
    Function that helps visualize the slab in a 2-D plot, for
    convenient viewing of output of AdsorbateSiteFinder.

    Args:
        slab (slab): Slab object to be visualized
        ax (axes): matplotlib axes with which to visualize
        scale (float): radius scaling for sites
        repeat (int): number of repeating unit cells to visualize
        window (float): window for setting the axes limits, is essentially
            a fraction of the unit cell limits
        draw_unit_cell (bool): flag indicating whether or not to draw cell
        decay (float): how the alpha-value decays along the z-axis
        adsorption_sites (bool): whether to overlay the adsorption sites
            found for the original (un-repeated) slab as 'x' markers

    Returns:
        the matplotlib axes that were drawn onto.
    """
    orig_slab = slab.copy()
    slab = reorient_z(slab)
    orig_cell = slab.lattice.matrix.copy()
    if repeat:
        slab.make_supercell([repeat, repeat, 1])
    # Sort by z so higher atoms are drawn later (on top) and more opaque.
    coords = np.array(sorted(slab.cart_coords, key=lambda x: x[2]))
    sites = sorted(slab.sites, key=lambda x: x.coords[2])
    alphas = 1 - decay * (np.max(coords[:, 2]) - coords[:, 2])
    alphas = alphas.clip(min=0)
    corner = [0, 0, slab.lattice.get_fractional_coords(coords[-1])[-1]]
    corner = slab.lattice.get_cartesian_coords(corner)[:2]
    verts = orig_cell[:2, :2]
    lattsum = verts[0] + verts[1]
    # Draw circles at sites and stack them accordingly
    for n, coord in enumerate(coords):
        r = sites[n].specie.atomic_radius * scale
        # White underlay prevents lower atoms showing through the alpha.
        ax.add_patch(patches.Circle(coord[:2] - lattsum * (repeat // 2),
                                    r, color='w', zorder=2 * n))
        color = color_dict[sites[n].species_string]
        ax.add_patch(patches.Circle(coord[:2] - lattsum * (repeat // 2), r,
                                    facecolor=color, alpha=alphas[n],
                                    edgecolor='k', lw=0.3, zorder=2 * n + 1))
    # Adsorption sites
    if adsorption_sites:
        asf = AdsorbateSiteFinder(orig_slab)
        ads_sites = asf.find_adsorption_sites()['all']
        sop = get_rot(orig_slab)
        ads_sites = [sop.operate(ads_site)[:2].tolist()
                     for ads_site in ads_sites]
        ax.plot(*zip(*ads_sites), color='k', marker='x',
                markersize=10, mew=1, linestyle='', zorder=10000)
    # Draw unit cell
    if draw_unit_cell:
        verts = np.insert(verts, 1, lattsum, axis=0).tolist()
        verts += [[0., 0.]]
        verts = [[0., 0.]] + verts
        codes = [Path.MOVETO, Path.LINETO, Path.LINETO,
                 Path.LINETO, Path.CLOSEPOLY]
        verts = [(np.array(vert) + corner).tolist() for vert in verts]
        path = Path(verts, codes)
        # NOTE(review): ``n`` here is the last index from the site loop
        # above; this raises NameError for an empty slab — confirm.
        patch = patches.PathPatch(path, facecolor='none', lw=2,
                                  alpha=0.5, zorder=2 * n + 2)
        ax.add_patch(patch)
    ax.set_aspect("equal")
    center = corner + lattsum / 2.
    extent = np.max(lattsum)
    lim_array = [center - extent * window, center + extent * window]
    x_lim = [ele[0] for ele in lim_array]
    y_lim = [ele[1] for ele in lim_array]
    ax.set_xlim(x_lim)
    ax.set_ylim(y_lim)
    return ax
| blondegeek/pymatgen | pymatgen/analysis/adsorption.py | Python | mit | 31,445 | [
"Jmol",
"pymatgen"
] | 7383d31e1be0b62fc8ca19ac3f72917885d3b61c2e5161322084a3156adb2d6d |
# $HeadURL$
""" ErrorMessageMonitor gets new errors that have been injected into the
SystemLoggingDB and reports them by mail to the person(s) in charge
of checking that they conform with DIRAC style. Reviewer option
contains the list of users to be notified.
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC import S_OK
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getUserOption
from DIRAC.FrameworkSystem.DB.SystemLoggingDB import SystemLoggingDB
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
AGENT_NAME = 'Logging/ErrorMessageMonitor'
class ErrorMessageMonitor( AgentModule ):
  """Agent that finds unreviewed error strings in the SystemLoggingDB and
  mails them to the configured reviewers, then marks them reviewed.
  """

  def initialize( self ):
    """Build the mailing list from the Reviewer option (falling back to
    Operations /EMail/Logging) and prepare DB/notification clients.
    """
    self.systemLoggingDB = SystemLoggingDB()
    self.notification = NotificationClient()

    userList = self.am_getOption( "Reviewer", [] )
    self.log.debug( "Users to be notified:", ', '.join( userList ) )

    mailList = []
    for user in userList:
      mail = getUserOption( user, 'Email', '' )
      if not mail:
        self.log.warn( "Could not get user's mail", user )
      else:
        mailList.append( mail )

    # Fall back to the Operations-level mailing list if no user resolved.
    if not mailList:
      mailList = Operations().getValue( 'EMail/Logging', [] )

    if not len( mailList ):
      errString = "There are no valid users in the mailing list"
      varString = "[" + ','.join( userList ) + "]"
      self.log.warn( errString, varString )

    self.log.info( "List of mails to be notified", ','.join( mailList ) )
    self._mailAddress = mailList
    self._subject = 'New error messages were entered in the SystemLoggingDB'
    return S_OK()

  def execute( self ):
    """ The main agent execution method
    """
    # Only messages not yet flagged as reviewed are of interest.
    condDict = {'ReviewedMessage':0}
    result = self.systemLoggingDB.getCounters( 'FixedTextMessages', ['ReviewedMessage'], condDict )
    if not result['OK']:
      return result

    if not result['Value']:
      self.log.info( 'No messages need review' )
      return S_OK( 'No messages need review' )

    returnFields = [ 'FixedTextID', 'FixedTextString', 'SystemName',
                     'SubSystemName' ]
    result = self.systemLoggingDB._queryDB( showFieldList = returnFields,
                                            groupColumn = 'FixedTextString',
                                            condDict = condDict )
    if not result['OK']:
      self.log.error( 'Failed to obtain the non reviewed Strings',
                      result['Message'] )
      return S_OK()
    messageList = result['Value']

    if messageList == 'None' or not messageList:
      self.log.error( 'The DB query returned an empty result' )
      return S_OK()

    mailBody = 'These new messages have arrived to the Logging Service\n'
    for message in messageList:
      mailBody = mailBody + "String: '" + message[1] + "'\tSystem: '" \
          + message[2] + "'\tSubsystem: '" + message[3] + "'\n"

    if self._mailAddress:
      result = self.notification.sendMail( self._mailAddress, self._subject, mailBody )
      if not result[ 'OK' ]:
        self.log.warn( "The mail could not be sent" )
        return S_OK()

    messageIDs = [ message[0] for message in messageList ]
    condDict = {'FixedTextID': messageIDs}
    result = self.systemLoggingDB.updateFields( 'FixedTextMessages', ['ReviewedMessage'], [1], condDict = condDict )
    if not result['OK']:
      # Fixed: error payload lives under 'Message' (as used above), not
      # 'ERROR', which would raise KeyError on this failure path.
      self.log.error( 'Could not update message Status', result['Message'] )
      return S_OK()

    self.log.verbose( 'Updated message Status for:', str( messageList ) )
    self.log.info( "The messages have been sent for review",
                   "There are %s new descriptions" % len( messageList ) )
    return S_OK( "%s Messages have been sent for review" % len( messageList ) )
| Sbalbp/DIRAC | FrameworkSystem/Agent/ErrorMessageMonitor.py | Python | gpl-3.0 | 3,908 | [
"DIRAC"
] | f7ab8298266b282ee8cea883fd092486621dd22e7b79cb404725f9f42e9ff047 |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import json
import mmap
import os
import re
import sys
# CLI setup: an optional explicit list of files to check; with no
# arguments every file under --rootdir is scanned.
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')

# Repository root defaults to two directories above this script.
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument("--rootdir", default=rootdir, help="root directory to examine")

# Directory holding the boilerplate.<ext>.txt reference headers.
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir)

args = parser.parse_args()
def get_refs():
    """Load the reference boilerplate headers.

    Scans args.boilerplate_dir for files named ``boilerplate.<ext>.txt``
    and returns a dict mapping each extension to the header's lines.
    """
    refs = {}

    for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
        extension = os.path.basename(path).split(".")[1]

        # 'with' guarantees the handle is closed even if reading fails
        # (the original opened/closed manually and leaked on error).
        with open(path, 'r') as ref_file:
            refs[extension] = ref_file.read().splitlines()

    return refs
def file_passes(filename, refs, regexs):
    """Return True if *filename* begins with the expected boilerplate header.

    refs: dict mapping extension (or basename for extension-less files)
          to the reference header lines, as produced by get_refs().
    regexs: dict of compiled patterns from get_regexs().
    """
    try:
        # 'with' closes the file even if read() raises. Catch only I/O
        # errors (missing/unreadable file) instead of the previous bare
        # 'except:', which also swallowed KeyboardInterrupt and real bugs.
        with open(filename, 'r') as f:
            data = f.read()
    except (IOError, OSError):
        return False

    basename = os.path.basename(filename)
    extension = file_extension(filename)
    # Look the reference up by extension, falling back to the basename
    # (covers extension-less files such as Makefiles).
    if extension != "":
        ref = refs[extension]
    else:
        ref = refs[basename]

    # remove build tags from the top of Go files
    if extension == "go":
        p = regexs["go_build_constraints"]
        (data, found) = p.subn("", data, 1)
    # remove shebang from the top of shell files (mutually exclusive with Go)
    elif extension == "sh":
        p = regexs["shebang"]
        (data, found) = p.subn("", data, 1)

    data = data.splitlines()

    # if our test file is smaller than the reference it surely fails!
    if len(ref) > len(data):
        return False

    # trim our file to the same number of lines as the reference file
    data = data[:len(ref)]

    # The literal placeholder "YEAR" belongs only in the templates; a real
    # source file containing it was copied without filling in the year.
    p = regexs["year"]
    for d in data:
        if p.search(d):
            return False

    # Replace the first concrete year (2014|2015|2016) with "YEAR" so the
    # comparison against the reference header is date-insensitive.
    p = regexs["date"]
    for i, d in enumerate(data):
        (data[i], found) = p.subn('YEAR', d)
        if found != 0:
            break

    # the (normalized) header must now match the reference exactly
    return ref == data
def file_extension(filename):
    """Return the lower-cased extension of *filename*, without its dot."""
    _, suffix = os.path.splitext(filename)
    return suffix.split(".")[-1].lower()
# Path fragments that are never boilerplate-checked (vendored code, build
# output, VCS metadata). Matched by substring in normalize_files()/get_files().
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh', 'vendor']
def normalize_files(files):
    """Drop paths containing any skipped fragment and absolutize the rest
    against args.rootdir."""
    kept = [p for p in files if not any(skip in p for skip in skipped_dirs)]
    return [p if os.path.isabs(p) else os.path.join(args.rootdir, p)
            for p in kept]
def get_files(extensions):
    """Return absolute paths of candidate files whose extension (or, for
    extension-less files, basename) appears in *extensions*.

    Uses the explicit file list from the command line when given, otherwise
    walks the whole tree under args.rootdir.
    """
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        files = []
        for root, dirs, walkfiles in os.walk(args.rootdir):
            # Prune skipped directories in-place so os.walk never descends
            # into them. normalize_files() would discard their contents
            # anyway; pruning here just avoids walking large trees for
            # nothing.
            dirs[:] = [d for d in dirs if d not in skipped_dirs]
            files.extend(os.path.join(root, name) for name in walkfiles)
    files = normalize_files(files)
    return [pathname for pathname in files
            if file_extension(pathname) in extensions
            or os.path.basename(pathname) in extensions]
def get_regexs():
    """Build the compiled patterns used by file_passes()."""
    return {
        # "YEAR" belongs in the boilerplate templates only, never in a
        # real source file.
        "year": re.compile('YEAR'),
        # Concrete copyright years that get normalized to "YEAR".
        "date": re.compile('(2014|2015|2016)'),
        # Go build-constraint comment block at the top of a file.
        "go_build_constraints": re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE),
        # Shebang line(s) at the top of shell scripts.
        "shebang": re.compile(r"^(#!.*\n)\n*", re.MULTILINE),
    }
def main():
    """Print (to stdout) every candidate file lacking correct boilerplate."""
    regexs = get_regexs()
    refs = get_refs()
    # Only check files whose extension/basename has a reference header.
    filenames = get_files(refs.keys())
    for filename in filenames:
        if not file_passes(filename, refs, regexs):
            print(filename, file=sys.stdout)

# NOTE(review): main() always returns None, so the process exits 0 even when
# failures are printed -- callers must inspect stdout, not the exit status.
# Confirm that is the intended contract before relying on the exit code.
if __name__ == "__main__":
    sys.exit(main())
| Kaffa-MY/kubernetes | hack/boilerplate/boilerplate.py | Python | apache-2.0 | 5,204 | [
"VisIt"
] | 6e5f00b7268f417d916b8fb331cf462098fcfe8c583a716264900a69becf7586 |
# 10.07.2007, c
# last revision: 25.03.2008
from __future__ import absolute_import
from sfepy import data_dir
from sfepy.mechanics.matcoefs import stiffness_from_lame
# Meshes paired element-wise with all_your_bases: each mesh is solved with
# the approximation order at the same index.
filename_meshes = ['/meshes/3d/cube_medium_tetra.mesh',
                   '/meshes/3d/cube_medium_tetra.mesh',
                   '/meshes/3d/cube_medium_hexa.mesh']
filename_meshes = [data_dir + name for name in filename_meshes]
# Approximation orders for the displacement field, one per mesh above.
all_your_bases = [1, 2, 1]
# Set per-iteration inside Test.test_get_solution().
filename_mesh = None
field_1 = {
    'name' : '3_displacement',
    'dtype' : 'real',
    'shape' : (3,),
    'region' : 'Omega',
    # Filled in by the test for each entry of all_your_bases.
    'approx_order' : None,
}
def get_pars(dim, full=False):
    """Return the Lame parameters ``(lam, mu)``, or -- when ``full`` is
    True -- the corresponding isotropic stiffness matrix
    ``lam * (o x o) + mu * diag(o + 1)`` in symmetric (Voigt) storage.
    """
    import numpy as nm

    lam, mu = 1e1, 1e0
    if not full:
        return lam, mu

    # Symmetric storage size for a dim-dimensional tensor.
    sym = dim * (dim + 1) // 2
    # Indicator of the normal-strain components: ones for the first dim
    # entries, zeros for the shear entries.
    indicator = nm.array([1.0] * dim + [0.0] * (sym - dim), dtype=nm.float64)
    return lam * nm.outer(indicator, indicator) + mu * nm.diag(indicator + 1.0)
material_1 = {
'name' : 'solid',
'values' : {
'Dijkl' : get_pars( 3, True ),
'D' : stiffness_from_lame(3, get_pars(3)[0], get_pars(3)[1]),
'lam' : get_pars(3)[0],
'mu' : get_pars(3)[1],
}
}
material_2 = {
'name' : 'spring',
'values' : {
'.stiffness' : 1e0,
}
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_displacement',
'dual' : 'u',
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Bottom',
'select' : 'vertices in (z < -0.499)',
'kind' : 'facet',
}
region_2 = {
'name' : 'Top',
'select' : 'vertices in (z > 0.499)',
'kind' : 'facet',
}
ebc_1 = {
'name' : 'Load',
'region' : 'Top',
'dofs' : {'u.2' : 0.1},
}
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations_getpars = {
'balance_of_forces' :
"""dw_lin_elastic.i.Omega(solid.Dijkl, v, u)
= dw_point_lspring.i.Bottom(spring.stiffness, v, u)""",
}
equations_matcoefs = {
'balance_of_forces' :
"""dw_lin_elastic.i.Omega(solid.D, v, u)
= dw_point_lspring.i.Bottom(spring.stiffness, v, u)""",
}
equations_iso = {
'balance_of_forces' :
"""dw_lin_elastic_iso.i.Omega(solid.lam, solid.mu, v, u)
= dw_point_lspring.i.Bottom(spring.stiffness, v, u)""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
}
from sfepy.base.testing import TestCommon
##
# 10.07.2007, c
class Test( TestCommon ):
    """Check that the three equivalent linear-elasticity formulations
    (full tensor, Lame-derived stiffness, isotropic term) give the same
    solution on every mesh/approximation-order pair."""
    # test_get_solution must run first: it populates self.solutions for
    # test_linear_terms.
    tests = ['test_get_solution', 'test_linear_terms']
    ##
    # 10.07.2007, c
    def from_conf( conf, options ):
        """Factory used by the sfepy test runner."""
        return Test( conf = conf, options = options )
    from_conf = staticmethod( from_conf )
    ##
    # c: 25.03.2008, r: 25.03.2008
    def test_linear_terms( self ):
        """Compare the three solutions stored by test_get_solution."""
        ok = True
        for sols in self.solutions:
            ok = ok and self.compare_vectors(sols[0], sols[1],
                                             label1 = 'getpars',
                                             label2 = 'matcoefs')
            ok = ok and self.compare_vectors(sols[0], sols[2],
                                             label1 = 'getpars',
                                             label2 = 'iso')
        return ok
    ##
    # c: 10.07.2007, r: 25.03.2008
    def test_get_solution( self ):
        """Solve the problem with each equation set on each mesh; record
        convergence and stash the state vectors for test_linear_terms."""
        from sfepy.applications import solve_pde
        from sfepy.base.base import IndexedStruct
        import os.path as op
        ok = True
        self.solutions = []
        for ii, approx_order in enumerate(all_your_bases):
            fname = filename_meshes[ii]
            self.conf.filename_mesh = fname
            # Re-declare the field with this iteration's approximation order.
            fields = {'field_1' : {
                'name' : '3_displacement',
                'dtype' : 'real',
                'shape' : (3,),
                'region' : 'Omega',
                'approx_order' : approx_order,
            }
            }
            self.conf.edit('fields', fields)
            self.report('mesh: %s, base: %s' % (fname, approx_order))
            status = IndexedStruct()
            self.report('getpars')
            self.conf.equations = self.conf.equations_getpars
            problem, state1 = solve_pde(self.conf, status=status,
                                        save_results=False)
            converged = status.nls_status.condition == 0
            ok = ok and converged
            self.report('converged: %s' % converged)
            self.report('matcoefs')
            self.conf.equations = self.conf.equations_matcoefs
            problem, state2 = solve_pde(self.conf, status=status,
                                        save_results=False)
            converged = status.nls_status.condition == 0
            ok = ok and converged
            self.report('converged: %s' % converged)
            self.report('iso')
            self.conf.equations = self.conf.equations_iso
            problem, state3 = solve_pde(self.conf, status=status,
                                        save_results=False)
            converged = status.nls_status.condition == 0
            ok = ok and converged
            self.report('converged: %s' % converged)
            # Store the raw state vectors for the comparison test.
            self.solutions.append((state1(), state2(), state3()))
            name = op.join(self.options.out_dir,
                           '_'.join(('test_elasticity_small_strain',
                                     op.splitext(op.basename(fname))[0],
                                     '%d' % approx_order))
                           + '.vtk')
            problem.save_state(name, state1)
        return ok
| rc/sfepy | tests/test_elasticity_small_strain.py | Python | bsd-3-clause | 5,744 | [
"VTK"
] | bb76c97630d6079426556f72ee3d3761d77ef1345fef20f7d15ff0da2b9b56ec |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Copyright (C) 2012 Computational Neuroscience Group, NMBU.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
import numpy as np
import neuron
from neuron import units
from pathlib import Path
import h5py
def _run_simulation_with_probes(cell, cvode, probes=None,
                                variable_dt=False, atol=0.001, rtol=0.,
                                to_memory=True,
                                to_file=False, file_name=None):
    '''Initialize and run simulation in NEURON,
    repeatedly calling neuron.h.fadvance() until cell.tstop is reached.

    Parameters
    ----------
    cell: LFPy.Cell like object
        cell model providing dt, tstart, tstop, v_init, totnsegs,
        allseclist etc.
    cvode: neuron.h.CVode object
        solver object; activated when ``variable_dt`` is True
    probes: list of objects or None
        probe objects implementing ``get_transformation_matrix()``; each
        probe receives a ``data`` attribute holding its transformed
        membrane currents. Defaults to an empty list.
    variable_dt: bool
        use NEURON's variable-timestep integration
        (incompatible with ``to_file``)
    atol: float
        absolute tolerance for variable-timestep integration
    rtol: float
        relative tolerance for variable-timestep integration
    to_memory: bool
        keep probe data in memory (numpy arrays)
    to_file: bool
        stream probe data to an HDF5 file instead of memory
    file_name: str
        output file name used when ``to_file`` is True
        (a '.h5' suffix is appended if missing)

    Returns
    -------
    None
    '''
    # Guard against the shared mutable-default-argument pitfall
    # (previously ``probes=[]``); bind a fresh list per call instead.
    if probes is None:
        probes = []
    if variable_dt and to_file:
        raise NotImplementedError('to_file=True with variable_dt=True '
                                  'not supported')
    # Initialize NEURON simulations of cell object
    neuron.h.dt = cell.dt
    # needed for variable dt method
    if variable_dt:
        cvode.active(1)
        cvode.atol(atol)
        cvode.rtol(rtol)
    else:
        cvode.active(0)
    # re-initialize state
    neuron.h.finitialize(cell.v_init * units.mV)
    # initialize current- and record
    if cvode.active():
        cvode.re_init()
    else:
        neuron.h.fcurrent()
    neuron.h.frecord_init()  # wrong voltages t=0 for tstart < 0 otherwise
    # Start simulation at tstart (which may be < 0)
    neuron.h.t = cell.tstart
    # load spike times from NetCon
    cell._load_spikes()
    # temporary vector to store membrane currents at each timestep
    imem = np.zeros(cell.totnsegs)
    # precompute linear transformation matrices for each probe
    transforms = []  # container
    for probe in probes:
        M = probe.get_transformation_matrix()
        assert M.shape[-1] == cell.totnsegs, \
            'Linear transformation shape mismatch'
        transforms.append(M)
        if not variable_dt:
            probe.data = np.zeros((M.shape[0], int(cell.tstop / cell.dt) + 1))
        else:
            # for variable_dt, data will be added to last axis each time step
            probe.data = np.zeros((M.shape[0], 0))
    if to_file:
        # ensure right file extension:
        file_name = Path(file_name)
        if file_name.suffix != '.h5':
            file_name = file_name.parent / (file_name.name + '.h5')
        if not cvode.active():
            print('creating output file {}'.format(file_name))
        f = h5py.File(file_name, 'w')
        # create empty data arrays for data storage of output
        # corresponding to each probe. The naming scheme is
        # probe.__class__.__name__+'0', probe.__class__.__name__+'1' etc.
        names = []
        for probe, M in zip(probes, transforms):
            name = probe.__class__.__name__
            i = 0
            while True:
                if name + '{}'.format(i) not in names:
                    names.append(name + '{}'.format(i))
                    break
                i += 1
            # BUGFIX: create the dataset under the unique name computed
            # above (e.g. 'Probe0', 'Probe1'); using the bare class name
            # collided when two probes shared a class.
            probe.data = f.create_dataset(
                name=names[-1],
                shape=(M.shape[0],
                       int(cell.tstop / cell.dt) + 1),
                dtype=float)

    def get_imem(imem):
        # gather per-segment transmembrane currents into the work vector
        i = 0
        for sec in cell.allseclist:
            for seg in sec:
                imem[i] = seg.i_membrane_
                i += 1
        return imem
    tstep = 0
    # run fadvance until time limit, and calculate LFPs for each timestep
    while neuron.h.t < cell.tstop:
        if neuron.h.t >= 0:
            imem = get_imem(imem)
            for j, (probe, transform) in enumerate(zip(probes, transforms)):
                if not variable_dt:
                    probe.data[:, tstep] = transform @ imem
                else:
                    probe.data = np.c_[probes[j].data, transform @ imem]
            tstep += 1
        neuron.h.fadvance()
    # calculate LFP after final fadvance() if needed
    # (may occur for certain values for dt)
    if tstep < len(cell._neuron_tvec):
        imem = get_imem(imem)
        for j, (probe, transform) in enumerate(zip(probes, transforms)):
            if not variable_dt:
                probe.data[:, tstep] = transform @ imem
            else:
                probe.data = np.c_[probes[j].data, transform @ imem]
    if to_file:
        f.close()
def _collect_geometry_neuron(cell):
    '''Loop over allseclist to determine area, diam, xyz-start- and
    endpoints of every segment, and embed the geometry on the cell object
    (cell.x/y/z as (totnsegs, 2) start/end arrays; cell.area, cell.d,
    cell.length as per-segment vectors).'''
    areavec = np.zeros(cell.totnsegs)
    diamvec = np.zeros(cell.totnsegs)
    lengthvec = np.zeros(cell.totnsegs)
    xstartvec = np.zeros(cell.totnsegs)
    xendvec = np.zeros(cell.totnsegs)
    ystartvec = np.zeros(cell.totnsegs)
    yendvec = np.zeros(cell.totnsegs)
    zstartvec = np.zeros(cell.totnsegs)
    zendvec = np.zeros(cell.totnsegs)
    counter = 0
    # loop over all segments
    for sec in cell.allseclist:
        n3d = int(neuron.h.n3d(sec=sec))
        nseg = sec.nseg
        # half a segment's length in normalized arc-length units
        gsen2 = 1. / 2 / nseg
        if n3d > 0:
            # create interpolation objects for the xyz pt3d info:
            L = np.zeros(n3d)
            x = np.zeros(n3d)
            y = np.zeros(n3d)
            z = np.zeros(n3d)
            for i in range(n3d):
                L[i] = neuron.h.arc3d(i, sec=sec)
                x[i] = neuron.h.x3d(i, sec=sec)
                y[i] = neuron.h.y3d(i, sec=sec)
                z[i] = neuron.h.z3d(i, sec=sec)
            # normalize as seg.x [0, 1]
            L /= sec.L
            # temporary store position of segment midpoints
            segx = np.zeros(nseg)
            for i, seg in enumerate(sec):
                segx[i] = seg.x
            # round to guard against float noise from the NEURON->Python
            # transfer nudging the endpoints slightly outside [0, 1]
            segx0 = (segx - gsen2).round(decimals=6)
            segx1 = (segx + gsen2).round(decimals=6)
            # fill vectors with interpolated coordinates of start and end
            # points
            xstartvec[counter:counter + nseg] = np.interp(segx0, L, x)
            xendvec[counter:counter + nseg] = np.interp(segx1, L, x)
            ystartvec[counter:counter + nseg] = np.interp(segx0, L, y)
            yendvec[counter:counter + nseg] = np.interp(segx1, L, y)
            zstartvec[counter:counter + nseg] = np.interp(segx0, L, z)
            zendvec[counter:counter + nseg] = np.interp(segx1, L, z)
            # fill in values area, diam, length
            for seg in sec:
                areavec[counter] = neuron.h.area(seg.x, sec=sec)
                diamvec[counter] = seg.diam
                lengthvec[counter] = sec.L / nseg
                counter += 1
    # set cell attributes
    cell.x = np.c_[xstartvec, xendvec]
    cell.y = np.c_[ystartvec, yendvec]
    cell.z = np.c_[zstartvec, zendvec]
    cell.area = areavec
    cell.d = diamvec
    cell.length = lengthvec
| LFPy/LFPy | LFPy/run_simulation.py | Python | gpl-3.0 | 7,588 | [
"NEURON"
] | 7cb534882dd06f03d13dc106bd5bdfc5026276b44f12fe89a37a7742481c9833 |
# Local Polynomial Approximation Smoother using Numba JIT Acceleration - single thread version
#
# Smooths the data by fitting a 2nd order polynomial to a small window around
# each data sample using gaussian weighted least squares. This implementation uses the Numba JIT to
# accelerate the convolution.
#
import sys,os
import numpy as np
#
# Import the module with the I/O scaffolding of the External Attribute
#
sys.path.insert(0, os.path.join(sys.path[0], '..', '..'))
import extattrib as xa
import extlib as xl
#
# These are the attribute parameters
#
# Attribute parameter declarations consumed by the extattrib scaffolding.
xa.params = {
    'Input': 'Input',
    # Vertical window half-extent in samples (symmetric about the sample).
    'ZSampMargin' : {'Value':[-1,1], 'Symmetric': True},
    # Inline/crossline stepout: window is (2*step+1) traces each way.
    'StepOut' : {'Value': [1,1]},
    # Gaussian weight factor passed to lpa3D_init as sigma.
    'Par_0': {'Name': 'Weight Factor', 'Value': 0.2},
    'Parallel': False,
    'Help': 'http://waynegm.github.io/OpendTect-Plugin-Docs/External_Attributes/LPA_Attributes/'
}
#
# Define the compute function
#
def doCompute():
    """Attribute compute loop: convolve the input volume with the LPA
    constant-term kernel, i.e. a gaussian-weighted 2nd-order polynomial
    smoother evaluated at the window centre."""
    # Window thickness in samples derived from the (symmetric) Z margin.
    dz = xa.params['ZSampMargin']['Value'][1] - xa.params['ZSampMargin']['Value'][0] + 1
    # kernel index 0 is the least-squares weight set for the polynomial's
    # constant term -- the smoothed value itself.
    kernel = lpa3D_init(xa.SI['nrinl'], xa.SI['nrcrl'], dz, xa.params['Par_0']['Value'])[0]
    # Standard extattrib processing loop: read a block, convolve, write.
    while True:
        xa.doInput()
        xa.Output = xl.sconvolve(xa.Input, kernel)
        xa.doOutput()
#
# Find the LPA solution for a 2nd order polynomial in 3D
#
def lpa3D_init(xs, ys, zs, sigma=0.2):
    """Precompute the local polynomial approximation (LPA) solver for a
    2nd-order polynomial over an xs*ys*zs window.

    Returns a (10, xs, ys, zs) array holding one gaussian-weighted
    least-squares kernel per polynomial coefficient, in the order:
    constant, x, y, z, x^2, y^2, z^2, xy, xz, yz.
    """
    # Gaussian std proportional to the smallest window side.
    std = sigma * (min(xs, ys, zs) - 1)
    # Window coordinates centred on zero, one axis at a time.
    axes = [np.linspace(-(n - 1) / 2, (n - 1) / 2, n) for n in (xs, ys, zs)]
    grids = np.meshgrid(*axes, indexing='ij')
    x, y, z = (g.flatten() for g in grids)
    # Diagonal gaussian weight matrix over all window points.
    w = np.exp(-(x**2 + y**2 + z**2) / (2 * std**2))
    W = np.diagflat(w)
    # Design matrix for a full 2nd-order polynomial in three variables.
    A = np.dstack((np.ones(x.size), x, y, z,
                   x * x, y * y, z * z,
                   x * y, x * z, y * z)).reshape((x.size, 10))
    # Weighted normal equations: DB maps window samples to coefficients.
    DB = np.linalg.inv(A.T.dot(W).dot(A)).dot(A.T).dot(W)
    return DB.reshape((10, xs, ys, zs))
#
# Assign the compute function to the attribute
#
xa.doCompute = doCompute
#
# Do it
#
xa.run(sys.argv[1:])
| waynegm/OpendTect-Plugins | bin/python/wmpy/Experimental/LocalPolynomialApproximation/ex_lpa_smooth_single.py | Python | gpl-3.0 | 1,916 | [
"Gaussian"
] | 6dcfc1b09cfbe45f9b94f43cccb815a86b938e37e0dde40ca2bf786cc419f421 |
#
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2011 Red Hat, Inc.
# This file is part of python-fedora
#
# python-fedora is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# python-fedora is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with python-fedora; if not, see <http://www.gnu.org/licenses/>
#
'''
Cross-site Request Forgery Protection.
http://en.wikipedia.org/wiki/Cross-site_request_forgery
.. moduleauthor:: John (J5) Palmieri <johnp@redhat.com>
.. moduleauthor:: Luke Macken <lmacken@redhat.com>
.. versionadded:: 0.3.17
'''
from hashlib import sha1
import logging
from munch import Munch
from kitchen.text.converters import to_bytes
from webob import Request
try:
# webob > 1.0
from webob.headers import ResponseHeaders
except ImportError:
# webob < 1.0
from webob.headerdict import HeaderDict as ResponseHeaders
from paste.httpexceptions import HTTPFound
from paste.response import replace_header
from repoze.who.interfaces import IMetadataProvider
from zope.interface import implements
from fedora.urlutils import update_qs
log = logging.getLogger(__name__)
class CSRFProtectionMiddleware(object):
    '''
    CSRF Protection WSGI Middleware.

    A layer of WSGI middleware that is responsible for making sure
    authenticated requests originated from the user inside of the app's domain
    and not a malicious website.

    This middleware works with the :mod:`repoze.who` middleware, and requires
    that it is placed below :mod:`repoze.who` in the WSGI stack,
    since it relies upon ``repoze.who.identity`` to exist in the environ before
    it is called.

    To utilize this middleware, you can just add it to your WSGI stack below
    the :mod:`repoze.who` middleware.  Here is an example of utilizing the
    `CSRFProtectionMiddleware` within a TurboGears2 application.
    In your ``project/config/middleware.py``, you would wrap your main
    application with the `CSRFProtectionMiddleware`, like so:

    .. code-block:: python

        from fedora.wsgi.csrf import CSRFProtectionMiddleware
        def make_app(global_conf, full_stack=True, **app_conf):
            app = make_base_app(global_conf, wrap_app=CSRFProtectionMiddleware,
                                full_stack=full_stack, **app_conf)

    You then need to add the CSRF token to every url that you need to be
    authenticated for.  When used with TurboGears2, an overridden version of
    :func:`tg.url` is provided.  You can use it directly by calling::

        from fedora.tg2.utils import url
        [...]
        url = url('/authentication_needed')

    An easier and more portable way to use that is from within TG2 to set this
    up is to use :func:`fedora.tg2.utils.enable_csrf` when you setup your
    application.  This function will monkeypatch TurboGears2's :func:`tg.url`
    so that it adds a csrf token to urls.  This way, you can keep the same
    code in your templates and controller methods whether or not you configure
    the CSRF middleware to provide you with protection via
    :func:`~fedora.tg2.utils.enable_csrf`.
    '''
    def __init__(self, application, csrf_token_id='_csrf_token',
                 clear_env='repoze.who.identity repoze.what.credentials',
                 token_env='CSRF_TOKEN', auth_state='CSRF_AUTH_STATE'):
        '''
        Initialize the CSRF Protection WSGI Middleware.

        :csrf_token_id: The name of the CSRF token variable
        :clear_env: Variables to clear out of the `environ` on invalid token
        :token_env: The name of the token variable in the environ
        :auth_state: The environ key that will be set when we are logging in
        '''
        log.info('Creating CSRFProtectionMiddleware')
        self.application = application
        self.csrf_token_id = csrf_token_id
        # stored as a list of keys to delete on token mismatch
        self.clear_env = clear_env.split()
        self.token_env = token_env
        self.auth_state = auth_state
    def _clean_environ(self, environ):
        ''' Delete the ``keys`` from the supplied ``environ`` '''
        log.debug('clean_environ(%s)' % to_bytes(self.clear_env))
        for key in self.clear_env:
            if key in environ:
                log.debug('Deleting %(key)s from environ' %
                          {'key': to_bytes(key)})
                del(environ[key])
    def __call__(self, environ, start_response):
        '''
        This method is called for each request. It looks for a user-supplied
        CSRF token in the GET/POST parameters, and compares it to the token
        attached to ``environ['repoze.who.identity']['_csrf_token']``. If it
        does not match, or if a token is not provided, it will remove the
        user from the ``environ``, based on the ``clear_env`` setting.
        '''
        request = Request(environ)
        log.debug('CSRFProtectionMiddleware(%(r_path)s)' %
                  {'r_path': to_bytes(request.path)})
        # token: what the authenticated identity says it should be;
        # csrf_token: what the client actually supplied (set by the
        # CSRFMetadataProvider during identification).
        token = environ.get('repoze.who.identity', {}).get(self.csrf_token_id)
        csrf_token = environ.get(self.token_env)
        if token and csrf_token and token == csrf_token:
            log.debug('User supplied CSRF token matches environ!')
        else:
            # Mismatch or missing token: demote the request to anonymous
            # unless we are in the middle of logging in.
            if not environ.get(self.auth_state):
                log.debug('Clearing identity')
                self._clean_environ(environ)
            if 'repoze.who.identity' not in environ:
                environ['repoze.who.identity'] = Munch()
            if 'repoze.who.logins' not in environ:
                # For compatibility with friendlyform
                environ['repoze.who.logins'] = 0
            if csrf_token:
                log.warning('Invalid CSRF token. User supplied'
                            ' (%(u_token)s) does not match what\'s in our'
                            ' environ (%(e_token)s)' %
                            {'u_token': to_bytes(csrf_token),
                             'e_token': to_bytes(token)})
        response = request.get_response(self.application)
        if environ.get(self.auth_state):
            # Just logged in: append the fresh token to the redirect target
            # so the client carries it on the next request.
            log.debug('CSRF_AUTH_STATE; rewriting headers')
            token = environ.get('repoze.who.identity', {})\
                .get(self.csrf_token_id)
            loc = update_qs(
                response.location, {self.csrf_token_id: str(token)})
            response.location = loc
            log.debug('response.location = %(r_loc)s' %
                      {'r_loc': to_bytes(response.location)})
            environ[self.auth_state] = None
        return response(environ, start_response)
class CSRFMetadataProvider(object):
    '''
    Repoze.who CSRF Metadata Provider Plugin.

    This metadata provider is called with an authenticated users identity
    automatically by repoze.who.  It will then take the SHA1 hash of the
    users session cookie, and set it as the CSRF token in
    ``environ['repoze.who.identity']['_csrf_token']``.

    This plugin will also set ``CSRF_AUTH_STATE`` in the environ if the user
    has just authenticated during this request.

    To enable this plugin in a TurboGears2 application, you can
    add the following to your ``project/config/app_cfg.py``

    .. code-block:: python

        from fedora.wsgi.csrf import CSRFMetadataProvider
        base_config.sa_auth.mdproviders = [('csrfmd', CSRFMetadataProvider())]

    Note: If you use the faswho plugin, this is turned on automatically.
    '''
    implements(IMetadataProvider)
    def __init__(self, csrf_token_id='_csrf_token', session_cookie='tg-visit',
                 clear_env='repoze.who.identity repoze.what.credentials',
                 login_handler='/post_login', token_env='CSRF_TOKEN',
                 auth_session_id='CSRF_AUTH_SESSION_ID',
                 auth_state='CSRF_AUTH_STATE'):
        '''
        Create the CSRF Metadata Provider Plugin.

        :kwarg csrf_token_id: The name of the CSRF token variable.  The
            identity will contain an entry with this as key and the
            computed csrf_token as the value.
        :kwarg session_cookie: The name of the session cookie
        :kwarg login_handler: The path to the login handler, used to determine
            if the user logged in during this request
        :kwarg token_env: The name of the token variable in the environ.
            The environ will contain the token from the request
        :kwarg auth_session_id: The environ key containing an optional
            session id
        :kwarg auth_state: The environ key that indicates when we are
            logging in
        '''
        self.csrf_token_id = csrf_token_id
        self.session_cookie = session_cookie
        self.clear_env = clear_env
        self.login_handler = login_handler
        self.token_env = token_env
        self.auth_session_id = auth_session_id
        self.auth_state = auth_state
    def strip_script(self, environ, path):
        # Strips the script portion of a url path so the middleware works even
        # when mounted under a path other than root
        if path.startswith('/') and 'SCRIPT_NAME' in environ:
            prefix = environ.get('SCRIPT_NAME')
            if prefix.endswith('/'):
                prefix = prefix[:-1]
            if path.startswith(prefix):
                path = path[len(prefix):]
        return path
    def add_metadata(self, environ, identity):
        # repoze.who entry point: derive the CSRF token from the session
        # cookie and publish it on both the identity and the environ.
        # NOTE(review): sha1(session_id) requires bytes on Python 3; this
        # py2-era code passes the cookie string directly -- confirm encoding
        # before running under py3.
        request = Request(environ)
        log.debug('CSRFMetadataProvider.add_metadata(%(r_path)s)'
                  % {'r_path': to_bytes(request.path)})
        session_id = environ.get(self.auth_session_id)
        if not session_id:
            session_id = request.cookies.get(self.session_cookie)
        log.debug('session_id = %(s_id)r' % {'s_id':
                                             to_bytes(session_id)})
        if session_id and session_id != 'Set-Cookie:':
            environ[self.auth_session_id] = session_id
            # The token is simply the SHA1 of the session cookie value.
            token = sha1(session_id).hexdigest()
            identity.update({self.csrf_token_id: token})
            log.debug('Identity updated with CSRF token')
            path = self.strip_script(environ, request.path)
            if path == self.login_handler:
                log.debug('Setting CSRF_AUTH_STATE')
                environ[self.auth_state] = True
                environ[self.token_env] = token
            else:
                environ[self.token_env] = self.extract_csrf_token(request)
            app = environ.get('repoze.who.application')
            if app:
                # This occurs during login in some application configurations
                if isinstance(app, HTTPFound) and environ.get(self.auth_state):
                    log.debug('Got HTTPFound(302) from'
                              ' repoze.who.application')
                    # What possessed people to make this a string or
                    # a function?
                    location = app.location
                    if hasattr(location, '__call__'):
                        location = location()
                    loc = update_qs(location, {self.csrf_token_id:
                                               str(token)})
                    headers = app.headers.items()
                    replace_header(headers, 'location', loc)
                    app.headers = ResponseHeaders(headers)
                    log.debug('Altered headers: %(headers)s' % {
                        'headers': to_bytes(app.headers)})
        else:
            log.warning('Invalid session cookie %(s_id)r, not setting CSRF'
                        ' token!' % {'s_id': to_bytes(session_id)})
    def extract_csrf_token(self, request):
        '''Extract and remove the CSRF token from a given
        :class:`webob.Request`
        '''
        csrf_token = None
        if self.csrf_token_id in request.GET:
            log.debug("%(token)s in GET" % {'token':
                                            to_bytes(self.csrf_token_id)})
            csrf_token = request.GET[self.csrf_token_id]
            # Remove the token and rebuild the query string so downstream
            # code never sees it.
            del(request.GET[self.csrf_token_id])
            request.query_string = '&'.join(['%s=%s' % (k, v) for k, v in
                                             request.GET.items()])
        if self.csrf_token_id in request.POST:
            log.debug("%(token)s in POST" % {'token':
                                             to_bytes(self.csrf_token_id)})
            csrf_token = request.POST[self.csrf_token_id]
            del(request.POST[self.csrf_token_id])
        return csrf_token
| fedora-infra/python-fedora | fedora/wsgi/csrf.py | Python | lgpl-2.1 | 12,944 | [
"VisIt"
] | a40fe88feabb5daa38f48e6a0bd11669b42be86cf23bb629e234cb2112805e0d |
#!/usr/bin/env python
from __future__ import division
import os
import argparse
import console
import fileinput
"""
When running BLAST with GenBank (INSDC) data using seperate partitions, this script
helps to merge different BLAST results together by sorting best hits using BLAST
score. Input is taken from STDIN, where you can define list of files and output is
written to STDOUT.
"""
parser = argparse.ArgumentParser(description = """
Merges BLAST outputs into one BLAST file by selecting best BLAST score hits only for
each sequence. Input is taken from STDIN and output is written to STDOUT.
""")
args = parser.parse_args()
hits = {}
for line in fileinput.input():
col = line.strip().split("\t")
if len(col) < 15:
continue
seq = col[0]
score = col[15]
if seq in hits:
comp_score = hits[seq][15]
if score > comp_score:
hits[seq] = col
else:
hits[seq] = col
for key in hits:
print("\t".join(hits[key]))
| ut-planteco/ssu-pipeline | pipeline_merge_blasts.py | Python | gpl-3.0 | 941 | [
"BLAST"
] | ac66b51ec22dc20bb4ffcac986928522890cfcb1c48753cae6e3a1a146505a65 |
"""
Global configurations
"""
import os
'''
Dirs
'''
# Directory for pickled data
DATA_DIR = './data'
# Directory for temporary data
TMP_DIR = './tmp'
# Directory for figures
FIG_DIR = './fig'
# Directory for human-readable data
HR_DIR = './hr'
# Directory for news and tweets data by Dr. Meng Jiang
# DEPRECATED
MENG_NEWS_TWEETS_DIR = os.path.join(DATA_DIR, 'raw-news_tweets-meng')
# Directory for original news and tweets data
ORIGINAL_NEWS_TWEETS_DIR = os.path.join(DATA_DIR, 'raw-news_tweets-original')
# Directory for topic_news and topic_tweets docs
TOPICS_DOCS_DIR = os.path.join(HR_DIR, 'topics_docs')
# Directory for EXTERNAL Hedonometer data files
# http://hedonometer.org/index.html
HED_DATA_DIR = os.path.join(DATA_DIR, 'hedonometer')
# Directory of pkls for selected Hedonometer words frequency dicts on topic_tweets docs
TOPICS_TWEETS_SHED_WORDS_FREQ_DICT_PKLS_DIR = os.path.join(DATA_DIR, 'topics_tweets_shed_words_freq_dict_pkls')
# Directory for IBM tweets and news data
IBM_TWEETS_NEWS_DIR = os.path.join(DATA_DIR, 'ibm_tweets_news')
'''
Files
'''
# DDL scripts to create db, table schema for news, and table schema for tweets
NEWS_TWEETS_DDL_FILE = os.path.join(DATA_DIR, 'original-news_tweets.schema.sql')
# SQLite db for raw news and tweets data (provided by Dr. Meng Jiang)
NEWS_TWEETS_DB_FILE = os.path.join(DATA_DIR, 'original-news_tweets.db')
# Pickle of dataframe of news over selected period [2014-11-18, 2015-04-14]
NEWS_PERIOD_DF_PKL = os.path.join(DATA_DIR, 'news-period.df.pkl')
# Pickle of manually selected topics information with associated news and tweets native_id
TOPICS_LST_PKL = os.path.join(DATA_DIR, 'topics.lst.pkl')
# JSON file for Hedonometer happiness words
# Visualization: http://hedonometer.org/words.html
# Download: http://hedonometer.org/api/v1/timeseries/?format=json
HED_WORDS_JSON_FILE = os.path.join(HED_DATA_DIR, 'labMT_words.json')
# Pickle of dataframe for complete Hedonometer happiness words information
HED_WORDS_DF_PKL = os.path.join(DATA_DIR, 'hed_words.df.pkl')
# Pickle of dataframe for Hedonometer selected words
SHED_WORDS_DF_PKL = os.path.join(DATA_DIR, 'shed_words.df.pkl')
# Pickles of dicts for mapping between selected word and word_ind
SHED_WORD_IND_DICT_PKL = os.path.join(DATA_DIR, 'shed_word-ind.dict.pkl')
IND_SHED_WORD_DICT_PKL = os.path.join(DATA_DIR, 'ind-shed_word.dict.pkl')
# Pickle of dict for mapping between and shed_word_ind and shed_word_happs
IND_HAPPS_DICT_PKL = os.path.join(DATA_DIR, 'ind-happs.dict.pkl')
# Pickle of dict for selected Hedonometer words frequency on topic_news docs
TOPICS_NEWS_SHED_WORDS_FREQ_DICT_PKL = os.path.join(DATA_DIR, 'topics_news_shed_words_freq.dict.pkl')
'''
Misc
'''
# List of dates when Twitter internal server is unstable and tweets contain errors cannot be parsed
ORIGINAL_TWEETS_ERROR_DATES_LST = ['2015-06-05', '2015-09-20', '2015-09-21', '2015-12-08', '2015-12-09', '2015-12-10', '2016-02-14', '2016-02-15', '2016-02-17', '2016-02-18', '2016-02-19']
# Manully selected topics information
MANUALLY_SELECTED_TOPICS_LST = [
{'category': 'politics', 'name': 'Hillary_Clinton_email_controversy', 'keywords_lst': [('email', 'e-mail'), ('Hillary', 'Clinton')]},
{'category': 'politics', 'name': 'Iran_nuclear_deal', 'keywords_lst': ['Iran', 'nuclear']},
{'category': 'politics', 'name': 'ISIS_Jihadi_John_identity_reveal', 'keywords_lst': ['Jihadi John']},
{'category': 'politics', 'name': 'Ukraine_cease_fire', 'keywords_lst': [('cease-fire', 'ceasefire'), ('Ukraine', 'Russia')]},
{'category': 'politics', 'name': 'Egypt_free_Al_Jazeera_journalist', 'keywords_lst': [('Al Jazeera', 'Egypt'), ('Peter Greste', 'journalist')]},
{'category': 'politics', 'name': 'Keystone_XL_Pipeline_bill', 'keywords_lst': ['Keystone XL']},
{'category': 'politics', 'name': 'CIA_Torture_Report', 'keywords_lst': ['Torture Report']},
{'category': 'politics', 'name': 'Obama_cybersecurity_plan', 'keywords_lst': ['Obama', 'cyber']},
{'category': 'politics', 'name': 'DHS_funding_issue', 'keywords_lst': ['DHS', 'fund']},
{'category': 'politics', 'name': 'US_Cuba_relationship', 'keywords_lst': [('US', 'Obama'), ('Cuba', 'Castro')]},
{'category': 'politics', 'name': '2015_CPAC', 'keywords_lst': ['CPAC']},
{'category': 'politics', 'name': 'Iraq_free_ISIS_Tikrit', 'keywords_lst': ['Tikrit']},
{'category': 'politics', 'name': 'Nigeria_Boko_Haram_terrorists', 'keywords_lst': ['Boko Haram']},
{'category': 'social', 'name': 'Ferguson_unrest', 'keywords_lst': ['Ferguson']},
{'category': 'social', 'name': 'Hong_Kong_protest', 'keywords_lst': ['Hong Kong']},
{'category': 'social', 'name': 'Sony_cyberattack', 'keywords_lst': ['Sony']},
{'category': 'social', 'name': 'Bill_Cosby_sexual_assault_allegation', 'keywords_lst': ['Bill Cosby']},
{'category': 'social', 'name': 'SpaceX_rocket_landing', 'keywords_lst': ['SpaceX']},
{'category': 'social', 'name': 'Brian_Williams_fake_story', 'keywords_lst': ['Brian Williams']},
{'category': 'social', 'name': 'HSBC_tax_scandal', 'keywords_lst': ['HSBC']},
{'category': 'social', 'name': 'David_Carr_death', 'keywords_lst': ['David Carr']},
{'category': 'social', 'name': 'Patriots_Deflategate', 'keywords_lst': [('Deflategate', 'Deflate-gate')]},
{'category': 'social', 'name': 'Delhi_Uber_driver_rape', 'keywords_lst': ['Uber', ('rape', 'Delhi')]},
{'category': 'social', 'name': 'Superbug_spread', 'keywords_lst': ['Superbug']},
{'category': 'social', 'name': 'Rudy_Giuliani_Obama_critique', 'keywords_lst': ['Giuliani']},
#{'category': 'social', 'name': 'Ben_Carson_homosexuality_issue', 'keywords_lst': ['Ben Carson', ('gay', 'homosexuality')]},
{'category': 'entertainment', 'name': 'Oscar', 'keywords_lst': ['Oscar']},
{'category': 'entertainment', 'name': 'Super_Bowl', 'keywords_lst': ['Super Bowl']},
{'category': 'entertainment', 'name': 'Grammy', 'keywords_lst': ['Grammy']},
{'category': 'entertainment', 'name': 'Golden_Globe', 'keywords_lst': ['Golden Globe']},
{'category': 'entertainment', 'name': '500_million_Powerball', 'keywords_lst': ['Powerball']},
{'category': 'entertainment', 'name': 'Thanksgiving', 'keywords_lst': ['Thanksgiving']},
{'category': 'entertainment', 'name': 'Black_Friday_and_Cyber_Monday', 'keywords_lst': [('Black Friday', 'Cyber Monday')]},
{'category': 'entertainment', 'name': 'Christmas', 'keywords_lst': ['Christmas']},
{'category': 'entertainment', 'name': 'New_Year', 'keywords_lst': ['New Year']},
{'category': 'entertainment', 'name': 'Apple_Watch', 'keywords_lst': ['Apple Watch']},
{'category': 'entertainment', 'name': 'Yosemite_historic_climb', 'keywords_lst': [('Yosemite', 'El Capitan')]},
{'category': 'entertainment', 'name': 'Jon_Stewart_Daily_Show', 'keywords_lst': ['Jon Stewart']},
{'category': 'entertainment', 'name': 'success_of_American_Sniper', 'keywords_lst': ['American Sniper']},
{'category': 'tragedy', 'name': 'Ebola_virus_spread', 'keywords_lst': ['Ebola']},
{'category': 'tragedy', 'name': 'Indonesia_AirAsia_Flight_QZ8501_crash', 'keywords_lst': [('AirAsia', '8501')]},
{'category': 'tragedy', 'name': 'Paris_attacks', 'keywords_lst': ['Paris']},
{'category': 'tragedy', 'name': 'Vanuatu_Cyclone_Pam', 'keywords_lst': ['Vanuatu', 'Cyclone']},
{'category': 'tragedy', 'name': 'Malaysia_Airlines_Flight_MH370_crash', 'keywords_lst': ['370']},
{'category': 'tragedy', 'name': 'Colorado_NAACP_bombing', 'keywords_lst': ['NAACP']},
{'category': 'tragedy', 'name': 'FSU_shooting', 'keywords_lst': ['FSU']},
{'category': 'tragedy', 'name': 'Chapel_Hill_shooting', 'keywords_lst': ['Chapel Hill']},
{'category': 'tragedy', 'name': 'Bobbi_Kristina_Brown_death', 'keywords_lst': ['Bobbi Kristina Brown']},
{'category': 'tragedy', 'name': 'Taliban_Pakistan_school_massacre', 'keywords_lst': [('Pakistan', 'Taliban'), ('school', 'student', 'massacre')]},
{'category': 'tragedy', 'name': 'American_ISIS_Hostage_Kayla_Mueller', 'keywords_lst': ['Kayla Mueller']},
{'category': 'tragedy', 'name': 'TransAsia_Airways_Flight_GE235_crash', 'keywords_lst': [('TransAsia', 'Taiwan'), ('plane', 'crash', 'pilot', 'flight')]},
{'category': 'tragedy', 'name': 'Germanwings_Flight_9525_crash', 'keywords_lst': [('Germanwings', 'Lufthansa', '9525')]}]
| adamwang0705/cross_media_affect_analysis | develop/config.py | Python | mit | 8,394 | [
"Brian"
] | fcb8e7e82c7166129d5dbba01f2b33103e538c3b8ac8fce07b89312f10902fe2 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.