| text (string, 12-1.05M) | repo_name (string, 5-86) | path (string, 4-191) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23) | text_hash (string, 64) |
|---|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
import datetime
import json
import os
import shutil
from django.conf import settings
from django.core.files.storage import default_storage as storage
import mock
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.tests import formset, initial
from amo.tests.test_helpers import get_image_path
from amo.urlresolvers import reverse
from addons.models import (Addon, AddonCategory, AddonDeviceType, AddonUser,
Category)
from apps.users.models import UserNotification
from apps.users.notifications import app_surveys
from constants.applications import DEVICE_TYPES
from files.tests.test_models import UploadTest as BaseUploadTest
from translations.models import Translation
from users.models import UserProfile
import mkt
from mkt.site.fixtures import fixture
from mkt.submit.forms import AppFeaturesForm, NewWebappVersionForm
from mkt.submit.models import AppSubmissionChecklist
from mkt.submit.decorators import read_dev_agreement_required
from mkt.webapps.models import AddonExcludedRegion as AER, AppFeatures, Webapp
class TestSubmit(amo.tests.TestCase):
fixtures = fixture('user_999')
def setUp(self):
self.fi_mock = mock.patch(
'mkt.developers.tasks.fetch_icon').__enter__()
self.user = self.get_user()
assert self.client.login(username=self.user.email, password='password')
def tearDown(self):
self.fi_mock.__exit__()
def get_user(self):
return UserProfile.objects.get(username='regularuser')
def get_url(self, url):
return reverse('submit.app.%s' % url, args=[self.webapp.app_slug])
def _test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def _test_progress_display(self, completed, current):
"""Test that the correct steps are highlighted."""
r = self.client.get(self.url)
progress = pq(r.content)('#submission-progress')
# Check the completed steps.
completed_found = progress.find('.completed')
for idx, step in enumerate(completed):
li = completed_found.eq(idx)
eq_(li.text(), unicode(mkt.APP_STEPS_TITLE[step]))
# Check that we link back to the Developer Agreement.
terms_link = progress.find('.terms a')
if 'terms' in completed:
eq_(terms_link.attr('href'),
reverse('mkt.developers.docs', args=['policies', 'agreement']))
else:
eq_(terms_link.length, 0)
# Check the current step.
eq_(progress.find('.current').text(),
unicode(mkt.APP_STEPS_TITLE[current]))
class TestProceed(TestSubmit):
def setUp(self):
super(TestProceed, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app')
def test_is_authenticated(self):
# Redirect user to Terms.
r = self.client.get(self.url)
self.assert3xx(r, reverse('submit.app.terms'))
def test_is_anonymous(self):
# Show the user the Terms page, but with the login prompt.
self.client.logout()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(r.context['proceed'], True)
class TestTerms(TestSubmit):
def setUp(self):
super(TestTerms, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app.terms')
def test_anonymous(self):
self.client.logout()
r = self.client.get(self.url, follow=True)
self.assertLoginRedirects(r, self.url)
def test_jump_to_step(self):
r = self.client.get(reverse('submit.app'), follow=True)
self.assert3xx(r, self.url)
def test_page(self):
r = self.client.get(self.url)
eq_(r.status_code, 200)
doc = pq(r.content)('#submit-terms')
eq_(doc.length, 1)
eq_(doc.find('input[name=newsletter]').siblings('label').length, 1,
'Missing its <label>!')
def test_progress_display(self):
self._test_progress_display([], 'terms')
@mock.patch('basket.subscribe')
def test_agree(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 0)
assert not subscribe_mock.called
@mock.patch('basket.subscribe')
def test_agree_and_sign_me_up(self, subscribe_mock):
self.client.post(self.url, {'read_dev_agreement':
datetime.datetime.now(),
'newsletter': True})
dt = self.get_user().read_dev_agreement
self.assertCloseToNow(dt)
eq_(UserNotification.objects.count(), 1)
notes = UserNotification.objects.filter(user=self.user, enabled=True,
notification_id=app_surveys.id)
eq_(notes.count(), 1, 'Expected to be subscribed to newsletter')
assert subscribe_mock.called
def test_disagree(self):
r = self.client.post(self.url)
eq_(r.status_code, 200)
eq_(self.user.read_dev_agreement, None)
eq_(UserNotification.objects.count(), 0)
def test_read_dev_agreement_required(self):
f = mock.Mock()
f.__name__ = 'function'
request = mock.Mock()
request.amo_user.read_dev_agreement = None
request.get_full_path.return_value = self.url
func = read_dev_agreement_required(f)
res = func(request)
assert not f.called
eq_(res.status_code, 302)
eq_(res['Location'], reverse('submit.app'))
class TestManifest(TestSubmit):
def setUp(self):
super(TestManifest, self).setUp()
self.user.update(read_dev_agreement=None)
self.url = reverse('submit.app')
def _step(self):
self.user.update(read_dev_agreement=datetime.datetime.now())
def test_anonymous(self):
r = self.client.get(self.url, follow=True)
eq_(r.context['step'], 'terms')
def test_cannot_skip_prior_step(self):
r = self.client.get(self.url, follow=True)
# And we start back at one...
self.assert3xx(r, reverse('submit.app.terms'))
def test_jump_to_step(self):
# I already read the Terms.
self._step()
# So jump me to the Manifest step.
r = self.client.get(reverse('submit.app'), follow=True)
eq_(r.context['step'], 'manifest')
def test_legacy_redirects(self):
def check():
for before, status in redirects:
r = self.client.get(before, follow=True)
self.assert3xx(r, dest, status)
# I haven't read the dev agreement.
redirects = (
('/developers/submit/', 302),
('/developers/submit/app', 302),
('/developers/submit/app/terms', 302),
('/developers/submit/app/manifest', 302),
)
dest = '/developers/submit/terms'
check()
# I have read the dev agreement.
self._step()
redirects = (
('/developers/submit/app', 302),
('/developers/submit/app/terms', 302),
('/developers/submit/app/manifest', 302),
('/developers/submit/manifest', 301),
)
dest = '/developers/submit/'
check()
def test_page(self):
self._step()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#upload-file').length, 1)
def test_progress_display(self):
self._step()
self._test_progress_display(['terms'], 'manifest')
class UploadAddon(object):
def post(self, expect_errors=False, data=None):
if data is None:
data = {'free_platforms': ['free-desktop']}
data.update(upload=self.upload.pk)
r = self.client.post(self.url, data, follow=True)
eq_(r.status_code, 200)
if not expect_errors:
# Show any unexpected form errors.
if r.context and 'form' in r.context:
eq_(r.context['form'].errors, {})
return r
class BaseWebAppTest(BaseUploadTest, UploadAddon, amo.tests.TestCase):
fixtures = fixture('app_firefox', 'platform_all', 'user_999', 'user_10482')
def setUp(self):
super(BaseWebAppTest, self).setUp()
self.manifest = self.manifest_path('mozball.webapp')
self.manifest_url = 'http://allizom.org/mozball.webapp'
self.upload = self.get_upload(abspath=self.manifest)
self.upload.update(name=self.manifest_url, is_webapp=True)
self.url = reverse('submit.app')
assert self.client.login(username='regular@mozilla.com',
password='password')
def post_addon(self, data=None):
eq_(Addon.objects.count(), 0)
self.post(data=data)
return Addon.objects.get()
class TestCreateWebApp(BaseWebAppTest):
def setUp(self):
super(TestCreateWebApp, self).setUp()
self.create_switch('buchets')
@mock.patch('mkt.developers.tasks.fetch_icon')
def test_post_app_redirect(self, fi_mock):
r = self.post()
webapp = Webapp.objects.get()
self.assert3xx(r,
reverse('submit.app.details', args=[webapp.app_slug]))
assert fi_mock.delay.called, (
'The fetch_icon task was expected to be called')
def test_no_hint(self):
self.post_addon()
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url), follow=True)
eq_(r.status_code, 200)
assert 'already submitted' not in r.content, (
'Unexpected helpful error (trap_duplicate)')
assert 'already exists' not in r.content, (
'Unexpected validation error (verify_app_domain)')
def test_no_upload(self):
data = {'free_platforms': ['free-desktop']}
res = self.client.post(self.url, data, follow=True)
eq_(res.context['form'].errors,
{'upload': NewWebappVersionForm.upload_error})
@mock.patch('mkt.developers.tasks.fetch_icon')
def test_bad_upload(self, fi_mock):
data = {'free_platforms': ['free-desktop'], 'upload': 'foo'}
res = self.client.post(self.url, data, follow=True)
eq_(res.context['form'].errors,
{'upload': NewWebappVersionForm.upload_error})
assert not fi_mock.delay.called, (
'The fetch_icon task was not expected to be called')
def test_hint_for_same_manifest(self):
self.create_switch(name='webapps-unique-by-domain')
self.post_addon()
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url))
data = json.loads(r.content)
assert 'Oops' in data['validation']['messages'][0]['message'], (
'Expected oops')
def test_no_hint_for_same_manifest_different_author(self):
self.create_switch(name='webapps-unique-by-domain')
self.post_addon()
# Submit same manifest as different user.
assert self.client.login(username='clouserw@gmail.com',
password='password')
self.upload = self.get_upload(abspath=self.manifest)
r = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=self.manifest_url))
data = json.loads(r.content)
eq_(data['validation']['messages'][0]['message'],
'An app already exists on this domain; only one app per domain is '
'allowed.')
def test_app_from_uploaded_manifest(self):
addon = self.post_addon()
eq_(addon.type, amo.ADDON_WEBAPP)
eq_(addon.is_packaged, False)
assert addon.guid is not None, (
'Expected app to have a UUID assigned to guid')
eq_(unicode(addon.name), u'MozillaBall ょ')
eq_(addon.slug, 'app-%s' % addon.id)
eq_(addon.app_slug, u'mozillaball-ょ')
eq_(addon.description, u'Exciting Open Web development action!')
eq_(addon.manifest_url, u'http://allizom.org/mozball.webapp')
eq_(addon.app_domain, u'http://allizom.org')
eq_(Translation.objects.get(id=addon.description.id, locale='it'),
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(addon.current_version.developer_name, 'Mozilla Labs')
eq_(addon.current_version.manifest,
json.loads(open(self.manifest).read()))
def test_manifest_with_any_extension(self):
self.manifest = os.path.join(settings.ROOT, 'mkt', 'developers',
'tests', 'addons', 'mozball.owa')
self.upload = self.get_upload(abspath=self.manifest, is_webapp=True)
addon = self.post_addon()
eq_(addon.type, amo.ADDON_WEBAPP)
def test_version_from_uploaded_manifest(self):
addon = self.post_addon()
eq_(addon.current_version.version, '1.0')
def test_file_from_uploaded_manifest(self):
addon = self.post_addon()
files = addon.current_version.files.all()
eq_(len(files), 1)
eq_(files[0].status, amo.STATUS_PENDING)
def test_set_platform(self):
app = self.post_addon(
{'free_platforms': ['free-android-tablet', 'free-desktop']})
self.assertSetEqual(app.device_types,
[amo.DEVICE_TABLET, amo.DEVICE_DESKTOP])
def test_free(self):
app = self.post_addon({'free_platforms': ['free-firefoxos']})
self.assertSetEqual(app.device_types, [amo.DEVICE_GAIA])
eq_(app.premium_type, amo.ADDON_FREE)
def test_premium(self):
self.create_flag('allow-b2g-paid-submission')
app = self.post_addon({'paid_platforms': ['paid-firefoxos']})
self.assertSetEqual(app.device_types, [amo.DEVICE_GAIA])
eq_(app.premium_type, amo.ADDON_PREMIUM)
def test_supported_locales(self):
addon = self.post_addon()
eq_(addon.default_locale, 'en-US')
eq_(addon.versions.latest().supported_locales, 'es,it')
def test_short_locale(self):
# This manifest has a locale code of "pt" which is in the
# SHORTER_LANGUAGES setting and should get converted to "pt-PT".
self.manifest = self.manifest_path('short-locale.webapp')
self.upload = self.get_upload(abspath=self.manifest)
addon = self.post_addon()
eq_(addon.default_locale, 'pt-PT')
eq_(addon.versions.latest().supported_locales, 'es')
def test_unsupported_detail_locale(self):
# This manifest has a locale code of "en-GB" which is unsupported, so
# we default to "en-US".
self.manifest = self.manifest_path('unsupported-default-locale.webapp')
self.upload = self.get_upload(abspath=self.manifest)
addon = self.post_addon()
eq_(addon.default_locale, 'en-US')
eq_(addon.versions.latest().supported_locales, 'es,it')
def test_appfeatures_creation(self):
addon = self.post_addon(data={
'free_platforms': ['free-desktop'],
'has_contacts': 'on'
})
features = addon.current_version.features
ok_(isinstance(features, AppFeatures))
field_names = [f.name for f in AppFeaturesForm().all_fields()]
for field in field_names:
expected = field == 'has_contacts'
eq_(getattr(features, field), expected)
class TestCreateWebAppFromManifest(BaseWebAppTest):
def setUp(self):
super(TestCreateWebAppFromManifest, self).setUp()
Webapp.objects.create(app_slug='xxx',
app_domain='http://existing-app.com')
def upload_webapp(self, manifest_url, **post_kw):
self.upload.update(name=manifest_url) # Simulate JS upload.
return self.post(**post_kw)
def post_manifest(self, manifest_url):
rs = self.client.post(reverse('mkt.developers.upload_manifest'),
dict(manifest=manifest_url))
if 'json' in rs['content-type']:
rs = json.loads(rs.content)
return rs
def test_duplicate_domain(self):
self.create_switch(name='webapps-unique-by-domain')
rs = self.upload_webapp('http://existing-app.com/my.webapp',
expect_errors=True)
eq_(rs.context['form'].errors,
{'upload':
['An app already exists on this domain; only one '
'app per domain is allowed.']})
def test_allow_duplicate_domains(self):
self.upload_webapp('http://existing-app.com/my.webapp') # No errors.
def test_duplicate_domain_from_js(self):
self.create_switch(name='webapps-unique-by-domain')
data = self.post_manifest('http://existing-app.com/my.webapp')
eq_(data['validation']['errors'], 1)
eq_(data['validation']['messages'][0]['message'],
'An app already exists on this domain; '
'only one app per domain is allowed.')
def test_allow_duplicate_domains_from_js(self):
rs = self.post_manifest('http://existing-app.com/my.webapp')
eq_(rs.status_code, 302)
class BasePackagedAppTest(BaseUploadTest, UploadAddon, amo.tests.TestCase):
fixtures = fixture('webapp_337141', 'user_999')
def setUp(self):
super(BasePackagedAppTest, self).setUp()
self.app = Webapp.objects.get(pk=337141)
self.app.update(is_packaged=True)
self.version = self.app.current_version
self.file = self.version.all_files[0]
self.file.update(filename='mozball.zip')
self.package = self.packaged_app_path('mozball.zip')
self.upload = self.get_upload(abspath=self.package)
self.upload.update(name='mozball.zip', is_webapp=True)
self.url = reverse('submit.app')
assert self.client.login(username='regular@mozilla.com',
password='password')
def post_addon(self, data=None):
eq_(Addon.objects.count(), 1)
self.post(data=data)
return Addon.objects.order_by('-id')[0]
def setup_files(self, filename='mozball.zip'):
# Make sure the source file is there.
# Original packaged file.
if not storage.exists(self.file.file_path):
try:
# We don't care if these dirs exist.
os.makedirs(os.path.dirname(self.file.file_path))
except OSError:
pass
shutil.copyfile(self.packaged_app_path(filename),
self.file.file_path)
# Signed packaged file.
if not storage.exists(self.file.signed_file_path):
try:
# We don't care if these dirs exist.
os.makedirs(os.path.dirname(self.file.signed_file_path))
except OSError:
pass
shutil.copyfile(self.packaged_app_path(filename),
self.file.signed_file_path)
class TestCreatePackagedApp(BasePackagedAppTest):
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
def test_post_app_redirect(self, _mock):
res = self.post()
webapp = Webapp.objects.order_by('-created')[0]
self.assert3xx(res,
reverse('submit.app.details', args=[webapp.app_slug]))
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
@mock.patch('mkt.submit.forms.verify_app_domain')
def test_app_from_uploaded_package(self, _verify, _mock):
addon = self.post_addon(
data={'packaged': True, 'free_platforms': ['free-firefoxos']})
eq_(addon.type, amo.ADDON_WEBAPP)
eq_(addon.current_version.version, '1.0')
eq_(addon.is_packaged, True)
assert addon.guid is not None, (
'Expected app to have a UUID assigned to guid')
eq_(unicode(addon.name), u'Packaged MozillaBall ょ')
eq_(addon.slug, 'app-%s' % addon.id)
eq_(addon.app_slug, u'packaged-mozillaball-ょ')
eq_(addon.description, u'Exciting Open Web development action!')
eq_(addon.manifest_url, None)
eq_(addon.app_domain, 'app://hy.fr')
eq_(Translation.objects.get(id=addon.description.id, locale='it'),
u'Azione aperta emozionante di sviluppo di fotoricettore!')
eq_(addon.current_version.developer_name, 'Mozilla Labs')
assert _verify.called, (
'`verify_app_domain` should be called for packaged apps with '
'origins.')
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest')
def test_packaged_app_not_unique(self, _mock):
Webapp.objects.create(is_packaged=True, app_domain='app://hy.fr')
res = self.post(
data={'packaged': True, 'free_platforms': ['free-firefoxos']},
expect_errors=True)
eq_(res.context['form'].errors, {
'upload': ['An app already exists on this domain; only one app '
'per domain is allowed.']})
class TestDetails(TestSubmit):
fixtures = fixture('webapp_337141', 'user_999', 'user_10482')
def setUp(self):
super(TestDetails, self).setUp()
self.webapp = self.get_webapp()
self.webapp.update(status=amo.STATUS_NULL)
self.url = reverse('submit.app.details', args=[self.webapp.app_slug])
def get_webapp(self):
return Webapp.objects.get(id=337141)
def upload_preview(self, image_file=None):
if not image_file:
image_file = get_image_path('preview.jpg')
return self._upload_image(self.webapp.get_dev_url('upload_preview'),
image_file=image_file)
def upload_icon(self, image_file=None):
if not image_file:
image_file = get_image_path('mozilla-sq.png')
return self._upload_image(self.webapp.get_dev_url('upload_icon'),
image_file=image_file)
def _upload_image(self, url, image_file):
with open(image_file, 'rb') as data:
rp = self.client.post(url, {'upload_image': data})
eq_(rp.status_code, 200)
hash_ = json.loads(rp.content)['upload_hash']
assert hash_, 'No hash: %s' % rp.content
return hash_
def _step(self):
self.user.update(read_dev_agreement=datetime.datetime.now())
self.cl = AppSubmissionChecklist.objects.create(addon=self.webapp,
terms=True, manifest=True)
# Associate app with user.
AddonUser.objects.create(addon=self.webapp, user=self.user)
# Associate device type with app.
self.dtype = DEVICE_TYPES.values()[0]
AddonDeviceType.objects.create(addon=self.webapp,
device_type=self.dtype.id)
self.device_types = [self.dtype]
# Associate category with app.
self.cat1 = Category.objects.create(type=amo.ADDON_WEBAPP, name='Fun')
AddonCategory.objects.create(addon=self.webapp, category=self.cat1)
def test_anonymous(self):
self._test_anonymous()
def test_resume_later(self):
self._step()
self.webapp.appsubmissionchecklist.update(details=True)
r = self.client.get(reverse('submit.app.resume',
args=[self.webapp.app_slug]))
self.assert3xx(r, self.webapp.get_dev_url('edit'))
def test_not_owner(self):
self._step()
assert self.client.login(username='clouserw@gmail.com',
password='password')
eq_(self.client.get(self.url).status_code, 403)
def test_page(self):
self._step()
r = self.client.get(self.url)
eq_(r.status_code, 200)
eq_(pq(r.content)('#submit-details').length, 1)
def test_progress_display(self):
self._step()
self._test_progress_display(['terms', 'manifest'], 'details')
def new_preview_formset(self, *args, **kw):
ctx = self.client.get(self.url).context
blank = initial(ctx['form_previews'].forms[-1])
blank.update(**kw)
return blank
def preview_formset(self, *args, **kw):
kw.setdefault('initial_count', 0)
kw.setdefault('prefix', 'files')
fs = formset(*[a for a in args] + [self.new_preview_formset()], **kw)
return dict([(k, '' if v is None else v) for k, v in fs.items()])
def get_dict(self, **kw):
data = {
'app_slug': 'testname',
'description': 'desc',
'privacy_policy': 'XXX <script>alert("xss")</script>',
'homepage': 'http://www.goodreads.com/user/show/7595895-krupa',
'support_url': 'http://www.goodreads.com/user_challenges/351558',
'support_email': 'krupa+to+the+rescue@goodreads.com',
'categories': [self.cat1.id],
'flash': '1',
'publish': '1'
}
# Add the required screenshot.
data.update(self.preview_formset({
'upload_hash': '<hash>',
'position': 0
}))
data.update(**kw)
# Remove fields without values.
data = dict((k, v) for k, v in data.iteritems() if v is not None)
return data
def check_dict(self, data=None, expected=None):
if data is None:
data = self.get_dict()
addon = self.get_webapp()
# Build a dictionary of expected results.
expected_data = {
'app_slug': 'testname',
'description': 'desc',
'privacy_policy': 'XXX <script>alert("xss")</script>',
'uses_flash': True,
'make_public': amo.PUBLIC_IMMEDIATELY
}
if expected:
expected_data.update(expected)
for field, expected in expected_data.iteritems():
got = unicode(getattr(addon, field))
expected = unicode(expected)
eq_(got, expected,
'Expected %r for %r. Got %r.' % (expected, field, got))
self.assertSetEqual(addon.device_types, self.device_types)
@mock.patch('mkt.submit.views.record_action')
def test_success(self, record_action):
self._step()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, amo.STATUS_PENDING)
assert record_action.called
@mock.patch('mkt.submit.views.record_action')
def test_success_iarc(self, record_action):
"""TODO: delete the above test when cleaning up waffle."""
self.create_switch('iarc')
self._step()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, amo.STATUS_NULL)
assert record_action.called
def test_success_paid(self):
self._step()
self.webapp = self.get_webapp()
self.make_premium(self.webapp)
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
eq_(self.webapp.status, amo.STATUS_NULL)
eq_(self.webapp.highest_status, amo.STATUS_PENDING)
def test_success_prefill_device_types_if_empty(self):
"""
The new submission flow asks for device types at step one.
This ensures that existing incomplete apps still have device
compatibility.
"""
self._step()
AddonDeviceType.objects.all().delete()
self.device_types = amo.DEVICE_TYPES.values()
data = self.get_dict()
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data)
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
def test_success_for_public_waiting(self):
self._step()
data = self.get_dict()
del data['publish']
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
self.check_dict(data=data, expected={'make_public': amo.PUBLIC_WAIT})
self.webapp = self.get_webapp()
self.assert3xx(r, self.get_url('done'))
def test_media_types(self):
self._step()
res = self.client.get(self.url)
doc = pq(res.content)
eq_(doc('.screenshot_upload').attr('data-allowed-types'),
'image/jpeg|image/png|video/webm')
eq_(doc('#id_icon_upload').attr('data-allowed-types'),
'image/jpeg|image/png')
def test_screenshot(self):
self._step()
im_hash = self.upload_preview()
data = self.get_dict()
data.update(self.preview_formset({
'upload_hash': im_hash,
'position': 0
}))
rp = self.client.post(self.url, data)
eq_(rp.status_code, 302)
ad = Addon.objects.get(pk=self.webapp.pk)
eq_(ad.previews.all().count(), 1)
def test_icon(self):
self._step()
im_hash = self.upload_icon()
data = self.get_dict()
data['icon_upload_hash'] = im_hash
data['icon_type'] = 'image/png'
rp = self.client.post(self.url, data)
eq_(rp.status_code, 302)
ad = self.get_webapp()
eq_(ad.icon_type, 'image/png')
for size in amo.ADDON_ICON_SIZES:
fn = '%s-%s.png' % (ad.id, size)
assert os.path.exists(os.path.join(ad.get_icon_dir(), fn)), (
'Expected %s in %s' % (fn, os.listdir(ad.get_icon_dir())))
def test_screenshot_or_video_required(self):
self._step()
data = self.get_dict()
for k in data:
if k.startswith('files') and k.endswith('upload_hash'):
data[k] = ''
rp = self.client.post(self.url, data)
eq_(rp.context['form_previews'].non_form_errors(),
['You must upload at least one screenshot or video.'])
def test_unsaved_screenshot(self):
self._step()
# If there are form errors we should still pass the previews URIs.
preview_type = 'video/webm'
preview_uri = 'moz-filedata:p00p'
data = self.preview_formset({
'position': 1,
'upload_hash': '<hash_one>',
'unsaved_image_type': preview_type,
'unsaved_image_data': preview_uri
})
r = self.client.post(self.url, data)
eq_(r.status_code, 200)
form = pq(r.content)('form')
eq_(form.find('input[name=files-0-unsaved_image_type]').val(),
preview_type)
eq_(form.find('input[name=files-0-unsaved_image_data]').val(),
preview_uri)
def test_unique_allowed(self):
self._step()
r = self.client.post(self.url, self.get_dict(name=self.webapp.name))
self.assertNoFormErrors(r)
app = Webapp.objects.exclude(app_slug=self.webapp.app_slug)[0]
self.assert3xx(r, reverse('submit.app.done', args=[app.app_slug]))
eq_(self.get_webapp().status, amo.STATUS_PENDING)
def test_unique_allowed_iarc(self):
"""TODO: delete the above test when cleaning up waffle."""
self.create_switch('iarc')
self._step()
r = self.client.post(self.url, self.get_dict(name=self.webapp.name))
self.assertNoFormErrors(r)
app = Webapp.objects.exclude(app_slug=self.webapp.app_slug)[0]
self.assert3xx(r, reverse('submit.app.done', args=[app.app_slug]))
eq_(self.get_webapp().status, amo.STATUS_NULL)
def test_slug_invalid(self):
self._step()
# Submit an invalid slug.
d = self.get_dict(app_slug='slug!!! aksl23%%')
r = self.client.post(self.url, d)
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'app_slug',
"Enter a valid 'slug' consisting of letters, numbers, underscores "
"or hyphens.")
def test_slug_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(app_slug=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'app_slug',
'This field is required.')
def test_description_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(description=''))
eq_(r.status_code, 200)
self.assertFormError(r, 'form_basic', 'description',
'This field is required.')
def test_privacy_policy_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(privacy_policy=None))
self.assertFormError(r, 'form_basic', 'privacy_policy',
'This field is required.')
def test_clashing_locale(self):
self.webapp.default_locale = 'de'
self.webapp.save()
self._step()
self.client.cookies['current_locale'] = 'en-us'
data = self.get_dict(name=None, name_de='Test name',
privacy_policy=None,
**{'privacy_policy_en-us': 'XXX'})
r = self.client.post(self.url, data)
self.assertNoFormErrors(r)
def test_homepage_url_optional(self):
self._step()
r = self.client.post(self.url, self.get_dict(homepage=None))
self.assertNoFormErrors(r)
def test_homepage_url_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(homepage='xxx'))
self.assertFormError(r, 'form_basic', 'homepage', 'Enter a valid URL.')
def test_support_url_optional(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_url=None))
self.assertNoFormErrors(r)
def test_support_url_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_url='xxx'))
self.assertFormError(r, 'form_basic', 'support_url',
'Enter a valid URL.')
def test_support_email_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_email=None))
self.assertFormError(r, 'form_basic', 'support_email',
'This field is required.')
def test_support_email_invalid(self):
self._step()
r = self.client.post(self.url, self.get_dict(support_email='xxx'))
self.assertFormError(r, 'form_basic', 'support_email',
'Enter a valid e-mail address.')
def test_categories_required(self):
self._step()
r = self.client.post(self.url, self.get_dict(categories=[]))
eq_(r.context['form_cats'].errors['categories'],
['This field is required.'])
def test_categories_max(self):
self._step()
eq_(amo.MAX_CATEGORIES, 2)
cat2 = Category.objects.create(type=amo.ADDON_WEBAPP, name='bling')
cat3 = Category.objects.create(type=amo.ADDON_WEBAPP, name='blang')
cats = [self.cat1.id, cat2.id, cat3.id]
r = self.client.post(self.url, self.get_dict(categories=cats))
eq_(r.context['form_cats'].errors['categories'],
['You can have only 2 categories.'])
def _post_cats(self, cats):
self.client.post(self.url, self.get_dict(categories=cats))
eq_(sorted(self.get_webapp().categories.values_list('id', flat=True)),
sorted(cats))
def test_categories_add(self):
self._step()
cat2 = Category.objects.create(type=amo.ADDON_WEBAPP, name='bling')
self._post_cats([self.cat1.id, cat2.id])
def test_categories_add_and_remove(self):
self._step()
cat2 = Category.objects.create(type=amo.ADDON_WEBAPP, name='bling')
self._post_cats([cat2.id])
def test_categories_remove(self):
# Add another category here so it gets added to the initial formset.
cat2 = Category.objects.create(type=amo.ADDON_WEBAPP, name='bling')
AddonCategory.objects.create(addon=self.webapp, category=cat2)
self._step()
# `cat2` should get removed.
self._post_cats([self.cat1.id])
def test_games_default_excluded_in_regions(self):
games = Category.objects.create(type=amo.ADDON_WEBAPP, slug='games')
self._step()
r = self.client.post(self.url, self.get_dict(categories=[games.id]))
self.assertNoFormErrors(r)
self.assertSetEqual(AER.objects.values_list('region', flat=True),
[x.id for x in mkt.regions.ALL_REGIONS_WITH_CONTENT_RATINGS])
def test_other_categories_are_not_excluded(self):
# Keep the category around for good measure.
Category.objects.create(type=amo.ADDON_WEBAPP, slug='games')
self._step()
r = self.client.post(self.url, self.get_dict())
self.assertNoFormErrors(r)
eq_(AER.objects.count(), 0)
class TestDone(TestSubmit):
fixtures = ['base/users', 'webapps/337141-steamcube']
def setUp(self):
super(TestDone, self).setUp()
self.webapp = self.get_webapp()
self.url = reverse('submit.app.done', args=[self.webapp.app_slug])
def get_webapp(self):
return Webapp.objects.get(id=337141)
def _step(self, **kw):
data = dict(addon=self.webapp, terms=True, manifest=True,
details=True)
data.update(kw)
self.cl = AppSubmissionChecklist.objects.create(**data)
AddonUser.objects.create(addon=self.webapp, user=self.user)
def test_anonymous(self):
self._test_anonymous()
def test_progress_display(self):
self._step()
self._test_progress_display(['terms', 'manifest', 'details'], 'done')
def test_done(self):
self._step()
res = self.client.get(self.url)
eq_(res.status_code, 200)
class TestNextSteps(amo.tests.TestCase):
# TODO: Delete this test suite once we deploy IARC.
fixtures = fixture('user_999', 'webapp_337141')
def setUp(self):
self.create_switch('iarc')
self.user = UserProfile.objects.get(username='regularuser')
assert self.client.login(username=self.user.email, password='password')
self.webapp = Webapp.objects.get(id=337141)
self.webapp.update(status=amo.STATUS_PENDING)
self.url = reverse('submit.app.done', args=[self.webapp.app_slug])
def test_200(self, **kw):
data = dict(addon=self.webapp, terms=True, manifest=True,
details=True)
data.update(kw)
self.cl = AppSubmissionChecklist.objects.create(**data)
AddonUser.objects.create(addon=self.webapp, user=self.user)
res = self.client.get(self.url)
eq_(res.status_code, 200)
| Joergen/zamboni | mkt/submit/tests/test_views.py | Python | bsd-3-clause | 39321 | ["exciting"] | 8c6249b835bf73d49758947fc27ffabfa978d88b63ac849af9cd49d626cb638f |
"""
Acceptance Tests for Course Information
"""
from flaky import flaky
from common.test.acceptance.pages.studio.course_info import CourseUpdatesPage
from common.test.acceptance.tests.studio.base_studio_test import StudioCourseTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.index import DashboardPage
class UsersCanAddUpdatesTest(StudioCourseTest):
"""
Series of Bok Choy Tests to test the Course Updates page
"""
def _create_and_verify_update(self, message):
"""
Helper method to create and verify an update based on the message.
Arguments:
message (str): Message to add to the update.
"""
self.course_updates_page.visit()
self.assertTrue(self.course_updates_page.is_new_update_button_present())
self.course_updates_page.click_new_update_button()
self.course_updates_page.submit_update(message)
self.assertTrue(self.course_updates_page.is_first_update_message(message))
def setUp(self, is_staff=False, test_xss=True):
super(UsersCanAddUpdatesTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
self.course_updates_page = CourseUpdatesPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def test_course_updates_page_exists(self):
"""
Scenario: User can access Course Updates Page
Given I have opened a new course in Studio
And I go to the course updates page
When I visit the page
Then I should see any course updates
And I should see the new update button
"""
self.course_updates_page.visit()
self.course_updates_page.wait_for_page()
self.assertTrue(self.course_updates_page.is_new_update_button_present())
def test_new_course_update_is_present(self):
"""
Scenario: Users can add updates
Given I have opened a new course in Studio
And I go to the course updates page
When I add a new update with the text "Hello"
Then I should see the update "Hello"
And I see a "saving" notification
"""
self._create_and_verify_update('Hello')
def test_new_course_update_can_be_edited(self):
"""
Scenario: Users can edit updates
Given I have opened a new course in Studio
And I go to the course updates page
When I add a new update with the text "Hello"
And I modify the text to "Goodbye"
Then I should see the update "Goodbye"
"""
self._create_and_verify_update('Hello')
self.assertTrue(self.course_updates_page.is_edit_button_present())
self.course_updates_page.click_edit_update_button()
self.course_updates_page.submit_update('Goodbye')
self.assertFalse(self.course_updates_page.is_first_update_message('Hello'))
self.assertTrue(self.course_updates_page.is_first_update_message('Goodbye'))
@flaky # TNL-5582
def test_delete_course_update(self):
"""
Scenario: Users can delete updates
Given I have opened a new course in Studio
And I go to the course updates page
And I add a new update with the text "Hello"
And I delete the update
And I confirm the prompt
Then I should not see the update "Hello"
"""
self._create_and_verify_update('Hello')
self.course_updates_page.click_delete_update_button()
self.assertTrue(self.course_updates_page.is_course_update_list_empty())
def test_user_edit_date(self):
"""
Scenario: Users can edit update dates
Given I have opened a new course in Studio
And I go to the course updates page
And I add a new update with the text "Hello"
When I edit the date to "06/01/13"
Then I should see the date "June 1, 2013"
"""
self._create_and_verify_update('Hello')
self.course_updates_page.click_edit_update_button()
self.course_updates_page.set_date('06/01/2013')
self.course_updates_page.click_new_update_save_button()
self.assertTrue(self.course_updates_page.is_first_update_date('June 1, 2013'))
def test_outside_tag_preserved(self):
"""
Scenario: Text outside of tags is preserved
Given I have opened a new course in Studio
And I go to the course updates page
When I add a new update with the text "before <strong>middle</strong> after"
Then I should see the update "before <strong>middle</strong> after"
And when I reload the page
Then I should see the update "before <strong>middle</strong> after"
"""
self._create_and_verify_update('before <strong>middle</strong> after')
self.course_updates_page.visit()
self.assertTrue(self.course_updates_page.is_first_update_message('before <strong>middle</strong> after'))
def test_asset_change_in_updates(self):
"""
Scenario: Static links are rewritten when previewing a course update
Given I have opened a new course in Studio
And I go to the course updates page
When I add a new update with the text "<img src='/static/my_img.jpg'/>"
# Can only do partial text matches because of the quotes with in quotes (and regexp step matching).
Then I should see the asset update to "my_img.jpg"
And I change the update from "/static/my_img.jpg" to "<img src='/static/modified.jpg'/>"
Then I should see the asset update to "modified.jpg"
And when I reload the page
Then I should see the asset update to "modified.jpg"
"""
self.course_updates_page.visit()
self.assertTrue(self.course_updates_page.is_new_update_button_present())
self.course_updates_page.click_new_update_button()
self.course_updates_page.submit_update("<img src='/static/my_img.jpg'/>")
self.assertTrue(self.course_updates_page.first_update_contains_html("my_img.jpg"))
self.course_updates_page.click_edit_update_button()
self.course_updates_page.submit_update("<img src='/static/modified.jpg'/>")
self.assertFalse(self.course_updates_page.first_update_contains_html("my_img.jpg"))
self.assertTrue(self.course_updates_page.first_update_contains_html("modified.jpg"))
self.course_updates_page.visit()
self.assertTrue(self.course_updates_page.first_update_contains_html("modified.jpg"))
| tanmaykm/edx-platform | common/test/acceptance/tests/studio/test_studio_course_info.py | Python | agpl-3.0 | 6819 | ["VisIt"] | 0ed943be008cc94540d811fd715b7073ac46d75b60cac9e59c1712b4530c9e7c |
#!/usr/bin/env python
# Author: Junjun Zhang
import sys
import os
import re
import glob
import xmltodict
import json
import yaml
import copy
import logging
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from elasticsearch import Elasticsearch
from collections import OrderedDict
import datetime
import dateutil.parser
from itertools import izip
from distutils.version import LooseVersion
logger = logging.getLogger('gnos parser')
# create console handler with a higher log level
ch = logging.StreamHandler()
def init_es(es_host, es_index):
es = Elasticsearch([ es_host ])
#es.indices.delete( es_index, ignore=[400, 404] )
es.indices.create( es_index, ignore=400 )
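# ignore=400 tolerates the "index already exists" error from Elasticsearch,
# so the script can be re-run against an existing index without failing here.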
# create mappings
es_mapping = open('pancan.donor.mapping.json')
es.indices.put_mapping(index=es_index, doc_type='donor', body=es_mapping.read())
es_mapping.close()
es_mapping = open('pancan.file.mapping.json')
es.indices.put_mapping(index=es_index, doc_type='bam_file', body=es_mapping.read())
es_mapping.close()
return es
def process_gnos_analysis(gnos_analysis, donors, vcf_entries, es_index, es, bam_output_fh, annotations):
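# Route each GNOS analysis object by its attributes: entries carrying a
# 'variant_workflow_name' are handled as variant-call (VCF) results, and
# everything else falls through to the BAM branch below; blacklisted donors
# and obvious test entries are skipped along the way.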
analysis_attrib = get_analysis_attrib(gnos_analysis)
if analysis_attrib and analysis_attrib.get('variant_workflow_name'): # variant call gnos entry
donor_unique_id = analysis_attrib.get('dcc_project_code') + '::' + analysis_attrib.get('submitter_donor_id')
if is_in_donor_blacklist(donor_unique_id):
logger.warning('ignore blacklisted donor: {} GNOS entry: {}'
.format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if gnos_analysis.get('study').lower().endswith('_test'):
logger.warning('ignore variant calling entry with study ending with _test, donor: {} GNOS entry: {}'
.format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if analysis_attrib.get('variant_workflow_name') == 'SangerPancancerCgpCnIndelSnvStr' \
and (
(analysis_attrib.get('variant_workflow_version').startswith('1.0.')
or analysis_attrib.get('variant_workflow_version').startswith('1.1.'))
and not analysis_attrib.get('variant_workflow_version') in ['1.0.0', '1.0.1']
):
donor_unique_id = analysis_attrib.get('dcc_project_code') + '::' + analysis_attrib.get('submitter_donor_id')
logger.info('process Sanger variant call for donor: {}, in entry {}'
.format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')))
current_vcf_entry = create_vcf_entry(analysis_attrib, gnos_analysis)
if annotations.get('sanger_vcf_in_jamboree').get(donor_unique_id): # the current donor has sanger variant calling result in jamboree
if annotations.get('sanger_vcf_in_jamboree').get(donor_unique_id) == current_vcf_entry.get('gnos_id'): # this is the one expected
if not vcf_entries.get(donor_unique_id):
vcf_entries[donor_unique_id] = {'sanger_variant_calling': current_vcf_entry}
else:
vcf_entries.get(donor_unique_id).update({'sanger_variant_calling': current_vcf_entry})
logger.info('Sanger variant calling result for donor: {}. It is already saved in Jamboree, GNOS entry is {}'
.format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')))
else: # this is not the one expected, likely duplications
logger.warning('Sanger variant calling result for donor: {} is not the one saved in Jamboree, ignoring entry {}'
.format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')))
elif vcf_entries.get(donor_unique_id) and vcf_entries.get(donor_unique_id).get('sanger_variant_calling'):
# First check whether they have the same GNOS ID; if so, it's a copy in a different GNOS repo.
# If the GNOS IDs differ, determine which one is newer and keep that one.
# This can get complicated because GNOS entries arrive in random order, so it's not possible to decide
# which ones to keep when there are multiple GNOS IDs and some or all of them have replicates in different GNOS repos.
# Worry about this later. If there were no synchronization or redundant calling/uploading, this would be much easier.
# An alternative is to keep all VCF call entries and sort them out at the end, when all entries are at hand.
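# Note that LooseVersion compares dotted versions numerically, component by
# component, so e.g. LooseVersion('1.0.10') > LooseVersion('1.0.2'); this is
# what makes the version comparisons below safe across multi-digit releases.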
workflow_version_current = current_vcf_entry.get('workflow_details').get('variant_workflow_version')
workflow_version_previous = vcf_entries.get(donor_unique_id).get('sanger_variant_calling').get('workflow_details').get('variant_workflow_version')
gnos_updated_current = current_vcf_entry.get('gnos_last_modified')[0]
gnos_updated_previous = vcf_entries.get(donor_unique_id).get('sanger_variant_calling').get('gnos_last_modified')[0]
if LooseVersion(workflow_version_current) > LooseVersion(workflow_version_previous): # current is newer version
logger.info('Newer Sanger variant calling result with version: {} for donor: {}, in entry: {} replacing older GNOS entry {} in {}'
.format(workflow_version_current, donor_unique_id, \
gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull'), \
vcf_entries.get(donor_unique_id).get('sanger_variant_calling').get('gnos_id'), \
vcf_entries.get(donor_unique_id).get('sanger_variant_calling').get('gnos_repo')[0]))
vcf_entries.get(donor_unique_id)['sanger_variant_calling'] = current_vcf_entry
elif LooseVersion(workflow_version_current) == LooseVersion(workflow_version_previous) \
and gnos_updated_current > gnos_updated_previous: # current is newer
logger.info('Newer Sanger variant calling result with last modified date: {} for donor: {}, in entry: {} replacing older GNOS entry {} in {}'
.format(gnos_updated_current, donor_unique_id, \
gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull'), \
vcf_entries.get(donor_unique_id).get('sanger_variant_calling').get('gnos_id'), \
vcf_entries.get(donor_unique_id).get('sanger_variant_calling').get('gnos_repo')[0]))
vcf_entries.get(donor_unique_id)['sanger_variant_calling'] = current_vcf_entry
else: # no need to replace
logger.warning('Sanger variant calling result already exists and is the latest for donor: {}, ignoring entry {}'
.format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')))
else:
if not vcf_entries.get(donor_unique_id):
vcf_entries[donor_unique_id] = {'sanger_variant_calling': current_vcf_entry}
else:
vcf_entries.get(donor_unique_id).update({'sanger_variant_calling': current_vcf_entry})
elif analysis_attrib.get('variant_workflow_name').startswith('EMBLPancancer') \
and LooseVersion(analysis_attrib.get('variant_workflow_version')) >= LooseVersion('1.0.0'):
donor_unique_id = analysis_attrib.get('dcc_project_code') + '::' + analysis_attrib.get('submitter_donor_id')
logger.info('process EMBL variant call for donor: {}, in entry {}'
.format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')))
current_vcf_entry = create_vcf_entry(analysis_attrib, gnos_analysis)
keep_latest_vcf_entry(donor_unique_id, gnos_analysis, vcf_entries, current_vcf_entry, 'EMBL')
elif analysis_attrib.get('variant_workflow_name') == 'DKFZPancancerCnIndelSnv' \
and LooseVersion(analysis_attrib.get('variant_workflow_version')) >= LooseVersion('1.0.0'):
donor_unique_id = analysis_attrib.get('dcc_project_code') + '::' + analysis_attrib.get('submitter_donor_id')
logger.info('process DKFZ variant call for donor: {}, in entry {}'
.format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')))
current_vcf_entry = create_vcf_entry(analysis_attrib, gnos_analysis)
keep_latest_vcf_entry(donor_unique_id, gnos_analysis, vcf_entries, current_vcf_entry, 'DKFZ')
else: # this is likely a test VCF upload entry
logger.warning('ignore entry that is variant calling but likely is test entry, GNOS entry: {}'
.format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
else: # BAM entry
if gnos_analysis.get('dcc_project_code') and gnos_analysis.get('dcc_project_code').upper() == 'TEST':
logger.warning('ignore entry with dcc_project_code being TEST, GNOS entry: {}'
.format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
# if gnos_analysis.get('library_strategy') and gnos_analysis.get('library_strategy') == 'RNA-Seq':
# logger.warning('ignore entry with library_strategy being RNA-Seq for now, GNOS entry: {}'
# .format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
# return
if not gnos_analysis.get('aliquot_id'):
logger.warning('ignore entry that does not have aliquot_id, GNOS entry: {}'
.format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if gnos_analysis.get('refassem_short_name') != 'unaligned' and gnos_analysis.get('refassem_short_name') != 'GRCh37':
logger.warning('ignore entry that is aligned but not aligned to GRCh37: {}'
.format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return # completely ignore test gnos entries for now; this is the quickest way to keep test data from interfering with real data
if not analysis_attrib:
logger.warning('ignore entry that does not have ANALYSIS information, GNOS entry: {}'
.format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if not analysis_attrib.get('dcc_project_code') or not analysis_attrib.get('submitter_donor_id'):
logger.warning('ignore entry that does not have dcc_project_code or submitter_donor_id, GNOS entry: {}'
.format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
# added on Apr. 24, 2015 after discovering that one RNA-Seq uploaded to GNOS with TCGA barcode which was treated as a new donor
if analysis_attrib.get('dcc_project_code').endswith('-US') and \
analysis_attrib.get('submitter_donor_id').startswith('TCGA-'):
logger.warning('ignore TCGA entry submitted with barcode, GNOS entry: {}'
.format(gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
donor_unique_id = analysis_attrib.get('dcc_project_code') + '::' + analysis_attrib.get('submitter_donor_id')
if is_in_donor_blacklist(donor_unique_id):
logger.warning('ignore blacklisted donor: {} GNOS entry: {}'
.format(donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if is_test(analysis_attrib, gnos_analysis):
logger.warning('ignore test entry: {}'.format(gnos_analysis.get('analysis_detail_uri')))
return # completely ignore test gnos entries for now; this is the quickest way to keep test data from interfering with real data
if gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS'].get('TITLE') and gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['TITLE'].startswith('TCGA/ICGC PanCancer Specimen-Level Germline Variant Calling for Specimen'):
logger.warning('ignore Annai germline call entry: {}'.format(gnos_analysis.get('analysis_detail_uri')))
return
if gnos_analysis.get('library_strategy') == 'RNA-Seq' and not analysis_attrib.get('workflow_name') in ('RNA-Seq_Alignment_SOP_STAR', 'Workflow_Bundle_TOPHAT2'):
logger.warning('ignore RNA-Seq entry that is not STAR or TOPHAT2 aligned, entry: {}'.format(gnos_analysis.get('analysis_detail_uri')))
return
if (gnos_analysis.get('library_strategy') == 'WGS' and gnos_analysis.get('refassem_short_name') != 'unaligned'
and not is_train_2_aligned(analysis_attrib, gnos_analysis)
):
# TODO: we may create another ES index for obsoleted BAM entries
# TODO: we will need a more sophisticated check for handling BAMs that are flagged as aligned but
# treated as unaligned (this is actually the case for TCGA input BAM entries; maybe we need a full
# TCGA specimen list from Marc?)
logger.warning('ignore entry that is aligned but not by train 2 protocol: {}'
.format( gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if gnos_analysis.get('library_strategy') == 'WGS' and is_corrupted_train_2_alignment(analysis_attrib, gnos_analysis):
logger.warning('ignore entry that is aligned by train 2 protocol but seems corrupted: {}'
.format( gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
#TODO: put things above into one function
# temporary hack here to skip any BAM entries from the osdc-tcga repo: it's not supposed to contain
# any BAM data, but it does, and its aligned BAMs overlap with what's in CGHub, which causes problems
if 'osdc-tcga' in gnos_analysis.get('analysis_detail_uri'):
logger.warning('ignore BAM entry in osdc-tcga repo: {}'
.format( gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull') ))
return
if not donors.get(donor_unique_id):
# create a new donor if not exist
donors[ donor_unique_id ] = create_donor(donor_unique_id, analysis_attrib, gnos_analysis)
else: # the donor this bam entry belongs to already exists
# perform some comparison between existing donor and the info in the current bam entry
if (donors[donor_unique_id].get('gnos_study') != gnos_analysis.get('study')):
logger.warning( 'existing donor {} has study {}, but study in current gnos ao is {}'.
format( donor_unique_id,
donors[donor_unique_id].get('gnos_study'),
gnos_analysis.get('study') ) )
# more such check may be added, no time for this now
# now parse out gnos analysis object info to build bam_file doc
bam_file = create_bam_file_entry(donor_unique_id, analysis_attrib, gnos_analysis)
# only do the following when it is WGS
if bam_file.get('library_strategy') == 'WGS':
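# WGS BAMs are split into the single normal specimen (deduplicated by
# aliquot_id and upload date) and a list of aligned tumor specimens tracked
# by a set of aliquot_ids, with counts kept in the donor's 'flags'.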
if 'normal' in bam_file.get('dcc_specimen_type').lower(): # normal
if donors.get(donor_unique_id).get('normal_specimen'): # normal specimen exists
if donors.get(donor_unique_id).get('normal_specimen').get('aliquot_id') == gnos_analysis.get('aliquot_id'):
if bam_file.get('is_aligned'):
if donors.get(donor_unique_id)['normal_specimen'].get('is_aligned'):
logger.info('more than one normal aligned bam for donor: {}, entry in use: {}, additional entry found in: {}'
.format(donor_unique_id,
donors.get(donor_unique_id).get('normal_specimen').get('gnos_metadata_url'),
gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')
)
)
if (not donors.get(donor_unique_id).get('normal_specimen').get('gnos_metadata_url').split('/')[-1]
== gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull').split('/')[-1]):
logger.warning('Two aligned BAM entries for the same normal specimen from donor: {} have different GNOS UUIDs: {} and {}'
.format(donor_unique_id,
donors.get(donor_unique_id).get('normal_specimen').get('gnos_metadata_url'),
gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')
)
)
if donors.get(donor_unique_id).get('normal_specimen').get('upload_date') < bam_file.get(
'upload_date'): # the current one is newer
donors.get(donor_unique_id)['normal_specimen'].update(
prepare_aggregated_specimen_level_info(copy.deepcopy(bam_file))
)
donors.get(donor_unique_id)['gnos_repo'] = bam_file.get('gnos_repo')
else:
donors.get(donor_unique_id)['normal_specimen'].update(
prepare_aggregated_specimen_level_info(copy.deepcopy(bam_file))
)
donors.get(donor_unique_id)['gnos_repo'] = bam_file.get('gnos_repo')
else:
logger.warning('same donor: {} has different aliquot_id: {}, {} for normal specimen, entry in use: {}, additional entry found in {}'
.format(donor_unique_id,
donors.get(donor_unique_id).get('normal_specimen').get('aliquot_id'),
gnos_analysis.get('aliquot_id'),
donors.get(donor_unique_id).get('normal_specimen').get('gnos_metadata_url'),
gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')
)
)
else:
# add normal_specimen
donors.get(donor_unique_id)['normal_specimen'].update(
prepare_aggregated_specimen_level_info(copy.deepcopy(bam_file))
)
# update donor's 'gnos_repo' field with normal aligned specimen
donors.get(donor_unique_id)['gnos_repo'] = bam_file.get('gnos_repo')
else: # not normal
donors.get(donor_unique_id).get('all_tumor_specimen_aliquots').add(bam_file.get('aliquot_id'))
donors.get(donor_unique_id).get('flags')['all_tumor_specimen_aliquot_counts'] = len(donors.get(donor_unique_id).get('all_tumor_specimen_aliquots'))
if bam_file.get('is_aligned'):
if donors.get(donor_unique_id).get('aligned_tumor_specimens'):
if donors.get(donor_unique_id).get('aligned_tumor_specimen_aliquots').intersection(
[ bam_file.get('aliquot_id') ]
): # multiple alignments for the same tumor aliquot_id
logger.warning('more than one tumor aligned bam for donor: {} with aliquot_id: {}, additional entry found in: {}'
.format(donor_unique_id,
bam_file.get('aliquot_id'),
gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')
)
)
else:
donors.get(donor_unique_id).get('aligned_tumor_specimens').append( copy.deepcopy(bam_file) )
donors.get(donor_unique_id).get('aligned_tumor_specimen_aliquots').add(bam_file.get('aliquot_id'))
donors.get(donor_unique_id).get('flags')['aligned_tumor_specimen_aliquot_counts'] = len(donors.get(donor_unique_id).get('aligned_tumor_specimen_aliquots'))
else: # create the first element of the list
donors.get(donor_unique_id)['aligned_tumor_specimens'] = [copy.deepcopy(bam_file)]
donors.get(donor_unique_id).get('aligned_tumor_specimen_aliquots').add(bam_file.get('aliquot_id')) # set of aliquot_id
donors.get(donor_unique_id).get('flags')['aligned_tumor_specimen_aliquot_counts'] = 1
donors.get(donor_unique_id).get('flags')['has_aligned_tumor_specimen'] = True
original_gnos = bam_file['gnos_repo']
bam_file.update( donors[ donor_unique_id ] )
bam_file['gnos_repo'] = original_gnos
del bam_file['bam_files']
del bam_file['normal_specimen']
del bam_file['aligned_tumor_specimens']
del bam_file['aligned_tumor_specimen_aliquots']
del bam_file['all_tumor_specimen_aliquots']
del bam_file['flags']
del bam_file['rna_seq']
donors[donor_unique_id]['bam_files'].append( copy.deepcopy(bam_file) )
# push to Elasticsearch
# Let's not worry about this index type, it seems not that useful
#es.index(index=es_index, doc_type='bam_file', id=bam_file['bam_gnos_ao_id'], body=json.loads( json.dumps(bam_file, default=set_default) ), timeout=90)
bam_output_fh.write(json.dumps(bam_file, default=set_default) + '\n')
def keep_latest_vcf_entry(donor_unique_id, gnos_analysis, vcf_entries, current_vcf_entry, variant_workflow):
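# Keep exactly one VCF entry per donor and workflow: a missing entry is added,
# a higher workflow version wins, and for equal versions the newer GNOS
# last-modified date wins; otherwise the existing entry is kept.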
workflow_label = variant_workflow.lower() + '_variant_calling'
if not vcf_entries.get(donor_unique_id):
vcf_entries[donor_unique_id] = {workflow_label: current_vcf_entry}
return
elif not vcf_entries.get(donor_unique_id).get(workflow_label):
vcf_entries.get(donor_unique_id).update({workflow_label: current_vcf_entry})
return
else:
workflow_version_current = current_vcf_entry.get('workflow_details').get('variant_workflow_version')
workflow_version_previous = vcf_entries.get(donor_unique_id).get(workflow_label).get('workflow_details').get('variant_workflow_version')
gnos_updated_current = current_vcf_entry.get('gnos_last_modified')[0]
gnos_updated_previous = vcf_entries.get(donor_unique_id).get(workflow_label).get('gnos_last_modified')[0]
if LooseVersion(workflow_version_current) > LooseVersion(workflow_version_previous): # current is newer version
logger.info('Newer {} variant calling result with version: {} for donor: {}, in entry: {} replacing older GNOS entry {} in {}'
.format(variant_workflow.upper(), workflow_version_current, donor_unique_id, \
gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull'), \
vcf_entries.get(donor_unique_id).get(workflow_label).get('gnos_id'), \
vcf_entries.get(donor_unique_id).get(workflow_label).get('gnos_repo')[0]))
vcf_entries.get(donor_unique_id)[workflow_label] = current_vcf_entry
elif LooseVersion(workflow_version_current) == LooseVersion(workflow_version_previous) \
and gnos_updated_current > gnos_updated_previous: # current is newer
logger.info('Newer {} variant calling result with last modified date: {} for donor: {}, in entry: {} replacing older GNOS entry {} in {}'
.format(variant_workflow.upper(), gnos_updated_current, donor_unique_id, \
gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull'), \
vcf_entries.get(donor_unique_id).get(workflow_label).get('gnos_id'), \
vcf_entries.get(donor_unique_id).get(workflow_label).get('gnos_repo')[0]))
vcf_entries.get(donor_unique_id)[workflow_label] = current_vcf_entry
else: # no need to replace
        logger.warning('{} variant calling result already exists and is the latest for donor: {}, ignoring entry {}'
            .format(variant_workflow.upper(), donor_unique_id, gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull')))
def create_vcf_entry(analysis_attrib, gnos_analysis):
files = []
for f in gnos_analysis.get('files').get('file'):
files.append({'file_name': f.get('filename'), 'file_size': f.get('filesize'), 'file_md5sum': f.get('checksum').get('#text')})
vcf_entry = {
#'analysis_attrib': analysis_attrib, # remove this later
#'gnos_analysis': gnos_analysis, # remove this later
"gnos_id": gnos_analysis.get('analysis_id'),
"gnos_repo": [gnos_analysis.get('analysis_detail_uri').split('/cghub/')[0] + '/'],
"gnos_last_modified": [dateutil.parser.parse(gnos_analysis.get('last_modified'))],
"files": files,
"study": gnos_analysis.get('study'),
"variant_calling_performed_at": gnos_analysis.get('analysis_xml').get('ANALYSIS_SET').get('ANALYSIS').get('@center_name'),
"workflow_details": {
"variant_workflow_name": analysis_attrib.get('variant_workflow_name'),
"variant_workflow_version": analysis_attrib.get('variant_workflow_version'),
"variant_pipeline_input_info": json.loads( analysis_attrib.get('variant_pipeline_input_info') ).get('workflow_inputs') if analysis_attrib.get('variant_pipeline_input_info') else [],
"variant_pipeline_output_info": json.loads( analysis_attrib.get('variant_pipeline_output_info') ).get('workflow_outputs') if analysis_attrib.get('variant_pipeline_output_info') else [],
"variant_qc_metrics": {},
"variant_timing_metrics": {}
}
}
qc = {}
try:
qc = json.loads( analysis_attrib.get('variant_qc_metrics') ).get('qc_metrics')
    except Exception:
logger.warning('variant_qc_metrics format incorrect: {}'.format(analysis_attrib.get('variant_qc_metrics')))
if isinstance(qc, dict): vcf_entry.get('workflow_details')['variant_qc_metrics'] = qc
# DO NOT KEEP timing metrics, it's way too verbose
#timing = json.loads( analysis_attrib.get('variant_timing_metrics') ).get('timing_metrics') if analysis_attrib.get('variant_timing_metrics') else {}
#if isinstance(timing, dict): vcf_entry.get('workflow_details')['variant_timing_metrics'] = timing
#print json.dumps(vcf_entry) # debugging only
return vcf_entry
def set_default(obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
if isinstance(obj, set):
return list(obj)
raise TypeError
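# Usage note (illustrative values only): set_default is the `default` hook passed
# to json.dumps throughout this script, since donor and bam_file documents carry
# datetime and set values, e.g.:
#   json.dumps({'repos': set(['osdc']), 'ts': datetime.datetime(2015, 4, 10)}, default=set_default)
# serializes the set as a list and the datetime as an ISO-8601 string.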
def prepare_aggregated_specimen_level_info(bam_file):
specimen = copy.deepcopy(bam_file)
# TODO: actual aggregation to be completed
return specimen
def is_in_donor_blacklist(donor_unique_id):
donor_blacklist = set([
"PACA-CA::PCSI_0449",
"PACA-CA::PCSI_0309",
"LIHC-US::G1551",
"LIHC-US::G15512",
"TCGA_MUT_BENCHMARK_4::G15511",
"TCGA_MUT_BENCHMARK_4::G15512",
"PBCA-DE::SNV_CALLING_TEST"
])
    return donor_unique_id in donor_blacklist
def create_bam_file_entry(donor_unique_id, analysis_attrib, gnos_analysis):
file_info = parse_bam_file_info(gnos_analysis.get('files').get('file'))
bam_file = {
"dcc_specimen_type": analysis_attrib.get('dcc_specimen_type'),
"submitter_specimen_id": analysis_attrib.get('submitter_specimen_id'),
"submitter_sample_id": analysis_attrib.get('submitter_sample_id'),
"aliquot_id": gnos_analysis.get('aliquot_id'),
"use_cntl": analysis_attrib.get('use_cntl'),
"total_lanes": analysis_attrib.get('total_lanes'),
"library_strategy": gnos_analysis.get('library_strategy'),
"gnos_repo": gnos_analysis.get('analysis_detail_uri').split('/cghub/')[0] + '/',
"gnos_metadata_url": gnos_analysis.get('analysis_detail_uri').replace('analysisDetail', 'analysisFull'),
"refassem_short_name": gnos_analysis.get('refassem_short_name'),
"bam_gnos_ao_id": gnos_analysis.get('analysis_id'),
"upload_date": dateutil.parser.parse(gnos_analysis.get('upload_date')),
"published_date": dateutil.parser.parse(gnos_analysis.get('published_date')),
"last_modified": dateutil.parser.parse(gnos_analysis.get('last_modified')),
"bam_file_name": file_info.get('file_name'),
"bam_file_size": file_info.get('file_size'),
"md5sum": file_info.get('md5sum'),
}
# much more TODO for bam file info and alignment details
if bam_file.get('refassem_short_name') == 'unaligned' and \
gnos_analysis.get('library_strategy') == 'WGS' :
bam_file['is_aligned'] = False
bam_file['bam_type'] = 'Unaligned BAM'
bam_file['alignment'] = None # or initiate as empty object {}, depending on how ES searches it
elif (analysis_attrib.get('workflow_output_bam_contents') == 'unaligned'
or gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['DESCRIPTION'].startswith('The BAM file includes unmapped reads extracted from specimen-level BAM with the reference alignment')
) and gnos_analysis.get('library_strategy') == 'WGS' : # this is actually BAM with unmapped reads
bam_file['is_aligned'] = False
bam_file['bam_type'] = 'Specimen level unmapped reads after BWA alignment'
bam_file['alignment'] = None
elif gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['DESCRIPTION'].startswith('Specimen-level BAM from the reference alignment') \
and gnos_analysis.get('library_strategy') == 'WGS' :
bam_file['is_aligned'] = True
bam_file['bam_type'] = 'Specimen level aligned BAM'
bam_file['alignment'] = get_alignment_detail(analysis_attrib, gnos_analysis)
elif (gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['DESCRIPTION'].lower().startswith('star ') \
or gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['DESCRIPTION'].lower().startswith('tophat2 ')) \
and gnos_analysis.get('library_strategy') == 'RNA-Seq' :
bam_file['is_aligned'] = True
bam_file['bam_type'] = 'RNA-Seq aligned BAM'
bam_file['alignment'] = get_rna_seq_alignment_detail(analysis_attrib, gnos_analysis)
elif (bam_file.get('refassem_short_name') == 'unaligned' and gnos_analysis.get('library_strategy') == 'RNA-Seq'):
bam_file['is_aligned'] = False
bam_file['bam_type'] = 'RNA-Seq unaligned BAM'
bam_file['alignment'] = None
else:
bam_file['is_aligned'] = False
bam_file['bam_type'] = 'Unknown'
bam_file['alignment'] = None
return bam_file
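# Quick reference for the bam_type classification implemented above:
#   WGS + refassem 'unaligned'                               -> 'Unaligned BAM'
#   WGS + unmapped-reads DESCRIPTION or attribute            -> 'Specimen level unmapped reads after BWA alignment'
#   WGS + 'Specimen-level BAM from the reference alignment'  -> 'Specimen level aligned BAM'
#   RNA-Seq + STAR/TopHat2 DESCRIPTION                       -> 'RNA-Seq aligned BAM'
#   RNA-Seq + refassem 'unaligned'                           -> 'RNA-Seq unaligned BAM'
#   anything else                                            -> 'Unknown'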
def get_rna_seq_alignment_detail(analysis_attrib, gnos_analysis):
alignment = {
"workflow_name": analysis_attrib.get('workflow_name'),
"workflow_version": analysis_attrib.get('workflow_version'),
"workflow_bundle_url": analysis_attrib.get('workflow_bundle_url'),
"workflow_source_url": analysis_attrib.get('workflow_source_url')
}
return alignment
def get_alignment_detail(analysis_attrib, gnos_analysis):
alignment = {
"data_train": "Train 2",
"workflow_name": analysis_attrib.get('workflow_name'),
"workflow_version": analysis_attrib.get('workflow_version'),
"workflow_bundle_url": analysis_attrib.get('workflow_bundle_url'),
"workflow_source_url": analysis_attrib.get('workflow_source_url'),
"pipeline_input_info": json.loads( analysis_attrib.get('pipeline_input_info') ).get('pipeline_input_info') if analysis_attrib.get('pipeline_input_info') else [],
"qc_metrics": json.loads( analysis_attrib.get('qc_metrics').replace('"not_collected"', 'null') ).get('qc_metrics') if analysis_attrib.get('qc_metrics') else [],
"markduplicates_metrics": json.loads( analysis_attrib.get('markduplicates_metrics') ).get('markduplicates_metrics') if analysis_attrib.get('markduplicates_metrics') else [],
"timing_metrics": json.loads( analysis_attrib.get('timing_metrics').replace('"not_collected"', 'null') ).get('timing_metrics') if analysis_attrib.get('timing_metrics') else [],
}
alignment['input_bam_summary'] = {} # TODO: do this in a function
return alignment
def parse_bam_file_info(file_fragment):
file_info = {}
    if not isinstance(file_fragment, list): file_fragment = [file_fragment]
for f in file_fragment:
f = dict(f)
if f.get('filename').endswith('.bam'): # assume there is only one BAM file
file_info['file_name'] = f.get('filename')
file_info['file_size'] = int(f.get('filesize'))
file_info['md5sum'] = f.get('checksum').get('#text')
return file_info
def is_train_2_aligned(analysis_attrib, gnos_analysis):
    return bool(gnos_analysis.get('refassem_short_name') == 'GRCh37'
        and analysis_attrib.get('workflow_version')
        and analysis_attrib.get('workflow_version').startswith('2.6.')
    )
def is_corrupted_train_2_alignment(analysis_attrib, gnos_analysis):
    return bool(is_train_2_aligned(analysis_attrib, gnos_analysis)
        and not gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['DESCRIPTION'].startswith('The BAM file includes unmapped reads extracted from specimen-level BAM with the reference alignment')
        and (not analysis_attrib.get('qc_metrics') or not analysis_attrib.get('markduplicates_metrics'))
    )
def create_donor(donor_unique_id, analysis_attrib, gnos_analysis):
donor = {
'donor_unique_id': donor_unique_id,
'submitter_donor_id': analysis_attrib['submitter_donor_id'],
'dcc_project_code': analysis_attrib['dcc_project_code'],
'gnos_study': gnos_analysis.get('study'),
'gnos_repo': gnos_analysis.get('analysis_detail_uri').split('/cghub/')[0] + '/', # can be better
'flags': {
'is_test': is_test(analysis_attrib, gnos_analysis),
'is_cell_line': is_cell_line(analysis_attrib, gnos_analysis),
'is_train2_donor': False,
'is_train2_pilot': False,
'is_normal_specimen_aligned': False,
'are_all_tumor_specimens_aligned': False,
'has_aligned_tumor_specimen': False,
'aligned_tumor_specimen_aliquot_counts': 0,
'all_tumor_specimen_aliquot_counts': 0,
'is_sanger_variant_calling_performed': False,
'is_dkfz_variant_calling_performed': False,
'is_embl_variant_calling_performed': False,
'variant_calling_performed': [],
'vcf_in_jamboree': [],
'is_normal_star_rna_seq_alignment_performed': False,
'is_normal_tophat_rna_seq_alignment_performed': False,
'is_tumor_star_rna_seq_alignment_performed': False,
'is_tumor_tophat_rna_seq_alignment_performed': False
},
'normal_specimen': {},
'aligned_tumor_specimens': [],
'aligned_tumor_specimen_aliquots': set(),
'all_tumor_specimen_aliquots': set(),
'bam_files': [],
'rna_seq': {
'alignment': {
'normal': {},
'tumor': []
}
}
}
    try:
        experiment = gnos_analysis.get('experiment_xml').get('EXPERIMENT_SET').get('EXPERIMENT')
        if isinstance(experiment, list):
            donor['sequencing_center'] = experiment[0].get('@center_name')
        else:
            donor['sequencing_center'] = experiment.get('@center_name')
    except Exception:
        logger.warning('analysis object has no sequencing_center information: {}'.format(gnos_analysis.get('analysis_detail_uri')))
return donor
def is_test(analysis_attrib, gnos_analysis):
if (gnos_analysis.get('aliquot_id') == '85098796-a2c1-11e3-a743-6c6c38d06053'
or gnos_analysis.get('study') == 'CGTEST'
or gnos_analysis.get('study') == 'icgc_pancancer_vcf_test'
or gnos_analysis.get('study').lower().endswith('_test')
):
return True
elif (analysis_attrib.get('dcc_project_code') == 'None-US'
and analysis_attrib.get('submitter_donor_id') == 'None'
and analysis_attrib.get('submitter_specimen_id') == 'None'
and analysis_attrib.get('dcc_specimen_type') == 'unknown'
):
return True
# TODO: what's the criteria for determining *test* entries
return False
def is_cell_line(analysis_attrib, gnos_analysis):
is_cell_line = False
if analysis_attrib.get('dcc_project_code') == 'TCGA_MUT_BENCHMARK_4':
is_cell_line = True
return is_cell_line
def get_analysis_attrib(gnos_analysis):
analysis_attrib = {}
if (not gnos_analysis['analysis_xml']['ANALYSIS_SET'].get('ANALYSIS')
or not gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS'].get('ANALYSIS_ATTRIBUTES')
or not gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['ANALYSIS_ATTRIBUTES'].get('ANALYSIS_ATTRIBUTE')
):
return None
for a in gnos_analysis['analysis_xml']['ANALYSIS_SET']['ANALYSIS']['ANALYSIS_ATTRIBUTES']['ANALYSIS_ATTRIBUTE']:
if not analysis_attrib.get(a['TAG']):
analysis_attrib[a['TAG']] = a['VALUE']
else:
logger.warning('duplicated analysis attribute key: {}'.format(a['TAG']))
return analysis_attrib
def get_gnos_analysis(f):
    with open(f, 'r') as x:
xml_str = x.read()
return xmltodict.parse(xml_str).get('ResultSet').get('Result')
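# Note: xmltodict.parse returns dict-like objects mirroring the XML tree, so an
# element occurring once yields a dict while a repeated element yields a list;
# callers (e.g. parse_bam_file_info, create_donor) normalize both cases.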
def get_xml_files( metadata_dir, conf, repo ):
xml_files = []
#ao_seen = {}
for r in conf.get('gnos_repos'):
if repo and not r.get('repo_code') == repo:
continue
gnos_ao_list_file = metadata_dir + '/analysis_objects.' + r.get('repo_code') + '.tsv'
if not os.path.isfile(gnos_ao_list_file):
            logger.warning('gnos analysis object list file does not exist: {}'.format(gnos_ao_list_file))
continue
        with open(gnos_ao_list_file, 'r') as ao_list:
            for ao in ao_list:
ao_uuid, ao_state = str.split(ao, '\t')[0:2]
if not ao_state == 'live': continue # skip ao that is not live
#if (ao_seen.get(ao_uuid)): continue # skip ao if already added
#ao_seen[ao_uuid] = 1 # include this one
xml_files.append(r.get('repo_code') + '/' + ao.replace('\t', '__').replace('\n','') + '.xml')
return xml_files
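# Illustrative example (hypothetical values): a line "a1b2c3\tlive\t2015-04-10" in
# analysis_objects.osdc.tsv becomes the file path "osdc/a1b2c3__live__2015-04-10.xml",
# resolved by the caller relative to <output_dir>/__all_metadata_xml/.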
def process(metadata_dir, conf, es_index, es, donor_output_jsonl_file, bam_output_jsonl_file, repo, exclude_gnos_id_lists):
donors = {}
vcf_entries = {}
# update the pc_annotation-sanger_vcf_in_jamboree files using the jamboree subdirectory files
vcf_in_jamboree_dir = '../pcawg-operations/variant_calling/sanger_workflow/jamboree/'
infiles = glob.glob(vcf_in_jamboree_dir+'/Sanger_jamboree_batch*.txt')
outfile = 'pc_annotation-sanger_vcf_in_jamboree.tsv' # hard-code file name
update_vcf_jamboree(infiles, outfile)
annotations = {}
read_annotations(annotations, 'gnos_assignment', 'pc_annotation-gnos_assignment.yml') # hard-code file name for now
read_annotations(annotations, 'train2_pilot', 'pc_annotation-train2_pilot.tsv') # hard-code file name for now
read_annotations(annotations, 'donor_blacklist', 'pc_annotation-donor_blacklist.tsv') # hard-code file name for now
read_annotations(annotations, 'manual_qc_failed', 'pc_annotation-manual_qc_failed.tsv') # hard-code file name for now
read_annotations(annotations, 'sanger_vcf_in_jamboree', 'pc_annotation-sanger_vcf_in_jamboree.tsv') # hard-code file name for now
# hard-code the file name for now
train2_freeze_bams = read_train2_bams('../pcawg-operations/variant_calling/train2-lists/Data_Freeze_Train_2.0_GoogleDocs__2015_04_10_1150.tsv')
# pre-exclude gnos entries when this option is chosen
gnos_ids_to_be_excluded = set()
if exclude_gnos_id_lists:
files = glob.glob(exclude_gnos_id_lists)
for fname in files:
with open(fname) as f:
for d in f: gnos_ids_to_be_excluded.add(d.rstrip())
donor_fh = open(donor_output_jsonl_file, 'w')
bam_fh = open(bam_output_jsonl_file, 'w')
for f in get_xml_files( metadata_dir, conf, repo ):
f = conf.get('output_dir') + '/__all_metadata_xml/' + f
gnos_analysis = get_gnos_analysis(f)
#print (json.dumps(gnos_analysis)) # debug
if gnos_analysis:
logger.info( 'processing xml file: {} ...'.format(f) )
if gnos_analysis.get('analysis_id') and gnos_analysis.get('analysis_id') in gnos_ids_to_be_excluded:
logger.warning( 'skipping xml file: {} with analysis_id: {}, as it\'s in the list to be excluded' \
.format(f, gnos_analysis.get('analysis_id')) )
continue
process_gnos_analysis( gnos_analysis, donors, vcf_entries, es_index, es, bam_fh, annotations )
else:
logger.warning( 'skipping invalid xml file: {}'.format(f) )
for donor_id in donors.keys():
donor = donors[donor_id]
process_donor(donor, annotations, vcf_entries, conf, train2_freeze_bams)
# push to Elasticsearch
es.index(index=es_index, doc_type='donor', id=donor['donor_unique_id'], \
body=json.loads(json.dumps(donor, default=set_default)), timeout=90 )
del donor['bam_files'] # prune this before dumping JSON for Keiran
donor_fh.write(json.dumps(donor, default=set_default) + '\n')
donor_fh.close()
bam_fh.close()
def update_vcf_jamboree(infilenames, outfilename):
seen = set() # just for checking in case there are duplicated lines in jamboree files
with open(outfilename, 'w') as fout:
for f_index in infilenames:
with open(f_index,'r') as fin:
                for line in fin:
                    if len(line.rstrip()) == 0: continue
                    if line in seen: continue # skip duplicated lines across jamboree files
                    donor_unique_id, gnos_metadata_url, aliquot_id = str.split(line.rstrip(), '\t')
                    repo, gnos_id = str.split(gnos_metadata_url, 'cghub/metadata/analysisFull/')
                    fout.write(donor_unique_id+'\t'+gnos_id+'\n')
                    seen.add(line)
def read_train2_bams(filename):
train2_bams = {}
with open(filename, 'r') as r:
for line in r:
if line.startswith('dcc_project_code'): continue
if len(line.rstrip()) == 0: continue
dcc_project_code, donor_submitter_id, normal_aliquot_id, normal_aligned_bam_gnos_url,\
num_tumor_samples, tumor_aliquot_id, tumor_aligned_bam_gnos_urls = str.split(line.rstrip(), '\t')
normal_repo, normal_gnos_id = str.split(normal_aligned_bam_gnos_url, 'cghub/metadata/analysisFull/')
train2_bams[dcc_project_code + "::" + donor_submitter_id] = {}
train2_bams.get(dcc_project_code + "::" + donor_submitter_id)[normal_gnos_id] = \
{"repo": normal_repo, "aliquot_id": normal_aliquot_id, "specimen_type": "normal"}
tumor_aliquots = str.split(tumor_aliquot_id, ',')
tumor_urls = str.split(tumor_aligned_bam_gnos_urls, ',')
for tumor_aliquot_id, tumor_url in zip(tumor_aliquots, tumor_urls):
tumor_repo, tumor_gnos_id = str.split(tumor_url, 'cghub/metadata/analysisFull/')
train2_bams.get(dcc_project_code + "::" + donor_submitter_id)[tumor_gnos_id] = \
{"repo": tumor_repo, "aliquot_id": tumor_aliquot_id, "specimen_type": "tumor"}
return train2_bams
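# Sketch of the returned structure (hypothetical IDs), keyed by donor_unique_id
# and then by the gnos_id of each Train 2.0 freeze BAM:
#   train2_bams['PACA-CA::PCSI_0001'] == {
#       '<normal-gnos-id>': {'repo': '<gnos-repo-url>/', 'aliquot_id': '...', 'specimen_type': 'normal'},
#       '<tumor-gnos-id>':  {'repo': '<gnos-repo-url>/', 'aliquot_id': '...', 'specimen_type': 'tumor'},
#   }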
def read_annotations(annotations, ann_type, file_name):
    with open(file_name, 'r') as r:
        if annotations.get(ann_type): # reset annotation if exists
            del annotations[ann_type]
        if ann_type == 'gnos_assignment':
            annotations['gnos_assignment'] = {}
            assignment = yaml.safe_load(r)
            for repo, project_donors in assignment.iteritems():
                for p_d in project_donors:
                    annotations['gnos_assignment'][p_d] = repo # key is project or donor unique id, value is repo
        elif ann_type == 'sanger_vcf_in_jamboree':
            annotations['sanger_vcf_in_jamboree'] = {}
            for line in r:
                if line.startswith('#'): continue
                if len(line.rstrip()) == 0: continue
                donor_id, ao_id = str.split(line.rstrip(), '\t')
                annotations[ann_type][donor_id] = ao_id
        elif ann_type in ['train2_donors', 'train2_pilot', 'donor_blacklist', 'manual_qc_failed']:
            annotations[ann_type] = set()
            for line in r:
                if line.startswith('#'): continue
                if len(line.rstrip()) == 0: continue
                annotations[ann_type].add(line.rstrip())
        else:
            logger.warning('unknown annotation type: {}'.format(ann_type))
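# Assumed annotation file formats (inferred from the parsing above): the
# gnos_assignment YAML maps a repo code to a list of project codes and/or
# donor_unique_ids, e.g. (hypothetical):
#   osdc:
#     - PACA-CA               # a whole project
#     - LIHC-US::SOME_DONOR   # or a single donor
# The tsv-based annotations carry one id per line; '#' lines are comments.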
def process_donor(donor, annotations, vcf_entries, conf, train2_freeze_bams):
logger.info( 'processing donor: {} ...'.format(donor.get('donor_unique_id')) )
# check whether all tumor specimen(s) aligned
if (donor.get('flags').get('aligned_tumor_specimen_aliquot_counts')
and donor.get('flags').get('aligned_tumor_specimen_aliquot_counts') == donor.get('flags').get('all_tumor_specimen_aliquot_counts')):
donor.get('flags')['are_all_tumor_specimens_aligned'] = True
# now build easy-to-use, specimen-level, gnos_repo-aware summary of bwa alignment status by iterating all collected bams
aggregated_bam_info = bam_aggregation(donor['bam_files'])
#print json.dumps(aggregated_bam_info, default=set_default) # debug only
# let's add this aggregated alignment information to donor object
if aggregated_bam_info.get('WGS'):
add_alignment_status_to_donor(donor, aggregated_bam_info.get('WGS'))
#print json.dumps(donor.get('tumor_alignment_status'), default=set_default) # debug only
#print (json.dumps(aggregated_bam_info.get('RNA-Seq'), default=set_default)) # debug only
if aggregated_bam_info.get('RNA-Seq'):
add_rna_seq_status_to_donor(donor, aggregated_bam_info.get('RNA-Seq'))
if donor.get('rna_seq').get('alignment').get('normal'):
aliquot = donor.get('rna_seq').get('alignment').get('normal')
if aliquot.get('tophat'):
donor.get('flags')['is_normal_tophat_rna_seq_alignment_performed'] = True
if aliquot.get('star'):
donor.get('flags')['is_normal_star_rna_seq_alignment_performed'] = True
if len(donor.get('rna_seq').get('alignment').get('tumor')) > 0:
for aliquot in donor.get('rna_seq').get('alignment').get('tumor'):
if aliquot.get('tophat'):
donor.get('flags')['is_tumor_tophat_rna_seq_alignment_performed'] = True
if aliquot.get('star'):
donor.get('flags')['is_tumor_star_rna_seq_alignment_performed'] = True
if donor.get('normal_alignment_status') and donor.get('normal_alignment_status').get('aligned'):
donor.get('flags')['is_normal_specimen_aligned'] = True
# add gnos repos where complete alignments for the current donor are available
add_gnos_repos_with_complete_alignment_set(donor)
# add gnos repos where one alignment or all alignments for the current donor are available
add_gnos_repos_with_alignment_result(donor)
# add original gnos repo assignment, this is based on a manually maintained yaml file
add_original_gnos_repo(donor, annotations['gnos_assignment'])
if donor.get('flags').get('is_normal_specimen_aligned') and not donor.get('original_gnos_assignment'):
        logger.warning('donor with normal aligned but original_gnos_assignment is empty, please update gnos assignment annotation for donor: {} with {}'
            .format(donor.get('donor_unique_id'), conf.get(donor.get('normal_alignment_status').get('aligned_bam').get('gnos_repo')[0])))
# it should be pretty safe to assign it automatically for this freshly aligned normal specimen
donor['original_gnos_assignment'] = conf.get(donor.get('normal_alignment_status').get('aligned_bam').get('gnos_repo')[0])
add_train2_donor_flag(donor, train2_freeze_bams)
add_train2_pilot_flag(donor, annotations['train2_pilot'])
add_donor_blacklist_flag(donor, annotations['donor_blacklist'])
add_manual_qc_failed_flag(donor, annotations['manual_qc_failed'])
donor.get('flags')['is_sanger_vcf_in_jamboree'] = False
if donor.get('donor_unique_id') in annotations.get('sanger_vcf_in_jamboree'):
donor.get('flags')['is_sanger_vcf_in_jamboree'] = True
donor.get('flags').get('vcf_in_jamboree').append('sanger')
add_vcf_entry(donor, vcf_entries.get(donor.get('donor_unique_id')))
check_bwa_duplicates(donor, train2_freeze_bams)
def check_bwa_duplicates(donor, train2_freeze_bams):
duplicated_bwa_alignment_summary = {
'exists_mismatch_bwa_bams': False,
'exists_mismatch_bwa_bams_in_normal': False,
'exists_mismatch_bwa_bams_in_tumor': False,
'exists_gnos_id_mismatch': False,
'exists_gnos_id_mismatch_in_normal': False,
'exists_gnos_id_mismatch_in_tumor': False,
'exists_md5sum_mismatch': False,
'exists_md5sum_mismatch_in_normal': False,
'exists_md5sum_mismatch_in_tumor': False,
'exists_version_mismatch': False,
'exists_version_mismatch_in_normal': False,
'exists_version_mismatch_in_tumor': False,
'exists_md5sum_mismatch_between_train2_marked_and_sanger_used': False,
'exists_version_mismatch_between_train2_marked_and_sanger_used': False,
'is_train2_freeze_bam_missing': False,
'is_train2_freeze_normal_bam_missing': False,
'is_train2_freeze_tumor_bam_missing': False,
'is_bam_used_by_sanger_missing': False,
'is_normal_bam_used_by_sanger_missing': False,
'is_tumor_bam_used_by_sanger_missing': False,
'normal': {},
'_tmp_tumor': {},
'tumor': []
}
aliquots = {}
duplicated_bwa = False
for bam_file in donor.get('bam_files'):
if not bam_file.get('is_aligned'): continue
        # skip RNA-Seq BAMs, this duplicate check is for BWA (WGS) alignments only
if bam_file.get('library_strategy') == 'RNA-Seq': continue
if aliquots.get(bam_file.get('aliquot_id')): # exists already
duplicated_bwa = True
aliquots.get(bam_file.get('aliquot_id')).append(bam_file)
else:
aliquots[bam_file.get('aliquot_id')] = [bam_file]
    if True or duplicated_bwa: # 'True or' intentionally forces the summary to be built for all donors, not only those with duplicated BWA BAMs
for aliquot in aliquots:
for bam_file in aliquots.get(aliquot):
if 'normal' in bam_file.get('dcc_specimen_type').lower():
if duplicated_bwa_alignment_summary.get('normal'):
duplicated_bwa_alignment_summary.get('normal').get('aligned_bam').append(
{
'gnos_id': bam_file.get('bam_gnos_ao_id'),
'gnos_repo': bam_file.get('gnos_repo'),
'md5sum': bam_file.get('md5sum'),
'upload_date': bam_file.get('upload_date'),
'published_date': bam_file.get('published_date'),
'last_modified': bam_file.get('last_modified'),
'bwa_workflow_version': bam_file.get('alignment').get('workflow_version'),
'is_train2_bam': is_train2_bam(donor, train2_freeze_bams, bam_file.get('bam_gnos_ao_id'), 'normal'),
'is_used_in_sanger_variant_call': is_used_in_sanger_variant_call(donor,
bam_file.get('bam_gnos_ao_id'))
}
)
else:
duplicated_bwa_alignment_summary['normal'] = {
'aliquot_id': aliquot,
'dcc_specimen_type': bam_file.get('dcc_specimen_type'),
'aligned_bam': [
{
'gnos_id': bam_file.get('bam_gnos_ao_id'),
'gnos_repo': bam_file.get('gnos_repo'),
'md5sum': bam_file.get('md5sum'),
'upload_date': bam_file.get('upload_date'),
'published_date': bam_file.get('published_date'),
'last_modified': bam_file.get('last_modified'),
'bwa_workflow_version': bam_file.get('alignment').get('workflow_version'),
'is_train2_bam': is_train2_bam(donor, train2_freeze_bams, bam_file.get('bam_gnos_ao_id'), 'normal'),
'is_used_in_sanger_variant_call': is_used_in_sanger_variant_call(donor,
bam_file.get('bam_gnos_ao_id'))
}
]
}
else: # tumor
if not duplicated_bwa_alignment_summary.get('_tmp_tumor').get(aliquot):
duplicated_bwa_alignment_summary.get('_tmp_tumor')[aliquot] = {
'aliquot_id': aliquot,
'dcc_specimen_type': bam_file.get('dcc_specimen_type'),
'aligned_bam': []
}
duplicated_bwa_alignment_summary.get('_tmp_tumor').get(aliquot).get('aligned_bam').append(
{
'gnos_id': bam_file.get('bam_gnos_ao_id'),
'gnos_repo': bam_file.get('gnos_repo'),
'md5sum': bam_file.get('md5sum'),
'upload_date': bam_file.get('upload_date'),
'published_date': bam_file.get('published_date'),
'last_modified': bam_file.get('last_modified'),
'bwa_workflow_version': bam_file.get('alignment').get('workflow_version'),
'is_train2_bam': is_train2_bam(donor, train2_freeze_bams, bam_file.get('bam_gnos_ao_id'), 'tumor'),
'is_used_in_sanger_variant_call': is_used_in_sanger_variant_call(donor,
bam_file.get('bam_gnos_ao_id'))
}
)
for aliquot in duplicated_bwa_alignment_summary.get('_tmp_tumor'):
duplicated_bwa_alignment_summary.get('tumor').append(duplicated_bwa_alignment_summary.get('_tmp_tumor').get(aliquot))
del duplicated_bwa_alignment_summary['_tmp_tumor']
# scan normal BAMs
if duplicated_bwa_alignment_summary.get('normal'):
b_gnos_id = None
b_md5sum = None
b_version = None
has_train2_n_bam = False
has_sanger_n_bam = False
count_is_train2_not_sanger = 0
count_not_train2_is_sanger = 0
count_is_train2_is_sanger = 0
duplicated_bwa_alignment_summary.get('normal')['exists_mismatch_bwa_bams'] = False
duplicated_bwa_alignment_summary.get('normal')['exists_gnos_id_mismatch'] = False
duplicated_bwa_alignment_summary.get('normal')['exists_md5sum_mismatch'] = False
duplicated_bwa_alignment_summary.get('normal')['exists_version_mismatch'] = False
for bam in duplicated_bwa_alignment_summary.get('normal').get('aligned_bam'):
is_train2_n_bam = bam.get('is_train2_bam')
if is_train2_n_bam: has_train2_n_bam = True
is_sanger_n_bam = bam.get('is_used_in_sanger_variant_call')
if is_sanger_n_bam: has_sanger_n_bam = True
if is_train2_n_bam and not is_sanger_n_bam: count_is_train2_not_sanger += 1
if not is_train2_n_bam and is_sanger_n_bam: count_not_train2_is_sanger += 1
if is_train2_n_bam and is_sanger_n_bam: count_is_train2_is_sanger += 1
if not b_gnos_id: b_gnos_id = bam.get('gnos_id')
if b_gnos_id and not b_gnos_id == bam.get('gnos_id'):
duplicated_bwa_alignment_summary['exists_gnos_id_mismatch'] = True
duplicated_bwa_alignment_summary['exists_gnos_id_mismatch_in_normal'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams_in_normal'] = True
duplicated_bwa_alignment_summary.get('normal')['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary.get('normal')['exists_gnos_id_mismatch'] = True
if not b_md5sum: b_md5sum = bam.get('md5sum')
if b_md5sum and not b_md5sum == bam.get('md5sum'):
duplicated_bwa_alignment_summary['exists_md5sum_mismatch'] = True
duplicated_bwa_alignment_summary['exists_md5sum_mismatch_in_normal'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams_in_normal'] = True
duplicated_bwa_alignment_summary.get('normal')['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary.get('normal')['exists_md5sum_mismatch'] = True
if not b_version: b_version = bam.get('bwa_workflow_version')
if b_version and not b_version == bam.get('bwa_workflow_version'):
duplicated_bwa_alignment_summary['exists_version_mismatch'] = True
duplicated_bwa_alignment_summary['exists_version_mismatch_in_normal'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams_in_normal'] = True
duplicated_bwa_alignment_summary.get('normal')['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary.get('normal')['exists_version_mismatch'] = True
if donor.get('flags').get('is_train2_donor') and not has_train2_n_bam:
duplicated_bwa_alignment_summary['is_train2_freeze_bam_missing'] = True
duplicated_bwa_alignment_summary['is_train2_freeze_normal_bam_missing'] = True
if donor.get('flags').get('is_sanger_variant_calling_performed') and not has_sanger_n_bam:
duplicated_bwa_alignment_summary['is_bam_used_by_sanger_missing'] = True
duplicated_bwa_alignment_summary['is_normal_bam_used_by_sanger_missing'] = True
if donor.get('flags').get('is_train2_donor') and \
donor.get('flags').get('is_sanger_variant_calling_performed') and \
not count_is_train2_is_sanger and \
count_is_train2_not_sanger and count_not_train2_is_sanger:
if duplicated_bwa_alignment_summary['exists_md5sum_mismatch']:
duplicated_bwa_alignment_summary['exists_md5sum_mismatch_between_train2_marked_and_sanger_used'] = True
if duplicated_bwa_alignment_summary['exists_version_mismatch']:
duplicated_bwa_alignment_summary['exists_version_mismatch_between_train2_marked_and_sanger_used'] = True
# scan tumor BAMs
if duplicated_bwa_alignment_summary.get('tumor'):
for aliquot in duplicated_bwa_alignment_summary.get('tumor'):
b_gnos_id = None
b_md5sum = None
b_version = None
has_train2_t_bam = False
has_sanger_t_bam = False
count_is_train2_not_sanger = 0
count_not_train2_is_sanger = 0
count_is_train2_is_sanger = 0
aliquot['exists_mismatch_bwa_bams'] = False
aliquot['exists_gnos_id_mismatch'] = False
aliquot['exists_md5sum_mismatch'] = False
aliquot['exists_version_mismatch'] = False
for bam in aliquot.get('aligned_bam'):
is_train2_t_bam = bam.get('is_train2_bam')
if is_train2_t_bam: has_train2_t_bam = True
is_sanger_t_bam = bam.get('is_used_in_sanger_variant_call')
if is_sanger_t_bam: has_sanger_t_bam = True
if is_train2_t_bam and not is_sanger_t_bam: count_is_train2_not_sanger += 1
if not is_train2_t_bam and is_sanger_t_bam: count_not_train2_is_sanger += 1
if is_train2_t_bam and is_sanger_t_bam: count_is_train2_is_sanger += 1
if not b_gnos_id: b_gnos_id = bam.get('gnos_id')
if b_gnos_id and not b_gnos_id == bam.get('gnos_id'):
duplicated_bwa_alignment_summary['exists_gnos_id_mismatch'] = True
duplicated_bwa_alignment_summary['exists_gnos_id_mismatch_in_tumor'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams_in_tumor'] = True
aliquot['exists_mismatch_bwa_bams'] = True
aliquot['exists_gnos_id_mismatch'] = True
if not b_md5sum: b_md5sum = bam.get('md5sum')
if b_md5sum and not b_md5sum == bam.get('md5sum'):
duplicated_bwa_alignment_summary['exists_md5sum_mismatch'] = True
duplicated_bwa_alignment_summary['exists_md5sum_mismatch_in_tumor'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams_in_tumor'] = True
aliquot['exists_mismatch_bwa_bams'] = True
aliquot['exists_md5sum_mismatch'] = True
if not b_version: b_version = bam.get('bwa_workflow_version')
if b_version and not b_version == bam.get('bwa_workflow_version'):
duplicated_bwa_alignment_summary['exists_version_mismatch'] = True
duplicated_bwa_alignment_summary['exists_version_mismatch_in_tumor'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams'] = True
duplicated_bwa_alignment_summary['exists_mismatch_bwa_bams_in_tumor'] = True
aliquot['exists_version_mismatch'] = True
aliquot['exists_mismatch_bwa_bams'] = True
if donor.get('flags').get('is_train2_donor') and not has_train2_t_bam:
duplicated_bwa_alignment_summary['is_train2_freeze_bam_missing'] = True
duplicated_bwa_alignment_summary['is_train2_freeze_tumor_bam_missing'] = True
if donor.get('flags').get('is_sanger_variant_calling_performed') and not has_sanger_t_bam:
duplicated_bwa_alignment_summary['is_bam_used_by_sanger_missing'] = True
duplicated_bwa_alignment_summary['is_tumor_bam_used_by_sanger_missing'] = True
if donor.get('flags').get('is_train2_donor') and \
donor.get('flags').get('is_sanger_variant_calling_performed') and \
not count_is_train2_is_sanger and \
count_is_train2_not_sanger and count_not_train2_is_sanger:
if duplicated_bwa_alignment_summary['exists_md5sum_mismatch']:
duplicated_bwa_alignment_summary['exists_md5sum_mismatch_between_train2_marked_and_sanger_used'] = True
if duplicated_bwa_alignment_summary['exists_version_mismatch']:
duplicated_bwa_alignment_summary['exists_version_mismatch_between_train2_marked_and_sanger_used'] = True
donor['duplicated_bwa_alignment_summary'] = duplicated_bwa_alignment_summary
def is_used_in_sanger_variant_call(donor, gnos_id):
if donor.get('variant_calling_results') and donor.get('variant_calling_results').get('sanger_variant_calling'):
for input_gnos_entry in donor.get('variant_calling_results').get('sanger_variant_calling') \
.get('workflow_details').get('variant_pipeline_input_info'):
if gnos_id == input_gnos_entry.get('attributes').get('analysis_id'): return True
return False
def is_train2_bam(donor, train2_freeze_bams, gnos_id, specimen_type):
if donor.get('donor_unique_id') and train2_freeze_bams.get(donor.get('donor_unique_id')) \
and train2_freeze_bams.get(donor.get('donor_unique_id')).get(gnos_id):
if not specimen_type == train2_freeze_bams.get(donor.get('donor_unique_id')).get(gnos_id).get('specimen_type'):
logger.warning('This should never happen: specimen type mismatch in train2 list in donor {}'
.format(donor.get('donor_unique_id')))
return True
return False
def add_vcf_entry(donor, vcf_entry):
if not vcf_entry:
return
if not donor.get('variant_calling_results'): donor['variant_calling_results'] = {}
donor.get('variant_calling_results').update(vcf_entry)
for workflow in ['sanger', 'embl', 'dkfz']:
if donor.get('variant_calling_results').get(workflow + '_variant_calling'):
donor.get('flags')['is_' + workflow + '_variant_calling_performed'] = True
donor.get('flags').get('variant_calling_performed').append(workflow)
if not donor.get('flags').get('all_tumor_specimen_aliquot_counts') + 1 == \
len(donor.get('variant_calling_results').get(workflow + '_variant_calling').get('workflow_details').get('variant_pipeline_output_info')):
logger.warning(workflow + ' variant calling workflow may have missed tumour specimen for donor: {}'
.format(donor.get('donor_unique_id')))
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_output_and_tumour_specimen_counts_mismatch'] = True
else:
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_output_and_tumour_specimen_counts_mismatch'] = False
# add the flags of is_bam_used_by_{{workflow}}_missing, is_normal_bam_used_by_{{workflow}}_missing, is_tumor_bam_used_by_{{workflow}}_missing
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_bam_used_by_' + workflow + '_missing'] = False
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_normal_bam_used_by_' + workflow + '_missing'] = False
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_tumor_bam_used_by_' + workflow + '_missing'] = False
has_n_bam = False
vcf_input_t_bam = set()
tumor_alignment_bam = set()
# scan all the vcf input under "variant_pipeline_input_info"
for vcf_input in donor.get('variant_calling_results').get(workflow + '_variant_calling').get('workflow_details').get('variant_pipeline_input_info'):
if 'normal' in vcf_input.get('attributes').get('dcc_specimen_type').lower():
if vcf_input.get('attributes').get('analysis_id') == donor.get('normal_alignment_status').get('aligned_bam').get('gnos_id'): #check normal alignment
has_n_bam = True
elif 'tumour' in vcf_input.get('attributes').get('dcc_specimen_type').lower(): # check the tumor
vcf_input_t_bam.add((vcf_input.get('specimen'), vcf_input.get('attributes').get('analysis_id')))
else:
logger.warning('invalid specimen type: {} in donor: {} with aliquot_id: {}'
.format(vcf_input.get('attributes').get('dcc_specimen_type'), donor.get('donor_unique_id'), vcf_input.get('specimen'))
)
# scan all the bams in tumor_alignment_status
for tumor_alignment in donor.get('tumor_alignment_status'):
tumor_alignment_bam.add((tumor_alignment.get('aliquot_id'), tumor_alignment.get('aligned_bam').get('gnos_id')))
if not has_n_bam:
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_normal_bam_used_by_' + workflow + '_missing'] = True
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_bam_used_by_' + workflow + '_missing'] = True
if vcf_input_t_bam != tumor_alignment_bam:
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_tumor_bam_used_by_' + workflow + '_missing'] = True
donor.get('variant_calling_results').get(workflow + '_variant_calling')['is_bam_used_by_' + workflow + '_missing'] = True
def add_original_gnos_repo(donor, annotation):
if donor.get('gnos_repo'):
del donor['gnos_repo'] # get rid of this rather confusing old flag
if annotation.get(donor.get('donor_unique_id')):
donor['original_gnos_assignment'] = annotation.get(donor.get('donor_unique_id'))
elif annotation.get(donor.get('dcc_project_code')):
donor['original_gnos_assignment'] = annotation.get(donor.get('dcc_project_code'))
else:
donor['original_gnos_assignment'] = None
def add_train2_donor_flag(donor, train2_freeze_bams):
if train2_freeze_bams.get(donor.get('donor_unique_id')):
donor.get('flags')['is_train2_donor'] = True
else:
donor.get('flags')['is_train2_donor'] = False
def add_train2_pilot_flag(donor, annotation):
if donor.get('donor_unique_id') in annotation:
donor.get('flags')['is_train2_pilot'] = True
else:
donor.get('flags')['is_train2_pilot'] = False
def add_donor_blacklist_flag(donor, annotation):
if donor.get('donor_unique_id') in annotation:
donor.get('flags')['is_donor_blacklisted'] = True
else:
donor.get('flags')['is_donor_blacklisted'] = False
def add_manual_qc_failed_flag(donor, annotation):
if donor.get('donor_unique_id') in annotation:
donor.get('flags')['is_manual_qc_failed'] = True
else:
donor.get('flags')['is_manual_qc_failed'] = False
def add_gnos_repos_with_alignment_result(donor):
repos = set()
if (donor.get('normal_alignment_status')
and donor.get('normal_alignment_status').get('aligned_bam')):
repos = set(donor.get('normal_alignment_status').get('aligned_bam').get('gnos_repo'))
if donor.get('tumor_alignment_status'):
for t in donor.get('tumor_alignment_status'):
if t.get('aligned_bam'):
repos.update(set(t.get('aligned_bam').get('gnos_repo')))
donor['gnos_repos_with_alignment_result'] = repos
def add_gnos_repos_with_complete_alignment_set(donor):
repos = set()
if (donor.get('normal_alignment_status')
and donor.get('normal_alignment_status').get('aligned_bam')):
repos = set(donor.get('normal_alignment_status').get('aligned_bam').get('gnos_repo'))
if repos and donor.get('tumor_alignment_status'):
for t in donor.get('tumor_alignment_status'):
if t.get('aligned_bam'):
repos = set.intersection(repos, set(t.get('aligned_bam').get('gnos_repo')))
else:
repos = set()
else:
repos = set()
donor['gnos_repos_with_complete_alignment_set'] = repos
'''
# this flag is not entirely accurate, disable it for now
if repos:
donor['is_alignment_completed'] = True
else:
donor['is_alignment_completed'] = False
'''
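# Illustrative example (hypothetical repo codes): normal aligned at {osdc, ebi},
# tumor aliquot 1 at {osdc}, tumor aliquot 2 at {osdc, etri} => the intersection,
# hence gnos_repos_with_complete_alignment_set, is {osdc}.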
def add_rna_seq_status_to_donor(donor, aggregated_bam_info):
for aliquot_id in aggregated_bam_info.keys():
alignment_status = aggregated_bam_info.get(aliquot_id)
if (alignment_status.get('tophat') and 'normal' in alignment_status.get('tophat').get('dcc_specimen_type').lower()) or \
(alignment_status.get('star') and 'normal' in alignment_status.get('star').get('dcc_specimen_type').lower()): # normal specimen
if not donor.get('rna_seq').get('alignment').get('normal'): #no normal yet in RNA-Seq alignment of this donor
donor.get('rna_seq').get('alignment')['normal'] = alignment_status
else:
logger.warning('more than one RNA-Seq normal aliquot found in donor: {}'.format(donor.get('donor_unique_id')))
elif (alignment_status.get('tophat') and 'tumour' in alignment_status.get('tophat').get('dcc_specimen_type').lower()) or \
(alignment_status.get('star') and 'tumour' in alignment_status.get('star').get('dcc_specimen_type').lower()):
            if not donor.get('rna_seq').get('alignment').get('tumor'): # no tumor yet in RNA-Seq alignment of this donor (dict key is 'tumor', not 'tumour')
donor.get('rna_seq').get('alignment')['tumor'] = []
donor.get('rna_seq').get('alignment')['tumor'].append(alignment_status)
else:
logger.warning('invalid aliquot_id: {} in donor: {} '
.format(aliquot_id, donor.get('donor_unique_id'))
)
def add_alignment_status_to_donor(donor, aggregated_bam_info):
for aliquot_id in aggregated_bam_info.keys():
alignment_status = aggregated_bam_info.get(aliquot_id)
if 'normal' in alignment_status.get('dcc_specimen_type').lower(): # normal specimen
if not donor.get('normal_alignment_status'): # no normal yet in this donor, this is good
donor['normal_alignment_status'] = reorganize_unaligned_bam_info(alignment_status)
else: # another normal with different aliquot_id! this is no good
logger.warning('donor: {} has more than one normal, in use aliquot_id: {}, additional aliquot_id found: {}'
.format(donor.get('donor_unique_id'),
donor.get('normal_alignment_status').get('aliquot_id'),
aliquot_id)
)
elif 'tumour' in alignment_status.get('dcc_specimen_type').lower(): # tumour specimen
if not donor.get('tumor_alignment_status'):
donor['tumor_alignment_status'] = []
donor['tumor_alignment_status'].append(reorganize_unaligned_bam_info(alignment_status))
else:
logger.warning('invalid specimen type: {} in donor: {} with aliquot_id: {}'
.format(alignment_status.get('dcc_specimen_type'), donor.get('donor_unique_id'), aliquot_id)
)
def update_lane_count_flags(alignment_status):
if len(alignment_status.get('lane_count')) == 1:
alignment_status['do_lane_counts_in_every_bam_entry_match'] = True
if str(len(alignment_status.get('unaligned_bams'))) in alignment_status.get('lane_count'):
alignment_status['do_lane_count_and_bam_count_match'] = True
return alignment_status
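# Note: total_lanes values stay strings as parsed from the XML attributes, hence
# the str(len(...)) comparison above. Illustrative example: lane_count set(['3'])
# with 3 entries in unaligned_bams sets both match flags to True.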
def reorganize_unaligned_bam_info(alignment_status):
unaligned_bams = []
for gnos_id in alignment_status.get('unaligned_bams').keys():
unaligned_bams.append(
{
"gnos_id": gnos_id,
"bam_file_name": alignment_status.get('unaligned_bams').get(gnos_id).get('bam_file_name'),
"md5sum": alignment_status.get('unaligned_bams').get(gnos_id).get('md5sum'),
"gnos_repo": alignment_status.get('unaligned_bams').get(gnos_id).get('gnos_repo'),
}
)
alignment_status['unaligned_bams'] = unaligned_bams
update_lane_count_flags(alignment_status)
return alignment_status
def bam_aggregation(bam_files):
aggregated_bam_info_new = {}
if not aggregated_bam_info_new.get('WGS'):
aggregated_bam_info_new['WGS'] = {}
aggregated_bam_info = {}
for bam in bam_files: # check aligned BAM(s) first
if not bam['bam_type'] == 'Specimen level aligned BAM':
continue
if not aggregated_bam_info.get(bam['aliquot_id']): # new aliquot
aggregated_bam_info[bam['aliquot_id']] = {
"aliquot_id": bam['aliquot_id'],
"submitter_specimen_id": bam['submitter_specimen_id'],
"submitter_sample_id": bam['submitter_sample_id'],
"dcc_specimen_type": bam['dcc_specimen_type'],
"aligned": True,
"lane_count": set(),
"do_lane_counts_in_every_bam_entry_match": False,
"do_lane_count_and_bam_count_match": False,
"aligned_bam": {
"gnos_id": bam['bam_gnos_ao_id'],
"bam_file_name": bam['bam_file_name'],
"bam_file_size": bam['bam_file_size'],
"bam_file_md5sum": bam['md5sum'],
"gnos_last_modified": [bam['last_modified']],
"gnos_repo": [bam['gnos_repo']]
},
"bam_with_unmappable_reads": {},
"unaligned_bams": {}
}
else:
alignment_status = aggregated_bam_info.get(bam['aliquot_id'])
if alignment_status.get('aligned_bam').get('gnos_id') == bam['bam_gnos_ao_id']:
if bam['gnos_repo'] in alignment_status.get('aligned_bam').get('gnos_repo'):
logger.warning( 'Same aliquot: {}, same GNOS ID: {} in the same GNOS repo: {} more than once. This should never be possible.'
.format(
bam['aliquot_id'],
alignment_status.get('aligned_bam').get('gnos_id'),
bam['gnos_repo'])
)
else:
alignment_status.get('aligned_bam').get('gnos_repo').append(bam['gnos_repo'])
alignment_status.get('aligned_bam').get('gnos_last_modified').append(bam['last_modified'])
else:
logger.warning( 'Same aliquot: {} from donor: {} has different aligned GNOS BAM entries, in use: {}, additional: {}'
.format(
bam['aliquot_id'],
bam['donor_unique_id'],
alignment_status.get('aligned_bam').get('gnos_id'),
bam['gnos_metadata_url'])
)
sort_repos_by_time(aggregated_bam_info)
for bam in bam_files: # now check BAM with unmappable reads that were derived from aligned BAM
if not bam['bam_type'] == 'Specimen level unmapped reads after BWA alignment':
continue
        if not aggregated_bam_info.get(bam['aliquot_id']): # orphaned unmapped-reads BAM: the corresponding aligned BAM entry is missing
logger.warning('aliquot: {} has GNOS BAM entry for unmapped reads found: {}, however the main aligned BAM entry is missing'
.format(bam['aliquot_id'], bam['bam_gnos_ao_id'])
)
else:
alignment_status = aggregated_bam_info.get(bam['aliquot_id'])
if not alignment_status.get('bam_with_unmappable_reads'):
alignment_status['bam_with_unmappable_reads'] = {
"gnos_id": bam['bam_gnos_ao_id'],
"bam_file_name": bam['bam_file_name'],
"bam_file_size": bam['bam_file_size'],
"gnos_repo": set([bam['gnos_repo']])
}
elif alignment_status.get('bam_with_unmappable_reads').get('gnos_id') == bam['bam_gnos_ao_id']:
alignment_status.get('bam_with_unmappable_reads').get('gnos_repo').add(bam['gnos_repo'])
else:
logger.warning( 'same aliquot: {} has different unmappable reads GNOS BAM entries, in use: {}, additional: {}'
.format(
bam['aliquot_id'],
alignment_status.get('bam_with_unmappable_reads').get('gnos_id'),
bam['bam_gnos_ao_id'])
)
for bam in bam_files: # last check original (submitted) unaligned BAM(s)
if not bam['bam_type'] == 'Unaligned BAM':
continue
if not aggregated_bam_info.get(bam['aliquot_id']): # new aliquot with no aligned BAM yet
aggregated_bam_info[bam['aliquot_id']] = {
"aliquot_id": bam['aliquot_id'],
"submitter_specimen_id": bam['submitter_specimen_id'],
"submitter_sample_id": bam['submitter_sample_id'],
"dcc_specimen_type": bam['dcc_specimen_type'],
"aligned": False,
"lane_count": set([bam['total_lanes']]),
"do_lane_counts_in_every_bam_entry_match": False,
"do_lane_count_and_bam_count_match": False,
"aligned_bam": {},
"bam_with_unmappable_reads": {},
"unaligned_bams": {
bam['bam_gnos_ao_id']: {
"bam_file_name": bam['bam_file_name'],
"md5sum": bam['md5sum'],
"gnos_repo": set([bam['gnos_repo']])
}
}
}
else: # aliquot already exists
alignment_status = aggregated_bam_info.get(bam['aliquot_id'])
alignment_status.get('lane_count').add(bam['total_lanes'])
if alignment_status.get('unaligned_bams').get(bam['bam_gnos_ao_id']): # this unaligned bam was encountered before
if alignment_status.get('unaligned_bams').get(bam['bam_gnos_ao_id']).get('md5sum') == bam['md5sum']: # this unaligned bam has the same md5sum with encountered one
alignment_status.get('unaligned_bams').get(bam['bam_gnos_ao_id']).get('gnos_repo').add(bam['gnos_repo'])
else:
                    logger.warning( 'Unaligned lane-level BAMs with same gnos_id: {} have different md5sum, in use entry at gnos repo(s): {}, additional entry at gnos repo: {}'
                        .format(
                            bam['bam_gnos_ao_id'],
                            alignment_status.get('unaligned_bams').get(bam['bam_gnos_ao_id']).get('gnos_repo'), # a set of repos; indexing a set with [-1] would raise TypeError
                            bam['gnos_repo'])
                    )
else:
alignment_status.get('unaligned_bams')[bam['bam_gnos_ao_id']] = {
"bam_file_name": bam['bam_file_name'],
"md5sum": bam['md5sum'],
"gnos_repo": set([bam['gnos_repo']])
}
aggregated_bam_info_new['WGS'] = aggregated_bam_info
aggregated_bam_info = {}
if not aggregated_bam_info_new.get('RNA-Seq'):
aggregated_bam_info_new['RNA-Seq'] = {}
for bam in bam_files: #check RNA-Seq BAMs
if not bam['bam_type'] == 'RNA-Seq aligned BAM':
continue
if not aggregated_bam_info.get(bam['aliquot_id']): # new aliquot with RNA-Seq BAM
aggregated_bam_info[bam['aliquot_id']] = {}
aliquot_tmp = {
"aliquot_id": bam['aliquot_id'],
"submitter_specimen_id": bam['submitter_specimen_id'],
"submitter_sample_id": bam['submitter_sample_id'],
"dcc_specimen_type": bam['dcc_specimen_type'],
"aligned": True,
"gnos_info": {
"gnos_repo": [bam['gnos_repo']],
"gnos_id": bam['bam_gnos_ao_id'],
"bam_file_name": bam['bam_file_name'],
"bam_file_md5sum": bam['md5sum'],
"bam_file_size": bam['bam_file_size'],
"gnos_last_modified": [bam['last_modified']]
}
}
if 'tophat' in bam.get('alignment').get('workflow_name').lower():
aggregated_bam_info.get(bam['aliquot_id'])['tophat'] = aliquot_tmp
elif 'star' in bam.get('alignment').get('workflow_name').lower():
aggregated_bam_info.get(bam['aliquot_id'])['star'] = aliquot_tmp
            else: # other unknown alignment workflows
                logger.warning('unknown RNA-Seq alignment workflows: {}'
                    .format(bam.get('alignment').get('workflow_name') ))
                continue # skip this BAM; do not abort the whole aggregation
else: #aliquot already exists
alignment_status = aggregated_bam_info.get(bam['aliquot_id'])
if 'tophat' in bam.get('alignment').get('workflow_name').lower():
if not alignment_status.get('tophat'): # no tophat workflow for the aliquot
aliquot_tmp = {
"aliquot_id": bam['aliquot_id'],
"submitter_specimen_id": bam['submitter_specimen_id'],
"submitter_sample_id": bam['submitter_sample_id'],
"dcc_specimen_type": bam['dcc_specimen_type'],
"aligned": True,
"gnos_info": {
"gnos_repo": [bam['gnos_repo']],
"gnos_id": bam['bam_gnos_ao_id'],
"bam_file_name": bam['bam_file_name'],
"bam_file_md5sum": bam['md5sum'],
"bam_file_size": bam['bam_file_size'],
"gnos_last_modified": [bam['last_modified']]
}
}
alignment_status['tophat'] = aliquot_tmp
elif alignment_status.get('tophat').get('gnos_info').get('gnos_id') == bam['bam_gnos_ao_id']:
if bam['gnos_repo'] in alignment_status.get('tophat').get('gnos_info').get('gnos_repo'):
logger.warning( 'Same aliquot: {}, same workflow: {}, same GNOS ID: {} in the same GNOS repo: {} more than once. This should never be possible.'
.format(
bam['aliquot_id'],
bam.get('alignment').get('workflow_name'),
alignment_status.get('tophat').get('gnos_info').get('gnos_id'),
bam['gnos_repo'])
)
else:
alignment_status.get('tophat').get('gnos_info').get('gnos_repo').append(bam['gnos_repo'])
alignment_status.get('tophat').get('gnos_info').get('gnos_last_modified').append(bam['last_modified'])
else:
logger.warning( 'Same aliquot: {} from donor: {} using same workflow: {} has different aligned GNOS BAM entries, in use: {}, additional: {}'
.format(
bam['aliquot_id'],
bam['donor_unique_id'],
bam.get('alignment').get('workflow_name'),
alignment_status.get('tophat').get('gnos_info').get('gnos_id'),
bam['gnos_metadata_url'])
)
elif 'star' in bam.get('alignment').get('workflow_name').lower():
if not alignment_status.get('star'): # no star workflow for the aliquot
aliquot_tmp = {
"aliquot_id": bam['aliquot_id'],
"submitter_specimen_id": bam['submitter_specimen_id'],
"submitter_sample_id": bam['submitter_sample_id'],
"dcc_specimen_type": bam['dcc_specimen_type'],
"aligned": True,
"gnos_info": {
"gnos_repo": [bam['gnos_repo']],
"gnos_id": bam['bam_gnos_ao_id'],
"bam_file_name": bam['bam_file_name'],
"bam_file_md5sum": bam['md5sum'],
"bam_file_size": bam['bam_file_size'],
"gnos_last_modified": [bam['last_modified']]
}
}
alignment_status['star'] = aliquot_tmp
elif alignment_status.get('star').get('gnos_info').get('gnos_id') == bam['bam_gnos_ao_id']:
if bam['gnos_repo'] in alignment_status.get('star').get('gnos_info').get('gnos_repo'):
logger.warning( 'Same aliquot: {}, same workflow: {}, same GNOS ID: {} in the same GNOS repo: {} more than once. This should never be possible.'
.format(
bam['aliquot_id'],
bam.get('alignment').get('workflow_name'),
alignment_status.get('star').get('gnos_info').get('gnos_id'),
bam['gnos_repo'])
)
else:
alignment_status.get('star').get('gnos_info').get('gnos_repo').append(bam['gnos_repo'])
alignment_status.get('star').get('gnos_info').get('gnos_last_modified').append(bam['last_modified'])
else:
logger.warning( 'Same aliquot: {} from donor: {} using same workflow: {} has different aligned GNOS BAM entries, in use: {}, additional: {}'
.format(
bam['aliquot_id'],
bam['donor_unique_id'],
bam.get('alignment').get('workflow_name'),
alignment_status.get('star').get('gnos_info').get('gnos_id'),
bam['gnos_metadata_url'])
)
            else: # other unknown alignment workflows
                logger.warning('unknown RNA-Seq alignment workflows: {}'
                    .format(bam.get('alignment').get('workflow_name') ))
                continue # skip this BAM; do not abort the whole aggregation
aggregated_bam_info_new['RNA-Seq'] = aggregated_bam_info
return aggregated_bam_info_new
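# Shape of the structure returned above (summary):
#   {'WGS':     {aliquot_id: {..., 'aligned_bam': {...}, 'bam_with_unmappable_reads': {...}, 'unaligned_bams': {...}}},
#    'RNA-Seq': {aliquot_id: {'tophat': {...}, 'star': {...}}}}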
def sort_repos_by_time(aggregated_bam_info):
for aliquot in aggregated_bam_info:
agg_bam = aggregated_bam_info.get(aliquot)
if not agg_bam.get('aligned_bam'):
continue
modified_dates = agg_bam.get('aligned_bam').get('gnos_last_modified')
gnos_repos = agg_bam.get('aligned_bam').get('gnos_repo')
agg_bam.get('aligned_bam')['gnos_last_modified'], agg_bam.get('aligned_bam')['gnos_repo'] = \
izip(*sorted(izip(modified_dates, gnos_repos), key=lambda x: x[0]))
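# Illustrative example: gnos_last_modified [t2, t1] paired with gnos_repo ['b', 'a']
# comes back sorted by modification time as tuples (t1, t2) and ('a', 'b');
# izip(*sorted(izip(...))) sorts the (date, repo) pairs and unzips them again.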
def find_latest_metadata_dir(output_dir):
dir_pattern = re.compile(u'^[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}-[0-9]{2}-[0-9]{2}_[A-Z]{3}$')
metadata_dirs = []
for dir in os.listdir(output_dir):
if not os.path.isdir(output_dir + '/' + dir):
continue
if dir_pattern.search(dir):
metadata_dirs.append(output_dir + '/' + dir)
return sorted(metadata_dirs)[-1]
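# Example: a directory named "2015-04-10_11-22-33_UTC" matches dir_pattern; names
# sort lexicographically by timestamp, so sorted(metadata_dirs)[-1] is the newest.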
def main(argv=None):
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
parser = ArgumentParser(description="PCAWG GNOS Metadata Parser",
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-c", "--config", dest="config",
help="Configuration file for GNOS repositories", required=True)
parser.add_argument("-m", "--metadata_dir", dest="metadata_dir",
help="Directory containing metadata manifest files", required=False)
parser.add_argument("-r", "--gnos_repo", dest="repo",
help="Specify which GNOS repo to process, process all repos if none specified", required=False)
parser.add_argument("-x", "--exclude_gnos_id_lists", dest="exclude_gnos_id_lists", # don't use this option for daily cron job
help="File(s) containing GNOS IDs to be excluded, use filename pattern to specify the file(s)", required=False)
parser.add_argument("-s", "--es_index_suffix", dest="es_index_suffix", # don't use this option for daily cron job
help="Single letter suffix for ES index name", required=False)
args = parser.parse_args()
metadata_dir = args.metadata_dir
conf_file = args.config
repo = args.repo
exclude_gnos_id_lists = args.exclude_gnos_id_lists
es_index_suffix = args.es_index_suffix
if not es_index_suffix: es_index_suffix = ''
with open(conf_file) as f:
conf = yaml.safe_load(f)
for r in conf.get('gnos_repos'):
conf[r.get('base_url')] = r.get('repo_code')
# output_dir
output_dir = conf.get('output_dir')
if metadata_dir:
if not os.path.isdir(metadata_dir): # TODO: should add more directory name check to make sure it's right
sys.exit('Error: specified metadata directory does not exist!')
else:
metadata_dir = find_latest_metadata_dir(output_dir) # sorted(glob.glob(output_dir + '/[0-9]*_*_*[A-Z]'))[-1] # find the directory for latest metadata list
    timestamp = metadata_dir.split('/')[-1]
logger.setLevel(logging.INFO)
ch.setLevel(logging.WARN)
log_file = metadata_dir + '.metadata_parser' + ('' if not repo else '.'+repo) + '.log'
# delete old log first if exists
if os.path.isfile(log_file): os.remove(log_file)
fh = logging.FileHandler(log_file)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
es_host = 'localhost:9200'
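    # index name: 'p_' + optional repo code + timestamp digits (leading '20' of the year dropped) + optional suffix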
es_index = 'p_' + ('' if not repo else repo+'_') + re.sub(r'\D', '', timestamp).replace('20','',1) + es_index_suffix
es = init_es(es_host, es_index)
logger.info('processing metadata list files in {} to build es index {}'.format(metadata_dir, es_index))
process(metadata_dir, conf, es_index, es, metadata_dir+'/donor_'+es_index+'.jsonl', metadata_dir+'/bam_'+es_index+'.jsonl', repo, exclude_gnos_id_lists)
# now update kibana dashboard
# donor
dashboard_name = ' ['+repo+']' if repo else ''
with open('kibana-donor.json', 'r') as d:
donor_dashboard = json.loads(d.read())
donor_dashboard['index']['default'] = es_index + '/donor'
title = 'PCAWG Donors' + dashboard_name + ' (beta)'
donor_dashboard['title'] = title
body = {
'dashboard': json.dumps(donor_dashboard),
'user': 'guest',
'group': 'guest',
'title': title
}
es.index(index='kibana-int', doc_type='dashboard', id='PCAWG Donors' + dashboard_name, body=body)
# bam search, no need this for now, not very useful
'''
with open('kibana-bam.json', 'r') as d:
bam_dashboard = json.loads(d.read())
bam_dashboard['index']['default'] = es_index + '/bam_file'
title = 'PCAWG BAMs' + dashboard_name + ' (beta)'
bam_dashboard['title'] = title
body = {
'dashboard': json.dumps(bam_dashboard),
'user': 'guest',
'group': 'guest',
'title': title
}
es.index(index='kibana-int', doc_type='dashboard', id='PCAWG BAMs' + dashboard_name, body=body)
'''
return 0
if __name__ == "__main__":
sys.exit(main())
|
ICGC-TCGA-PanCancer/pancancer-sandbox
|
pcawg_metadata_parser/parse_gnos_xml.py
|
Python
|
gpl-2.0
| 97,470
|
[
"BWA"
] |
5c330fb5ec4c04f6ac6cb0e71fd5465a96dba66b476539bbabe17ec2650ed5b7
|
#!/usr/bin/env python3
### VERY MUCH PYTHON 3 !!!
"""
Serve up static web pages
For Magnus Flora Demo
(Said static web pages then make ajax requests to jarvis and the simulator. All UI logic resides
on the client)
Made available under the MIT license as follows:
Copyright 2017 Brian Bulkowski brian@bulkowski.org
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
if sys.version_info < (3, 5):
    raise RuntimeError("Must be using Python 3.5 or better")
import threading
import time
import datetime
import os
import logging
import json
import argparse
import asyncio
import textwrap
from aiohttp import web
#
# A number of debug / demo endpoints
#
async def hello(request):
return web.Response(text="This is the static webserver.")
async def health(request):
return web.Response(text="OK")
async def index(request):
return web.FileResponse('./index.html')
# background tasks are covered near the bottom of this:
# http://aiohttp.readthedocs.io/en/stable/web.html
# Whatever tasks you create here will be executed and cancelled properly
def create_logger(args):
# create a logging object and add it to the app object
logger = logging.getLogger('MF_Web_Static')
logger.setLevel(logging.DEBUG)
# create a file output
fh = logging.FileHandler('mf_static.log')
fh.setLevel(logging.DEBUG)
# create a console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# if args.verbose:
# ch.setLevel(logging.ERROR)
# else:
# ch.setLevel(logging.DEBUG)
# what format would you like
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
return logger
async def init(app, args, logger):
    # configure the application object passed in by the caller
app.router.add_get('/hello', hello)
app.router.add_get('/health', health)
app.router.add_get('/', index)
app.router.add_static('/static', 'static')
app.router.add_static('/images', 'static/images')
# stash it where everyone can find it
app['log'] = logger
return app
if __name__ == '__main__':
# Parse the command line options
parser = argparse.ArgumentParser(description="MagnusFlora Web Static")
parser.add_argument('--port', '-p', help="HTTP port", default="8080", type=int)
parser.add_argument('--verbose', '-v', help="Puts Lots of Printing Noise in", action='store_true')
parser.set_defaults(verbose=False)
args = parser.parse_args()
logger = create_logger(args)
logger.info('starting MagnusFlora Webserver: there will be %d cakes', 80 )
# register all the async stuff
loop = asyncio.get_event_loop()
app = loop.run_until_complete(init(web.Application(), args, logger))
# run the web server
web.run_app(app, port=args.port)
|
bbulkow/MagnusFlora
|
web/static_web.py
|
Python
|
mit
| 3,922
|
[
"Brian"
] |
80e395e2b3364ec458fc356df3e429840db6d98281eddb66cacd6a42d3eedb01
|
from lib.sitkaAPI import latestMetricInstance
from lib.exception import DataException, MissingException
from lib.metrics import CHaMPMetric
class UndercutMetrics(CHaMPMetric):
TEMPLATE = {
'VisitMetrics': {
'Length' : 0.0,
'LengthPercent' : 0.0,
'Area' : 0.0,
            'AreaPerecent' : 0.0  # (sic) misspelled key name is used consistently throughout this module
}
}
def calc(self, apiData):
"""
Calculate undercut metrics
:param apiData: dictionary of API data. Key is API call name. Value is API data
:return: metrics dictionary
"""
self.log.info("Running UndercutMetrics")
if 'UndercutBanks' not in apiData:
raise MissingException("UndercutBanks missing from apiData")
# Retrieve the undercut API data
undercutVals = [val['value'] for val in apiData['UndercutBanks']['values'] ]
# Retrieve the latest topo metrics
metricInstance = latestMetricInstance(apiData['TopoVisitMetrics'])
if metricInstance is None:
raise MissingException('Missing topo visit metric instance')
# calculate metrics
self.metrics = self._calc(undercutVals, metricInstance)
@staticmethod
def _calc(undercutVals, visitTopoVals):
"""
Calculate undercut metrics
:param undercutVals: dictionary of undercut API data
:param visitTopoVals: dictionary of visit topo metrics
:return: metrics dictionary
"""
# initialize all metrics as zero
dMetrics = {
'Length' : 0.0,
'LengthPercent' : 0.0,
'Area' : 0.0,
'AreaPerecent' : 0.0
}
if len(undercutVals) > 0:
# Calculate the total undercut length and area
for undercut in undercutVals:
dMetrics['Length'] += undercut['EstimatedLength']
try:
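                    # area estimate: length x mean of the 25/50/75 percent width measurements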
dMetrics['Area'] += undercut['EstimatedLength'] * (undercut['Width25Percent'] + undercut['Width50Percent'] + undercut['Width75Percent']) / 3.0
                except TypeError:
                    raise DataException("Undercut: Unhandled 'None' values during area calculation")
# Calculate the percent length and area of the site that is undercut
if visitTopoVals['Lgth_Wet'] is None:
raise DataException("Lgth_Wet cannot be null")
if visitTopoVals['Area_Wet'] is None:
raise DataException("Area_Wet cannot be null")
dMetrics['LengthPercent'] = dMetrics['Length'] / (visitTopoVals['Lgth_Wet'] * 100 / 2)
dMetrics['AreaPerecent'] = dMetrics['Area'] / (visitTopoVals['Area_Wet'] + dMetrics['Area']) * 100
dResults = { 'VisitMetrics' : dMetrics}
return dResults
|
SouthForkResearch/CHaMP_Metrics
|
tools/topoauxmetrics/methods/undercut.py
|
Python
|
gpl-3.0
| 2,827
|
[
"VisIt"
] |
10a43c755157082be384c955f466bf1c354152dd44b05a5a0d048ce9ce0e5f6f
|
""" DIRAC FileCatalog mix-in class to manage directory metadata
"""
# pylint: disable=protected-access
import six
import os
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.Time import queryTime
class DirectoryMetadata(object):
def __init__(self, database=None):
self.db = database
def setDatabase(self, database):
self.db = database
##############################################################################
#
# Manage Metadata fields
#
def addMetadataField(self, pName, pType, credDict):
"""Add a new metadata parameter to the Metadata Database.
:param str pName: parameter name
:param str pType: parameter type in the MySQL notation
:return: S_OK/S_ERROR, Value - comment on a positive result
"""
result = self.db.fmeta.getFileMetadataFields(credDict)
if not result["OK"]:
return result
if pName in result["Value"]:
return S_ERROR("The metadata %s is already defined for Files" % pName)
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
if pName in result["Value"]:
if pType.lower() == result["Value"][pName].lower():
return S_OK("Already exists")
return S_ERROR(
"Attempt to add an existing metadata with different type: %s/%s" % (pType, result["Value"][pName])
)
valueType = pType
if pType.lower()[:3] == "int":
valueType = "INT"
elif pType.lower() == "string":
valueType = "VARCHAR(128)"
elif pType.lower() == "float":
valueType = "FLOAT"
elif pType.lower() == "date":
valueType = "DATETIME"
elif pType == "MetaSet":
valueType = "VARCHAR(64)"
req = "CREATE TABLE FC_Meta_%s ( DirID INTEGER NOT NULL, Value %s, PRIMARY KEY (DirID), INDEX (Value) )" % (
pName,
valueType,
)
result = self.db._query(req)
if not result["OK"]:
return result
result = self.db.insertFields("FC_MetaFields", ["MetaName", "MetaType"], [pName, pType])
if not result["OK"]:
return result
metadataID = result["lastRowId"]
result = self.__transformMetaParameterToData(pName)
if not result["OK"]:
return result
return S_OK("Added new metadata: %d" % metadataID)
def deleteMetadataField(self, pName, credDict):
"""Remove metadata field
:param str pName: meta parameter name
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR
"""
req = "DROP TABLE FC_Meta_%s" % pName
result = self.db._update(req)
error = ""
if not result["OK"]:
error = result["Message"]
req = "DELETE FROM FC_MetaFields WHERE MetaName='%s'" % pName
result = self.db._update(req)
if not result["OK"]:
if error:
result["Message"] = error + "; " + result["Message"]
return result
def getMetadataFields(self, credDict):
"""Get all the defined metadata fields
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value is the metadata:metadata type dictionary
"""
return self._getMetadataFields(credDict)
def _getMetadataFields(self, credDict):
"""Get all the defined metadata fields as they are defined in the database
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value is the metadata:metadata type dictionary
"""
req = "SELECT MetaName,MetaType FROM FC_MetaFields"
result = self.db._query(req)
if not result["OK"]:
return result
metaDict = {}
for row in result["Value"]:
metaDict[row[0]] = row[1]
return S_OK(metaDict)
def addMetadataSet(self, metaSetName, metaSetDict, credDict):
"""Add a new metadata set with the contents from metaSetDict
:param str metaSetName: metaSet name
:param dict metaSetDict: contents of the meta set definition
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR
"""
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaTypeDict = result["Value"]
# Check the sanity of the metadata set contents
for key in metaSetDict:
if key not in metaTypeDict:
return S_ERROR("Unknown key %s" % key)
result = self.db.insertFields("FC_MetaSetNames", ["MetaSetName"], [metaSetName])
if not result["OK"]:
return result
metaSetID = result["lastRowId"]
req = "INSERT INTO FC_MetaSets (MetaSetID,MetaKey,MetaValue) VALUES %s"
vList = []
for key, value in metaSetDict.items():
vList.append("(%d,'%s','%s')" % (metaSetID, key, str(value)))
vString = ",".join(vList)
result = self.db._update(req % vString)
return result
def getMetadataSet(self, metaSetName, expandFlag, credDict):
"""Get fully expanded contents of the metadata set
:param str metaSetName: metaSet name
:param bool expandFlag: flag to whether to expand the metaset recursively
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value dictionary of the meta set definition contents
"""
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaTypeDict = result["Value"]
req = "SELECT S.MetaKey,S.MetaValue FROM FC_MetaSets as S, FC_MetaSetNames as N "
req += "WHERE N.MetaSetName='%s' AND N.MetaSetID=S.MetaSetID" % metaSetName
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_OK({})
resultDict = {}
for key, value in result["Value"]:
if key not in metaTypeDict:
return S_ERROR("Unknown key %s" % key)
if expandFlag:
if metaTypeDict[key] == "MetaSet":
result = self.getMetadataSet(value, expandFlag, credDict)
if not result["OK"]:
return result
resultDict.update(result["Value"])
else:
resultDict[key] = value
else:
resultDict[key] = value
return S_OK(resultDict)
#############################################################################################
#
# Set and get directory metadata
#
#############################################################################################
def setMetadata(self, dPath, metaDict, credDict):
"""Set the value of a given metadata field for the the given directory path
:param str dPath: directory path
:param dict metaDict: dictionary with metadata
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR
"""
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaFields = result["Value"]
result = self.db.dtree.findDir(dPath)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Path not found: %s" % dPath)
dirID = result["Value"]
dirmeta = self.getDirectoryMetadata(dPath, credDict, ownData=False)
if not dirmeta["OK"]:
return dirmeta
for metaName, metaValue in metaDict.items():
if metaName not in metaFields:
result = self.setMetaParameter(dPath, metaName, metaValue, credDict)
if not result["OK"]:
return result
continue
# Check that the metadata is not defined for the parent directories
if metaName in dirmeta["Value"]:
return S_ERROR("Metadata conflict detected for %s for directory %s" % (metaName, dPath))
result = self.db.insertFields("FC_Meta_%s" % metaName, ["DirID", "Value"], [dirID, metaValue])
if not result["OK"]:
if result["Message"].find("Duplicate") != -1:
req = "UPDATE FC_Meta_%s SET Value='%s' WHERE DirID=%d" % (metaName, metaValue, dirID)
result = self.db._update(req)
if not result["OK"]:
return result
else:
return result
return S_OK()
def removeMetadata(self, dPath, metaData, credDict):
"""Remove the specified metadata for the given directory
:param str dPath: directory path
:param dict metaData: metadata dictionary
:param dict credDict: client credential dictionary
:return: standard Dirac result object
"""
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaFields = result["Value"]
result = self.db.dtree.findDir(dPath)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Path not found: %s" % dPath)
dirID = result["Value"]
failedMeta = {}
for meta in metaData:
if meta in metaFields:
# Indexed meta case
req = "DELETE FROM FC_Meta_%s WHERE DirID=%d" % (meta, dirID)
result = self.db._update(req)
if not result["OK"]:
failedMeta[meta] = result["Value"]
else:
# Meta parameter case
req = "DELETE FROM FC_DirMeta WHERE MetaKey='%s' AND DirID=%d" % (meta, dirID)
result = self.db._update(req)
if not result["OK"]:
failedMeta[meta] = result["Value"]
        if failedMeta:
            metaExample = list(failedMeta)[0]
            result = S_ERROR("Failed to remove %d metadata, e.g. %s" % (len(failedMeta), failedMeta[metaExample]))
            result["FailedMetadata"] = failedMeta
            return result
        else:
            return S_OK()
def setMetaParameter(self, dPath, metaName, metaValue, credDict):
"""Set an meta parameter - metadata which is not used in the the data
search operations
:param str dPath: directory name
:param str metaName: meta parameter name
:param str metaValue: meta parameter value
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR
"""
result = self.db.dtree.findDir(dPath)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Path not found: %s" % dPath)
dirID = result["Value"]
result = self.db.insertFields(
"FC_DirMeta", ["DirID", "MetaKey", "MetaValue"], [dirID, metaName, str(metaValue)]
)
return result
def getDirectoryMetaParameters(self, dpath, credDict, inherited=True):
"""Get meta parameters for the given directory
        :param str dpath: directory name
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value dictionary of meta parameters
"""
if inherited:
result = self.db.dtree.getPathIDs(dpath)
if not result["OK"]:
return result
pathIDs = result["Value"]
dirID = pathIDs[-1]
else:
result = self.db.dtree.findDir(dpath)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Path not found: %s" % dpath)
dirID = result["Value"]
pathIDs = [dirID]
if len(pathIDs) > 1:
pathString = ",".join([str(x) for x in pathIDs])
req = "SELECT DirID,MetaKey,MetaValue from FC_DirMeta where DirID in (%s)" % pathString
else:
req = "SELECT DirID,MetaKey,MetaValue from FC_DirMeta where DirID=%d " % dirID
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_OK({})
metaDict = {}
for _dID, key, value in result["Value"]:
if key in metaDict:
if isinstance(metaDict[key], list):
metaDict[key].append(value)
else:
                    metaDict[key] = [metaDict[key], value]
else:
metaDict[key] = value
return S_OK(metaDict)
def getDirectoryMetadata(self, path, credDict, inherited=True, ownData=True):
"""Get metadata for the given directory aggregating metadata for the directory itself
and for all the parent directories if inherited flag is True. Get also the non-indexed
metadata parameters.
:param str path: directory name
:param dict credDict: client credential dictionary
:param bool inherited: flag to include metadata from the parent directories
:param bool ownData: flag to include metadata for the directory itself
:return: S_OK/S_ERROR, Value dictionary of metadata
"""
result = self.db.dtree.getPathIDs(path)
if not result["OK"]:
return result
pathIDs = result["Value"]
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaFields = result["Value"]
metaDict = {}
metaOwnerDict = {}
metaTypeDict = {}
dirID = pathIDs[-1]
if not inherited:
pathIDs = pathIDs[-1:]
if not ownData:
pathIDs = pathIDs[:-1]
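        # pathIDs now holds only the directories whose metadata should be aggregated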
pathString = ",".join([str(x) for x in pathIDs])
for meta in metaFields:
req = "SELECT Value,DirID FROM FC_Meta_%s WHERE DirID in (%s)" % (meta, pathString)
result = self.db._query(req)
if not result["OK"]:
return result
if len(result["Value"]) > 1:
return S_ERROR("Metadata conflict for %s for directory %s" % (meta, path))
if result["Value"]:
metaDict[meta] = result["Value"][0][0]
if int(result["Value"][0][1]) == dirID:
metaOwnerDict[meta] = "OwnMetadata"
else:
metaOwnerDict[meta] = "ParentMetadata"
metaTypeDict[meta] = metaFields[meta]
# Get also non-searchable data
result = self.getDirectoryMetaParameters(path, credDict, inherited)
if result["OK"]:
metaDict.update(result["Value"])
for meta in result["Value"]:
metaOwnerDict[meta] = "OwnParameter"
result = S_OK(metaDict)
result["MetadataOwner"] = metaOwnerDict
result["MetadataType"] = metaTypeDict
return result
def __transformMetaParameterToData(self, metaName):
"""Relocate the meta parameters of all the directories to the corresponding
indexed metadata table
:param str metaName: name of the parameter to transform
:return: S_OK/S_ERROR
"""
req = "SELECT DirID,MetaValue from FC_DirMeta WHERE MetaKey='%s'" % metaName
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_OK()
dirDict = {}
for dirID, meta in result["Value"]:
dirDict[dirID] = meta
dirList = list(dirDict)
# Exclude child directories from the list
for dirID in dirList:
result = self.db.dtree.getSubdirectoriesByID(dirID)
if not result["OK"]:
return result
if not result["Value"]:
continue
childIDs = list(result["Value"])
for childID in childIDs:
if childID in dirList:
del dirList[dirList.index(childID)]
insertValueList = []
for dirID in dirList:
insertValueList.append("( %d,'%s' )" % (dirID, dirDict[dirID]))
req = "INSERT INTO FC_Meta_%s (DirID,Value) VALUES %s" % (metaName, ", ".join(insertValueList))
result = self.db._update(req)
if not result["OK"]:
return result
req = "DELETE FROM FC_DirMeta WHERE MetaKey='%s'" % metaName
result = self.db._update(req)
return result
############################################################################################
#
# Find directories corresponding to the metadata
#
def __createMetaSelection(self, value, table=""):
"""Create an SQL selection element for the given meta value
:param dict value: dictionary with selection instructions suitable for the database search
:param str table: table name
:return: selection string
"""
if isinstance(value, dict):
selectList = []
for operation, operand in value.items():
if operation in [">", "<", ">=", "<="]:
if isinstance(operand, list):
return S_ERROR("Illegal query: list of values for comparison operation")
if isinstance(operand, six.integer_types):
selectList.append("%sValue%s%d" % (table, operation, operand))
elif isinstance(operand, float):
selectList.append("%sValue%s%f" % (table, operation, operand))
else:
selectList.append("%sValue%s'%s'" % (table, operation, operand))
elif operation == "in" or operation == "=":
if isinstance(operand, list):
vString = ",".join(["'" + str(x) + "'" for x in operand])
selectList.append("%sValue IN (%s)" % (table, vString))
else:
selectList.append("%sValue='%s'" % (table, operand))
elif operation == "nin" or operation == "!=":
if isinstance(operand, list):
vString = ",".join(["'" + str(x) + "'" for x in operand])
selectList.append("%sValue NOT IN (%s)" % (table, vString))
else:
selectList.append("%sValue!='%s'" % (table, operand))
selectString = " AND ".join(selectList)
elif isinstance(value, list):
vString = ",".join(["'" + str(x) + "'" for x in value])
selectString = "%sValue in (%s)" % (table, vString)
else:
if value == "Any":
selectString = ""
else:
selectString = "%sValue='%s' " % (table, value)
return S_OK(selectString)
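    # Example (hypothetical values): __createMetaSelection({">=": 5, "in": ["a", "b"]}, "M.")
    # would produce "M.Value>=5 AND M.Value IN ('a','b')" (clause order follows dict iteration).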
def __findSubdirByMeta(self, metaName, value, pathSelection="", subdirFlag=True):
"""Find directories for the given metaName datum. If the the metaName datum type is a list,
combine values in OR. In case the metaName datum is 'Any', finds all the subdirectories
for which the metaName datum is defined at all.
:param str metaName: metadata name
:param dict,list value: dictionary with selection instructions suitable for the database search
:param str pathSelection: directory path selection string
        :param bool subdirFlag: flag to include subdirectories
:return: S_OK/S_ERROR, Value list of found directories
"""
result = self.__createMetaSelection(value, "M.")
if not result["OK"]:
return result
selectString = result["Value"]
req = " SELECT M.DirID FROM FC_Meta_%s AS M" % metaName
if pathSelection:
req += " JOIN ( %s ) AS P WHERE M.DirID=P.DirID" % pathSelection
if selectString:
if pathSelection:
req += " AND %s" % selectString
else:
req += " WHERE %s" % selectString
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_OK([])
dirList = []
for row in result["Value"]:
dirID = row[0]
dirList.append(dirID)
# if subdirFlag:
# result = self.db.dtree.getSubdirectoriesByID( dirID )
# if not result['OK']:
# return result
# dirList += result['Value']
if subdirFlag:
result = self.db.dtree.getAllSubdirectoriesByID(dirList)
if not result["OK"]:
return result
dirList += result["Value"]
return S_OK(dirList)
def __findSubdirMissingMeta(self, metaName, pathSelection):
"""Find directories not having the given meta datum defined
:param str metaName: metadata name
:param str pathSelection: directory path selection string
        :return: S_OK/S_ERROR, Value list of directories
"""
result = self.__findSubdirByMeta(metaName, "Any", pathSelection)
if not result["OK"]:
return result
dirList = result["Value"]
table = self.db.dtree.getTreeTable()
dirString = ",".join([str(x) for x in dirList])
if dirList:
req = "SELECT DirID FROM %s WHERE DirID NOT IN ( %s )" % (table, dirString)
else:
req = "SELECT DirID FROM %s" % table
result = self.db._query(req)
if not result["OK"]:
return result
if not result["Value"]:
return S_OK([])
dirList = [x[0] for x in result["Value"]]
return S_OK(dirList)
def __expandMetaDictionary(self, metaDict, credDict):
"""Update the dictionary with metadata query by expand metaSet type metadata
:param dict metaDict: metaDict to be expanded
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR , Value dictionary of metadata
"""
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaTypeDict = result["Value"]
resultDict = {}
extraDict = {}
for key, value in metaDict.items():
if key not in metaTypeDict:
# return S_ERROR( 'Unknown metadata field %s' % key )
extraDict[key] = value
continue
keyType = metaTypeDict[key]
if keyType != "MetaSet":
resultDict[key] = value
else:
result = self.getMetadataSet(value, True, credDict)
if not result["OK"]:
return result
mDict = result["Value"]
for mk, mv in mDict.items():
if mk in resultDict:
return S_ERROR("Contradictory query for key %s" % mk)
else:
resultDict[mk] = mv
result = S_OK(resultDict)
result["ExtraMetadata"] = extraDict
return result
def __checkDirsForMetadata(self, metaName, value, pathString):
"""Check if any of the given directories conform to the given metadata
        :param str metaName: metadata name
:param dict,list value: dictionary with selection instructions suitable for the database search
:param str pathString: string of comma separated directory names
:return: S_OK/S_ERROR, Value directory ID
"""
result = self.__createMetaSelection(value, "M.")
if not result["OK"]:
return result
selectString = result["Value"]
if selectString:
req = "SELECT M.DirID FROM FC_Meta_%s AS M WHERE %s AND M.DirID IN (%s)" % (
metaName,
selectString,
pathString,
)
else:
req = "SELECT M.DirID FROM FC_Meta_%s AS M WHERE M.DirID IN (%s)" % (metaName, pathString)
result = self.db._query(req)
if not result["OK"]:
return result
elif not result["Value"]:
return S_OK(None)
elif len(result["Value"]) > 1:
return S_ERROR("Conflict in the directory metadata hierarchy")
else:
return S_OK(result["Value"][0][0])
@queryTime
def findDirIDsByMetadata(self, queryDict, path, credDict):
"""Find Directories satisfying the given metadata and being subdirectories of
the given path
:param dict queryDict: dictionary containing query data
:param str path: starting directory path
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value list of selected directory IDs
"""
pathDirList = []
pathDirID = 0
pathString = "0"
if path != "/":
result = self.db.dtree.getPathIDs(path)
if not result["OK"]:
# as result[Value] is already checked in getPathIDs
return result
pathIDs = result["Value"]
pathDirID = pathIDs[-1]
pathString = ",".join([str(x) for x in pathIDs])
result = self.__expandMetaDictionary(queryDict, credDict)
if not result["OK"]:
return result
metaDict = result["Value"]
# Now check the meta data for the requested directory and its parents
finalMetaDict = dict(metaDict)
for meta in metaDict:
result = self.__checkDirsForMetadata(meta, metaDict[meta], pathString)
if not result["OK"]:
return result
elif result["Value"] is not None:
# Some directory in the parent hierarchy is already conforming with the
# given metadata, no need to check it further
del finalMetaDict[meta]
if finalMetaDict:
pathSelection = ""
if pathDirID:
result = self.db.dtree.getSubdirectoriesByID(pathDirID, includeParent=True, requestString=True)
if not result["OK"]:
return result
pathSelection = result["Value"]
dirList = []
first = True
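            # intersect the directory lists obtained for each metadata condition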
for meta, value in finalMetaDict.items():
if value == "Missing":
result = self.__findSubdirMissingMeta(meta, pathSelection)
else:
result = self.__findSubdirByMeta(meta, value, pathSelection)
if not result["OK"]:
return result
mList = result["Value"]
if first:
dirList = mList
first = False
else:
newList = []
for d in dirList:
if d in mList:
newList.append(d)
dirList = newList
else:
if pathDirID:
result = self.db.dtree.getSubdirectoriesByID(pathDirID, includeParent=True)
if not result["OK"]:
return result
pathDirList = list(result["Value"])
finalList = []
dirSelect = False
if finalMetaDict:
dirSelect = True
finalList = dirList
if pathDirList:
finalList = list(set(dirList) & set(pathDirList))
else:
if pathDirList:
dirSelect = True
finalList = pathDirList
result = S_OK(finalList)
if finalList:
result["Selection"] = "Done"
elif dirSelect:
result["Selection"] = "None"
else:
result["Selection"] = "All"
return result
@queryTime
def findDirectoriesByMetadata(self, queryDict, path, credDict):
"""Find Directory names satisfying the given metadata and being subdirectories of
the given path
:param dict queryDict: dictionary containing query data
:param str path: starting directory path
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value list of selected directory paths
"""
result = self.findDirIDsByMetadata(queryDict, path, credDict)
if not result["OK"]:
return result
dirIDList = result["Value"]
dirNameDict = {}
if dirIDList:
result = self.db.dtree.getDirectoryPaths(dirIDList)
if not result["OK"]:
return result
dirNameDict = result["Value"]
elif result["Selection"] == "None":
dirNameDict = {0: "None"}
elif result["Selection"] == "All":
dirNameDict = {0: "All"}
return S_OK(dirNameDict)
def findFilesByMetadata(self, metaDict, path, credDict):
"""Find Files satisfying the given metadata
:param dict metaDict: dictionary with the selection metadata
:param str path: starting directory path
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value list files in selected directories
"""
result = self.findDirectoriesByMetadata(metaDict, path, credDict)
if not result["OK"]:
return result
dirDict = result["Value"]
dirList = list(dirDict)
fileList = []
result = self.db.dtree.getFilesInDirectory(dirList, credDict)
if not result["OK"]:
return result
for _fileID, dirID, fname in result["Value"]:
fileList.append(dirDict[dirID] + "/" + os.path.basename(fname))
return S_OK(fileList)
def findFileIDsByMetadata(self, metaDict, path, credDict, startItem=0, maxItems=25):
"""Find Files satisfying the given metadata
:param dict metaDict: dictionary with the selection metadata
:param str path: starting directory path
:param dict credDict: client credential dictionary
:param int startItem: offset in the file list
        :param int maxItems: max number of files to return
:return: S_OK/S_ERROR, Value list file IDs in selected directories
"""
result = self.findDirIDsByMetadata(metaDict, path, credDict)
if not result["OK"]:
return result
dirList = result["Value"]
return self.db.dtree.getFileIDsInDirectoryWithLimits(dirList, credDict, startItem, maxItems)
################################################################################################
#
# Find metadata compatible with other metadata in order to organize dynamically updated metadata selectors
def __findCompatibleDirectories(self, metaName, value, fromDirs):
"""Find directories compatible with the given metaName datum.
Optionally limit the list of compatible directories to only those in the
fromDirs list
:param str metaName: metadata name
:param dict,list value: dictionary with selection instructions suitable for the database search
:param list fromDirs: list of directories to choose from
:return: S_OK/S_ERROR, Value list of selected directories
"""
# The directories compatible with the given metaName datum are:
# - directory for which the datum is defined
# - all the subdirectories of the above directory
# - all the directories in the parent hierarchy of the above directory
# Find directories defining the metaName datum and their subdirectories
result = self.__findSubdirByMeta(metaName, value, subdirFlag=False)
if not result["OK"]:
return result
selectedDirs = result["Value"]
if not selectedDirs:
return S_OK([])
result = self.db.dtree.getAllSubdirectoriesByID(selectedDirs)
if not result["OK"]:
return result
subDirs = result["Value"]
# Find parent directories of the directories defining the metaName datum
parentDirs = []
for psub in selectedDirs:
result = self.db.dtree.getPathIDsByID(psub)
if not result["OK"]:
return result
parentDirs += result["Value"]
# Constrain the output to only those that are present in the input list
resDirs = parentDirs + subDirs + selectedDirs
if fromDirs:
resDirs = list(set(resDirs) & set(fromDirs))
return S_OK(resDirs)
def __findDistinctMetadata(self, metaList, dList):
"""Find distinct metadata values defined for the list of the input directories.
Limit the search for only metadata in the input list
:param list metaList: list of metadata names
:param list dList: list of directories to limit the selection
:return: S_OK/S_ERROR, Value dictionary of metadata
"""
if dList:
dString = ",".join([str(x) for x in dList])
else:
dString = None
metaDict = {}
for meta in metaList:
req = "SELECT DISTINCT(Value) FROM FC_Meta_%s" % meta
if dString:
req += " WHERE DirID in (%s)" % dString
result = self.db._query(req)
if not result["OK"]:
return result
if result["Value"]:
metaDict[meta] = []
for row in result["Value"]:
metaDict[meta].append(row[0])
return S_OK(metaDict)
def getCompatibleMetadata(self, queryDict, path, credDict):
"""Get distinct metadata values compatible with the given already defined metadata
:param dict queryDict: dictionary containing query data
:param str path: starting directory path
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value dictionary of metadata
"""
pathDirID = 0
if path != "/":
result = self.db.dtree.findDir(path)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Path not found: %s" % path)
pathDirID = int(result["Value"])
pathDirs = []
if pathDirID:
result = self.db.dtree.getSubdirectoriesByID(pathDirID, includeParent=True)
if not result["OK"]:
return result
if result["Value"]:
pathDirs = list(result["Value"])
result = self.db.dtree.getPathIDsByID(pathDirID)
if not result["OK"]:
return result
if result["Value"]:
pathDirs += result["Value"]
# Get the list of metadata fields to inspect
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaFields = result["Value"]
comFields = list(metaFields)
# Commented out to return compatible data also for selection metadata
# for m in metaDict:
# if m in comFields:
# del comFields[comFields.index( m )]
result = self.__expandMetaDictionary(queryDict, credDict)
if not result["OK"]:
return result
metaDict = result["Value"]
fromList = pathDirs
anyMeta = True
if metaDict:
anyMeta = False
for meta, value in metaDict.items():
result = self.__findCompatibleDirectories(meta, value, fromList)
if not result["OK"]:
return result
cdirList = result["Value"]
if cdirList:
fromList = cdirList
else:
fromList = []
break
if anyMeta or fromList:
result = self.__findDistinctMetadata(comFields, fromList)
else:
result = S_OK({})
return result
def removeMetadataForDirectory(self, dirList, credDict):
"""Remove all the metadata for the given directory list
:param list dirList: list of directory paths
:param dict credDict: client credential dictionary
:return: S_OK/S_ERROR, Value Successful/Failed dictionaries
"""
if not dirList:
return S_OK({"Successful": {}, "Failed": {}})
failed = {}
successful = {}
dirs = dirList
if not isinstance(dirList, list):
dirs = [dirList]
dirListString = ",".join([str(d) for d in dirs])
# Get the list of metadata fields to inspect
result = self._getMetadataFields(credDict)
if not result["OK"]:
return result
metaFields = result["Value"]
for meta in metaFields:
req = "DELETE FROM FC_Meta_%s WHERE DirID in ( %s )" % (meta, dirListString)
result = self.db._query(req)
if not result["OK"]:
failed[meta] = result["Message"]
else:
successful[meta] = "OK"
return S_OK({"Successful": successful, "Failed": failed})
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/DirectoryMetadata/DirectoryMetadata.py
|
Python
|
gpl-3.0
| 37,350
|
[
"DIRAC"
] |
e0b8d2061ed43035a9a41477971c8391f050a01d3db7261f82b520ee62ae1adf
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database of Pulay corannulene structures. Subsumed into CFLOW.
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'``
"""
import re
import qcdb
# <<< CORE Database Module >>>
# Geometries and Reference energies from.
dbse = 'CORE'
# <<< Database Members >>>
HRXN = ['dimer3_54', 'dimer3_64', 'dimer3_73', 'dimer3_74', 'dimer3_84', ]
HRXN_SM = []
HRXN_LG = []
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supermolecular calculations
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
# <<< Reference Values [kcal/mol] >>>
# Taken from
BIND = {}
BIND['%s-%s' % (dbse, 'dimer3_54' )] = -14.8000
BIND['%s-%s' % (dbse, 'dimer3_64' )] = -15.4000
BIND['%s-%s' % (dbse, 'dimer3_73' )] = -15.6000 # Bootstrapped, Pulay does not report
BIND['%s-%s' % (dbse, 'dimer3_74' )] = -15.4000
BIND['%s-%s' % (dbse, 'dimer3_84' )] = -15.0000
# <<< Comment Lines >>>
TAGL = {}
TAGL['%s-%s' % (dbse, 'dimer3_54' )] = """ """
TAGL['%s-%s-dimer' % (dbse, 'dimer3_54' )] = """Dimer from """
TAGL['%s-%s-monoA-CP' % (dbse, 'dimer3_54' )] = """Monomer A from """
TAGL['%s-%s-monoB-CP' % (dbse, 'dimer3_54' )] = """Monomer B from """
TAGL['%s-%s-monoA-unCP' % (dbse, 'dimer3_54' )] = """Monomer A from """
TAGL['%s-%s-monoB-unCP' % (dbse, 'dimer3_54' )] = """Monomer B from """
TAGL['%s-%s' % (dbse, 'dimer3_64' )] = """ """
TAGL['%s-%s-dimer' % (dbse, 'dimer3_64' )] = """Dimer from """
TAGL['%s-%s-monoA-CP' % (dbse, 'dimer3_64' )] = """Monomer A from """
TAGL['%s-%s-monoB-CP' % (dbse, 'dimer3_64' )] = """Monomer B from """
TAGL['%s-%s-monoA-unCP' % (dbse, 'dimer3_64' )] = """Monomer A from """
TAGL['%s-%s-monoB-unCP' % (dbse, 'dimer3_64' )] = """Monomer B from """
TAGL['%s-%s' % (dbse, 'dimer3_73' )] = """ """
TAGL['%s-%s-dimer' % (dbse, 'dimer3_73' )] = """Dimer from """
TAGL['%s-%s-monoA-CP' % (dbse, 'dimer3_73' )] = """Monomer A from """
TAGL['%s-%s-monoB-CP' % (dbse, 'dimer3_73' )] = """Monomer B from """
TAGL['%s-%s-monoA-unCP' % (dbse, 'dimer3_73' )] = """Monomer A from """
TAGL['%s-%s-monoB-unCP' % (dbse, 'dimer3_73' )] = """Monomer B from """
TAGL['%s-%s' % (dbse, 'dimer3_74' )] = """ """
TAGL['%s-%s-dimer' % (dbse, 'dimer3_74' )] = """Dimer from """
TAGL['%s-%s-monoA-CP' % (dbse, 'dimer3_74' )] = """Monomer A from """
TAGL['%s-%s-monoB-CP' % (dbse, 'dimer3_74' )] = """Monomer B from """
TAGL['%s-%s-monoA-unCP' % (dbse, 'dimer3_74' )] = """Monomer A from """
TAGL['%s-%s-monoB-unCP' % (dbse, 'dimer3_74' )] = """Monomer B from """
TAGL['%s-%s' % (dbse, 'dimer3_84' )] = """ """
TAGL['%s-%s-dimer' % (dbse, 'dimer3_84' )] = """Dimer from """
TAGL['%s-%s-monoA-CP' % (dbse, 'dimer3_84' )] = """Monomer A from """
TAGL['%s-%s-monoB-CP' % (dbse, 'dimer3_84' )] = """Monomer B from """
TAGL['%s-%s-monoA-unCP' % (dbse, 'dimer3_84' )] = """Monomer A from """
TAGL['%s-%s-monoB-unCP' % (dbse, 'dimer3_84' )] = """Monomer B from """
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-dimer' % (dbse, 'dimer3_54')] = qcdb.Molecule("""
0 1
C 0.70622800 0.97211978 0.61694803
C -0.70622800 0.97211978 0.61694803
C -1.14280400 -0.37137722 0.61681203
C 0.00000000 -1.20165922 0.61659503
C 1.14280400 -0.37137722 0.61681203
C 1.45779000 2.00650178 0.09413403
C -1.45779000 2.00650178 0.09413403
C -2.35873800 -0.76639722 0.09397203
C 0.00000000 -2.48004022 0.09366903
C 2.35873800 -0.76639722 0.09397203
C 0.69261800 3.17923978 -0.25321497
C -0.69261800 3.17923978 -0.25321497
C -2.80958100 1.64119778 -0.25292797
C -3.23765700 0.32373778 -0.25303797
C -2.42918200 -2.16498922 -0.25302597
C -1.30841500 -2.97916822 -0.25327697
C 1.30841500 -2.97916822 -0.25327697
C 2.42918200 -2.16498922 -0.25302597
C 3.23765700 0.32373778 -0.25303797
C 2.80958100 1.64119778 -0.25292797
H 1.20851300 4.06642078 -0.61418797
H -1.20851300 4.06642078 -0.61418797
H -3.49401500 2.40602178 -0.61367197
H -4.24094400 0.10729578 -0.61373997
H -3.36816400 -2.57958822 -0.61350597
H -1.41248600 -4.00024222 -0.61397997
H 1.41248600 -4.00024222 -0.61397997
H 3.36816400 -2.57958822 -0.61350597
H 4.24094400 0.10729578 -0.61373997
H 3.49401500 2.40602178 -0.61367197
--
0 1
C 0.70622800 0.97211978 4.15694803
C -0.70622800 0.97211978 4.15694803
C -1.14280400 -0.37137722 4.15681203
C 0.00000000 -1.20165922 4.15659503
C 1.14280400 -0.37137722 4.15681203
C 1.45779000 2.00650178 3.63413403
C -1.45779000 2.00650178 3.63413403
C -2.35873800 -0.76639722 3.63397203
C 0.00000000 -2.48004022 3.63366903
C 2.35873800 -0.76639722 3.63397203
C 0.69261800 3.17923978 3.28678503
C -0.69261800 3.17923978 3.28678503
C -2.80958100 1.64119778 3.28707203
C -3.23765700 0.32373778 3.28696203
C -2.42918200 -2.16498922 3.28697403
C -1.30841500 -2.97916822 3.28672303
C 1.30841500 -2.97916822 3.28672303
C 2.42918200 -2.16498922 3.28697403
C 3.23765700 0.32373778 3.28696203
C 2.80958100 1.64119778 3.28707203
H 1.20851300 4.06642078 2.92581203
H -1.20851300 4.06642078 2.92581203
H -3.49401500 2.40602178 2.92632803
H -4.24094400 0.10729578 2.92626003
H -3.36816400 -2.57958822 2.92649403
H -1.41248600 -4.00024222 2.92602003
H 1.41248600 -4.00024222 2.92602003
H 3.36816400 -2.57958822 2.92649403
H 4.24094400 0.10729578 2.92626003
H 3.49401500 2.40602178 2.92632803
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, 'dimer3_64')] = qcdb.Molecule("""
0 1
C 0.70622800 0.97211978 0.61694803
C -0.70622800 0.97211978 0.61694803
C -1.14280400 -0.37137722 0.61681203
C 0.00000000 -1.20165922 0.61659503
C 1.14280400 -0.37137722 0.61681203
C 1.45779000 2.00650178 0.09413403
C -1.45779000 2.00650178 0.09413403
C -2.35873800 -0.76639722 0.09397203
C 0.00000000 -2.48004022 0.09366903
C 2.35873800 -0.76639722 0.09397203
C 0.69261800 3.17923978 -0.25321497
C -0.69261800 3.17923978 -0.25321497
C -2.80958100 1.64119778 -0.25292797
C -3.23765700 0.32373778 -0.25303797
C -2.42918200 -2.16498922 -0.25302597
C -1.30841500 -2.97916822 -0.25327697
C 1.30841500 -2.97916822 -0.25327697
C 2.42918200 -2.16498922 -0.25302597
C 3.23765700 0.32373778 -0.25303797
C 2.80958100 1.64119778 -0.25292797
H 1.20851300 4.06642078 -0.61418797
H -1.20851300 4.06642078 -0.61418797
H -3.49401500 2.40602178 -0.61367197
H -4.24094400 0.10729578 -0.61373997
H -3.36816400 -2.57958822 -0.61350597
H -1.41248600 -4.00024222 -0.61397997
H 1.41248600 -4.00024222 -0.61397997
H 3.36816400 -2.57958822 -0.61350597
H 4.24094400 0.10729578 -0.61373997
H 3.49401500 2.40602178 -0.61367197
--
0 1
C 0.70622800 0.97211978 4.25694803
C -0.70622800 0.97211978 4.25694803
C -1.14280400 -0.37137722 4.25681203
C 0.00000000 -1.20165922 4.25659503
C 1.14280400 -0.37137722 4.25681203
C 1.45779000 2.00650178 3.73413403
C -1.45779000 2.00650178 3.73413403
C -2.35873800 -0.76639722 3.73397203
C 0.00000000 -2.48004022 3.73366903
C 2.35873800 -0.76639722 3.73397203
C 0.69261800 3.17923978 3.38678503
C -0.69261800 3.17923978 3.38678503
C -2.80958100 1.64119778 3.38707203
C -3.23765700 0.32373778 3.38696203
C -2.42918200 -2.16498922 3.38697403
C -1.30841500 -2.97916822 3.38672303
C 1.30841500 -2.97916822 3.38672303
C 2.42918200 -2.16498922 3.38697403
C 3.23765700 0.32373778 3.38696203
C 2.80958100 1.64119778 3.38707203
H 1.20851300 4.06642078 3.02581203
H -1.20851300 4.06642078 3.02581203
H -3.49401500 2.40602178 3.02632803
H -4.24094400 0.10729578 3.02626003
H -3.36816400 -2.57958822 3.02649403
H -1.41248600 -4.00024222 3.02602003
H 1.41248600 -4.00024222 3.02602003
H 3.36816400 -2.57958822 3.02649403
H 4.24094400 0.10729578 3.02626003
H 3.49401500 2.40602178 3.02632803
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, 'dimer3_73')] = qcdb.Molecule("""
0 1
C 0.70622800 0.97211978 0.61694803
C -0.70622800 0.97211978 0.61694803
C -1.14280400 -0.37137722 0.61681203
C 0.00000000 -1.20165922 0.61659503
C 1.14280400 -0.37137722 0.61681203
C 1.45779000 2.00650178 0.09413403
C -1.45779000 2.00650178 0.09413403
C -2.35873800 -0.76639722 0.09397203
C 0.00000000 -2.48004022 0.09366903
C 2.35873800 -0.76639722 0.09397203
C 0.69261800 3.17923978 -0.25321497
C -0.69261800 3.17923978 -0.25321497
C -2.80958100 1.64119778 -0.25292797
C -3.23765700 0.32373778 -0.25303797
C -2.42918200 -2.16498922 -0.25302597
C -1.30841500 -2.97916822 -0.25327697
C 1.30841500 -2.97916822 -0.25327697
C 2.42918200 -2.16498922 -0.25302597
C 3.23765700 0.32373778 -0.25303797
C 2.80958100 1.64119778 -0.25292797
H 1.20851300 4.06642078 -0.61418797
H -1.20851300 4.06642078 -0.61418797
H -3.49401500 2.40602178 -0.61367197
H -4.24094400 0.10729578 -0.61373997
H -3.36816400 -2.57958822 -0.61350597
H -1.41248600 -4.00024222 -0.61397997
H 1.41248600 -4.00024222 -0.61397997
H 3.36816400 -2.57958822 -0.61350597
H 4.24094400 0.10729578 -0.61373997
H 3.49401500 2.40602178 -0.61367197
--
0 1
C 0.70622800 0.97211978 4.34694803
C -0.70622800 0.97211978 4.34694803
C -1.14280400 -0.37137722 4.34681203
C 0.00000000 -1.20165922 4.34659503
C 1.14280400 -0.37137722 4.34681203
C 1.45779000 2.00650178 3.82413403
C -1.45779000 2.00650178 3.82413403
C -2.35873800 -0.76639722 3.82397203
C 0.00000000 -2.48004022 3.82366903
C 2.35873800 -0.76639722 3.82397203
C 0.69261800 3.17923978 3.47678503
C -0.69261800 3.17923978 3.47678503
C -2.80958100 1.64119778 3.47707203
C -3.23765700 0.32373778 3.47696203
C -2.42918200 -2.16498922 3.47697403
C -1.30841500 -2.97916822 3.47672303
C 1.30841500 -2.97916822 3.47672303
C 2.42918200 -2.16498922 3.47697403
C 3.23765700 0.32373778 3.47696203
C 2.80958100 1.64119778 3.47707203
H 1.20851300 4.06642078 3.11581203
H -1.20851300 4.06642078 3.11581203
H -3.49401500 2.40602178 3.11632803
H -4.24094400 0.10729578 3.11626003
H -3.36816400 -2.57958822 3.11649403
H -1.41248600 -4.00024222 3.11602003
H 1.41248600 -4.00024222 3.11602003
H 3.36816400 -2.57958822 3.11649403
H 4.24094400 0.10729578 3.11626003
H 3.49401500 2.40602178 3.11632803
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, 'dimer3_74')] = qcdb.Molecule("""
0 1
C 0.70622800 0.97211978 0.61694803
C -0.70622800 0.97211978 0.61694803
C -1.14280400 -0.37137722 0.61681203
C 0.00000000 -1.20165922 0.61659503
C 1.14280400 -0.37137722 0.61681203
C 1.45779000 2.00650178 0.09413403
C -1.45779000 2.00650178 0.09413403
C -2.35873800 -0.76639722 0.09397203
C 0.00000000 -2.48004022 0.09366903
C 2.35873800 -0.76639722 0.09397203
C 0.69261800 3.17923978 -0.25321497
C -0.69261800 3.17923978 -0.25321497
C -2.80958100 1.64119778 -0.25292797
C -3.23765700 0.32373778 -0.25303797
C -2.42918200 -2.16498922 -0.25302597
C -1.30841500 -2.97916822 -0.25327697
C 1.30841500 -2.97916822 -0.25327697
C 2.42918200 -2.16498922 -0.25302597
C 3.23765700 0.32373778 -0.25303797
C 2.80958100 1.64119778 -0.25292797
H 1.20851300 4.06642078 -0.61418797
H -1.20851300 4.06642078 -0.61418797
H -3.49401500 2.40602178 -0.61367197
H -4.24094400 0.10729578 -0.61373997
H -3.36816400 -2.57958822 -0.61350597
H -1.41248600 -4.00024222 -0.61397997
H 1.41248600 -4.00024222 -0.61397997
H 3.36816400 -2.57958822 -0.61350597
H 4.24094400 0.10729578 -0.61373997
H 3.49401500 2.40602178 -0.61367197
--
0 1
C 0.70622800 0.97211978 4.35694803
C -0.70622800 0.97211978 4.35694803
C -1.14280400 -0.37137722 4.35681203
C 0.00000000 -1.20165922 4.35659503
C 1.14280400 -0.37137722 4.35681203
C 1.45779000 2.00650178 3.83413403
C -1.45779000 2.00650178 3.83413403
C -2.35873800 -0.76639722 3.83397203
C 0.00000000 -2.48004022 3.83366903
C 2.35873800 -0.76639722 3.83397203
C 0.69261800 3.17923978 3.48678503
C -0.69261800 3.17923978 3.48678503
C -2.80958100 1.64119778 3.48707203
C -3.23765700 0.32373778 3.48696203
C -2.42918200 -2.16498922 3.48697403
C -1.30841500 -2.97916822 3.48672303
C 1.30841500 -2.97916822 3.48672303
C 2.42918200 -2.16498922 3.48697403
C 3.23765700 0.32373778 3.48696203
C 2.80958100 1.64119778 3.48707203
H 1.20851300 4.06642078 3.12581203
H -1.20851300 4.06642078 3.12581203
H -3.49401500 2.40602178 3.12632803
H -4.24094400 0.10729578 3.12626003
H -3.36816400 -2.57958822 3.12649403
H -1.41248600 -4.00024222 3.12602003
H 1.41248600 -4.00024222 3.12602003
H 3.36816400 -2.57958822 3.12649403
H 4.24094400 0.10729578 3.12626003
H 3.49401500 2.40602178 3.12632803
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, 'dimer3_84')] = qcdb.Molecule("""
0 1
C 0.70622800 0.97211978 0.61694803
C -0.70622800 0.97211978 0.61694803
C -1.14280400 -0.37137722 0.61681203
C 0.00000000 -1.20165922 0.61659503
C 1.14280400 -0.37137722 0.61681203
C 1.45779000 2.00650178 0.09413403
C -1.45779000 2.00650178 0.09413403
C -2.35873800 -0.76639722 0.09397203
C 0.00000000 -2.48004022 0.09366903
C 2.35873800 -0.76639722 0.09397203
C 0.69261800 3.17923978 -0.25321497
C -0.69261800 3.17923978 -0.25321497
C -2.80958100 1.64119778 -0.25292797
C -3.23765700 0.32373778 -0.25303797
C -2.42918200 -2.16498922 -0.25302597
C -1.30841500 -2.97916822 -0.25327697
C 1.30841500 -2.97916822 -0.25327697
C 2.42918200 -2.16498922 -0.25302597
C 3.23765700 0.32373778 -0.25303797
C 2.80958100 1.64119778 -0.25292797
H 1.20851300 4.06642078 -0.61418797
H -1.20851300 4.06642078 -0.61418797
H -3.49401500 2.40602178 -0.61367197
H -4.24094400 0.10729578 -0.61373997
H -3.36816400 -2.57958822 -0.61350597
H -1.41248600 -4.00024222 -0.61397997
H 1.41248600 -4.00024222 -0.61397997
H 3.36816400 -2.57958822 -0.61350597
H 4.24094400 0.10729578 -0.61373997
H 3.49401500 2.40602178 -0.61367197
--
0 1
C 0.70622800 0.97211978 4.45694803
C -0.70622800 0.97211978 4.45694803
C -1.14280400 -0.37137722 4.45681203
C 0.00000000 -1.20165922 4.45659503
C 1.14280400 -0.37137722 4.45681203
C 1.45779000 2.00650178 3.93413403
C -1.45779000 2.00650178 3.93413403
C -2.35873800 -0.76639722 3.93397203
C 0.00000000 -2.48004022 3.93366903
C 2.35873800 -0.76639722 3.93397203
C 0.69261800 3.17923978 3.58678503
C -0.69261800 3.17923978 3.58678503
C -2.80958100 1.64119778 3.58707203
C -3.23765700 0.32373778 3.58696203
C -2.42918200 -2.16498922 3.58697403
C -1.30841500 -2.97916822 3.58672303
C 1.30841500 -2.97916822 3.58672303
C 2.42918200 -2.16498922 3.58697403
C 3.23765700 0.32373778 3.58696203
C 2.80958100 1.64119778 3.58707203
H 1.20851300 4.06642078 3.22581203
H -1.20851300 4.06642078 3.22581203
H -3.49401500 2.40602178 3.22632803
H -4.24094400 0.10729578 3.22626003
H -3.36816400 -2.57958822 3.22649403
H -1.41248600 -4.00024222 3.22602003
H 1.41248600 -4.00024222 3.22602003
H 3.36816400 -2.57958822 3.22649403
H 4.24094400 0.10729578 3.22626003
H 3.49401500 2.40602178 3.22632803
units angstrom
""")
# <<< Derived Geometry Strings >>>
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
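# extract_fragments(1) keeps monomer A alone (uncorrected); extract_fragments(1, 2)
# keeps monomer A real with monomer B as ghost atoms for the counterpoise correction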
#########################################################################
# <<< Supplementary Quantum Chemical Results >>>
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = {}
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_54-dimer' ] = 4584.11459289
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_54-monoA-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_54-monoB-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_64-dimer' ] = 4555.01239979
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_64-monoA-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_64-monoB-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_73-dimer' ] = 4529.48976988
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_73-monoA-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_73-monoB-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_74-dimer' ] = 4526.69216135
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_74-monoA-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_74-monoB-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_84-dimer' ] = 4499.12706628
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_84-monoA-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_84-monoB-unCP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_54-monoA-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_54-monoB-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_64-monoA-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_64-monoB-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_73-monoA-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_73-monoB-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_74-monoA-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_74-monoB-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_84-monoA-CP' ] = 1387.77369315
DATA['NUCLEAR REPULSION ENERGY']['CORE-dimer3_84-monoB-CP' ] = 1387.77369315
|
amjames/psi4
|
psi4/share/psi4/databases/CORE.py
|
Python
|
lgpl-3.0
| 23,802
|
[
"Psi4"
] |
1f4057949ef7bf0c5680884b69bc7dc90658d2897abe29ac61371a00b37b3a79
|
from setuptools import setup
setup(
name='lazyweb',
version='0.1',
description='A lightweight jinja template compiler',
author='Brian McFee',
author_email='brm2132@columbia.edu',
url='http://github.com/bmcfee/lazyweb',
download_url='http://github.com/bmcfee/lazyweb/releases',
long_description="""\
A lightweight jinja template compiler.
""",
scripts=['lazyweb'],
classifiers=[
"License :: OSI Approved :: GNU General Public License (GPL)",
"Programming Language :: Python",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Multimedia :: Text Processing :: Markup",
],
keywords='web template',
license='GPL',
install_requires=[
'jinja2',
'ujson',
],
    extras_require={
'cPickle': 'cPickle'
}
)
|
bmcfee/lazyweb
|
setup.py
|
Python
|
gpl-3.0
| 880
|
[
"Brian"
] |
abe59e76146dcbb555a542fbf3ee569c06feecc6c7358c6b8eb2d18a72487d28
|
import vtk
from vtk.test import Testing
# Data from our friends at Sandia
points = vtk.vtkPoints()
points.InsertNextPoint(0,0,0)
points.InsertNextPoint(1,0,0)
points.InsertNextPoint(1,1,0)
points.InsertNextPoint(0,1,0)
points.InsertNextPoint(0,0,5)
points.InsertNextPoint(1,0,4)
points.InsertNextPoint(1,1,4)
points.InsertNextPoint(0,1,5)
points.InsertNextPoint(5,0,7)
points.InsertNextPoint(5,0,6)
points.InsertNextPoint(5,1,6)
points.InsertNextPoint(5,1,7)
points.InsertNextPoint(11,1,5)
points.InsertNextPoint(10,1,4)
points.InsertNextPoint(10,0,4)
points.InsertNextPoint(11,0,5)
points.InsertNextPoint(10,0,0)
points.InsertNextPoint(11,0,0)
points.InsertNextPoint(11,1,0)
points.InsertNextPoint(10,1,0)
profile = vtk.vtkPolyData()
profile.SetPoints(points)
# triangulate them
#
del1 = vtk.vtkDelaunay3D()
del1.SetInputData(profile)
del1.SetTolerance(0.01)
del1.SetAlpha(2.8)
del1.AlphaTetsOn()
del1.AlphaTrisOn()
del1.AlphaLinesOff()
del1.AlphaVertsOn()
map = vtk.vtkDataSetMapper()
map.SetInputConnection(del1.GetOutputPort())
triangulation = vtk.vtkActor()
triangulation.SetMapper(map)
triangulation.GetProperty().SetColor(1,0,0)
# Create graphics stuff
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(triangulation)
ren1.SetBackground(1,1,1)
renWin.SetSize(250,250)
cam1 = ren1.GetActiveCamera()
cam1.SetFocalPoint(0,0,0)
cam1.SetPosition(1,1,1)
ren1.ResetCamera()
# render the image
#
renWin.Render()
# in test mode we only render; the interactive event loop is not started
# --- end of script --
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Filters/Core/Testing/Python/Delaunay3DAlphaTest.py
|
Python
|
bsd-3-clause
| 1,751
|
[
"VTK"
] |
d2c79d02b761a0b48dcc34d3e3e48765effa079075f91d2fedc4395758fc21f4
|
import re
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from .utils import get_seq
class DatasetBlock(object):
"""
By default, the data sequences block generated is NEXUS and we use BioPython
tools to convert it to other formats such as FASTA.
    However, sometimes the block needs to be built differently depending on the
    output file format.
    Parameters:
        data (named tuple): containing:
             * gene_codes: list
             * number_chars: string
             * number_taxa: string
             * seq_records: list of SeqRecordExpanded objects
             * gene_codes_and_lengths: OrderedDict
        codon_positions (str): Can be 1st, 2nd, 3rd, 1st-2nd, ALL (default).
        partitioning (str): 'by gene', 'by codon position' or '1st-2nd, 3rd'.
        aminoacids (boolean): whether the sequences should be translated to aminoacids.
        degenerate (str): method used to degenerate the sequences, if any.
        format (str): NEXUS, PHYLIP or FASTA.
        outgroup (str): Specimen code of taxon that should be used as outgroup.
    """
def __init__(self, data, codon_positions, partitioning, aminoacids=None,
degenerate=None, format=None, outgroup=None):
self.warnings = []
self.data = data
self.codon_positions = codon_positions
self.partitioning = partitioning
self.aminoacids = aminoacids
self.degenerate = degenerate
self.format = format
self.outgroup = outgroup
self._blocks = []
def dataset_block(self):
"""Creates the block with taxon names and their sequences.
Override this function if the dataset block needs to be different
due to file format.
Example:
CP100_10_Aus_aus ACGATRGACGATRA...
CP100_11_Aus_bus ACGATRGACGATRA...
...
"""
self.split_data()
out = []
for block in self._blocks:
out.append(self.convert_to_string(block))
return '\n'.join(out).strip() + '\n;\nEND;'
def split_data(self):
"""Splits the list of SeqRecordExpanded objects into lists, which are
kept into a bigger list.
If the file_format is Nexus, then it is only partitioned by gene. If it
is FASTA, then it needs partitioning by codon positions if required.
Example:
>>> blocks = [
... [SeqRecord1, SeqRecord2], # for gene 1
... [SeqRecord1, SeqRecord2], # for gene 2
... [SeqRecord1, SeqRecord2], # for gene 3
... [SeqRecord1, SeqRecord2], # for gene 4
... ]
"""
this_gene_code = None
for seq_record in self.data.seq_records:
if this_gene_code is None or this_gene_code != seq_record.gene_code:
this_gene_code = seq_record.gene_code
self._blocks.append([])
list_length = len(self._blocks)
self._blocks[list_length - 1].append(seq_record)
def convert_to_string(self, block):
"""Makes gene_block as str from list of SeqRecordExpanded objects of a gene_code.
Override this function if the dataset block needs to be different
due to file format.
This block will need to be split further if the dataset is FASTA or
TNT and the partitioning scheme is 1st-2nd, 3rd.
As the dataset is split into several blocks due to 1st-2nd, 3rd
we cannot translate to aminoacids or degenerate the sequences.
"""
if self.partitioning != '1st-2nd, 3rd':
return self.make_datablock_by_gene(block)
else:
if self.format == 'FASTA':
return self.make_datablock_considering_codon_positions_as_fasta_format(block)
else:
return self.make_datablock_by_gene(block)
def make_datablock_considering_codon_positions_as_fasta_format(self, block):
block_1st2nd = OrderedDict()
block_1st = OrderedDict()
block_2nd = OrderedDict()
block_3rd = OrderedDict()
for seq_record in block: # splitting each block in two
            for block_dict in (block_1st2nd, block_1st, block_2nd, block_3rd):
                if seq_record.gene_code not in block_dict:
                    block_dict[seq_record.gene_code] = []
taxonomy_as_string = self.flatten_taxonomy(seq_record)
taxon_id = '>{0}{1}'.format(seq_record.voucher_code,
taxonomy_as_string)
block_1st2nd[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
seq_record.first_and_second_codon_positions()))
block_1st[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
seq_record.first_codon_position()))
block_2nd[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
seq_record.second_codon_position()))
block_3rd[seq_record.gene_code].append('{0}\n{1}\n'.format(taxon_id,
seq_record.third_codon_position()))
out = self.convert_block_dicts_to_string(block_1st2nd, block_1st, block_2nd, block_3rd)
return out
def convert_block_dicts_to_string(self, block_1st2nd, block_1st, block_2nd, block_3rd):
"""Takes into account whether we need to output all codon positions."""
out = ""
# We need 1st and 2nd positions
if self.codon_positions in ['ALL', '1st-2nd']:
for gene_code, seqs in block_1st2nd.items():
out += '>{0}_1st-2nd\n----\n'.format(gene_code)
for seq in seqs:
out += seq
elif self.codon_positions == '1st':
for gene_code, seqs in block_1st.items():
out += '>{0}_1st\n----\n'.format(gene_code)
for seq in seqs:
out += seq
elif self.codon_positions == '2nd':
for gene_code, seqs in block_2nd.items():
out += '>{0}_2nd\n----\n'.format(gene_code)
for seq in seqs:
out += seq
# We also need 3rd positions
if self.codon_positions in ['ALL', '3rd']:
for gene_code, seqs in block_3rd.items():
out += '\n>{0}_3rd\n----\n'.format(gene_code)
for seq in seqs:
out += seq
return out
def make_datablock_by_gene(self, block):
out = None
max_taxon_id = 0
for seq_record in block:
taxon_id = '{0}_{1}_{2}'.format(seq_record.voucher_code,
seq_record.taxonomy.get('genus', ''),
seq_record.taxonomy.get('species', ''),
)
if len(taxon_id) > max_taxon_id:
max_taxon_id = len(taxon_id)
pad_number = max_taxon_id + 1
if pad_number < 55:
pad_number = 55
for seq_record in block:
if not out:
out = '[{0}]\n'.format(seq_record.gene_code)
taxonomy_as_string = self.flatten_taxonomy(seq_record)
taxon_id = '{0}{1}'.format(seq_record.voucher_code,
taxonomy_as_string)
sequence = get_seq(seq_record, self.codon_positions,
aminoacids=self.aminoacids,
degenerate=self.degenerate)
seq = sequence.seq
if sequence.warning:
self.warnings.append(sequence.warning)
out += '{0}{1}\n'.format(taxon_id.ljust(pad_number), seq)
return out
    def flatten_taxonomy(self, seq_record):
        if seq_record.taxonomy is None:
            return ''
        out = ''
        ranks = ('orden', 'superfamily', 'family', 'subfamily', 'tribe',
                 'subtribe', 'genus', 'species', 'subspecies', 'author',
                 'hostorg')
        for rank in ranks:
            try:
                out += "_" + seq_record.taxonomy[rank]
            except KeyError:
                pass
        out = out.replace(" ", "_")
        out = re.sub("_$", "", out)
        return re.sub('_+', '_', out)
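# A minimal usage sketch (hypothetical: `data` is the namedtuple described in the
# class docstring, assembled elsewhere in the package from SeqRecordExpanded objects):
#
#     block = DatasetBlock(data, codon_positions='ALL', partitioning='by gene',
#                          format='NEXUS')
#     print(block.dataset_block())  # taxon ids on the left, sequences on the right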
class DatasetFooter(object):
"""Builds charset block:
Parameters:
data (namedtuple): with necessary info for dataset creation.
codon_positions (str): `1st`, `2nd`, `3rd`, `1st-2nd`, `ALL`.
partitioning (str): `by gene`, `by codon position`, `1st-2nd, 3rd`.
outgroup (str): voucher code to be used as outgroup for NEXUS
and TNT files.
Example:
>>>
begin mrbayes;
charset ArgKin = 1-596;
charset COI-begin = 597-1265;
charset COI_end = 1266-2071;
charset ef1a = 2072-3311;
charset RpS2 = 3312-3722;
charset RpS5 = 3723-4339;
charset wingless = 4340-4739;
set autoclose=yes;
prset applyto=(all) ratepr=variable brlensp=unconstrained:Exp(100.0) shapepr=exp(1.0) tratiopr=beta(2.0,1.0);
lset applyto=(all) nst=mixed rates=gamma [invgamma];
unlink statefreq=(all);
unlink shape=(all) revmat=(all) tratio=(all) [pinvar=(all)];
mcmc ngen=10000000 printfreq=1000 samplefreq=1000 nchains=4 nruns=2 savebrlens=yes [temp=0.11];
sump relburnin=yes [no] burninfrac=0.25 [2500];
sumt relburnin=yes [no] burninfrac=0.25 [2500] contype=halfcompat [allcompat];
END;
"""
def __init__(self, data, codon_positions=None, partitioning=None,
outgroup=None):
self.data = data
self.codon_positions = codon_positions
self.partitioning = partitioning
self.outgroup = outgroup
self._validate_partitioning(partitioning)
self._validate_codon_positions(codon_positions)
self.charset_block = self.make_charset_block()
self.partition_line = self.make_partition_line()
def _validate_partitioning(self, partitioning):
if partitioning is None:
self.partitioning = 'by gene'
elif partitioning not in ['by gene', 'by codon position', '1st-2nd, 3rd']:
raise AttributeError("Partitioning parameter should be one of these: "
"None, 'by gene', 'by codon position', '1st-2nd, 3rd")
def _validate_codon_positions(self, codon_positions):
if codon_positions is None:
self.codon_positions = 'ALL'
elif codon_positions not in ['1st', '2nd', '3rd', '1st-2nd', 'ALL']:
raise AttributeError("Codon positions parameter should be one of these: "
"None, '1st', '2nd', '3rd', '1st-2nd', 'ALL'")
def make_charset_block(self):
"""
Override this function for Phylip dataset as the content is different and
goes into a separate file.
"""
out = 'begin mrbayes;\n'
out += self.make_charsets()
return out.strip()
def make_charsets(self):
"""
Override this function for Phylip dataset as the content is different and
goes into a separate file.
"""
count_start = 1
out = ''
for gene_code, lengths in self.data.gene_codes_and_lengths.items():
count_end = lengths[0] + count_start - 1
out += self.format_charset_line(gene_code, count_start, count_end)
count_start = count_end + 1
return out
def format_charset_line(self, gene_code, count_start, count_end):
slash_number = self.make_slash_number()
suffixes = self.make_gene_code_suffixes()
corrected_count = self.correct_count_using_reading_frames(gene_code, count_start, count_end)
out = ''
        for index, suffix in enumerate(suffixes):
            out += ' charset {0}{1} = {2}{3};\n'.format(gene_code, suffix,
                                                        corrected_count[index], slash_number)
return out
def make_slash_number(self):
"""
Charset lines have \2 or \3 depending on type of partitioning and codon
positions requested for our dataset.
:return:
"""
if self.partitioning == 'by codon position' and self.codon_positions == '1st-2nd':
return '\\2'
elif self.partitioning in ['by codon position', '1st-2nd, 3rd'] and self.codon_positions in ['ALL', None]:
return '\\3'
else:
return ''
def make_gene_code_suffixes(self):
try:
return self.suffix_for_one_codon_position()
except KeyError:
return self.suffix_for_several_codon_positions()
    def suffix_for_one_codon_position(self):
        suffixes = {
            '1st': '_pos1',
            '2nd': '_pos2',
            '3rd': '_pos3',
        }
        return [suffixes[self.codon_positions]]
def suffix_for_several_codon_positions(self):
if self.codon_positions == 'ALL' and self.partitioning == 'by gene':
return ['']
elif self.codon_positions == '1st-2nd' and self.partitioning in ['by gene', '1st-2nd, 3rd']:
return ['_pos12']
elif self.codon_positions == '1st-2nd' and self.partitioning == 'by codon position':
return ['_pos1', '_pos2']
if self.partitioning == 'by codon position':
return ['_pos1', '_pos2', '_pos3']
elif self.partitioning == '1st-2nd, 3rd':
return ['_pos12', '_pos3']
def correct_count_using_reading_frames(self, gene_code, count_start, count_end):
reading_frame = self.data.reading_frames[gene_code]
bp = BasePairCount(reading_frame, self.codon_positions, self.partitioning, count_start, count_end)
return bp.get_corrected_count()
def make_partition_line(self):
out = 'partition GENES = {0}: '.format(len(self.data.gene_codes) * len(self.make_gene_code_suffixes()))
out += ', '.join(self.add_suffixes_to_gene_codes())
out += ';'
out += '\n\nset partition = GENES;'
return out
def add_suffixes_to_gene_codes(self):
"""Appends pos1, pos2, etc to the gene_code if needed."""
out = []
for gene_code in self.data.gene_codes:
            for suffix in self.make_gene_code_suffixes():
                out.append('{0}{1}'.format(gene_code, suffix))
return out
def dataset_footer(self):
return self.make_footer()
def make_footer(self):
outgroup = self.get_outgroup()
footer = """{0}\n{1}
set autoclose=yes;{2}
prset applyto=(all) ratepr=variable brlensp=unconstrained:Exp(100.0) shapepr=exp(1.0) tratiopr=beta(2.0,1.0);
lset applyto=(all) nst=mixed rates=gamma [invgamma];
unlink statefreq=(all);
unlink shape=(all) revmat=(all) tratio=(all) [pinvar=(all)];
mcmc ngen=10000000 printfreq=1000 samplefreq=1000 nchains=4 nruns=2 savebrlens=yes [temp=0.11];
sump relburnin=yes [no] burninfrac=0.25 [2500];
sumt relburnin=yes [no] burninfrac=0.25 [2500] contype=halfcompat [allcompat];
END;
""".format(self.charset_block, self.partition_line, outgroup)
return footer.strip()
def get_outgroup(self):
"""Generates the outgroup line from the voucher code specified by the
user.
"""
if self.outgroup is not None:
outgroup_taxonomy = ''
for i in self.data.seq_records:
if self.outgroup == i.voucher_code:
outgroup_taxonomy = '{0}_{1}'.format(i.taxonomy['genus'],
i.taxonomy['species'])
break
outgroup = '\noutgroup {0}_{1};'.format(self.outgroup,
outgroup_taxonomy)
else:
outgroup = ''
return outgroup
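# A minimal sketch of the footer in use (hypothetical `data` namedtuple carrying
# gene_codes, gene_codes_and_lengths, reading_frames and seq_records):
#
#     footer = DatasetFooter(data, codon_positions='ALL',
#                            partitioning='by codon position', outgroup='CP100_10')
#     print(footer.dataset_footer())  # charsets, partition line and mrbayes commands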
class BasePairCount(object):
"""
Uses reading frame info, partitioning method and number of codon positions
to return corrected base pair count for charset lines.
Example:
>>> bp_count = BasePairCount(reading_frame=1, codon_positions='1st-2nd',
... partitioning='by codon position',
... count_start=100, count_end=512)
>>> bp_count.get_corrected_count()
[
'100-512',
'101-513',
]
"""
def __init__(self, reading_frame=None, codon_positions=None, partitioning=None,
count_start=None, count_end=None):
self._partitioning = self._set_partitioning(partitioning)
self._codon_positions = self._set_codon_positions(codon_positions)
self._reading_frame = self._set_reading_frame(reading_frame)
self._count_start = self._set_count_start(count_start)
self._count_end = self._set_count_end(count_end)
    def _set_codon_positions(self, codon_positions):
        if not codon_positions:
            raise ValueError("codon_positions argument is needed. Can't be None")
        else:
            return codon_positions
    def _set_reading_frame(self, reading_frame):
        if not reading_frame and self._partitioning in ['by codon position', '1st-2nd, 3rd']:
            raise ValueError("reading_frame argument is needed. Can't be None")
        else:
            return reading_frame
    def _set_partitioning(self, partitioning):
        if not partitioning:
            raise ValueError("partitioning argument is needed. Can't be None")
        else:
            return partitioning
    def _set_count_start(self, count_start):
        if not count_start:
            raise ValueError("count_start argument is needed. Can't be None")
        else:
            return count_start
    def _set_count_end(self, count_end):
        if not count_end:
            raise ValueError("count_end argument is needed. Can't be None")
        else:
            return count_end
def get_corrected_count(self):
if self._codon_positions == '1st-2nd' and self._partitioning in ['by gene',
'by codon position',
'1st-2nd, 3rd']:
return self._using_1st2nd_codons()
if self._codon_positions == 'ALL' and self._partitioning == 'by codon position':
return self._using_all_codons_partition_by_codon_position()
if self._codon_positions in ['ALL', '1st', '2nd', '3rd'] and self._partitioning == 'by gene':
return self._using_all_codons_partition_by_gene()
if self._codon_positions in ['1st', '2nd', '3rd'] and self._partitioning in ['by codon position', '1st-2nd, 3rd']:
return self._using_one_codon_position_partitioned_by_codon_position(self._codon_positions)
if self._codon_positions == 'ALL' and self._partitioning == '1st-2nd, 3rd':
return self._using_all_codons_partition_by_1st2nd_3rd()
def _using_1st2nd_codons(self):
return [
'{0}-{1}'.format(self._count_start, self._count_end),
'{0}-{1}'.format(self._count_start + 1, self._count_end),
]
def _using_all_codons_partition_by_codon_position(self):
if self._reading_frame == 1:
return [
'{0}-{1}'.format(self._count_start, self._count_end),
'{0}-{1}'.format(self._count_start + 1, self._count_end),
'{0}-{1}'.format(self._count_start + 2, self._count_end),
]
elif self._reading_frame == 2:
return [
'{0}-{1}'.format(self._count_start + 1, self._count_end),
'{0}-{1}'.format(self._count_start + 2, self._count_end),
'{0}-{1}'.format(self._count_start, self._count_end),
]
else:
return [
'{0}-{1}'.format(self._count_start + 2, self._count_end),
'{0}-{1}'.format(self._count_start, self._count_end),
'{0}-{1}'.format(self._count_start + 1, self._count_end),
]
def _using_one_codon_position_partitioned_by_codon_position(self, position):
return [
'{0}-{1}'.format(self._count_start, self._count_end),
]
def _using_all_codons_partition_by_gene(self):
return [
'{0}-{1}'.format(self._count_start, self._count_end,)
]
def _using_all_codons_partition_by_1st2nd_3rd(self):
if self._reading_frame == 1:
return [
'{0}-{1}\\3 {2}-{3}'.format(self._count_start, self._count_end,
self._count_start + 1,
self._count_end),
'{0}-{1}'.format(self._count_start + 2, self._count_end),
]
elif self._reading_frame == 2:
return [
'{0}-{1}\\3 {2}-{3}'.format(self._count_start + 1,
self._count_end,
self._count_start + 2,
self._count_end),
'{0}-{1}'.format(self._count_start, self._count_end),
]
else:
return [
'{0}-{1}\\3 {2}-{3}'.format(self._count_start + 2,
self._count_end,
self._count_start,
self._count_end),
'{0}-{1}'.format(self._count_start + 1, self._count_end),
]
|
carlosp420/dataset-creator
|
dataset_creator/base_dataset.py
|
Python
|
bsd-2-clause
| 23,206
|
[
"Biopython"
] |
06caef9966f3fd2a0162bc0e86fe5980586c764486981a0f2546c16261bc6d70
|
"""Silly data generator (Faker (https://github.com/joke2k/faker) and others
are much better, but we just need something simple"""
from __future__ import print_function
import string
# Third Party
import pandas as pd
import numpy as np
def df_random(num_numeric=3, num_categorical=3, num_rows=100):
"""Generate a dataframe with random data. This is a general method
to easily generate a random dataframe, for more control of the
random 'distributions' use the column methods (df_numeric_column, df_categorical_column)
For other distributions you can use numpy methods directly (see example at bottom of this file)
Args:
num_numeric (int): The number of numeric columns (default = 3)
num_categorical (int): The number of categorical columns (default = 3)
num_rows (int): The number of rows to generate (default = 100)
"""
# Construct DataFrame
df = pd.DataFrame()
column_names = string.ascii_lowercase
# Create numeric columns
for name in column_names[:num_numeric]:
df[name] = df_numeric_column(num_rows=num_rows)
# Create categorical columns
for name in column_names[num_numeric:num_numeric+num_categorical]:
df[name] = df_categorical_column(['foo', 'bar', 'baz'], num_rows=num_rows)
# Return the dataframe
return df
def df_numeric_column(min_value=0, max_value=1, num_rows=100):
"""Generate a numeric column with random data
Args:
min_value (float): Minimum value (default = 0)
max_value (float): Maximum value (default = 1)
num_rows (int): The number of rows to generate (default = 100)
"""
# Generate numeric column
return pd.Series(np.random.uniform(min_value, max_value, num_rows))
def df_categorical_column(category_values, num_rows=100, probabilities=None):
"""Generate a categorical column with random data
Args:
category_values (list): A list of category values (e.g. ['red', 'blue', 'green'])
num_rows (int): The number of rows to generate (default = 100)
        probabilities (list): A list of probabilities for each value (e.g. [0.6, 0.2, 0.2]) (default=None, giving equal probabilities)
"""
splitter = np.random.choice(range(len(category_values)), num_rows, p=probabilities)
return pd.Series(pd.Categorical.from_codes(splitter, categories=category_values))
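# e.g. df_categorical_column(['red', 'blue'], num_rows=5, probabilities=[0.9, 0.1])
# returns a categorical Series whose values are drawn ~90%/10% from the two labels.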
def test():
"""Test the data generator methods"""
df = df_random()
print('Random DataFrame')
print(df.head())
# Test the numerical column generator
df['delta_v'] = df_numeric_column(-100, 100)
print('\nNumerical column generator (added delta_v)')
print(df.head())
# Test the categorical column generator
df['color'] = df_categorical_column(['red', 'green', 'blue'])
print('\nCategorical column generator (added color)')
print(df.head())
# Test the categorical column generator with probabilities
df['color'] = df_categorical_column(['red', 'green', 'blue'], probabilities=[0.6, 0.3, 0.1])
    print('\nProbabilities should be ~60% red, 30% green and 10% blue')
print(df['color'].value_counts())
# Also we can just use the built in Numpy method for detailed control
# over the numeric distribution
my_series = pd.Series(np.random.normal(0, 1, 1000))
print('\nStats on numpy normal (gaussian) distribution')
print(my_series.describe())
if __name__ == '__main__':
test()
|
jzadeh/Aktaion
|
python/parserDev/brothon/analysis/data_generator.py
|
Python
|
apache-2.0
| 3,478
|
[
"Gaussian"
] |
b4921ceb077fe5a33728002d0bcbfe5819ee30439d73a1cbd3c466142b8c5348
|
# coding=utf-8
from distutils.version import LooseVersion
from itertools import chain
import tempfile
import os
import logging
import hashlib
import random
import json
import types
import re
from collections import defaultdict
from datetime import datetime
from functools import wraps
from copy import deepcopy
from urllib2 import urlopen
from urlparse import urljoin
from couchdbkit import ResourceConflict, MultipleResultsFound
import itertools
from lxml import etree
from django.core.cache import cache
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.translation import override, ugettext as _, ugettext
from couchdbkit.exceptions import BadValueError, DocTypeError
from dimagi.ext.couchdbkit import *
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import Http404
from django.template.loader import render_to_string
from restkit.errors import ResourceError
from couchdbkit.resource import ResourceNotFound
from corehq import toggles, privileges
from corehq.const import USER_DATE_FORMAT, USER_TIME_FORMAT
from corehq.apps.app_manager.feature_support import CommCareFeatureSupportMixin
from corehq.util.quickcache import quickcache
from corehq.util.timezones.conversions import ServerTime
from dimagi.utils.couch.bulk import get_docs
from django_prbac.exceptions import PermissionDenied
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.app_manager.commcare_settings import check_condition
from corehq.apps.app_manager.const import *
from corehq.apps.app_manager.xpath import dot_interpolate, LocationXpath
from corehq.apps.builds import get_default_build_spec
from corehq.util.hash_compat import make_password
from dimagi.utils.couch.cache import cache_core
from dimagi.utils.couch.lazy_attachment_doc import LazyAttachmentDoc
from dimagi.utils.couch.undo import DeleteRecord, DELETED_SUFFIX
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.web import get_url_base, parse_int
from dimagi.utils.couch.database import get_db
import commcare_translations
from corehq.util import bitly
from corehq.util import view_utils
from corehq.apps.appstore.models import SnapshotMixin
from corehq.apps.builds.models import BuildSpec, CommCareBuildConfig, BuildRecord
from corehq.apps.hqmedia.models import HQMediaMixin
from corehq.apps.translations.models import TranslationMixin
from corehq.apps.users.models import CouchUser
from corehq.apps.users.util import cc_user_domain
from corehq.apps.domain.models import cached_property, Domain
from corehq.apps.app_manager import current_builds, app_strings, remote_app
from corehq.apps.app_manager import suite_xml, commcare_settings
from corehq.apps.app_manager.util import (
split_path,
save_xform,
get_correct_app_class,
ParentCasePropertyBuilder,
is_usercase_in_use)
from corehq.apps.app_manager.xform import XForm, parse_xml as _parse_xml, \
validate_xform
from corehq.apps.app_manager.templatetags.xforms_extras import trans
from .exceptions import (
AppEditingError,
BlankXFormError,
ConflictingCaseTypeError,
FormNotFoundException,
IncompatibleFormTypeException,
LocationXpathValidationError,
ModuleNotFoundException,
ModuleIdMissingException,
RearrangeError,
VersioningError,
XFormException,
XFormIdNotUnique,
XFormValidationError,
)
from corehq.apps.app_manager import id_strings
from jsonpath_rw import jsonpath, parse
WORKFLOW_DEFAULT = 'default'
WORKFLOW_ROOT = 'root'
WORKFLOW_MODULE = 'module'
WORKFLOW_PREVIOUS = 'previous_screen'
WORKFLOW_FORM = 'form'
DETAIL_TYPES = ['case_short', 'case_long', 'ref_short', 'ref_long']
FIELD_SEPARATOR = ':'
ATTACHMENT_REGEX = r'[^/]*\.xml'
ANDROID_LOGO_PROPERTY_MAPPING = {
'hq_logo_android_home': 'brand-banner-home',
'hq_logo_android_login': 'brand-banner-login',
}
def jsonpath_update(datum_context, value):
field = datum_context.path.fields[0]
parent = jsonpath.Parent().find(datum_context)[0]
parent.value[field] = value
# store a list of references to form ID's so that
# when an app is copied we can update the references
# with the new values
form_id_references = []
def FormIdProperty(expression, **kwargs):
"""
Create a StringProperty that references a form ID.
:param level: From where is the form referenced? One of 'app', 'module', 'form'
:param path: jsonpath to field that holds the form ID
"""
path_expression = parse(expression)
assert isinstance(path_expression, jsonpath.Child), "only child path expressions are supported"
field = path_expression.right
assert len(field.fields) == 1, 'path expression can only reference a single field'
form_id_references.append(path_expression)
return StringProperty(**kwargs)
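# Example (see FormLink further down for the real use): declaring
#     form_id = FormIdProperty('modules[*].forms[*].form_links[*].form_id')
# records the jsonpath in form_id_references so copied apps can remap form IDs.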
def _rename_key(dct, old, new):
if old in dct:
if new in dct and dct[new]:
dct["%s_backup_%s" % (new, hex(random.getrandbits(32))[2:-1])] = dct[new]
dct[new] = dct[old]
del dct[old]
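# e.g. _rename_key({'en': 'one', 'fr': 'un'}, 'fr', 'en') yields
# {'en': 'un', 'en_backup_<rand>': 'one'} -- a clobbered value is preserved
# under a randomized backup key rather than silently dropped.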
@memoized
def load_case_reserved_words():
with open(os.path.join(os.path.dirname(__file__), 'static', 'app_manager', 'json', 'case-reserved-words.json')) as f:
return json.load(f)
@memoized
def load_form_template(filename):
with open(os.path.join(os.path.dirname(__file__), 'data', filename)) as f:
return f.read()
def partial_escape(xpath):
"""
Copied from http://stackoverflow.com/questions/275174/how-do-i-perform-html-decoding-encoding-using-python-django
but without replacing the single quote
"""
    return mark_safe(force_unicode(xpath).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;'))
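# e.g. partial_escape('x < 1 & name = "foo"') returns
# 'x &lt; 1 &amp; name = &quot;foo&quot;' -- single quotes are left intact so the
# result can still be embedded in single-quoted xpath literals.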
class IndexedSchema(DocumentSchema):
"""
Abstract class.
Meant for documents that appear in a list within another document
and need to know their own position within that list.
"""
def with_id(self, i, parent):
self._i = i
self._parent = parent
return self
@property
def id(self):
return self._i
def __eq__(self, other):
return other and (self.id == other.id) and (self._parent == other._parent)
class Getter(object):
def __init__(self, attr):
self.attr = attr
def __call__(self, instance):
items = getattr(instance, self.attr)
l = len(items)
for i,item in enumerate(items):
yield item.with_id(i%l, instance)
def __get__(self, instance, owner):
# thanks, http://metapython.blogspot.com/2010/11/python-instance-methods-how-are-they.html
# this makes Getter('foo') act like a bound method
return types.MethodType(self, instance, owner)
class FormActionCondition(DocumentSchema):
"""
The condition under which to open/update/close a case/referral
Either {'type': 'if', 'question': '/xpath/to/node', 'answer': 'value'}
in which case the action takes place if question has answer answer,
or {'type': 'always'} in which case the action always takes place.
"""
type = StringProperty(choices=["if", "always", "never"], default="never")
question = StringProperty()
answer = StringProperty()
operator = StringProperty(choices=['=', 'selected'], default='=')
def is_active(self):
return self.type in ('if', 'always')
class FormAction(DocumentSchema):
"""
Corresponds to Case XML
"""
condition = SchemaProperty(FormActionCondition)
def is_active(self):
return self.condition.is_active()
@classmethod
def get_action_paths(cls, action):
if action.condition.type == 'if':
yield action.condition.question
for __, path in cls.get_action_properties(action):
yield path
@classmethod
def get_action_properties(self, action):
action_properties = action.properties()
if 'name_path' in action_properties and action.name_path:
yield 'name', action.name_path
if 'case_name' in action_properties:
yield 'name', action.case_name
if 'external_id' in action_properties and action.external_id:
yield 'external_id', action.external_id
if 'update' in action_properties:
for name, path in action.update.items():
yield name, path
if 'case_properties' in action_properties:
for name, path in action.case_properties.items():
yield name, path
if 'preload' in action_properties:
for path, name in action.preload.items():
yield name, path
class UpdateCaseAction(FormAction):
update = DictProperty()
class PreloadAction(FormAction):
preload = DictProperty()
def is_active(self):
return bool(self.preload)
class UpdateReferralAction(FormAction):
followup_date = StringProperty()
def get_followup_date(self):
if self.followup_date:
return "if(date({followup_date}) >= date(today()), {followup_date}, date(today() + 2))".format(
followup_date=self.followup_date,
)
return self.followup_date or "date(today() + 2)"
class OpenReferralAction(UpdateReferralAction):
name_path = StringProperty()
class OpenCaseAction(FormAction):
name_path = StringProperty()
external_id = StringProperty()
class OpenSubCaseAction(FormAction):
case_type = StringProperty()
case_name = StringProperty()
reference_id = StringProperty()
case_properties = DictProperty()
repeat_context = StringProperty()
close_condition = SchemaProperty(FormActionCondition)
class FormActions(DocumentSchema):
open_case = SchemaProperty(OpenCaseAction)
update_case = SchemaProperty(UpdateCaseAction)
close_case = SchemaProperty(FormAction)
open_referral = SchemaProperty(OpenReferralAction)
update_referral = SchemaProperty(UpdateReferralAction)
close_referral = SchemaProperty(FormAction)
case_preload = SchemaProperty(PreloadAction)
referral_preload = SchemaProperty(PreloadAction)
usercase_update = SchemaProperty(UpdateCaseAction)
usercase_preload = SchemaProperty(PreloadAction)
subcases = SchemaListProperty(OpenSubCaseAction)
def all_property_names(self):
names = set()
names.update(self.update_case.update.keys())
names.update(self.case_preload.preload.values())
for subcase in self.subcases:
names.update(subcase.case_properties.keys())
return names
class AdvancedAction(IndexedSchema):
case_type = StringProperty()
case_tag = StringProperty()
case_properties = DictProperty()
parent_tag = StringProperty()
parent_reference_id = StringProperty(default='parent')
close_condition = SchemaProperty(FormActionCondition)
__eq__ = DocumentSchema.__eq__
def get_paths(self):
for path in self.case_properties.values():
yield path
if self.close_condition.type == 'if':
yield self.close_condition.question
def get_property_names(self):
return set(self.case_properties.keys())
@property
def is_subcase(self):
return self.parent_tag
class AutoSelectCase(DocumentSchema):
"""
Configuration for auto-selecting a case.
Attributes:
value_source Reference to the source of the value. For mode = fixture,
this represents the FixtureDataType ID. For mode = case
this represents the 'case_tag' for the case.
The modes 'user' and 'raw' don't require a value_source.
value_key The actual field that contains the case ID. Can be a case
index or a user data key or a fixture field name or the raw
xpath expression.
"""
mode = StringProperty(choices=[AUTO_SELECT_USER,
AUTO_SELECT_FIXTURE,
AUTO_SELECT_CASE,
AUTO_SELECT_USERCASE,
AUTO_SELECT_RAW])
value_source = StringProperty()
value_key = StringProperty(required=True)
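# A hypothetical configuration: auto-select the case whose ID is stored in the
# user's data under 'case_id' (mode 'user' needs no value_source):
#
#     AutoSelectCase(mode=AUTO_SELECT_USER, value_key='case_id')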
class LoadUpdateAction(AdvancedAction):
"""
details_module: Use the case list configuration from this module to show the cases.
preload: Value from the case to load into the form. Keys are question paths, values are case properties.
auto_select: Configuration for auto-selecting the case
show_product_stock: If True list the product stock using the module's Product List configuration.
product_program: Only show products for this CommCare Supply program.
"""
details_module = StringProperty()
preload = DictProperty()
auto_select = SchemaProperty(AutoSelectCase, default=None)
show_product_stock = BooleanProperty(default=False)
product_program = StringProperty()
def get_paths(self):
for path in super(LoadUpdateAction, self).get_paths():
yield path
for path in self.preload.keys():
yield path
def get_property_names(self):
names = super(LoadUpdateAction, self).get_property_names()
names.update(self.preload.values())
return names
@property
def case_session_var(self):
return 'case_id_{0}'.format(self.case_tag)
class AdvancedOpenCaseAction(AdvancedAction):
name_path = StringProperty()
repeat_context = StringProperty()
open_condition = SchemaProperty(FormActionCondition)
def get_paths(self):
for path in super(AdvancedOpenCaseAction, self).get_paths():
yield path
yield self.name_path
if self.open_condition.type == 'if':
yield self.open_condition.question
@property
def case_session_var(self):
return 'case_id_new_{}_{}'.format(self.case_type, self.id)
class AdvancedFormActions(DocumentSchema):
load_update_cases = SchemaListProperty(LoadUpdateAction)
open_cases = SchemaListProperty(AdvancedOpenCaseAction)
get_load_update_actions = IndexedSchema.Getter('load_update_cases')
get_open_actions = IndexedSchema.Getter('open_cases')
def get_all_actions(self):
return itertools.chain(self.get_load_update_actions(), self.get_open_actions())
def get_subcase_actions(self):
return (a for a in self.get_all_actions() if a.parent_tag)
def get_open_subcase_actions(self, parent_case_type=None):
for action in [a for a in self.open_cases if a.parent_tag]:
if not parent_case_type:
yield action
else:
parent = self.actions_meta_by_tag[action.parent_tag]['action']
if parent.case_type == parent_case_type:
yield action
def get_case_tags(self):
for action in self.get_all_actions():
yield action.case_tag
def get_action_from_tag(self, tag):
return self.actions_meta_by_tag.get(tag, {}).get('action', None)
@property
def actions_meta_by_tag(self):
return self._action_meta()['by_tag']
@property
def actions_meta_by_parent_tag(self):
return self._action_meta()['by_parent_tag']
def get_action_hierarchy(self, action):
current = action
hierarchy = [current]
while current and current.parent_tag:
parent = self.get_action_from_tag(current.parent_tag)
current = parent
if parent:
if parent in hierarchy:
circular = [a.case_tag for a in hierarchy + [parent]]
raise ValueError("Circular reference in subcase hierarchy: {0}".format(circular))
hierarchy.append(parent)
return hierarchy
@property
def auto_select_actions(self):
return self._action_meta()['by_auto_select_mode']
@memoized
def _action_meta(self):
meta = {
'by_tag': {},
'by_parent_tag': {},
'by_auto_select_mode': {
AUTO_SELECT_USER: [],
AUTO_SELECT_CASE: [],
AUTO_SELECT_FIXTURE: [],
AUTO_SELECT_USERCASE: [],
AUTO_SELECT_RAW: [],
}
}
def add_actions(type, action_list):
for action in action_list:
meta['by_tag'][action.case_tag] = {
'type': type,
'action': action
}
if action.parent_tag:
meta['by_parent_tag'][action.parent_tag] = {
'type': type,
'action': action
}
if type == 'load' and action.auto_select and action.auto_select.mode:
meta['by_auto_select_mode'][action.auto_select.mode].append(action)
add_actions('load', self.get_load_update_actions())
add_actions('open', self.get_open_actions())
return meta
class FormSource(object):
def __get__(self, form, form_cls):
if not form:
return self
unique_id = form.get_unique_id()
app = form.get_app()
filename = "%s.xml" % unique_id
# for backwards compatibility of really old apps
try:
old_contents = form['contents']
except AttributeError:
pass
else:
app.lazy_put_attachment(old_contents, filename)
del form['contents']
try:
source = app.lazy_fetch_attachment(filename)
except ResourceNotFound:
source = ''
return source
def __set__(self, form, value):
unique_id = form.get_unique_id()
app = form.get_app()
filename = "%s.xml" % unique_id
app.lazy_put_attachment(value, filename)
form.validation_cache = None
try:
form.xmlns = form.wrapped_xform().data_node.tag_xmlns
except Exception:
form.xmlns = None
class CachedStringProperty(object):
def __init__(self, key):
self.get_key = key
def __get__(self, instance, owner):
return self.get(self.get_key(instance))
def __set__(self, instance, value):
self.set(self.get_key(instance), value)
@classmethod
def get(cls, key):
return cache.get(key)
@classmethod
def set(cls, key, value):
cache.set(key, value, 7*24*60*60) # cache for 7 days
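# Usage sketch (see FormBase.validation_cache below): a class attribute such as
#     validation_cache = CachedStringProperty(lambda self: "cache-...-validation")
# reads and writes Django's cache instead of storing the string on the document.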
class ScheduleVisit(DocumentSchema):
"""
due: Days after the anchor date that this visit is due
late_window: Days after the due day that this visit is valid until
"""
due = IntegerProperty()
late_window = IntegerProperty()
class FormLink(DocumentSchema):
"""
xpath: xpath condition that must be true in order to open next form
form_id: id of next form to open
"""
xpath = StringProperty()
form_id = FormIdProperty('modules[*].forms[*].form_links[*].form_id')
class FormSchedule(DocumentSchema):
"""
anchor: Case property containing a date after which this schedule becomes active
expiry: Days after the anchor date that this schedule expires (optional)
visit_list: List of visits in this schedule
post_schedule_increment: Repeat period for visits to occur after the last fixed visit (optional)
transition_condition: Condition under which the schedule transitions to the next phase
termination_condition: Condition under which the schedule terminates
"""
anchor = StringProperty()
expires = IntegerProperty()
visits = SchemaListProperty(ScheduleVisit)
post_schedule_increment = IntegerProperty()
transition_condition = SchemaProperty(FormActionCondition)
termination_condition = SchemaProperty(FormActionCondition)
class FormBase(DocumentSchema):
"""
Part of a Managed Application; configuration for a form.
Translates to a second-level menu on the phone
"""
form_type = None
name = DictProperty(unicode)
unique_id = StringProperty()
show_count = BooleanProperty(default=False)
xmlns = StringProperty()
version = IntegerProperty()
source = FormSource()
validation_cache = CachedStringProperty(
lambda self: "cache-%s-%s-validation" % (self.get_app().get_id, self.unique_id)
)
post_form_workflow = StringProperty(
default=WORKFLOW_DEFAULT,
choices=[WORKFLOW_DEFAULT, WORKFLOW_ROOT, WORKFLOW_MODULE, WORKFLOW_PREVIOUS, WORKFLOW_FORM]
)
auto_gps_capture = BooleanProperty(default=False)
no_vellum = BooleanProperty(default=False)
form_links = SchemaListProperty(FormLink)
@classmethod
def wrap(cls, data):
data.pop('validation_cache', '')
if cls is FormBase:
doc_type = data['doc_type']
if doc_type == 'Form':
return Form.wrap(data)
elif doc_type == 'AdvancedForm':
return AdvancedForm.wrap(data)
else:
try:
return CareplanForm.wrap(data)
except ValueError:
raise ValueError('Unexpected doc_type for Form', doc_type)
else:
return super(FormBase, cls).wrap(data)
@classmethod
def generate_id(cls):
return hex(random.getrandbits(160))[2:-1]
@classmethod
def get_form(cls, form_unique_id, and_app=False):
try:
d = get_db().view(
'app_manager/xforms_index',
key=form_unique_id
).one()
except MultipleResultsFound as e:
raise XFormIdNotUnique(
"xform id '%s' not unique: %s" % (form_unique_id, e)
)
if d:
d = d['value']
else:
raise ResourceNotFound()
# unpack the dict into variables app_id, module_id, form_id
app_id, unique_id = [d[key] for key in ('app_id', 'unique_id')]
app = Application.get(app_id)
form = app.get_form(unique_id)
if and_app:
return form, app
else:
return form
@property
def schedule_form_id(self):
return self.unique_id[:6]
def wrapped_xform(self):
return XForm(self.source)
def validate_form(self):
vc = self.validation_cache
if vc is None:
try:
validate_xform(self.source,
version=self.get_app().application_version)
except XFormValidationError as e:
validation_dict = {
"fatal_error": e.fatal_error,
"validation_problems": e.validation_problems,
"version": e.version,
}
vc = self.validation_cache = json.dumps(validation_dict)
else:
vc = self.validation_cache = ""
if vc:
try:
raise XFormValidationError(**json.loads(vc))
except ValueError:
self.validation_cache = None
return self.validate_form()
return self
def validate_for_build(self, validate_module=True):
errors = []
try:
module = self.get_module()
except AttributeError:
module = None
meta = {
'form_type': self.form_type,
'module': module.get_module_info() if module else {},
'form': {"id": self.id if hasattr(self, 'id') else None, "name": self.name}
}
xml_valid = False
if self.source == '':
errors.append(dict(type="blank form", **meta))
else:
try:
_parse_xml(self.source)
xml_valid = True
except XFormException as e:
errors.append(dict(
type="invalid xml",
message=unicode(e) if self.source else '',
**meta
))
except ValueError:
logging.error("Failed: _parse_xml(string=%r)" % self.source)
raise
else:
try:
self.validate_form()
except XFormValidationError as e:
error = {'type': 'validation error', 'validation_message': unicode(e)}
error.update(meta)
errors.append(error)
try:
self.case_list_module
except AssertionError:
msg = _("Form referenced as the registration form for multiple modules.")
error = {'type': 'validation error', 'validation_message': msg}
error.update(meta)
errors.append(error)
if self.post_form_workflow == WORKFLOW_FORM and not self.form_links:
errors.append(dict(type="no form links", **meta))
errors.extend(self.extended_build_validation(meta, xml_valid, validate_module))
return errors
def extended_build_validation(self, error_meta, xml_valid, validate_module=True):
"""
Override to perform additional validation during build process.
"""
return []
def get_unique_id(self):
"""
Return unique_id if it exists, otherwise initialize it
Does _not_ force a save, so it's the caller's responsibility to save the app
"""
if not self.unique_id:
self.unique_id = FormBase.generate_id()
return self.unique_id
def get_app(self):
return self._app
def get_version(self):
return self.version if self.version else self.get_app().version
def add_stuff_to_xform(self, xform):
app = self.get_app()
xform.exclude_languages(app.build_langs)
xform.set_default_language(app.build_langs[0])
xform.normalize_itext()
xform.strip_vellum_ns_attributes()
xform.set_version(self.get_version())
def render_xform(self):
xform = XForm(self.source)
self.add_stuff_to_xform(xform)
return xform.render()
@quickcache(['self.source', 'langs', 'include_triggers', 'include_groups', 'include_translations'])
def get_questions(self, langs, include_triggers=False,
include_groups=False, include_translations=False):
return XForm(self.source).get_questions(
langs=langs,
include_triggers=include_triggers,
include_groups=include_groups,
include_translations=include_translations,
)
@memoized
def get_case_property_name_formatter(self):
"""Get a function that formats case property names
The returned function requires two arguments
`(case_property_name, data_path)` and returns a string.
"""
try:
valid_paths = {question['value']: question['tag']
for question in self.get_questions(langs=[])}
except XFormException as e:
# punt on invalid xml (sorry, no rich attachments)
valid_paths = {}
def format_key(key, path):
if valid_paths.get(path) == "upload":
return u"{}{}".format(ATTACHMENT_PREFIX, key)
return key
return format_key
def export_json(self, dump_json=True):
source = self.to_json()
del source['unique_id']
return json.dumps(source) if dump_json else source
def rename_lang(self, old_lang, new_lang):
_rename_key(self.name, old_lang, new_lang)
try:
self.rename_xform_language(old_lang, new_lang)
except XFormException:
pass
def rename_xform_language(self, old_code, new_code):
source = XForm(self.source)
if source.exists():
source.rename_language(old_code, new_code)
source = source.render()
self.source = source
def default_name(self):
app = self.get_app()
return trans(
self.name,
[app.default_language] + app.build_langs,
include_lang=False
)
@property
def full_path_name(self):
return "%(app_name)s > %(module_name)s > %(form_name)s" % {
'app_name': self.get_app().name,
'module_name': self.get_module().default_name(),
'form_name': self.default_name()
}
@property
def has_fixtures(self):
return 'src="jr://fixture/item-list:' in self.source
def get_auto_gps_capture(self):
app = self.get_app()
if app.build_version and app.enable_auto_gps:
return self.auto_gps_capture or app.auto_gps_capture
else:
return False
def is_registration_form(self, case_type=None):
"""
Should return True if this form passes the following tests:
* does not require a case
* registers a case of type 'case_type' if supplied
"""
raise NotImplementedError()
def update_app_case_meta(self, app_case_meta):
pass
@property
@memoized
def case_list_module(self):
case_list_modules = [
mod for mod in self.get_app().get_modules() if mod.case_list_form.form_id == self.unique_id
]
assert len(case_list_modules) <= 1, "Form referenced my multiple modules"
return case_list_modules[0] if case_list_modules else None
@property
def is_case_list_form(self):
return self.case_list_module is not None
class IndexedFormBase(FormBase, IndexedSchema):
def get_app(self):
return self._parent._parent
def get_module(self):
return self._parent
def get_case_type(self):
return self._parent.case_type
def check_case_properties(self, all_names=None, subcase_names=None, case_tag=None):
all_names = all_names or []
subcase_names = subcase_names or []
errors = []
# reserved_words are hard-coded in three different places!
# Here, case-config-ui-*.js, and module_view.html
reserved_words = load_case_reserved_words()
for key in all_names:
try:
validate_property(key)
except ValueError:
errors.append({'type': 'update_case word illegal', 'word': key, 'case_tag': case_tag})
_, key = split_path(key)
if key in reserved_words:
errors.append({'type': 'update_case uses reserved word', 'word': key, 'case_tag': case_tag})
# no parent properties for subcase
for key in subcase_names:
if not re.match(r'^[a-zA-Z][\w_-]*$', key):
errors.append({'type': 'update_case word illegal', 'word': key, 'case_tag': case_tag})
return errors
def check_paths(self, paths):
errors = []
try:
valid_paths = {question['value']: question['tag']
for question in self.get_questions(langs=[])}
except XFormException as e:
errors.append({'type': 'invalid xml', 'message': unicode(e)})
else:
no_multimedia = not self.get_app().enable_multimedia_case_property
for path in set(paths):
if path not in valid_paths:
errors.append({'type': 'path error', 'path': path})
elif no_multimedia and valid_paths[path] == "upload":
errors.append({'type': 'multimedia case property not supported', 'path': path})
return errors
def add_property_save(self, app_case_meta, case_type, name,
questions, question_path, condition=None):
if question_path in questions:
app_case_meta.add_property_save(
case_type,
name,
self.unique_id,
questions[question_path],
condition
)
else:
app_case_meta.add_property_error(
case_type,
name,
self.unique_id,
"%s is not a valid question" % question_path
)
def add_property_load(self, app_case_meta, case_type, name,
questions, question_path):
if question_path in questions:
app_case_meta.add_property_load(
case_type,
name,
self.unique_id,
questions[question_path]
)
else:
app_case_meta.add_property_error(
case_type,
name,
self.unique_id,
"%s is not a valid question" % question_path
)
class JRResourceProperty(StringProperty):
def validate(self, value, required=True):
super(JRResourceProperty, self).validate(value, required)
if value is not None and not value.startswith('jr://'):
raise BadValueError("JR Resources must start with 'jr://")
return value
class NavMenuItemMediaMixin(DocumentSchema):
media_image = JRResourceProperty(required=False)
media_audio = JRResourceProperty(required=False)
class Form(IndexedFormBase, NavMenuItemMediaMixin):
form_type = 'module_form'
form_filter = StringProperty()
requires = StringProperty(choices=["case", "referral", "none"], default="none")
actions = SchemaProperty(FormActions)
def add_stuff_to_xform(self, xform):
super(Form, self).add_stuff_to_xform(xform)
xform.add_case_and_meta(self)
def all_other_forms_require_a_case(self):
m = self.get_module()
return all([form.requires == 'case' for form in m.get_forms() if form.id != self.id])
def session_var_for_action(self, action_type, subcase_index=None):
module_case_type = self.get_module().case_type
if action_type == 'open_case':
return 'case_id_new_{}_0'.format(module_case_type)
if action_type == 'subcase':
opens_case = 'open_case' in self.active_actions()
subcase_type = self.actions.subcases[subcase_index].case_type
if opens_case:
subcase_index += 1
return 'case_id_new_{}_{}'.format(subcase_type, subcase_index)
def _get_active_actions(self, types):
actions = {}
for action_type in types:
a = getattr(self.actions, action_type)
if isinstance(a, list):
if a:
actions[action_type] = a
elif a.is_active():
actions[action_type] = a
return actions
def active_actions(self):
if self.get_app().application_version == APP_V1:
action_types = (
'open_case', 'update_case', 'close_case',
'open_referral', 'update_referral', 'close_referral',
'case_preload', 'referral_preload'
)
else:
if self.requires == 'none':
action_types = (
'open_case', 'update_case', 'close_case', 'subcases',
'usercase_update', 'usercase_preload',
)
elif self.requires == 'case':
action_types = (
'update_case', 'close_case', 'case_preload', 'subcases',
'usercase_update', 'usercase_preload',
)
else:
# this is left around for legacy migrated apps
action_types = (
'open_case', 'update_case', 'close_case',
'case_preload', 'subcases',
'usercase_update', 'usercase_preload',
)
return self._get_active_actions(action_types)
def active_non_preloader_actions(self):
return self._get_active_actions((
'open_case', 'update_case', 'close_case',
'open_referral', 'update_referral', 'close_referral'))
def check_actions(self):
errors = []
subcase_names = set()
for subcase_action in self.actions.subcases:
if not subcase_action.case_type:
errors.append({'type': 'subcase has no case type'})
subcase_names.update(subcase_action.case_properties)
if self.requires == 'none' and self.actions.open_case.is_active() \
and not self.actions.open_case.name_path:
errors.append({'type': 'case_name required'})
errors.extend(self.check_case_properties(
all_names=self.actions.all_property_names(),
subcase_names=subcase_names
))
def generate_paths():
for action in self.active_actions().values():
if isinstance(action, list):
actions = action
else:
actions = [action]
for action in actions:
for path in FormAction.get_action_paths(action):
yield path
errors.extend(self.check_paths(generate_paths()))
return errors
def requires_case(self):
# all referrals also require cases
return self.requires in ("case", "referral")
def requires_case_type(self):
return self.requires_case() or \
bool(self.active_non_preloader_actions())
def requires_referral(self):
return self.requires == "referral"
def is_registration_form(self, case_type=None):
return not self.requires_case() and 'open_case' in self.active_actions() and \
(not case_type or self.get_module().case_type == case_type)
def extended_build_validation(self, error_meta, xml_valid, validate_module=True):
errors = []
if xml_valid:
for error in self.check_actions():
error.update(error_meta)
errors.append(error)
if validate_module:
needs_case_type = False
needs_case_detail = False
needs_referral_detail = False
if self.requires_case():
needs_case_detail = True
needs_case_type = True
if self.requires_case_type():
needs_case_type = True
if self.requires_referral():
needs_referral_detail = True
errors.extend(self.get_module().get_case_errors(
needs_case_type=needs_case_type,
needs_case_detail=needs_case_detail,
needs_referral_detail=needs_referral_detail,
))
return errors
def get_case_updates(self, case_type):
# This method is used by both get_all_case_properties and
# get_usercase_properties. In the case of usercase properties, use
# the usercase_update action, and for normal cases, use the
# update_case action
if case_type == self.get_module().case_type or case_type == USERCASE_TYPE:
format_key = self.get_case_property_name_formatter()
action = self.actions.usercase_update if case_type == USERCASE_TYPE else self.actions.update_case
return [format_key(*item) for item in action.update.items()]
return []
@memoized
def get_child_case_types(self):
'''
Return a list of each case type for which this Form opens a new child case.
:return:
'''
child_case_types = set()
for subcase in self.actions.subcases:
if subcase.close_condition.type == "never":
child_case_types.add(subcase.case_type)
return child_case_types
@memoized
def get_parent_types_and_contributed_properties(self, module_case_type, case_type):
parent_types = set()
case_properties = set()
for subcase in self.actions.subcases:
if subcase.case_type == case_type:
case_properties.update(
subcase.case_properties.keys()
)
if case_type != module_case_type and (
self.actions.open_case.is_active() or
self.actions.update_case.is_active() or
self.actions.close_case.is_active()):
parent_types.add((module_case_type, subcase.reference_id or 'parent'))
return parent_types, case_properties
def update_app_case_meta(self, app_case_meta):
from corehq.apps.reports.formdetails.readable import FormQuestionResponse
questions = {
q['value']: FormQuestionResponse(q)
for q in self.get_questions(self.get_app().langs, include_translations=True)
}
module_case_type = self.get_module().case_type
type_meta = app_case_meta.get_type(module_case_type)
for type_, action in self.active_actions().items():
if type_ == 'open_case':
type_meta.add_opener(self.unique_id, action.condition)
self.add_property_save(
app_case_meta,
module_case_type,
'name',
questions,
action.name_path
)
if type_ == 'close_case':
type_meta.add_closer(self.unique_id, action.condition)
if type_ == 'update_case':
for name, question_path in FormAction.get_action_properties(action):
self.add_property_save(
app_case_meta,
module_case_type,
name,
questions,
question_path
)
if type_ == 'case_preload':
for name, question_path in FormAction.get_action_properties(action):
self.add_property_load(
app_case_meta,
module_case_type,
name,
questions,
question_path
)
if type_ == 'subcases':
for act in action:
if act.is_active():
sub_type_meta = app_case_meta.get_type(act.case_type)
sub_type_meta.add_opener(self.unique_id, act.condition)
if act.close_condition.is_active():
sub_type_meta.add_closer(self.unique_id, act.close_condition)
for name, question_path in FormAction.get_action_properties(act):
self.add_property_save(
app_case_meta,
act.case_type,
name,
questions,
question_path
)
class UserRegistrationForm(FormBase):
form_type = 'user_registration'
username_path = StringProperty(default='username')
password_path = StringProperty(default='password')
data_paths = DictProperty()
def add_stuff_to_xform(self, xform):
super(UserRegistrationForm, self).add_stuff_to_xform(xform)
xform.add_user_registration(self.username_path, self.password_path, self.data_paths)
class MappingItem(DocumentSchema):
key = StringProperty()
# lang => localized string
value = DictProperty()
@property
def key_as_variable(self):
"""
Return an xml variable name to represent this key.
If the key has no spaces, return the key with "k" prepended.
If the key does contain spaces, return a hash of the key with "h" prepended.
The prepended characters prevent the variable name from starting with a
numeral, which is illegal.
"""
if " " not in self.key:
return 'k{key}'.format(key=self.key)
else:
return 'h{hash}'.format(hash=hashlib.md5(self.key).hexdigest()[:8])
class GraphAnnotations(IndexedSchema):
display_text = DictProperty()
x = StringProperty()
y = StringProperty()
class GraphSeries(DocumentSchema):
config = DictProperty()
data_path = StringProperty()
x_function = StringProperty()
y_function = StringProperty()
radius_function = StringProperty()
class GraphConfiguration(DocumentSchema):
config = DictProperty()
locale_specific_config = DictProperty()
annotations = SchemaListProperty(GraphAnnotations)
graph_type = StringProperty()
series = SchemaListProperty(GraphSeries)
class DetailTab(IndexedSchema):
"""
Represents a tab in the case detail screen on the phone. Ex:
{
'name': 'Medical',
'starting_index': 3
}
"""
header = DictProperty()
starting_index = IntegerProperty()
class DetailColumn(IndexedSchema):
"""
Represents a column in case selection screen on the phone. Ex:
{
'header': {'en': 'Sex', 'por': 'Sexo'},
'model': 'case',
'field': 'sex',
'format': 'enum',
'xpath': '.',
'enum': [
{'key': 'm', 'value': {'en': 'Male', 'por': 'Macho'}},
{'key': 'f', 'value': {'en': 'Female', 'por': 'Fêmea'}},
],
}
"""
header = DictProperty()
model = StringProperty()
field = StringProperty()
format = StringProperty()
enum = SchemaListProperty(MappingItem)
graph_configuration = SchemaProperty(GraphConfiguration)
case_tile_field = StringProperty()
late_flag = IntegerProperty(default=30)
advanced = StringProperty(default="")
calc_xpath = StringProperty(default=".")
filter_xpath = StringProperty(default="")
time_ago_interval = FloatProperty(default=365.25)
@property
def enum_dict(self):
"""for backwards compatibility with building 1.0 apps"""
import warnings
warnings.warn('You should not use enum_dict. Use enum instead',
DeprecationWarning)
return dict((item.key, item.value) for item in self.enum)
def rename_lang(self, old_lang, new_lang):
for dct in [self.header] + [item.value for item in self.enum]:
_rename_key(dct, old_lang, new_lang)
@property
def field_type(self):
if FIELD_SEPARATOR in self.field:
return self.field.split(FIELD_SEPARATOR, 1)[0]
else:
return 'property' # equivalent to property:parent/case_property
@property
def field_property(self):
if FIELD_SEPARATOR in self.field:
return self.field.split(FIELD_SEPARATOR, 1)[1]
else:
return self.field
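# Sketch of the split (assumes FIELD_SEPARATOR is the ':' used in fields of
# the form 'property:parent/case_property', per the comment above):
# >>> col = DetailColumn(field='property:parent/dob')
# >>> col.field_type, col.field_property
# ('property', 'parent/dob')
# A bare field like 'dob' falls back to type 'property' with property 'dob'.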
class TimeAgoInterval(object):
map = {
'day': 1.0,
'week': 7.0,
'month': 30.4375,
'year': 365.25
}
@classmethod
def get_from_old_format(cls, format):
if format == 'years-ago':
return cls.map['year']
elif format == 'months-ago':
return cls.map['month']
@classmethod
def wrap(cls, data):
if data.get('format') in ('months-ago', 'years-ago'):
data['time_ago_interval'] = cls.TimeAgoInterval.get_from_old_format(data['format'])
data['format'] = 'time-ago'
# Lazy migration: enum used to be a dict, now is a list
if isinstance(data.get('enum'), dict):
data['enum'] = sorted({'key': key, 'value': value}
for key, value in data['enum'].items())
return super(DetailColumn, cls).wrap(data)
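# Migration sketch (hypothetical document, hedged): wrapping an old-style
# column rewrites both legacy shapes in place:
# >>> col = DetailColumn.wrap({'format': 'years-ago',
# ...                          'enum': {'m': {'en': 'Male'}}})
# >>> col.format, col.time_ago_interval
# ('time-ago', 365.25)
# >>> col.enum[0].key
# 'm'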
class SortElement(IndexedSchema):
field = StringProperty()
type = StringProperty()
direction = StringProperty()
class SortOnlyDetailColumn(DetailColumn):
"""This is a mock type, not intended to be part of a document"""
@property
def _i(self):
"""
assert that SortOnlyDetailColumn never has ._i or .id called
since it should never be in an app document
"""
raise NotImplementedError()
class CaseListLookupMixin(DocumentSchema):
"""
Allows for the addition of Android Callouts to do lookups from the CaseList
<lookup action="" image="" name="">
<extra key="" value="" />
<response key="" />
</lookup>
"""
lookup_enabled = BooleanProperty(default=False)
lookup_action = StringProperty()
lookup_name = StringProperty()
lookup_image = JRResourceProperty(required=False)
lookup_extras = SchemaListProperty()
lookup_responses = SchemaListProperty()
class Detail(IndexedSchema, CaseListLookupMixin):
"""
Full configuration for a case selection screen
"""
display = StringProperty(choices=['short', 'long'])
columns = SchemaListProperty(DetailColumn)
get_columns = IndexedSchema.Getter('columns')
tabs = SchemaListProperty(DetailTab)
get_tabs = IndexedSchema.Getter('tabs')
sort_elements = SchemaListProperty(SortElement)
filter = StringProperty()
custom_xml = StringProperty()
use_case_tiles = BooleanProperty()
persist_tile_on_forms = BooleanProperty()
pull_down_tile = BooleanProperty()
def get_tab_spans(self):
'''
Return the (start, end) index pairs into self.columns delimiting the
columns that belong to each tab.
'''
tabs = list(self.get_tabs())
ret = []
for tab in tabs:
try:
end = tabs[tab.id + 1].starting_index
except IndexError:
end = len(self.columns)
ret.append((tab.starting_index, end))
return ret
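# Worked example (sketch): with len(self.columns) == 5 and tabs whose
# starting_index values are 0 and 3, get_tab_spans() returns
# [(0, 3), (3, 5)] -- each pair is the half-open [start, end) slice of
# self.columns owned by that tab.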
@parse_int([1])
def get_column(self, i):
return self.columns[i].with_id(i % len(self.columns), self)
def rename_lang(self, old_lang, new_lang):
for column in self.columns:
column.rename_lang(old_lang, new_lang)
class CaseList(IndexedSchema, NavMenuItemMediaMixin):
label = DictProperty()
show = BooleanProperty(default=False)
def rename_lang(self, old_lang, new_lang):
_rename_key(self.label, old_lang, new_lang)
class ParentSelect(DocumentSchema):
active = BooleanProperty(default=False)
relationship = StringProperty(default='parent')
module_id = StringProperty()
class DetailPair(DocumentSchema):
short = SchemaProperty(Detail)
long = SchemaProperty(Detail)
@classmethod
def wrap(cls, data):
self = super(DetailPair, cls).wrap(data)
self.short.display = 'short'
self.long.display = 'long'
return self
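# Note (descriptive sketch): because wrap() re-stamps the display values, a
# DetailPair loaded from JSON always satisfies pair.short.display == 'short'
# and pair.long.display == 'long', regardless of what was persisted.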
class CaseListForm(NavMenuItemMediaMixin):
form_id = FormIdProperty('modules[*].case_list_form.form_id')
label = DictProperty()
def rename_lang(self, old_lang, new_lang):
_rename_key(self.label, old_lang, new_lang)
class ModuleBase(IndexedSchema, NavMenuItemMediaMixin):
name = DictProperty(unicode)
unique_id = StringProperty()
case_type = StringProperty()
case_list_form = SchemaProperty(CaseListForm)
module_filter = StringProperty()
@classmethod
def wrap(cls, data):
if cls is ModuleBase:
doc_type = data['doc_type']
if doc_type == 'Module':
return Module.wrap(data)
elif doc_type == 'CareplanModule':
return CareplanModule.wrap(data)
elif doc_type == 'AdvancedModule':
return AdvancedModule.wrap(data)
elif doc_type == 'ReportModule':
return ReportModule.wrap(data)
else:
raise ValueError('Unexpected doc_type for Module', doc_type)
else:
return super(ModuleBase, cls).wrap(data)
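# Dispatch sketch (hypothetical doc, other required fields elided): wrapping
# through the base class routes on doc_type, so callers never need to know
# the concrete module class up front:
# >>> m = ModuleBase.wrap({'doc_type': 'ReportModule', ...})
# >>> type(m).__name__
# 'ReportModule'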
def get_or_create_unique_id(self):
"""
It is the caller's responsibility to save the Application
after calling this function.
WARNING: If called on the same doc in different requests without saving,
this function will return a different uuid each time,
likely causing unexpected behavior
"""
if not self.unique_id:
self.unique_id = FormBase.generate_id()
return self.unique_id
get_forms = IndexedSchema.Getter('forms')
@parse_int([1])
def get_form(self, i):
try:
return self.forms[i].with_id(i % len(self.forms), self)
except IndexError:
raise FormNotFoundException()
def get_child_modules(self):
return [
module for module in self.get_app().get_modules()
if module.unique_id != self.unique_id and getattr(module, 'root_module_id', None) == self.unique_id
]
def requires_case_details(self):
return False
def get_case_types(self):
return set([self.case_type])
def get_module_info(self):
return {
'id': self.id,
'name': self.name,
}
def get_app(self):
return self._parent
def default_name(self):
app = self.get_app()
return trans(
self.name,
[app.default_language] + app.build_langs,
include_lang=False
)
def rename_lang(self, old_lang, new_lang):
_rename_key(self.name, old_lang, new_lang)
for form in self.get_forms():
form.rename_lang(old_lang, new_lang)
for _, detail, _ in self.get_details():
detail.rename_lang(old_lang, new_lang)
def validate_detail_columns(self, columns):
from corehq.apps.app_manager.suite_xml import FIELD_TYPE_LOCATION
from corehq.apps.locations.util import parent_child
hierarchy = None
for column in columns:
if column.format in ('enum', 'enum-image'):
for item in column.enum:
key = item.key
# key cannot contain certain characters because it is used
# to generate an xpath variable name within suite.xml
# (names with spaces will be hashed to form the xpath
# variable name)
if not re.match('^([\w_ -]*)$', key):
yield {
'type': 'invalid id key',
'key': key,
'module': self.get_module_info(),
}
elif column.field_type == FIELD_TYPE_LOCATION:
hierarchy = hierarchy or parent_child(self.get_app().domain)
try:
LocationXpath('').validate(column.field_property, hierarchy)
except LocationXpathValidationError as e:
yield {
'type': 'invalid location xpath',
'details': unicode(e),
'module': self.get_module_info(),
'column': column,
}
def get_form_by_unique_id(self, unique_id):
for form in self.get_forms():
if form.get_unique_id() == unique_id:
return form
def validate_for_build(self):
errors = []
if self.requires_case_details():
errors.extend(self.get_case_errors(
needs_case_type=True,
needs_case_detail=True
))
if self.case_list_form.form_id:
try:
form = self.get_app().get_form(self.case_list_form.form_id)
except FormNotFoundException:
errors.append({
'type': 'case list form missing',
'module': self.get_module_info()
})
else:
if not form.is_registration_form(self.case_type):
errors.append({
'type': 'case list form not registration',
'module': self.get_module_info(),
'form': form,
})
return errors
@memoized
def get_child_case_types(self):
'''
Return the set of case types for which this module has a form that
opens a new child case of that type.
'''
child_case_types = set()
for form in self.get_forms():
if hasattr(form, 'get_child_case_types'):
child_case_types.update(form.get_child_case_types())
return child_case_types
def get_custom_entries(self):
"""
By default, suite entries are configured by forms, but you can also provide custom
entries by overriding this function.
See ReportModule for an example
"""
return []
def uses_media(self):
"""
Whether the module uses media. If this returns false then media will not be generated
for the module.
"""
return True
class Module(ModuleBase):
"""
A group of related forms, and configuration that applies to them all.
Translates to a top-level menu on the phone.
"""
module_type = 'basic'
case_label = DictProperty()
referral_label = DictProperty()
forms = SchemaListProperty(Form)
case_details = SchemaProperty(DetailPair)
ref_details = SchemaProperty(DetailPair)
put_in_root = BooleanProperty(default=False)
case_list = SchemaProperty(CaseList)
referral_list = SchemaProperty(CaseList)
task_list = SchemaProperty(CaseList)
parent_select = SchemaProperty(ParentSelect)
@classmethod
def wrap(cls, data):
if 'details' in data:
try:
case_short, case_long, ref_short, ref_long = data['details']
except ValueError:
# "need more than 0 values to unpack"
pass
else:
data['case_details'] = {
'short': case_short,
'long': case_long,
}
data['ref_details'] = {
'short': ref_short,
'long': ref_long,
}
finally:
del data['details']
return super(Module, cls).wrap(data)
@classmethod
def new_module(cls, name, lang):
detail = Detail(
columns=[DetailColumn(
format='plain',
header={(lang or 'en'): ugettext("Name")},
field='name',
model='case',
)]
)
module = Module(
name={(lang or 'en'): name or ugettext("Untitled Module")},
forms=[],
case_type='',
case_details=DetailPair(
short=Detail(detail.to_json()),
long=Detail(detail.to_json()),
),
)
module.get_or_create_unique_id()
return module
def new_form(self, name, lang, attachment=''):
form = Form(
name={lang if lang else "en": name if name else _("Untitled Form")},
)
self.forms.append(form)
form = self.get_form(-1)
form.source = attachment
return form
def add_insert_form(self, from_module, form, index=None, with_source=False):
if isinstance(form, Form):
new_form = form
elif isinstance(form, AdvancedForm) and not form.actions.get_all_actions():
new_form = Form(
name=form.name,
form_filter=form.form_filter,
media_image=form.media_image,
media_audio=form.media_audio
)
new_form._parent = self
form._parent = self
if with_source:
new_form.source = form.source
else:
raise IncompatibleFormTypeException()
if index is not None:
self.forms.insert(index, new_form)
else:
self.forms.append(new_form)
return self.get_form(index if index is not None else -1)
def rename_lang(self, old_lang, new_lang):
super(Module, self).rename_lang(old_lang, new_lang)
for case_list in (self.case_list, self.referral_list):
case_list.rename_lang(old_lang, new_lang)
def get_details(self):
return (
('case_short', self.case_details.short, True),
('case_long', self.case_details.long, True),
('ref_short', self.ref_details.short, False),
('ref_long', self.ref_details.long, False),
)
@property
def detail_sort_elements(self):
try:
return self.case_details.short.sort_elements
except Exception:
return []
@property
def case_list_filter(self):
try:
return self.case_details.short.filter
except AttributeError:
return None
def validate_for_build(self):
errors = super(Module, self).validate_for_build()
if not self.forms and not self.case_list.show:
errors.append({
'type': 'no forms or case list',
'module': self.get_module_info(),
})
for sort_element in self.detail_sort_elements:
try:
validate_detail_screen_field(sort_element.field)
except ValueError:
errors.append({
'type': 'invalid sort field',
'field': sort_element.field,
'module': self.get_module_info(),
})
if self.case_list_filter:
try:
etree.XPath(self.case_list_filter)
except etree.XPathSyntaxError:
errors.append({
'type': 'invalid filter xpath',
'module': self.get_module_info(),
'filter': self.case_list_filter,
})
if self.parent_select.active and not self.parent_select.module_id:
errors.append({
'type': 'no parent select id',
'module': self.get_module_info()
})
for detail in [self.case_details.short, self.case_details.long]:
if detail.use_case_tiles:
if detail.display != "short":
errors.append({
'type': "invalid tile configuration",
'module': self.get_module_info(),
'reason': _('Case tiles may only be used for the case list (not the case details).')
})
col_by_tile_field = {c.case_tile_field: c for c in detail.columns}
for field in ["header", "top_left", "sex", "bottom_left", "date"]:
if field not in col_by_tile_field:
errors.append({
'type': "invalid tile configuration",
'module': self.get_module_info(),
'reason': _('A case property must be assigned to the "{}" tile field.').format(field)
})
return errors
def export_json(self, dump_json=True, keep_unique_id=False):
source = self.to_json()
if not keep_unique_id:
for form in source['forms']:
del form['unique_id']
return json.dumps(source) if dump_json else source
def export_jvalue(self):
return self.export_json(dump_json=False, keep_unique_id=True)
def requires(self):
r = set(["none"])
for form in self.get_forms():
r.add(form.requires)
if self.case_list.show:
r.add('case')
if self.referral_list.show:
r.add('referral')
for val in ("referral", "case", "none"):
if val in r:
return val
def detail_types(self):
return {
"referral": ["case_short", "case_long", "ref_short", "ref_long"],
"case": ["case_short", "case_long"],
"none": []
}[self.requires()]
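# Precedence sketch: requires() collapses the per-form requirements to the
# strongest of 'referral' > 'case' > 'none', and detail_types() maps that
# result to the detail screens the suite needs -- e.g. a module whose forms
# all require a case yields ['case_short', 'case_long'].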
def requires_case_details(self):
ret = False
if self.case_list.show:
return True
for form in self.get_forms():
if form.requires_case():
ret = True
break
return ret
@memoized
def all_forms_require_a_case(self):
return all([form.requires == 'case' for form in self.get_forms()])
def get_case_errors(self, needs_case_type, needs_case_detail, needs_referral_detail=False):
module_info = self.get_module_info()
if needs_case_type and not self.case_type:
yield {
'type': 'no case type',
'module': module_info,
}
if needs_case_detail:
if not self.case_details.short.columns:
yield {
'type': 'no case detail',
'module': module_info,
}
columns = self.case_details.short.columns + self.case_details.long.columns
errors = self.validate_detail_columns(columns)
for error in errors:
yield error
if needs_referral_detail and not self.ref_details.short.columns:
yield {
'type': 'no ref detail',
'module': module_info,
}
class AdvancedForm(IndexedFormBase, NavMenuItemMediaMixin):
form_type = 'advanced_form'
form_filter = StringProperty()
actions = SchemaProperty(AdvancedFormActions)
schedule = SchemaProperty(FormSchedule, default=None)
@classmethod
def wrap(cls, data):
# lazy migration to swap keys with values in action preload dict.
# http://manage.dimagi.com/default.asp?162213
load_actions = data.get('actions', {}).get('load_update_cases', [])
for action in load_actions:
preload = action['preload']
if preload and preload.values()[0].startswith('/'):
action['preload'] = {v: k for k, v in preload.items()}
return super(AdvancedForm, cls).wrap(data)
def add_stuff_to_xform(self, xform):
super(AdvancedForm, self).add_stuff_to_xform(xform)
xform.add_case_and_meta_advanced(self)
def requires_case(self):
return bool(self.actions.load_update_cases)
@property
def requires(self):
return 'case' if self.requires_case() else 'none'
def is_registration_form(self, case_type=None):
"""
Defined as form that opens a single case. Excludes forms that register
sub-cases and forms that require a case.
"""
reg_actions = self.get_registration_actions(case_type)
return not self.requires_case() and reg_actions and \
len(reg_actions) == 1
def get_registration_actions(self, case_type=None):
return [
action for action in self.actions.get_open_actions()
if not action.is_subcase and (not case_type or action.case_type == case_type)
]
def all_other_forms_require_a_case(self):
m = self.get_module()
return all([form.requires == 'case' for form in m.get_forms() if form.id != self.id])
def check_actions(self):
errors = []
for action in self.actions.get_subcase_actions():
if action.parent_tag not in self.actions.get_case_tags():
errors.append({'type': 'missing parent tag', 'case_tag': action.parent_tag})
if isinstance(action, AdvancedOpenCaseAction):
if not action.name_path:
errors.append({'type': 'case_name required', 'case_tag': action.case_tag})
meta = self.actions.actions_meta_by_tag.get(action.parent_tag)
if meta and meta['type'] == 'open' and meta['action'].repeat_context:
if not action.repeat_context or not action.repeat_context.startswith(meta['action'].repeat_context):
errors.append({'type': 'subcase repeat context', 'case_tag': action.case_tag})
try:
self.actions.get_action_hierarchy(action)
except ValueError:
errors.append({'type': 'circular ref', 'case_tag': action.case_tag})
errors.extend(self.check_case_properties(
subcase_names=action.get_property_names(),
case_tag=action.case_tag
))
for action in self.actions.get_all_actions():
if not action.case_type and (not isinstance(action, LoadUpdateAction) or not action.auto_select):
errors.append({'type': "no case type in action", 'case_tag': action.case_tag})
if isinstance(action, LoadUpdateAction) and action.auto_select:
mode = action.auto_select.mode
if not action.auto_select.value_key:
key_names = {
AUTO_SELECT_CASE: _('Case property'),
AUTO_SELECT_FIXTURE: _('Lookup Table field'),
AUTO_SELECT_USER: _('custom user property'),
AUTO_SELECT_RAW: _('custom XPath expression'),
}
if mode in key_names:
errors.append({'type': 'auto select key', 'key_name': key_names[mode]})
if not action.auto_select.value_source:
source_names = {
AUTO_SELECT_CASE: _('Case tag'),
AUTO_SELECT_FIXTURE: _('Lookup Table tag'),
}
if mode in source_names:
errors.append({'type': 'auto select source', 'source_name': source_names[mode]})
elif mode == AUTO_SELECT_CASE:
case_tag = action.auto_select.value_source
if not self.actions.get_action_from_tag(case_tag):
errors.append({'type': 'auto select case ref', 'case_tag': action.case_tag})
errors.extend(self.check_case_properties(
all_names=action.get_property_names(),
case_tag=action.case_tag
))
if self.form_filter:
if not any(action for action in self.actions.load_update_cases if not action.auto_select):
errors.append({'type': "filtering without case"})
def generate_paths():
for action in self.actions.get_all_actions():
for path in action.get_paths():
yield path
errors.extend(self.check_paths(generate_paths()))
return errors
def extended_build_validation(self, error_meta, xml_valid, validate_module=True):
errors = []
if xml_valid:
for error in self.check_actions():
error.update(error_meta)
errors.append(error)
module = self.get_module()
if module.has_schedule and not (self.schedule and self.schedule.anchor):
error = {
'type': 'validation error',
'validation_message': _("All forms in this module require a visit schedule.")
}
error.update(error_meta)
errors.append(error)
if validate_module:
errors.extend(module.get_case_errors(
needs_case_type=False,
needs_case_detail=module.requires_case_details(),
needs_referral_detail=False,
))
return errors
def get_case_updates(self, case_type):
updates = set()
format_key = self.get_case_property_name_formatter()
for action in self.actions.get_all_actions():
if action.case_type == case_type:
updates.update(format_key(*item)
for item in action.case_properties.iteritems())
return updates
@memoized
def get_parent_types_and_contributed_properties(self, module_case_type, case_type):
parent_types = set()
case_properties = set()
for subcase in self.actions.get_subcase_actions():
if subcase.case_type == case_type:
case_properties.update(
subcase.case_properties.keys()
)
parent = self.actions.get_action_from_tag(subcase.parent_tag)
if parent:
parent_types.add((parent.case_type, subcase.parent_reference_id or 'parent'))
return parent_types, case_properties
def update_app_case_meta(self, app_case_meta):
from corehq.apps.reports.formdetails.readable import FormQuestionResponse
questions = {
q['value']: FormQuestionResponse(q)
for q in self.get_questions(self.get_app().langs, include_translations=True)
}
for action in self.actions.load_update_cases:
for name, question_path in action.case_properties.items():
self.add_property_save(
app_case_meta,
action.case_type,
name,
questions,
question_path
)
for question_path, name in action.preload.items():
self.add_property_load(
app_case_meta,
action.case_type,
name,
questions,
question_path
)
if action.close_condition.is_active():
meta = app_case_meta.get_type(action.case_type)
meta.add_closer(self.unique_id, action.close_condition)
for action in self.actions.open_cases:
self.add_property_save(
app_case_meta,
action.case_type,
'name',
questions,
action.name_path,
action.open_condition
)
for name, question_path in action.case_properties.items():
self.add_property_save(
app_case_meta,
action.case_type,
name,
questions,
question_path,
action.open_condition
)
meta = app_case_meta.get_type(action.case_type)
meta.add_opener(self.unique_id, action.open_condition)
if action.close_condition.is_active():
meta.add_closer(self.unique_id, action.close_condition)
class AdvancedModule(ModuleBase):
module_type = 'advanced'
case_label = DictProperty()
forms = SchemaListProperty(AdvancedForm)
case_details = SchemaProperty(DetailPair)
product_details = SchemaProperty(DetailPair)
put_in_root = BooleanProperty(default=False)
case_list = SchemaProperty(CaseList)
has_schedule = BooleanProperty()
root_module_id = StringProperty()
@classmethod
def new_module(cls, name, lang):
detail = Detail(
columns=[DetailColumn(
format='plain',
header={(lang or 'en'): ugettext("Name")},
field='name',
model='case',
)]
)
module = AdvancedModule(
name={(lang or 'en'): name or ugettext("Untitled Module")},
forms=[],
case_type='',
case_details=DetailPair(
short=Detail(detail.to_json()),
long=Detail(detail.to_json()),
),
product_details=DetailPair(
short=Detail(
columns=[
DetailColumn(
format='plain',
header={(lang or 'en'): ugettext("Product")},
field='name',
model='product',
),
],
),
long=Detail(),
),
)
module.get_or_create_unique_id()
return module
def new_form(self, name, lang, attachment=''):
form = AdvancedForm(
name={lang if lang else "en": name if name else _("Untitled Form")},
)
if self.has_schedule:
form.schedule = FormSchedule()
self.forms.append(form)
form = self.get_form(-1)
form.source = attachment
return form
def add_insert_form(self, from_module, form, index=None, with_source=False):
if isinstance(form, AdvancedForm):
new_form = form
elif isinstance(form, Form):
new_form = AdvancedForm(
name=form.name,
form_filter=form.form_filter,
media_image=form.media_image,
media_audio=form.media_audio
)
new_form._parent = self
form._parent = self
if with_source:
new_form.source = form.source
actions = form.active_actions()
open = actions.get('open_case', None)
update = actions.get('update_case', None)
close = actions.get('close_case', None)
preload = actions.get('case_preload', None)
subcases = actions.get('subcases', None)
case_type = from_module.case_type
base_action = None
if open:
base_action = AdvancedOpenCaseAction(
case_type=case_type,
case_tag='open_{0}_0'.format(case_type),
name_path=open.name_path,
open_condition=open.condition,
case_properties=update.update if update else {},
)
new_form.actions.open_cases.append(base_action)
elif update or preload or close:
base_action = LoadUpdateAction(
case_type=case_type,
case_tag='load_{0}_0'.format(case_type),
case_properties=update.update if update else {},
preload=preload.preload if preload else {}
)
if from_module.parent_select.active:
app = self.get_app()
gen = suite_xml.SuiteGenerator(app, is_usercase_in_use(app.domain))
select_chain = gen.get_select_chain(from_module, include_self=False)
for n, link in enumerate(reversed(list(enumerate(select_chain)))):
i, module = link
new_form.actions.load_update_cases.append(LoadUpdateAction(
case_type=module.case_type,
case_tag='_'.join(['parent'] * (i + 1)),
details_module=module.unique_id,
parent_tag='_'.join(['parent'] * (i + 2)) if n > 0 else ''
))
base_action.parent_tag = 'parent'
if close:
base_action.close_condition = close.condition
new_form.actions.load_update_cases.append(base_action)
if subcases:
for i, subcase in enumerate(subcases):
open_subcase_action = AdvancedOpenCaseAction(
case_type=subcase.case_type,
case_tag='open_{0}_{1}'.format(subcase.case_type, i+1),
name_path=subcase.case_name,
open_condition=subcase.condition,
case_properties=subcase.case_properties,
repeat_context=subcase.repeat_context,
parent_reference_id=subcase.reference_id,
parent_tag=base_action.case_tag if base_action else ''
)
new_form.actions.open_cases.append(open_subcase_action)
else:
raise IncompatibleFormTypeException()
if index is not None:
self.forms.insert(index, new_form)
else:
self.forms.append(new_form)
return self.get_form(index if index is not None else -1)
def rename_lang(self, old_lang, new_lang):
super(AdvancedModule, self).rename_lang(old_lang, new_lang)
self.case_list.rename_lang(old_lang, new_lang)
@property
def root_module(self):
if self.root_module_id:
return self._parent.get_module_by_unique_id(self.root_module_id)
def requires_case_details(self):
if self.case_list.show:
return True
for form in self.forms:
if any(action.case_type == self.case_type for action in form.actions.load_update_cases):
return True
def all_forms_require_a_case(self):
return all(form.requires_case() for form in self.forms)
def get_details(self):
return (
('case_short', self.case_details.short, True),
('case_long', self.case_details.long, True),
('product_short', self.product_details.short, self.get_app().commtrack_enabled),
('product_long', self.product_details.long, False),
)
def get_case_errors(self, needs_case_type, needs_case_detail, needs_referral_detail=False):
module_info = self.get_module_info()
if needs_case_type and not self.case_type:
yield {
'type': 'no case type',
'module': module_info,
}
if needs_case_detail:
if not self.case_details.short.columns:
yield {
'type': 'no case detail',
'module': module_info,
}
if self.get_app().commtrack_enabled and not self.product_details.short.columns:
for form in self.forms:
if self.case_list.show or \
any(action.show_product_stock for action in form.actions.load_update_cases):
yield {
'type': 'no product detail',
'module': module_info,
}
break
columns = self.case_details.short.columns + self.case_details.long.columns
if self.get_app().commtrack_enabled:
columns += self.product_details.short.columns
errors = self.validate_detail_columns(columns)
for error in errors:
yield error
def validate_for_build(self):
errors = super(AdvancedModule, self).validate_for_build()
if not self.forms and not self.case_list.show:
errors.append({
'type': 'no forms or case list',
'module': self.get_module_info(),
})
if self.case_list_form.form_id:
forms = self.forms
case_tag = None
for form in forms:
info = self.get_module_info()
form_info = {"id": form.id if hasattr(form, 'id') else None, "name": form.name}
if not form.requires_case():
errors.append({
'type': 'case list module form must require case',
'module': info,
'form': form_info,
})
elif len(form.actions.load_update_cases) != 1:
errors.append({
'type': 'case list module form must require only one case',
'module': info,
'form': form_info,
})
case_action = form.actions.load_update_cases[0] if form.requires_case() else None
if case_action and case_action.case_type != self.case_type:
errors.append({
'type': 'case list module form must match module case type',
'module': info,
'form': form_info,
})
# set case_tag if not already set
case_tag = case_action.case_tag if not case_tag and case_action else case_tag
if case_action and case_action.case_tag != case_tag:
errors.append({
'type': 'all forms in case list module must have same case management',
'module': info,
'form': form_info,
'expected_tag': case_tag
})
if case_action and case_action.details_module and case_action.details_module != self.unique_id:
errors.append({
'type': 'forms in case list module must use modules details',
'module': info,
'form': form_info,
})
return errors
class CareplanForm(IndexedFormBase, NavMenuItemMediaMixin):
form_type = 'careplan_form'
mode = StringProperty(required=True, choices=['create', 'update'])
custom_case_updates = DictProperty()
case_preload = DictProperty()
@classmethod
def wrap(cls, data):
if cls is CareplanForm:
doc_type = data['doc_type']
if doc_type == 'CareplanGoalForm':
return CareplanGoalForm.wrap(data)
elif doc_type == 'CareplanTaskForm':
return CareplanTaskForm.wrap(data)
else:
raise ValueError('Unexpected doc_type for CareplanForm', doc_type)
else:
return super(CareplanForm, cls).wrap(data)
def add_stuff_to_xform(self, xform):
super(CareplanForm, self).add_stuff_to_xform(xform)
xform.add_care_plan(self)
def get_case_updates(self, case_type):
if case_type == self.case_type:
format_key = self.get_case_property_name_formatter()
return [format_key(*item) for item in self.case_updates().iteritems()]
else:
return []
def get_case_type(self):
return self.case_type
def get_parent_case_type(self):
return self._parent.case_type
def get_parent_types_and_contributed_properties(self, module_case_type, case_type):
parent_types = set()
case_properties = set()
if case_type == self.case_type:
if case_type == CAREPLAN_GOAL:
parent_types.add((module_case_type, 'parent'))
elif case_type == CAREPLAN_TASK:
parent_types.add((CAREPLAN_GOAL, 'goal'))
case_properties.update(self.case_updates().keys())
return parent_types, case_properties
def is_registration_form(self, case_type=None):
return self.mode == 'create' and (not case_type or self.case_type == case_type)
def update_app_case_meta(self, app_case_meta):
from corehq.apps.reports.formdetails.readable import FormQuestionResponse
questions = {
q['value']: FormQuestionResponse(q)
for q in self.get_questions(self.get_app().langs, include_translations=True)
}
meta = app_case_meta.get_type(self.case_type)
for name, question_path in self.case_updates().items():
self.add_property_save(
app_case_meta,
self.case_type,
name,
questions,
question_path
)
for name, question_path in self.case_preload.items():
self.add_property_load(
app_case_meta,
self.case_type,
name,
questions,
question_path
)
meta.add_opener(self.unique_id, FormActionCondition(
type='always',
))
meta.add_closer(self.unique_id, FormActionCondition(
type='if',
question=self.close_path,
answer='yes',
))
class CareplanGoalForm(CareplanForm):
case_type = CAREPLAN_GOAL
name_path = StringProperty(required=True, default='/data/name')
date_followup_path = StringProperty(required=True, default='/data/date_followup')
description_path = StringProperty(required=True, default='/data/description')
close_path = StringProperty(required=True, default='/data/close_goal')
@classmethod
def new_form(cls, lang, name, mode):
action = 'Update' if mode == 'update' else 'New'
form = CareplanGoalForm(mode=mode)
name = name or '%s Careplan %s' % (action, CAREPLAN_CASE_NAMES[form.case_type])
form.name = {lang: name}
if mode == 'update':
form.description_path = '/data/description_group/description'
source = load_form_template('%s_%s.xml' % (form.case_type, mode))
return form, source
def case_updates(self):
changes = self.custom_case_updates.copy()
changes.update({
'date_followup': self.date_followup_path,
'description': self.description_path,
})
return changes
def get_fixed_questions(self):
def q(name, case_key, label):
return {
'name': name,
'key': case_key,
'label': label,
'path': self[name]
}
questions = [
q('description_path', 'description', _('Description')),
q('date_followup_path', 'date_followup', _('Followup date')),
]
if self.mode == 'create':
return [q('name_path', 'name', _('Name'))] + questions
else:
return questions + [q('close_path', 'close', _('Close if'))]
class CareplanTaskForm(CareplanForm):
case_type = CAREPLAN_TASK
name_path = StringProperty(required=True, default='/data/task_repeat/name')
date_followup_path = StringProperty(required=True, default='/data/date_followup')
description_path = StringProperty(required=True, default='/data/description')
latest_report_path = StringProperty(required=True, default='/data/progress_group/progress_update')
close_path = StringProperty(required=True, default='/data/task_complete')
@classmethod
def new_form(cls, lang, name, mode):
action = 'Update' if mode == 'update' else 'New'
form = CareplanTaskForm(mode=mode)
name = name or '%s Careplan %s' % (action, CAREPLAN_CASE_NAMES[form.case_type])
form.name = {lang: name}
if mode == 'create':
form.date_followup_path = '/data/task_repeat/date_followup'
form.description_path = '/data/task_repeat/description'
source = load_form_template('%s_%s.xml' % (form.case_type, mode))
return form, source
def case_updates(self):
changes = self.custom_case_updates.copy()
changes.update({
'date_followup': self.date_followup_path,
})
if self.mode == 'create':
changes['description'] = self.description_path
else:
changes['latest_report'] = self.latest_report_path
return changes
def get_fixed_questions(self):
def q(name, case_key, label):
return {
'name': name,
'key': case_key,
'label': label,
'path': self[name]
}
questions = [
q('date_followup_path', 'date_followup', _('Followup date')),
]
if self.mode == 'create':
return [
q('name_path', 'name', _('Name')),
q('description_path', 'description', _('Description')),
] + questions
else:
return questions + [
q('latest_report_path', 'latest_report', _('Latest report')),
q('close_path', 'close', _('Close if')),
]
class CareplanModule(ModuleBase):
"""
A set of forms and configuration for managing the Care Plan workflow.
"""
module_type = 'careplan'
parent_select = SchemaProperty(ParentSelect)
display_separately = BooleanProperty(default=False)
forms = SchemaListProperty(CareplanForm)
goal_details = SchemaProperty(DetailPair)
task_details = SchemaProperty(DetailPair)
@classmethod
def new_module(cls, name, lang, target_module_id, target_case_type):
lang = lang or 'en'
module = CareplanModule(
name={lang: name or ugettext("Care Plan")},
parent_select=ParentSelect(
active=True,
relationship='parent',
module_id=target_module_id
),
case_type=target_case_type,
goal_details=DetailPair(
short=cls._get_detail(lang, 'goal_short'),
long=cls._get_detail(lang, 'goal_long'),
),
task_details=DetailPair(
short=cls._get_detail(lang, 'task_short'),
long=cls._get_detail(lang, 'task_long'),
)
)
module.get_or_create_unique_id()
return module
@classmethod
def _get_detail(cls, lang, detail_type):
header = ugettext('Goal') if detail_type.startswith('goal') else ugettext('Task')
columns = [
DetailColumn(
format='plain',
header={lang: header},
field='name',
model='case'),
DetailColumn(
format='date',
header={lang: ugettext("Followup")},
field='date_followup',
model='case')]
if detail_type.endswith('long'):
columns.append(DetailColumn(
format='plain',
header={lang: ugettext("Description")},
field='description',
model='case'))
if detail_type == 'task_long':
columns.append(DetailColumn(
format='plain',
header={lang: ugettext("Last update")},
field='latest_report',
model='case'))
return Detail(type=detail_type, columns=columns)
def add_insert_form(self, from_module, form, index=None, with_source=False):
if isinstance(form, CareplanForm):
if index is not None:
self.forms.insert(index, form)
else:
self.forms.append(form)
return self.get_form(index if index is not None else -1)
else:
raise IncompatibleFormTypeException()
def requires_case_details(self):
return True
def get_case_types(self):
return set([self.case_type]) | set(f.case_type for f in self.forms)
def get_form_by_type(self, case_type, mode):
for form in self.get_forms():
if form.case_type == case_type and form.mode == mode:
return form
def get_details(self):
return (
('%s_short' % CAREPLAN_GOAL, self.goal_details.short, True),
('%s_long' % CAREPLAN_GOAL, self.goal_details.long, True),
('%s_short' % CAREPLAN_TASK, self.task_details.short, True),
('%s_long' % CAREPLAN_TASK, self.task_details.long, True),
)
def get_case_errors(self, needs_case_type, needs_case_detail, needs_referral_detail=False):
module_info = self.get_module_info()
if needs_case_type and not self.case_type:
yield {
'type': 'no case type',
'module': module_info,
}
if needs_case_detail:
if not self.goal_details.short.columns:
yield {
'type': 'no case detail for goals',
'module': module_info,
}
if not self.task_details.short.columns:
yield {
'type': 'no case detail for tasks',
'module': module_info,
}
columns = self.goal_details.short.columns + self.goal_details.long.columns
columns += self.task_details.short.columns + self.task_details.long.columns
errors = self.validate_detail_columns(columns)
for error in errors:
yield error
def validate_for_build(self):
errors = super(CareplanModule, self).validate_for_build()
if not self.forms:
errors.append({
'type': 'no forms',
'module': self.get_module_info(),
})
return errors
class ReportAppConfig(DocumentSchema):
"""
Class for configuring how a user configurable report shows up in an app
"""
report_id = StringProperty(required=True)
header = DictProperty()
_report = None
@property
def report(self):
from corehq.apps.userreports.models import ReportConfiguration
if self._report is None:
self._report = ReportConfiguration.get(self.report_id)
return self._report
@property
def select_detail_id(self):
return 'reports.{}.select'.format(self.report_id)
@property
def summary_detail_id(self):
return 'reports.{}.summary'.format(self.report_id)
@property
def data_detail_id(self):
return 'reports.{}.data'.format(self.report_id)
def get_details(self):
yield (self.select_detail_id, self.select_details(), True)
yield (self.summary_detail_id, self.summary_details(), True)
yield (self.data_detail_id, self.data_details(), True)
def select_details(self):
return Detail(custom_xml=suite_xml.Detail(
id='reports.{}.select'.format(self.report_id),
title=suite_xml.Text(
locale=suite_xml.Locale(id=id_strings.report_menu()),
),
fields=[
suite_xml.Field(
header=suite_xml.Header(
text=suite_xml.Text(
locale=suite_xml.Locale(id=id_strings.report_name_header()),
)
),
template=suite_xml.Template(
text=suite_xml.Text(
xpath=suite_xml.Xpath(function='name'))
),
)
]
).serialize())
def summary_details(self):
def _get_graph_fields():
from corehq.apps.userreports.reports.specs import MultibarChartSpec
# todo: make this less hard-coded
for chart_config in self.report.charts:
if isinstance(chart_config, MultibarChartSpec):
def _column_to_series(column):
return suite_xml.Series(
nodeset="instance('reports')/reports/report[@id='{}']/rows/row".format(self.report_id),
x_function="column[@id='{}']".format(chart_config.x_axis_column),
y_function="column[@id='{}']".format(column),
)
yield suite_xml.Field(
header=suite_xml.Header(text=suite_xml.Text()),
template=suite_xml.GraphTemplate(
form='graph',
graph=suite_xml.Graph(
type='bar',
series=[_column_to_series(c) for c in chart_config.y_axis_columns],
)
)
)
return Detail(custom_xml=suite_xml.Detail(
id='reports.{}.summary'.format(self.report_id),
title=suite_xml.Text(
locale=suite_xml.Locale(id=id_strings.report_menu()),
),
fields=[
suite_xml.Field(
header=suite_xml.Header(
text=suite_xml.Text(
locale=suite_xml.Locale(id=id_strings.report_name_header()),
)
),
template=suite_xml.Template(
text=suite_xml.Text(
xpath=suite_xml.Xpath(function='name'))
),
),
suite_xml.Field(
header=suite_xml.Header(
text=suite_xml.Text(
locale=suite_xml.Locale(id=id_strings.report_description_header()),
)
),
template=suite_xml.Template(
text=suite_xml.Text(
xpath=suite_xml.Xpath(function='description'))
),
),
] + list(_get_graph_fields())
).serialize())
def data_details(self):
def _column_to_field(column):
return suite_xml.Field(
header=suite_xml.Header(
text=suite_xml.Text(
locale=suite_xml.Locale(
id=id_strings.report_column_header(self.report_id, column.column_id)
),
)
),
template=suite_xml.Template(
text=suite_xml.Text(
xpath=suite_xml.Xpath(function="column[@id='{}']".format(column.column_id)))
),
)
return Detail(custom_xml=suite_xml.Detail(
id='reports.{}.data'.format(self.report_id),
title=suite_xml.Text(
locale=suite_xml.Locale(id=id_strings.report_name(self.report_id)),
),
fields=[_column_to_field(c) for c in self.report.report_columns]
).serialize())
def get_entry(self):
return suite_xml.Entry(
form='fixmeclayton',
command=suite_xml.Command(
id='reports.{}'.format(self.report_id),
text=suite_xml.Text(
locale=suite_xml.Locale(id=id_strings.report_name(self.report_id)),
),
),
datums=[
suite_xml.SessionDatum(
detail_confirm=self.summary_detail_id,
detail_select=self.select_detail_id,
id='report_id_{}'.format(self.report_id),
nodeset="instance('reports')/reports/report[@id='{}']".format(self.report_id),
value='./@id',
),
# you are required to select something - even if you don't use it
suite_xml.SessionDatum(
detail_select=self.data_detail_id,
id='throwaway_{}'.format(self.report_id),
nodeset="instance('reports')/reports/report[@id='{}']/rows/row".format(self.report_id),
value="''",
)
]
)
class ReportModule(ModuleBase):
"""
Module for user configurable reports
"""
module_type = 'report'
report_configs = SchemaListProperty(ReportAppConfig)
forms = []
_loaded = False
@property
@memoized
def reports(self):
from corehq.apps.userreports.models import ReportConfiguration
return [
ReportConfiguration.wrap(doc) for doc in
get_docs(ReportConfiguration.get_db(), [r.report_id for r in self.report_configs])
]
@classmethod
def new_module(cls, name, lang):
module = ReportModule(
name={(lang or 'en'): name or ugettext("Reports")},
case_type='',
)
module.get_or_create_unique_id()
return module
def _load_reports(self):
if not self._loaded:
# load reports in bulk to avoid hitting the database for each one
for i, report in enumerate(self.reports):
self.report_configs[i]._report = report
self._loaded = True
def get_details(self):
self._load_reports()
for config in self.report_configs:
for details in config.get_details():
yield details
def get_custom_entries(self):
self._load_reports()
for config in self.report_configs:
yield config.get_entry()
def get_menus(self):
yield suite_xml.Menu(
id=id_strings.menu_id(self),
text=suite_xml.Text(
locale=suite_xml.Locale(id=id_strings.module_locale(self))
),
commands=[
suite_xml.Command(id=id_strings.report_command(config.report_id))
for config in self.report_configs
]
)
def uses_media(self):
# for now no media support for ReportModules
return False
class VersionedDoc(LazyAttachmentDoc):
"""
A document that keeps an auto-incrementing version number, knows how to make copies of itself,
delete a copy of itself, and revert back to an earlier copy of itself.
"""
domain = StringProperty()
copy_of = StringProperty()
version = IntegerProperty()
short_url = StringProperty()
short_odk_url = StringProperty()
short_odk_media_url = StringProperty()
_meta_fields = ['_id', '_rev', 'domain', 'copy_of', 'version', 'short_url', 'short_odk_url', 'short_odk_media_url']
@property
def id(self):
return self._id
def save(self, response_json=None, increment_version=None, **params):
if increment_version is None:
increment_version = not self.copy_of
if increment_version:
self.version = self.version + 1 if self.version else 1
super(VersionedDoc, self).save(**params)
if response_json is not None:
if 'update' not in response_json:
response_json['update'] = {}
response_json['update']['app-version'] = self.version
def make_build(self):
assert self.get_id
assert self.copy_of is None
cls = self.__class__
copies = cls.view('app_manager/applications', key=[self.domain, self._id, self.version], include_docs=True, limit=1).all()
if copies:
copy = copies[0]
else:
copy = deepcopy(self.to_json())
bad_keys = ('_id', '_rev', '_attachments',
'short_url', 'short_odk_url', 'short_odk_media_url', 'recipients')
for bad_key in bad_keys:
if bad_key in copy:
del copy[bad_key]
copy = cls.wrap(copy)
copy['copy_of'] = self._id
copy.copy_attachments(self)
return copy
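# Usage sketch (hedged): build = app.make_build(); build.save() -- the copy
# drops volatile keys (_id, _rev, _attachments, short URLs, recipients) and
# records copy_of = app._id, which delete_copy() and
# make_reversion_to_copy() later use to verify lineage.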
def copy_attachments(self, other, regexp=ATTACHMENT_REGEX):
for name in other.lazy_list_attachments() or {}:
if regexp is None or re.match(regexp, name):
self.lazy_put_attachment(other.lazy_fetch_attachment(name), name)
def make_reversion_to_copy(self, copy):
"""
Replaces couch doc with a copy of the backup ("copy").
Returns another Application/RemoteApp referring to this
updated couch doc. The returned doc should be used in place of
the original doc, i.e. it should be called as follows:
app = app.make_reversion_to_copy(copy)
app.save()
"""
if copy.copy_of != self._id:
raise VersioningError("%s is not a copy of %s" % (copy, self))
app = deepcopy(copy.to_json())
app['_rev'] = self._rev
app['_id'] = self._id
app['version'] = self.version
app['copy_of'] = None
if '_attachments' in app:
del app['_attachments']
cls = self.__class__
app = cls.wrap(app)
app.copy_attachments(copy)
return app
def delete_copy(self, copy):
if copy.copy_of != self._id:
raise VersioningError("%s is not a copy of %s" % (copy, self))
copy.delete_app()
copy.save(increment_version=False)
def scrub_source(self, source):
"""
To be overridden.
Use this to scrub out anything
that should be shown in the
application source, such as ids, etc.
"""
raise NotImplemented()
def export_json(self, dump_json=True):
source = deepcopy(self.to_json())
for field in self._meta_fields:
if field in source:
del source[field]
_attachments = {}
for name in source.get('_attachments', {}):
if re.match(ATTACHMENT_REGEX, name):
_attachments[name] = self.fetch_attachment(name)
source['_attachments'] = _attachments
self.scrub_source(source)
return json.dumps(source) if dump_json else source
@classmethod
def from_source(cls, source, domain):
for field in cls._meta_fields:
if field in source:
del source[field]
source['domain'] = domain
app = cls.wrap(source)
return app
def is_deleted(self):
return self.doc_type.endswith(DELETED_SUFFIX)
def unretire(self):
self.doc_type = self.get_doc_type()
self.save()
def get_doc_type(self):
if self.doc_type.endswith(DELETED_SUFFIX):
return self.doc_type[:-len(DELETED_SUFFIX)]
else:
return self.doc_type
def absolute_url_property(method):
"""
Helper for the various fully qualified application URLs
Turns a method returning an unqualified URL
into a property returning a fully qualified URL
(e.g., '/my_url/' => 'https://www.commcarehq.org/my_url/')
Expects `self.url_base` to be a fully qualified URL base
"""
@wraps(method)
def _inner(self):
return "%s%s" % (self.url_base, method(self))
return property(_inner)
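# Usage sketch (illustrative; mirrors the example in the docstring above):
# class MyDoc(object):
#     url_base = 'https://www.commcarehq.org'
#     @absolute_url_property
#     def my_url(self):
#         return '/my_url/'
# MyDoc().my_url  ->  'https://www.commcarehq.org/my_url/'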
class ApplicationBase(VersionedDoc, SnapshotMixin,
CommCareFeatureSupportMixin):
"""
Abstract base class for Application and RemoteApp.
Contains methods for generating the various files and zipping them into CommCare.jar
"""
recipients = StringProperty(default="")
# this is the supported way of specifying which commcare build to use
build_spec = SchemaProperty(BuildSpec)
platform = StringProperty(
choices=["nokia/s40", "nokia/s60", "winmo", "generic"],
default="nokia/s40"
)
text_input = StringProperty(
choices=['roman', 'native', 'custom-keys', 'qwerty'],
default="roman"
)
success_message = DictProperty()
# The following properties should only appear on saved builds
# built_with stores a record of CommCare build used in a saved app
built_with = SchemaProperty(BuildRecord)
build_signed = BooleanProperty(default=True)
built_on = DateTimeProperty(required=False)
build_comment = StringProperty()
comment_from = StringProperty()
build_broken = BooleanProperty(default=False)
# not used yet, but nice for tagging/debugging
# currently only canonical value is 'incomplete-build',
# for when build resources aren't found where they should be
build_broken_reason = StringProperty()
# watch out for a past bug:
# when reverting to a build that happened to be released,
# the released flag got copied into the new app doc, and when new releases were made,
# they were automatically starred
# AFAIK this is fixed in code, but it may rear its ugly head in an as-yet-not-understood
# way for apps that already had this problem. Just keep an eye out
is_released = BooleanProperty(default=False)
# django-style salted hash of the admin password
admin_password = StringProperty()
# a=Alphanumeric, n=Numeric, x=Neither (not allowed)
admin_password_charset = StringProperty(choices=['a', 'n', 'x'], default='n')
# This is here instead of in Application because it needs to be available in stub representation
application_version = StringProperty(default=APP_V2, choices=[APP_V1, APP_V2], required=False)
langs = StringListProperty()
# only the languages that go in the build
build_langs = StringListProperty()
secure_submissions = BooleanProperty(default=False)
# metadata for data platform
amplifies_workers = StringProperty(
choices=['yes', 'no', 'not_set'],
default='not_set'
)
amplifies_project = StringProperty(
choices=['yes', 'no', 'not_set'],
default='not_set'
)
# exchange properties
cached_properties = DictProperty()
description = StringProperty()
deployment_date = DateTimeProperty()
phone_model = StringProperty()
user_type = StringProperty()
attribution_notes = StringProperty()
# always false for RemoteApp
case_sharing = BooleanProperty(default=False)
@classmethod
def wrap(cls, data):
# scrape for old conventions and get rid of them
if 'commcare_build' in data:
version, build_number = data['commcare_build'].split('/')
data['build_spec'] = BuildSpec.from_string("%s/latest" % version).to_json()
del data['commcare_build']
if 'commcare_tag' in data:
version, build_number = current_builds.TAG_MAP[data['commcare_tag']]
data['build_spec'] = BuildSpec.from_string("%s/latest" % version).to_json()
del data['commcare_tag']
if data.has_key("built_with") and isinstance(data['built_with'], basestring):
data['built_with'] = BuildSpec.from_string(data['built_with']).to_json()
if 'native_input' in data:
if 'text_input' not in data:
data['text_input'] = 'native' if data['native_input'] else 'roman'
del data['native_input']
should_save = False
if 'original_doc' in data:
data['copy_history'] = [data.pop('original_doc')]
should_save = True
data["description"] = data.get('description') or data.get('short_description')
self = super(ApplicationBase, cls).wrap(data)
if not self.build_spec or self.build_spec.is_null():
self.build_spec = get_default_build_spec(self.application_version)
if should_save:
self.save()
return self
@classmethod
def get_latest_build(cls, domain, app_id):
build = cls.view('app_manager/saved_app',
startkey=[domain, app_id, {}],
endkey=[domain, app_id],
descending=True,
limit=1).one()
return build if build else None
def rename_lang(self, old_lang, new_lang):
validate_lang(new_lang)
def is_remote_app(self):
return False
def get_latest_app(self, released_only=True):
if released_only:
return get_app(self.domain, self.get_id, latest=True)
else:
return self.view('app_manager/applications',
startkey=[self.domain, self.get_id, {}],
endkey=[self.domain, self.get_id],
include_docs=True,
limit=1,
descending=True,
).first()
def get_latest_saved(self):
"""
This looks really similar to get_latest_app, not sure why tim added
"""
if not hasattr(self, '_latest_saved'):
released = self.__class__.view('app_manager/applications',
startkey=['^ReleasedApplications', self.domain, self._id, {}],
endkey=['^ReleasedApplications', self.domain, self._id],
limit=1,
descending=True,
include_docs=True
)
if len(released) > 0:
self._latest_saved = released.all()[0]
else:
saved = self.__class__.view('app_manager/saved_app',
startkey=[self.domain, self._id, {}],
endkey=[self.domain, self._id],
descending=True,
limit=1,
include_docs=True
)
if len(saved) > 0:
self._latest_saved = saved.all()[0]
else:
self._latest_saved = None # do not return this app!
return self._latest_saved
def set_admin_password(self, raw_password):
salt = os.urandom(5).encode('hex')
self.admin_password = make_password(raw_password, salt=salt)
if raw_password.isnumeric():
self.admin_password_charset = 'n'
elif raw_password.isalnum():
self.admin_password_charset = 'a'
else:
self.admin_password_charset = 'x'
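# Classification sketch (illustrative inputs): '123456' -> charset 'n'
# (numeric), 'abc123' -> 'a' (alphanumeric), 'p@ss!' -> 'x' (neither), which
# check_password_charset() below compares against the profile's
# password_format requirement.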
def check_password_charset(self):
errors = []
if hasattr(self, 'profile'):
password_format = self.profile.get('properties', {}).get('password_format', 'n')
message = ('Your app requires {0} passwords '
'but the admin password is not {0}')
if password_format == 'n' and self.admin_password_charset in 'ax':
errors.append({'type': 'password_format',
'message': message.format('numeric')})
if password_format == 'a' and self.admin_password_charset in 'x':
errors.append({'type': 'password_format',
'message': message.format('alphanumeric')})
return errors
def get_build(self):
return self.build_spec.get_build()
@property
def build_version(self):
# `LooseVersion`s are smart!
# LooseVersion('2.12.0') > '2.2'
# (even though '2.12.0' < '2.2')
if self.build_spec.version:
return LooseVersion(self.build_spec.version)
def get_preview_build(self):
preview = self.get_build()
for path in getattr(preview, '_attachments', {}):
if path.startswith('Generic/WebDemo'):
return preview
return CommCareBuildConfig.fetch().preview.get_build()
@property
def commcare_minor_release(self):
"""This is mostly just for views"""
return '%d.%d' % self.build_spec.minor_release()
def get_build_label(self):
for item in CommCareBuildConfig.fetch().menu:
if item['build'].to_string() == self.build_spec.to_string():
return item['label']
return self.build_spec.get_label()
@property
def short_name(self):
return self.name if len(self.name) <= 12 else '%s..' % self.name[:10]
@property
def has_careplan_module(self):
return False
@property
def url_base(self):
return get_url_base()
@absolute_url_property
def post_url(self):
if self.secure_submissions:
url_name = 'receiver_secure_post_with_app_id'
else:
url_name = 'receiver_post_with_app_id'
return reverse(url_name, args=[self.domain, self.get_id])
@absolute_url_property
def key_server_url(self):
return reverse('key_server_url', args=[self.domain])
@absolute_url_property
def ota_restore_url(self):
return reverse('corehq.apps.ota.views.restore', args=[self.domain])
@absolute_url_property
def form_record_url(self):
return '/a/%s/api/custom/pact_formdata/v1/' % self.domain
@absolute_url_property
def hq_profile_url(self):
return "%s?latest=true" % (
reverse('download_profile', args=[self.domain, self._id])
)
@absolute_url_property
def hq_media_profile_url(self):
return "%s?latest=true" % (
reverse('download_media_profile', args=[self.domain, self._id])
)
@property
def profile_loc(self):
return "jr://resource/profile.xml"
@absolute_url_property
def jar_url(self):
return reverse('corehq.apps.app_manager.views.download_jar', args=[self.domain, self._id])
def get_jar_path(self):
spec = {
'nokia/s40': 'Nokia/S40',
'nokia/s60': 'Nokia/S60',
'generic': 'Generic/Default',
'winmo': 'Native/WinMo'
}[self.platform]
if self.platform in ('nokia/s40', 'nokia/s60'):
spec += {
('native',): '-native-input',
('roman',): '-generic',
('custom-keys',): '-custom-keys',
('qwerty',): '-qwerty'
}[(self.text_input,)]
return spec
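# Resolution sketch: platform 'nokia/s40' with text_input 'roman' yields
# 'Nokia/S40-generic', while platform 'generic' skips the suffix table and
# stays 'Generic/Default'.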
def get_jadjar(self):
return self.get_build().get_jadjar(self.get_jar_path())
def validate_fixtures(self):
if not domain_has_privilege(self.domain, privileges.LOOKUP_TABLES):
# remote apps don't support get_forms yet.
# for now they can circumvent the fixture limitation. sneaky bastards.
if hasattr(self, 'get_forms'):
for form in self.get_forms():
if form.has_fixtures:
raise PermissionDenied(_(
"Usage of lookup tables is not supported by your "
"current subscription. Please upgrade your "
"subscription before using this feature."
))
def validate_jar_path(self):
build = self.get_build()
setting = commcare_settings.SETTINGS_LOOKUP['hq']['text_input']
value = self.text_input
setting_version = setting['since'].get(value)
if setting_version:
setting_version = tuple(map(int, setting_version.split('.')))
my_version = build.minor_release()
if my_version < setting_version:
i = setting['values'].index(value)
assert i != -1
name = _(setting['value_names'][i])
raise AppEditingError((
'%s Text Input is not supported '
'in CommCare versions before %s.%s. '
'(You are using %s.%s)'
) % ((name,) + setting_version + my_version))
@property
def jad_settings(self):
settings = {
'JavaRosa-Admin-Password': self.admin_password,
'Profile': self.profile_loc,
'MIDlet-Jar-URL': self.jar_url,
            #'MIDlet-Name': self.name,
'CommCare-Release': "true",
}
if self.build_version < '2.8':
settings['Build-Number'] = self.version
return settings
def create_jadjar(self, save=False):
try:
return (
self.lazy_fetch_attachment('CommCare.jad'),
self.lazy_fetch_attachment('CommCare.jar'),
)
except (ResourceError, KeyError):
built_on = datetime.utcnow()
all_files = self.create_all_files()
jad_settings = {
                'Released-on': built_on.strftime("%Y-%b-%d %H:%M"),  # e.g. 2011-Apr-11 20:45
}
jad_settings.update(self.jad_settings)
jadjar = self.get_jadjar().pack(all_files, jad_settings)
if save:
self.built_on = built_on
self.built_with = BuildRecord(
version=jadjar.version,
build_number=jadjar.build_number,
signed=jadjar.signed,
datetime=built_on,
)
self.lazy_put_attachment(jadjar.jad, 'CommCare.jad')
self.lazy_put_attachment(jadjar.jar, 'CommCare.jar')
for filepath in all_files:
self.lazy_put_attachment(all_files[filepath],
'files/%s' % filepath)
return jadjar.jad, jadjar.jar
def validate_app(self):
errors = []
errors.extend(self.check_password_charset())
try:
self.validate_fixtures()
self.validate_jar_path()
self.create_all_files()
except (AppEditingError, XFormValidationError, XFormException,
PermissionDenied) as e:
errors.append({'type': 'error', 'message': unicode(e)})
except Exception as e:
if settings.DEBUG:
raise
# this is much less useful/actionable without a URL
# so make sure to include the request
logging.error('Unexpected error building app', exc_info=True,
extra={'request': view_utils.get_request()})
errors.append({'type': 'error', 'message': 'unexpected error: %s' % e})
return errors
@absolute_url_property
def odk_profile_url(self):
return reverse('corehq.apps.app_manager.views.download_odk_profile', args=[self.domain, self._id])
@absolute_url_property
def odk_media_profile_url(self):
return reverse('corehq.apps.app_manager.views.download_odk_media_profile', args=[self.domain, self._id])
@property
def odk_profile_display_url(self):
return self.short_odk_url or self.odk_profile_url
@property
def odk_media_profile_display_url(self):
return self.short_odk_media_url or self.odk_media_profile_url
def get_odk_qr_code(self, with_media=False):
"""Returns a QR code, as a PNG to install on CC-ODK"""
try:
return self.lazy_fetch_attachment("qrcode.png")
except ResourceNotFound:
try:
from pygooglechart import QRChart
except ImportError:
raise Exception(
"Aw shucks, someone forgot to install "
"the google chart library on this machine "
"and this feature needs it. "
"To get it, run easy_install pygooglechart. "
"Until you do that this won't work."
)
HEIGHT = WIDTH = 250
code = QRChart(HEIGHT, WIDTH)
code.add_data(self.odk_profile_url if not with_media else self.odk_media_profile_url)
# "Level L" error correction with a 0 pixel margin
code.set_ec('L', 0)
f, fname = tempfile.mkstemp()
code.download(fname)
os.close(f)
with open(fname, "rb") as f:
png_data = f.read()
self.lazy_put_attachment(png_data, "qrcode.png",
content_type="image/png")
return png_data
def generate_shortened_url(self, url_type):
try:
if settings.BITLY_LOGIN:
view_name = 'corehq.apps.app_manager.views.{}'.format(url_type)
long_url = "{}{}".format(get_url_base(), reverse(view_name, args=[self.domain, self._id]))
shortened_url = bitly.shorten(long_url)
else:
shortened_url = None
except Exception:
logging.exception("Problem creating bitly url for app %s. Do you have network?" % self.get_id)
else:
return shortened_url
def get_short_url(self):
if not self.short_url:
self.short_url = self.generate_shortened_url('download_jad')
self.save()
return self.short_url
def get_short_odk_url(self, with_media=False):
if with_media:
if not self.short_odk_media_url:
self.short_odk_media_url = self.generate_shortened_url('download_odk_media_profile')
self.save()
return self.short_odk_media_url
else:
if not self.short_odk_url:
self.short_odk_url = self.generate_shortened_url('download_odk_profile')
self.save()
return self.short_odk_url
def fetch_jar(self):
return self.get_jadjar().fetch_jar()
def make_build(self, comment=None, user_id=None, previous_version=None):
copy = super(ApplicationBase, self).make_build()
if not copy._id:
# I expect this always to be the case
# but check explicitly so as not to change the _id if it exists
copy._id = copy.get_db().server.next_uuid()
copy.set_form_versions(previous_version)
copy.set_media_versions(previous_version)
copy.create_jadjar(save=True)
        # Since this is hard to put in a test, assert here instead:
        # copy._id should never be None at this point (tests would error).
        assert copy._id
copy.build_comment = comment
copy.comment_from = user_id
if user_id:
user = CouchUser.get(user_id)
if not user.has_built_app:
user.has_built_app = True
user.save()
copy.is_released = False
return copy
def delete_app(self):
self.doc_type += '-Deleted'
record = DeleteApplicationRecord(
domain=self.domain,
app_id=self.id,
datetime=datetime.utcnow()
)
record.save()
return record
def set_form_versions(self, previous_version):
# by default doing nothing here is fine.
pass
def set_media_versions(self, previous_version):
pass
def validate_lang(lang):
if not re.match(r'^[a-z]{2,3}(-[a-z]*)?$', lang):
raise ValueError("Invalid Language")
def validate_property(property):
"""
Validate a case property name
>>> validate_property('parent/maternal-grandmother_fullName')
>>> validate_property('foo+bar')
Traceback (most recent call last):
...
ValueError: Invalid Property
"""
# this regex is also copied in propertyList.ejs
if not re.match(r'^[a-zA-Z][\w_-]*(/[a-zA-Z][\w_-]*)*$', property):
raise ValueError("Invalid Property")
def validate_detail_screen_field(field):
# If you change here, also change here:
# corehq/apps/app_manager/static/app_manager/js/detail-screen-config.js
field_re = r'^([a-zA-Z][\w_-]*:)*([a-zA-Z][\w_-]*/)*#?[a-zA-Z][\w_-]*$'
if not re.match(field_re, field):
raise ValueError("Invalid Sort Field")
class SavedAppBuild(ApplicationBase):
def to_saved_build_json(self, timezone):
data = super(SavedAppBuild, self).to_json().copy()
for key in ('modules', 'user_registration',
                    '_attachments', 'profile', 'translations',
                    'description', 'short_description'):
data.pop(key, None)
built_on_user_time = ServerTime(self.built_on).user_time(timezone)
data.update({
'id': self.id,
'built_on_date': built_on_user_time.ui_string(USER_DATE_FORMAT),
'built_on_time': built_on_user_time.ui_string(USER_TIME_FORMAT),
'build_label': self.built_with.get_label(),
'jar_path': self.get_jar_path(),
'short_name': self.short_name,
'enable_offline_install': self.enable_offline_install,
})
comment_from = data['comment_from']
if comment_from:
try:
comment_user = CouchUser.get(comment_from)
except ResourceNotFound:
data['comment_user_name'] = comment_from
else:
data['comment_user_name'] = comment_user.full_name
return data
class Application(ApplicationBase, TranslationMixin, HQMediaMixin):
"""
An Application that can be created entirely through the online interface
"""
user_registration = SchemaProperty(UserRegistrationForm)
show_user_registration = BooleanProperty(default=False, required=True)
modules = SchemaListProperty(ModuleBase)
name = StringProperty()
# profile's schema is {'features': {}, 'properties': {}, 'custom_properties': {}}
# ended up not using a schema because properties is a reserved word
profile = DictProperty()
use_custom_suite = BooleanProperty(default=False)
cloudcare_enabled = BooleanProperty(default=False)
translation_strategy = StringProperty(default='select-known',
choices=app_strings.CHOICES.keys())
commtrack_requisition_mode = StringProperty(choices=CT_REQUISITION_MODES)
auto_gps_capture = BooleanProperty(default=False)
@property
@memoized
def commtrack_enabled(self):
if settings.UNIT_TESTING:
return False # override with .tests.util.commtrack_enabled
domain_obj = Domain.get_by_name(self.domain) if self.domain else None
return domain_obj.commtrack_enabled if domain_obj else False
@classmethod
def wrap(cls, data):
for module in data.get('modules', []):
for attr in ('case_label', 'referral_label'):
                if attr not in module:
module[attr] = {}
for lang in data['langs']:
if not module['case_label'].get(lang):
module['case_label'][lang] = commcare_translations.load_translations(lang).get('cchq.case', 'Cases')
if not module['referral_label'].get(lang):
module['referral_label'][lang] = commcare_translations.load_translations(lang).get('cchq.referral', 'Referrals')
if not data.get('build_langs'):
data['build_langs'] = data['langs']
data.pop('commtrack_enabled', None) # Remove me after migrating apps
self = super(Application, cls).wrap(data)
# make sure all form versions are None on working copies
if not self.copy_of:
for form in self.get_forms():
form.version = None
# weird edge case where multimedia_map gets set to null and causes issues
if self.multimedia_map is None:
self.multimedia_map = {}
return self
def save(self, *args, **kwargs):
super(Application, self).save(*args, **kwargs)
# Import loop if this is imported at the top
# TODO: revamp so signal_connections <- models <- signals
from corehq.apps.app_manager import signals
signals.app_post_save.send(Application, application=self)
def make_reversion_to_copy(self, copy):
app = super(Application, self).make_reversion_to_copy(copy)
for form in app.get_forms():
# reset the form's validation cache, since the form content is
# likely to have changed in the revert!
form.validation_cache = None
form.version = None
app.build_broken = False
return app
@property
def profile_url(self):
return self.hq_profile_url
@property
def media_profile_url(self):
return self.hq_media_profile_url
@property
def url_base(self):
return get_url_base()
@absolute_url_property
def suite_url(self):
return reverse('download_suite', args=[self.domain, self.get_id])
@property
def suite_loc(self):
if self.enable_relative_suite_path:
return './suite.xml'
else:
return "jr://resource/suite.xml"
@absolute_url_property
def media_suite_url(self):
return reverse('download_media_suite', args=[self.domain, self.get_id])
@property
def media_suite_loc(self):
if self.enable_relative_suite_path:
return "./media_suite.xml"
else:
return "jr://resource/media_suite.xml"
@property
def default_language(self):
return self.build_langs[0] if len(self.build_langs) > 0 else "en"
def fetch_xform(self, module_id=None, form_id=None, form=None):
if not form:
form = self.get_module(module_id).get_form(form_id)
return form.validate_form().render_xform().encode('utf-8')
def set_form_versions(self, previous_version):
# this will make builds slower, but they're async now so hopefully
# that's fine.
def _hash(val):
return hashlib.md5(val).hexdigest()
if previous_version:
for form_stuff in self.get_forms(bare=False):
filename = 'files/%s' % self.get_form_filename(**form_stuff)
form = form_stuff["form"]
form_version = None
try:
previous_form = previous_version.get_form(form.unique_id)
# take the previous version's compiled form as-is
# (generation code may have changed since last build)
previous_source = previous_version.fetch_attachment(filename)
except (ResourceNotFound, FormNotFoundException):
pass
else:
previous_hash = _hash(previous_source)
# hack - temporarily set my version to the previous version
# so that that's not treated as the diff
previous_form_version = previous_form.get_version()
form.version = previous_form_version
my_hash = _hash(self.fetch_xform(form=form))
if previous_hash == my_hash:
form_version = previous_form_version
if form_version is None:
form.version = None
else:
form.version = form_version
def set_media_versions(self, previous_version):
# access to .multimedia_map is slow
prev_multimedia_map = previous_version.multimedia_map if previous_version else {}
for path, map_item in self.multimedia_map.iteritems():
prev_map_item = prev_multimedia_map.get(path, None)
if prev_map_item and prev_map_item.unique_id:
# Re-use the id so CommCare knows it's the same resource
map_item.unique_id = prev_map_item.unique_id
if (prev_map_item and prev_map_item.version
and prev_map_item.multimedia_id == map_item.multimedia_id):
map_item.version = prev_map_item.version
else:
map_item.version = self.version
def ensure_module_unique_ids(self, should_save=False):
"""
Creates unique_ids for modules that don't have unique_id attributes
should_save: the doc will be saved only if should_save is set to True
WARNING: If called on the same doc in different requests without saving,
        this function will set a different uuid each time,
likely causing unexpected behavior
"""
if any(not mod.unique_id for mod in self.modules):
for mod in self.modules:
mod.get_or_create_unique_id()
if should_save:
self.save()
def create_app_strings(self, lang):
gen = app_strings.CHOICES[self.translation_strategy]
if lang == 'default':
return gen.create_default_app_strings(self)
else:
return gen.create_app_strings(self, lang)
@property
def skip_validation(self):
properties = (self.profile or {}).get('properties', {})
return properties.get('cc-content-valid', 'yes')
@property
def jad_settings(self):
s = super(Application, self).jad_settings
s.update({
'Skip-Validation': self.skip_validation,
})
return s
def create_profile(self, is_odk=False, with_media=False, template='app_manager/profile.xml'):
self__profile = self.profile
app_profile = defaultdict(dict)
for setting in commcare_settings.SETTINGS:
setting_type = setting['type']
setting_id = setting['id']
if setting_type not in ('properties', 'features'):
setting_value = None
elif setting_id not in self__profile.get(setting_type, {}):
if 'commcare_default' in setting and setting['commcare_default'] != setting['default']:
setting_value = setting['default']
else:
setting_value = None
else:
setting_value = self__profile[setting_type][setting_id]
if setting_value:
app_profile[setting_type][setting_id] = {
'value': setting_value,
'force': setting.get('force', False)
}
# assert that it gets explicitly set once per loop
del setting_value
if self.case_sharing:
app_profile['properties']['server-tether'] = {
'force': True,
'value': 'sync',
}
logo_refs = [logo_name for logo_name in self.logo_refs if logo_name in ANDROID_LOGO_PROPERTY_MAPPING]
if logo_refs and domain_has_privilege(self.domain, privileges.COMMCARE_LOGO_UPLOADER):
for logo_name in logo_refs:
app_profile['properties'][ANDROID_LOGO_PROPERTY_MAPPING[logo_name]] = {
'value': self.logo_refs[logo_name]['path'],
}
if with_media:
profile_url = self.media_profile_url if not is_odk else (self.odk_media_profile_url + '?latest=true')
else:
profile_url = self.profile_url if not is_odk else (self.odk_profile_url + '?latest=true')
if toggles.CUSTOM_PROPERTIES.enabled(self.domain) and "custom_properties" in self__profile:
app_profile['custom_properties'].update(self__profile['custom_properties'])
return render_to_string(template, {
'is_odk': is_odk,
'app': self,
'profile_url': profile_url,
'app_profile': app_profile,
'cc_user_domain': cc_user_domain(self.domain),
'include_media_suite': with_media,
'uniqueid': self.copy_of or self.id,
'name': self.name,
'descriptor': u"Profile File"
}).encode('utf-8')
@property
def custom_suite(self):
try:
return self.lazy_fetch_attachment('custom_suite.xml')
except ResourceNotFound:
return ""
def set_custom_suite(self, value):
self.put_attachment(value, 'custom_suite.xml')
def create_suite(self):
if self.application_version == APP_V1:
            template = 'app_manager/suite-%s.xml' % self.application_version
return render_to_string(template, {
'app': self,
'langs': ["default"] + self.build_langs
})
else:
return suite_xml.SuiteGenerator(self, is_usercase_in_use(self.domain)).generate_suite()
def create_media_suite(self):
return suite_xml.MediaSuiteGenerator(self).generate_suite()
@classmethod
def get_form_filename(cls, type=None, form=None, module=None):
if type == 'user_registration':
return 'user_registration.xml'
else:
return 'modules-%s/forms-%s.xml' % (module.id, form.id)
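    # Illustrative sketch (hypothetical `m` and `f` objects; not part of the
    # original class): the filename encodes the module and form indexes.
    #
    #     >>> Application.get_form_filename(type='user_registration')
    #     'user_registration.xml'
    #     >>> Application.get_form_filename(type='module_form', module=m, form=f)
    #     'modules-0/forms-2.xml'    # when m.id == 0 and f.id == 2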
def create_all_files(self):
files = {
'profile.xml': self.create_profile(is_odk=False),
'profile.ccpr': self.create_profile(is_odk=True),
'media_profile.xml': self.create_profile(is_odk=False, with_media=True),
'media_profile.ccpr': self.create_profile(is_odk=True, with_media=True),
'suite.xml': self.create_suite(),
'media_suite.xml': self.create_media_suite(),
}
for lang in ['default'] + self.build_langs:
files["%s/app_strings.txt" % lang] = self.create_app_strings(lang)
for form_stuff in self.get_forms(bare=False):
filename = self.get_form_filename(**form_stuff)
form = form_stuff['form']
files[filename] = self.fetch_xform(form=form)
return files
get_modules = IndexedSchema.Getter('modules')
@parse_int([1])
def get_module(self, i):
try:
return self.modules[i].with_id(i % len(self.modules), self)
except IndexError:
raise ModuleNotFoundException()
def get_user_registration(self):
form = self.user_registration
form._app = self
if not (self._id and self._attachments and form.source):
form.source = load_form_template('register_user.xhtml')
return form
def get_module_by_unique_id(self, unique_id):
def matches(module):
return module.get_or_create_unique_id() == unique_id
for obj in self.get_modules():
if matches(obj):
return obj
raise ModuleNotFoundException(
("Module in app '%s' with unique id '%s' not found"
% (self.id, unique_id)))
def get_forms(self, bare=True):
if self.show_user_registration:
yield self.get_user_registration() if bare else {
'type': 'user_registration',
'form': self.get_user_registration()
}
for module in self.get_modules():
for form in module.get_forms():
yield form if bare else {
'type': 'module_form',
'module': module,
'form': form
}
def get_form(self, unique_form_id, bare=True):
def matches(form):
return form.get_unique_id() == unique_form_id
for obj in self.get_forms(bare):
if matches(obj if bare else obj['form']):
return obj
raise FormNotFoundException(
("Form in app '%s' with unique id '%s' not found"
% (self.id, unique_form_id)))
def get_form_location(self, unique_form_id):
for m_index, module in enumerate(self.get_modules()):
for f_index, form in enumerate(module.get_forms()):
if unique_form_id == form.unique_id:
return m_index, f_index
raise KeyError("Form in app '%s' with unique id '%s' not found" % (self.id, unique_form_id))
@classmethod
def new_app(cls, domain, name, application_version, lang="en"):
app = cls(domain=domain, modules=[], name=name, langs=[lang], build_langs=[lang], application_version=application_version)
return app
def add_module(self, module):
self.modules.append(module)
return self.get_module(-1)
def delete_module(self, module_unique_id):
try:
module = self.get_module_by_unique_id(module_unique_id)
except ModuleNotFoundException:
return None
record = DeleteModuleRecord(
domain=self.domain,
app_id=self.id,
module_id=module.id,
module=module,
datetime=datetime.utcnow()
)
del self.modules[module.id]
record.save()
return record
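    # Illustrative sketch (hypothetical unique id; not part of the original
    # class): deletion is recorded rather than destructive, so it can be
    # undone later from the saved record.
    #
    #     >>> record = app.delete_module('abc123def456')
    #     >>> record.undo()    # re-inserts the module at its former index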
def new_form(self, module_id, name, lang, attachment=""):
module = self.get_module(module_id)
return module.new_form(name, lang, attachment)
def delete_form(self, module_unique_id, form_unique_id):
try:
module = self.get_module_by_unique_id(module_unique_id)
form = self.get_form(form_unique_id)
except (ModuleNotFoundException, FormNotFoundException):
return None
record = DeleteFormRecord(
domain=self.domain,
app_id=self.id,
module_unique_id=module_unique_id,
form_id=form.id,
form=form,
datetime=datetime.utcnow(),
)
record.save()
del module['forms'][form.id]
return record
def rename_lang(self, old_lang, new_lang):
validate_lang(new_lang)
if old_lang == new_lang:
return
if new_lang in self.langs:
raise AppEditingError("Language %s already exists!" % new_lang)
        for i, lang in enumerate(self.langs):
if lang == old_lang:
self.langs[i] = new_lang
for module in self.get_modules():
module.rename_lang(old_lang, new_lang)
_rename_key(self.translations, old_lang, new_lang)
def rearrange_modules(self, i, j):
modules = self.modules
try:
modules.insert(i, modules.pop(j))
except IndexError:
raise RearrangeError()
self.modules = modules
def rearrange_forms(self, to_module_id, from_module_id, i, j):
"""
        If the case types of the two modules conflict,
        ConflictingCaseTypeError is raised,
        but the rearrangement (confusingly) goes through anyway.
        This is intentional.
"""
to_module = self.get_module(to_module_id)
from_module = self.get_module(from_module_id)
try:
form = from_module.forms.pop(j)
to_module.add_insert_form(from_module, form, index=i, with_source=True)
except IndexError:
raise RearrangeError()
if to_module.case_type != from_module.case_type:
raise ConflictingCaseTypeError()
def scrub_source(self, source):
def change_unique_id(form):
unique_id = form['unique_id']
new_unique_id = FormBase.generate_id()
form['unique_id'] = new_unique_id
if ("%s.xml" % unique_id) in source['_attachments']:
source['_attachments']["%s.xml" % new_unique_id] = source['_attachments'].pop("%s.xml" % unique_id)
return new_unique_id
change_unique_id(source['user_registration'])
id_changes = {}
for m, module in enumerate(source['modules']):
for f, form in enumerate(module['forms']):
old_id = form['unique_id']
new_id = change_unique_id(source['modules'][m]['forms'][f])
id_changes[old_id] = new_id
for reference_path in form_id_references:
for reference in reference_path.find(source):
if reference.value in id_changes:
jsonpath_update(reference, id_changes[reference.value])
def copy_form(self, module_id, form_id, to_module_id):
"""
        If the case types of the two modules conflict,
        ConflictingCaseTypeError is raised,
        but the copying (confusingly) goes through anyway.
        This is intentional.
"""
from_module = self.get_module(module_id)
form = from_module.get_form(form_id)
to_module = self.get_module(to_module_id)
self._copy_form(from_module, form, to_module, rename=True)
def _copy_form(self, from_module, form, to_module, *args, **kwargs):
if not form.source:
raise BlankXFormError()
copy_source = deepcopy(form.to_json())
if 'unique_id' in copy_source:
del copy_source['unique_id']
if 'rename' in kwargs and kwargs['rename']:
for lang, name in copy_source['name'].iteritems():
with override(lang):
copy_source['name'][lang] = _('Copy of {name}').format(name=name)
copy_form = to_module.add_insert_form(from_module, FormBase.wrap(copy_source))
save_xform(self, copy_form, form.source)
if from_module['case_type'] != to_module['case_type']:
raise ConflictingCaseTypeError()
def convert_module_to_advanced(self, module_id):
from_module = self.get_module(module_id)
name = {lang: u'{} (advanced)'.format(name) for lang, name in from_module.name.items()}
case_details = deepcopy(from_module.case_details.to_json())
to_module = AdvancedModule(
name=name,
forms=[],
case_type=from_module.case_type,
case_label=from_module.case_label,
put_in_root=from_module.put_in_root,
case_list=from_module.case_list,
case_details=DetailPair.wrap(case_details),
product_details=DetailPair(
short=Detail(
columns=[
DetailColumn(
format='plain',
header={'en': ugettext("Product")},
field='name',
model='product',
),
],
),
long=Detail(),
),
)
to_module.get_or_create_unique_id()
to_module = self.add_module(to_module)
for form in from_module.get_forms():
self._copy_form(from_module, form, to_module)
return to_module
@cached_property
def has_case_management(self):
for module in self.get_modules():
for form in module.get_forms():
if len(form.active_actions()) > 0:
return True
return False
@memoized
def case_type_exists(self, case_type):
return case_type in self.get_case_types()
@memoized
def get_case_types(self):
extra_types = set()
if is_usercase_in_use(self.domain):
extra_types.add(USERCASE_TYPE)
return set(chain(*[m.get_case_types() for m in self.get_modules()])) | extra_types
def has_media(self):
return len(self.multimedia_map) > 0
@memoized
def get_xmlns_map(self):
xmlns_map = defaultdict(list)
for form in self.get_forms():
xmlns_map[form.xmlns].append(form)
return xmlns_map
def get_form_by_xmlns(self, xmlns, log_missing=True):
if xmlns == "http://code.javarosa.org/devicereport":
return None
forms = self.get_xmlns_map()[xmlns]
if len(forms) != 1:
if log_missing or len(forms) > 1:
logging.error('App %s in domain %s has %s forms with xmlns %s' % (
self.get_id,
self.domain,
len(forms),
xmlns,
))
return None
else:
form, = forms
return form
def get_questions(self, xmlns):
form = self.get_form_by_xmlns(xmlns)
if not form:
return []
return form.get_questions(self.langs)
def validate_app(self):
xmlns_count = defaultdict(int)
errors = []
for lang in self.langs:
if not lang:
errors.append({'type': 'empty lang'})
if not self.modules:
errors.append({'type': "no modules"})
for module in self.get_modules():
errors.extend(module.validate_for_build())
for form in self.get_forms():
errors.extend(form.validate_for_build(validate_module=False))
# make sure that there aren't duplicate xmlns's
xmlns_count[form.xmlns] += 1
for xmlns in xmlns_count:
if xmlns_count[xmlns] > 1:
errors.append({'type': "duplicate xmlns", "xmlns": xmlns})
if any(not module.unique_id for module in self.get_modules()):
raise ModuleIdMissingException
modules_dict = {m.unique_id: m for m in self.get_modules()}
def _parent_select_fn(module):
if hasattr(module, 'parent_select') and module.parent_select.active:
return module.parent_select.module_id
if self._has_dependency_cycle(modules_dict, _parent_select_fn):
errors.append({'type': 'parent cycle'})
errors.extend(self._child_module_errors(modules_dict))
if not errors:
errors = super(Application, self).validate_app()
return errors
def _has_dependency_cycle(self, modules, neighbour_id_fn):
"""
Detect dependency cycles given modules and the neighbour_id_fn
:param modules: A mapping of module unique_ids to Module objects
        :param neighbour_id_fn: function to get the neighbour module unique_id
:return: True if there is a cycle in the module relationship graph
"""
visited = set()
completed = set()
def cycle_helper(m):
if m.id in visited:
if m.id in completed:
return False
return True
visited.add(m.id)
parent = modules.get(neighbour_id_fn(m), None)
if parent is not None and cycle_helper(parent):
return True
completed.add(m.id)
return False
for module in modules.values():
if cycle_helper(module):
return True
return False
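    # Illustrative sketch (toy stand-in objects and a hypothetical `app`
    # instance; not part of the original class): a two-module loop in the
    # neighbour relation is reported as a cycle.
    #
    #     >>> class M(object):
    #     ...     def __init__(self, id, parent):
    #     ...         self.id, self.parent = id, parent
    #     >>> a, b = M('a', 'b'), M('b', 'a')
    #     >>> app._has_dependency_cycle({'a': a, 'b': b}, lambda m: m.parent)
    #     True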
def _child_module_errors(self, modules_dict):
module_errors = []
def _root_module_fn(module):
if hasattr(module, 'root_module_id'):
return module.root_module_id
if self._has_dependency_cycle(modules_dict, _root_module_fn):
module_errors.append({'type': 'root cycle'})
module_ids = set([m.unique_id for m in self.get_modules()])
root_ids = set([_root_module_fn(m) for m in self.get_modules() if _root_module_fn(m) is not None])
if not root_ids.issubset(module_ids):
module_errors.append({'type': 'unknown root'})
return module_errors
@classmethod
def get_by_xmlns(cls, domain, xmlns):
r = cls.get_db().view('exports_forms/by_xmlns',
key=[domain, {}, xmlns],
group=True,
stale=settings.COUCH_STALE_QUERY,
).one()
return cls.get(r['value']['app']['id']) if r and 'app' in r['value'] else None
def get_profile_setting(self, s_type, s_id):
setting = self.profile.get(s_type, {}).get(s_id)
if setting is not None:
return setting
yaml_setting = commcare_settings.SETTINGS_LOOKUP[s_type][s_id]
for contingent in yaml_setting.get("contingent_default", []):
if check_condition(self, contingent["condition"]):
setting = contingent["value"]
if setting is not None:
return setting
if self.build_version < yaml_setting.get("since", "0"):
setting = yaml_setting.get("disabled_default", None)
if setting is not None:
return setting
return yaml_setting.get("default")
@property
def has_careplan_module(self):
return any((module for module in self.modules if isinstance(module, CareplanModule)))
@quickcache(['self.version'])
def get_case_metadata(self):
from corehq.apps.reports.formdetails.readable import AppCaseMetadata
builder = ParentCasePropertyBuilder(self)
case_relationships = builder.get_parent_type_map(self.get_case_types())
meta = AppCaseMetadata()
for case_type, relationships in case_relationships.items():
type_meta = meta.get_type(case_type)
type_meta.relationships = relationships
for module in self.get_modules():
for form in module.get_forms():
form.update_app_case_meta(meta)
seen_types = []
def get_children(case_type):
seen_types.append(case_type)
return [type_.name for type_ in meta.case_types if type_.relationships.get('parent') == case_type]
def get_hierarchy(case_type):
return {child: get_hierarchy(child) for child in get_children(case_type)}
roots = [type_ for type_ in meta.case_types if not type_.relationships]
for type_ in roots:
meta.type_hierarchy[type_.name] = get_hierarchy(type_.name)
for type_ in meta.case_types:
if type_.name not in seen_types:
meta.type_hierarchy[type_.name] = {}
type_.error = _("Error in case type hierarchy")
return meta
class RemoteApp(ApplicationBase):
"""
A wrapper for a url pointing to a suite or profile file. This allows you to
write all the files for an app by hand, and then give the url to app_manager
and let it package everything together for you.
"""
profile_url = StringProperty(default="http://")
name = StringProperty()
manage_urls = BooleanProperty(default=False)
questions_map = DictProperty(required=False)
def is_remote_app(self):
return True
@classmethod
def new_app(cls, domain, name, lang='en'):
app = cls(domain=domain, name=name, langs=[lang])
return app
def create_profile(self, is_odk=False):
# we don't do odk for now anyway
return remote_app.make_remote_profile(self)
def strip_location(self, location):
return remote_app.strip_location(self.profile_url, location)
def fetch_file(self, location):
location = self.strip_location(location)
url = urljoin(self.profile_url, location)
try:
content = urlopen(url).read()
except Exception:
raise AppEditingError('Unable to access resource url: "%s"' % url)
return location, content
@classmethod
def get_locations(cls, suite):
for resource in suite.findall('*/resource'):
try:
loc = resource.findtext('location[@authority="local"]')
except Exception:
loc = resource.findtext('location[@authority="remote"]')
yield resource.getparent().tag, loc
@property
def SUITE_XPATH(self):
return 'suite/resource/location[@authority="local"]'
def create_all_files(self):
files = {
'profile.xml': self.create_profile(),
}
tree = _parse_xml(files['profile.xml'])
def add_file_from_path(path, strict=False, transform=None):
added_files = []
# must find at least one
try:
tree.find(path).text
except (TypeError, AttributeError):
if strict:
raise AppEditingError("problem with file path reference!")
else:
return
for loc_node in tree.findall(path):
loc, file = self.fetch_file(loc_node.text)
if transform:
file = transform(file)
files[loc] = file
added_files.append(file)
return added_files
add_file_from_path('features/users/logo')
try:
suites = add_file_from_path(
self.SUITE_XPATH,
strict=True,
transform=(lambda suite:
remote_app.make_remote_suite(self, suite))
)
except AppEditingError:
raise AppEditingError(ugettext('Problem loading suite file from profile file. Is your profile file correct?'))
for suite in suites:
suite_xml = _parse_xml(suite)
for tag, location in self.get_locations(suite_xml):
location, data = self.fetch_file(location)
if tag == 'xform' and self.build_langs:
try:
xform = XForm(data)
except XFormException as e:
raise XFormException('In file %s: %s' % (location, e))
xform.exclude_languages(whitelist=self.build_langs)
data = xform.render()
files.update({location: data})
return files
def scrub_source(self, source):
pass
def make_questions_map(self):
if self.copy_of:
xmlns_map = {}
def fetch(location):
filepath = self.strip_location(location)
return self.fetch_attachment('files/%s' % filepath)
profile_xml = _parse_xml(fetch('profile.xml'))
suite_location = profile_xml.find(self.SUITE_XPATH).text
suite_xml = _parse_xml(fetch(suite_location))
for tag, location in self.get_locations(suite_xml):
if tag == 'xform':
xform = XForm(fetch(location))
xmlns = xform.data_node.tag_xmlns
questions = xform.get_questions(self.build_langs)
xmlns_map[xmlns] = questions
return xmlns_map
else:
return None
def get_questions(self, xmlns):
if not self.questions_map:
self.questions_map = self.make_questions_map()
if not self.questions_map:
return []
self.save()
questions = self.questions_map.get(xmlns, [])
return questions
def get_apps_in_domain(domain, full=False, include_remote=True):
"""
    Returns all apps (not builds) in a domain.
    If full is True, use the applications view; otherwise use
    applications_brief.
"""
if full:
view_name = 'app_manager/applications'
startkey = [domain, None]
endkey = [domain, None, {}]
else:
view_name = 'app_manager/applications_brief'
startkey = [domain]
endkey = [domain, {}]
view_results = Application.get_db().view(view_name,
startkey=startkey,
endkey=endkey,
include_docs=True,
)
remote_app_filter = None if include_remote else lambda app: not app.is_remote_app()
wrapped_apps = [get_correct_app_class(row['doc']).wrap(row['doc']) for row in view_results]
return filter(remote_app_filter, wrapped_apps)
def get_app(domain, app_id, wrap_cls=None, latest=False, target=None):
"""
Utility for getting an app, making sure it's in the domain specified, and wrapping it in the right class
(Application or RemoteApp).
"""
if latest:
try:
original_app = get_db().get(app_id)
except ResourceNotFound:
raise Http404()
if not domain:
try:
domain = original_app['domain']
except Exception:
raise Http404()
if original_app.get('copy_of'):
parent_app_id = original_app.get('copy_of')
min_version = original_app['version'] if original_app.get('is_released') else -1
else:
parent_app_id = original_app['_id']
min_version = -1
if target == 'build':
# get latest-build regardless of star
couch_view = 'app_manager/saved_app'
startkey = [domain, parent_app_id, {}]
endkey = [domain, parent_app_id]
else:
# get latest starred-build
couch_view = 'app_manager/applications'
startkey = ['^ReleasedApplications', domain, parent_app_id, {}]
endkey = ['^ReleasedApplications', domain, parent_app_id, min_version]
latest_app = get_db().view(
couch_view,
startkey=startkey,
endkey=endkey,
limit=1,
descending=True,
include_docs=True
).one()
try:
app = latest_app['doc']
except TypeError:
# If no builds/starred-builds, return act as if latest=False
app = original_app
else:
try:
app = get_db().get(app_id)
except Exception:
raise Http404()
if domain and app['domain'] != domain:
raise Http404()
try:
cls = wrap_cls or get_correct_app_class(app)
except DocTypeError:
raise Http404()
app = cls.wrap(app)
return app
str_to_cls = {
"Application": Application,
"Application-Deleted": Application,
"RemoteApp": RemoteApp,
"RemoteApp-Deleted": RemoteApp,
}
def import_app(app_id_or_source, domain, name=None, validate_source_domain=None):
if isinstance(app_id_or_source, basestring):
app_id = app_id_or_source
source = get_app(None, app_id)
src_dom = source['domain']
if validate_source_domain:
validate_source_domain(src_dom)
source = source.export_json()
source = json.loads(source)
else:
source = app_id_or_source
try:
attachments = source['_attachments']
except KeyError:
attachments = {}
finally:
source['_attachments'] = {}
if name:
source['name'] = name
cls = str_to_cls[source['doc_type']]
# Allow the wrapper to update to the current default build_spec
if 'build_spec' in source:
del source['build_spec']
app = cls.from_source(source, domain)
app.save()
if not app.is_remote_app():
for _, m in app.get_media_objects():
if domain not in m.valid_domains:
m.valid_domains.append(domain)
m.save()
for name, attachment in attachments.items():
if re.match(ATTACHMENT_REGEX, name):
app.put_attachment(attachment, name)
return app
class DeleteApplicationRecord(DeleteRecord):
app_id = StringProperty()
def undo(self):
app = ApplicationBase.get(self.app_id)
app.doc_type = app.get_doc_type()
app.save(increment_version=False)
class DeleteModuleRecord(DeleteRecord):
app_id = StringProperty()
module_id = IntegerProperty()
module = SchemaProperty(ModuleBase)
def undo(self):
app = Application.get(self.app_id)
modules = app.modules
modules.insert(self.module_id, self.module)
app.modules = modules
app.save()
class DeleteFormRecord(DeleteRecord):
app_id = StringProperty()
module_id = IntegerProperty()
module_unique_id = StringProperty()
form_id = IntegerProperty()
form = SchemaProperty(FormBase)
def undo(self):
app = Application.get(self.app_id)
if self.module_unique_id is not None:
module = app.get_module_by_unique_id(self.module_unique_id)
else:
module = app.modules[self.module_id]
forms = module.forms
forms.insert(self.form_id, self.form)
module.forms = forms
app.save()
class CareplanAppProperties(DocumentSchema):
name = StringProperty()
latest_release = StringProperty()
case_type = StringProperty()
goal_conf = DictProperty()
task_conf = DictProperty()
class CareplanConfig(Document):
domain = StringProperty()
app_configs = SchemaDictProperty(CareplanAppProperties)
@classmethod
def for_domain(cls, domain):
res = cache_core.cached_view(
cls.get_db(),
"domain/docs",
key=[domain, 'CareplanConfig', None],
reduce=False,
include_docs=True,
wrapper=cls.wrap)
if len(res) > 0:
result = res[0]
else:
result = None
return result
# backwards compatibility with suite-1.0.xml
FormBase.get_command_id = lambda self: id_strings.form_command(self)
FormBase.get_locale_id = lambda self: id_strings.form_locale(self)
ModuleBase.get_locale_id = lambda self: id_strings.module_locale(self)
ModuleBase.get_case_list_command_id = lambda self: id_strings.case_list_command(self)
ModuleBase.get_case_list_locale_id = lambda self: id_strings.case_list_locale(self)
Module.get_referral_list_command_id = lambda self: id_strings.referral_list_command(self)
Module.get_referral_list_locale_id = lambda self: id_strings.referral_list_locale(self)
|
puttarajubr/commcare-hq
|
corehq/apps/app_manager/models.py
|
Python
|
bsd-3-clause
| 170,846
|
[
"VisIt"
] |
4a8b2f22fb893ef4019eb5fa4ce75fb962a41bb94b06844497b7054e0a713fd0
|
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.moltemplate.org
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2011, Regents of the University of California
# All rights reserved.
"""
lttree.py
lttree.py is an extension of the generic ttree.py program.
This version can understand and manipulate ttree-style templates which
are specialized for storing molecule-specific data for use in LAMMPS.
The main difference between lttree.py and ttree.py is:
Unlike ttree.py, lttree.py understands rigid-body movement commands like
"rot()" and "move()", which allow it to reorient and move each copy
of a molecule to a new location. (ttree.py just ignores these commands.
Consequently, LAMMPS input files (fragments) created with ttree.py have
invalid, overlapping atomic coordinates and must be modified or augmented
later, by loading atomic coordinates from a PDB file or an XYZ file.)
lttree.py understands the "Data Atoms" section of a LAMMPS
data file (in addition to the various "atom_styles" which affect it).
Additional LAMMPS-specific features may be added in the future.
"""
g_program_name = __file__.split('/')[-1] # ='lttree.py'
g_date_str = '2018-3-15'
g_version_str = '0.77.0'
import sys
from collections import defaultdict
import pkg_resources
try:
from .ttree import BasicUISettings, BasicUIParseArgs, EraseTemplateFiles, \
StackableCommand, PopCommand, PopRightCommand, PopLeftCommand, \
PushCommand, PushLeftCommand, PushRightCommand, ScopeCommand, \
WriteVarBindingsFile, StaticObj, InstanceObj, \
BasicUI, ScopeBegin, ScopeEnd, WriteFileCommand, Render
from .ttree_lex import InputError, TextBlock, DeleteLinesWithBadVars, \
TemplateLexer
from .lttree_styles import AtomStyle2ColNames, ColNames2AidAtypeMolid, \
ColNames2Coords, ColNames2Vects, \
data_atoms, data_prefix, data_masses, \
data_velocities, data_ellipsoids, data_triangles, data_lines, \
data_pair_coeffs, data_bond_coeffs, data_angle_coeffs, \
data_dihedral_coeffs, data_improper_coeffs, data_bondbond_coeffs, \
data_bondangle_coeffs, data_middlebondtorsion_coeffs, \
data_endbondtorsion_coeffs, data_angletorsion_coeffs, \
data_angleangletorsion_coeffs, data_bondbond13_coeffs, \
data_angleangle_coeffs, data_bonds_by_type, data_angles_by_type, \
data_dihedrals_by_type, data_impropers_by_type, \
data_bonds, data_bond_list, data_angles, data_dihedrals, data_impropers, \
data_boundary, data_pbc, data_prefix_no_space, in_init, in_settings, \
in_prefix
from .ttree_matrix_stack import AffineTransform, MultiAffineStack, \
LinTransform
except (ImportError, SystemError, ValueError):
# not installed as a package
from ttree import *
from ttree_lex import *
from lttree_styles import *
from ttree_matrix_stack import *
try:
unicode
except NameError:
# Python 3
basestring = unicode = str
class LttreeSettings(BasicUISettings):
def __init__(self,
user_bindings_x=None,
user_bindings=None,
order_method='by_command'):
BasicUISettings.__init__(self,
user_bindings_x,
user_bindings,
order_method)
# The following new member data indicate which columns store
# LAMMPS-specific information.
        # The next 6 members keep track of the different columns
        # of the "Data Atoms" section of a LAMMPS data file:
self.column_names = [] # <--A list of column names (optional)
self.ii_coords = [] # <--A list of triplets of column indexes storing coordinate data
self.ii_vects = [] # <--A list of triplets of column indexes storing directional data
# (such as dipole or ellipsoid orientations)
self.i_atomid = None # <--An integer indicating which column has the atomid
self.i_atomtype = None # <--An integer indicating which column has the atomtype
self.i_molid = None # <--An integer indicating which column has the molid, if applicable
def LttreeParseArgs(argv, settings, main=False, show_warnings=True):
# By default, include force_fields provided with the package
argv.extend(["-import-path",
pkg_resources.resource_filename(__name__, 'force_fields/')])
BasicUIParseArgs(argv, settings)
# Loop over the remaining arguments not processed yet.
# These arguments are specific to the lttree.py program
# and are not understood by ttree.py:
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if ((argv[i].lower() == '-atomstyle') or
(argv[i].lower() == '-atom-style') or
(argv[i].lower() == '-atom_style')):
if i + 1 >= len(argv):
raise InputError('Error(' + g_program_name + '): The ' + argv[i] + ' flag should be followed by a LAMMPS\n'
' atom_style name (or single quoted string containing a space-separated\n'
' list of column names such as: atom-ID atom-type q x y z molecule-ID.)\n')
settings.column_names = AtomStyle2ColNames(argv[i + 1])
sys.stderr.write('\n \"' + data_atoms + '\" column format:\n')
sys.stderr.write(
' ' + (' '.join(settings.column_names)) + '\n\n')
settings.ii_coords = ColNames2Coords(settings.column_names)
settings.ii_vects = ColNames2Vects(settings.column_names)
settings.i_atomid, settings.i_atomtype, settings.i_molid = ColNames2AidAtypeMolid(
settings.column_names)
del(argv[i:i + 2])
elif (argv[i].lower() == '-icoord'):
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by list of integers\n'
' corresponding to column numbers for coordinates in\n'
' the \"' + data_atoms + '\" section of a LAMMPS data file.\n')
ilist = argv[i + 1].split()
if (len(ilist) % 3) != 0:
raise InputError('Error: ' + argv[i] + ' flag should be followed by list of integers.\n'
' This is usually a list of 3 integers, but it can contain more.\n'
                                 '       The number of coordinate columns must be divisible by 3,\n'
' (even if the simulation is in 2 dimensions)\n')
            settings.ii_coords = []
            # Use a separate loop variable so the outer argument index `i`
            # is not clobbered, and integer division for Python 3.
            for j in range(0, len(ilist) // 3):
                cols = [int(ilist[3 * j]) + 1,
                        int(ilist[3 * j + 1]) + 1,
                        int(ilist[3 * j + 2]) + 1]
                settings.ii_coords.append(cols)
del(argv[i:i + 2])
elif (argv[i].lower() == '-ivect'):
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by list of integers\n'
' corresponding to column numbers for direction vectors in\n'
' the \"' + data_atoms + '\" section of a LAMMPS data file.\n')
ilist = argv[i + 1].split()
if (len(ilist) % 3) != 0:
raise InputError('Error: ' + argv[i] + ' flag should be followed by list of integers.\n'
' This is usually a list of 3 integers, but it can contain more.\n'
                                 '       The number of coordinate columns must be divisible by 3,\n'
' (even if the simulation is in 2 dimensions)\n')
            settings.ii_vects = []
            for j in range(0, len(ilist) // 3):
                cols = [int(ilist[3 * j]) + 1,
                        int(ilist[3 * j + 1]) + 1,
                        int(ilist[3 * j + 2]) + 1]
                settings.ii_vects.append(cols)
del(argv[i:i + 2])
elif ((argv[i].lower() == '-iatomid') or
(argv[i].lower() == '-iid') or
(argv[i].lower() == '-iatom-id')):
if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
raise InputError('Error: ' + argv[i] + ' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"' +
data_atoms + '\" section of a\n'
' LAMMPS data file contains the atom id number (typically 1).\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
i_atomid = int(argv[i + 1]) - 1
del(argv[i:i + 2])
elif ((argv[i].lower() == '-iatomtype') or
(argv[i].lower() == '-itype') or
(argv[i].lower() == '-iatom-type')):
if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
raise InputError('Error: ' + argv[i] + ' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"' +
data_atoms + '\" section of a\n'
' LAMMPS data file contains the atom type.\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
i_atomtype = int(argv[i + 1]) - 1
del(argv[i:i + 2])
elif ((argv[i].lower() == '-imolid') or
(argv[i].lower() == '-imol') or
(argv[i].lower() == '-imol-id') or
(argv[i].lower() == '-imoleculeid') or
(argv[i].lower() == '-imolecule-id')):
if ((i + 1 >= len(argv)) or (not str.isdigit(argv[i + 1]))):
raise InputError('Error: ' + argv[i] + ' flag should be followed by an integer\n'
' (>=1) indicating which column in the \"' +
data_atoms + '\" section of a\n'
' LAMMPS data file contains the molecule id number.\n'
' (This argument is unnecessary if you use the -atomstyle argument.)\n')
i_molid = int(argv[i + 1]) - 1
del(argv[i:i + 2])
elif (argv[i].find('-') == 0) and main:
# elif (__name__ == "__main__"):
raise InputError('Error(' + g_program_name + '):\n'
                             'Unrecognized command line argument \"' + argv[i] + '\"\n')
else:
i += 1
if main:
# Instantiate the lexer we will be using.
        # (The lexer's __init__() function requires an opened file.
        # Assuming __name__ == "__main__", the name of that file should
        # be the last remaining (unprocessed) argument in the argument list.
        # Otherwise, the name of that file will be determined later by the
        # python script which imports this module, so we let them handle it.)
if len(argv) == 1:
raise InputError('Error: This program requires at least one argument\n'
' the name of a file containing ttree template commands\n')
elif len(argv) == 2:
try:
# Parse text from the file named argv[1]
settings.lex.infile = argv[1]
settings.lex.instream = open(argv[1], 'r')
except IOError:
sys.stderr.write('Error: unable to open file\n'
' \"' + argv[1] + '\"\n'
' for reading.\n')
sys.exit(1)
del(argv[1:2])
else:
# if there are more than 2 remaining arguments,
problem_args = ['\"' + arg + '\"' for arg in argv[1:]]
raise InputError('Syntax Error(' + g_program_name + '):\n\n'
' Problem with argument list.\n'
' The remaining arguments are:\n\n'
' ' + (' '.join(problem_args)) + '\n\n'
' (The actual problem may be earlier in the argument list.\n'
' If these arguments are source files, then keep in mind\n'
' that this program can not parse multiple source files.)\n'
' Check the syntax of the entire argument list.\n')
if len(settings.ii_coords) == 0 and show_warnings:
sys.stderr.write('########################################################\n'
'## WARNING: atom_style unspecified ##\n'
'## --> \"' + data_atoms + '\" column data has an unknown format ##\n'
'## Assuming atom_style = \"full\" ##\n'
# '########################################################\n'
# '## To specify the \"'+data_atoms+'\" column format you can: ##\n'
# '## 1) Use the -atomstyle \"STYLE\" argument ##\n'
# '## where \"STYLE\" is a string indicating a LAMMPS ##\n'
# '## atom_style, including hybrid styles.(Standard ##\n'
# '## atom styles defined in 2011 are supported.) ##\n'
# '## 2) Use the -atomstyle \"COL_LIST\" argument ##\n'
# '## where \"COL_LIST" is a quoted list of strings ##\n'
# '## indicating the name of each column. ##\n'
# '## Names \"x\",\"y\",\"z\" are interpreted as ##\n'
# '## atomic coordinates. \"mux\",\"muy\",\"muz\" ##\n'
# '## are interpreted as direction vectors. ##\n'
# '## 3) Use the -icoord \"cx cy cz...\" argument ##\n'
# '## where \"cx cy cz\" is a list of integers ##\n'
# '## indicating the column numbers for the x,y,z ##\n'
# '## coordinates of each atom. ##\n'
# '## 4) Use the -ivect \"cmux cmuy cmuz...\" argument ##\n'
# '## where \"cmux cmuy cmuz...\" is a list of ##\n'
# '## integers indicating the column numbers for ##\n'
# '## the vector that determines the direction of a ##\n'
# '## dipole or ellipsoid (ie. a rotateable vector).##\n'
# '## (More than one triplet can be specified. The ##\n'
# '## number of entries must be divisible by 3.) ##\n'
'########################################################\n')
# The default atom_style is "full"
settings.column_names = AtomStyle2ColNames('full')
settings.ii_coords = ColNames2Coords(settings.column_names)
settings.ii_vects = ColNames2Vects(settings.column_names)
settings.i_atomid, settings.i_atomtype, settings.i_molid = ColNames2AidAtypeMolid(
settings.column_names)
return
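# Illustrative usage sketch (hypothetical file names; not part of the
# original module). When run as a script, the last remaining argument is
# the ttree template file, and -atomstyle declares the "Data Atoms" layout:
#
#     lttree.py -atomstyle "full" system.lt
#     lttree.py -atomstyle "atom-ID atom-type q x y z molecule-ID" system.lt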
def TransformAtomText(text, matrix, settings):
""" Apply transformations to the coordinates and other vector degrees
of freedom stored in the \"Data Atoms\" section of a LAMMPS data file.
This is the \"text\" argument.
The \"matrix\" stores the aggregate sum of combined transformations
to be applied.
"""
#sys.stderr.write('matrix_stack.M = \n'+ MatToStr(matrix) + '\n')
lines = text.split('\n')
for i in range(0, len(lines)):
line_orig = lines[i]
ic = line_orig.find('#')
if ic != -1:
line = line_orig[:ic]
comment = ' ' + line_orig[ic:].rstrip('\n')
else:
line = line_orig.rstrip('\n')
comment = ''
columns = line.split()
if len(columns) > 0:
if len(columns) == len(settings.column_names) + 3:
raise InputError('Error: lttree.py does not yet support integer unit-cell counters \n'
' within the \"' + data_atoms + '\" section of a LAMMPS data file.\n'
' Instead please add the appropriate offsets (these offsets\n'
' should be multiples of the cell size) to the atom coordinates\n'
' in the data file, and eliminate the extra columns. Then try again.\n'
' (If you get this message often, email me and I\'ll fix this limitation.)')
if len(columns) < len(settings.column_names):
raise InputError('Error: The number of columns in your data file does not\n'
' match the LAMMPS atom_style you selected.\n'
' Use the -atomstyle <style> command line argument.\n')
x0 = [0.0, 0.0, 0.0]
x = [0.0, 0.0, 0.0]
# Atomic coordinates transform using "affine" transformations
# (translations plus rotations [or other linear transformations])
for cxcycz in settings.ii_coords:
for d in range(0, 3):
x0[d] = float(columns[cxcycz[d]])
AffineTransform(x, matrix, x0) # x = matrix * x0 + b
for d in range(0, 3): # ("b" is part of "matrix")
columns[cxcycz[d]] = str(x[d])
# Dipole moments and other direction-vectors
            # are not affected by translational movement
for cxcycz in settings.ii_vects:
for d in range(0, 3):
x0[d] = float(columns[cxcycz[d]])
LinTransform(x, matrix, x0) # x = matrix * x0
for d in range(0, 3):
columns[cxcycz[d]] = str(x[d])
lines[i] = ' '.join(columns) + comment
return '\n'.join(lines)
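# A minimal sketch of the per-atom update above (assuming the 3x4 [R|b]
# layout that the "b is part of matrix" comment implies; not part of the
# original module): each coordinate triplet x0 becomes R*x0 + b.
#
#     def affine_sketch(matrix, x0):
#         return [matrix[d][0] * x0[0] + matrix[d][1] * x0[1] +
#                 matrix[d][2] * x0[2] + matrix[d][3]
#                 for d in range(3)]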
def TransformEllipsoidText(text, matrix, settings):
""" Apply the transformation matrix to the quaternions represented
by the last four numbers on each line.
The \"matrix\" stores the aggregate sum of combined transformations
to be applied and the rotational part of this matrix
must be converted to a quaternion.
"""
#sys.stderr.write('matrix_stack.M = \n'+ MatToStr(matrix) + '\n')
lines = text.split('\n')
for i in range(0, len(lines)):
line_orig = lines[i]
ic = line_orig.find('#')
if ic != -1:
line = line_orig[:ic]
comment = ' ' + line_orig[ic:].rstrip('\n')
else:
line = line_orig.rstrip('\n')
comment = ''
columns = line.split()
if len(columns) != 0:
if len(columns) != 8:
                raise InputError('Error (lttree.py): Expected 8 columns'
                                 ' (atom-ID, 3 shape lengths, 4 quaternion components)'
                                 ' instead of ' + str(len(columns)) +
                                 '\nline:\n' + line +
                                 '\nin each line of the \"Ellipsoids\" section.\n')
q_orig = [float(columns[-4]),
float(columns[-3]),
float(columns[-2]),
float(columns[-1])]
qRot = [0.0, 0.0, 0.0, 0.0]
Matrix2Quaternion(matrix, qRot)
q_new = [0.0, 0.0, 0.0, 0.0]
MultQuat(q_new, qRot, q_orig)
columns[-4] = str(q_new[0])
columns[-3] = str(q_new[1])
columns[-2] = str(q_new[2])
columns[-1] = str(q_new[3])
lines[i] = ' '.join(columns) + comment
return '\n'.join(lines)
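# Note (sketch, not part of the original module): the loop above applies the
# rotational part of `matrix` by quaternion composition, q_new = qRot * q_orig
# (given MultQuat's argument order). For reference, the Hamilton product is:
#
#     def mult_quat_sketch(p, q):
#         w1, x1, y1, z1 = p
#         w2, x2, y2, z2 = q
#         return [w1*w2 - x1*x2 - y1*y2 - z1*z2,
#                 w1*x2 + x1*w2 + y1*z2 - z1*y2,
#                 w1*y2 - x1*z2 + y1*w2 + z1*x2,
#                 w1*z2 + x1*y2 - y1*x2 + z1*w2]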
def CalcCM(text_Atoms,
text_Masses=None,
settings=None):
    atomids2masses = None
    types2masses = None
# Loop through the "Masses" section: what is the mass of each atom type?
if text_Masses != None:
types2masses = {}
lines = text_Masses.split('\n')
for i in range(0, len(lines)):
line = lines[i]
columns = line.split()
if len(columns) == 2:
atomtype = columns[0]
m = float(columns[1])
types2masses[atomtype] = m
lines = text_Atoms.split('\n')
# Pass 1 through the "Data Atoms" section: Determine each atom's mass
    if text_Masses != None:
        assert(settings != None)
        atomids2masses = {}
for i in range(0, len(lines)):
line = lines[i]
columns = line.split()
atomid = columns[settings.i_atomid]
atomtype = columns[settings.i_atomtype]
            if atomtype not in types2masses:
                raise InputError('Error(lttree): You have neglected to define the mass of atom type: \"' + atomtype + '\"\n'
                                 'Did you specify the mass of every atom type using write(\"Masses\"){}?')
            atomids2masses[atomid] = types2masses[atomtype]
    # Pass 2 through the "Data Atoms" section: Find the center of mass.
    tot_m = 0.0
    tot_x = [0.0, 0.0, 0.0]
for i in range(0, len(lines)):
line = lines[i]
columns = line.split()
if len(columns) > 0:
if len(columns) == len(settings.column_names) + 3:
raise InputError('Error: lttree.py does not yet support integer unit-cell counters (ix, iy, iz)\n'
' within the \"' + data_atoms + '\" section of a LAMMPS data file.\n'
' Instead please add the appropriate offsets (these offsets\n'
' should be multiples of the cell size) to the atom coordinates\n'
' in the data file, and eliminate the extra columns. Then try again.\n'
' (If you get this message often, email me and I\'ll fix this limitation.)')
if len(columns) != len(settings.column_names):
raise InputError('Error: The number of columns in your data file does not\n'
' match the LAMMPS atom_style you selected.\n'
' Use the -atomstyle <style> command line argument.\n')
x = [0.0, 0.0, 0.0]
            if atomids2masses != None:
                m = atomids2masses[columns[settings.i_atomid]]
else:
m = 1.0
tot_m += m
for cxcycz in settings.ii_coords:
for d in range(0, 3):
x[d] = float(columns[cxcycz[d]])
                    tot_x[d] += m * x[d]
            # Note: dipole moments and other direction vectors don't affect
# the center of mass. So I commented out the loop below.
# for cxcycz in settings.ii_vects:
# for d in range(0,3):
# v[d] = float(columns[cxcycz[d]])
lines[i] = ' '.join(columns)
xcm = [0.0, 0.0, 0.0]
for d in range(0, 3):
xcm[d] = tot_x[d] / tot_m
return xcm
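# Sanity-check sketch (not part of the original module): with a single
# coordinate triplet, CalcCM computes the mass-weighted mean of the atom
# positions, xcm[d] = sum_i(m_i * x_i[d]) / sum_i(m_i). When no "Masses"
# section is given, every m_i defaults to 1.0 and the result reduces to the
# plain arithmetic mean of the coordinates.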
def _ExecCommands(command_list,
index,
global_files_content,
settings,
matrix_stack,
current_scope_id=None,
substitute_vars=True):
"""
_ExecCommands():
The argument "commands" is a nested list of lists of
"Command" data structures (defined in ttree.py).
Carry out the write() and write_once() commands (which
    write out the contents of the templates contained inside them).
Instead of writing the files, save their contents in a string.
The argument "global_files_content" should be of type defaultdict(list)
It is an associative array whose key is a string (a filename)
    and whose value is a list of strings (of rendered templates).
"""
files_content = defaultdict(list)
postprocessing_commands = []
while index < len(command_list):
command = command_list[index]
index += 1
# For debugging only
if ((not isinstance(command, StackableCommand)) and
(not isinstance(command, ScopeCommand)) and
(not isinstance(command, WriteFileCommand))):
sys.stderr.write(str(command) + '\n')
if isinstance(command, PopCommand):
assert(current_scope_id != None)
if command.context_node == None:
command.context_node = current_scope_id
if isinstance(command, PopRightCommand):
matrix_stack.PopRight(which_stack=command.context_node)
elif isinstance(command, PopLeftCommand):
matrix_stack.PopLeft(which_stack=command.context_node)
else:
assert(False)
elif isinstance(command, PushCommand):
assert(current_scope_id != None)
if command.context_node == None:
command.context_node = current_scope_id
# Some commands are post-processing commands, and must be
# carried out AFTER all the text has been rendered. For example
# the "movecm(0,0,0)" waits until all of the coordinates have
# been rendered, calculates the center-of-mass, and then applies
# a translation moving the center of mass to the origin (0,0,0).
# We need to figure out which of these commands need to be
# postponed, and which commands can be carried out now.
# ("now"=pushing transformation matrices onto the matrix stack).
# UNFORTUNATELY POSTPONING SOME COMMANDS MAKES THE CODE UGLY
transform_list = command.contents.split('.')
transform_blocks = []
i_post_process = -1
# Example: Suppose:
#command.contents = '.rot(30,0,0,1).movecm(0,0,0).rot(45,1,0,0).scalecm(2.0).move(-2,1,0)'
# then
#transform_list = ['rot(30,0,0,1)', 'movecm(0,0,0)', 'rot(45,1,0,0)', 'scalecm(2.0)', 'move(-2,1,0)']
            # Note: the first command 'rot(30,0,0,1)' is carried out now.
            # The remaining commands are carried out during post-processing
            # (when processing the "ScopeEnd" command).
#
# We break up the commands into "blocks" separated by center-
# of-mass transformations ('movecm', 'rotcm', or 'scalecm')
#
# transform_blocks = ['.rot(30,0,0,1)',
# '.movecm(0,0,0).rot(45,1,0,0)',
# '.scalecm(2.0).move(-2,1,0)']
i = 0
while i < len(transform_list):
transform_block = ''
while i < len(transform_list):
transform = transform_list[i]
i += 1
if transform != '':
transform_block += '.' + transform
transform = transform.split('(')[0]
if ((transform == 'movecm') or
(transform == 'rotcm') or
(transform == 'scalecm')):
break
transform_blocks.append(transform_block)
if len(postprocessing_commands) == 0:
# The first block (before movecm, rotcm, or scalecm)
# can be executed now by modifying the matrix stack.
if isinstance(command, PushRightCommand):
matrix_stack.PushCommandsRight(transform_blocks[0].strip('.'),
command.srcloc,
which_stack=command.context_node)
elif isinstance(command, PushLeftCommand):
matrix_stack.PushCommandsLeft(transform_blocks[0].strip('.'),
command.srcloc,
which_stack=command.context_node)
# Everything else must be saved for later.
postprocessing_blocks = transform_blocks[1:]
else:
# If we already encountered a "movecm" "rotcm" or "scalecm"
# then all of the command blocks must be handled during
# postprocessing.
postprocessing_blocks = transform_blocks
for transform_block in postprocessing_blocks:
                assert(isinstance(transform_block, basestring))
if isinstance(command, PushRightCommand):
postprocessing_commands.append(PushRightCommand(transform_block,
command.srcloc,
command.context_node))
elif isinstance(command, PushLeftCommand):
postprocessing_commands.append(PushLeftCommand(transform_block,
command.srcloc,
command.context_node))
elif isinstance(command, WriteFileCommand):
            # --- Throw away lines containing references to deleted variables: ---
# First: To edit the content of a template,
# you need to make a deep local copy of it
tmpl_list = []
for entry in command.tmpl_list:
if isinstance(entry, TextBlock):
tmpl_list.append(TextBlock(entry.text,
entry.srcloc)) # , entry.srcloc_end))
else:
tmpl_list.append(entry)
# Now throw away lines with deleted variables
DeleteLinesWithBadVars(tmpl_list)
# --- Now render the text ---
text = Render(tmpl_list,
substitute_vars)
            # ---- Coordinates of the atoms must be rotated
            # and translated after rendering.
            # In addition, other vectors (dipoles, ellipsoid orientations)
            # must be processed.
# This requires us to re-parse the contents of this text
# (after it has been rendered), and apply these transformations
# before passing them on to the caller.
if command.filename == data_atoms:
text = TransformAtomText(text, matrix_stack.M, settings)
if command.filename == data_ellipsoids:
text = TransformEllipsoidText(text, matrix_stack.M, settings)
files_content[command.filename].append(text)
elif isinstance(command, ScopeBegin):
if isinstance(command.node, InstanceObj):
if ((command.node.children != None) and
(len(command.node.children) > 0)):
matrix_stack.PushStack(command.node)
# "command_list" is a long list of commands.
# ScopeBegin and ScopeEnd are (usually) used to demarcate/enclose
# the commands which are issued for a single class or
# class instance. _ExecCommands() carries out the commands for
# a single class/instance. If we reach a ScopeBegin(),
# then recursively process the commands belonging to the child.
index = _ExecCommands(command_list,
index,
files_content,
settings,
matrix_stack,
command.node,
substitute_vars)
elif isinstance(command, ScopeEnd):
if data_atoms in files_content:
for ppcommand in postprocessing_commands:
if data_masses in files_content:
xcm = CalcCM(files_content[data_atoms],
files_content[data_masses],
settings)
else:
xcm = CalcCM(files_content[data_atoms])
if isinstance(ppcommand, PushRightCommand):
matrix_stack.PushCommandsRight(ppcommand.contents,
ppcommand.srcloc,
xcm,
which_stack=command.context_node)
elif isinstance(ppcommand, PushLeftCommand):
matrix_stack.PushCommandsLeft(ppcommand.contents,
ppcommand.srcloc,
xcm,
which_stack=command.context_node)
files_content[data_atoms] = \
TransformAtomText(files_content[data_atoms],
matrix_stack.M, settings)
files_content[data_ellipsoids] = \
TransformEllipsoidText(files_content[data_ellipsoids],
matrix_stack.M, settings)
for ppcommand in postprocessing_commands:
matrix_stack.Pop(which_stack=command.context_node)
#(same as PopRight())
if isinstance(command.node, InstanceObj):
if ((command.node.children != None) and
(len(command.node.children) > 0)):
matrix_stack.PopStack()
# "ScopeEnd" means we're done with this class/instance.
break
else:
assert(False)
# no other command types allowed at this point
# After processing the commands in this list,
# merge the templates with the callers template list
    for filename, tmpl_list in files_content.items():
        global_files_content[filename] += tmpl_list
return index
def ExecCommands(commands,
files_content,
settings,
substitute_vars=True):
matrix_stack = MultiAffineStack()
index = _ExecCommands(commands,
0,
files_content,
settings,
matrix_stack,
None,
substitute_vars)
assert(index == len(commands))
def WriteFiles(files_content, suffix='', write_to_stdout=True):
for filename, str_list in files_content.items():
if filename != None:
out_file = None
if filename == '':
if write_to_stdout:
out_file = sys.stdout
else:
out_file = open(filename + suffix, 'a')
if out_file != None:
out_file.write(''.join(str_list))
if filename != '':
out_file.close()
return
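# Usage sketch for WriteFiles() (hypothetical contents, not from a real run):
#   WriteFiles({'system.data': ['line1\n', 'line2\n']}, suffix='.template')
# appends the joined strings to "system.data.template"; an entry whose
# filename is '' goes to stdout instead (unless write_to_stdout=False), and
# a None filename is skipped entirely.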
def main():
"""
    This is a "main module" wrapper for invoking lttree.py
    as a stand-alone program. This program:
    1) reads a ttree file,
    2) constructs a tree of class definitions (g_objectdefs),
    3) constructs a tree of instantiated class objects (g_objects),
    4) automatically assigns values to the variables,
    5) and carries out the "write" commands to write the templates to a file (or files).
"""
####### Main Code Below: #######
sys.stderr.write(g_program_name + ' v' +
g_version_str + ' ' + g_date_str + ' ')
sys.stderr.write('\n(python version ' + str(sys.version) + ')\n')
    if sys.version_info < (2, 6):
raise InputError(
'Error: Alas, you must upgrade to a newer version of python.')
try:
#settings = BasicUISettings()
#BasicUIParseArgs(sys.argv, settings)
settings = LttreeSettings()
        LttreeParseArgs([arg for arg in sys.argv],  # (a copy of sys.argv)
settings, main=True, show_warnings=True)
        # Data structures to store the class definitions and instances
g_objectdefs = StaticObj('', None) # The root of the static tree
# has name '' (equivalent to '/')
g_objects = InstanceObj('', None) # The root of the instance tree
# has name '' (equivalent to '/')
# A list of commands to carry out
g_static_commands = []
g_instance_commands = []
BasicUI(settings,
g_objectdefs,
g_objects,
g_static_commands,
g_instance_commands)
        # Interpret the commands. (These are typically write() or
        # write_once() commands, rendering templates into text.)
        # This step also handles coordinate transformations and delete
        # commands. Coordinate transformations can be applied to the
        # rendered text as a post-processing step.
sys.stderr.write(' done\nbuilding templates...')
files_content = defaultdict(list)
ExecCommands(g_static_commands,
files_content,
settings,
False)
ExecCommands(g_instance_commands,
files_content,
settings,
False)
# Finally: write the rendered text to actual files.
# Erase the files that will be written to:
sys.stderr.write(' done\nwriting templates...')
EraseTemplateFiles(g_static_commands)
EraseTemplateFiles(g_instance_commands)
# Write the files as templates
# (with the original variable names present)
WriteFiles(files_content, suffix=".template", write_to_stdout=False)
# Write the files with the variables substituted by values
sys.stderr.write(' done\nbuilding and rendering templates...')
files_content = defaultdict(list)
ExecCommands(g_static_commands, files_content, settings, True)
ExecCommands(g_instance_commands, files_content, settings, True)
sys.stderr.write(' done\nwriting rendered templates...\n')
WriteFiles(files_content)
sys.stderr.write(' done\n')
# Now write the variable bindings/assignments table.
sys.stderr.write('writing \"ttree_assignments.txt\" file...')
        open('ttree_assignments.txt', 'w').close()  # <-- erase previous version
WriteVarBindingsFile(g_objectdefs)
WriteVarBindingsFile(g_objects)
sys.stderr.write(' done\n')
except (ValueError, InputError) as err:
sys.stderr.write('\n\n' + str(err) + '\n')
sys.exit(-1)
return
if __name__ == '__main__':
main()
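# Typical invocation (a sketch; the real argument handling lives in
# LttreeParseArgs, and "-atomstyle" is the flag mentioned in the error
# message above -- "system.lt" is just an illustrative filename):
#   python lttree.py -atomstyle "full" system.lt
# This writes ".template" copies of each output file, the fully rendered
# files, and a "ttree_assignments.txt" table of variable bindings.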
|
quang-ha/lammps
|
tools/moltemplate/moltemplate/lttree.py
|
Python
|
gpl-2.0
| 39,725
|
[
"LAMMPS"
] |
c122886a1454027a4d598337134a31f24e04536e6bf9a65a5b3d6e35c7fb8405
|
#
# Test executable #1 to exercise the beam space charge calculations.
# Here, we calculate the free-space wakefield of a relativistic beam.
#
# Copyright (c) 2013 UCLA and RadiaBeam Technologies. All rights reserved
# SciPy imports
import numpy as np
import matplotlib.pyplot as plt
# RadiaBeam imports
import radtrack.spacecharge.RbGaussBunchSpaceCharge as spacecharge
# Specify the beam parameters
# rmsPerp = 25.e-06 # [m], transverse beam size (rms)
# tauFWHM = 8.996e-14 # 1 ps FWHM
rmsPerp = 10.e-06
tauFWHM = 7.197e-14
rmsLong = 2.9979e8 * tauFWHM / 2.355 # [m], assumes Gaussian
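# (For a Gaussian profile, FWHM = 2*sqrt(2*ln 2)*sigma; a quick check of the
#  constant used above:
#     >>> import math; 2.0*math.sqrt(2.0*math.log(2.0))
#     2.3548200450309493
#  so dividing the FWHM-derived length by 2.355 yields the rms length.)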
xInit = 0. # [m], initial position (beam center)
charge = -3.0e-9 # [C], total charge
myKE = 23.e9 # [eV], average kinetic energy
# create an instance of the space charge class
mySC = spacecharge.RbGaussBunchSpaceCharge(charge,myKE,xInit,rmsLong,rmsPerp)
# Specify the desired grid size
numX = 40
numY = 160
# load up the x,y locations of the mesh
# xyMaxH = 4.*rmsLong
xMax = 1.e-04
xMin = -xMax
# xyMaxV = 20.*rmsPerp
yMax = 0.6e-03
yMin = -yMax
xArr = np.linspace(xMin, xMax, numX)
yArr = np.linspace(yMin, yMax, numY)
# 2D mesh arrays; indexing='ij' puts x along the first axis and y along the
# second, so xGrid[i,j] = xArr[i] and yGrid[i,j] = yArr[j]
xGrid, yGrid = np.meshgrid(xArr, yArr, indexing='ij')
# Create field profile
Efield = np.zeros((numX, numY))
for iLoop in range(numX):
for jLoop in range(numY):
Efield[iLoop, jLoop] = mySC.calcEz3D(xGrid[iLoop,jLoop],yGrid[iLoop,jLoop],0.,0.)
ncLevels = 20
# plot contours of the longitudinal field Ez in the x-y plane
plt.figure(1)
cs1 = plt.contourf(xGrid, yGrid, Efield, ncLevels)
plt.colorbar(cs1)
plt.axis([xMin, xMax, yMin, yMax])
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.title('Ez [V/m] for 3D Gaussian e- beam')
# Create field profile
for iLoop in range(numX):
for jLoop in range(numY):
Efield[iLoop, jLoop] = mySC.calcEx3D(xGrid[iLoop,jLoop],yGrid[iLoop,jLoop],0.,0.)
# plot contours of the transverse field Ex in the x-y plane
plt.figure(2)
cs2 = plt.contourf(xGrid, yGrid, Efield, ncLevels)
plt.colorbar(cs2)
plt.axis([xMin, xMax, yMin, yMax])
plt.xlabel('x [m]')
plt.ylabel('y [m]')
plt.title('Ex [V/m] for 3D Gaussian e- beam')
plt.show()
|
radiasoft/radtrack
|
experimental/selfFields/testWake01.py
|
Python
|
apache-2.0
| 2,518
|
[
"Gaussian"
] |
9955cca455a072a0fcdbe446d059ee078f567a640c40e8ca0fbacc947ae3048e
|
import numpy as np
import os
try:
import matplotlib
if not os.environ.get('DISPLAY'):
# Use non-interactive Agg backend
matplotlib.use('Agg')
from matplotlib import pyplot as plt
except ImportError:
import platform
if platform.python_implementation() == 'PyPy':
        # PyPy doesn't have a version of matplotlib. Make a fake plt whose
        # attribute lookups raise if used. This allows us to use other
        # 'dark' code that happens to import dark.mutations but not use
        # the functions that rely on matplotlib.
        class plt(object):
            def __getattr__(self, _):
                raise NotImplementedError(
                    'matplotlib is not supported under pypy')
        # __getattr__ is only consulted for instances, not for the class
        # itself, so rebind the name to an instance.
        plt = plt()
else:
raise
from dark.entrez import getSequence
class Feature(object):
"""
An offset-adjusted feature, with start and stop attributes and methods to
return a textual description and a legend label.
@param feature: A BioPython feature.
@param subfeature: A C{bool} to indicate if a feature is actually a
subfeature.
"""
def __init__(self, feature, subfeature=False):
self.feature = feature
self.color = None # Should be set with setColor
self.subfeature = subfeature
self.start = int(feature.location.start)
self.end = int(feature.location.end)
def setColor(self, color):
"""
An explicit method to set a feature's (plotting) color.
@param color: A C{str} color.
"""
self.color = color
def legendLabel(self):
"""
Provide a textual description of the feature and its qualifiers to be
used as a label in a plot legend.
@return: A C{str} description of the feature.
"""
excludedQualifiers = set((
'codon_start', 'db_xref', 'protein_id', 'region_name',
'ribosomal_slippage', 'rpt_type', 'translation', 'transl_except',
'transl_table')
)
maxValueLength = 30
result = []
if self.feature.qualifiers:
for qualifier in sorted(self.feature.qualifiers):
if qualifier not in excludedQualifiers:
value = ', '.join(self.feature.qualifiers[qualifier])
if qualifier == 'site_type' and value == 'other':
continue
if len(value) > maxValueLength:
value = value[:maxValueLength - 3] + '...'
result.append('%s: %s' % (qualifier, value))
return '%d-%d %s%s.%s' % (
int(self.feature.location.start),
int(self.feature.location.end),
self.feature.type,
' (subfeature)' if self.subfeature else '',
' ' + ', '.join(result) if result else '')
class FeatureList(list):
"""
Provide access to a list of L{Feature} objects.
@param title: A C{str} sequence title from a BLAST hit. Of the form
'gi|63148399|gb|DQ011818.1| Description...'.
    @param database: The C{str} name of the Entrez database to search.
    @param wantedTypes: A C{tuple} of feature types that are of interest.
        Features whose types are not in this list will be ignored.
@param sequenceFetcher: A function that takes a sequence title and a
database name and returns a C{Bio.SeqIO} instance. If C{None}, use
L{dark.entrez.getSequence}.
"""
def __init__(self, title, database, wantedTypes, sequenceFetcher=None):
list.__init__(self)
self.offline = False
sequenceFetcher = sequenceFetcher or getSequence
try:
record = sequenceFetcher(title, db=database)
except ValueError:
# Ignore. See https://github.com/acorg/dark-matter/issues/124
return
if record is None:
self.offline = True
else:
wantedTypes = set(wantedTypes)
for feature in record.features:
if feature.type in wantedTypes:
self.append(Feature(feature))
# Assign colors to features.
colormap = plt.cm.coolwarm
colors = [colormap(i) for i in np.linspace(0.0, 0.99, len(self))]
for feature, color in zip(self, colors):
feature.setColor(color)
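# A minimal usage sketch for FeatureList (requires network access to Entrez;
# the title below is just an illustrative example, not a guaranteed record):
#
#     features = FeatureList('gi|63148399|gb|DQ011818.1| Example title',
#                            database='protein',
#                            wantedTypes=('CDS', 'Region'))
#     if not features.offline:
#         for feature in features:
#             print(feature.legendLabel())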
class _FeatureAdder(object):
"""
Look up features for a title, and provide a method to add them to a figure
as well as returning them.
"""
TITLE_FONTSIZE = 16
FONTSIZE = 20
MAX_FEATURES_TO_DISPLAY = 50
DATABASE = None # Set in subclasses.
WANTED_TYPES = None # Set in subclasses.
def __init__(self):
self.tooManyFeaturesToPlot = False
def add(self, fig, title, minX, maxX, offsetAdjuster=None,
sequenceFetcher=None):
"""
Find the features for a sequence title. If there aren't too many, add
the features to C{fig}. Return information about the features, as
described below.
@param fig: A matplotlib figure.
@param title: A C{str} sequence title from a BLAST hit. Of the form
'gi|63148399|gb|DQ011818.1| Description...'.
@param minX: The smallest x coordinate.
@param maxX: The largest x coordinate.
@param offsetAdjuster: a function for adjusting feature X axis offsets
for plotting.
@param sequenceFetcher: A function that takes a sequence title and a
database name and returns a C{Bio.SeqIO} instance. If C{None}, use
L{dark.entrez.getSequence}.
@return: If we seem to be offline, return C{None}. Otherwise, return
a L{FeatureList} instance.
"""
offsetAdjuster = offsetAdjuster or (lambda x: x)
fig.set_title('Target sequence features', fontsize=self.TITLE_FONTSIZE)
fig.set_yticks([])
features = FeatureList(title, self.DATABASE, self.WANTED_TYPES,
sequenceFetcher=sequenceFetcher)
if features.offline:
fig.text(minX + (maxX - minX) / 3.0, 0,
'You (or Genbank) appear to be offline.',
fontsize=self.FONTSIZE)
fig.axis([minX, maxX, -1, 1])
return None
# If no interesting features were found, display a message saying
# so in the figure. Otherwise, if we don't have too many features
# to plot, add the feature info to the figure.
nFeatures = len(features)
if nFeatures == 0:
# fig.text(minX + (maxX - minX) / 3.0, 0, 'No features found',
# fontsize=self.FONTSIZE)
fig.text(0.5, 0.5, 'No features found',
horizontalalignment='center', verticalalignment='center',
transform=fig.transAxes, fontsize=self.FONTSIZE)
fig.axis([minX, maxX, -1, 1])
elif nFeatures <= self.MAX_FEATURES_TO_DISPLAY:
# Call the method in our subclass to do the figure display.
self._displayFeatures(fig, features, minX, maxX, offsetAdjuster)
else:
self.tooManyFeaturesToPlot = True
# fig.text(minX + (maxX - minX) / 3.0, 0,
# 'Too many features to plot.', fontsize=self.FONTSIZE)
fig.text(0.5, 0.5, 'Too many features to plot',
horizontalalignment='center', verticalalignment='center',
fontsize=self.FONTSIZE, transform=fig.transAxes)
fig.axis([minX, maxX, -1, 1])
return features
def _displayFeatures(self, fig, features, minX, maxX, offsetAdjuster):
"""
Add the given C{features} to the figure in C{fig}.
@param fig: A matplotlib figure.
@param features: A C{FeatureList} instance.
@param minX: The smallest x coordinate.
@param maxX: The largest x coordinate.
@param offsetAdjuster: a function for adjusting feature X axis offsets
for plotting.
"""
raise NotImplementedError('_displayFeatures must be implemented in '
'a subclass.')
class ProteinFeatureAdder(_FeatureAdder):
"""
Subclass L{_FeatureAdder} with a method to add protein features to a
figure.
"""
DATABASE = 'protein'
WANTED_TYPES = ('CDS', 'mat_peptide', 'rRNA', 'Site', 'Region')
def _displayFeatures(self, fig, features, minX, maxX, offsetAdjuster):
"""
Add the given C{features} to the figure in C{fig}.
@param fig: A matplotlib figure.
@param features: A C{FeatureList} instance.
@param minX: The smallest x coordinate.
@param maxX: The largest x coordinate.
@param offsetAdjuster: a function for adjusting feature X axis offsets
for plotting.
"""
labels = []
for index, feature in enumerate(features):
fig.plot([offsetAdjuster(feature.start),
offsetAdjuster(feature.end)],
[index * -0.2, index * -0.2], color=feature.color,
linewidth=2)
labels.append(feature.legendLabel())
# Note that minX and maxX do not need to be adjusted by the offset
# adjuster. They are the already-adjusted min/max values as
# computed in computePlotInfo in blast.py
fig.axis([minX, maxX, (len(features) + 1) * -0.2, 0.2])
if labels:
# Put a legend above the figure.
box = fig.get_position()
fig.set_position([box.x0, box.y0,
box.width, box.height * 0.2])
fig.legend(labels, loc='lower center', bbox_to_anchor=(0.5, 1.4),
fancybox=True, shadow=True, ncol=2)
class NucleotideFeatureAdder(_FeatureAdder):
"""
Subclass L{_FeatureAdder} with a method to add nucleotide features to a
figure.
"""
DATABASE = 'nucleotide'
WANTED_TYPES = ('CDS', 'LTR', 'mat_peptide', 'misc_feature',
'misc_structure', 'repeat_region', 'rRNA')
def _displayFeatures(self, fig, features, minX, maxX, offsetAdjuster):
"""
Add the given C{features} to the figure in C{fig}.
@param fig: A matplotlib figure.
@param features: A C{FeatureList} instance.
@param minX: The smallest x coordinate.
@param maxX: The largest x coordinate.
@param offsetAdjuster: a function for adjusting feature X axis offsets
for plotting.
"""
frame = None
labels = []
for feature in features:
start = offsetAdjuster(feature.start)
end = offsetAdjuster(feature.end)
if feature.subfeature:
subfeatureFrame = start % 3
if subfeatureFrame == frame:
# Move overlapping subfeatures down a little to make them
# visible.
y = subfeatureFrame - 0.2
else:
y = subfeatureFrame
else:
frame = start % 3
# If we have a polyprotein, shift it up slightly so we can see
# its components below it.
product = feature.feature.qualifiers.get('product', [''])[0]
if product.lower().find('polyprotein') > -1:
y = frame + 0.2
else:
y = frame
fig.plot([start, end], [y, y], color=feature.color, linewidth=2)
labels.append(feature.legendLabel())
# Note that minX and maxX do not need to be adjusted by the offset
# adjuster. They are the already-adjusted min/max values as
# computed in computePlotInfo in blast.py
fig.axis([minX, maxX, -0.5, 2.5])
fig.set_yticks(np.arange(3))
fig.set_ylabel('Frame')
if labels:
# Put a legend above the figure.
box = fig.get_position()
fig.set_position([box.x0, box.y0,
box.width, box.height * 0.3])
fig.legend(labels, loc='lower center', bbox_to_anchor=(0.5, 2.5),
fancybox=True, shadow=True, ncol=2)
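# A minimal usage sketch (note: judging by the methods called above --
# set_title, set_yticks, plot, axis -- the `fig` argument is really a
# matplotlib Axes despite its name):
#
#     import matplotlib.pyplot as plt
#     figure, ax = plt.subplots()
#     adder = NucleotideFeatureAdder()
#     features = adder.add(ax, 'gi|63148399|gb|DQ011818.1| Example title',
#                          minX=0, maxX=10000)
#     plt.show()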
|
terrycojones/dark-matter
|
dark/features.py
|
Python
|
mit
| 12,233
|
[
"BLAST",
"Biopython"
] |
672ab430470e3d4c29148e716fde4db5a605a8ffae48cc63b59ad0c8814b48ae
|
import tensorflow as tf
import numpy as np
import pickle
# HiddenLayer_dict[layer] = number of hidden neurons in that layer; layer numbering starts from 1!
def Hidden_layer_shape(l,n):
hidden_layer_shape = {}
# Modify hidden layer number "l" and neuron number "n" in each layer.
for layer in range(1,l+1):
hidden_layer_shape[layer]=n
print("Hidden layer number:", len(hidden_layer_shape.keys()))
return hidden_layer_shape
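# Example: Hidden_layer_shape(2, 600) returns {1: 600, 2: 600}, i.e. two
# hidden layers of 600 neurons each.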
# HiddenLayer_dict[layer] = number of hidden neurons in that layer; layer numbering starts from 1!
def neural_network(Input_feature_shape, n_nodes_input, HiddenLayer_dict, n_classes):
# Define input feature number and number of neurons in hidden layers.
Input_and_hidden_layer = HiddenLayer_dict.copy()
Input_and_hidden_layer[0] = n_nodes_input
# Define shapes of hidden layers.
Hidden_Layer_structure = {}
for layer in HiddenLayer_dict.keys(): # Iteratively assign weight and bias to hidden layers.
Hidden_Layer_structure[layer] = {'weights': tf.Variable(tf.random_normal([Input_and_hidden_layer[(layer-1)] , HiddenLayer_dict[layer]])),
'biases': tf.Variable(tf.random_normal([HiddenLayer_dict[layer]]))}
# Neuron number of the last hidden layer as the input number of output layer.
output_layer = {'weights': tf.Variable(tf.random_normal([HiddenLayer_dict[max(HiddenLayer_dict.keys())], n_classes])),
'biases': tf.Variable(tf.random_normal([n_classes]))}
    # Generate output from the neural network.
l_previous = Input_feature_shape
for layer in HiddenLayer_dict.keys():
l_current = tf.add(tf.matmul(l_previous, Hidden_Layer_structure[layer]['weights']), Hidden_Layer_structure[layer]['biases'])
l_current = tf.nn.relu(l_current) # Use relu as Activation function.
l_previous = l_current
output = tf.matmul(l_current, output_layer['weights']) + output_layer['biases']
return output
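# Shape sketch for the network above: with, e.g., n_nodes_input=20 and
# HiddenLayer_dict={1: 600, 2: 600}, layer 1 weights are [20, 600], layer 2
# weights are [600, 600], and the output layer is [600, n_classes]; each
# hidden step computes relu(x . W + b), and the output layer omits the relu.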
def Training(layer_num, neuron_num, input_feature_shape, input_label_shape, train_x, train_y, test_x, test_y):
n_input = len(train_x[0])
hidden_layer_shape = Hidden_layer_shape(layer_num, neuron_num) # Initialise hidden layer shape
classes = 2
batch_size = 100
prediction = neural_network(input_feature_shape, n_input, hidden_layer_shape, classes)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=input_label_shape))
optimizer = tf.train.AdamOptimizer().minimize(cost)
epoch_num = 100
saver = tf.train.Saver()
with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())  # initialize_all_variables() is the deprecated pre-1.0 name
accuracy_record = 0
invalid_training_number = 0
true_count = 0
for epoch in range(epoch_num):
epoch_loss = 0
i = 0
while i < len(train_x):
start = i
end = i + batch_size
batch_x = np.array(train_x[start:end])
batch_y = np.array(train_y[start:end])
_, c = sess.run([optimizer, cost], feed_dict = {input_feature_shape: batch_x, input_label_shape: batch_y})
epoch_loss += c
i += batch_size
print("Epoch:", epoch+1, "complete out of", epoch_num, "loss:", epoch_loss)
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(input_label_shape, 1))
accuracy = tf.reduce_mean(tf.cast(correct, "float"))
ACC = accuracy.eval({input_feature_shape: test_x, input_label_shape: test_y})
print("Accuracy:", ACC)
if ACC > accuracy_record: # Save the best variables!
accuracy_record = ACC
invalid_training_number = 0
save_path = saver.save(sess, "FNI_tuning.ckpt")
print("Training time %s has improvement. Model saved in file: %s" % (epoch+1, save_path))
else:
invalid_training_number += 1
                if invalid_training_number >= 20:  # early stopping: no improvement for 20 consecutive epochs
break
return prediction
# End training
if __name__ == "__main__":
with open("FacebookFeatures of 5000 node pairs for Training.pickle", 'rb') as pickle_file:
train_x, train_y, train_edge_names, test_x, test_y, test_edge_names = pickle.load(pickle_file)
print("Feature length:", len(train_x[0]), "Feature example:", train_x[0], "Label: ", train_y[0])
    # Define input tensor shapes: features x (batch size x feature length), labels y.
x_shape = tf.placeholder('float',[None, len(train_x[0])]) #features
y_shape = tf.placeholder('float') #label
Training(2, 600, x_shape, y_shape, train_x, train_y, test_x, test_y)
|
PassiveVision/Feature-Net-Learn
|
TrainingFeatureNetInf.py
|
Python
|
gpl-3.0
| 4,180
|
[
"NEURON"
] |
9dbc8226aa34432edd5616137d40b7a82cab7bb605cb446b49a81e60b0a0ad45
|
"""Alignment with SNAP: http://snap.cs.berkeley.edu/
"""
import os
from bcbio import utils
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.ngsalign import alignprep, novoalign, postalign
from bcbio.provenance import do
def align(fastq_file, pair_file, index_dir, names, align_dir, data):
"""Perform piped alignment of fastq input files, generating sorted, deduplicated BAM.
Pipes in input, handling paired and split inputs, using interleaving magic
from: https://biowize.wordpress.com/2015/03/26/the-fastest-darn-fastq-decoupling-procedure-i-ever-done-seen/
"""
out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data)))
num_cores = data["config"]["algorithm"].get("num_cores", 1)
resources = config_utils.get_resources("snap", data["config"])
rg_info = novoalign.get_rg_info(names)
if data.get("align_split"):
final_file = out_file
out_file, data = alignprep.setup_combine(final_file, data)
fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data)
fastq_file = fastq_file[2:-1]
if pair_file:
pair_file = pair_file[2:-1]
stream_input = (r"paste <({fastq_file} | paste - - - -) "
r"<({pair_file} | paste - - - -) | tr '\t' '\n'")
else:
stream_input = fastq_file[2:-1]
else:
assert fastq_file.endswith(".gz")
if pair_file:
stream_input = (r"paste <(zcat {fastq_file} | paste - - - -) "
r"<(zcat {pair_file} | paste - - - -) | tr '\t' '\n'")
else:
stream_input = "zcat {fastq_file}"
pair_file = pair_file if pair_file else ""
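    # (How the interleaving idiom above works: each "paste - - - -" folds one
    # 4-line FASTQ record onto a single tab-separated line, the outer paste
    # zips the two per-read streams together, and "tr '\t' '\n'" unfolds the
    # result back into line-oriented, interleaved FASTQ.)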
if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
        with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
if pair_file:
sub_cmd = "paired"
input_cmd = "-pairedInterleavedFastq -"
else:
sub_cmd = "single"
input_cmd = "-fastq -"
stream_input = stream_input.format(**locals())
cmd = ("{stream_input} | snap-aligner {sub_cmd} {index_dir} {input_cmd} "
"-R '{rg_info}' -t {num_cores} -M -o -sam - | ")
do.run(cmd.format(**locals()) + tobam_cl, "SNAP alignment: %s" % names["sample"])
data["work_bam"] = out_file
return data
# Optional galaxy location file. Falls back on remap_index_fn if not found
galaxy_location_file = "snap_indices.loc"
def remap_index_fn(ref_file):
"""Map sequence references to snap reference directory, using standard layout.
"""
snap_dir = os.path.join(os.path.dirname(ref_file), os.pardir, "snap")
assert os.path.exists(snap_dir) and os.path.isdir(snap_dir), snap_dir
return snap_dir
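# Layout sketch assumed by remap_index_fn (hypothetical path): for a
# ref_file ".../Hsapiens/GRCh37/seq/GRCh37.fa" the SNAP index is expected at
# ".../Hsapiens/GRCh37/snap", i.e. a "snap" directory that is a sibling of
# the directory containing the reference FASTA.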
|
Cyberbio-Lab/bcbio-nextgen
|
bcbio/ngsalign/snap.py
|
Python
|
mit
| 2,929
|
[
"Galaxy"
] |
c513a9166e78b7e0c68bfa32b966a7bd42080e53fbb329249db010091aae6c34
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2012-2016 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
from __future__ import unicode_literals
import six
import itertools
import numpy as np
import scipy.signal
from pyannote.core import Timeline
from pyannote.core.segment import Segment, SlidingWindow
from ..stats.gaussian import Gaussian
from pyannote.core.util import pairwise
class SlidingWindowsSegmentation(object):
"""
<---d---><-g-><---d--->
[ L ] [ R ]
[ L ] [ R ]
<-s->
Parameters
----------
duration : float, optional
Set left/right window duration. Defaults to 1 second.
step : float, optional
Set step duration. Defaults to 100ms
    gap : float, optional
        Set gap duration. Defaults to no gap (i.e. 0 second)
    threshold : float, optional
        Keep only local maxima of the divergence curve higher than this
        value. Defaults to 0.
    min_duration : float, optional
        Minimum duration of segments. Defaults to 0 (no minimum).
"""
def __init__(self, duration=1.0, step=0.1, gap=0.0,
threshold=0., min_duration=0., **kwargs):
super(SlidingWindowsSegmentation, self).__init__()
self.duration = duration
self.step = step
self.gap = gap
self.threshold = threshold
self.min_duration = min_duration
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
def diff(self, left, right, feature):
raise NotImplementedError()
def iterdiff(self, feature, focus):
"""(middle, difference) generator
`middle`
`difference`
Parameters
----------
feature : SlidingWindowFeature
Pre-extracted features
"""
sliding_window = SlidingWindow(
duration=self.duration,
step=self.step,
start=focus.start, end=focus.end)
for left in sliding_window:
right = Segment(
start=left.end + self.gap,
end=left.end + self.gap + self.duration
)
middle = .5 * (left.end + right.start)
yield middle, self.diff(left, right, feature)
def apply(self, feature, segmentation=None):
if segmentation is None:
focus = feature.getExtent()
segmentation = Timeline(segments=[focus], uri=None)
result = Timeline()
for focus in segmentation:
x, y = list(zip(*[
(m, d) for m, d in self.iterdiff(feature, focus)
]))
x = np.array(x)
y = np.array(y)
# find local maxima
order = 1
if self.min_duration > 0:
order = int(self.min_duration / self.step)
maxima = scipy.signal.argrelmax(y, order=order)
x = x[maxima]
y = y[maxima]
# only keep high enough local maxima
high_maxima = np.where(y > self.threshold)
# create list of segment boundaries
# do not forget very first and last boundaries
boundaries = itertools.chain(
[focus.start], x[high_maxima], [focus.end]
)
# create list of segments from boundaries
segments = [Segment(*p) for p in pairwise(boundaries)]
result.update(Timeline(segments=segments))
return result
class GaussianDivergenceMixin(object):
def diff(self, left, right, feature):
"""Compute diagonal gaussian divergence between left and right windows
Parameters
----------
left, right : Segment
Left and right window
feature : Feature
Returns
-------
divergence : float
Gaussian divergence between left and right windows
"""
gl = Gaussian(covariance_type='diag')
Xl = feature.crop(left)
gl.fit(Xl)
gr = Gaussian(covariance_type='diag')
Xr = feature.crop(right)
gr.fit(Xr)
try:
divergence = gl.divergence(gr)
        except Exception:
divergence = np.NaN
return divergence
class SegmentationGaussianDivergence(GaussianDivergenceMixin,
SlidingWindowsSegmentation):
pass
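# A minimal usage sketch (assumes `features` is a pyannote
# SlidingWindowFeature, e.g. MFCCs extracted elsewhere):
#
#     segmenter = SegmentationGaussianDivergence(duration=1.0, step=0.1,
#                                                gap=0.0, threshold=1.0,
#                                                min_duration=0.5)
#     boundaries = segmenter.apply(features)  # -> pyannote.core.Timeline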
|
pyannote/pyannote-algorithms
|
pyannote/algorithms/segmentation/sliding_window.py
|
Python
|
mit
| 5,318
|
[
"Gaussian"
] |
d1d3fb29c39fae5815e9d727a453f0ea751a1fda362ca04cf39f0a6b40b24bf7
|
# -*- coding: utf-8 -*-
# LaTeX math to Unicode symbols translation dictionaries.
# Generated with ``write_tex2unichar.py`` from the data in
# http://milde.users.sourceforge.net/LUCR/Math/
# Includes commands from: wasysym, stmaryrd, mathdots, mathabx, esint, bbold, amsxtra, amsmath, amssymb, standard LaTeX
mathaccent = {
'acute': u'\u0301', # x́ COMBINING ACUTE ACCENT
'bar': u'\u0304', # x̄ COMBINING MACRON
'breve': u'\u0306', # x̆ COMBINING BREVE
'check': u'\u030c', # x̌ COMBINING CARON
'ddddot': u'\u20dc', # x⃜ COMBINING FOUR DOTS ABOVE
'dddot': u'\u20db', # x⃛ COMBINING THREE DOTS ABOVE
'ddot': u'\u0308', # ẍ COMBINING DIAERESIS
'dot': u'\u0307', # ẋ COMBINING DOT ABOVE
'grave': u'\u0300', # x̀ COMBINING GRAVE ACCENT
'hat': u'\u0302', # x̂ COMBINING CIRCUMFLEX ACCENT
'mathring': u'\u030a', # x̊ COMBINING RING ABOVE
'not': u'\u0338', # x̸ COMBINING LONG SOLIDUS OVERLAY
'overleftarrow': u'\u20d6', # x⃖ COMBINING LEFT ARROW ABOVE
'overleftrightarrow': u'\u20e1', # x⃡ COMBINING LEFT RIGHT ARROW ABOVE
'overline': u'\u0305', # x̅ COMBINING OVERLINE
'overrightarrow': u'\u20d7', # x⃗ COMBINING RIGHT ARROW ABOVE
'tilde': u'\u0303', # x̃ COMBINING TILDE
'underbar': u'\u0331', # x̱ COMBINING MACRON BELOW
'underleftarrow': u'\u20ee', # x⃮ COMBINING LEFT ARROW BELOW
'underline': u'\u0332', # x̲ COMBINING LOW LINE
'underrightarrow': u'\u20ef', # x⃯ COMBINING RIGHT ARROW BELOW
'vec': u'\u20d7', # x⃗ COMBINING RIGHT ARROW ABOVE
'widehat': u'\u0302', # x̂ COMBINING CIRCUMFLEX ACCENT
'widetilde': u'\u0303', # x̃ COMBINING TILDE
}
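# Note: the mathaccent values are Unicode *combining* characters, so they
# attach to the preceding base character, e.g. u'x' + mathaccent['hat']
# yields u'x\u0302' (x̂).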
mathalpha = {
'Bbbk': u'\U0001d55c', # 𝕜 MATHEMATICAL DOUBLE-STRUCK SMALL K
'Delta': u'\u0394', # Δ GREEK CAPITAL LETTER DELTA
'Gamma': u'\u0393', # Γ GREEK CAPITAL LETTER GAMMA
'Im': u'\u2111', # ℑ BLACK-LETTER CAPITAL I
'Lambda': u'\u039b', # Λ GREEK CAPITAL LETTER LAMDA
'Omega': u'\u03a9', # Ω GREEK CAPITAL LETTER OMEGA
'Phi': u'\u03a6', # Φ GREEK CAPITAL LETTER PHI
'Pi': u'\u03a0', # Π GREEK CAPITAL LETTER PI
'Psi': u'\u03a8', # Ψ GREEK CAPITAL LETTER PSI
'Re': u'\u211c', # ℜ BLACK-LETTER CAPITAL R
'Sigma': u'\u03a3', # Σ GREEK CAPITAL LETTER SIGMA
'Theta': u'\u0398', # Θ GREEK CAPITAL LETTER THETA
'Upsilon': u'\u03a5', # Υ GREEK CAPITAL LETTER UPSILON
'Xi': u'\u039e', # Ξ GREEK CAPITAL LETTER XI
'aleph': u'\u2135', # ℵ ALEF SYMBOL
'alpha': u'\u03b1', # α GREEK SMALL LETTER ALPHA
'beta': u'\u03b2', # β GREEK SMALL LETTER BETA
'beth': u'\u2136', # ℶ BET SYMBOL
'chi': u'\u03c7', # χ GREEK SMALL LETTER CHI
'daleth': u'\u2138', # ℸ DALET SYMBOL
'delta': u'\u03b4', # δ GREEK SMALL LETTER DELTA
'digamma': u'\u03dc', # Ϝ GREEK LETTER DIGAMMA
'ell': u'\u2113', # ℓ SCRIPT SMALL L
'epsilon': u'\u03f5', # ϵ GREEK LUNATE EPSILON SYMBOL
'eta': u'\u03b7', # η GREEK SMALL LETTER ETA
'eth': u'\xf0', # ð LATIN SMALL LETTER ETH
'gamma': u'\u03b3', # γ GREEK SMALL LETTER GAMMA
'gimel': u'\u2137', # ℷ GIMEL SYMBOL
'hbar': u'\u210f', # ℏ PLANCK CONSTANT OVER TWO PI
'hslash': u'\u210f', # ℏ PLANCK CONSTANT OVER TWO PI
'imath': u'\u0131', # ı LATIN SMALL LETTER DOTLESS I
'iota': u'\u03b9', # ι GREEK SMALL LETTER IOTA
'jmath': u'\u0237', # ȷ LATIN SMALL LETTER DOTLESS J
'kappa': u'\u03ba', # κ GREEK SMALL LETTER KAPPA
'lambda': u'\u03bb', # λ GREEK SMALL LETTER LAMDA
'mu': u'\u03bc', # μ GREEK SMALL LETTER MU
'nu': u'\u03bd', # ν GREEK SMALL LETTER NU
'omega': u'\u03c9', # ω GREEK SMALL LETTER OMEGA
'phi': u'\u03d5', # ϕ GREEK PHI SYMBOL
'pi': u'\u03c0', # π GREEK SMALL LETTER PI
'psi': u'\u03c8', # ψ GREEK SMALL LETTER PSI
'rho': u'\u03c1', # ρ GREEK SMALL LETTER RHO
'sigma': u'\u03c3', # σ GREEK SMALL LETTER SIGMA
'tau': u'\u03c4', # τ GREEK SMALL LETTER TAU
'theta': u'\u03b8', # θ GREEK SMALL LETTER THETA
'upsilon': u'\u03c5', # υ GREEK SMALL LETTER UPSILON
'varDelta': u'\U0001d6e5', # 𝛥 MATHEMATICAL ITALIC CAPITAL DELTA
'varGamma': u'\U0001d6e4', # 𝛤 MATHEMATICAL ITALIC CAPITAL GAMMA
'varLambda': u'\U0001d6ec', # 𝛬 MATHEMATICAL ITALIC CAPITAL LAMDA
'varOmega': u'\U0001d6fa', # 𝛺 MATHEMATICAL ITALIC CAPITAL OMEGA
'varPhi': u'\U0001d6f7', # 𝛷 MATHEMATICAL ITALIC CAPITAL PHI
'varPi': u'\U0001d6f1', # 𝛱 MATHEMATICAL ITALIC CAPITAL PI
'varPsi': u'\U0001d6f9', # 𝛹 MATHEMATICAL ITALIC CAPITAL PSI
'varSigma': u'\U0001d6f4', # 𝛴 MATHEMATICAL ITALIC CAPITAL SIGMA
'varTheta': u'\U0001d6e9', # 𝛩 MATHEMATICAL ITALIC CAPITAL THETA
'varUpsilon': u'\U0001d6f6', # 𝛶 MATHEMATICAL ITALIC CAPITAL UPSILON
'varXi': u'\U0001d6ef', # 𝛯 MATHEMATICAL ITALIC CAPITAL XI
'varepsilon': u'\u03b5', # ε GREEK SMALL LETTER EPSILON
'varkappa': u'\U0001d718', # 𝜘 MATHEMATICAL ITALIC KAPPA SYMBOL
'varphi': u'\u03c6', # φ GREEK SMALL LETTER PHI
'varpi': u'\u03d6', # ϖ GREEK PI SYMBOL
'varrho': u'\u03f1', # ϱ GREEK RHO SYMBOL
'varsigma': u'\u03c2', # ς GREEK SMALL LETTER FINAL SIGMA
'vartheta': u'\u03d1', # ϑ GREEK THETA SYMBOL
'wp': u'\u2118', # ℘ SCRIPT CAPITAL P
'xi': u'\u03be', # ξ GREEK SMALL LETTER XI
'zeta': u'\u03b6', # ζ GREEK SMALL LETTER ZETA
}
mathbin = {
'Cap': u'\u22d2', # ⋒ DOUBLE INTERSECTION
'Circle': u'\u25cb', # ○ WHITE CIRCLE
'Cup': u'\u22d3', # ⋓ DOUBLE UNION
'LHD': u'\u25c0', # ◀ BLACK LEFT-POINTING TRIANGLE
'RHD': u'\u25b6', # ▶ BLACK RIGHT-POINTING TRIANGLE
'amalg': u'\u2a3f', # ⨿ AMALGAMATION OR COPRODUCT
'ast': u'\u2217', # ∗ ASTERISK OPERATOR
'barwedge': u'\u22bc', # ⊼ NAND
'bigtriangledown': u'\u25bd', # ▽ WHITE DOWN-POINTING TRIANGLE
'bigtriangleup': u'\u25b3', # △ WHITE UP-POINTING TRIANGLE
'bindnasrepma': u'\u214b', # ⅋ TURNED AMPERSAND
'blacklozenge': u'\u29eb', # ⧫ BLACK LOZENGE
'blacktriangledown': u'\u25be', # ▾ BLACK DOWN-POINTING SMALL TRIANGLE
'blacktriangleleft': u'\u25c2', # ◂ BLACK LEFT-POINTING SMALL TRIANGLE
'blacktriangleright': u'\u25b8', # ▸ BLACK RIGHT-POINTING SMALL TRIANGLE
'blacktriangleup': u'\u25b4', # ▴ BLACK UP-POINTING SMALL TRIANGLE
'boxast': u'\u29c6', # ⧆ SQUARED ASTERISK
'boxbar': u'\u25eb', # ◫ WHITE SQUARE WITH VERTICAL BISECTING LINE
'boxbox': u'\u29c8', # ⧈ SQUARED SQUARE
'boxbslash': u'\u29c5', # ⧅ SQUARED FALLING DIAGONAL SLASH
'boxcircle': u'\u29c7', # ⧇ SQUARED SMALL CIRCLE
'boxdot': u'\u22a1', # ⊡ SQUARED DOT OPERATOR
'boxminus': u'\u229f', # ⊟ SQUARED MINUS
'boxplus': u'\u229e', # ⊞ SQUARED PLUS
'boxslash': u'\u29c4', # ⧄ SQUARED RISING DIAGONAL SLASH
'boxtimes': u'\u22a0', # ⊠ SQUARED TIMES
'bullet': u'\u2219', # ∙ BULLET OPERATOR
'cap': u'\u2229', # ∩ INTERSECTION
'cdot': u'\u22c5', # ⋅ DOT OPERATOR
'circ': u'\u2218', # ∘ RING OPERATOR
'circledast': u'\u229b', # ⊛ CIRCLED ASTERISK OPERATOR
'circledcirc': u'\u229a', # ⊚ CIRCLED RING OPERATOR
'circleddash': u'\u229d', # ⊝ CIRCLED DASH
'cup': u'\u222a', # ∪ UNION
'curlyvee': u'\u22ce', # ⋎ CURLY LOGICAL OR
'curlywedge': u'\u22cf', # ⋏ CURLY LOGICAL AND
'dagger': u'\u2020', # † DAGGER
'ddagger': u'\u2021', # ‡ DOUBLE DAGGER
'diamond': u'\u22c4', # ⋄ DIAMOND OPERATOR
'div': u'\xf7', # ÷ DIVISION SIGN
'divideontimes': u'\u22c7', # ⋇ DIVISION TIMES
'dotplus': u'\u2214', # ∔ DOT PLUS
'doublebarwedge': u'\u2a5e', # ⩞ LOGICAL AND WITH DOUBLE OVERBAR
'intercal': u'\u22ba', # ⊺ INTERCALATE
'interleave': u'\u2af4', # ⫴ TRIPLE VERTICAL BAR BINARY RELATION
'land': u'\u2227', # ∧ LOGICAL AND
'leftthreetimes': u'\u22cb', # ⋋ LEFT SEMIDIRECT PRODUCT
'lhd': u'\u25c1', # ◁ WHITE LEFT-POINTING TRIANGLE
'lor': u'\u2228', # ∨ LOGICAL OR
'ltimes': u'\u22c9', # ⋉ LEFT NORMAL FACTOR SEMIDIRECT PRODUCT
'mp': u'\u2213', # ∓ MINUS-OR-PLUS SIGN
'odot': u'\u2299', # ⊙ CIRCLED DOT OPERATOR
'ominus': u'\u2296', # ⊖ CIRCLED MINUS
'oplus': u'\u2295', # ⊕ CIRCLED PLUS
'oslash': u'\u2298', # ⊘ CIRCLED DIVISION SLASH
'otimes': u'\u2297', # ⊗ CIRCLED TIMES
'pm': u'\xb1', # ± PLUS-MINUS SIGN
'rhd': u'\u25b7', # ▷ WHITE RIGHT-POINTING TRIANGLE
'rightthreetimes': u'\u22cc', # ⋌ RIGHT SEMIDIRECT PRODUCT
'rtimes': u'\u22ca', # ⋊ RIGHT NORMAL FACTOR SEMIDIRECT PRODUCT
'setminus': u'\u29f5', # ⧵ REVERSE SOLIDUS OPERATOR
'slash': u'\u2215', # ∕ DIVISION SLASH
'smallsetminus': u'\u2216', # ∖ SET MINUS
'smalltriangledown': u'\u25bf', # ▿ WHITE DOWN-POINTING SMALL TRIANGLE
'smalltriangleleft': u'\u25c3', # ◃ WHITE LEFT-POINTING SMALL TRIANGLE
'smalltriangleright': u'\u25b9', # ▹ WHITE RIGHT-POINTING SMALL TRIANGLE
'smalltriangleup': u'\u25b5', # ▵ WHITE UP-POINTING SMALL TRIANGLE
'sqcap': u'\u2293', # ⊓ SQUARE CAP
'sqcup': u'\u2294', # ⊔ SQUARE CUP
'sslash': u'\u2afd', # ⫽ DOUBLE SOLIDUS OPERATOR
'star': u'\u22c6', # ⋆ STAR OPERATOR
'talloblong': u'\u2afe', # ⫾ WHITE VERTICAL BAR
'times': u'\xd7', # × MULTIPLICATION SIGN
'triangle': u'\u25b3', # △ WHITE UP-POINTING TRIANGLE
'triangledown': u'\u25bf', # ▿ WHITE DOWN-POINTING SMALL TRIANGLE
'triangleleft': u'\u25c3', # ◃ WHITE LEFT-POINTING SMALL TRIANGLE
'triangleright': u'\u25b9', # ▹ WHITE RIGHT-POINTING SMALL TRIANGLE
'uplus': u'\u228e', # ⊎ MULTISET UNION
'vartriangle': u'\u25b3', # △ WHITE UP-POINTING TRIANGLE
'vee': u'\u2228', # ∨ LOGICAL OR
'veebar': u'\u22bb', # ⊻ XOR
'wedge': u'\u2227', # ∧ LOGICAL AND
'wr': u'\u2240', # ≀ WREATH PRODUCT
}
mathclose = {
'Rbag': u'\u27c6', # ⟆ RIGHT S-SHAPED BAG DELIMITER
'lrcorner': u'\u231f', # ⌟ BOTTOM RIGHT CORNER
'rangle': u'\u27e9', # ⟩ MATHEMATICAL RIGHT ANGLE BRACKET
'rbag': u'\u27c6', # ⟆ RIGHT S-SHAPED BAG DELIMITER
'rbrace': u'}', # } RIGHT CURLY BRACKET
'rbrack': u']', # ] RIGHT SQUARE BRACKET
'rceil': u'\u2309', # ⌉ RIGHT CEILING
'rfloor': u'\u230b', # ⌋ RIGHT FLOOR
'rgroup': u'\u27ef', # ⟯ MATHEMATICAL RIGHT FLATTENED PARENTHESIS
'rrbracket': u'\u27e7', # ⟧ MATHEMATICAL RIGHT WHITE SQUARE BRACKET
'rrparenthesis': u'\u2988', # ⦈ Z NOTATION RIGHT IMAGE BRACKET
'urcorner': u'\u231d', # ⌝ TOP RIGHT CORNER
'}': u'}', # } RIGHT CURLY BRACKET
}
mathfence = {
'Vert': u'\u2016', # ‖ DOUBLE VERTICAL LINE
'vert': u'|', # | VERTICAL LINE
'|': u'\u2016', # ‖ DOUBLE VERTICAL LINE
}
mathop = {
'Join': u'\u2a1d', # ⨝ JOIN
'bigcap': u'\u22c2', # ⋂ N-ARY INTERSECTION
'bigcup': u'\u22c3', # ⋃ N-ARY UNION
'biginterleave': u'\u2afc', # ⫼ LARGE TRIPLE VERTICAL BAR OPERATOR
'bigodot': u'\u2a00', # ⨀ N-ARY CIRCLED DOT OPERATOR
'bigoplus': u'\u2a01', # ⨁ N-ARY CIRCLED PLUS OPERATOR
'bigotimes': u'\u2a02', # ⨂ N-ARY CIRCLED TIMES OPERATOR
'bigsqcup': u'\u2a06', # ⨆ N-ARY SQUARE UNION OPERATOR
'biguplus': u'\u2a04', # ⨄ N-ARY UNION OPERATOR WITH PLUS
'bigvee': u'\u22c1', # ⋁ N-ARY LOGICAL OR
'bigwedge': u'\u22c0', # ⋀ N-ARY LOGICAL AND
'coprod': u'\u2210', # ∐ N-ARY COPRODUCT
'fatsemi': u'\u2a1f', # ⨟ Z NOTATION SCHEMA COMPOSITION
'fint': u'\u2a0f', # ⨏ INTEGRAL AVERAGE WITH SLASH
'iiiint': u'\u2a0c', # ⨌ QUADRUPLE INTEGRAL OPERATOR
'iiint': u'\u222d', # ∭ TRIPLE INTEGRAL
'iint': u'\u222c', # ∬ DOUBLE INTEGRAL
'int': u'\u222b', # ∫ INTEGRAL
'oiint': u'\u222f', # ∯ SURFACE INTEGRAL
'oint': u'\u222e', # ∮ CONTOUR INTEGRAL
'ointctrclockwise': u'\u2233', # ∳ ANTICLOCKWISE CONTOUR INTEGRAL
'prod': u'\u220f', # ∏ N-ARY PRODUCT
'sqint': u'\u2a16', # ⨖ QUATERNION INTEGRAL OPERATOR
'sum': u'\u2211', # ∑ N-ARY SUMMATION
'varointclockwise': u'\u2232', # ∲ CLOCKWISE CONTOUR INTEGRAL
}
mathopen = {
'Lbag': u'\u27c5', # ⟅ LEFT S-SHAPED BAG DELIMITER
'langle': u'\u27e8', # ⟨ MATHEMATICAL LEFT ANGLE BRACKET
'lbag': u'\u27c5', # ⟅ LEFT S-SHAPED BAG DELIMITER
'lbrace': u'{', # { LEFT CURLY BRACKET
'lbrack': u'[', # [ LEFT SQUARE BRACKET
'lceil': u'\u2308', # ⌈ LEFT CEILING
'lfloor': u'\u230a', # ⌊ LEFT FLOOR
'lgroup': u'\u27ee', # ⟮ MATHEMATICAL LEFT FLATTENED PARENTHESIS
'llbracket': u'\u27e6', # ⟦ MATHEMATICAL LEFT WHITE SQUARE BRACKET
'llcorner': u'\u231e', # ⌞ BOTTOM LEFT CORNER
'llparenthesis': u'\u2987', # ⦇ Z NOTATION LEFT IMAGE BRACKET
'ulcorner': u'\u231c', # ⌜ TOP LEFT CORNER
'{': u'{', # { LEFT CURLY BRACKET
}
mathord = {
'#': u'#', # # NUMBER SIGN
'$': u'$', # $ DOLLAR SIGN
'%': u'%', # % PERCENT SIGN
'&': u'&', # & AMPERSAND
'AC': u'\u223f', # ∿ SINE WAVE
'APLcomment': u'\u235d', # ⍝ APL FUNCTIONAL SYMBOL UP SHOE JOT
'APLdownarrowbox': u'\u2357', # ⍗ APL FUNCTIONAL SYMBOL QUAD DOWNWARDS ARROW
'APLinput': u'\u235e', # ⍞ APL FUNCTIONAL SYMBOL QUOTE QUAD
'APLinv': u'\u2339', # ⌹ APL FUNCTIONAL SYMBOL QUAD DIVIDE
'APLleftarrowbox': u'\u2347', # ⍇ APL FUNCTIONAL SYMBOL QUAD LEFTWARDS ARROW
'APLlog': u'\u235f', # ⍟ APL FUNCTIONAL SYMBOL CIRCLE STAR
'APLrightarrowbox': u'\u2348', # ⍈ APL FUNCTIONAL SYMBOL QUAD RIGHTWARDS ARROW
'APLuparrowbox': u'\u2350', # ⍐ APL FUNCTIONAL SYMBOL QUAD UPWARDS ARROW
'Aries': u'\u2648', # ♈ ARIES
'CIRCLE': u'\u25cf', # ● BLACK CIRCLE
'CheckedBox': u'\u2611', # ☑ BALLOT BOX WITH CHECK
'Diamond': u'\u25c7', # ◇ WHITE DIAMOND
'Finv': u'\u2132', # Ⅎ TURNED CAPITAL F
'Game': u'\u2141', # ⅁ TURNED SANS-SERIF CAPITAL G
'Gemini': u'\u264a', # ♊ GEMINI
'Jupiter': u'\u2643', # ♃ JUPITER
'LEFTCIRCLE': u'\u25d6', # ◖ LEFT HALF BLACK CIRCLE
'LEFTcircle': u'\u25d0', # ◐ CIRCLE WITH LEFT HALF BLACK
'Leo': u'\u264c', # ♌ LEO
'Libra': u'\u264e', # ♎ LIBRA
'Mars': u'\u2642', # ♂ MALE SIGN
'Mercury': u'\u263f', # ☿ MERCURY
'Neptune': u'\u2646', # ♆ NEPTUNE
'Pluto': u'\u2647', # ♇ PLUTO
'RIGHTCIRCLE': u'\u25d7', # ◗ RIGHT HALF BLACK CIRCLE
'RIGHTcircle': u'\u25d1', # ◑ CIRCLE WITH RIGHT HALF BLACK
'Saturn': u'\u2644', # ♄ SATURN
'Scorpio': u'\u264f', # ♏ SCORPIUS
'Square': u'\u2610', # ☐ BALLOT BOX
'Sun': u'\u2609', # ☉ SUN
'Taurus': u'\u2649', # ♉ TAURUS
'Uranus': u'\u2645', # ♅ URANUS
'Venus': u'\u2640', # ♀ FEMALE SIGN
'XBox': u'\u2612', # ☒ BALLOT BOX WITH X
'Yup': u'\u2144', # ⅄ TURNED SANS-SERIF CAPITAL Y
'_': u'_', # _ LOW LINE
'angle': u'\u2220', # ∠ ANGLE
'aquarius': u'\u2652', # ♒ AQUARIUS
'aries': u'\u2648', # ♈ ARIES
'ast': u'*', # * ASTERISK
'backepsilon': u'\u03f6', # ϶ GREEK REVERSED LUNATE EPSILON SYMBOL
'backprime': u'\u2035', # ‵ REVERSED PRIME
'backslash': u'\\', # \ REVERSE SOLIDUS
'because': u'\u2235', # ∵ BECAUSE
'bigstar': u'\u2605', # ★ BLACK STAR
'binampersand': u'&', # & AMPERSAND
'blacklozenge': u'\u2b27', # ⬧ BLACK MEDIUM LOZENGE
'blacksmiley': u'\u263b', # ☻ BLACK SMILING FACE
'blacksquare': u'\u25fc', # ◼ BLACK MEDIUM SQUARE
'bot': u'\u22a5', # ⊥ UP TACK
'boy': u'\u2642', # ♂ MALE SIGN
'cancer': u'\u264b', # ♋ CANCER
'capricornus': u'\u2651', # ♑ CAPRICORN
'cdots': u'\u22ef', # ⋯ MIDLINE HORIZONTAL ELLIPSIS
'cent': u'\xa2', # ¢ CENT SIGN
'centerdot': u'\u2b1d', # ⬝ BLACK VERY SMALL SQUARE
'checkmark': u'\u2713', # ✓ CHECK MARK
'circlearrowleft': u'\u21ba', # ↺ ANTICLOCKWISE OPEN CIRCLE ARROW
'circlearrowright': u'\u21bb', # ↻ CLOCKWISE OPEN CIRCLE ARROW
'circledR': u'\xae', # ® REGISTERED SIGN
'circledcirc': u'\u25ce', # ◎ BULLSEYE
'clubsuit': u'\u2663', # ♣ BLACK CLUB SUIT
'complement': u'\u2201', # ∁ COMPLEMENT
'dasharrow': u'\u21e2', # ⇢ RIGHTWARDS DASHED ARROW
'dashleftarrow': u'\u21e0', # ⇠ LEFTWARDS DASHED ARROW
'dashrightarrow': u'\u21e2', # ⇢ RIGHTWARDS DASHED ARROW
'diameter': u'\u2300', # ⌀ DIAMETER SIGN
'diamondsuit': u'\u2662', # ♢ WHITE DIAMOND SUIT
'earth': u'\u2641', # ♁ EARTH
'exists': u'\u2203', # ∃ THERE EXISTS
'female': u'\u2640', # ♀ FEMALE SIGN
'flat': u'\u266d', # ♭ MUSIC FLAT SIGN
'forall': u'\u2200', # ∀ FOR ALL
'fourth': u'\u2057', # ⁗ QUADRUPLE PRIME
'frownie': u'\u2639', # ☹ WHITE FROWNING FACE
'gemini': u'\u264a', # ♊ GEMINI
'girl': u'\u2640', # ♀ FEMALE SIGN
'heartsuit': u'\u2661', # ♡ WHITE HEART SUIT
'infty': u'\u221e', # ∞ INFINITY
'invneg': u'\u2310', # ⌐ REVERSED NOT SIGN
'jupiter': u'\u2643', # ♃ JUPITER
'ldots': u'\u2026', # … HORIZONTAL ELLIPSIS
'leftmoon': u'\u263e', # ☾ LAST QUARTER MOON
'leftturn': u'\u21ba', # ↺ ANTICLOCKWISE OPEN CIRCLE ARROW
'leo': u'\u264c', # ♌ LEO
'libra': u'\u264e', # ♎ LIBRA
'lnot': u'\xac', # ¬ NOT SIGN
'lozenge': u'\u25ca', # ◊ LOZENGE
'male': u'\u2642', # ♂ MALE SIGN
'maltese': u'\u2720', # ✠ MALTESE CROSS
'mathdollar': u'$', # $ DOLLAR SIGN
'measuredangle': u'\u2221', # ∡ MEASURED ANGLE
'mercury': u'\u263f', # ☿ MERCURY
'mho': u'\u2127', # ℧ INVERTED OHM SIGN
'nabla': u'\u2207', # ∇ NABLA
'natural': u'\u266e', # ♮ MUSIC NATURAL SIGN
'neg': u'\xac', # ¬ NOT SIGN
'neptune': u'\u2646', # ♆ NEPTUNE
'nexists': u'\u2204', # ∄ THERE DOES NOT EXIST
'notbackslash': u'\u2340', # ⍀ APL FUNCTIONAL SYMBOL BACKSLASH BAR
'partial': u'\u2202', # ∂ PARTIAL DIFFERENTIAL
'pisces': u'\u2653', # ♓ PISCES
'pluto': u'\u2647', # ♇ PLUTO
'pounds': u'\xa3', # £ POUND SIGN
'prime': u'\u2032', # ′ PRIME
'quarternote': u'\u2669', # ♩ QUARTER NOTE
'rightmoon': u'\u263d', # ☽ FIRST QUARTER MOON
'rightturn': u'\u21bb', # ↻ CLOCKWISE OPEN CIRCLE ARROW
'sagittarius': u'\u2650', # ♐ SAGITTARIUS
'saturn': u'\u2644', # ♄ SATURN
'scorpio': u'\u264f', # ♏ SCORPIUS
'second': u'\u2033', # ″ DOUBLE PRIME
'sharp': u'\u266f', # ♯ MUSIC SHARP SIGN
'sim': u'~', # ~ TILDE
'slash': u'/', # / SOLIDUS
'smiley': u'\u263a', # ☺ WHITE SMILING FACE
'spadesuit': u'\u2660', # ♠ BLACK SPADE SUIT
'spddot': u'\xa8', # ¨ DIAERESIS
'sphat': u'^', # ^ CIRCUMFLEX ACCENT
'sphericalangle': u'\u2222', # ∢ SPHERICAL ANGLE
'sptilde': u'~', # ~ TILDE
'square': u'\u25fb', # ◻ WHITE MEDIUM SQUARE
'sun': u'\u263c', # ☼ WHITE SUN WITH RAYS
'taurus': u'\u2649', # ♉ TAURUS
'therefore': u'\u2234', # ∴ THEREFORE
'third': u'\u2034', # ‴ TRIPLE PRIME
'top': u'\u22a4', # ⊤ DOWN TACK
'triangleleft': u'\u25c5', # ◅ WHITE LEFT-POINTING POINTER
'triangleright': u'\u25bb', # ▻ WHITE RIGHT-POINTING POINTER
'twonotes': u'\u266b', # ♫ BEAMED EIGHTH NOTES
'uranus': u'\u2645', # ♅ URANUS
'varEarth': u'\u2641', # ♁ EARTH
'varnothing': u'\u2205', # ∅ EMPTY SET
'virgo': u'\u264d', # ♍ VIRGO
'wasylozenge': u'\u2311', # ⌑ SQUARE LOZENGE
'wasytherefore': u'\u2234', # ∴ THEREFORE
'yen': u'\xa5', # ¥ YEN SIGN
}
mathover = {
'overbrace': u'\u23de', # ⏞ TOP CURLY BRACKET
'wideparen': u'\u23dc', # ⏜ TOP PARENTHESIS
}
mathradical = {
'sqrt': u'\u221a', # √ SQUARE ROOT
'sqrt[3]': u'\u221b', # ∛ CUBE ROOT
'sqrt[4]': u'\u221c', # ∜ FOURTH ROOT
}
mathrel = {
'Bumpeq': u'\u224e', # ≎ GEOMETRICALLY EQUIVALENT TO
'Doteq': u'\u2251', # ≑ GEOMETRICALLY EQUAL TO
'Downarrow': u'\u21d3', # ⇓ DOWNWARDS DOUBLE ARROW
'Leftarrow': u'\u21d0', # ⇐ LEFTWARDS DOUBLE ARROW
'Leftrightarrow': u'\u21d4', # ⇔ LEFT RIGHT DOUBLE ARROW
'Lleftarrow': u'\u21da', # ⇚ LEFTWARDS TRIPLE ARROW
'Longleftarrow': u'\u27f8', # ⟸ LONG LEFTWARDS DOUBLE ARROW
'Longleftrightarrow': u'\u27fa', # ⟺ LONG LEFT RIGHT DOUBLE ARROW
'Longmapsfrom': u'\u27fd', # ⟽ LONG LEFTWARDS DOUBLE ARROW FROM BAR
'Longmapsto': u'\u27fe', # ⟾ LONG RIGHTWARDS DOUBLE ARROW FROM BAR
'Longrightarrow': u'\u27f9', # ⟹ LONG RIGHTWARDS DOUBLE ARROW
'Lsh': u'\u21b0', # ↰ UPWARDS ARROW WITH TIP LEFTWARDS
'Mapsfrom': u'\u2906', # ⤆ LEFTWARDS DOUBLE ARROW FROM BAR
'Mapsto': u'\u2907', # ⤇ RIGHTWARDS DOUBLE ARROW FROM BAR
'Rightarrow': u'\u21d2', # ⇒ RIGHTWARDS DOUBLE ARROW
'Rrightarrow': u'\u21db', # ⇛ RIGHTWARDS TRIPLE ARROW
'Rsh': u'\u21b1', # ↱ UPWARDS ARROW WITH TIP RIGHTWARDS
'Subset': u'\u22d0', # ⋐ DOUBLE SUBSET
'Supset': u'\u22d1', # ⋑ DOUBLE SUPERSET
'Uparrow': u'\u21d1', # ⇑ UPWARDS DOUBLE ARROW
'Updownarrow': u'\u21d5', # ⇕ UP DOWN DOUBLE ARROW
'VDash': u'\u22ab', # ⊫ DOUBLE VERTICAL BAR DOUBLE RIGHT TURNSTILE
'Vdash': u'\u22a9', # ⊩ FORCES
'Vvdash': u'\u22aa', # ⊪ TRIPLE VERTICAL BAR RIGHT TURNSTILE
'apprge': u'\u2273', # ≳ GREATER-THAN OR EQUIVALENT TO
'apprle': u'\u2272', # ≲ LESS-THAN OR EQUIVALENT TO
'approx': u'\u2248', # ≈ ALMOST EQUAL TO
'approxeq': u'\u224a', # ≊ ALMOST EQUAL OR EQUAL TO
'asymp': u'\u224d', # ≍ EQUIVALENT TO
'backsim': u'\u223d', # ∽ REVERSED TILDE
'backsimeq': u'\u22cd', # ⋍ REVERSED TILDE EQUALS
'barin': u'\u22f6', # ⋶ ELEMENT OF WITH OVERBAR
'barleftharpoon': u'\u296b', # ⥫ LEFTWARDS HARPOON WITH BARB DOWN BELOW LONG DASH
'barrightharpoon': u'\u296d', # ⥭ RIGHTWARDS HARPOON WITH BARB DOWN BELOW LONG DASH
'between': u'\u226c', # ≬ BETWEEN
'bowtie': u'\u22c8', # ⋈ BOWTIE
'bumpeq': u'\u224f', # ≏ DIFFERENCE BETWEEN
'circeq': u'\u2257', # ≗ RING EQUAL TO
'coloneq': u'\u2254', # ≔ COLON EQUALS
'cong': u'\u2245', # ≅ APPROXIMATELY EQUAL TO
'corresponds': u'\u2259', # ≙ ESTIMATES
'curlyeqprec': u'\u22de', # ⋞ EQUAL TO OR PRECEDES
'curlyeqsucc': u'\u22df', # ⋟ EQUAL TO OR SUCCEEDS
'curvearrowleft': u'\u21b6', # ↶ ANTICLOCKWISE TOP SEMICIRCLE ARROW
'curvearrowright': u'\u21b7', # ↷ CLOCKWISE TOP SEMICIRCLE ARROW
'dashv': u'\u22a3', # ⊣ LEFT TACK
'ddots': u'\u22f1', # ⋱ DOWN RIGHT DIAGONAL ELLIPSIS
'dlsh': u'\u21b2', # ↲ DOWNWARDS ARROW WITH TIP LEFTWARDS
'doteq': u'\u2250', # ≐ APPROACHES THE LIMIT
'doteqdot': u'\u2251', # ≑ GEOMETRICALLY EQUAL TO
'downarrow': u'\u2193', # ↓ DOWNWARDS ARROW
'downdownarrows': u'\u21ca', # ⇊ DOWNWARDS PAIRED ARROWS
'downdownharpoons': u'\u2965', # ⥥ DOWNWARDS HARPOON WITH BARB LEFT BESIDE DOWNWARDS HARPOON WITH BARB RIGHT
'downharpoonleft': u'\u21c3', # ⇃ DOWNWARDS HARPOON WITH BARB LEFTWARDS
'downharpoonright': u'\u21c2', # ⇂ DOWNWARDS HARPOON WITH BARB RIGHTWARDS
'downuparrows': u'\u21f5', # ⇵ DOWNWARDS ARROW LEFTWARDS OF UPWARDS ARROW
'downupharpoons': u'\u296f', # ⥯ DOWNWARDS HARPOON WITH BARB LEFT BESIDE UPWARDS HARPOON WITH BARB RIGHT
'drsh': u'\u21b3', # ↳ DOWNWARDS ARROW WITH TIP RIGHTWARDS
'eqcirc': u'\u2256', # ≖ RING IN EQUAL TO
'eqcolon': u'\u2255', # ≕ EQUALS COLON
'eqsim': u'\u2242', # ≂ MINUS TILDE
'eqslantgtr': u'\u2a96', # ⪖ SLANTED EQUAL TO OR GREATER-THAN
'eqslantless': u'\u2a95', # ⪕ SLANTED EQUAL TO OR LESS-THAN
'equiv': u'\u2261', # ≡ IDENTICAL TO
'fallingdotseq': u'\u2252', # ≒ APPROXIMATELY EQUAL TO OR THE IMAGE OF
'frown': u'\u2322', # ⌢ FROWN
'ge': u'\u2265', # ≥ GREATER-THAN OR EQUAL TO
'geq': u'\u2265', # ≥ GREATER-THAN OR EQUAL TO
'geqq': u'\u2267', # ≧ GREATER-THAN OVER EQUAL TO
'geqslant': u'\u2a7e', # ⩾ GREATER-THAN OR SLANTED EQUAL TO
'gets': u'\u2190', # ← LEFTWARDS ARROW
'gg': u'\u226b', # ≫ MUCH GREATER-THAN
'ggcurly': u'\u2abc', # ⪼ DOUBLE SUCCEEDS
'ggg': u'\u22d9', # ⋙ VERY MUCH GREATER-THAN
'gnapprox': u'\u2a8a', # ⪊ GREATER-THAN AND NOT APPROXIMATE
'gneq': u'\u2a88', # ⪈ GREATER-THAN AND SINGLE-LINE NOT EQUAL TO
'gneqq': u'\u2269', # ≩ GREATER-THAN BUT NOT EQUAL TO
'gnsim': u'\u22e7', # ⋧ GREATER-THAN BUT NOT EQUIVALENT TO
'gtrapprox': u'\u2a86', # ⪆ GREATER-THAN OR APPROXIMATE
'gtrdot': u'\u22d7', # ⋗ GREATER-THAN WITH DOT
'gtreqless': u'\u22db', # ⋛ GREATER-THAN EQUAL TO OR LESS-THAN
'gtreqqless': u'\u2a8c', # ⪌ GREATER-THAN ABOVE DOUBLE-LINE EQUAL ABOVE LESS-THAN
'gtrless': u'\u2277', # ≷ GREATER-THAN OR LESS-THAN
'gtrsim': u'\u2273', # ≳ GREATER-THAN OR EQUIVALENT TO
'hash': u'\u22d5', # ⋕ EQUAL AND PARALLEL TO
'hookleftarrow': u'\u21a9', # ↩ LEFTWARDS ARROW WITH HOOK
'hookrightarrow': u'\u21aa', # ↪ RIGHTWARDS ARROW WITH HOOK
'iddots': u'\u22f0', # ⋰ UP RIGHT DIAGONAL ELLIPSIS
'impliedby': u'\u27f8', # ⟸ LONG LEFTWARDS DOUBLE ARROW
'implies': u'\u27f9', # ⟹ LONG RIGHTWARDS DOUBLE ARROW
'in': u'\u2208', # ∈ ELEMENT OF
'le': u'\u2264', # ≤ LESS-THAN OR EQUAL TO
'leftarrow': u'\u2190', # ← LEFTWARDS ARROW
'leftarrowtail': u'\u21a2', # ↢ LEFTWARDS ARROW WITH TAIL
'leftarrowtriangle': u'\u21fd', # ⇽ LEFTWARDS OPEN-HEADED ARROW
'leftbarharpoon': u'\u296a', # ⥪ LEFTWARDS HARPOON WITH BARB UP ABOVE LONG DASH
'leftharpoondown': u'\u21bd', # ↽ LEFTWARDS HARPOON WITH BARB DOWNWARDS
'leftharpoonup': u'\u21bc', # ↼ LEFTWARDS HARPOON WITH BARB UPWARDS
'leftleftarrows': u'\u21c7', # ⇇ LEFTWARDS PAIRED ARROWS
'leftleftharpoons': u'\u2962', # ⥢ LEFTWARDS HARPOON WITH BARB UP ABOVE LEFTWARDS HARPOON WITH BARB DOWN
'leftrightarrow': u'\u2194', # ↔ LEFT RIGHT ARROW
'leftrightarrows': u'\u21c6', # ⇆ LEFTWARDS ARROW OVER RIGHTWARDS ARROW
'leftrightarrowtriangle': u'\u21ff', # ⇿ LEFT RIGHT OPEN-HEADED ARROW
'leftrightharpoon': u'\u294a', # ⥊ LEFT BARB UP RIGHT BARB DOWN HARPOON
'leftrightharpoons': u'\u21cb', # ⇋ LEFTWARDS HARPOON OVER RIGHTWARDS HARPOON
'leftrightsquigarrow': u'\u21ad', # ↭ LEFT RIGHT WAVE ARROW
'leftslice': u'\u2aa6', # ⪦ LESS-THAN CLOSED BY CURVE
'leftsquigarrow': u'\u21dc', # ⇜ LEFTWARDS SQUIGGLE ARROW
'leq': u'\u2264', # ≤ LESS-THAN OR EQUAL TO
'leqq': u'\u2266', # ≦ LESS-THAN OVER EQUAL TO
'leqslant': u'\u2a7d', # ⩽ LESS-THAN OR SLANTED EQUAL TO
'lessapprox': u'\u2a85', # ⪅ LESS-THAN OR APPROXIMATE
'lessdot': u'\u22d6', # ⋖ LESS-THAN WITH DOT
'lesseqgtr': u'\u22da', # ⋚ LESS-THAN EQUAL TO OR GREATER-THAN
'lesseqqgtr': u'\u2a8b', # ⪋ LESS-THAN ABOVE DOUBLE-LINE EQUAL ABOVE GREATER-THAN
'lessgtr': u'\u2276', # ≶ LESS-THAN OR GREATER-THAN
'lesssim': u'\u2272', # ≲ LESS-THAN OR EQUIVALENT TO
'lightning': u'\u21af', # ↯ DOWNWARDS ZIGZAG ARROW
'll': u'\u226a', # ≪ MUCH LESS-THAN
'llcurly': u'\u2abb', # ⪻ DOUBLE PRECEDES
'lll': u'\u22d8', # ⋘ VERY MUCH LESS-THAN
'lnapprox': u'\u2a89', # ⪉ LESS-THAN AND NOT APPROXIMATE
'lneq': u'\u2a87', # ⪇ LESS-THAN AND SINGLE-LINE NOT EQUAL TO
'lneqq': u'\u2268', # ≨ LESS-THAN BUT NOT EQUAL TO
'lnsim': u'\u22e6', # ⋦ LESS-THAN BUT NOT EQUIVALENT TO
'longleftarrow': u'\u27f5', # ⟵ LONG LEFTWARDS ARROW
'longleftrightarrow': u'\u27f7', # ⟷ LONG LEFT RIGHT ARROW
'longmapsfrom': u'\u27fb', # ⟻ LONG LEFTWARDS ARROW FROM BAR
'longmapsto': u'\u27fc', # ⟼ LONG RIGHTWARDS ARROW FROM BAR
'longrightarrow': u'\u27f6', # ⟶ LONG RIGHTWARDS ARROW
'looparrowleft': u'\u21ab', # ↫ LEFTWARDS ARROW WITH LOOP
'looparrowright': u'\u21ac', # ↬ RIGHTWARDS ARROW WITH LOOP
'mapsfrom': u'\u21a4', # ↤ LEFTWARDS ARROW FROM BAR
'mapsto': u'\u21a6', # ↦ RIGHTWARDS ARROW FROM BAR
'mid': u'\u2223', # ∣ DIVIDES
'models': u'\u22a7', # ⊧ MODELS
'multimap': u'\u22b8', # ⊸ MULTIMAP
'nLeftarrow': u'\u21cd', # ⇍ LEFTWARDS DOUBLE ARROW WITH STROKE
'nLeftrightarrow': u'\u21ce', # ⇎ LEFT RIGHT DOUBLE ARROW WITH STROKE
'nRightarrow': u'\u21cf', # ⇏ RIGHTWARDS DOUBLE ARROW WITH STROKE
'nVDash': u'\u22af', # ⊯ NEGATED DOUBLE VERTICAL BAR DOUBLE RIGHT TURNSTILE
'nVdash': u'\u22ae', # ⊮ DOES NOT FORCE
'ncong': u'\u2247', # ≇ NEITHER APPROXIMATELY NOR ACTUALLY EQUAL TO
'ne': u'\u2260', # ≠ NOT EQUAL TO
'nearrow': u'\u2197', # ↗ NORTH EAST ARROW
'neq': u'\u2260', # ≠ NOT EQUAL TO
'ngeq': u'\u2271', # ≱ NEITHER GREATER-THAN NOR EQUAL TO
'ngtr': u'\u226f', # ≯ NOT GREATER-THAN
'ni': u'\u220b', # ∋ CONTAINS AS MEMBER
'nleftarrow': u'\u219a', # ↚ LEFTWARDS ARROW WITH STROKE
'nleftrightarrow': u'\u21ae', # ↮ LEFT RIGHT ARROW WITH STROKE
'nleq': u'\u2270', # ≰ NEITHER LESS-THAN NOR EQUAL TO
'nless': u'\u226e', # ≮ NOT LESS-THAN
'nmid': u'\u2224', # ∤ DOES NOT DIVIDE
'notasymp': u'\u226d', # ≭ NOT EQUIVALENT TO
'notin': u'\u2209', # ∉ NOT AN ELEMENT OF
'notowner': u'\u220c', # ∌ DOES NOT CONTAIN AS MEMBER
'notslash': u'\u233f', # ⌿ APL FUNCTIONAL SYMBOL SLASH BAR
'nparallel': u'\u2226', # ∦ NOT PARALLEL TO
'nprec': u'\u2280', # ⊀ DOES NOT PRECEDE
'npreceq': u'\u22e0', # ⋠ DOES NOT PRECEDE OR EQUAL
'nrightarrow': u'\u219b', # ↛ RIGHTWARDS ARROW WITH STROKE
'nsim': u'\u2241', # ≁ NOT TILDE
'nsubseteq': u'\u2288', # ⊈ NEITHER A SUBSET OF NOR EQUAL TO
'nsucc': u'\u2281', # ⊁ DOES NOT SUCCEED
'nsucceq': u'\u22e1', # ⋡ DOES NOT SUCCEED OR EQUAL
'nsupseteq': u'\u2289', # ⊉ NEITHER A SUPERSET OF NOR EQUAL TO
'ntriangleleft': u'\u22ea', # ⋪ NOT NORMAL SUBGROUP OF
'ntrianglelefteq': u'\u22ec', # ⋬ NOT NORMAL SUBGROUP OF OR EQUAL TO
'ntriangleright': u'\u22eb', # ⋫ DOES NOT CONTAIN AS NORMAL SUBGROUP
'ntrianglerighteq': u'\u22ed', # ⋭ DOES NOT CONTAIN AS NORMAL SUBGROUP OR EQUAL
'nvDash': u'\u22ad', # ⊭ NOT TRUE
'nvdash': u'\u22ac', # ⊬ DOES NOT PROVE
'nwarrow': u'\u2196', # ↖ NORTH WEST ARROW
'owns': u'\u220b', # ∋ CONTAINS AS MEMBER
'parallel': u'\u2225', # ∥ PARALLEL TO
'perp': u'\u27c2', # ⟂ PERPENDICULAR
'pitchfork': u'\u22d4', # ⋔ PITCHFORK
'prec': u'\u227a', # ≺ PRECEDES
'precapprox': u'\u2ab7', # ⪷ PRECEDES ABOVE ALMOST EQUAL TO
'preccurlyeq': u'\u227c', # ≼ PRECEDES OR EQUAL TO
'preceq': u'\u2aaf', # ⪯ PRECEDES ABOVE SINGLE-LINE EQUALS SIGN
'precnapprox': u'\u2ab9', # ⪹ PRECEDES ABOVE NOT ALMOST EQUAL TO
'precnsim': u'\u22e8', # ⋨ PRECEDES BUT NOT EQUIVALENT TO
'precsim': u'\u227e', # ≾ PRECEDES OR EQUIVALENT TO
'propto': u'\u221d', # ∝ PROPORTIONAL TO
'restriction': u'\u21be', # ↾ UPWARDS HARPOON WITH BARB RIGHTWARDS
'rightarrow': u'\u2192', # → RIGHTWARDS ARROW
'rightarrowtail': u'\u21a3', # ↣ RIGHTWARDS ARROW WITH TAIL
'rightarrowtriangle': u'\u21fe', # ⇾ RIGHTWARDS OPEN-HEADED ARROW
'rightbarharpoon': u'\u296c', # ⥬ RIGHTWARDS HARPOON WITH BARB UP ABOVE LONG DASH
'rightharpoondown': u'\u21c1', # ⇁ RIGHTWARDS HARPOON WITH BARB DOWNWARDS
'rightharpoonup': u'\u21c0', # ⇀ RIGHTWARDS HARPOON WITH BARB UPWARDS
'rightleftarrows': u'\u21c4', # ⇄ RIGHTWARDS ARROW OVER LEFTWARDS ARROW
'rightleftharpoon': u'\u294b', # ⥋ LEFT BARB DOWN RIGHT BARB UP HARPOON
'rightleftharpoons': u'\u21cc', # ⇌ RIGHTWARDS HARPOON OVER LEFTWARDS HARPOON
'rightrightarrows': u'\u21c9', # ⇉ RIGHTWARDS PAIRED ARROWS
'rightrightharpoons': u'\u2964', # ⥤ RIGHTWARDS HARPOON WITH BARB UP ABOVE RIGHTWARDS HARPOON WITH BARB DOWN
'rightslice': u'\u2aa7', # ⪧ GREATER-THAN CLOSED BY CURVE
'rightsquigarrow': u'\u21dd', # ⇝ RIGHTWARDS SQUIGGLE ARROW
'risingdotseq': u'\u2253', # ≓ IMAGE OF OR APPROXIMATELY EQUAL TO
'searrow': u'\u2198', # ↘ SOUTH EAST ARROW
'sim': u'\u223c', # ∼ TILDE OPERATOR
'simeq': u'\u2243', # ≃ ASYMPTOTICALLY EQUAL TO
'smallfrown': u'\u2322', # ⌢ FROWN
'smallsmile': u'\u2323', # ⌣ SMILE
'smile': u'\u2323', # ⌣ SMILE
'sqsubset': u'\u228f', # ⊏ SQUARE IMAGE OF
'sqsubseteq': u'\u2291', # ⊑ SQUARE IMAGE OF OR EQUAL TO
'sqsupset': u'\u2290', # ⊐ SQUARE ORIGINAL OF
'sqsupseteq': u'\u2292', # ⊒ SQUARE ORIGINAL OF OR EQUAL TO
'subset': u'\u2282', # ⊂ SUBSET OF
'subseteq': u'\u2286', # ⊆ SUBSET OF OR EQUAL TO
'subseteqq': u'\u2ac5', # ⫅ SUBSET OF ABOVE EQUALS SIGN
'subsetneq': u'\u228a', # ⊊ SUBSET OF WITH NOT EQUAL TO
'subsetneqq': u'\u2acb', # ⫋ SUBSET OF ABOVE NOT EQUAL TO
'succ': u'\u227b', # ≻ SUCCEEDS
'succapprox': u'\u2ab8', # ⪸ SUCCEEDS ABOVE ALMOST EQUAL TO
'succcurlyeq': u'\u227d', # ≽ SUCCEEDS OR EQUAL TO
'succeq': u'\u2ab0', # ⪰ SUCCEEDS ABOVE SINGLE-LINE EQUALS SIGN
'succnapprox': u'\u2aba', # ⪺ SUCCEEDS ABOVE NOT ALMOST EQUAL TO
'succnsim': u'\u22e9', # ⋩ SUCCEEDS BUT NOT EQUIVALENT TO
'succsim': u'\u227f', # ≿ SUCCEEDS OR EQUIVALENT TO
'supset': u'\u2283', # ⊃ SUPERSET OF
'supseteq': u'\u2287', # ⊇ SUPERSET OF OR EQUAL TO
'supseteqq': u'\u2ac6', # ⫆ SUPERSET OF ABOVE EQUALS SIGN
'supsetneq': u'\u228b', # ⊋ SUPERSET OF WITH NOT EQUAL TO
'supsetneqq': u'\u2acc', # ⫌ SUPERSET OF ABOVE NOT EQUAL TO
'swarrow': u'\u2199', # ↙ SOUTH WEST ARROW
'to': u'\u2192', # → RIGHTWARDS ARROW
'trianglelefteq': u'\u22b4', # ⊴ NORMAL SUBGROUP OF OR EQUAL TO
'triangleq': u'\u225c', # ≜ DELTA EQUAL TO
'trianglerighteq': u'\u22b5', # ⊵ CONTAINS AS NORMAL SUBGROUP OR EQUAL TO
'twoheadleftarrow': u'\u219e', # ↞ LEFTWARDS TWO HEADED ARROW
'twoheadrightarrow': u'\u21a0', # ↠ RIGHTWARDS TWO HEADED ARROW
'uparrow': u'\u2191', # ↑ UPWARDS ARROW
'updownarrow': u'\u2195', # ↕ UP DOWN ARROW
'updownarrows': u'\u21c5', # ⇅ UPWARDS ARROW LEFTWARDS OF DOWNWARDS ARROW
'updownharpoons': u'\u296e', # ⥮ UPWARDS HARPOON WITH BARB LEFT BESIDE DOWNWARDS HARPOON WITH BARB RIGHT
'upharpoonleft': u'\u21bf', # ↿ UPWARDS HARPOON WITH BARB LEFTWARDS
'upharpoonright': u'\u21be', # ↾ UPWARDS HARPOON WITH BARB RIGHTWARDS
'upuparrows': u'\u21c8', # ⇈ UPWARDS PAIRED ARROWS
'upupharpoons': u'\u2963', # ⥣ UPWARDS HARPOON WITH BARB LEFT BESIDE UPWARDS HARPOON WITH BARB RIGHT
'vDash': u'\u22a8', # ⊨ TRUE
'varpropto': u'\u221d', # ∝ PROPORTIONAL TO
'vartriangleleft': u'\u22b2', # ⊲ NORMAL SUBGROUP OF
'vartriangleright': u'\u22b3', # ⊳ CONTAINS AS NORMAL SUBGROUP
'vdash': u'\u22a2', # ⊢ RIGHT TACK
'vdots': u'\u22ee', # ⋮ VERTICAL ELLIPSIS
}
mathunder = {
'underbrace': u'\u23df', # ⏟ BOTTOM CURLY BRACKET
}
space = {
':': u'\u205f', # MEDIUM MATHEMATICAL SPACE
'medspace': u'\u205f', # MEDIUM MATHEMATICAL SPACE
'quad': u'\u2001', # EM QUAD
}
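# Minimal usage sketch (assuming these module-level dicts are imported from
# this module): translate a TeX macro name into its Unicode character, e.g.
#     from docutils.utils.math import tex2unichar
#     tex2unichar.space['quad']            # -> u'\u2001' (EM QUAD)
#     tex2unichar.mathunder['underbrace']  # -> u'\u23df' (BOTTOM CURLY BRACKET)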
|
JulienMcJay/eclock
|
windows/Python27/Lib/site-packages/docutils/utils/math/tex2unichar.py
|
Python
|
gpl-2.0
| 35,109
|
[
"Bowtie"
] |
d2d5d5dbdb5ab4610273acc357bf32bf1fbabdce67a5f3b51ae919af36c8e69b
|
#!/usr/bin/env python
"""
This script prints out how great it is, shows raw queries, and sets the
number of pings.
Example:
$ dirac-my-great-script detail Bob MyService
Your name is: Bob
This is the servicesList: MyService
We are done with detail report.
"""
from DIRAC import S_OK, S_ERROR, gLogger, exit as DIRACExit
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
class Params:
"""
Class holding the parameters raw and pingsToDo, and callbacks for their respective switches.
"""
def __init__(self):
"""C'or"""
self.raw = False
self.pingsToDo = 1
# Define all the switches that can be used while calling the script from the
# command line interface. Each switch is a tuple of
# (shortOption, longOption, helpText[, callbackFunction]).
self.switches = [
("", "text=", "Text to be printed"),
("u", "upper", "Print text on upper case"),
("r", "showRaw", "Show raw result from the query", self.setRawResult),
("p:", "numPings=", "Number of pings to do (by default 1)", self.setNumOfPingsToDo),
]
def setRawResult(self, _):
"""ShowRaw option callback function, no option argument.
:return: S_OK()
"""
self.raw = True
return S_OK()
def setNumOfPingsToDo(self, value):
"""NumPings option callback function
:param value: option argument
:return: S_OK()/S_ERROR()
"""
try:
self.pingsToDo = max(1, int(value))
except ValueError:
return S_ERROR("Number of pings to do has to be a number")
return S_OK()
def registerArguments():
"""
Registers the positional arguments that can be used when calling the script from the command line interface.
"""
# it is important to add a colon after the name of the argument in the description
Script.registerArgument(" ReportType: report type", values=["short", "detail"])
Script.registerArgument(("Name: user name", "DN: user DN"))
Script.registerArgument(["Service: list of services"], default="no elements", mandatory=False)
def parseSwitchesAndPositionalArguments():
"""
Parse switches and positional arguments given to the script
"""
# Parse the command line and initialize DIRAC
Script.parseCommandLine(ignoreErrors=False)
# Get arguments
allArgs = Script.getPositionalArgs()
gLogger.debug("All arguments: %s" % ", ".join(allArgs))
# Get unprocessed switches
switches = dict(Script.getUnprocessedSwitches())
gLogger.debug("The switches used are:")
for switch, value in switches.items():
    gLogger.debug("  %s = %s" % (switch, value))
# Get grouped positional arguments
repType, user, services = Script.getPositionalArgs(group=True)
gLogger.debug("The positional arguments are:")
gLogger.debug("Report type:", repType)
gLogger.debug("Name or DN:", user)
gLogger.debug("Services:", services)
return switches, repType, user, services
# IMPORTANT: Make sure to add the console-scripts entry to setup.cfg as well!
@Script()
def main():
"""
This is the script main method, which will hold all the logic.
"""
params = Params()
# Script initialization
Script.registerSwitches(params.switches)
registerArguments()
switchDict, repType, user, services = parseSwitchesAndPositionalArguments()
# Import the required DIRAC modules
from DIRAC.Interfaces.API.Dirac import Dirac
# let's do something
if services == "no elements":
gLogger.error("No services defined")
DIRACExit(1)
gLogger.notice("Your %s is:" % ("DN" if user.startswith("/") else "name"), user)
gLogger.notice("This is the servicesList:", ", ".join(services))
gLogger.notice("We are done with %s report." % repType)
DIRACExit(0)
if __name__ == "__main__":
main()
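# Example invocation (hypothetical values), combining the switches and
# positional arguments registered above:
#     $ dirac-my-great-script -p 3 --showRaw detail Bob MyService
# would do 3 pings, show the raw query result, and print the detail report
# for user Bob against MyService.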
|
ic-hep/DIRAC
|
docs/source/DeveloperGuide/AddingNewComponents/DevelopingCommands/dirac_my_great_script.py
|
Python
|
gpl-3.0
| 3,791
|
[
"DIRAC"
] |
d2ac4a876f9fac7d91dadbbe9790206cf291e901dbd33d8b8a461bd9867ae272
|
import pytest
@pytest.mark.requires("js")
class TestGoBack:
def test_fetches_a_response_from_the_driver_from_the_previous_page(self, session):
session.visit("/")
assert session.has_text("Hello world!")
session.visit("/foo")
assert session.has_text("Another World")
session.go_back()
assert session.has_text("Hello world!")
|
elliterate/capybara.py
|
capybara/tests/session/test_go_back.py
|
Python
|
mit
| 377
|
[
"VisIt"
] |
8e4e2b9bc73a320f3a9c00e2dec4a9dce8bc66ccd465f8d2933cde0c1c7495df
|
import os
import os.path as op
try:
from conda_build.metadata import MetaData
except Exception as e:
import traceback
traceback.print_exc()
raise e
from distutils.version import LooseVersion
#import matplotlib
#matplotlib.use("agg")
#import matplotlib.pyplot as plt
#from wordcloud import WordCloud
BASE_DIR = op.dirname(op.abspath(__file__))
RECIPE_DIR = op.join(op.dirname(BASE_DIR), 'recipes')
OUTPUT_DIR = op.join(BASE_DIR, 'recipes')
README_TEMPLATE = u"""\
.. _`{title}`:
{title}
{title_underline}
|downloads|
{summary}
======== ===========
Home {home}
Versions {versions}
License {license}
Recipe {recipe}
======== ===========
Installation
------------
.. highlight:: bash
With an activated Bioconda channel (see :ref:`setup`), install with::
conda install {title}
and update with::
conda update {title}
{notes}
|docker|
A Docker container is available at https://quay.io/repository/biocontainers/{title}.
Link to this page
-----------------
Render an |badge| badge with the following Markdown::
[](http://bioconda.github.io/recipes/{title}/README.html)
.. |badge| image:: https://img.shields.io/badge/install%20with-bioconda-brightgreen.svg?style=flat-square
:target: http://bioconda.github.io/recipes/{title}/README.html
.. |downloads| image:: https://anaconda.org/bioconda/{title}/badges/downloads.svg
:target: https://anaconda.org/bioconda/{title}
.. |docker| image:: https://quay.io/repository/biocontainers/{title}/status
:target: https://quay.io/repository/biocontainers/{title}
"""
def setup(*args):
"""
Go through every folder in the `bioconda-recipes/recipes` dir
and generate a README.rst file.
"""
print('Generating package READMEs...')
# TODO obtain information from repodata.json.
summaries = []
for folder in os.listdir(RECIPE_DIR):
# Subfolders correspond to different versions
versions = []
for sf in os.listdir(op.join(RECIPE_DIR, folder)):
if not op.isdir(op.join(RECIPE_DIR, folder, sf)):
# Not a folder
continue
try:
LooseVersion(sf)
except ValueError:
print("'{}' does not look like a proper version!".format(sf))
continue
versions.append(sf)
#versions.sort(key=LooseVersion, reverse=True)
# Read the meta.yaml file
recipe = op.join(RECIPE_DIR, folder, "meta.yaml")
if op.exists(recipe):
metadata = MetaData(recipe)
if metadata.version() not in versions:
versions.insert(0, metadata.version())
else:
if versions:
recipe = op.join(RECIPE_DIR, folder, versions[0], "meta.yaml")
metadata = MetaData(recipe)
else:
# ignore non-recipe folders
continue
# Format the README
notes = metadata.get_section('extra').get('notes', '')
if notes:
notes = 'Notes\n-----\n\n' + notes
summary = metadata.get_section('about').get('summary', '')
summaries.append(summary)
template_options = {
'title': metadata.name(),
'title_underline': '=' * len(metadata.name()),
'summary': summary,
'home': metadata.get_section('about').get('home', ''),
'versions': ', '.join(versions),
'license': metadata.get_section('about').get('license', ''),
'recipe': ('https://github.com/bioconda/bioconda-recipes/tree/master/recipes/' +
op.dirname(op.relpath(metadata.meta_path, RECIPE_DIR))),
'notes': notes
}
readme = README_TEMPLATE.format(**template_options)
# Write to file
try:
os.makedirs(op.join(OUTPUT_DIR, folder)) # exist_ok=True on Python 3
except OSError:
pass
output_file = op.join(OUTPUT_DIR, folder, 'README.rst')
with open(output_file, 'wb') as ofh:
ofh.write(readme.encode('utf-8'))
#wordcloud = WordCloud(max_font_size=40,
# background_color='white',
# stopwords=set(['package', 'tool'])).generate(" ".join(summaries))
#plt.imshow(wordcloud)
#plt.axis("off")
#plt.savefig(op.join(BASE_DIR, 'wordcloud.png'), bbox_inches='tight')
if __name__ == '__main__':
setup()
|
ThomasWollmann/bioconda-recipes
|
docs/generate_docs.py
|
Python
|
mit
| 4,571
|
[
"Bioconda"
] |
516d345db70d9c297e91da1f83e3be790bf32c2b80e4498de152353a06a92cd1
|
#!/usr/bin/python
import scipy
import scipy.interpolate
import scipy.optimize
import sys
import os
import subprocess
from astropy.io import fits as pyfits
import pylab
import numpy as np
import MakeModel
import DataStructures
import FitTellurics as utils
import FitsUtils
import FittingUtilities  # assumed available alongside these local modules; provides ReduceResolution used below
import Units
import RotBroad
# Get stellar model first, rotationally broadened
homedir = os.environ['HOME']
Bstarfile = homedir + "/School/Research/McDonaldData/BstarModels/BG19000g425v2.vis.7"
starmodel = RotBroad.Broaden(Bstarfile, 150 * Units.cm / Units.km)
BSTAR_first = starmodel.x[0]
BSTAR_last = starmodel.x[-1]
BSTAR_fcn = scipy.interpolate.UnivariateSpline(starmodel.x, starmodel.y / starmodel.cont, s=0)
def BSTAR(x):
value = BSTAR_fcn(x)
value[x < BSTAR_first] = 1.0
value[x > BSTAR_last] = 1.0
return value
UsedLineList = "UsedLines.log"
outfile = open(UsedLineList, "w")
outfile.close()
class fitpoints:
def __init__(self):
self.x = []
self.y = []
#Main part of the code
class Improve:
def __init__(self, filename):
self.filename = filename
hdulist = pyfits.open(filename)
self.header = hdulist[0].header
self.orders = FitsUtils.MakeXYpoints(self.header, hdulist[0].data)
def ImportTelluric(self, filename):
wave, trans = np.loadtxt(filename, usecols=(0, 1), unpack=True)
self.telluric = DataStructures.xypoint(wave.size)
self.telluric.x = wave[::-1] * Units.nm / Units.um
self.telluric.y = trans[::-1]
self.telluric = FittingUtilities.ReduceResolution(self.telluric, 70000)
#Here is the really important function.
def Fit(self, plot=False):
#main function. Will plot each order separately, and allow user to interact if plot=True
self.clicks = []
#interpolate the telluric (earth's atmospheric transmission) function
Telluric = scipy.interpolate.UnivariateSpline(self.telluric.x, self.telluric.y, s=0)
print "Plotting... press i to begin clicking points, and d when done"
outfile = open("residuals.log", "w")
outfile2 = open("UsedLines.log", "a")
linelist = np.loadtxt(utils.LineListFile)
#Loop over the spectral orders
for i in range(37, 51):
print "Fitting order #" + str(i + 1)
self.orderNum = i
self.fitpoints = fitpoints()
wave = self.orders[i].x
flux = self.orders[i].y / self.orders[i].cont
tell = Telluric(wave)
print "wave = ", wave
#Do a cross-correlation first, to get the wavelength solution close
ycorr = scipy.correlate(flux - 1.0, tell - 1.0, mode="full")
xcorr = np.arange(ycorr.size)
lags = xcorr - (flux.size - 1)
distancePerLag = (wave[-1] - wave[0]) / float(wave.size)
offsets = -lags * distancePerLag
offsets = offsets[::-1]
ycorr = ycorr[::-1]
fit = np.poly1d(np.polyfit(offsets, ycorr, ycorr.size / 100))
ycorr = ycorr - fit(offsets)
left = np.searchsorted(offsets, -1.0)
right = np.searchsorted(offsets, +1.0)
maxindex = ycorr[left:right].argmax() + left
print "maximum offset: ", offsets[maxindex], " nm"
pylab.plot(offsets, ycorr)
pylab.show()
userin = raw_input("Apply Cross-correlation correction? ")
if "y" in userin:
self.orders[i].x = self.orders[i].x + offsets[maxindex]
#Fit using the (GridSearch) utility function
data = DataStructures.xypoint(self.orders[i].x.size)
data.x = np.copy(self.orders[i].x)
data.y = np.copy(self.orders[i].y)
data.cont = np.copy(self.orders[i].cont)
fitfcn, offset = FitWavelength2(data, self.telluric, linelist)
self.orders[i].x = fitfcn(self.orders[i].x - offset)
#Let user fix, if plot is true
if plot:
#First, just plot all at once so user can examine fit
left = np.searchsorted(self.telluric.x, self.orders[i].x[0])
right = np.searchsorted(self.telluric.x, self.orders[i].x[-1])
pylab.plot(self.orders[i].x, self.orders[i].y / self.orders[i].cont, label="data")
pylab.plot(self.telluric.x[left:right],
self.telluric.y[left:right] * BSTAR(self.telluric.x[left:right]), label="model")
pylab.legend(loc='best')
pylab.title("Order " + str(self.orderNum + 1))
pylab.show()
#We only want to plot about 3 nm at a time
spacing = 3.0
data_left = 0
data_right = np.searchsorted(self.orders[i].x, self.orders[i].x[0] + spacing)
while (data_left < self.orders[i].x.size):
#Bind mouseclick:
self.fig = pylab.figure()
self.clickid = self.fig.canvas.mpl_connect('button_press_event', self.onclick)
left = np.searchsorted(self.telluric.x, self.orders[i].x[data_left])
right = np.searchsorted(self.telluric.x,
self.orders[i].x[min(self.orders[i].x.size - 1, data_right)])
pylab.plot(self.orders[i].x[data_left:data_right],
self.orders[i].y[data_left:data_right] / self.orders[i].cont[data_left:data_right],
label="data")
pylab.plot(self.telluric.x[left:right],
self.telluric.y[left:right] * BSTAR(self.telluric.x[left:right]), label="model")
pylab.legend(loc='best')
pylab.title("Order " + str(self.orderNum + 1))
pylab.show()
data_left = data_right
data_right = np.searchsorted(self.orders[i].x,
self.orders[i].x[min(self.orders[i].x.size - 1, data_left)] + spacing)
#Once you close the window, you will get past the pylab.show() command
#Fit the points to a cubic
#This is done in a loop, to iteratively remove outliers
done = False
while not done:
#self.fitpoints is filled when you are clicking in the window
if (len(self.fitpoints.x) > 3):
pars = np.polyfit(self.fitpoints.x, self.fitpoints.y, 3)
else:
pars = [0, 1, 0] #y=x... meaning don't try to improve on this order
func = np.poly1d(pars)
ignorelist = []
x = np.array(self.fitpoints.x)
y = np.array(self.fitpoints.y)
resid = y - func(x) #residuals from the fit
mean = resid.mean()
std_dev = resid.std()
#Find outliers (points with residuals over 0.01, or more than 2.5
# standard deviations from the mean)
for j in range(len(self.fitpoints.x)):
residual = self.fitpoints.y[j] - func(self.fitpoints.x[j])
if np.abs(residual) > 0.01 or np.abs(residual) > std_dev * 2.5:
ignorelist.append(j)
if len(ignorelist) == 0:
done = True
else:
for index in ignorelist[::-1]:
print "removing point ", index, " of ", len(self.fitpoints.x)
self.fitpoints.x.pop(index)
self.fitpoints.y.pop(index)
#Done removing outliers. Apply fit to the wavelengths
print "y = ", pars[0], "x^2 + ", pars[1], "x + ", pars[2]
self.orders[i].x = func(self.orders[i].x)
#Output the residuals, and plot them. Make sure they look alright
for j in range(len(self.fitpoints.x)):
outfile.write(str(self.fitpoints.x[j]) + "\t" + str(self.fitpoints.y[j]) + "\t" + str(
self.fitpoints.y[j] - func(self.fitpoints.x[j])) + "\n")
outfile.write("\n\n\n\n")
pylab.plot(self.fitpoints.x, self.fitpoints.y - func(self.fitpoints.x), 'ro')
pylab.show()
#Finally, add the lines to UsedLineList.log
for line in self.fitpoints.y:
outfile2.write("%.10g\n" % line)
#Output after every order, in case program crashes
FitsUtils.OutputFitsFile(self.filename, self.orders, func_order=5)
outfile.close()
#Output calibrated spectrum to file
return FitsUtils.OutputFitsFile(self.filename, self.orders, func_order=5)
#This function gets called when hit a key
def keypress(self, event):
if (event.key == "i"):
#allow user to click on the canvas
print "Mouse press active!"
self.clickid = self.fig.canvas.mpl_connect('button_press_event', self.onclick)
return
elif (event.key == "d"):
#Done clicking. allow user to zoom to another set of lines
print "Mouse press deactivated"
self.fig.canvas.mpl_disconnect(self.clickid)
elif (event.key == "r"):
#User made a mistake in the previous click. Remove
print 'Removing last click, which was at x = '
if len(self.fitpoints.x) == len(self.fitpoints.y):
print self.fitpoints.y.pop()
else:
print self.fitpoints.x.pop()
#This function gets called when you click on the canvas, if the binding is active
def onclick(self, event):
tol = 0.04 #This is how close you have to be to the lowest point in the peak, in nm
if len(self.fitpoints.x) == len(self.fitpoints.y):
#Find lowest point in observed array:
left = np.searchsorted(self.orders[self.orderNum].x, event.xdata - tol)
right = np.searchsorted(self.orders[self.orderNum].x, event.xdata + tol)
i = self.orders[self.orderNum].y[left:right].argmin()
left = left + i
left = np.searchsorted(self.orders[self.orderNum].x, self.orders[self.orderNum].x[left] - tol)
right = np.searchsorted(self.orders[self.orderNum].x, self.orders[self.orderNum].x[left] + 2 * tol)
centroid = self.Centroid(self.orders[self.orderNum].x[left:right], self.orders[self.orderNum].y[left:right])
#Fit to gaussian:
cont = 1.0
depth = cont - self.orders[self.orderNum].y[(left + right) / 2] / self.orders[self.orderNum].cont[i]
mu = self.orders[self.orderNum].x[(left + right) / 2]
sig = 0.025
params = [cont, depth, mu, sig]
params, success = scipy.optimize.leastsq(ErrFunction, params, args=(
self.orders[self.orderNum].x[left:right],
self.orders[self.orderNum].y[left:right] / self.orders[self.orderNum].cont[left:right]))
print "mean: ", params[2]
self.fitpoints.x.append(params[2])
else:
#Find lowest point in observed array:
left = np.searchsorted(self.telluric.x, event.xdata - tol)
right = np.searchsorted(self.telluric.x, event.xdata + tol)
i = self.telluric.y[left:right].argmin()
left = left + i
left = np.searchsorted(self.telluric.x, self.telluric.x[left] - tol)
right = np.searchsorted(self.telluric.x, self.telluric.x[left] + 2 * tol)
centroid = self.Centroid(self.telluric.x[left:right], self.telluric.y[left:right])
#Fit to gaussian:
cont = 1.0
depth = cont - self.telluric.y[(left + right) / 2]
mu = self.telluric.x[(left + right) / 2]
sig = 0.025
params = [cont, depth, mu, sig]
params, success = scipy.optimize.leastsq(ErrFunction, params,
args=(self.telluric.x[left:right], self.telluric.y[left:right]))
self.fitpoints.y.append(params[2])
print "mean = ", params[2]
return
#Find centroid of the absorption line. I don't think I use this
def Centroid(self, x, y):
if x.size != y.size:
print "Error! x and y not same size!"
sys.exit()
centroid = 0
norm = sum(1 / y)
for i in range(x.size):
centroid = centroid + x[i] / y[i]
return centroid / norm
#Gaussian absorption line
def FitFunction(x, params):
cont = params[0]
depth = params[1]
mu = params[2]
sig = params[3]
return cont - depth * np.exp(-(x - mu) ** 2 / (2 * sig ** 2))
#Returns the residuals between the fit from above and the actual values
def ErrFunction(params, x, y):
return FitFunction(x, params) - y
#Second wavelength-fitting function that just shifts lines, instead of fitting them to gaussians
def WavelengthErrorFunction(shift, data, model):
modelfcn = scipy.interpolate.UnivariateSpline(model.x, model.y, s=0)
weight = 1.0 / np.sqrt(data.y)
weight[weight < 0.01] = 0.0
newmodel = modelfcn(model.x + float(shift))
if shift < 0:
newmodel[model.x - float(shift) < model.x[0]] = 0
else:
newmodel[model.x - float(shift) > model.x[-1]] = 0
returnvec = (data.y - newmodel) * weight
return returnvec
#Gaussian absorption line
def GaussianFitFunction(x, params):
cont = params[0]
depth = params[1]
mu = params[2]
sig = params[3]
return cont - depth * np.exp(-(x - mu) ** 2 / (2 * sig ** 2))
#Returns the residuals between the fit from above and the actual values
def GaussianErrorFunction(params, x, y):
return GaussianFitFunction(x, params) - y
#Function to Fit the wavelength solution, using a bunch of telluric lines
#This assumes that we are already quite close to the correct solution
#Note: it comes from FitTellurics and is just slightly modified.
# Therefore, it takes FitTellurics structures
def FitWavelength2(order, telluric, linelist, tol=0.05, oversampling=4, fit_order=3, max_change=2.0, debug=False):
old = []
new = []
#Interpolate to finer spacing
DATA_FCN = scipy.interpolate.UnivariateSpline(order.x, order.y, s=0)
CONT_FCN = scipy.interpolate.UnivariateSpline(order.x, order.cont, s=0)
MODEL_FCN = scipy.interpolate.UnivariateSpline(telluric.x, telluric.y, s=0)
data = DataStructures.xypoint(order.x.size * oversampling)
data.x = np.linspace(order.x[0], order.x[-1], order.x.size * oversampling)
data.y = DATA_FCN(data.x)
data.cont = CONT_FCN(data.x)
model = DataStructures.xypoint(data.x.size)
model.x = np.copy(data.x)
model.y = MODEL_FCN(model.x) * BSTAR(model.x)
#Begin loop over the lines
for line in linelist:
if line - tol > data.x[0] and line + tol < data.x[-1]:
#Find line in the model
left = np.searchsorted(model.x, line - tol)
right = np.searchsorted(model.x, line + tol)
minindex = model.y[left:right].argmin() + left
mean = model.x[minindex]
left2 = np.searchsorted(model.x, mean - tol * 2)
right2 = np.searchsorted(model.x, mean + tol * 2)
argmodel = DataStructures.xypoint(right2 - left2)
argmodel.x = np.copy(model.x[left2:right2])
argmodel.y = np.copy(model.y[left2:right2])
#Do the same for the data
left = np.searchsorted(data.x, line - tol)
right = np.searchsorted(data.x, line + tol)
minindex = data.y[left:right].argmin() + left
mean = data.x[minindex]
argdata = DataStructures.xypoint(right2 - left2)
argdata.x = np.copy(data.x[left2:right2])
argdata.y = np.copy(data.y[left2:right2] / data.cont[left2:right2])
#Do a cross-correlation first, to get the wavelength solution close
ycorr = scipy.correlate(argdata.y - 1.0, argmodel.y - 1.0, mode="full")
xcorr = np.arange(ycorr.size)
maxindex = ycorr.argmax()
lags = xcorr - (argdata.x.size - 1)
distancePerLag = (argdata.x[-1] - argdata.x[0]) / float(argdata.x.size)
offsets = -lags * distancePerLag
shift = offsets[maxindex]
shift, success = scipy.optimize.leastsq(WavelengthErrorFunction, shift, args=(argdata, argmodel))
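# Worked example of the lag -> wavelength conversion above
# (hypothetical numbers): if argdata spans 0.2 nm over 40 samples,
# one lag corresponds to 0.005 nm, so a correlation peak at
# lag = -4 gives an initial shift of +0.02 nm, which leastsq
# then refines.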
if (debug):
print argdata.x[0], argdata.x[-1], argdata.x.size
print "wave: ", mean, "\tshift: ", shift, "\tsuccess = ", success
pylab.plot(model.x[left:right] - shift, model.y[left:right], 'g-')
pylab.plot(argmodel.x, argmodel.y, 'r-')
pylab.plot(argdata.x, argdata.y, 'k-')
if (success < 5):
old.append(mean)
new.append(mean + float(shift))
if debug:
pylab.show()
pylab.plot(old, new, 'ro')
pylab.show()
#fit = UnivariateSpline(old, new, k=1, s=0)
#Iteratively fit to a cubic with sigma-clipping
fit = np.poly1d((1, 0))
mean = 0.0
done = False
while not done and len(old) > fit_order:
done = True
mean = np.mean(old)
fit = np.poly1d(np.polyfit(old - mean, new, fit_order))
residuals = fit(old - mean) - new
std = np.std(residuals)
#if debug:
# pylab.plot(old, residuals, 'ro')
# pylab.plot(old, std*np.ones(len(old)))
# pylab.show()
badindices = np.where(np.logical_or(residuals > 2 * std, residuals < -2 * std))[0]
for badindex in badindices[::-1]:
print "Deleting index ", badindex + 1, "of ", len(old)
del old[badindex]
del new[badindex]
done = False
#Check if the function changed things by too much
difference = np.abs(order.x - fit(order.x - mean))
if np.any(difference > max_change):
fit = np.poly1d((1, 0))
mean = 0.0
if debug:
pylab.plot(old, fit(old - mean) - new, 'ro')
pylab.show()
pylab.plot(fit(order.x - mean), order.y, 'k-')
pylab.plot(model.x, model.y, 'g-')
print mean
print fit
pylab.show()
return fit, mean
if __name__ == "__main__":
#You will need to change these filenames to reflect where you store the models
if sys.platform.startswith("linux"):
outfilename = "/media/FreeAgent_Drive/TelluricLibrary/transmission-743.15-283.38-60.0-40.0-368.50-4.00-1.71-1.40"
else:
outfilename = "/Users/kgulliks//School/Research/lblrtm/run_examples/MyModel/OutputFiles/Generic.dat"
if len(sys.argv) > 1:
for fname in sys.argv[1:]:
improve = Improve(fname)
improve.ImportTelluric(outfilename)
improve.Fit(True)
else:
improve = Improve(raw_input("Enter file to calibrate: "))
improve.ImportTelluric(outfilename)
improve.Fit(True)
|
kgullikson88/TS23-Scripts
|
ImproveWavelengthSolutionGUI.py
|
Python
|
gpl-3.0
| 19,329
|
[
"Gaussian"
] |
a3f968a45f2ccbd9268ea7a5288db2a37dc802ad7ec21bfab5580b4b541ceff1
|
import math
import numpy as np
import pandas as pd
SIGMA = 0.1
N = 40
def radial_kernel(x0, X, sigma):
return np.exp(np.sum((X - x0) ** 2, axis=1) / (-2 * sigma * sigma))
def gaussian(sigma):
f = 1 / (math.sqrt(2*math.pi) * sigma)
def kernel(x, X):
return f * np.exp(np.sum((X - x) ** 2, axis=1) / (-2 * sigma * sigma))
return kernel
GAUSSIAN = gaussian(SIGMA)
def lowess(X, Y, kernel=GAUSSIAN):
# add bias term
X = np.c_[np.ones(len(X)), X]
def f(x):
x = np.r_[1, x]
# fit model: normal equations with kernel
xw = X.T * kernel(x, X)
beta = np.linalg.pinv(xw @ X) @ xw @ Y
# predict value
return x @ beta
return f
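# Usage sketch (hypothetical data): each prediction solves the kernel-weighted
# normal equations beta = pinv(X^T W X) X^T W Y around the query point x.
#     X = np.linspace(0, 1, 100).reshape(-1, 1)
#     Y = np.sin(2 * np.pi * X[:, 0])
#     f = lowess(X, Y)
#     f(np.array([0.5]))  # local prediction near sin(pi) = 0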
def sample_lowess(S, X, Y, kernel=GAUSSIAN):
f = lowess(X, Y, kernel)
return np.array([f(s) for s in S])
def inverse_lowess(X, Y, S=None, kernel=GAUSSIAN, n=N):
if S is None:
S = np.linspace(np.amin(Y), np.amax(Y), n)
return sample_lowess(S, Y, X, kernel) # note swap of X and Y
def inverse_lowess_std(X, Y, S=None, kernel=GAUSSIAN, n=N):
if S is None:
S = np.linspace(np.amin(Y), np.amax(Y), n)
Y1 = np.c_[np.ones(len(Y)), Y]
S1 = np.c_[np.ones(len(S)), S]
W = np.array([kernel(s, Y1) for s in S1])
denom = W.sum(axis=1)
rho = (inverse_lowess(X, Y, S=Y, kernel=kernel) - X) ** 2
wr = W @ rho
std = np.sqrt(np.c_[[wr[c]/denom for c in list(wr)]])
return std.T
def inverse(X, Y, kernel=GAUSSIAN, S=None, scaler=None):
if S is None:
S = np.linspace(np.amin(Y), np.amax(Y), N)
if len(S) < 2:
return pd.DataFrame(columns=X.columns), pd.DataFrame(columns=X.columns)
line = inverse_lowess(X, Y, S, kernel)
std = inverse_lowess_std(X, Y, S, kernel=kernel)
if scaler is not None:
line = scaler.inverse_transform(line)
std = std * scaler.scale_
# return S, line, std
index = pd.Index(S, name=Y.name)
curve = pd.DataFrame(line, index=index, columns=X.columns)
curve_std = pd.DataFrame(std, index=index, columns=X.columns)
return curve, curve_std
# def inverse_regression_generator(kernel=GAUSSIAN, bandwidth=0.3, scale=True):
# def f(context, node):
# partition = node.data
# if partition.y.size < 2:
# return []
#
# sigma = bandwidth * (partition.max() - partition.min())
# kernel = gaussian(sigma)
# scaler = node.regulus.pts.scaler if scale else None
# S, line, std = inverse(partition.x, partition.y, kernel, scaler)
# return [dict(x=line[:, c], y=S, std=std[:, c]) for c in range(partition.x.shape[1])]
# return f
def def_inverse(bandwidth_factor=0.2):
def f(context, node):
if hasattr(node, 'data'):
partition = node.data
else:
partition = node
if partition.y.size < 2:
return []
sigma = bandwidth_factor * (partition.max() - partition.min())
kernel = gaussian(sigma)
scaler = node.regulus.pts.scaler
data_range = context['data_range']
S = np.linspace(*data_range, N)
S1 = S[(S >= np.amin(partition.y)) & (S <= np.amax(partition.y))]
return inverse(partition.x, partition.y, kernel, S1, scaler)
return f
def inverse_regression(context, node):
if hasattr(node, 'data'):
partition = node.data
else:
partition = node
if partition.y.size < 2:
return []
sigma = 0.2 * (partition.max() - partition.min())
kernel = gaussian(sigma)
scaler = node.regulus.pts.scaler
data_range = context['data_range']
S = np.linspace(*data_range, N)
S1 = S[(S >= np.amin(partition.y)) & (S <= np.amax(partition.y))]
return inverse(partition.x, partition.y, kernel, S1, scaler )
|
yarden-livnat/regulus
|
regulus/models/inv_reg.py
|
Python
|
bsd-3-clause
| 3,790
|
[
"Gaussian"
] |
0e262f17cdd7e3364a1636aa90ef1d9db2133148c1f9e798e336435af65e79b6
|
#!/usr/bin/env python
"""
This is a simple tool to simulate a mock stream catalog
for image generation with imSim.
You need ugali and dsphsim to run the code (see README.md).
The stream is generated as a line of stacked dwarf galaxies
with Gaussian density profiles. It is close to uniform along the
stream except at its edges.
Limits of this version:
1. Only one-band generation at a time. You need to modify instcat
to change to another band. TODO: Make a two-band version for CMD,
similar to satsim.
2. The stream is assumed to be east-west oriented; the input Position
Angle is currently ignored. TODO: make P.A. an effective input.
3. Total luminosity is calculated as surface_brightness*(width*length).
In reality, the stream is not uniform at its edges,
so this is just an approximation.
4. When the simulated dwarf is too bright (Mv < -13) or
too faint (Mv > 2), the catalog will not be generated.
Inputs:
(check --help)
distance (kpc) or distance_module
surface brightness (mag/arcsec^2)
width (deg) or angular width (pc)
length (deg) or angular length (pc)
ra, dec
example:
python generateInstCat.py --surface_brightness=29.00 --angular_width=0.100000 \
--angular_length=0.2 --distance=100.00 'catalogs/sb_29_width_0.1000deg_d_100kpc.txt'
"""
import os,sys
import numpy as np
import logging
import scipy.stats as stats
import dsphsim
from dsphsim.dwarf import Dwarf
from dsphsim.instruments import factory as instrumentFactory
from dsphsim.tactician import factory as tacticianFactory
from dsphsim.velocity import PhysicalVelocity
import instcat
class Simulator(object):
@staticmethod
def simulate(dwarf,**kwargs):
""" Simulate dwarf galaxy """
# Use the 'g' and 'r' bands
dwarf.band_1 = 'g'; dwarf.band_2 = 'r'
mag_1,mag_2,ra,dec,velocity = dwarf.simulate()
angsep = dwarf.kernel.angsep(ra,dec)
rproj = dwarf.distance * np.tan(np.radians(angsep))
idx = np.arange(1,len(mag_1)+1)
# Do we also want to save vsyserr as VSYSERR?
names = ['ID','RA','DEC',
'MAG_%s'%dwarf.band_1.upper(),'MAG_%s'%dwarf.band_2.upper(),
'ANGSEP','RPROJ']
data = [idx, ra, dec,
mag_1, mag_2,
angsep, rproj]
return np.rec.fromarrays(data,names=names)
@classmethod
def parser(cls):
import argparse
description = "Simulate the observable properties of a dwarf galaxy."
formatter = argparse.ArgumentDefaultsHelpFormatter
parser = argparse.ArgumentParser(description=description,
formatter_class=formatter)
parser.add_argument('outfile',nargs='?',
help="optional output file")
parser.add_argument('--seed',type=int,default=None,
help="random seed")
parser.add_argument('-v','--verbose',action='store_true',
help="verbose output")
parser.add_argument('-n','--nsims',type=int,default=1,
help="number of simulations")
group = parser.add_argument_group('Kinematic')
group.add_argument('--kinematics',type=str,default='Gaussian',
help='kinematic distribution function')
group.add_argument('--vmean',type=float,default=60.,
help='mean systemic velocity (km/s)')
# should be mutually exclusive with vmax and rs
egroup = group.add_mutually_exclusive_group()
egroup.add_argument('--vdisp',type=float,default=3.3,
help='gaussian velocity dispersion (km/s)')
egroup.add_argument('--vmax',type=float,default=10.0,
help='maximum circular velocity (km/s)')
egroup.add_argument('--rhos',type=float,default=None,
                       help='scale density for NFW halo (Msun/pc^3)')
egroup = group.add_mutually_exclusive_group()
egroup.add_argument('--rvmax',type=float,default=0.4,
help='radius of max circular velocity (kpc)')
# ADW: it would be nice to remove this...
egroup.add_argument('--rs',type=float,default=None,
help='scale radius for NFW halo (kpc)')
group = parser.add_argument_group('Isochrone')
group.add_argument('--isochrone',type=str,default='Bressan2012',
help='isochrone type')
group.add_argument('--age',type=float,default=12.0,
help='age of stellar population (Gyr)')
group.add_argument('--metallicity',type=float,default=2e-4,
help='metallicity of stellar population')
# Distance modulus and distance are mutually exclusive
egroup = group.add_mutually_exclusive_group()
egroup.add_argument('--distance_modulus',type=float,default=17.5,
help='distance modulus')
egroup.add_argument('--distance',type=float,default=None,
help='distance to satellite (kpc)')
group = parser.add_argument_group('Kernel')
group.add_argument('--kernel',type=str,default='Gaussian',
help='kernel type')
group.add_argument('--ra',type=float,default=54.0,
                       help='centroid right ascension (deg)')
group.add_argument('--dec',type=float,default=-54.0,
help='centroid declination (deg)')
group.add_argument('--position_angle',type=float,default=0.0,
help='position angle east-of-north (deg)')
# ignore Position Angle for now, assuming the stream is east-west oriented.
# this code will not use the following parameters:
# stellar_mass
# absolute_magnitude
# extension
# ellipticity
# half_light_radius
# Extra terms for streams.
group = parser.add_argument_group('Additional parameters for streams')
group.add_argument('--surface_brightness',type=float,default=30,
help='average surface brightness within the width of stream (mag/arcsec^2)')
egroup = group.add_mutually_exclusive_group()
egroup.add_argument('--angular_width',type=float,default=0.3,
help='angular width (FWHM) of stream (deg)')
egroup.add_argument('--width',type=float,default=None,
help='physical width (FWHM) of stream (pc)')
egroup = group.add_mutually_exclusive_group()
egroup.add_argument('--angular_length',type=float,default=3.,
help='angular length of stream (deg)')
egroup.add_argument('--length',type=float,default=None,
help='physical length of stream (pc)')
# assuming Gaussian across stream.
return parser
if __name__ == "__main__":
parser = Simulator.parser()
args = parser.parse_args()
kwargs = vars(args)
if args.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if args.seed is not None:
np.random.seed(args.seed)
dwarf = Dwarf()
if args.distance is None:
distance_modulus = args.distance_modulus
else:
# Calculate distance modulus from distance in kpc
distance_modulus = 5. * (np.log10(args.distance * 1.e3) - 1.)
print 'distance_modulus', distance_modulus
distance = 10 ** (distance_modulus / 5. + 1) / 1000
print 'distance', distance, 'kpc'
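# Worked check of the relation above: for d = 100 kpc,
# distance_modulus = 5 * (log10(100 * 1e3) - 1) = 5 * (5 - 1) = 20,
# and inverting gives 10**(20/5 + 1)/1000 = 100 kpc again.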
isochrone=Dwarf.createIsochrone(name=args.isochrone, age=args.age,
metallicity=args.metallicity,
distance_modulus=distance_modulus)
dwarf.set_isochrone(isochrone)
if args.width is None:
angular_width = args.angular_width
else:
# Convert physical width to angular width in degrees
angular_width = args.width/(distance*1000) * 180./np.pi
if args.length is None:
angular_length = args.angular_length
else:
# Convert physical length to angular width in degrees
angular_length = args.length/(distance*1000) * 180./np.pi
print 'angular_width:', angular_width, 'deg'
print 'angular_length:', angular_length, 'deg'
print 'width', angular_width*np.pi/180.*(distance*1000), 'pc'
print 'length', angular_length*np.pi/180.*(distance*1000), 'pc'
angular_radius = angular_width / 2.35 # convert FWHM to Gaussian sigma, in degrees
print 'angular_radius:', angular_radius, 'deg'
ndwarf = int(angular_length/angular_radius)
print 'going to simulate', ndwarf, 'dwarfs'
#convert from surface brightness to apparent/absolute magnitude
#this is a simple estimate and needs to be modified in the future
area = angular_width * 3600 * angular_length * 3600
apparent_magnitude = args.surface_brightness - 2.5 * np.log10(area)
absolute_magnitude = apparent_magnitude - distance_modulus
print 'stream extension:', area, 'in arcsec^2'
print 'surface brightness', args.surface_brightness
print 'total apparent magnitude', apparent_magnitude
print 'total absolute magnitude', absolute_magnitude
absolute_magnitude_single = absolute_magnitude + 2.5 * np.log10(ndwarf)
print 'single absolute magnitude', absolute_magnitude_single
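# Numeric illustration of the conversion above (hypothetical inputs):
# for surface_brightness = 30 mag/arcsec^2, angular_width = 0.3 deg and
# angular_length = 3 deg, area = (0.3*3600)*(3*3600) ~ 1.17e7 arcsec^2,
# so apparent_magnitude = 30 - 2.5*log10(1.17e7) ~ 12.3.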
if absolute_magnitude < -13:
print "WARNING: The simulated dwarfs are too massive and it will take a huge of computer time \
so I'm not going to run it, please change the input parameter"
sys.exit(1)
if absolute_magnitude_single > 2:
print "WARNING: The simulated dwarfs are too faint and it is not physical for stellar population generation \
so I'm not going to run it, please change the input parameter"
sys.exit(1)
#convert from absolute magnitude to stellar mass / richness
from scipy.interpolate import UnivariateSpline
rich = np.logspace(2., 9., 1000)
mag = isochrone.absolute_magnitude(rich)
rich = rich[np.argsort(mag)]
mag = np.sort(mag)
mag_to_rich = UnivariateSpline(mag, rich, s=0.)
dwarf.richness = mag_to_rich(absolute_magnitude_single)
#generate arrays with the centers of the ndwarf dwarfs
ra_arr = args.ra + np.arange(-(ndwarf-1.)/2, (ndwarf-1.)/2+1., 1.) * angular_radius / np.cos(np.deg2rad(args.dec))
dec_arr = args.dec + np.zeros(ndwarf)
writer = instcat.InstCatWriter()
data_all = [] # initialize the big dataset for writing
k = ndwarf #used to reassign the object IDs, starting at ndwarf because IDs 0..ndwarf-1 are reserved for the unresolved background
# Write output
if args.outfile:
outfile = args.outfile
if args.nsims > 1:
base, ext = os.path.splitext(outfile)
suffix = '_{:0{width}d}'.format(args.nsims, width=len(str(args.nsims)))  # all simulations below are written into this single file
outfile = base + suffix + ext
if os.path.exists(outfile): os.remove(outfile)
logging.info("Writing %s..." % outfile)
out = open(outfile, 'w', 1)
else:
out = sys.stdout
kernel = Dwarf.createKernel(name=args.kernel, extension=angular_radius,
ellipticity=0,
position_angle=0,
lon=args.ra, lat=args.dec)
dwarf.set_kernel(kernel)
writer.write_toppart(out, dwarf)
print 'center ra, dec:', dwarf.lon, dwarf.lat
for j in range(args.nsims):
for i in range(ndwarf):
np.random.seed(i) # random seeds to make dwarfs different
kernel=Dwarf.createKernel(name=args.kernel,extension=angular_radius,
ellipticity=0,
position_angle=0,
lon=ra_arr[i],lat=dec_arr[i])
dwarf.set_kernel(kernel)
# Set the kinematic properties
if args.rs is not None: args.rvmax = 2.163*args.rs
if args.rhos is not None: raise Exception('Not implemented')
kinematics=Dwarf.createKinematics(name=args.kinematics,
vdisp=args.vdisp, vmean=args.vmean,
vmax=args.vmax, rvmax=args.rvmax)
dwarf.set_kinematics(kinematics)
logging.debug(str(dwarf))
# Run the simulation
logging.info("Simulating galaxy %i..."%i)
data = Simulator.simulate(dwarf)
data['ID'] = data['ID'] + k
k = data['ID'][-1]
writer.write_middlepart(out,dwarf,data,i)
data_all.append(data)
#print data.shape
print 'each dwarf has a total of', len(data), 'stars (including stars below detection limit)'
print 'the stream has', len(data_all)*len(data), 'stars (including stars below detection limit)'
data_all = np.concatenate(data_all)
writer.write_endpart(out,dwarf,data_all)
out.flush()
|
LSSTDESC/LSSTDarkMatter
|
streamsim/generateInstCat.py
|
Python
|
mit
| 13,024
|
[
"Galaxy",
"Gaussian"
] |
5ad413472a110c3d76155f74df1f3004c7fa2cb44e3ca5980254b5860f205924
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import unittest
# so we can find our libraries, no matter how we're called
findbin = os.path.dirname(os.path.realpath(sys.argv[0]))
sys.path.append(os.path.join(findbin, "../"))
from readers import eq36_reader
from dbclass import ThermoDB
class TestEQ36Reader(unittest.TestCase):
"""
Test that the EQ3/6 database reader functions correctly
"""
def readDatabase(self):
"""
Read the database
"""
with open('testdata/eq36testdata.dat', 'r') as dbfile:
dblist = dbfile.readlines()
# Parse the database
self.db = eq36_reader.readDatabase(dblist)
def testTemperature(self):
"""
Test that the temperatures are correctly parsed
"""
self.readDatabase()
gold = [0.0100, 25.0000, 60.0000, 100.0000, 150.0000, 200.0000, 250.0000, 300.0000]
self.assertEqual(self.db.temperatures, gold)
def testPressure(self):
"""
Test that the pressures are correctly parsed
"""
self.readDatabase()
gold = [1.0132, 1.0132, 1.0132, 1.0132, 4.7572, 15.5365, 39.7365, 85.8378]
self.assertEqual(self.db.pressures, gold)
def testActivityModel(self):
"""
Test that the activity model is set correctly
"""
self.readDatabase()
gold = 'debye-huckel'
adhgold = [0.4939, 0.5114, 0.5465, 0.5995, 0.6855, 0.7994, 0.9593, 1.2180]
bdhgold = [0.3253, 0.3288, 0.3346, 0.3421, 0.3525, 0.3639, 0.3766, 0.3925]
bdotgold = [0.0394, 0.0410, 0.0438, 0.0460, 0.0470, 0.0470, 0.0340, 0.0000]
self.assertEqual(self.db.activity_model, gold)
self.assertEqual(self.db.adh, adhgold)
self.assertEqual(self.db.bdh, bdhgold)
self.assertEqual(self.db.bdot, bdotgold)
def testFugacityModel(self):
"""
Test that the fugacity model is set correctly
"""
self.readDatabase()
gold = None
self.assertEqual(self.db.fugacity_model, gold)
def testLogkModel(self):
"""
Test that the equilibrium constant model is set correctly
"""
self.readDatabase()
gold = 'maier-kelly'
eqngold = 'a_0 ln(T) + a_1 + a_2 T + a_3 / T + a_4 / T^2'
self.assertEqual(self.db.logk_model, gold)
self.assertEqual(self.db.logk_model_eqn, eqngold)
def testNeutralSpecies(self):
"""
Test that the neutral species coefficients are correctly parsed
"""
self.readDatabase()
gold = {'co2': {'(coefficients': [-1.0312, 0.0012806, 255.9, 0.4445, -0.001606]}}
self.assertDictEqual(self.db.neutral_species, gold)
def testElements(self):
"""
Test that the elements are correctly parsed
"""
self.readDatabase()
gold = {'O': {'molecular weight': 15.99940},
'Ag': {'molecular weight': 107.86820}}
self.assertDictEqual(self.db.elements, gold)
def testBasisSpecies(self):
"""
Test that the basis species are correctly parsed
"""
self.readDatabase()
gold = {'H2O': {'charge': 0.0, 'radius': 3.0, 'molecular weight': 18.015,
'elements': {'H': 2.0000, 'O': 1.0000}},
'Ag+': {'charge': 1.0, 'radius': 2.5, 'molecular weight': 107.868,
'elements': {'Ag': 1.0000}}}
self.assertDictEqual(self.db.basis_species, gold)
def testSecondarySpecies(self):
"""
Test that the secondary species are correctly parsed
"""
self.readDatabase()
gold = {'(NH4)2Sb2S4(aq)': {'charge': 0.0, 'radius': 3.0, 'molecular weight': 407.841,
'elements': {'H': 8.0000, 'N': 2.0000, 'S': 4.0000, 'Sb': 2.0000},
'species': {'H2O': -6.0000, 'NH3(aq)': 2.0000, 'Sb(OH)3(aq)': 2.0000,
'H+': 4.0000, 'HS-': 4.0000},
'logk': [-74.5361, -67.6490, -59.8877, -52.5457, -45.0674, -38.8180, -33.1941, -28.0323],
'note': 'Missing array values in original database have been filled using a maier-kelly fit. Original values are [-74.5361, -67.6490, -59.8877, -52.5457, -45.0674, -38.8180, 500.0000, 500.0000]'}}
self.assertDictEqual(self.db.secondary_species, gold)
def testMineralSpecies(self):
"""
Test that the mineral species are correctly parsed
"""
self.readDatabase()
gold = {'Calcite': {'molar volume': 36.934,
'molecular weight': 100.087,
'elements': {'C': 1.0000, 'Ca': 1.0000, 'O': 3.0000},
'species': {'H+': -1.0000, 'Ca++': 1.0000, 'HCO3-': 1.0000},
'logk': [2.2257, 1.8487, 1.3330, 0.7743, 0.0999, -0.5838, -1.3262, -2.2154]}}
self.assertDictEqual(self.db.mineral_species, gold)
def testGasSpecies(self):
"""
Test that the gas species are correctly parsed
"""
self.readDatabase()
gold = {'Ag(g)': {'species': {'H+': -1.0000, 'O2(g)': -0.2500, 'Ag+': 1.0000, 'H2O': 0.5000},
'elements': {'Ag': 1.0000},
'molecular weight': 107.868,
'logk': [55.5420, 50.3678, 44.4606, 39.1093, 33.8926, 29.8196, 26.2832, 23.1649],
'note': 'Missing array values in original database have been filled using a maier-kelly fit. Original values are [55.5420, 50.3678, 44.4606, 39.1093, 33.8926, 29.8196, 500.0000, 500.0000]'}}
self.assertDictEqual(self.db.gas_species, gold)
def testRedoxCouples(self):
"""
Test that the redox couples are correctly parsed
"""
self.readDatabase()
gold = {'HS-': {'species': {'SO4--': 1.0000, 'H+': 1.0000, 'O2(g)': -2.0000},
'elements': {'H': 1.0000, 'S': 1.0000},
'charge': -1.0,
'radius': 3.5,
'molecular weight': 33.074,
'logk': [146.7859, 132.5203, 116.0105, 100.8144, 85.7147, 73.6540, 63.7280, 55.2988]}}
self.assertDictEqual(self.db.redox_couples, gold)
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2, buffer=True, exit=False)
|
nuclear-wizard/moose
|
modules/geochemistry/python/tests/test_eq36reader.py
|
Python
|
lgpl-2.1
| 6,784
|
[
"MOOSE"
] |
ee12521360120d4a0f1100dc5691320fdf58521553b7f8b0ad8d3c41afc3c20b
|
from pyaxiom.netcdf.sensors.timeseries import TimeSeries, get_dataframe_from_variable
from pyaxiom.netcdf.sensors.profile import Profile, IncompleteProfile
|
axiom-data-science/pyaxiom
|
pyaxiom/netcdf/sensors/__init__.py
|
Python
|
mit
| 156
|
[
"NetCDF"
] |
7a55a5f58e0f5d9134fafdf7fece512bbfb80cf3e80bdb26d6a96093d9874d3c
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import collections
def sapt_psivars():
"""Returns dictionary of PsiVariable definitions.
"""
pv1 = collections.OrderedDict()
pv1['SAPT EXCHSCAL1'] = {'func': lambda x: 1.0 if x[0] < 1.0e-5 else x[0] / x[1], 'args': ['SAPT EXCH10 ENERGY', 'SAPT EXCH10(S^2) ENERGY']} # special treatment in pandas
pv1['SAPT EXCHSCAL3'] = {'func': lambda x: x[0] ** 3, 'args': ['SAPT EXCHSCAL1']}
pv1['SAPT EXCHSCAL'] = {'func': lambda x: x[0] ** x[1], 'args': ['SAPT EXCHSCAL1', 'SAPT ALPHA']}
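# In equation form, the three scale factors defined above are
#   s1 = E_exch10 / E_exch10(S^2)   (or 1 if E_exch10 is vanishingly small),
#   s3 = s1**3,  and  s = s1**alpha,
# which scale the higher-order exchange terms in the definitions below.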
pv1['SAPT HF(2) ALPHA=0.0 ENERGY'] = {'func': lambda x: x[0] - (x[1] + x[2] + x[3] + x[4]),
'args': ['SAPT HF TOTAL ENERGY', 'SAPT ELST10,R ENERGY', 'SAPT EXCH10 ENERGY',
'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY']}
pv1['SAPT HF(2),U ALPHA=0.0 ENERGY'] = {'func': lambda x: x[0] - (x[1] + x[2] + x[3] + x[4]),
'args': ['SAPT HF TOTAL ENERGY', 'SAPT ELST10,R ENERGY', 'SAPT EXCH10 ENERGY',
'SAPT IND20,U ENERGY', 'SAPT EXCH-IND20,U ENERGY']}
pv1['SAPT HF(2) ENERGY'] = {'func': lambda x: x[1] + (1.0 - x[0]) * x[2],
'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ALPHA=0.0 ENERGY', 'SAPT EXCH-IND20,R ENERGY']}
pv1['SAPT HF(2),U ENERGY'] = {'func': lambda x: x[1] + (1.0 - x[0]) * x[2],
'args': ['SAPT EXCHSCAL', 'SAPT HF(2),U ALPHA=0.0 ENERGY', 'SAPT EXCH-IND20,U ENERGY']}
pv1['SAPT HF(3) ENERGY'] = {'func': lambda x: x[1] - (x[2] + x[0] * x[3]),
'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND30,R ENERGY', 'SAPT EXCH-IND30,R ENERGY']}
pv1['SAPT MP2(2) ENERGY'] = {'func': lambda x: x[1] - (x[2] + x[3] + x[4] + x[0] * (x[5] + x[6] + x[7] + x[8])),
'args': ['SAPT EXCHSCAL', 'SAPT MP2 CORRELATION ENERGY', 'SAPT ELST12,R ENERGY', # MP2 CORRELATION ENERGY renamed here from pandas since this is IE # renamed again SA --> SAPT
'SAPT IND22 ENERGY', 'SAPT DISP20 ENERGY', 'SAPT EXCH11(S^2) ENERGY',
'SAPT EXCH12(S^2) ENERGY', 'SAPT EXCH-IND22 ENERGY', 'SAPT EXCH-DISP20 ENERGY']}
pv1['SAPT MP2(3) ENERGY'] = {'func': lambda x: x[1] - (x[2] + x[0] * x[3]),
'args': ['SAPT EXCHSCAL', 'SAPT MP2(2) ENERGY', 'SAPT IND-DISP30 ENERGY', 'SAPT EXCH-IND-DISP30 ENERGY']}
pv1['SAPT MP4 DISP'] = {'func': lambda x: x[0] * x[1] + x[2] + x[3] + x[4] + x[5],
'args': ['SAPT EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY',
'SAPT DISP21 ENERGY', 'SAPT DISP22(SDQ) ENERGY', 'SAPT EST.DISP22(T) ENERGY']}
pv1['SAPT CCD DISP'] = {'func': lambda x: x[0] * x[1] + x[2] + x[3] + x[4],
'args': ['SAPT EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP2(CCD) ENERGY',
'SAPT DISP22(S)(CCD) ENERGY', 'SAPT EST.DISP22(T)(CCD) ENERGY']}
pv1['SAPT0 ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY']}
pv1['SAPT0 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT EXCH10 ENERGY']}
pv1['SAPT0 IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3],
'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY']}
pv1['SAPT0 IND,U ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3],
'args': ['SAPT EXCHSCAL', 'SAPT HF(2),U ENERGY', 'SAPT IND20,U ENERGY', 'SAPT EXCH-IND20,U ENERGY']}
pv1['SAPT0 DISP ENERGY'] = {'func': lambda x: x[0] * x[1] + x[2],
'args': ['SAPT EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']}
pv1['SAPT0 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT0 ELST ENERGY', 'SAPT0 EXCH ENERGY', 'SAPT0 IND ENERGY', 'SAPT0 DISP ENERGY']}
pv1['SSAPT0 ELST ENERGY'] = {'func': sum, 'args': ['SAPT0 ELST ENERGY']}
pv1['SSAPT0 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT0 EXCH ENERGY']}
pv1['SSAPT0 IND ENERGY'] = {'func': lambda x: x[1] + (x[0] - 1.0) * x[2],
'args': ['SAPT EXCHSCAL3', 'SAPT0 IND ENERGY', 'SAPT EXCH-IND20,R ENERGY']}
pv1['SSAPT0 IND,U ENERGY'] = {'func': lambda x: x[1] + (x[0] - 1.0) * x[2],
'args': ['SAPT EXCHSCAL3', 'SAPT0 IND,U ENERGY', 'SAPT EXCH-IND20,U ENERGY']}
pv1['SSAPT0 DISP ENERGY'] = {'func': lambda x: x[0] * x[1] + x[2],
'args': ['SAPT EXCHSCAL3', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']}
pv1['SSAPT0 TOTAL ENERGY'] = {'func': sum, 'args': ['SSAPT0 ELST ENERGY', 'SSAPT0 EXCH ENERGY', 'SSAPT0 IND ENERGY', 'SSAPT0 DISP ENERGY']}
pv1['SCS-SAPT0 ELST ENERGY'] = {'func': sum, 'args': ['SAPT0 ELST ENERGY']}
pv1['SCS-SAPT0 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT0 EXCH ENERGY']}
pv1['SCS-SAPT0 IND ENERGY'] = {'func': sum, 'args': ['SAPT0 IND ENERGY']}
pv1['SCS-SAPT0 IND,U ENERGY'] = {'func': sum, 'args': ['SAPT0 IND,U ENERGY']}
pv1['SCS-SAPT0 DISP ENERGY'] = {'func': lambda x: (x[0] - x[3]) * (x[1] + x[2]) + x[3] * (x[4] + x[5]),
'args': [0.66, 'SAPT SAME-SPIN EXCH-DISP20 ENERGY', 'SAPT SAME-SPIN DISP20 ENERGY',
1.2, 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']} # note no xs for SCS disp
pv1['SCS-SAPT0 TOTAL ENERGY'] = {'func': sum, 'args': ['SCS-SAPT0 ELST ENERGY', 'SCS-SAPT0 EXCH ENERGY', 'SCS-SAPT0 IND ENERGY', 'SCS-SAPT0 DISP ENERGY']}
pv1['SAPT2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY']}
pv1['SAPT2 EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]),
'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']}
pv1['SAPT2 IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5],
'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY',
'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY']}
pv1['SAPT2 DISP ENERGY'] = {'func': lambda x: x[0] * x[1] + x[2],
'args': ['SAPT EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']}
pv1['SAPT2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2 ELST ENERGY', 'SAPT2 EXCH ENERGY', 'SAPT2 IND ENERGY', 'SAPT2 DISP ENERGY']}
pv1['SAPT2+ ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY']}
pv1['SAPT2+ EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]),
'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']}
pv1['SAPT2+ IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5],
'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY',
'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY']}
pv1['SAPT2+ DISP ENERGY'] = {'func': sum, 'args': ['SAPT MP4 DISP']}
pv1['SAPT2+ TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY', 'SAPT2+ EXCH ENERGY', 'SAPT2+ IND ENERGY', 'SAPT2+ DISP ENERGY']}
pv1['SAPT2+(CCD) ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY']}
pv1['SAPT2+(CCD) EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+ EXCH ENERGY']}
pv1['SAPT2+(CCD) IND ENERGY'] = {'func': sum, 'args': ['SAPT2+ IND ENERGY']}
pv1['SAPT2+(CCD) DISP ENERGY'] = {'func': sum, 'args': ['SAPT CCD DISP']}
pv1['SAPT2+(CCD) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(CCD) ELST ENERGY', 'SAPT2+(CCD) EXCH ENERGY', 'SAPT2+(CCD) IND ENERGY', 'SAPT2+(CCD) DISP ENERGY']}
pv1['SAPT2+DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY']}
pv1['SAPT2+DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+ EXCH ENERGY']}
pv1['SAPT2+DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+ IND ENERGY', 'SAPT MP2(2) ENERGY']}
pv1['SAPT2+DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+ DISP ENERGY']}
pv1['SAPT2+DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+DMP2 ELST ENERGY', 'SAPT2+DMP2 EXCH ENERGY', 'SAPT2+DMP2 IND ENERGY', 'SAPT2+DMP2 DISP ENERGY']}
pv1['SAPT2+(CCD)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY']}
pv1['SAPT2+(CCD)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+ EXCH ENERGY']}
pv1['SAPT2+(CCD)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+DMP2 IND ENERGY']}
pv1['SAPT2+(CCD)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+(CCD) DISP ENERGY']}
pv1['SAPT2+(CCD)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(CCD)DMP2 ELST ENERGY', 'SAPT2+(CCD)DMP2 EXCH ENERGY', 'SAPT2+(CCD)DMP2 IND ENERGY', 'SAPT2+(CCD)DMP2 DISP ENERGY']}
pv1['SAPT2+(3) ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY', 'SAPT ELST13,R ENERGY']}
pv1['SAPT2+(3) EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]),
'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']}
pv1['SAPT2+(3) IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5],
'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY',
'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY']}
pv1['SAPT2+(3) DISP ENERGY'] = {'func': sum, 'args': ['SAPT MP4 DISP', 'SAPT DISP30 ENERGY']}
pv1['SAPT2+(3) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY', 'SAPT2+(3) EXCH ENERGY', 'SAPT2+(3) IND ENERGY', 'SAPT2+(3) DISP ENERGY']}
pv1['SAPT2+(3)(CCD) ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY']}
pv1['SAPT2+(3)(CCD) EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) EXCH ENERGY']}
pv1['SAPT2+(3)(CCD) IND ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) IND ENERGY']}
pv1['SAPT2+(3)(CCD) DISP ENERGY'] = {'func': sum, 'args': ['SAPT CCD DISP', 'SAPT DISP30 ENERGY']}
pv1['SAPT2+(3)(CCD) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)(CCD) ELST ENERGY', 'SAPT2+(3)(CCD) EXCH ENERGY', 'SAPT2+(3)(CCD) IND ENERGY', 'SAPT2+(3)(CCD) DISP ENERGY']}
pv1['SAPT2+(3)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY']}
pv1['SAPT2+(3)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) EXCH ENERGY']}
pv1['SAPT2+(3)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) IND ENERGY', 'SAPT MP2(2) ENERGY']}
pv1['SAPT2+(3)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) DISP ENERGY']}
pv1['SAPT2+(3)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)DMP2 ELST ENERGY', 'SAPT2+(3)DMP2 EXCH ENERGY', 'SAPT2+(3)DMP2 IND ENERGY', 'SAPT2+(3)DMP2 DISP ENERGY']}
pv1['SAPT2+(3)(CCD)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY']}
pv1['SAPT2+(3)(CCD)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) EXCH ENERGY']}
pv1['SAPT2+(3)(CCD)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)DMP2 IND ENERGY']}
pv1['SAPT2+(3)(CCD)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)(CCD) DISP ENERGY']}
pv1['SAPT2+(3)(CCD)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)(CCD)DMP2 ELST ENERGY', 'SAPT2+(3)(CCD)DMP2 EXCH ENERGY', 'SAPT2+(3)(CCD)DMP2 IND ENERGY', 'SAPT2+(3)(CCD)DMP2 DISP ENERGY']}
pv1['SAPT2+3 ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY', 'SAPT ELST13,R ENERGY']}
pv1['SAPT2+3 EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]),
'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']}
pv1['SAPT2+3 IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5] + x[6] + x[0] * x[7],
'args': ['SAPT EXCHSCAL', 'SAPT HF(3) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY',
'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY', 'SAPT IND30,R ENERGY', 'SAPT EXCH-IND30,R ENERGY']}
pv1['SAPT2+3 DISP ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5],
'args': ['SAPT EXCHSCAL', 'SAPT MP4 DISP', 'SAPT DISP30 ENERGY', 'SAPT EXCH-DISP30 ENERGY',
'SAPT IND-DISP30 ENERGY', 'SAPT EXCH-IND-DISP30 ENERGY']}
pv1['SAPT2+3 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY', 'SAPT2+3 EXCH ENERGY', 'SAPT2+3 IND ENERGY', 'SAPT2+3 DISP ENERGY']}
pv1['SAPT2+3(CCD) ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY']}
pv1['SAPT2+3(CCD) EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+3 EXCH ENERGY']}
pv1['SAPT2+3(CCD) IND ENERGY'] = {'func': sum, 'args': ['SAPT2+3 IND ENERGY']}
pv1['SAPT2+3(CCD) DISP ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5],
'args': ['SAPT EXCHSCAL', 'SAPT CCD DISP', 'SAPT DISP30 ENERGY', 'SAPT EXCH-DISP30 ENERGY',
'SAPT IND-DISP30 ENERGY', 'SAPT EXCH-IND-DISP30 ENERGY']}
pv1['SAPT2+3(CCD) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3(CCD) ELST ENERGY', 'SAPT2+3(CCD) EXCH ENERGY', 'SAPT2+3(CCD) IND ENERGY', 'SAPT2+3(CCD) DISP ENERGY']}
pv1['SAPT2+3DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY']}
pv1['SAPT2+3DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+3 EXCH ENERGY']}
pv1['SAPT2+3DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+3 IND ENERGY', 'SAPT MP2(3) ENERGY']}
pv1['SAPT2+3DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+3 DISP ENERGY']}
pv1['SAPT2+3DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3DMP2 ELST ENERGY', 'SAPT2+3DMP2 EXCH ENERGY', 'SAPT2+3DMP2 IND ENERGY', 'SAPT2+3DMP2 DISP ENERGY']}
pv1['SAPT2+3(CCD)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY']}
pv1['SAPT2+3(CCD)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+3 EXCH ENERGY']}
pv1['SAPT2+3(CCD)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+3DMP2 IND ENERGY']}
pv1['SAPT2+3(CCD)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+3(CCD) DISP ENERGY']}
pv1['SAPT2+3(CCD)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3(CCD)DMP2 ELST ENERGY', 'SAPT2+3(CCD)DMP2 EXCH ENERGY', 'SAPT2+3(CCD)DMP2 IND ENERGY', 'SAPT2+3(CCD)DMP2 DISP ENERGY']}
return pv1
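
# --- Illustrative sketch (not part of the original psivardefs.py) ---
# Each entry above pairs a callable 'func' with the names of the psivars it
# consumes, so evaluating a definition is a lookup-then-apply. Numeric
# literals in 'args' (e.g. the 0.66/1.2 SCS weights) pass through unchanged;
# derived names (e.g. 'SAPT0 ELST ENERGY') must already be present in the
# value dict, so entries are evaluated in definition order. The helper name
# below is hypothetical.
def _resolve_psivar(name, pv1, known):
    """Evaluate pv1[name] against a dict of already-computed psivar values."""
    entry = pv1[name]
    # literals stay literal; strings are looked up in the known-values dict
    vals = [a if not isinstance(a, str) else known[a] for a in entry['args']]
    # 'func' is either the builtin sum or a lambda taking the whole list
    return entry['func'](vals)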
|
CDSherrill/psi4
|
psi4/driver/qcdb/psivardefs.py
|
Python
|
lgpl-3.0
| 15,690
|
[
"Psi4"
] |
971d2bce5ec9bb345072d1a6c828b863960582b8d4ec12a60b2d84585e7f12a5
|
# Copyright 2009 by Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Command line wrapper for the multiple alignment program PROBCONS.
"""
from __future__ import print_function
__docformat__ = "restructuredtext en" # Don't just use plain text in epydoc API pages!
from Bio.Application import _Option, _Switch, _Argument, AbstractCommandline
class ProbconsCommandline(AbstractCommandline):
"""Command line wrapper for the multiple alignment program PROBCONS.
http://probcons.stanford.edu/
Example:
--------
To align a FASTA file (unaligned.fasta) with the output in ClustalW
format, and otherwise default settings, use:
>>> from Bio.Align.Applications import ProbconsCommandline
>>> probcons_cline = ProbconsCommandline(input="unaligned.fasta",
... clustalw=True)
>>> print(probcons_cline)
probcons -clustalw unaligned.fasta
You would typically run the command line with probcons_cline() or via
the Python subprocess module, as described in the Biopython tutorial.
Note that PROBCONS will write the alignment to stdout, which you may
want to save to a file and then parse, e.g.::
stdout, stderr = probcons_cline()
with open("aligned.aln", "w") as handle:
handle.write(stdout)
from Bio import AlignIO
align = AlignIO.read("aligned.fasta", "clustalw")
Alternatively, to parse the output with AlignIO directly you can
use StringIO to turn the string into a handle::
stdout, stderr = probcons_cline()
from StringIO import StringIO
from Bio import AlignIO
align = AlignIO.read(StringIO(stdout), "clustalw")
Citations:
----------
Do, C.B., Mahabhashyam, M.S.P., Brudno, M., and Batzoglou, S. 2005.
PROBCONS: Probabilistic Consistency-based Multiple Sequence Alignment.
Genome Research 15: 330-340.
Last checked against version: 1.12
"""
def __init__(self, cmd="probcons", **kwargs):
self.parameters = \
[
# Note that some options cannot be assigned via properties using the
# original documented option (because hyphens are not valid for names in
# python), e.g cmdline.pre-training = 3 will not work
# In these cases the shortened option name should be used
# cmdline.pre = 3
_Switch(["-clustalw", "clustalw"],
"Use CLUSTALW output format instead of MFA"),
_Option(["-c", "c", "--consistency", "consistency"],
"Use 0 <= REPS <= 5 (default: 2) passes of consistency transformation",
checker_function=lambda x: x in range(0, 6),
equate=False),
_Option(["-ir", "--iterative-refinement", "iterative-refinement", "ir"],
"Use 0 <= REPS <= 1000 (default: 100) passes of "
"iterative-refinement",
checker_function=lambda x: x in range(0, 1001),
equate=False),
_Option(["-pre", "--pre-training", "pre-training", "pre"],
"Use 0 <= REPS <= 20 (default: 0) rounds of pretraining",
checker_function=lambda x: x in range(0, 21),
equate=False),
_Switch(["-pairs", "pairs"],
"Generate all-pairs pairwise alignments"),
_Switch(["-viterbi", "viterbi"],
"Use Viterbi algorithm to generate all pairs "
"(automatically enables -pairs)"),
_Switch(["-verbose", "verbose"],
"Report progress while aligning (default: off)"),
_Option(["-annot", "annot"],
"Write annotation for multiple alignment to FILENAME",
equate=False),
_Option(["-t", "t", "--train", "train"],
"Compute EM transition probabilities, store in FILENAME "
"(default: no training)",
equate=False),
_Switch(["-e", "e", "--emissions", "emissions"],
"Also reestimate emission probabilities (default: off)"),
_Option(["-p", "p", "--paramfile", "paramfile"],
"Read parameters from FILENAME",
equate=False),
_Switch(["-a", "--alignment-order", "alignment-order", "a"],
"Print sequences in alignment order rather than input "
"order (default: off)"),
# Input file name
_Argument(["input"],
"Input file name. Must be multiple FASTA alignment "
"(MFA) format",
filename=True,
is_required=True),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
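# Hypothetical usage sketch (not part of the original module): the
# checker_function lambdas above validate values at assignment time, so an
# out-of-range setting (e.g. consistency=9) raises before any command is run.
def _example_probcons_usage():
    cline = ProbconsCommandline(input="unaligned.fasta", clustalw=True)
    cline.consistency = 3  # accepted: 3 is in range(0, 6)
    # str(cline) should now read "probcons -clustalw -c 3 unaligned.fasta"
    return str(cline)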
def _test():
"""Run the module's doctests (PRIVATE)."""
print("Running modules doctests...")
import doctest
doctest.testmod()
print("Done")
if __name__ == "__main__":
_test()
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/Align/Applications/_Probcons.py
|
Python
|
apache-2.0
| 5,219
|
[
"Biopython"
] |
26f459efd7b05e8328931d9a8a24af78100aa974f0784126f3fa6a00051b7839
|
import ctypes
import sys
# ESPResSo's compiled core must share symbols across extension modules,
# hence RTLD_GLOBAL before importing it.
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import espresso as es
import numpy
# Langevin thermostat (set via the Tcl interface): temperature 1.0, friction 1.0
es._espressoHandle.Tcl_Eval("thermostat langevin 1. 1.")
N = 100
es.glob.time_step = 0.01
es.glob.skin = 1.
es.glob.box_l = [10., 10., 10.]
# lattice-Boltzmann fluid on the CPU with unit density
es.lb["cpu"].dens = 1
print es.lb["cpu"].dens
# scatter N particles uniformly in the box
for i in range(N):
    es.part[i].pos = numpy.random.random(3) * es.glob.box_l
# Lennard-Jones interaction between type-0 particles (eps=1, sigma=1, shifted)
es.inter[0, 0].lennardJones = {"eps": 1, "sigma": 1, "shift": 0.25}
es._espressoHandle.Tcl_Eval("integrate 100")
for i in range(N):
    print es.part[i].pos
es._espressoHandle.die()
|
roehm/cython
|
cython/examples/hello_parallel_world.py
|
Python
|
gpl-3.0
| 555
|
[
"ESPResSo"
] |
df8989fff2fa2025ad836bb1606b32f9e2de1052dcbda16990f8024b06de0f7c
|
import unittest
from DIRAC.Core.Workflow.Module import ModuleDefinition
from DIRAC.Core.Workflow.Step import StepDefinition
from DIRAC.Interfaces.API.Job import Job
from DIRAC.Workflow.Utilities.Utils import getStepDefinition, getStepCPUTimes
#############################################################################
class UtilitiesTestCase( unittest.TestCase ):
""" Base class
"""
def setUp( self ):
self.job = Job()
pass
class UtilsSuccess( UtilitiesTestCase ):
def test__getStepDefinition( self ):
importLine = """
from DIRAC.Workflow.Modules.<MODULE> import <MODULE>
"""
# modules
gaudiApp = ModuleDefinition( 'Script' )
body = importLine.replace( '<MODULE>', 'Script' )
gaudiApp.setDescription( getattr( __import__( "%s.%s" % ( 'DIRAC.Workflow.Modules', 'Script' ),
globals(), locals(), ['__doc__'] ),
"__doc__" ) )
gaudiApp.setBody( body )
genBKReport = ModuleDefinition( 'FailoverRequest' )
body = importLine.replace( '<MODULE>', 'FailoverRequest' )
genBKReport.setDescription( getattr( __import__( "%s.%s" % ( 'DIRAC.Workflow.Modules', 'FailoverRequest' ),
globals(), locals(), ['__doc__'] ),
"__doc__" ) )
genBKReport.setBody( body )
# step
appDefn = StepDefinition( 'App_Step' )
appDefn.addModule( gaudiApp )
appDefn.createModuleInstance( 'Script', 'Script' )
appDefn.addModule( genBKReport )
appDefn.createModuleInstance( 'FailoverRequest', 'FailoverRequest' )
appDefn.addParameterLinked( gaudiApp.parameters )
stepDef = getStepDefinition( 'App_Step', ['Script', 'FailoverRequest'] )
self.assertTrue( str( appDefn ) == str( stepDef ) )
self.job._addParameter( appDefn, 'name', 'type', 'value', 'desc' )
self.job._addParameter( appDefn, 'name1', 'type1', 'value1', 'desc1' )
stepDef = getStepDefinition( 'App_Step', ['Script', 'FailoverRequest'],
parametersList = [[ 'name', 'type', 'value', 'desc' ],
[ 'name1', 'type1', 'value1', 'desc1' ]] )
self.assertTrue( str( appDefn ) == str( stepDef ) )
def test_getStepCPUTimes( self ):
execT, cpuT = getStepCPUTimes( {} )
self.assertEqual( execT, 0 )
self.assertEqual( cpuT, 0 )
execT, cpuT = getStepCPUTimes( {'StartTime':0, 'StartStats': ( 0, 0, 0, 0, 0 )} )
print execT, cpuT
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase( UtilitiesTestCase )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( UtilsSuccess ) )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
|
andresailer/DIRAC
|
Workflow/Utilities/test/Test_Utilities.py
|
Python
|
gpl-3.0
| 2,818
|
[
"DIRAC"
] |
f31a82a5141f13162900e4ffcc8c1a84307b9d2ffe6fce05d648733b503e150b
|
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
import numpy as np
#-----------------------------------------------
# Set these parameters for adjoint flagging....
# location of output from computing adjoint:
adjoint_output = os.path.abspath('../adjoint/_output')
print('Will flag using adjoint solution from %s' % adjoint_output)
# Time period of interest:
t1 = 1.
t2 = 6.
# Determining type of adjoint flagging:
# taking inner product with forward solution or Richardson error:
flag_forward_adjoint = False
flag_richardson_adjoint = True
# tolerance for adjoint flagging:
#adjoint_flag_tolerance = 0.002 # suggested if using forward solution
adjoint_flag_tolerance = 0.001 # suggested if using Richardson error
#-----------------------------------------------
#------------------------------
def setrun(claw_pkg='amrclaw'):
#------------------------------
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "amrclaw" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
from clawpack.clawutil import data
assert claw_pkg.lower() == 'amrclaw', "Expected claw_pkg = 'amrclaw'"
num_dim = 2
rundata = data.ClawRunData(claw_pkg, num_dim)
#------------------------------------------------------------------
# Problem-specific parameters to be written to setprob.data:
#------------------------------------------------------------------
probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
probdata.add_param('rho', 1., 'density of medium')
probdata.add_param('bulk', 4., 'bulk modulus')
#------------------------------------------------------------------
# Standard Clawpack parameters to be written to claw.data:
# (or to amrclaw.data for AMR)
#------------------------------------------------------------------
clawdata = rundata.clawdata # initialized when rundata instantiated
# Set single grid parameters first.
# See below for AMR parameters.
# ---------------
# Spatial domain:
# ---------------
# Number of space dimensions:
clawdata.num_dim = num_dim
# Lower and upper edge of computational domain:
clawdata.lower[0] = -4.000000e+00 # xlower
clawdata.upper[0] = 8.000000e+00 # xupper
clawdata.lower[1] = -1.000000e+00 # ylower
clawdata.upper[1] = 11.000000e+00 # yupper
# Number of grid cells:
clawdata.num_cells[0] = 50 # mx
clawdata.num_cells[1] = 50 # my
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 3
# Number of auxiliary variables in the aux array (initialized in setaux)
# see setadjoint
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 0
# -------------
# Initial time:
# -------------
clawdata.t0 = 0.000000
# Restart from checkpoint file of a previous run?
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.chkNNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = 'fort.chk00006' # File to use for restart data
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
clawdata.output_style = 1
if clawdata.output_style==1:
# Output ntimes frames at equally spaced times up to tfinal:
# Can specify num_output_times = 0 for no output
clawdata.num_output_times = 20
clawdata.tfinal = 6.0
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list or numpy array of output times:
# Include t0 if you want output at the initial time.
clawdata.output_times = [0., 0.1]
elif clawdata.output_style == 3:
# Output every step_interval timesteps over total_steps timesteps:
clawdata.output_step_interval = 2
clawdata.total_steps = 4
clawdata.output_t0 = True # output at initial (or restart) time?
clawdata.output_format = 'ascii' # 'ascii', 'binary', 'netcdf'
clawdata.output_q_components = 'all' # could be list such as [True,True]
clawdata.output_aux_components = 'all' # could be list
clawdata.output_aux_onlyonce = False # output aux arrays only at t0
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
# (E.g. verbosity == 2 means print only on levels 1 and 2.)
clawdata.verbosity = 0
# --------------
# Time stepping:
# --------------
# if dt_variable==True: variable time steps used based on cfl_desired,
# if dt_variable==False: fixed time steps dt = dt_initial always used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# (If dt_variable==0 then dt=dt_initial for all steps)
clawdata.dt_initial = 1.00000e-02
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1.000000e+99
# Desired Courant number if variable dt used
clawdata.cfl_desired = 0.900000
# max Courant number to allow without retaking step with a smaller dt:
clawdata.cfl_max = 1.000000
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 50000
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Use dimensional splitting? (not yet available for AMR)
clawdata.dimensional_split = 'unsplit'
# For unsplit method, transverse_waves can be
# 0 or 'none' ==> donor cell (only normal solver used)
# 1 or 'increment' ==> corner transport of waves
# 2 or 'all' ==> corner transport of 2nd order corrections too
clawdata.transverse_waves = 2
# Number of waves in the Riemann solution:
clawdata.num_waves = 2
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'vanleer' ==> van Leer
# 4 or 'mc' ==> MC limiter
clawdata.limiter = ['vanleer','vanleer']
clawdata.use_fwaves = False # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 0
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 or 'user' => user specified (must modify bcNamr.f to use this option)
# 1 or 'extrap' => extrapolation (non-reflecting outflow)
# 2 or 'periodic' => periodic (must specify this at both boundaries)
# 3 or 'wall' => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'extrap' # at xlower
clawdata.bc_upper[0] = 'wall' # at xupper
clawdata.bc_lower[1] = 'wall' # at ylower
clawdata.bc_upper[1] = 'extrap' # at yupper
# ---------------
# Gauges:
# ---------------
rundata.gaugedata.gauges = []
# for gauges append lines of the form [gaugeno, x, y, t1, t2]
rundata.gaugedata.gauges.append([0, 3.5, 0.5, 1.22, 2.85])
#rundata.gaugedata.gauges.append([1, 3.6, 0.5, 2.7, 2.85])
# --------------
# Checkpointing:
# --------------
# Specify when checkpoint files should be created that can be
# used to restart a computation.
clawdata.checkpt_style = 0
if clawdata.checkpt_style == 0:
# Do not checkpoint at all
pass
elif clawdata.checkpt_style == 1:
# Checkpoint only at tfinal.
pass
elif clawdata.checkpt_style == 2:
# Specify a list of checkpoint times.
clawdata.checkpt_times = [0.1,0.15]
elif clawdata.checkpt_style == 3:
# Checkpoint every checkpt_interval timesteps (on Level 1)
# and at the final time.
clawdata.checkpt_interval = 5
# ---------------
# AMR parameters:
# ---------------
amrdata = rundata.amrdata
# max number of refinement levels:
amrdata.amr_levels_max = 3
# List of refinement ratios at each level (length at least amr_level_max-1)
amrdata.refinement_ratios_x = [2, 2]
amrdata.refinement_ratios_y = [2, 2]
amrdata.refinement_ratios_t = [2, 2]
# Specify type of each aux variable in clawdata.auxtype.
# This must be a list of length num_aux, each element of which is one of:
# 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
# need 1 value, set in setadjoint
    # Flag for refinement based on Richardson error estimator:
amrdata.flag_richardson = False
# Flag for refinement using routine flag2refine:
amrdata.flag2refine = False
# see setadjoint to set tolerance for adjoint flagging
# steps to take on each level L between regriddings of level L+1:
amrdata.regrid_interval = 2
# width of buffer zone around flagged points:
# (typically the same as regrid_interval so waves don't escape):
amrdata.regrid_buffer_width = 2
# clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
# (closer to 1.0 => more small grids may be needed to cover flagged cells)
amrdata.clustering_cutoff = 0.7
# print info about each regridding up to this level:
amrdata.verbosity_regrid = 0
# ---------------
# Regions:
# ---------------
rundata.regiondata.regions = []
# to specify regions of refinement append lines of the form
# [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
#------------------------------------------------------------------
# Adjoint specific data:
#------------------------------------------------------------------
rundata = setadjoint(rundata)
# ----- For developers -----
# Toggle debugging print statements:
amrdata.dprint = False # print domain flags
amrdata.eprint = False # print err est flags
amrdata.edebug = False # even more err est flags
amrdata.gprint = False # grid bisection/clustering
amrdata.nprint = False # proper nesting output
amrdata.pprint = False # proj. of tagged points
amrdata.rprint = False # print regridding summary
amrdata.sprint = False # space/memory output
amrdata.tprint = False # time step reporting each level
amrdata.uprint = False # update/upbnd reporting
return rundata
# end of function setrun
# ----------------------
#-------------------
def setadjoint(rundata):
#-------------------
"""
Setting up adjoint variables and
reading in all of the checkpointed Adjoint files
"""
import glob
# Set these parameters at top of this file:
# adjoint_flag_tolerance, t1, t2, adjoint_output
# Then you don't need to modify this function...
# flag and tolerance for adjoint flagging:
if flag_forward_adjoint == True:
# setting up taking inner product with forward solution
rundata.amrdata.flag2refine = True
rundata.amrdata.flag2refine_tol = adjoint_flag_tolerance
elif flag_richardson_adjoint == True:
# setting up taking inner product with Richardson error
rundata.amrdata.flag_richardson = True
rundata.amrdata.flag_richardson_tol = adjoint_flag_tolerance
else:
print("No refinement flag set!")
rundata.clawdata.num_aux = 1 # 1 required for adjoint flagging
rundata.amrdata.aux_type = ['center']
adjointdata = rundata.new_UserData(name='adjointdata',fname='adjoint.data')
adjointdata.add_param('adjoint_output',adjoint_output,'adjoint_output')
adjointdata.add_param('t1',t1,'t1, start time of interest')
adjointdata.add_param('t2',t2,'t2, final time of interest')
files = glob.glob(os.path.join(adjoint_output,"fort.b*"))
files.sort()
if (len(files) == 0):
print("No binary files found for adjoint output!")
adjointdata.add_param('numadjoints', len(files), 'Number of adjoint checkpoint files.')
adjointdata.add_param('innerprod_index', 1, 'Index for innerproduct data in aux array.')
counter = 1
    for fname in files:
        with open(fname) as f:
            # first line of each checkpoint file ends with its output time
            # (read here, though currently unused)
            time = f.readline().split()[-1]
        adjointdata.add_param('file' + str(counter), fname, 'Binary file' + str(counter))
        counter = counter + 1
return rundata
# end of function setadjoint
# ----------------------
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
rundata = setrun(*sys.argv[1:])
rundata.write()
|
clawpack/adjoint
|
examples/acoustics_2d_radial_mixed/compare/setrun.py
|
Python
|
bsd-2-clause
| 13,991
|
[
"NetCDF"
] |
5df9174f5ba044600028e0ed7a0a11cf9e9a4ae934e8f0c000fd34aa18b6326c
|
from __future__ import division
import numpy as np
from scipy import optimize
def d4s(data):
"""
Beam parameter calculation according to the ISO standard D4sigma integrals
input: 2D array of intensity values (pixels)
output:
xx, yy: x and y centres
dx, dy: 4 sigma widths for x and y
angle: inferred rotation angle, radians
"""
gg = data
dimy, dimx = np.shape(data)
X, Y = np.mgrid[0:dimx,0:dimy]
X = X.T
Y = Y.T
P = np.sum(data)
xx = np.sum(data * X) / P
yy = np.sum(data * Y) / P
xx2 = np.sum(data * (X - xx)**2)/P
yy2 = np.sum(data * (Y - yy)**2)/P
xy = np.sum(data * (X - xx) * (Y - yy)) / P
gamm = np.sign(xx2 - yy2)
angle = 0.5 * np.arctan(2*xy / (xx2 - yy2))
try:
dx = 2 * np.sqrt(2) * (xx2 + yy2 + gamm * ( (xx2 - yy2)**2 + 4*xy**2)**0.5)**(0.5)
dy = 2 * np.sqrt(2) * (xx2 + yy2 - gamm * ( (xx2 - yy2)**2 + 4*xy**2)**0.5)**(0.5)
except:
# In case of error, just make the size very large
print "Fitting error"
dx, dy = data.shape
return xx, yy, dx, dy, angle
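# Quick sanity-check sketch (added for illustration, not in the original file):
# for an axis-aligned Gaussian beam the D4sigma integrals above reduce to
# dx = 4*sigma_x and dy = 4*sigma_y, which makes a convenient self-test.
def _d4s_selftest(sx=10.0, sy=8.0, size=201):
    x = np.arange(size)
    X, Y = np.meshgrid(x, x)
    c = (size - 1) / 2.0
    beam = np.exp(-(X - c)**2 / (2.0 * sx**2) - (Y - c)**2 / (2.0 * sy**2))
    xx, yy, dx, dy, angle = d4s(beam)
    # expect xx ~ yy ~ c, dx ~ 4*sx and dy ~ 4*sy
    return abs(dx - 4.0 * sx) < 0.1 and abs(dy - 4.0 * sy) < 0.1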
def getellipse(xx, yy, dx, dy, angle):
t = np.linspace(0, np.pi*2, 101)
a = dx/2
b = dy/2
    angle = np.pi + angle  # instead of multiplying the input angle by -1
xr = a * np.cos(t) * np.cos(angle) - b * np.sin(t) * np.sin(angle) + xx
yr = a * np.cos(t) * np.sin(angle) + b * np.sin(t) * np.cos(angle) + yy
return xr, yr
def gauss2d(cx, cy, sx, sy, h, X, Y):
res = h * np.exp(-(X - cx)**2 / (2*sx**2)) * np.exp(-(Y - cy)**2 / (2*sy**2))
return res
def waves(th, period, phase, amp, X, Y):
pos = np.cos(th)*X + np.sin(th)*Y
ret = np.sin(2*np.pi*pos/period + phase) * amp
return ret
def wobble(cx, cy, sx, sy, h, th, period, phase, amp, offset):
return lambda X, Y: gauss2d(cx, cy, sx, sy, h, X, Y) * (1 + waves(th, period, phase, amp, X, Y)) + offset
def invwobble(cx, cy, sx, sy, h, th, period, phase, amp, offset):
return lambda X, Y: gauss2d(cx, cy, sx, sy, h, X, Y) * waves(th, period, phase, amp, X, Y) + offset
def fitwobble(data, X, Y, p0):
"""Returns (height, x, y, width_x, width_y)
the gaussian parameters of a 2D distribution found by a fit"""
errorfunction = lambda p: np.ravel(wobble(*p)(X, Y) - data)
p, success = optimize.leastsq(errorfunction, p0)
return p
if __name__ == "__main__":
import pylab as pl
filename = "img_343444.npy"
# filename = "img_418733.npy"
data = np.load(filename)
xx, yy, dx, dy, angle = d4s(data)
minx = max(0, int(xx-dx*0.6))
maxx = min(640, int(xx+dx*0.6))
miny = max(0, int(yy-dy*0.6))
maxy = min(480, int(yy+dy*0.6))
d2 = data[miny:maxy, minx:maxx]
print (maxx-minx)*(maxy-miny)/(640*480)
pl.figure()
pl.subplot(221)
pl.imshow(d2)
pl.title("Original image")
cx, cy = xx-minx, yy-miny
sx, sy = dx / 4, dy / 4
# sx, sy = abs(dx / 4 * np.tan(angle)), abs(dy / 4 * np.tan(angle))
h = np.max(d2)
th = -60/180*np.pi
period = 35
phase = 4.5
amp = 0.14
offset = 5
p0 = (cx, cy, sx, sy, h, th, period, phase, amp, offset)
print p0
dimx = maxx - minx
dimy = maxy - miny
X, Y = np.mgrid[0:dimx,0:dimy]
X = X.T
Y = Y.T
import time
start = time.time()
p = fitwobble(d2, X, Y, p0)
print time.time()-start
cx, cy, sx, sy, h, th, period, phase, amp, offset = p
print th, period
w = wobble(*p0)(X, Y)
iw = d2 - invwobble(*p)(X, Y)
pl.subplot(222)
pl.imshow(w)
pl.title('fitted')
pl.subplot(223)
xr, yr = getellipse(cx, cy, 4*sx, 4*sy, 0)
pl.imshow(iw)
pl.plot(xr, yr, 'k-')
pl.title('un-wobbled')
pl.xlim([0, dimx])
pl.ylim([dimy-1, 0])
if abs(sx - sy)/(sx+sy) < 0.1:
csign = '~'
elif (sx > sy):
csign = '>'
else:
csign = '<'
ctext = "sx | sy\n%.1f %s %.1f" %(sx, csign, sy)
pl.figtext(0.75, 0.25, ctext, horizontalalignment='center', fontsize=30)
# pl.plot(xx, yy, '+')
pl.show()
|
imrehg/labhardware
|
projects/beamprofile/fastfit.py
|
Python
|
mit
| 4,052
|
[
"Gaussian"
] |
493b5b1655983a25531da67e9fe3024bdc2b2b7df771283731f02ded01e9b8ee
|
""" Implementation of Module
"""
#pylint: disable=unused-wildcard-import,wildcard-import
import copy
import os
#try: # this part to import as part of the DIRAC framework
from DIRAC.Core.Workflow.Parameter import *
__RCSID__ = "$Id$"
class ModuleDefinition( AttributeCollection ):
def __init__( self, type = None, obj = None, parent = None ):
# we can create an object from another module
# or from the ParameterCollection
AttributeCollection.__init__( self )
self.main_class_obj = None # used for the interpretation only
#self.module_obj = None # used for the interpretation only
self.parent = parent
if ( obj == None ) or isinstance( obj, ParameterCollection ):
self.setType( 'nitgiven' )
self.setDescrShort( '' )
self.setDescription( '' )
self.setRequired( '' )
self.setBody( '' )
self.setOrigin( '' )
self.setVersion( 0.0 )
self.parameters = ParameterCollection( obj ) # creating copy
elif isinstance( obj, ModuleDefinition ):
self.setType( obj.getType() )
self.setDescrShort( obj.getDescrShort() )
self.setDescription( obj.getDescription() )
self.setBody( obj.getBody() )
self.setRequired( obj.getRequired() )
self.setOrigin( obj.getOrigin() )
self.setVersion( obj.getVersion() )
self.parameters = ParameterCollection( obj.parameters )
else:
raise TypeError( 'Can not create object type ' + str( type( self ) ) + ' from the ' + str( type( obj ) ) )
if type :
self.setType( type )
def createCode( self ):
return self.getBody() + '\n'
def __str__( self ):
return str( type( self ) ) + ':\n' + AttributeCollection.__str__( self ) + self.parameters.__str__()
def toXML( self ):
ret = '<ModuleDefinition>\n'
ret = ret + AttributeCollection.toXML( self )
ret = ret + self.parameters.toXML()
ret = ret + '</ModuleDefinition>\n'
return ret
def toXMLFile( self, outFile ):
if os.path.exists( outFile ):
os.remove( outFile )
with open( outFile, 'w' ) as xmlfile:
xmlfile.write( self.toXML() )
def loadCode( self ):
#print 'Loading code of the Module =', self.getType()
# version 1 - OLD sample
#ret = compile(self.getBody(),'<string>','exec')
#eval(ret)
#return ret #returning ref just in case we might need it
#
if len( self.getBody() ): # checking the size of the string
            # version 2 - we assume that each self.body is a module object
#module = new.module(self.getType()) # create empty module object
#sys.modules[self.getType()] = module # add reference for the import operator
#exec self.getBody() in module.__dict__ # execute code itself
#self.module_obj = module # save pointer to this module
#if module.__dict__.has_key(self.getType()):
# self.main_class_obj = module.__dict__[self.getType()] # save class object
# version 3
# A.T. Use vars() function to inspect local objects instead of playing with
# fake modules. We assume that after the body execution there will be
# a class with name "self.getType()" defined in the local scope.
exec self.getBody()
if vars().has_key( self.getType() ):
self.main_class_obj = vars()[self.getType()] # save class object
else:
                # it is possible to have this class in another module; we have to check for this,
                # but it is advisable to use the 'from module import class' form,
                # otherwise I could not find the module. It is possible that
                # in the future this code can be changed to do it more wisely.
raise TypeError( 'Can not find class ' + self.getType() + ' in the module created from the body of the module ' + self.getOrigin() )
else:
raise TypeError( 'The body of the Module ' + self.getType() + ' seems empty' )
return self.main_class_obj
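# Illustrative sketch (not part of the original file): a ModuleDefinition
# carries its implementation as a source string in its body, and loadCode()
# expects that string to define a class named after the module's type.
# The module name below is hypothetical.
def _example_module_definition():
    md = ModuleDefinition( 'HelloModule' )
    md.setBody( 'class HelloModule(object):\n'
                '  def execute(self):\n'
                '    return "hello"\n' )
    # md.loadCode() would then bind HelloModule as md.main_class_obj
    return md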
class ModuleInstance( AttributeCollection ):
def __init__( self, name, obj = None, parent = None ):
AttributeCollection.__init__( self )
self.instance_obj = None # used for the interpretation only
self.parent = parent
if obj == None:
self.parameters = ParameterCollection()
elif isinstance( obj, ModuleInstance ) or isinstance( obj, ModuleDefinition ):
if name == None:
self.setName( obj.getName() )
else:
self.setName( name )
self.setType( obj.getType() )
self.setDescrShort( obj.getDescrShort() )
self.parameters = ParameterCollection( obj.parameters )
elif isinstance( obj, ParameterCollection ):
# set attributes
self.setName( name )
self.setType( "" )
self.setDescrShort( "" )
self.parameters = ParameterCollection( obj )
elif obj != None:
raise TypeError( 'Can not create object type ' + str( type( self ) ) + ' from the ' + str( type( obj ) ) )
def createCode( self, ind = 2 ):
str = indent( ind ) + self.getName() + ' = ' + self.getType() + '()\n'
str = str + self.parameters.createParametersCode( ind, self.getName() )
str = str + indent( ind ) + self.getName() + '.execute()\n\n'
return str
def __str__( self ):
return str( type( self ) ) + ':\n' + AttributeCollection.__str__( self ) + self.parameters.__str__()
def toXML( self ):
ret = '<ModuleInstance>\n'
ret = ret + AttributeCollection.toXML( self )
ret = ret + self.parameters.toXML()
ret = ret + '</ModuleInstance>\n'
return ret
def execute( self, step_parameters, definitions ):
#print 'Executing ModuleInstance ',self.getName(),'of type',self.getType()
self.instance_obj = definitions[self.getType()].main_class_obj() # creating instance
#FIXME: pylint complains that ParameterCollection doesn't have execute. What should this be?
self.parameters.execute( self.getName() ) #pylint: disable=no-member
self.instance_obj.execute2()
class DefinitionsPool( dict ):
def __init__( self, parent, pool = None ):
dict.__init__( self )
self.parent = parent # this is a cache value, we propagate it into next level
if isinstance( pool, DefinitionsPool ):
for k in pool.keys():
v = pool[k]
if isinstance( v, ModuleDefinition ):
obj = ModuleDefinition( None, v, self.parent )
elif isinstance( v, StepDefinition ): #pylint: disable=undefined-variable
obj = StepDefinition( None, v, self.parent ) #pylint: disable=undefined-variable
else:
                    raise TypeError( 'Error: __init__ Wrong type of object stored in the DefinitionPool ' + str( type( v ) ) )
self.append( obj )
elif pool != None:
raise TypeError( 'Can not create object type ' + str( type( self ) ) + ' from the ' + str( type( pool ) ) )
def __setitem__( self, i, obj ):
if not self.has_key( i ):
dict.__setitem__( self, i, obj )
# print 'We need to write piece of code to replace existent DefinitionsPool.__setitem__()'
# print 'For now we ignore it for the', obj.getType()
def append( self, obj ):
""" We add new Definition (Module, Step)
"""
self[obj.getType()] = obj
obj.setParent( self.parent )
return obj
def remove( self, obj ):
del self[obj.getType()]
obj.setParent( None )
def compare( self, s ):
if not isinstance( s, DefinitionsPool ):
            return False # checking types of objects
        if len( s ) != len( self ):
            return False # checking size
# we need to compare the keys of dictionaries
if self.keys() != s.keys():
return False
for k in self.keys():
if ( not s.has_key( k ) ) or ( not self[k].compare( s[k] ) ):
return False
return True
def __str__( self ):
ret = str( type( self ) ) + ': number of Definitions:' + str( len( self ) ) + '\n'
index = 0
for k in self.keys():
ret = ret + 'definition(' + str( index ) + ')=' + str( self[k] ) + '\n'
index = index + 1
return ret
def setParent( self, parent ):
self.parent = parent
# we need to propagate it just in case it was different one
for k in self.keys():
self[k].setParent( parent )
def getParent( self ):
return self.parent
def updateParents( self, parent ):
self.parent = parent
for k in self.keys():
self[k].updateParents( parent )
def toXML( self ):
ret = ''
for k in self.keys():
ret = ret + self[k].toXML()
return ret
def createCode( self ):
str = ''
for k in self.keys():
#str=str+indent(2)+'# flush code for instance\n'
str = str + self[k].createCode()
return str
def loadCode( self ):
for k in self.keys():
# load code of the modules
self[k].loadCode()
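# Illustrative sketch (not part of the original file): DefinitionsPool keys
# entries by their type string, and the guarded __setitem__ above makes a
# repeated append of the same definition a no-op.
def _example_definitions_pool():
    pool = DefinitionsPool( None )
    md = ModuleDefinition( 'HelloModule' )
    pool.append( md )
    pool.append( md )  # ignored: key 'HelloModule' already present
    return len( pool ) == 1  # True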
class InstancesPool( list ):
def __init__( self, parent, pool = None ):
list.__init__( self )
        self.parent = parent # this is a cache value, we propagate it into next level
        if isinstance( pool, InstancesPool ):
            for v in pool:
                # rebuild each instance as a typed copy; if this were a
                # constructor we could pass the parent into it directly
                if isinstance( v, ModuleInstance ):
                    obj = ModuleInstance( None, v, self.parent )
                elif isinstance( v, StepInstance ): #pylint: disable=undefined-variable
                    obj = StepInstance( None, v, self.parent ) #pylint: disable=undefined-variable
                else:
                    raise TypeError( 'Error: __init__ Wrong type of object stored in the InstancesPool ' + str( type( v ) ) )
                self.append( obj )
elif pool != None:
raise TypeError( 'Can not create object type ' + str( type( self ) ) + ' from the ' + str( type( pool ) ) )
def __str__( self ):
ret = str( type( self ) ) + ': number of Instances:' + str( len( self ) ) + '\n'
index = 0
for v in self:
ret = ret + 'instance(' + str( index ) + ')=' + str( v ) + '\n'
index = index + 1
return ret
def setParent( self, parent ):
self.parent = parent
for v in self:
v.setParent( parent )
def getParent( self ):
return self.parent
def updateParents( self, parent ):
self.parent = parent
for v in self:
v.updateParents( parent )
def append( self, obj ):
list.append( self, obj )
obj.setParent( self.parent )
def toXML( self ):
ret = ''
for v in self:
ret = ret + v.toXML()
return ret
def findIndex( self, name ):
i = 0
for v in self:
if v.getName() == name:
return i
i = i + 1
return - 1
def find( self, name ):
for v in self:
if v.getName() == name:
return v
return None
def delete( self, name ):
for v in self:
if v.getName() == name:
self.remove( v )
v.setParent( None )
def compare( self, s ):
if ( not isinstance( s, InstancesPool ) or ( len( s ) != len( self ) ) ):
return False
for v in self:
for i in s:
if v.getName() == i.getName():
if not v.compare( i ):
return False
else:
break
else:
#if we reached this place naturally we can not find matching name
return False
return True
def createCode( self ):
str = ''
for inst in self:
str = str + inst.createCode()
str = str + indent( 2 ) + '# output assignment\n'
for v in inst.parameters:
if v.isOutput():
str = str + v.createParameterCode( 2, 'self' )
str = str + '\n'
return str
|
Andrew-McNab-UK/DIRAC
|
Core/Workflow/Module.py
|
Python
|
gpl-3.0
| 11,481
|
[
"DIRAC"
] |
4d325aab3803cb770394b4a06789555ef6e9d1ccebfcdccf233e42e0f08ae79c
|
"""
siesta module for sids
======================
Enables access to siesta DFT output files.
Currently supported files are:
*.HSX
"""
|
zerothi/siesta-es
|
sids/siesta/__init__.py
|
Python
|
gpl-3.0
| 138
|
[
"SIESTA"
] |
96a2947a96c594db4ac30a72916bea674eb6144bf0ab07279512fa75b383e592
|
"""
Student Views
"""
import datetime
import logging
import uuid
import json
import warnings
from collections import defaultdict
from urlparse import urljoin
from pytz import UTC
from requests import HTTPError
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_reset_confirm
from django.contrib import messages
from django.core.context_processors import csrf
from django.core import mail
from django.core.urlresolvers import reverse, NoReverseMatch
from django.core.validators import validate_email, ValidationError
from django.db import IntegrityError, transaction
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseServerError, Http404)
from django.shortcuts import redirect
from django.utils.encoding import force_bytes, force_text
from django.utils.translation import ungettext
from django.utils.http import base36_to_int, urlsafe_base64_encode
from django.utils.translation import ugettext as _, get_language
from django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie
from django.views.decorators.http import require_POST, require_GET
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.response import TemplateResponse
from ratelimitbackend.exceptions import RateLimitException
from social.apps.django_app import utils as social_utils
from social.backends import oauth as social_oauth
from social.exceptions import AuthException, AuthAlreadyAssociated
from edxmako.shortcuts import render_to_response, render_to_string
from course_modes.models import CourseMode
from shoppingcart.api import order_history
from student.models import (
Registration, UserProfile,
PendingEmailChange, CourseEnrollment, CourseEnrollmentAttribute, unique_id_for_user,
CourseEnrollmentAllowed, UserStanding, LoginFailures,
create_comments_service_user, PasswordHistory, UserSignupSource,
DashboardConfiguration, LinkedInAddToProfileConfiguration, ManualEnrollmentAudit, ALLOWEDTOENROLL_TO_ENROLLED)
from student.forms import AccountCreationForm, PasswordResetFormNoActive, get_registration_extension_form
from lms.djangoapps.reg_form.forms import regextrafields
from lms.djangoapps.commerce.utils import EcommerceService # pylint: disable=import-error
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification # pylint: disable=import-error
from certificates.models import CertificateStatuses, certificate_status_for_student
from certificates.api import ( # pylint: disable=import-error
get_certificate_url,
has_html_certificates_enabled,
)
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore import ModuleStoreEnum
from collections import namedtuple
from courseware.courses import get_courses, sort_by_announcement, sort_by_start_date # pylint: disable=import-error
from courseware.access import has_access
from django_comment_common.models import Role
from external_auth.models import ExternalAuthMap
import external_auth.views
from external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from bulk_email.models import Optout, CourseAuthorization
from lang_pref import LANGUAGE_KEY
import track.views
import dogstats_wrapper as dog_stats_api
from util.db import outer_atomic
from util.json_request import JsonResponse
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.milestones_helpers import (
get_pre_requisite_courses_not_completed,
)
from microsite_configuration import microsite
from util.password_policy_validators import (
validate_password_length, validate_password_complexity,
validate_password_dictionary
)
import third_party_auth
from third_party_auth import pipeline, provider
from student.helpers import (
check_verify_status_by_course,
auth_pipeline_urls, get_next_url_for_login_page,
DISABLE_UNENROLL_CERT_STATES,
)
from student.cookies import set_logged_in_cookies, delete_logged_in_cookies
from student.models import anonymous_id_for_user
from shoppingcart.models import DonationConfiguration, CourseRegistrationCode
from embargo import api as embargo_api
import analytics
from eventtracking import tracker
# Note that this lives in LMS, so this dependency should be refactored.
from notification_prefs.views import enable_notifications
# Note that this lives in openedx, so this dependency should be refactored.
from openedx.core.djangoapps.credentials.utils import get_user_program_credentials
from openedx.core.djangoapps.credit.email_utils import get_credit_provider_display_names, make_providers_strings
from openedx.core.djangoapps.user_api.preferences import api as preferences_api
from openedx.core.djangoapps.programs.utils import get_programs_for_dashboard
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
log = logging.getLogger("edx.student")
AUDIT_LOG = logging.getLogger("audit")
ReverifyInfo = namedtuple('ReverifyInfo', 'course_id course_name course_number date status display') # pylint: disable=invalid-name
SETTING_CHANGE_INITIATED = 'edx.user.settings.change_initiated'
# Disable this warning because it doesn't make sense to completely refactor tests to appease Pylint
# pylint: disable=logging-format-interpolation
def csrf_token(context):
"""A csrf token that can be included in a form."""
token = context.get('csrf_token', '')
if token == 'NOTPROVIDED':
return ''
return (u'<div style="display:none"><input type="hidden"'
' name="csrfmiddlewaretoken" value="%s" /></div>' % (token))
# NOTE: This view is not linked to directly--it is called from
# branding/views.py:index(), which is cached for anonymous users.
# This means that it should always return the same thing for anon
# users. (in particular, no switching based on query params allowed)
def index(request, extra_context=None, user=AnonymousUser()):
"""
Render the edX main page.
extra_context is used to allow immediate display of certain modal windows, eg signup,
as used by external_auth.
"""
if extra_context is None:
extra_context = {}
courses = get_courses(user)
if microsite.get_value("ENABLE_COURSE_SORTING_BY_START_DATE",
settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"]):
courses = sort_by_start_date(courses)
else:
courses = sort_by_announcement(courses)
context = {'courses': courses}
context['homepage_overlay_html'] = microsite.get_value('homepage_overlay_html')
# This appears to be an unused context parameter, at least for the master templates...
context['show_partners'] = microsite.get_value('show_partners', True)
# TO DISPLAY A YOUTUBE WELCOME VIDEO
# 1) Change False to True
context['show_homepage_promo_video'] = microsite.get_value('show_homepage_promo_video', False)
# 2) Add your video's YouTube ID (11 chars, eg "123456789xX"), or specify via microsite config
# Note: This value should be moved into a configuration setting and plumbed-through to the
# context via the microsite configuration workflow, versus living here
youtube_video_id = microsite.get_value('homepage_promo_video_youtube_id', "your-youtube-id")
context['homepage_promo_video_youtube_id'] = youtube_video_id
# allow for microsite override of the courses list
context['courses_list'] = microsite.get_template_path('courses_list.html')
# Insert additional context for use in the template
context.update(extra_context)
return render_to_response('index.html', context)
def process_survey_link(survey_link, user):
"""
If {UNIQUE_ID} appears in the link, replace it with a unique id for the user.
Currently, this is sha1(user.username). Otherwise, return survey_link.
"""
return survey_link.format(UNIQUE_ID=unique_id_for_user(user))
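# Worked example (illustrative only, not part of the original module):
# str.format substitutes just the named placeholder, so links without
# {UNIQUE_ID} pass through unchanged. The URL below is hypothetical.
def _example_survey_link():
    template = "https://example.com/survey?uid={UNIQUE_ID}"
    return template.format(UNIQUE_ID="abc123")  # -> "...?uid=abc123"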
def cert_info(user, course_overview, course_mode):
"""
Get the certificate info needed to render the dashboard section for the given
student and course.
Arguments:
user (User): A user.
course_overview (CourseOverview): A course.
course_mode (str): The enrollment mode (honor, verified, audit, etc.)
Returns:
dict: Empty dict if certificates are disabled or hidden, or a dictionary with keys:
'status': one of 'generating', 'ready', 'notpassing', 'processing', 'restricted'
'show_download_url': bool
'download_url': url, only present if show_download_url is True
'show_disabled_download_button': bool -- true if state is 'generating'
'show_survey_button': bool
'survey_url': url, only if show_survey_button is True
'grade': if status is not 'processing'
'can_unenroll': if status allows for unenrollment
"""
if not course_overview.may_certify():
return {}
return _cert_info(
user,
course_overview,
certificate_status_for_student(user, course_overview.id),
course_mode
)
def reverification_info(statuses):
"""
Returns reverification-related information for *all* of user's enrollments whose
reverification status is in statuses.
Args:
statuses (list): a list of reverification statuses we want information for
example: ["must_reverify", "denied"]
Returns:
dictionary of lists: dictionary with one key per status, e.g.
dict["must_reverify"] = []
dict["must_reverify"] = [some information]
"""
reverifications = defaultdict(list)
# Sort the data by the reverification_end_date
for status in statuses:
if reverifications[status]:
reverifications[status].sort(key=lambda x: x.date)
return reverifications
def get_course_enrollments(user, org_to_include, orgs_to_exclude):
"""
Given a user, return a filtered set of his or her course enrollments.
Arguments:
user (User): the user in question.
org_to_include (str): for use in Microsites. If not None, ONLY courses
of this org will be returned.
orgs_to_exclude (list[str]): If org_to_include is not None, this
argument is ignored. Else, courses of this org will be excluded.
Returns:
generator[CourseEnrollment]: a sequence of enrollments to be displayed
on the user's dashboard.
"""
for enrollment in CourseEnrollment.enrollments_for_user(user):
# If the course is missing or broken, log an error and skip it.
course_overview = enrollment.course_overview
if not course_overview:
log.error(
"User %s enrolled in broken or non-existent course %s",
user.username,
enrollment.course_id
)
continue
# If we are in a Microsite, then filter out anything that is not
# attributed (by ORG) to that Microsite.
if org_to_include and course_overview.location.org != org_to_include:
continue
# Conversely, if we are not in a Microsite, then filter out any enrollments
# with courses attributed (by ORG) to Microsites.
elif course_overview.location.org in orgs_to_exclude:
continue
# Else, include the enrollment.
else:
yield enrollment
def _cert_info(user, course_overview, cert_status, course_mode): # pylint: disable=unused-argument
"""
Implements the logic for cert_info -- split out for testing.
Arguments:
user (User): A user.
course_overview (CourseOverview): A course.
course_mode (str): The enrollment mode (honor, verified, audit, etc.)
"""
# simplify the status for the template using this lookup table
template_state = {
CertificateStatuses.generating: 'generating',
CertificateStatuses.regenerating: 'generating',
CertificateStatuses.downloadable: 'ready',
CertificateStatuses.notpassing: 'notpassing',
CertificateStatuses.restricted: 'restricted',
CertificateStatuses.auditing: 'auditing',
CertificateStatuses.audit_passing: 'auditing',
CertificateStatuses.audit_notpassing: 'auditing',
}
default_status = 'processing'
default_info = {
'status': default_status,
'show_disabled_download_button': False,
'show_download_url': False,
'show_survey_button': False,
'can_unenroll': True,
}
if cert_status is None:
return default_info
is_hidden_status = cert_status['status'] in ('unavailable', 'processing', 'generating', 'notpassing', 'auditing')
if course_overview.certificates_display_behavior == 'early_no_info' and is_hidden_status:
return {}
status = template_state.get(cert_status['status'], default_status)
status_dict = {
'status': status,
'show_download_url': status == 'ready',
'show_disabled_download_button': status == 'generating',
'mode': cert_status.get('mode', None),
'linked_in_url': None,
'can_unenroll': status not in DISABLE_UNENROLL_CERT_STATES,
}
if (status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing') and
course_overview.end_of_course_survey_url is not None):
status_dict.update({
'show_survey_button': True,
'survey_url': process_survey_link(course_overview.end_of_course_survey_url, user)})
else:
status_dict['show_survey_button'] = False
if status == 'ready':
# showing the certificate web view button if certificate is ready state and feature flags are enabled.
if has_html_certificates_enabled(course_overview.id, course_overview):
if course_overview.has_any_active_web_certificate:
status_dict.update({
'show_cert_web_view': True,
'cert_web_view_url': get_certificate_url(course_id=course_overview.id, uuid=cert_status['uuid'])
})
else:
# don't show download certificate button if we don't have an active certificate for course
status_dict['show_download_url'] = False
elif 'download_url' not in cert_status:
log.warning(
u"User %s has a downloadable cert for %s, but no download url",
user.username,
course_overview.id
)
return default_info
else:
status_dict['download_url'] = cert_status['download_url']
# If enabled, show the LinkedIn "add to profile" button
# Clicking this button sends the user to LinkedIn where they
# can add the certificate information to their profile.
linkedin_config = LinkedInAddToProfileConfiguration.current()
# posting certificates to LinkedIn is not currently
# supported in microsites/White Labels
if linkedin_config.enabled and not microsite.is_request_in_microsite():
status_dict['linked_in_url'] = linkedin_config.add_to_profile_url(
course_overview.id,
course_overview.display_name,
cert_status.get('mode'),
cert_status['download_url']
)
if status in ('generating', 'ready', 'notpassing', 'restricted', 'auditing'):
if 'grade' not in cert_status:
# Note: as of 11/20/2012, we know there are students in this state-- cs169.1x,
# who need to be regraded (we weren't tracking 'notpassing' at first).
# We can add a log.warning here once we think it shouldn't happen.
return default_info
else:
status_dict['grade'] = cert_status['grade']
return status_dict
@ensure_csrf_cookie
def signin_user(request):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
external_auth_response = external_auth_login(request)
if external_auth_response is not None:
return external_auth_response
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
third_party_auth_error = None
for msg in messages.get_messages(request):
if msg.extra_tags.split()[0] == "social-auth":
# msg may or may not be translated. Try translating [again] in case we are able to:
third_party_auth_error = _(unicode(msg)) # pylint: disable=translation-of-non-string
break
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
# Bool injected into JS to submit form if we're inside a running third-
# party auth pipeline; distinct from the actual instance of the running
# pipeline, if any.
'pipeline_running': 'true' if pipeline.running(request) else 'false',
'pipeline_url': auth_pipeline_urls(pipeline.AUTH_ENTRY_LOGIN, redirect_url=redirect_to),
'platform_name': microsite.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'third_party_auth_error': third_party_auth_error
}
return render_to_response('login.html', context)
@ensure_csrf_cookie
def register_user(request, extra_context=None):
"""Deprecated. To be replaced by :class:`student_account.views.login_and_registration_form`."""
# Determine the URL to redirect to following login:
redirect_to = get_next_url_for_login_page(request)
if request.user.is_authenticated():
return redirect(redirect_to)
external_auth_response = external_auth_register(request)
if external_auth_response is not None:
return external_auth_response
context = {
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in the header
'email': '',
'name': '',
'running_pipeline': None,
'pipeline_urls': auth_pipeline_urls(pipeline.AUTH_ENTRY_REGISTER, redirect_url=redirect_to),
'platform_name': microsite.get_value(
'platform_name',
settings.PLATFORM_NAME
),
'selected_provider': '',
'username': '',
}
if extra_context is not None:
context.update(extra_context)
if context.get("extauth_domain", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return render_to_response('register-shib.html', context)
# If third-party auth is enabled, prepopulate the form with data from the
# selected provider.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
current_provider = provider.Registry.get_from_pipeline(running_pipeline)
if current_provider is not None:
overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))
overrides['running_pipeline'] = running_pipeline
overrides['selected_provider'] = current_provider.name
context.update(overrides)
return render_to_response('register.html', context)
def complete_course_mode_info(course_id, enrollment, modes=None):
"""
    Compute additional information from the given course modes and the
    user's current enrollment.
    Returns the following information:
    - whether to show the course upsell information
    - the number of days remaining in which an upsell is still possible
"""
if modes is None:
modes = CourseMode.modes_for_course_dict(course_id)
mode_info = {'show_upsell': False, 'days_for_upsell': None}
# we want to know if the user is already enrolled as verified or credit and
# if verified is an option.
if CourseMode.VERIFIED in modes and enrollment.mode in CourseMode.UPSELL_TO_VERIFIED_MODES:
mode_info['show_upsell'] = True
mode_info['verified_sku'] = modes['verified'].sku
# if there is an expiration date, find out how long from now it is
if modes['verified'].expiration_datetime:
today = datetime.datetime.now(UTC).date()
mode_info['days_for_upsell'] = (modes['verified'].expiration_datetime.date() - today).days
return mode_info
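# Illustrative sketch (hypothetical values): for a learner enrolled in an
# upsell-eligible mode where a verified mode with an SKU and an expiration
# date exists, the helper above yields roughly:
#
#   mode_info = complete_course_mode_info(course_id, enrollment)
#   # => {'show_upsell': True, 'verified_sku': u'ABC123',
#   #     'days_for_upsell': 14}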
def is_course_blocked(request, redeemed_registration_codes, course_key):
"""Checking either registration is blocked or not ."""
blocked = False
for redeemed_registration in redeemed_registration_codes:
        # Registration codes may be generated via the bulk purchase
        # scenario; we only need to check invoice-backed registration
        # codes, and whether their invoice is still valid.
if redeemed_registration.invoice_item:
if not redeemed_registration.invoice_item.invoice.is_valid:
blocked = True
# disabling email notifications for unpaid registration courses
Optout.objects.get_or_create(user=request.user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
request.user.username,
request.user.email,
course_key,
)
track.views.server_track(
request,
"change-email1-settings",
{"receive_emails": "no", "course": course_key.to_deprecated_string()},
page='dashboard',
)
break
return blocked
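# Illustrative sketch: dashboard() below invokes this check once per
# enrollment, passing only the registration codes the current user has
# redeemed for that course (see the block_courses frozenset further down):
#
#   codes = CourseRegistrationCode.objects.filter(
#       course_id=enrollment.course_id,
#       registrationcoderedemption__redeemed_by=request.user,
#   )
#   blocked = is_course_blocked(request, codes, enrollment.course_id)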
@login_required
@ensure_csrf_cookie
def dashboard(request):
user = request.user
platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
    # For microsites, we want to filter and only show enrollments for
    # courses within the microsite's 'ORG'.
course_org_filter = microsite.get_value('course_org_filter')
# Let's filter out any courses in an "org" that has been declared to be
# in a Microsite
org_filter_out_set = microsite.get_all_orgs()
# remove our current Microsite from the "filter out" list, if applicable
if course_org_filter:
org_filter_out_set.remove(course_org_filter)
# Build our (course, enrollment) list for the user, but ignore any courses that no
# longer exist (because the course IDs have changed). Still, we don't delete those
# enrollments, because it could have been a data push snafu.
course_enrollments = list(get_course_enrollments(user, course_org_filter, org_filter_out_set))
# sort the enrollment pairs by the enrollment date
course_enrollments.sort(key=lambda x: x.created, reverse=True)
# Retrieve the course modes for each course
enrolled_course_ids = [enrollment.course_id for enrollment in course_enrollments]
__, unexpired_course_modes = CourseMode.all_and_unexpired_modes_for_courses(enrolled_course_ids)
course_modes_by_course = {
course_id: {
mode.slug: mode
for mode in modes
}
for course_id, modes in unexpired_course_modes.iteritems()
}
# Check to see if the student has recently enrolled in a course.
# If so, display a notification message confirming the enrollment.
enrollment_message = _create_recent_enrollment_message(
course_enrollments, course_modes_by_course
)
course_optouts = Optout.objects.filter(user=user).values_list('course_id', flat=True)
message = ""
if not user.is_active:
message = render_to_string(
'registration/activate_account_notice.html',
{'email': user.email, 'platform_name': platform_name}
)
# Global staff can see what courses errored on their dashboard
staff_access = False
errored_courses = {}
if has_access(user, 'staff', 'global'):
# Show any courses that errored on load
staff_access = True
errored_courses = modulestore().get_errored_courses()
show_courseware_links_for = frozenset(
enrollment.course_id for enrollment in course_enrollments
if has_access(request.user, 'load', enrollment.course_overview)
and has_access(request.user, 'view_courseware_with_prerequisites', enrollment.course_overview)
)
# Get any programs associated with courses being displayed.
# This is passed along in the template context to allow rendering of
# program-related information on the dashboard.
course_programs = _get_course_programs(user, [enrollment.course_id for enrollment in course_enrollments])
xseries_credentials = _get_xseries_credentials(user)
# Construct a dictionary of course mode information
# used to render the course list. We re-use the course modes dict
# we loaded earlier to avoid hitting the database.
course_mode_info = {
enrollment.course_id: complete_course_mode_info(
enrollment.course_id, enrollment,
modes=course_modes_by_course[enrollment.course_id]
)
for enrollment in course_enrollments
}
# Determine the per-course verification status
# This is a dictionary in which the keys are course locators
# and the values are one of:
#
# VERIFY_STATUS_NEED_TO_VERIFY
# VERIFY_STATUS_SUBMITTED
# VERIFY_STATUS_APPROVED
# VERIFY_STATUS_MISSED_DEADLINE
#
# Each of which correspond to a particular message to display
# next to the course on the dashboard.
#
# If a course is not included in this dictionary,
# there is no verification messaging to display.
verify_status_by_course = check_verify_status_by_course(user, course_enrollments)
cert_statuses = {
enrollment.course_id: cert_info(request.user, enrollment.course_overview, enrollment.mode)
for enrollment in course_enrollments
}
    # Only show email settings for Mongo courses, and only when bulk email is turned on.
show_email_settings_for = frozenset(
enrollment.course_id for enrollment in course_enrollments if (
settings.FEATURES['ENABLE_INSTRUCTOR_EMAIL'] and
modulestore().get_modulestore_type(enrollment.course_id) != ModuleStoreEnum.Type.xml and
CourseAuthorization.instructor_email_enabled(enrollment.course_id)
)
)
# Verification Attempts
# Used to generate the "you must reverify for course x" banner
verification_status, verification_msg = SoftwareSecurePhotoVerification.user_status(user)
# Gets data for midcourse reverifications, if any are necessary or have failed
statuses = ["approved", "denied", "pending", "must_reverify"]
reverifications = reverification_info(statuses)
show_refund_option_for = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.refundable()
)
block_courses = frozenset(
enrollment.course_id for enrollment in course_enrollments
if is_course_blocked(
request,
CourseRegistrationCode.objects.filter(
course_id=enrollment.course_id,
registrationcoderedemption__redeemed_by=request.user
),
enrollment.course_id
)
)
enrolled_courses_either_paid = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.is_paid_course()
)
# If there are *any* denied reverifications that have not been toggled off,
# we'll display the banner
denied_banner = any(item.display for item in reverifications["denied"])
# Populate the Order History for the side-bar.
order_history_list = order_history(user, course_org_filter=course_org_filter, org_filter_out_set=org_filter_out_set)
# get list of courses having pre-requisites yet to be completed
courses_having_prerequisites = frozenset(
enrollment.course_id for enrollment in course_enrollments
if enrollment.course_overview.pre_requisite_courses
)
courses_requirements_not_met = get_pre_requisite_courses_not_completed(user, courses_having_prerequisites)
if 'notlive' in request.GET:
redirect_message = _("The course you are looking for does not start until {date}.").format(
date=request.GET['notlive']
)
elif 'course_closed' in request.GET:
redirect_message = _("The course you are looking for is closed for enrollment as of {date}.").format(
date=request.GET['course_closed']
)
else:
redirect_message = ''
context = {
'enrollment_message': enrollment_message,
'redirect_message': redirect_message,
'course_enrollments': course_enrollments,
'course_optouts': course_optouts,
'message': message,
'staff_access': staff_access,
'errored_courses': errored_courses,
'show_courseware_links_for': show_courseware_links_for,
'all_course_modes': course_mode_info,
'cert_statuses': cert_statuses,
'credit_statuses': _credit_statuses(user, course_enrollments),
'show_email_settings_for': show_email_settings_for,
'reverifications': reverifications,
'verification_status': verification_status,
'verification_status_by_course': verify_status_by_course,
'verification_msg': verification_msg,
'show_refund_option_for': show_refund_option_for,
'block_courses': block_courses,
'denied_banner': denied_banner,
'billing_email': settings.PAYMENT_SUPPORT_EMAIL,
'user': user,
'logout_url': reverse(logout_user),
'platform_name': platform_name,
'enrolled_courses_either_paid': enrolled_courses_either_paid,
'provider_states': [],
'order_history_list': order_history_list,
'courses_requirements_not_met': courses_requirements_not_met,
'nav_hidden': True,
'course_programs': course_programs,
'disable_courseware_js': True,
'xseries_credentials': xseries_credentials,
'show_program_listing': ProgramsApiConfig.current().show_program_listing,
}
ecommerce_service = EcommerceService()
if ecommerce_service.is_enabled(request.user):
context.update({
'use_ecommerce_payment_flow': True,
'ecommerce_payment_page': ecommerce_service.payment_page_url(),
})
return render_to_response('dashboard.html', context)
def _create_recent_enrollment_message(course_enrollments, course_modes): # pylint: disable=invalid-name
"""
Builds a recent course enrollment message.
Constructs a new message template based on any recent course enrollments
for the student.
Args:
course_enrollments (list[CourseEnrollment]): a list of course enrollments.
course_modes (dict): Mapping of course ID's to course mode dictionaries.
Returns:
A string representing the HTML message output from the message template.
None if there are no recently enrolled courses.
"""
recently_enrolled_courses = _get_recently_enrolled_courses(course_enrollments)
if recently_enrolled_courses:
enroll_messages = [
{
"course_id": enrollment.course_overview.id,
"course_name": enrollment.course_overview.display_name,
"allow_donation": _allow_donation(course_modes, enrollment.course_overview.id, enrollment)
}
for enrollment in recently_enrolled_courses
]
platform_name = microsite.get_value('platform_name', settings.PLATFORM_NAME)
return render_to_string(
'enrollment/course_enrollment_message.html',
{'course_enrollment_messages': enroll_messages, 'platform_name': platform_name}
)
def _get_recently_enrolled_courses(course_enrollments):
"""
Given a list of enrollments, filter out all but recent enrollments.
Args:
course_enrollments (list[CourseEnrollment]): A list of course enrollments.
Returns:
list[CourseEnrollment]: A list of recent course enrollments.
"""
seconds = DashboardConfiguration.current().recent_enrollment_time_delta
time_delta = (datetime.datetime.now(UTC) - datetime.timedelta(seconds=seconds))
return [
enrollment for enrollment in course_enrollments
# If the enrollment has no created date, we are explicitly excluding the course
# from the list of recent enrollments.
if enrollment.is_active and enrollment.created > time_delta
]
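# Illustrative sketch (hypothetical configuration): with
# DashboardConfiguration.current().recent_enrollment_time_delta set to 600,
# only active enrollments created within the last ten minutes survive the
# filter above and feed the enrollment message on the dashboard:
#
#   recent = _get_recently_enrolled_courses(course_enrollments)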
def _allow_donation(course_modes, course_id, enrollment):
"""Determines if the dashboard will request donations for the given course.
Check if donations are configured for the platform, and if the current course is accepting donations.
Args:
course_modes (dict): Mapping of course ID's to course mode dictionaries.
course_id (str): The unique identifier for the course.
        enrollment (CourseEnrollment): The user's enrollment in the course.
Returns:
True if the course is allowing donations.
"""
donations_enabled = DonationConfiguration.current().enabled
return (
donations_enabled and
enrollment.mode in course_modes[course_id] and
course_modes[course_id][enrollment.mode].min_price == 0
)
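# Illustrative sketch: a donation prompt is shown only for free modes.
# With DonationConfiguration enabled and e.g. an 'honor' mode whose
# min_price == 0, the check above evaluates to True:
#
#   _allow_donation(course_modes_by_course, enrollment.course_id, enrollment)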
def _update_email_opt_in(request, org):
"""Helper function used to hit the profile API if email opt-in is enabled."""
email_opt_in = request.POST.get('email_opt_in')
if email_opt_in is not None:
email_opt_in_boolean = email_opt_in == 'true'
preferences_api.update_email_opt_in(request.user, org, email_opt_in_boolean)
def _credit_statuses(user, course_enrollments):
"""
Retrieve the status for credit courses.
    A credit course is a course for which a user can purchase
    college credit. The current flow is:
1. User becomes eligible for credit (submits verifications, passes the course, etc.)
2. User purchases credit from a particular credit provider.
3. User requests credit from the provider, usually creating an account on the provider's site.
4. The credit provider notifies us whether the user's request for credit has been accepted or rejected.
The dashboard is responsible for communicating the user's state in this flow.
Arguments:
user (User): The currently logged-in user.
course_enrollments (list[CourseEnrollment]): List of enrollments for the
user.
Returns: dict
The returned dictionary has keys that are `CourseKey`s and values that
are dictionaries with:
* eligible (bool): True if the user is eligible for credit in this course.
* deadline (datetime): The deadline for purchasing and requesting credit for this course.
* purchased (bool): Whether the user has purchased credit for this course.
* provider_name (string): The display name of the credit provider.
* provider_status_url (string): A URL the user can visit to check on their credit request status.
* request_status (string): Either "pending", "approved", or "rejected"
* error (bool): If true, an unexpected error occurred when retrieving the credit status,
so the user should contact the support team.
Example:
>>> _credit_statuses(user, course_enrollments)
{
CourseKey.from_string("edX/DemoX/Demo_Course"): {
"course_key": "edX/DemoX/Demo_Course",
"eligible": True,
"deadline": 2015-11-23 00:00:00 UTC,
"purchased": True,
"provider_name": "Hogwarts",
"provider_status_url": "http://example.com/status",
"request_status": "pending",
"error": False
}
}
"""
from openedx.core.djangoapps.credit import api as credit_api
# Feature flag off
if not settings.FEATURES.get("ENABLE_CREDIT_ELIGIBILITY"):
return {}
request_status_by_course = {
request["course_key"]: request["status"]
for request in credit_api.get_credit_requests_for_user(user.username)
}
credit_enrollments = {
enrollment.course_id: enrollment
for enrollment in course_enrollments
if enrollment.mode == "credit"
}
# When a user purchases credit in a course, the user's enrollment
# mode is set to "credit" and an enrollment attribute is set
# with the ID of the credit provider. We retrieve *all* such attributes
# here to minimize the number of database queries.
purchased_credit_providers = {
attribute.enrollment.course_id: attribute.value
for attribute in CourseEnrollmentAttribute.objects.filter(
namespace="credit",
name="provider_id",
enrollment__in=credit_enrollments.values()
).select_related("enrollment")
}
provider_info_by_id = {
provider["id"]: provider
for provider in credit_api.get_credit_providers()
}
statuses = {}
for eligibility in credit_api.get_eligibilities_for_user(user.username):
course_key = CourseKey.from_string(unicode(eligibility["course_key"]))
providers_names = get_credit_provider_display_names(course_key)
status = {
"course_key": unicode(course_key),
"eligible": True,
"deadline": eligibility["deadline"],
"purchased": course_key in credit_enrollments,
"provider_name": make_providers_strings(providers_names),
"provider_status_url": None,
"provider_id": None,
"request_status": request_status_by_course.get(course_key),
"error": False,
}
# If the user has purchased credit, then include information about the credit
# provider from which the user purchased credit.
        # We retrieve the provider's ID from an "enrollment attribute" set on the user's
# enrollment when the user's order for credit is fulfilled by the E-Commerce service.
if status["purchased"]:
provider_id = purchased_credit_providers.get(course_key)
if provider_id is None:
status["error"] = True
log.error(
u"Could not find credit provider associated with credit enrollment "
u"for user %s in course %s. The user will not be able to see his or her "
u"credit request status on the student dashboard. This attribute should "
u"have been set when the user purchased credit in the course.",
user.id, course_key
)
else:
provider_info = provider_info_by_id.get(provider_id, {})
status["provider_name"] = provider_info.get("display_name")
status["provider_status_url"] = provider_info.get("status_url")
status["provider_id"] = provider_id
statuses[course_key] = status
return statuses
@transaction.non_atomic_requests
@require_POST
@outer_atomic(read_committed=True)
def change_enrollment(request, check_access=True):
"""
Modify the enrollment status for the logged-in user.
The request parameter must be a POST request (other methods return 405)
that specifies course_id and enrollment_action parameters. If course_id or
enrollment_action is not specified, if course_id is not valid, if
enrollment_action is something other than "enroll" or "unenroll", if
enrollment_action is "enroll" and enrollment is closed for the course, or
if enrollment_action is "unenroll" and the user is not enrolled in the
course, a 400 error will be returned. If the user is not logged in, 403
will be returned; it is important that only this case return 403 so the
front end can redirect the user to a registration or login page when this
happens. This function should only be called from an AJAX request, so
the error messages in the responses should never actually be user-visible.
Args:
request (`Request`): The Django request object
Keyword Args:
check_access (boolean): If True, we check that an accessible course actually
exists for the given course_key before we enroll the student.
            The default is True, which is what any standard enrollment
            flow wants; callers with non-standard flows (e.g. beta tester
            invitations) may pass False to skip the check.
Returns:
Response
"""
# Get the user
user = request.user
# Ensure the user is authenticated
if not user.is_authenticated():
return HttpResponseForbidden()
# Ensure we received a course_id
action = request.POST.get("enrollment_action")
if 'course_id' not in request.POST:
return HttpResponseBadRequest(_("Course id not specified"))
try:
course_id = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get("course_id"))
except InvalidKeyError:
log.warning(
u"User %s tried to %s with invalid course id: %s",
user.username,
action,
request.POST.get("course_id"),
)
return HttpResponseBadRequest(_("Invalid course id"))
if action == "enroll":
# Make sure the course exists
        # We skip this check on unenroll so that a user can still unenroll from a course whose id has gone bad
if not modulestore().has_course(course_id):
log.warning(
u"User %s tried to enroll in non-existent course %s",
user.username,
course_id
)
return HttpResponseBadRequest(_("Course id is invalid"))
# Record the user's email opt-in preference
if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
_update_email_opt_in(request, course_id.org)
available_modes = CourseMode.modes_for_course_dict(course_id)
# Check whether the user is blocked from enrolling in this course
# This can occur if the user's IP is on a global blacklist
# or if the user is enrolling in a country in which the course
# is not available.
redirect_url = embargo_api.redirect_if_blocked(
course_id, user=user, ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return HttpResponse(redirect_url)
# Check that auto enrollment is allowed for this course
# (= the course is NOT behind a paywall)
if CourseMode.can_auto_enroll(course_id):
# Enroll the user using the default mode (audit)
# We're assuming that users of the course enrollment table
# will NOT try to look up the course enrollment model
# by its slug. If they do, it's possible (based on the state of the database)
# for no such model to exist, even though we've set the enrollment type
# to "audit".
try:
enroll_mode = CourseMode.auto_enroll_mode(course_id, available_modes)
if enroll_mode:
CourseEnrollment.enroll(user, course_id, check_access=check_access, mode=enroll_mode)
except Exception: # pylint: disable=broad-except
return HttpResponseBadRequest(_("Could not enroll"))
# If we have more than one course mode or professional ed is enabled,
# then send the user to the choose your track page.
# (In the case of no-id-professional/professional ed, this will redirect to a page that
# funnels users directly into the verification / payment flow)
if CourseMode.has_verified_mode(available_modes) or CourseMode.has_professional_mode(available_modes):
return HttpResponse(
reverse("course_modes_choose", kwargs={'course_id': unicode(course_id)})
)
# Otherwise, there is only one mode available (the default)
return HttpResponse()
elif action == "unenroll":
enrollment = CourseEnrollment.get_enrollment(user, course_id)
if not enrollment:
return HttpResponseBadRequest(_("You are not enrolled in this course"))
certificate_info = cert_info(user, enrollment.course_overview, enrollment.mode)
if certificate_info.get('status') in DISABLE_UNENROLL_CERT_STATES:
return HttpResponseBadRequest(_("Your certificate prevents you from unenrolling from this course"))
CourseEnrollment.unenroll(user, course_id)
return HttpResponse()
else:
return HttpResponseBadRequest(_("Enrollment action is invalid"))
# Need different levels of logging
@ensure_csrf_cookie
def login_user(request, error=""): # pylint: disable=too-many-statements,unused-argument
"""AJAX request to log in the user."""
backend_name = None
email = None
password = None
redirect_url = None
response = None
running_pipeline = None
third_party_auth_requested = third_party_auth.is_enabled() and pipeline.running(request)
third_party_auth_successful = False
trumped_by_first_party_auth = bool(request.POST.get('email')) or bool(request.POST.get('password'))
user = None
platform_name = microsite.get_value("platform_name", settings.PLATFORM_NAME)
if third_party_auth_requested and not trumped_by_first_party_auth:
# The user has already authenticated via third-party auth and has not
# asked to do first party auth by supplying a username or password. We
# now want to put them through the same logging and cookie calculation
# logic as with first-party auth.
running_pipeline = pipeline.get(request)
username = running_pipeline['kwargs'].get('username')
backend_name = running_pipeline['backend']
third_party_uid = running_pipeline['kwargs']['uid']
requested_provider = provider.Registry.get_from_pipeline(running_pipeline)
try:
user = pipeline.get_authenticated_user(requested_provider, username, third_party_uid)
third_party_auth_successful = True
except User.DoesNotExist:
AUDIT_LOG.warning(
u"Login failed - user with username {username} has no social auth "
"with backend_name {backend_name}".format(
username=username, backend_name=backend_name)
)
message = _(
"You've successfully logged into your {provider_name} account, "
"but this account isn't linked with an {platform_name} account yet."
).format(
platform_name=platform_name,
provider_name=requested_provider.name,
)
message += "<br/><br/>"
message += _(
"Use your {platform_name} username and password to log into {platform_name} below, "
"and then link your {platform_name} account with {provider_name} from your dashboard."
).format(
platform_name=platform_name,
provider_name=requested_provider.name,
)
message += "<br/><br/>"
message += _(
"If you don't have an {platform_name} account yet, "
"click <strong>Register</strong> at the top of the page."
).format(
platform_name=platform_name
)
return HttpResponse(message, content_type="text/plain", status=403)
else:
if 'email' not in request.POST or 'password' not in request.POST:
return JsonResponse({
"success": False,
# TODO: User error message
"value": _('There was an error receiving your login information. Please email us.'),
}) # TODO: this should be status code 400
email = request.POST['email']
password = request.POST['password']
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Unknown user email")
else:
AUDIT_LOG.warning(u"Login failed - Unknown user email: {0}".format(email))
# check if the user has a linked shibboleth account, if so, redirect the user to shib-login
# This behavior is pretty much like what gmail does for shibboleth. Try entering some @stanford.edu
# address into the Gmail login.
if settings.FEATURES.get('AUTH_USE_SHIB') and user:
try:
eamap = ExternalAuthMap.objects.get(user=user)
if eamap.external_domain.startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):
return JsonResponse({
"success": False,
"redirect": reverse('shib-login'),
}) # TODO: this should be status code 301 # pylint: disable=fixme
except ExternalAuthMap.DoesNotExist:
# This is actually the common case, logging in user without external linked login
AUDIT_LOG.info(u"User %s w/o external auth attempting login", user)
# see if account has been locked out due to excessive login failures
user_found_by_email_lookup = user
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
if LoginFailures.is_user_locked_out(user_found_by_email_lookup):
lockout_message = _('This account has been temporarily locked due '
'to excessive login failures. Try again later.')
return JsonResponse({
"success": False,
"value": lockout_message,
}) # TODO: this should be status code 429 # pylint: disable=fixme
# see if the user must reset his/her password due to any policy settings
if user_found_by_email_lookup and PasswordHistory.should_user_reset_password_now(user_found_by_email_lookup):
return JsonResponse({
"success": False,
"value": _('Your password has expired due to password policy on this account. You must '
'reset your password before you can log in again. Please click the '
'"Forgot Password" link on this page to reset your password before logging in again.'),
}) # TODO: this should be status code 403 # pylint: disable=fixme
# if the user doesn't exist, we want to set the username to an invalid
# username so that authentication is guaranteed to fail and we can take
# advantage of the ratelimited backend
username = user.username if user else ""
if not third_party_auth_successful:
try:
user = authenticate(username=username, password=password, request=request)
# this occurs when there are too many attempts from the same IP address
except RateLimitException:
return JsonResponse({
"success": False,
"value": _('Too many failed login attempts. Try again later.'),
}) # TODO: this should be status code 429 # pylint: disable=fixme
if user is None:
# tick the failed login counters if the user exists in the database
if user_found_by_email_lookup and LoginFailures.is_feature_enabled():
LoginFailures.increment_lockout_counter(user_found_by_email_lookup)
# if we didn't find this username earlier, the account for this email
# doesn't exist, and doesn't have a corresponding password
if username != "":
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
loggable_id = user_found_by_email_lookup.id if user_found_by_email_lookup else "<unknown>"
AUDIT_LOG.warning(u"Login failed - password for user.id: {0} is invalid".format(loggable_id))
else:
AUDIT_LOG.warning(u"Login failed - password for {0} is invalid".format(email))
return JsonResponse({
"success": False,
"value": _('Email or password is incorrect.'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
# successful login, clear failed login attempts counters, if applicable
if LoginFailures.is_feature_enabled():
LoginFailures.clear_lockout_counter(user)
# Track the user's sign in
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.identify(
user.id,
{
'email': email,
'username': username
},
{
# Disable MailChimp because we don't want to update the user's email
# and username in MailChimp on every page load. We only need to capture
# this data on registration/activation.
'MailChimp': False
}
)
analytics.track(
user.id,
"edx.bi.user.account.authenticated",
{
'category': "conversion",
'label': request.POST.get('course_id'),
'provider': None
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
if user is not None and user.is_active:
try:
# We do not log here, because we have a handler registered
# to perform logging on successful logins.
login(request, user)
if request.POST.get('remember') == 'true':
request.session.set_expiry(604800)
log.debug("Setting user session to never expire")
else:
request.session.set_expiry(0)
except Exception as exc: # pylint: disable=broad-except
AUDIT_LOG.critical("Login failed - Could not create session. Is memcached running?")
log.critical("Login failed - Could not create session. Is memcached running?")
log.exception(exc)
raise
        redirect_url = None  # The calling AJAX method should know the default destination upon success
if third_party_auth_successful:
redirect_url = pipeline.get_complete_url(backend_name)
response = JsonResponse({
"success": True,
"redirect_url": redirect_url,
})
# Ensure that the external marketing site can
# detect that the user is logged in.
return set_logged_in_cookies(request, response, user)
if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
AUDIT_LOG.warning(u"Login failed - Account not active for user.id: {0}, resending activation".format(user.id))
else:
AUDIT_LOG.warning(u"Login failed - Account not active for user {0}, resending activation".format(username))
reactivation_email_for_user(user)
not_activated_msg = _("This account has not been activated. We have sent another activation "
"message. Please check your email for the activation instructions.")
return JsonResponse({
"success": False,
"value": not_activated_msg,
}) # TODO: this should be status code 400 # pylint: disable=fixme
@csrf_exempt
@require_POST
@social_utils.strategy("social:complete")
def login_oauth_token(request, backend):
"""
Authenticate the client using an OAuth access token by using the token to
retrieve information from a third party and matching that information to an
existing user.
"""
warnings.warn("Please use AccessTokenExchangeView instead.", DeprecationWarning)
backend = request.backend
if isinstance(backend, social_oauth.BaseOAuth1) or isinstance(backend, social_oauth.BaseOAuth2):
if "access_token" in request.POST:
# Tell third party auth pipeline that this is an API call
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
user = None
try:
user = backend.do_auth(request.POST["access_token"])
except (HTTPError, AuthException):
pass
# do_auth can return a non-User object if it fails
if user and isinstance(user, User):
login(request, user)
return JsonResponse(status=204)
else:
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
return JsonResponse({"error": "invalid_token"}, status=401)
else:
return JsonResponse({"error": "invalid_request"}, status=400)
raise Http404
@ensure_csrf_cookie
def logout_user(request):
"""
HTTP request to log out the user. Redirects to marketing page.
Deletes both the CSRF and sessionid cookies so the marketing
site can determine the logged in state of the user
"""
# We do not log here, because we have a handler registered
# to perform logging on successful logouts.
request.is_from_logout = True
logout(request)
if settings.FEATURES.get('AUTH_USE_CAS'):
target = reverse('cas-logout')
else:
target = '/'
response = redirect(target)
delete_logged_in_cookies(response)
return response
@require_GET
@login_required
@ensure_csrf_cookie
def manage_user_standing(request):
"""
Renders the view used to manage user standing. Also displays a table
of user accounts that have been disabled and who disabled them.
"""
if not request.user.is_staff:
raise Http404
all_disabled_accounts = UserStanding.objects.filter(
account_status=UserStanding.ACCOUNT_DISABLED
)
all_disabled_users = [standing.user for standing in all_disabled_accounts]
headers = ['username', 'account_changed_by']
rows = []
for user in all_disabled_users:
row = [user.username, user.standing.changed_by]
rows.append(row)
context = {'headers': headers, 'rows': rows}
return render_to_response("manage_user_standing.html", context)
@require_POST
@login_required
@ensure_csrf_cookie
def disable_account_ajax(request):
"""
Ajax call to change user standing. Endpoint of the form
in manage_user_standing.html
"""
if not request.user.is_staff:
raise Http404
username = request.POST.get('username')
context = {}
if username is None or username.strip() == '':
context['message'] = _('Please enter a username')
return JsonResponse(context, status=400)
account_action = request.POST.get('account_action')
if account_action is None:
context['message'] = _('Please choose an option')
return JsonResponse(context, status=400)
username = username.strip()
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
context['message'] = _("User with username {} does not exist").format(username)
return JsonResponse(context, status=400)
else:
user_account, _success = UserStanding.objects.get_or_create(
user=user, defaults={'changed_by': request.user},
)
if account_action == 'disable':
user_account.account_status = UserStanding.ACCOUNT_DISABLED
context['message'] = _("Successfully disabled {}'s account").format(username)
log.info(u"%s disabled %s's account", request.user, username)
elif account_action == 'reenable':
user_account.account_status = UserStanding.ACCOUNT_ENABLED
context['message'] = _("Successfully reenabled {}'s account").format(username)
log.info(u"%s reenabled %s's account", request.user, username)
else:
context['message'] = _("Unexpected account status")
return JsonResponse(context, status=400)
user_account.changed_by = request.user
user_account.standing_last_changed_at = datetime.datetime.now(UTC)
user_account.save()
return JsonResponse(context)
@login_required
@ensure_csrf_cookie
def change_setting(request):
"""JSON call to change a profile setting: Right now, location"""
# TODO (vshnayder): location is no longer used
u_prof = UserProfile.objects.get(user=request.user) # request.user.profile_cache
if 'location' in request.POST:
u_prof.location = request.POST['location']
u_prof.save()
return JsonResponse({
"success": True,
"location": u_prof.location,
})
class AccountValidationError(Exception):
def __init__(self, message, field):
super(AccountValidationError, self).__init__(message)
self.field = field
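# Illustrative sketch: callers catch this exception and surface the
# offending field, as create_account() does further down:
#
#   try:
#       _do_create_account(form)
#   except AccountValidationError as exc:
#       return JsonResponse(
#           {'success': False, 'value': exc.message, 'field': exc.field},
#           status=400,
#       )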
@receiver(post_save, sender=User)
def user_signup_handler(sender, **kwargs): # pylint: disable=unused-argument
"""
    Handler that saves the user's signup source
    when the user is created.
"""
if 'created' in kwargs and kwargs['created']:
site = microsite.get_value('SITE_NAME')
if site:
user_signup_source = UserSignupSource(user=kwargs['instance'], site=site)
user_signup_source.save()
log.info(u'user {} originated from a white labeled "Microsite"'.format(kwargs['instance'].id))
def _do_create_account(form, custom_form=None):
"""
Given cleaned post variables, create the User and UserProfile objects, as well as the
registration for this user.
Returns a tuple (User, UserProfile, Registration).
Note: this function is also used for creating test users.
"""
errors = {}
errors.update(form.errors)
if custom_form:
errors.update(custom_form.errors)
if errors:
raise ValidationError(errors)
user = User(
username=form.cleaned_data["username"],
email=form.cleaned_data["email"],
is_active=False
)
user.set_password(form.cleaned_data["password"])
registration = Registration()
# TODO: Rearrange so that if part of the process fails, the whole process fails.
# Right now, we can have e.g. no registration e-mail sent out and a zombie account
try:
with transaction.atomic():
user.save()
if custom_form:
custom_model = custom_form.save(commit=False)
custom_model.user = user
custom_model.save()
except IntegrityError:
# Figure out the cause of the integrity error
if len(User.objects.filter(username=user.username)) > 0:
raise AccountValidationError(
_("An account with the Public Username '{username}' already exists.").format(username=user.username),
field="username"
)
elif len(User.objects.filter(email=user.email)) > 0:
raise AccountValidationError(
_("An account with the Email '{email}' already exists.").format(email=user.email),
field="email"
)
else:
raise
# add this account creation to password history
# NOTE, this will be a NOP unless the feature has been turned on in configuration
password_history_entry = PasswordHistory()
password_history_entry.create(user)
registration.register(user)
profile_fields = [
"name", "level_of_education", "gender", "mailing_address", "city", "country", "goals",
"year_of_birth"
]
profile = UserProfile(
user=user,
**{key: form.cleaned_data.get(key) for key in profile_fields}
)
extended_profile = form.cleaned_extended_profile
if extended_profile:
profile.meta = json.dumps(extended_profile)
try:
profile.save()
except Exception: # pylint: disable=broad-except
log.exception("UserProfile creation failed for user {id}.".format(id=user.id))
raise
return (user, profile, registration)
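# Illustrative sketch (test-style usage with hypothetical values),
# mirroring how auto_auth() below drives this helper:
#
#   form = AccountCreationForm(
#       data={'username': 'jane', 'email': 'jane@example.com',
#             'password': 'secret', 'name': 'Jane Doe'},
#       tos_required=False,
#   )
#   user, profile, registration = _do_create_account(form)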
def create_account_with_params(request, params):
"""
Given a request and a dict of parameters (which may or may not have come
from the request), create an account for the requesting user, including
creating a comments service user object and sending an activation email.
This also takes external/third-party auth into account, updates that as
necessary, and authenticates the user for the request's session.
Does not return anything.
Raises AccountValidationError if an account with the username or email
specified by params already exists, or ValidationError if any of the given
parameters is invalid for any other reason.
Issues with this code:
* It is not transactional. If there is a failure part-way, an incomplete
account will be created and left in the database.
* Third-party auth passwords are not verified. There is a comment that
they are unused, but it would be helpful to have a sanity check that
they are sane.
    * It is over 300 lines long (!) and includes disparate functionality, from
registration e-mails to all sorts of other things. It should be broken
up into semantically meaningful functions.
* The user-facing text is rather unfriendly (e.g. "Username must be a
minimum of two characters long" rather than "Please use a username of
at least two characters").
"""
# Copy params so we can modify it; we can't just do dict(params) because if
# params is request.POST, that results in a dict containing lists of values
params = dict(params.items())
# allow for microsites to define their own set of required/optional/hidden fields
extra_fields = microsite.get_value(
'REGISTRATION_EXTRA_FIELDS',
getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
)
# Boolean of whether a 3rd party auth provider and credentials were provided in
# the API so the newly created account can link with the 3rd party account.
#
# Note: this is orthogonal to the 3rd party authentication pipeline that occurs
# when the account is created via the browser and redirect URLs.
should_link_with_social_auth = third_party_auth.is_enabled() and 'provider' in params
if should_link_with_social_auth or (third_party_auth.is_enabled() and pipeline.running(request)):
params["password"] = pipeline.make_random_password()
# if doing signup for an external authorization, then get email, password, name from the eamap
# don't use the ones from the form, since the user could have hacked those
# unless originally we didn't get a valid email or name from the external auth
# TODO: We do not check whether these values meet all necessary criteria, such as email length
do_external_auth = 'ExternalAuthMap' in request.session
if do_external_auth:
eamap = request.session['ExternalAuthMap']
try:
validate_email(eamap.external_email)
params["email"] = eamap.external_email
except ValidationError:
pass
if eamap.external_name.strip() != '':
params["name"] = eamap.external_name
params["password"] = eamap.internal_password
log.debug(u'In create_account with external_auth: user = %s, email=%s', params["name"], params["email"])
extended_profile_fields = microsite.get_value('extended_profile_fields', [])
enforce_password_policy = (
settings.FEATURES.get("ENFORCE_PASSWORD_POLICY", False) and
not do_external_auth
)
# Can't have terms of service for certain SHIB users, like at Stanford
registration_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})
tos_required = (
registration_fields.get('terms_of_service') != 'hidden' or
registration_fields.get('honor_code') != 'hidden'
) and (
not settings.FEATURES.get("AUTH_USE_SHIB") or
not settings.FEATURES.get("SHIB_DISABLE_TOS") or
not do_external_auth or
not eamap.external_domain.startswith(
external_auth.views.SHIBBOLETH_DOMAIN_PREFIX
)
)
form = AccountCreationForm(
data=params,
extra_fields=extra_fields,
extended_profile_fields=extended_profile_fields,
enforce_username_neq_password=True,
enforce_password_policy=enforce_password_policy,
tos_required=tos_required,
)
custom_form = get_registration_extension_form(data=params)
# Perform operations within a transaction that are critical to account creation
with transaction.atomic():
# first, create the account
(user, profile, registration) = _do_create_account(form, custom_form)
# next, link the account with social auth, if provided via the API.
# (If the user is using the normal register page, the social auth pipeline does the linking, not this code)
if should_link_with_social_auth:
backend_name = params['provider']
request.social_strategy = social_utils.load_strategy(request)
redirect_uri = reverse('social:complete', args=(backend_name, ))
request.backend = social_utils.load_backend(request.social_strategy, backend_name, redirect_uri)
social_access_token = params.get('access_token')
if not social_access_token:
raise ValidationError({
'access_token': [
_("An access_token is required when passing value ({}) for provider.").format(
params['provider']
)
]
})
request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_REGISTER_API
pipeline_user = None
error_message = ""
try:
pipeline_user = request.backend.do_auth(social_access_token, user=user)
except AuthAlreadyAssociated:
error_message = _("The provided access_token is already associated with another user.")
except (HTTPError, AuthException):
error_message = _("The provided access_token is not valid.")
if not pipeline_user or not isinstance(pipeline_user, User):
# Ensure user does not re-enter the pipeline
request.social_strategy.clean_partial_pipeline()
raise ValidationError({'access_token': [error_message]})
# Perform operations that are non-critical parts of account creation
preferences_api.set_user_preference(user, LANGUAGE_KEY, get_language())
if settings.FEATURES.get('ENABLE_DISCUSSION_EMAIL_DIGEST'):
try:
enable_notifications(user)
except Exception: # pylint: disable=broad-except
log.exception("Enable discussion notifications failed for user {id}.".format(id=user.id))
dog_stats_api.increment("common.student.account_created")
# If the user is registering via 3rd party auth, track which provider they use
third_party_provider = None
running_pipeline = None
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
third_party_provider = provider.Registry.get_from_pipeline(running_pipeline)
# Track the user's registration
if hasattr(settings, 'LMS_SEGMENT_KEY') and settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
identity_args = [
user.id, # pylint: disable=no-member
{
'email': user.email,
'username': user.username,
'name': profile.name,
# Mailchimp requires the age & yearOfBirth to be integers, we send a sane integer default if falsey.
'age': profile.age or -1,
'yearOfBirth': profile.year_of_birth or datetime.datetime.now(UTC).year,
'education': profile.level_of_education_display,
'address': profile.mailing_address,
'gender': profile.gender_display,
'country': unicode(profile.country),
}
]
if hasattr(settings, 'MAILCHIMP_NEW_USER_LIST_ID'):
identity_args.append({
"MailChimp": {
"listId": settings.MAILCHIMP_NEW_USER_LIST_ID
}
})
analytics.identify(*identity_args)
analytics.track(
user.id,
"edx.bi.user.account.registered",
{
'category': 'conversion',
'label': params.get('course_id'),
'provider': third_party_provider.name if third_party_provider else None
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
create_comments_service_user(user)
# Don't send email if we are:
#
# 1. Doing load testing.
# 2. Random user generation for other forms of testing.
# 3. External auth bypassing activation.
# 4. Have the platform configured to not require e-mail activation.
# 5. Registering a new user using a trusted third party provider (with skip_email_verification=True)
#
# Note that this feature is only tested as a flag set one way or
    # the other for *new* systems. We need to be careful about
# changing settings on a running system to make sure no users are
# left in an inconsistent state (or doing a migration if they are).
send_email = (
not settings.FEATURES.get('SKIP_EMAIL_VALIDATION', None) and
not settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING') and
not (do_external_auth and settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH')) and
not (
third_party_provider and third_party_provider.skip_email_verification and
user.email == running_pipeline['kwargs'].get('details', {}).get('email')
)
)
if send_email:
context = {
'name': profile.name,
'key': registration.activation_key,
}
# composes activation email
subject = render_to_string('emails/activation_email_subject.txt', context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):
dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']
message = ("Activation for %s (%s): %s\n" % (user, user.email, profile.name) +
'-' * 80 + '\n\n' + message)
mail.send_mail(subject, message, from_address, [dest_addr], fail_silently=False)
else:
user.email_user(subject, message, from_address)
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send activation email to user from "%s"', from_address, exc_info=True)
else:
registration.activate()
_enroll_user_in_pending_courses(user) # Enroll student in any pending courses
# Immediately after a user creates an account, we log them in. They are only
# logged in until they close the browser. They can't log in again until they click
# the activation link from the email.
new_user = authenticate(username=user.username, password=params['password'])
login(request, new_user)
request.session.set_expiry(0)
# TODO: there is no error checking here to see that the user actually logged in successfully,
# and is not yet an active user.
if new_user is not None:
AUDIT_LOG.info(u"Login success on new account creation - {0}".format(new_user.username))
if do_external_auth:
eamap.user = new_user
eamap.dtsignup = datetime.datetime.now(UTC)
eamap.save()
AUDIT_LOG.info(u"User registered with external_auth %s", new_user.username)
AUDIT_LOG.info(u'Updated ExternalAuthMap for %s to be %s', new_user.username, eamap)
if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):
log.info('bypassing activation email')
new_user.is_active = True
new_user.save()
AUDIT_LOG.info(u"Login activated on extauth account - {0} ({1})".format(new_user.username, new_user.email))
return new_user
def _enroll_user_in_pending_courses(student):
"""
    Enroll the student in any pending courses they may have.
"""
ceas = CourseEnrollmentAllowed.objects.filter(email=student.email)
for cea in ceas:
if cea.auto_enroll:
enrollment = CourseEnrollment.enroll(student, cea.course_id)
manual_enrollment_audit = ManualEnrollmentAudit.get_manual_enrollment_by_email(student.email)
if manual_enrollment_audit is not None:
# get the enrolled by user and reason from the ManualEnrollmentAudit table.
# then create a new ManualEnrollmentAudit table entry for the same email
# different transition state.
ManualEnrollmentAudit.create_manual_enrollment_audit(
manual_enrollment_audit.enrolled_by, student.email, ALLOWEDTOENROLL_TO_ENROLLED,
manual_enrollment_audit.reason, enrollment
)
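# Illustrative sketch (hypothetical email and course key): an instructor
# pre-registers a learner with auto_enroll, so account creation later
# converts the allowance into a real enrollment via the helper above:
#
#   CourseEnrollmentAllowed.objects.create(
#       email='jane@example.com', course_id=course_key, auto_enroll=True)
#   ...
#   _enroll_user_in_pending_courses(user)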
@csrf_exempt
def create_account(request, post_override=None):
"""
JSON call to create new edX account.
Used by form in signup_modal.html, which is included into navigation.html
"""
warnings.warn("Please use RegistrationView instead.", DeprecationWarning)
try:
user = create_account_with_params(request, post_override or request.POST)
except AccountValidationError as exc:
return JsonResponse({'success': False, 'value': exc.message, 'field': exc.field}, status=400)
except ValidationError as exc:
field, error_list = next(exc.message_dict.iteritems())
return JsonResponse(
{
"success": False,
"field": field,
"value": error_list[0],
},
status=400
)
    redirect_url = None  # The calling AJAX method should know the default destination upon success
# Resume the third-party-auth pipeline if necessary.
if third_party_auth.is_enabled() and pipeline.running(request):
running_pipeline = pipeline.get(request)
redirect_url = pipeline.get_complete_url(running_pipeline['backend'])
response = JsonResponse({
'success': True,
'redirect_url': redirect_url,
})
set_logged_in_cookies(request, response, user)
return response
def auto_auth(request):
"""
Create or configure a user account, then log in as that user.
Enabled only when
settings.FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] is true.
Accepts the following querystring parameters:
* `username`, `email`, and `password` for the user account
* `full_name` for the user profile (the user's full name; defaults to the username)
* `staff`: Set to "true" to make the user global staff.
* `course_id`: Enroll the student in the course with `course_id`
* `roles`: Comma-separated list of roles to grant the student in the course with `course_id`
* `no_login`: Define this to create the user but not login
* `redirect`: Set to "true" will redirect to course if course_id is defined, otherwise it will redirect to dashboard
If username, email, or password are not provided, use
randomly generated credentials.
"""
# Generate a unique name to use if none provided
unique_name = uuid.uuid4().hex[0:30]
# Use the params from the request, otherwise use these defaults
username = request.GET.get('username', unique_name)
password = request.GET.get('password', unique_name)
email = request.GET.get('email', unique_name + "@example.com")
full_name = request.GET.get('full_name', username)
is_staff = request.GET.get('staff', None)
is_superuser = request.GET.get('superuser', None)
course_id = request.GET.get('course_id', None)
# mode has to be one of 'honor'/'professional'/'verified'/'audit'/'no-id-professional'/'credit'
enrollment_mode = request.GET.get('enrollment_mode', 'honor')
course_key = None
if course_id:
course_key = CourseLocator.from_string(course_id)
role_names = [v.strip() for v in request.GET.get('roles', '').split(',') if v.strip()]
redirect_when_done = request.GET.get('redirect', '').lower() == 'true'
login_when_done = 'no_login' not in request.GET
form = AccountCreationForm(
data={
'username': username,
'email': email,
'password': password,
'name': full_name,
},
tos_required=False
)
# Attempt to create the account.
# If successful, this will return a tuple containing
# the new user object.
try:
user, profile, reg = _do_create_account(form)
except (AccountValidationError, ValidationError):
# Attempt to retrieve the existing user.
user = User.objects.get(username=username)
user.email = email
user.set_password(password)
user.save()
profile = UserProfile.objects.get(user=user)
reg = Registration.objects.get(user=user)
# Set the user's global staff bit
if is_staff is not None:
user.is_staff = (is_staff == "true")
user.save()
if is_superuser is not None:
user.is_superuser = (is_superuser == "true")
user.save()
# Activate the user
reg.activate()
reg.save()
# ensure parental consent threshold is met
year = datetime.date.today().year
age_limit = settings.PARENTAL_CONSENT_AGE_LIMIT
profile.year_of_birth = (year - age_limit) - 1
profile.save()
# Enroll the user in a course
if course_key is not None:
CourseEnrollment.enroll(user, course_key, mode=enrollment_mode)
# Apply the roles
for role_name in role_names:
role = Role.objects.get(name=role_name, course_id=course_key)
user.roles.add(role)
# Log in as the user
if login_when_done:
user = authenticate(username=username, password=password)
login(request, user)
create_comments_service_user(user)
# Provide the user with a valid CSRF token
# then return a 200 response unless redirect is true
if redirect_when_done:
# Redirect to course info page if course_id is known
if course_id:
try:
# redirect to course info page in LMS
redirect_url = reverse(
'info',
kwargs={'course_id': course_id}
)
except NoReverseMatch:
# redirect to course outline page in Studio
redirect_url = reverse(
'course_handler',
kwargs={'course_key_string': course_id}
)
else:
try:
# redirect to dashboard for LMS
redirect_url = reverse('dashboard')
except NoReverseMatch:
# redirect to home for Studio
redirect_url = reverse('home')
return redirect(redirect_url)
elif request.META.get('HTTP_ACCEPT') == 'application/json':
response = JsonResponse({
'created_status': u"Logged in" if login_when_done else "Created",
'username': username,
'email': email,
'password': password,
'user_id': user.id, # pylint: disable=no-member
'anonymous_id': anonymous_id_for_user(user, None),
})
else:
success_msg = u"{} user {} ({}) with password {} and user_id {}".format(
u"Logged in" if login_when_done else "Created",
username, email, password, user.id # pylint: disable=no-member
)
response = HttpResponse(success_msg)
response.set_cookie('csrftoken', csrf(request)['csrf_token'])
return response
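# Illustrative sketch (hypothetical path, parameters, and course id):
# create a staff user, enroll them, and redirect into the course:
#
#   GET /auto_auth?username=jane&staff=true&redirect=true
#       &course_id=course-v1:edX+DemoX+Demo_Course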
@ensure_csrf_cookie
def activate_account(request, key):
"""When link in activation e-mail is clicked"""
regs = Registration.objects.filter(activation_key=key)
if len(regs) == 1:
user_logged_in = request.user.is_authenticated()
already_active = True
if not regs[0].user.is_active:
regs[0].activate()
already_active = False
            # Enroll the student in any pending course enrollments if the auto_enroll flag is set
_enroll_user_in_pending_courses(regs[0].user)
resp = render_to_response(
"registration/activation_complete.html",
{
'user_logged_in': user_logged_in,
'already_active': already_active
}
)
return resp
if len(regs) == 0:
return render_to_response(
"registration/activation_invalid.html",
{'csrf': csrf(request)['csrf_token']}
)
return HttpResponseServerError(_("Unknown error. Please e-mail us to let us know how it happened."))
@csrf_exempt
@require_POST
def password_reset(request):
""" Attempts to send a password reset e-mail. """
# Add some rate limiting here by re-using the RateLimitMixin as a helper class
limiter = BadRequestRateLimiter()
if limiter.is_rate_limit_exceeded(request):
AUDIT_LOG.warning("Rate limit exceeded in password_reset")
return HttpResponseForbidden()
form = PasswordResetFormNoActive(request.POST)
if form.is_valid():
form.save(use_https=request.is_secure(),
from_email=microsite.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL),
request=request,
domain_override=request.get_host())
        # When the password change is complete, an "edx.user.settings.changed" event will be emitted.
# But because changing the password is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "password",
"old": None,
"new": None,
"user_id": request.user.id,
}
)
else:
# bad user? tick the rate limiter counter
AUDIT_LOG.info("Bad password_reset user passed in.")
limiter.tick_bad_request_counter(request)
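    # The response below is identical for valid and invalid submissions, so the
    # endpoint does not reveal whether an account exists for a given address.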
return JsonResponse({
'success': True,
'value': render_to_string('registration/password_reset_done.html', {}),
})
def password_reset_confirm_wrapper(
request,
uidb36=None,
token=None,
):
""" A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
"""
# cribbed from django.contrib.auth.views.password_reset_confirm
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
user.is_active = True
user.save()
except (ValueError, User.DoesNotExist):
pass
# tie in password strength enforcement as an optional level of
# security protection
err_msg = None
if request.method == 'POST':
password = request.POST['new_password1']
if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):
try:
validate_password_length(password)
validate_password_complexity(password)
validate_password_dictionary(password)
            except ValidationError as err:
err_msg = _('Password: ') + '; '.join(err.messages)
# also, check the password reuse policy
if not PasswordHistory.is_allowable_password_reuse(user, password):
if user.is_staff:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE']
else:
num_distinct = settings.ADVANCED_SECURITY_CONFIG['MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE']
            # Because of how ngettext works, splitting the following into shorter lines would be ugly.
# pylint: disable=line-too-long
err_msg = ungettext(
"You are re-using a password that you have used recently. You must have {num} distinct password before reusing a previous password.",
"You are re-using a password that you have used recently. You must have {num} distinct passwords before reusing a previous password.",
num_distinct
).format(num=num_distinct)
        # also, check to see if passwords are being reset too frequently
if PasswordHistory.is_password_reset_too_soon(user):
num_days = settings.ADVANCED_SECURITY_CONFIG['MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS']
            # Because of how ngettext works, splitting the following into shorter lines would be ugly.
# pylint: disable=line-too-long
err_msg = ungettext(
"You are resetting passwords too frequently. Due to security policies, {num} day must elapse between password resets.",
"You are resetting passwords too frequently. Due to security policies, {num} days must elapse between password resets.",
num_days
).format(num=num_days)
if err_msg:
        # We have a password reset attempt that violates a security policy; use
        # the existing Django template to communicate this back to the user
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': err_msg,
'platform_name': microsite.get_value('platform_name', settings.PLATFORM_NAME),
}
return TemplateResponse(request, 'registration/password_reset_confirm.html', context)
else:
# we also want to pass settings.PLATFORM_NAME in as extra_context
extra_context = {"platform_name": microsite.get_value('platform_name', settings.PLATFORM_NAME)}
# Support old password reset URLs that used base36 encoded user IDs.
# https://github.com/django/django/commit/1184d077893ff1bc947e45b00a4d565f3df81776#diff-c571286052438b2e3190f8db8331a92bR231
try:
uidb64 = force_text(urlsafe_base64_encode(force_bytes(base36_to_int(uidb36))))
except ValueError:
uidb64 = '1' # dummy invalid ID (incorrect padding for base64)
if request.method == 'POST':
# remember what the old password hash is before we call down
old_password_hash = user.password
result = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=extra_context
)
# get the updated user
updated_user = User.objects.get(id=uid_int)
# did the password hash change, if so record it in the PasswordHistory
if updated_user.password != old_password_hash:
entry = PasswordHistory()
entry.create(updated_user)
return result
else:
return password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=extra_context
)
def reactivation_email_for_user(user):
try:
reg = Registration.objects.get(user=user)
except Registration.DoesNotExist:
return JsonResponse({
"success": False,
"error": _('No inactive user with this e-mail exists'),
}) # TODO: this should be status code 400 # pylint: disable=fixme
context = {
'name': user.profile.name,
'key': reg.activation_key,
}
subject = render_to_string('emails/activation_email_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/activation_email.txt', context)
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send reactivation email from "%s"', settings.DEFAULT_FROM_EMAIL, exc_info=True)
return JsonResponse({
"success": False,
"error": _('Unable to send reactivation email')
}) # TODO: this should be status code 500 # pylint: disable=fixme
return JsonResponse({"success": True})
def validate_new_email(user, new_email):
"""
    Given a new email for a user, does some basic verification of the new address. If any issues are
    encountered during verification, a ValueError will be thrown.
"""
try:
validate_email(new_email)
except ValidationError:
raise ValueError(_('Valid e-mail address required.'))
if new_email == user.email:
raise ValueError(_('Old email is the same as the new email.'))
if User.objects.filter(email=new_email).count() != 0:
raise ValueError(_('An account with this e-mail already exists.'))
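# Illustrative sketch (not part of the original module): the error contract of
# validate_new_email -- every failure mode surfaces as a ValueError whose
# message is one of the translated strings above.
def _demo_validate_new_email(user):
    try:
        validate_new_email(user, 'not-an-email')
    except ValueError as err:
        return unicode(err)  # u'Valid e-mail address required.'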
def do_email_change_request(user, new_email, activation_key=None):
"""
Given a new email for a user, does some basic verification of the new address and sends an activation message
to the new address. If any issues are encountered with verification or sending the message, a ValueError will
be thrown.
"""
pec_list = PendingEmailChange.objects.filter(user=user)
if len(pec_list) == 0:
pec = PendingEmailChange()
pec.user = user
else:
pec = pec_list[0]
    # if activation_key is not passed as an argument, generate a random key
if not activation_key:
activation_key = uuid.uuid4().hex
pec.new_email = new_email
pec.activation_key = activation_key
pec.save()
context = {
'key': pec.activation_key,
'old_email': user.email,
'new_email': pec.new_email
}
subject = render_to_string('emails/email_change_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/email_change.txt', context)
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
try:
mail.send_mail(subject, message, from_address, [pec.new_email])
except Exception: # pylint: disable=broad-except
log.error(u'Unable to send email activation link to user from "%s"', from_address, exc_info=True)
raise ValueError(_('Unable to send email activation link. Please try again later.'))
    # When the email address change is complete, an "edx.user.settings.changed" event will be emitted.
# But because changing the email address is multi-step, we also emit an event here so that we can
# track where the request was initiated.
tracker.emit(
SETTING_CHANGE_INITIATED,
{
"setting": "email",
"old": context['old_email'],
"new": context['new_email'],
"user_id": user.id,
}
)
@ensure_csrf_cookie
def confirm_email_change(request, key): # pylint: disable=unused-argument
"""
    User requested a new e-mail. This is called when the activation
    link is clicked. We confirm with the old e-mail, and update the
    account to use the new e-mail.
"""
with transaction.atomic():
try:
pec = PendingEmailChange.objects.get(activation_key=key)
except PendingEmailChange.DoesNotExist:
response = render_to_response("invalid_email_key.html", {})
transaction.set_rollback(True)
return response
user = pec.user
address_context = {
'old_email': user.email,
'new_email': pec.new_email
}
if len(User.objects.filter(email=pec.new_email)) != 0:
response = render_to_response("email_exists.html", {})
transaction.set_rollback(True)
return response
subject = render_to_string('emails/email_change_subject.txt', address_context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/confirm_email_change.txt', address_context)
u_prof = UserProfile.objects.get(user=user)
meta = u_prof.get_meta()
if 'old_emails' not in meta:
meta['old_emails'] = []
meta['old_emails'].append([user.email, datetime.datetime.now(UTC).isoformat()])
u_prof.set_meta(meta)
u_prof.save()
# Send it to the old email...
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to old address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': user.email})
transaction.set_rollback(True)
return response
user.email = pec.new_email
user.save()
pec.delete()
# And send it to the new email...
try:
user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
except Exception: # pylint: disable=broad-except
log.warning('Unable to send confirmation email to new address', exc_info=True)
response = render_to_response("email_change_failed.html", {'email': pec.new_email})
transaction.set_rollback(True)
return response
response = render_to_response("email_change_successful.html", address_context)
return response
@require_POST
@login_required
@ensure_csrf_cookie
def change_email_settings(request):
"""Modify logged-in user's setting for receiving emails from a course."""
user = request.user
course_id = request.POST.get("course_id")
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
receive_emails = request.POST.get("receive_emails")
if receive_emails:
optout_object = Optout.objects.filter(user=user, course_id=course_key)
if optout_object:
optout_object.delete()
log.info(
u"User %s (%s) opted in to receive emails from course %s",
user.username,
user.email,
course_id,
)
track.views.server_track(
request,
"change-email-settings",
{"receive_emails": "yes", "course": course_id},
page='dashboard',
)
else:
Optout.objects.get_or_create(user=user, course_id=course_key)
log.info(
u"User %s (%s) opted out of receiving emails from course %s",
user.username,
user.email,
course_id,
)
track.views.server_track(
request,
"change-email-settings",
{"receive_emails": "no", "course": course_id},
page='dashboard',
)
return JsonResponse({"success": True})
def _get_course_programs(user, user_enrolled_courses): # pylint: disable=invalid-name
"""Build a dictionary of program data required for display on the student dashboard.
Given a user and an iterable of course keys, find all programs relevant to the
user and return them in a dictionary keyed by course key.
Arguments:
user (User): The user to authenticate as when requesting programs.
user_enrolled_courses (list): List of course keys representing the courses in which
the given user has active enrollments.
Returns:
dict, containing programs keyed by course.
"""
course_programs = get_programs_for_dashboard(user, user_enrolled_courses)
programs_data = {}
for course_key, programs in course_programs.viewitems():
for program in programs:
if program.get('status') == 'active' and program.get('category') == 'xseries':
try:
programs_for_course = programs_data.setdefault(course_key, {})
programs_for_course.setdefault('course_program_list', []).append({
'course_count': len(program['course_codes']),
'display_name': program['name'],
'program_id': program['id'],
'program_marketing_url': urljoin(
settings.MKTG_URLS.get('ROOT'),
'xseries' + '/{}'
).format(program['marketing_slug'])
})
programs_for_course['display_category'] = program.get('display_category')
programs_for_course['category'] = program.get('category')
except KeyError:
log.warning('Program structure is invalid, skipping display: %r', program)
return programs_data
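# Illustrative note (not part of the original module): the mapping returned by
# _get_course_programs is keyed by course key and shaped like
#   { course_key: { 'category': 'xseries',
#                   'display_category': <display category>,
#                   'course_program_list': [ { 'course_count': <int>,
#                                              'display_name': <program name>,
#                                              'program_id': <id>,
#                                              'program_marketing_url': <url> } ] } }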
def _get_xseries_credentials(user):
"""Return program credentials data required for display on
the learner dashboard.
Given a user, find all programs for which certificates have been earned
and return list of dictionaries of required program data.
Arguments:
user (User): user object for getting programs credentials.
Returns:
list of dict, containing data corresponding to the programs for which
the user has been awarded a credential.
"""
programs_credentials = get_user_program_credentials(user)
credentials_data = []
for program in programs_credentials:
if program.get('category') == 'xseries':
try:
program_data = {
'display_name': program['name'],
'subtitle': program['subtitle'],
'credential_url': program['credential_url'],
}
credentials_data.append(program_data)
except KeyError:
log.warning('Program structure is invalid: %r', program)
return credentials_data
| devs1991/test_edx_docmode | common/djangoapps/student/views.py | Python | agpl-3.0 | 102,710 | ["VisIt"] | 11352420e5e61de0119e58706be28db89a08db175473b0e0663160d615b843df |
""" :mod: TransformationCleaningAgent
=================================
.. module: TransformationCleaningAgent
:synopsis: clean up of finalised transformations
"""
__RCSID__ = "$Id$"
# # imports
import re
from datetime import datetime, timedelta
# # from DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.WorkloadManagementSystem.Client.WMSClient import WMSClient
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
# # agent's name
AGENT_NAME = 'Transformation/TransformationCleaningAgent'
class TransformationCleaningAgent( AgentModule ):
"""
.. class:: TransformationCleaningAgent
  :param DataManager dm: DataManager instance
  :param TransformationClient transClient: TransformationClient instance
:param FileCatalogClient metadataClient: FileCatalogClient instance
"""
def __init__( self, *args, **kwargs ):
""" c'tor
"""
AgentModule.__init__( self, *args, **kwargs )
# # data manager
self.dm = None
# # transformation client
self.transClient = None
# # wms client
self.wmsClient = None
# # request client
self.reqClient = None
# # file catalog client
self.metadataClient = None
# # transformations types
self.transformationTypes = None
# # directory locations
self.directoryLocations = None
# # transformation metadata
self.transfidmeta = None
    # # archive period in days
self.archiveAfter = None
# # active SEs
self.activeStorages = None
# # transformation log SEs
self.logSE = None
# # enable/disable execution
self.enableFlag = None
def initialize( self ):
""" agent initialisation
    reading and setting config opts
:param self: self reference
"""
# # shifter proxy
self.am_setOption( 'shifterProxy', 'DataManager' )
# # transformations types
self.dataProcTTypes = Operations().getValue( 'Transformations/DataProcessing', ['MCSimulation', 'Merge'] )
self.dataManipTTypes = Operations().getValue( 'Transformations/DataManipulation', ['Replication', 'Removal'] )
agentTSTypes = self.am_getOption( 'TransformationTypes', [] )
if agentTSTypes:
self.transformationTypes = sorted( agentTSTypes )
else:
self.transformationTypes = sorted( self.dataProcTTypes + self.dataManipTTypes )
self.log.info( "Will consider the following transformation types: %s" % str( self.transformationTypes ) )
# # directory locations
self.directoryLocations = sorted( self.am_getOption( 'DirectoryLocations', [ 'TransformationDB',
'MetadataCatalog' ] ) )
self.log.info( "Will search for directories in the following locations: %s" % str( self.directoryLocations ) )
# # transformation metadata
self.transfidmeta = self.am_getOption( 'TransfIDMeta', "TransformationID" )
self.log.info( "Will use %s as metadata tag name for TransformationID" % self.transfidmeta )
    # # archive period in days
self.archiveAfter = self.am_getOption( 'ArchiveAfter', 7 ) # days
self.log.info( "Will archive Completed transformations after %d days" % self.archiveAfter )
# # active SEs
self.activeStorages = sorted( self.am_getOption( 'ActiveSEs', [] ) )
self.log.info( "Will check the following storage elements: %s" % str( self.activeStorages ) )
# # transformation log SEs
self.logSE = self.am_getOption( 'TransformationLogSE', 'LogSE' )
self.log.info( "Will remove logs found on storage element: %s" % self.logSE )
    # # enable/disable execution; should this use the CS option Status with default value 'Active'?
self.enableFlag = self.am_getOption( 'EnableFlag', 'True' )
# # data manager
self.dm = DataManager()
# # transformation client
self.transClient = TransformationClient()
# # wms client
self.wmsClient = WMSClient()
# # request client
self.reqClient = ReqClient()
# # file catalog client
self.metadataClient = FileCatalogClient()
return S_OK()
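  # Illustrative note (not part of the original agent): the options read above,
  # with their code-level defaults, as they might appear under the agent's CS
  # section:
  #   TransformationTypes   = (Transformations/DataProcessing + DataManipulation types)
  #   DirectoryLocations    = TransformationDB, MetadataCatalog
  #   TransfIDMeta          = TransformationID
  #   ArchiveAfter          = 7
  #   ActiveSEs             = (empty)
  #   TransformationLogSE   = LogSE
  #   EnableFlag            = True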
#############################################################################
def execute( self ):
""" execution in one agent's cycle
:param self: self reference
"""
self.enableFlag = self.am_getOption( 'EnableFlag', 'True' )
if not self.enableFlag == 'True':
self.log.info( 'TransformationCleaningAgent is disabled by configuration option EnableFlag' )
return S_OK( 'Disabled via CS flag' )
# # Obtain the transformations in Cleaning status and remove any mention of the jobs/files
res = self.transClient.getTransformations( { 'Status' : 'Cleaning',
'Type' : self.transformationTypes } )
if res['OK']:
for transDict in res['Value']:
# # if transformation is of type `Replication` or `Removal`, there is nothing to clean.
# # We just archive
if transDict[ 'Type' ] in self.dataManipTTypes:
res = self.archiveTransformation( transDict['TransformationID'] )
if not res['OK']:
self.log.error( "Problems archiving transformation %s: %s" % ( transDict['TransformationID'],
res['Message'] ) )
else:
res = self.cleanTransformation( transDict['TransformationID'] )
if not res['OK']:
self.log.error( "Problems cleaning transformation %s: %s" % ( transDict['TransformationID'],
res['Message'] ) )
    # # Obtain the transformations in RemovingFiles status and remove the output files
res = self.transClient.getTransformations( { 'Status' : 'RemovingFiles',
'Type' : self.transformationTypes} )
if res['OK']:
for transDict in res['Value']:
res = self.removeTransformationOutput( transDict['TransformationID'] )
if not res['OK']:
self.log.error( "Problems removing transformation %s: %s" % ( transDict['TransformationID'],
res['Message'] ) )
# # Obtain the transformations in Completed status and archive if inactive for X days
olderThanTime = datetime.utcnow() - timedelta( days = self.archiveAfter )
res = self.transClient.getTransformations( { 'Status' : 'Completed',
'Type' : self.transformationTypes },
older = olderThanTime,
timeStamp = 'LastUpdate' )
if res['OK']:
for transDict in res['Value']:
res = self.archiveTransformation( transDict['TransformationID'] )
if not res['OK']:
self.log.error( "Problems archiving transformation %s: %s" % ( transDict['TransformationID'],
res['Message'] ) )
else:
self.log.error( "Could not get the transformations" )
return S_OK()
#############################################################################
#
# Get the transformation directories for checking
#
def getTransformationDirectories( self, transID ):
""" get the directories for the supplied transformation from the transformation system
:param self: self reference
:param int transID: transformation ID
"""
directories = []
if 'TransformationDB' in self.directoryLocations:
res = self.transClient.getTransformationParameters( transID, ['OutputDirectories'] )
if not res['OK']:
self.log.error( "Failed to obtain transformation directories", res['Message'] )
return res
transDirectories = res['Value'].splitlines()
directories = self._addDirs( transID, transDirectories, directories )
if 'MetadataCatalog' in self.directoryLocations:
res = self.metadataClient.findDirectoriesByMetadata( {self.transfidmeta:transID} )
if not res['OK']:
self.log.error( "Failed to obtain metadata catalog directories", res['Message'] )
return res
transDirectories = res['Value']
directories = self._addDirs( transID, transDirectories, directories )
if not directories:
self.log.info( "No output directories found" )
directories = sorted( directories )
return S_OK( directories )
@classmethod
def _addDirs( cls, transID, newDirs, existingDirs ):
""" append uniqe :newDirs: list to :existingDirs: list
:param self: self reference
:param int transID: transformationID
:param list newDirs: src list of paths
:param list existingDirs: dest list of paths
"""
for folder in newDirs:
transStr = str( transID ).zfill( 8 )
if re.search( transStr, str( folder ) ):
if not folder in existingDirs:
existingDirs.append( folder )
return existingDirs
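  # Illustrative note (not part of the original agent): _addDirs keeps only the
  # new paths that embed the zero-padded transformation ID and are not already
  # present, e.g. _addDirs( 123, [ '/prod/00000123/LOG', '/prod/other' ], [] )
  # returns [ '/prod/00000123/LOG' ].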
#############################################################################
#
# These are the methods for performing the cleaning of catalogs and storage
#
def cleanStorageContents( self, directory ):
""" delete lfn dir from all active SE
:param self: self reference
    :param str directory: folder name
"""
for storageElement in self.activeStorages:
res = self.__removeStorageDirectory( directory, storageElement )
if not res['OK']:
return res
return S_OK()
def __removeStorageDirectory( self, directory, storageElement ):
""" wipe out all contents from :directory: at :storageElement:
:param self: self reference
:param str directory: path
:param str storageElement: SE name
"""
self.log.info( 'Removing the contents of %s at %s' % ( directory, storageElement ) )
se = StorageElement( storageElement )
res = returnSingleResult( se.exists( directory ) )
if not res['OK']:
self.log.error( "Failed to obtain existance of directory", res['Message'] )
return res
exists = res['Value']
if not exists:
self.log.info( "The directory %s does not exist at %s " % ( directory, storageElement ) )
return S_OK()
res = returnSingleResult( se.removeDirectory( directory, recursive = True ) )
if not res['OK']:
self.log.error( "Failed to remove storage directory", res['Message'] )
return res
self.log.info( "Successfully removed %d files from %s at %s" % ( res['Value']['FilesRemoved'],
directory,
storageElement ) )
return S_OK()
def cleanCatalogContents( self, directory ):
""" wipe out everything from catalog under folder :directory:
:param self: self reference
    :param str directory: folder name
"""
res = self.__getCatalogDirectoryContents( [directory] )
if not res['OK']:
return res
filesFound = res['Value']
if not filesFound:
self.log.info( "No files are registered in the catalog directory %s" % directory )
return S_OK()
self.log.info( "Attempting to remove %d possible remnants from the catalog and storage" % len( filesFound ) )
res = self.dm.removeFile( filesFound, force = True )
if not res['OK']:
return res
realFailure = False
for lfn, reason in res['Value']['Failed'].items():
if "File does not exist" in str( reason ):
self.log.warn( "File %s not found in some catalog: " % ( lfn ) )
else:
self.log.error( "Failed to remove file found in the catalog", "%s %s" % ( lfn, reason ) )
realFailure = True
if realFailure:
return S_ERROR( "Failed to remove all files found in the catalog" )
return S_OK()
def __getCatalogDirectoryContents( self, directories ):
""" get catalog contents under paths :directories:
:param self: self reference
:param list directories: list of paths in catalog
"""
self.log.info( 'Obtaining the catalog contents for %d directories:' % len( directories ) )
for directory in directories:
self.log.info( directory )
activeDirs = directories
allFiles = {}
fc = FileCatalog()
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
res = returnSingleResult( fc.listDirectory( currentDir ) )
activeDirs.remove( currentDir )
if not res['OK'] and res['Message'].endswith( 'The supplied path does not exist' ):
self.log.info( "The supplied directory %s does not exist" % currentDir )
elif not res['OK']:
if "No such file or directory" in res['Message']:
self.log.info( "%s: %s" % ( currentDir, res['Message'] ) )
else:
self.log.error( "Failed to get directory %s content: %s" % ( currentDir, res['Message'] ) )
else:
dirContents = res['Value']
activeDirs.extend( dirContents['SubDirs'] )
allFiles.update( dirContents['Files'] )
self.log.info( "Found %d files" % len( allFiles ) )
return S_OK( allFiles.keys() )
def cleanTransformationLogFiles( self, directory ):
""" clean up transformation logs from directory :directory:
:param self: self reference
:param str directory: folder name
"""
self.log.info( "Removing log files found in the directory %s" % directory )
res = returnSingleResult( StorageElement( self.logSE ).removeDirectory( directory ) )
if not res['OK']:
self.log.error( "Failed to remove log files", res['Message'] )
return res
self.log.info( "Successfully removed transformation log directory" )
return S_OK()
#############################################################################
#
# These are the functional methods for archiving and cleaning transformations
#
def removeTransformationOutput( self, transID ):
""" This just removes any mention of the output data from the catalog and storage """
self.log.info( "Removing output data for transformation %s" % transID )
res = self.getTransformationDirectories( transID )
if not res['OK']:
self.log.error( 'Problem obtaining directories for transformation %s with result "%s"' % ( transID, res ) )
return S_OK()
directories = res['Value']
for directory in directories:
if not re.search( '/LOG/', directory ):
res = self.cleanCatalogContents( directory )
if not res['OK']:
return res
res = self.cleanStorageContents( directory )
if not res['OK']:
return res
self.log.info( "Removed directories in the catalog and storage for transformation" )
# Clean ALL the possible remnants found in the metadata catalog
res = self.cleanMetadataCatalogFiles( transID )
if not res['OK']:
return res
self.log.info( "Successfully removed output of transformation %d" % transID )
# Change the status of the transformation to RemovedFiles
res = self.transClient.setTransformationParameter( transID, 'Status', 'RemovedFiles' )
if not res['OK']:
self.log.error( "Failed to update status of transformation %s to RemovedFiles" % ( transID ), res['Message'] )
return res
self.log.info( "Updated status of transformation %s to RemovedFiles" % ( transID ) )
return S_OK()
def archiveTransformation( self, transID ):
""" This just removes job from the jobDB and the transformation DB
:param self: self reference
:param int transID: transformation ID
"""
self.log.info( "Archiving transformation %s" % transID )
# Clean the jobs in the WMS and any failover requests found
res = self.cleanTransformationTasks( transID )
if not res['OK']:
return res
# Clean the transformation DB of the files and job information
res = self.transClient.cleanTransformation( transID )
if not res['OK']:
return res
self.log.info( "Successfully archived transformation %d" % transID )
# Change the status of the transformation to archived
res = self.transClient.setTransformationParameter( transID, 'Status', 'Archived' )
if not res['OK']:
self.log.error( "Failed to update status of transformation %s to Archived" % ( transID ), res['Message'] )
return res
self.log.info( "Updated status of transformation %s to Archived" % ( transID ) )
return S_OK()
def cleanTransformation( self, transID ):
""" This removes what was produced by the supplied transformation,
leaving only some info and log in the transformation DB.
"""
self.log.info( "Cleaning transformation %s" % transID )
res = self.getTransformationDirectories( transID )
if not res['OK']:
self.log.error( 'Problem obtaining directories for transformation %s with result "%s"' % ( transID, res ) )
return S_OK()
directories = res['Value']
# Clean the jobs in the WMS and any failover requests found
res = self.cleanTransformationTasks( transID )
if not res['OK']:
return res
# Clean the log files for the jobs
for directory in directories:
if re.search( '/LOG/', directory ):
res = self.cleanTransformationLogFiles( directory )
if not res['OK']:
return res
res = self.cleanCatalogContents( directory )
if not res['OK']:
return res
res = self.cleanStorageContents( directory )
if not res['OK']:
return res
# Clean ALL the possible remnants found in the BK
res = self.cleanMetadataCatalogFiles( transID )
if not res['OK']:
return res
# Clean the transformation DB of the files and job information
res = self.transClient.cleanTransformation( transID )
if not res['OK']:
return res
self.log.info( "Successfully cleaned transformation %d" % transID )
res = self.transClient.setTransformationParameter( transID, 'Status', 'Cleaned' )
if not res['OK']:
self.log.error( "Failed to update status of transformation %s to Cleaned" % ( transID ), res['Message'] )
return res
self.log.info( "Updated status of transformation %s to Cleaned" % ( transID ) )
return S_OK()
def cleanMetadataCatalogFiles( self, transID ):
""" wipe out files from catalog """
res = self.metadataClient.findFilesByMetadata( { self.transfidmeta : transID } )
if not res['OK']:
return res
fileToRemove = res['Value']
if not fileToRemove:
self.log.info( 'No files found for transID %s' % transID )
return S_OK()
res = self.dm.removeFile( fileToRemove, force = True )
if not res['OK']:
return res
for lfn, reason in res['Value']['Failed'].items():
self.log.error( "Failed to remove file found in metadata catalog", "%s %s" % ( lfn, reason ) )
if res['Value']['Failed']:
return S_ERROR( "Failed to remove all files found in the metadata catalog" )
self.log.info( "Successfully removed all files found in the BK" )
return S_OK()
#############################################################################
#
# These are the methods for removing the jobs from the WMS and transformation DB
#
def cleanTransformationTasks( self, transID ):
""" clean tasks from WMS, or from the RMS if it is a DataManipulation transformation
"""
res = self.__getTransformationExternalIDs( transID )
if not res['OK']:
return res
externalIDs = res['Value']
if externalIDs:
res = self.transClient.getTransformationParameters( transID, ['Type'] )
if not res['OK']:
self.log.error( "Failed to determine transformation type" )
return res
transType = res['Value']
if transType in self.dataProcTTypes:
res = self.__removeWMSTasks( externalIDs )
else:
res = self.__removeRequests( externalIDs )
if not res['OK']:
return res
return S_OK()
def __getTransformationExternalIDs( self, transID ):
""" collect all ExternalIDs for transformation :transID:
:param self: self reference
    :param int transID: transformation ID
"""
res = self.transClient.getTransformationTasks( condDict = { 'TransformationID' : transID } )
if not res['OK']:
self.log.error( "Failed to get externalIDs for transformation %d" % transID, res['Message'] )
return res
externalIDs = [ taskDict['ExternalID'] for taskDict in res["Value"] ]
self.log.info( "Found %d tasks for transformation" % len( externalIDs ) )
return S_OK( externalIDs )
def __removeRequests( self, requestIDs ):
""" This will remove requests from the RMS system -
"""
rIDs = [ int( long( j ) ) for j in requestIDs if long( j ) ]
for reqID in rIDs:
self.reqClient.deleteRequest( reqID )
return S_OK()
def __removeWMSTasks( self, transJobIDs ):
""" wipe out jobs and their requests from the system
TODO: should check request status, maybe FTS files as well ???
:param self: self reference
    :param list transJobIDs: job IDs
"""
# Prevent 0 job IDs
jobIDs = [ int( j ) for j in transJobIDs if int( j ) ]
allRemove = True
for jobList in breakListIntoChunks( jobIDs, 500 ):
res = self.wmsClient.killJob( jobList )
if res['OK']:
self.log.info( "Successfully killed %d jobs from WMS" % len( jobList ) )
elif ( "InvalidJobIDs" in res ) and ( "NonauthorizedJobIDs" not in res ) and ( "FailedJobIDs" not in res ):
self.log.info( "Found %s jobs which did not exist in the WMS" % len( res['InvalidJobIDs'] ) )
elif "NonauthorizedJobIDs" in res:
self.log.error( "Failed to kill %s jobs because not authorized" % len( res['NonauthorizedJobIDs'] ) )
allRemove = False
elif "FailedJobIDs" in res:
self.log.error( "Failed to kill %s jobs" % len( res['FailedJobIDs'] ) )
allRemove = False
res = self.wmsClient.deleteJob( jobList )
if res['OK']:
self.log.info( "Successfully removed %d jobs from WMS" % len( jobList ) )
elif ( "InvalidJobIDs" in res ) and ( "NonauthorizedJobIDs" not in res ) and ( "FailedJobIDs" not in res ):
self.log.info( "Found %s jobs which did not exist in the WMS" % len( res['InvalidJobIDs'] ) )
elif "NonauthorizedJobIDs" in res:
self.log.error( "Failed to remove %s jobs because not authorized" % len( res['NonauthorizedJobIDs'] ) )
allRemove = False
elif "FailedJobIDs" in res:
self.log.error( "Failed to remove %s jobs" % len( res['FailedJobIDs'] ) )
allRemove = False
if not allRemove:
return S_ERROR( "Failed to remove all remnants from WMS" )
self.log.info( "Successfully removed all tasks from the WMS" )
if not jobIDs:
self.log.info( "JobIDs not present, unable to remove asociated requests." )
return S_OK()
failed = 0
failoverRequests = {}
res = self.reqClient.getRequestIDsForJobs( jobIDs )
if not res['OK']:
self.log.error( "Failed to get requestID for jobs.", res['Message'] )
return res
failoverRequests.update( res['Value']['Successful'] )
if not failoverRequests:
return S_OK()
for jobID, requestID in res['Value']['Successful'].items():
# Put this check just in case, tasks must have associated jobs
if jobID == 0 or jobID == '0':
continue
res = self.reqClient.deleteRequest( requestID )
if not res['OK']:
self.log.error( "Failed to remove request from RequestDB", res['Message'] )
failed += 1
else:
self.log.verbose( "Removed request %s associated to job %d." % ( requestID, jobID ) )
if failed:
self.log.info( "Successfully removed %s requests" % ( len( failoverRequests ) - failed ) )
self.log.info( "Failed to remove %s requests" % failed )
return S_ERROR( "Failed to remove all the request from RequestDB" )
self.log.info( "Successfully removed all the associated failover requests" )
return S_OK()
| fibbo/DIRAC | TransformationSystem/Agent/TransformationCleaningAgent.py | Python | gpl-3.0 | 24,704 | ["DIRAC"] | f1dd4740045224448ed2391e6f69b0dab211475e03d19f22240e8e54a181aa9b |
#!/usr/bin/env python
#
# Copyright 2008 Jose Fonseca
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''Visualize dot graphs via the xdot format.'''
__author__ = "Jose Fonseca et al"
import os
import sys
import subprocess
import math
import colorsys
import time
import re
import optparse
import gobject
import gtk
import gtk.gdk
import gtk.keysyms
import cairo
import pango
import pangocairo
# See http://www.graphviz.org/pub/scm/graphviz-cairo/plugin/cairo/gvrender_cairo.c
# For pygtk inspiration and guidance see:
# - http://mirageiv.berlios.de/
# - http://comix.sourceforge.net/
class Pen:
"""Store pen attributes."""
def __init__(self):
# set default attributes
self.color = (0.0, 0.0, 0.0, 1.0)
self.fillcolor = (0.0, 0.0, 0.0, 1.0)
self.linewidth = 1.0
self.fontsize = 14.0
self.fontname = "Times-Roman"
self.dash = ()
def copy(self):
"""Create a copy of this pen."""
pen = Pen()
pen.__dict__ = self.__dict__.copy()
return pen
def highlighted(self):
pen = self.copy()
pen.color = (1, 0, 0, 1)
pen.fillcolor = (1, .8, .8, 1)
return pen
class Shape:
"""Abstract base class for all the drawing shapes."""
def __init__(self):
pass
def draw(self, cr, highlight=False):
"""Draw this shape with the given cairo context"""
raise NotImplementedError
def select_pen(self, highlight):
if highlight:
if not hasattr(self, 'highlight_pen'):
self.highlight_pen = self.pen.highlighted()
return self.highlight_pen
else:
return self.pen
def search_text(self, regexp):
return False
class TextShape(Shape):
LEFT, CENTER, RIGHT = -1, 0, 1
def __init__(self, pen, x, y, j, w, t):
Shape.__init__(self)
self.pen = pen.copy()
self.x = x
self.y = y
self.j = j
self.w = w
self.t = t
def draw(self, cr, highlight=False):
try:
layout = self.layout
except AttributeError:
layout = cr.create_layout()
# set font options
# see http://lists.freedesktop.org/archives/cairo/2007-February/009688.html
context = layout.get_context()
fo = cairo.FontOptions()
fo.set_antialias(cairo.ANTIALIAS_DEFAULT)
fo.set_hint_style(cairo.HINT_STYLE_NONE)
fo.set_hint_metrics(cairo.HINT_METRICS_OFF)
try:
pangocairo.context_set_font_options(context, fo)
except TypeError:
# XXX: Some broken pangocairo bindings show the error
# 'TypeError: font_options must be a cairo.FontOptions or None'
pass
# set font
font = pango.FontDescription()
font.set_family(self.pen.fontname)
font.set_absolute_size(self.pen.fontsize*pango.SCALE)
layout.set_font_description(font)
# set text
layout.set_text(self.t)
# cache it
self.layout = layout
else:
cr.update_layout(layout)
descent = 2 # XXX get descender from font metrics
width, height = layout.get_size()
width = float(width)/pango.SCALE
height = float(height)/pango.SCALE
# we know the width that dot thinks this text should have
# we do not necessarily have a font with the same metrics
# scale it so that the text fits inside its box
if width > self.w:
f = self.w / width
width = self.w # equivalent to width *= f
height *= f
descent *= f
else:
f = 1.0
if self.j == self.LEFT:
x = self.x
elif self.j == self.CENTER:
x = self.x - 0.5*width
elif self.j == self.RIGHT:
x = self.x - width
else:
assert 0
y = self.y - height + descent
cr.move_to(x, y)
cr.save()
cr.scale(f, f)
cr.set_source_rgba(*self.select_pen(highlight).color)
cr.show_layout(layout)
cr.restore()
if 0: # DEBUG
# show where dot thinks the text should appear
cr.set_source_rgba(1, 0, 0, .9)
if self.j == self.LEFT:
x = self.x
elif self.j == self.CENTER:
x = self.x - 0.5*self.w
elif self.j == self.RIGHT:
x = self.x - self.w
cr.move_to(x, self.y)
cr.line_to(x+self.w, self.y)
cr.stroke()
def search_text(self, regexp):
return regexp.search(self.t) is not None
class ImageShape(Shape):
def __init__(self, pen, x0, y0, w, h, path):
Shape.__init__(self)
self.pen = pen.copy()
self.x0 = x0
self.y0 = y0
self.w = w
self.h = h
self.path = path
def draw(self, cr, highlight=False):
cr2 = gtk.gdk.CairoContext(cr)
pixbuf = gtk.gdk.pixbuf_new_from_file(self.path)
sx = float(self.w)/float(pixbuf.get_width())
sy = float(self.h)/float(pixbuf.get_height())
cr.save()
cr.translate(self.x0, self.y0 - self.h)
cr.scale(sx, sy)
cr2.set_source_pixbuf(pixbuf, 0, 0)
cr2.paint()
cr.restore()
class EllipseShape(Shape):
def __init__(self, pen, x0, y0, w, h, filled=False):
Shape.__init__(self)
self.pen = pen.copy()
self.x0 = x0
self.y0 = y0
self.w = w
self.h = h
self.filled = filled
def draw(self, cr, highlight=False):
cr.save()
cr.translate(self.x0, self.y0)
cr.scale(self.w, self.h)
cr.move_to(1.0, 0.0)
cr.arc(0.0, 0.0, 1.0, 0, 2.0*math.pi)
cr.restore()
pen = self.select_pen(highlight)
if self.filled:
cr.set_source_rgba(*pen.fillcolor)
cr.fill()
else:
cr.set_dash(pen.dash)
cr.set_line_width(pen.linewidth)
cr.set_source_rgba(*pen.color)
cr.stroke()
class PolygonShape(Shape):
def __init__(self, pen, points, filled=False):
Shape.__init__(self)
self.pen = pen.copy()
self.points = points
self.filled = filled
def draw(self, cr, highlight=False):
x0, y0 = self.points[-1]
cr.move_to(x0, y0)
for x, y in self.points:
cr.line_to(x, y)
cr.close_path()
pen = self.select_pen(highlight)
if self.filled:
cr.set_source_rgba(*pen.fillcolor)
cr.fill_preserve()
cr.fill()
else:
cr.set_dash(pen.dash)
cr.set_line_width(pen.linewidth)
cr.set_source_rgba(*pen.color)
cr.stroke()
class LineShape(Shape):
def __init__(self, pen, points):
Shape.__init__(self)
self.pen = pen.copy()
self.points = points
def draw(self, cr, highlight=False):
x0, y0 = self.points[0]
cr.move_to(x0, y0)
for x1, y1 in self.points[1:]:
cr.line_to(x1, y1)
pen = self.select_pen(highlight)
cr.set_dash(pen.dash)
cr.set_line_width(pen.linewidth)
cr.set_source_rgba(*pen.color)
cr.stroke()
class BezierShape(Shape):
def __init__(self, pen, points, filled=False):
Shape.__init__(self)
self.pen = pen.copy()
self.points = points
self.filled = filled
def draw(self, cr, highlight=False):
x0, y0 = self.points[0]
cr.move_to(x0, y0)
for i in xrange(1, len(self.points), 3):
x1, y1 = self.points[i]
x2, y2 = self.points[i + 1]
x3, y3 = self.points[i + 2]
cr.curve_to(x1, y1, x2, y2, x3, y3)
pen = self.select_pen(highlight)
if self.filled:
cr.set_source_rgba(*pen.fillcolor)
cr.fill_preserve()
cr.fill()
else:
cr.set_dash(pen.dash)
cr.set_line_width(pen.linewidth)
cr.set_source_rgba(*pen.color)
cr.stroke()
class CompoundShape(Shape):
def __init__(self, shapes):
Shape.__init__(self)
self.shapes = shapes
def draw(self, cr, highlight=False):
for shape in self.shapes:
shape.draw(cr, highlight=highlight)
def search_text(self, regexp):
for shape in self.shapes:
if shape.search_text(regexp):
return True
return False
class Url(object):
def __init__(self, item, url, highlight=None):
self.item = item
self.url = url
if highlight is None:
highlight = set([item])
self.highlight = highlight
class Jump(object):
def __init__(self, item, x, y, highlight=None):
self.item = item
self.x = x
self.y = y
if highlight is None:
highlight = set([item])
self.highlight = highlight
class Element(CompoundShape):
"""Base class for graph nodes and edges."""
def __init__(self, shapes):
CompoundShape.__init__(self, shapes)
def is_inside(self, x, y):
return False
def get_url(self, x, y):
return None
def get_jump(self, x, y):
return None
class Node(Element):
def __init__(self, id, x, y, w, h, shapes, url):
Element.__init__(self, shapes)
self.id = id
self.x = x
self.y = y
self.x1 = x - 0.5*w
self.y1 = y - 0.5*h
self.x2 = x + 0.5*w
self.y2 = y + 0.5*h
self.url = url
def is_inside(self, x, y):
return self.x1 <= x and x <= self.x2 and self.y1 <= y and y <= self.y2
def get_url(self, x, y):
if self.url is None:
return None
if self.is_inside(x, y):
return Url(self, self.url)
return None
def get_jump(self, x, y):
if self.is_inside(x, y):
return Jump(self, self.x, self.y)
return None
def __repr__(self):
return "<Node %s>" % self.id
def square_distance(x1, y1, x2, y2):
deltax = x2 - x1
deltay = y2 - y1
return deltax*deltax + deltay*deltay
class Edge(Element):
def __init__(self, src, dst, points, shapes):
Element.__init__(self, shapes)
self.src = src
self.dst = dst
self.points = points
RADIUS = 10
def is_inside_begin(self, x, y):
return square_distance(x, y, *self.points[0]) <= self.RADIUS*self.RADIUS
def is_inside_end(self, x, y):
return square_distance(x, y, *self.points[-1]) <= self.RADIUS*self.RADIUS
def is_inside(self, x, y):
if self.is_inside_begin(x, y):
return True
if self.is_inside_end(x, y):
return True
return False
def get_jump(self, x, y):
if self.is_inside_begin(x, y):
return Jump(self, self.dst.x, self.dst.y, highlight=set([self, self.dst]))
if self.is_inside_end(x, y):
return Jump(self, self.src.x, self.src.y, highlight=set([self, self.src]))
return None
def __repr__(self):
return "<Edge %s -> %s>" % (self.src, self.dst)
class Graph(Shape):
def __init__(self, width=1, height=1, shapes=(), nodes=(), edges=()):
Shape.__init__(self)
self.width = width
self.height = height
self.shapes = shapes
self.nodes = nodes
self.edges = edges
def get_size(self):
return self.width, self.height
def draw(self, cr, highlight_items=None):
if highlight_items is None:
highlight_items = ()
cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
cr.set_line_cap(cairo.LINE_CAP_BUTT)
cr.set_line_join(cairo.LINE_JOIN_MITER)
for shape in self.shapes:
shape.draw(cr)
for edge in self.edges:
edge.draw(cr, highlight=(edge in highlight_items))
for node in self.nodes:
node.draw(cr, highlight=(node in highlight_items))
def get_element(self, x, y):
for node in self.nodes:
if node.is_inside(x, y):
return node
for edge in self.edges:
if edge.is_inside(x, y):
return edge
def get_url(self, x, y):
for node in self.nodes:
url = node.get_url(x, y)
if url is not None:
return url
return None
def get_jump(self, x, y):
for edge in self.edges:
jump = edge.get_jump(x, y)
if jump is not None:
return jump
for node in self.nodes:
jump = node.get_jump(x, y)
if jump is not None:
return jump
return None
BOLD = 1
ITALIC = 2
UNDERLINE = 4
SUPERSCRIPT = 8
SUBSCRIPT = 16
STRIKE_THROUGH = 32
class XDotAttrParser:
"""Parser for xdot drawing attributes.
See also:
- http://www.graphviz.org/doc/info/output.html#d:xdot
"""
def __init__(self, parser, buf):
self.parser = parser
self.buf = buf
self.pos = 0
self.pen = Pen()
self.shapes = []
def __nonzero__(self):
return self.pos < len(self.buf)
def read_code(self):
pos = self.buf.find(" ", self.pos)
res = self.buf[self.pos:pos]
self.pos = pos + 1
while self.pos < len(self.buf) and self.buf[self.pos].isspace():
self.pos += 1
return res
def read_int(self):
return int(self.read_code())
def read_float(self):
return float(self.read_code())
def read_point(self):
x = self.read_float()
y = self.read_float()
return self.transform(x, y)
def read_text(self):
num = self.read_int()
pos = self.buf.find("-", self.pos) + 1
self.pos = pos + num
res = self.buf[pos:self.pos]
while self.pos < len(self.buf) and self.buf[self.pos].isspace():
self.pos += 1
return res
def read_polygon(self):
n = self.read_int()
p = []
for i in range(n):
x, y = self.read_point()
p.append((x, y))
return p
def read_color(self):
# See http://www.graphviz.org/doc/info/attrs.html#k:color
c = self.read_text()
c1 = c[:1]
if c1 == '#':
hex2float = lambda h: float(int(h, 16)/255.0)
r = hex2float(c[1:3])
g = hex2float(c[3:5])
b = hex2float(c[5:7])
try:
a = hex2float(c[7:9])
except (IndexError, ValueError):
a = 1.0
return r, g, b, a
elif c1.isdigit() or c1 == ".":
# "H,S,V" or "H S V" or "H, S, V" or any other variation
h, s, v = map(float, c.replace(",", " ").split())
r, g, b = colorsys.hsv_to_rgb(h, s, v)
a = 1.0
return r, g, b, a
elif c1 == "[":
sys.stderr.write('warning: color gradients not supported yet\n')
return None
else:
return self.lookup_color(c)
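    # Illustrative note (not part of the original module): read_color accepts
    # "#RRGGBB" or "#RRGGBBAA" hex strings, "H,S,V" triples and named colors,
    # and returns an (r, g, b, a) tuple of floats in [0, 1]; for example
    # "#ff0000" yields (1.0, 0.0, 0.0, 1.0).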
def lookup_color(self, c):
try:
color = gtk.gdk.color_parse(c)
except ValueError:
pass
else:
s = 1.0/65535.0
r = color.red*s
g = color.green*s
b = color.blue*s
a = 1.0
return r, g, b, a
try:
dummy, scheme, index = c.split('/')
r, g, b = brewer_colors[scheme][int(index)]
except (ValueError, KeyError):
pass
else:
s = 1.0/255.0
r = r*s
g = g*s
b = b*s
a = 1.0
return r, g, b, a
sys.stderr.write("warning: unknown color '%s'\n" % c)
return None
def parse(self):
s = self
while s:
op = s.read_code()
if op == "c":
color = s.read_color()
if color is not None:
self.handle_color(color, filled=False)
elif op == "C":
color = s.read_color()
if color is not None:
self.handle_color(color, filled=True)
elif op == "S":
# http://www.graphviz.org/doc/info/attrs.html#k:style
style = s.read_text()
if style.startswith("setlinewidth("):
lw = style.split("(")[1].split(")")[0]
lw = float(lw)
self.handle_linewidth(lw)
elif style in ("solid", "dashed", "dotted"):
self.handle_linestyle(style)
elif op == "F":
size = s.read_float()
name = s.read_text()
self.handle_font(size, name)
elif op == "T":
x, y = s.read_point()
j = s.read_int()
w = s.read_float()
t = s.read_text()
self.handle_text(x, y, j, w, t)
elif op == "t":
f = s.read_int()
self.handle_font_characteristics(f)
elif op == "E":
x0, y0 = s.read_point()
w = s.read_float()
h = s.read_float()
self.handle_ellipse(x0, y0, w, h, filled=True)
elif op == "e":
x0, y0 = s.read_point()
w = s.read_float()
h = s.read_float()
self.handle_ellipse(x0, y0, w, h, filled=False)
elif op == "L":
points = self.read_polygon()
self.handle_line(points)
elif op == "B":
points = self.read_polygon()
self.handle_bezier(points, filled=False)
elif op == "b":
points = self.read_polygon()
self.handle_bezier(points, filled=True)
elif op == "P":
points = self.read_polygon()
self.handle_polygon(points, filled=True)
elif op == "p":
points = self.read_polygon()
self.handle_polygon(points, filled=False)
elif op == "I":
x0, y0 = s.read_point()
w = s.read_float()
h = s.read_float()
path = s.read_text()
self.handle_image(x0, y0, w, h, path)
else:
sys.stderr.write("error: unknown xdot opcode '%s'\n" % op)
sys.exit(1)
return self.shapes
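    # Illustrative note (not part of the original module): parse() walks the
    # xdot operation stream one opcode at a time; e.g. in the attribute value
    # "c 7 -#ff0000 e 27 90 27 18" the 'c' op sets the stroke color to red and
    # the 'e' op draws an unfilled ellipse centred at point (27, 90).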
def transform(self, x, y):
return self.parser.transform(x, y)
def handle_color(self, color, filled=False):
if filled:
self.pen.fillcolor = color
else:
self.pen.color = color
def handle_linewidth(self, linewidth):
self.pen.linewidth = linewidth
def handle_linestyle(self, style):
if style == "solid":
self.pen.dash = ()
elif style == "dashed":
self.pen.dash = (6, ) # 6pt on, 6pt off
elif style == "dotted":
self.pen.dash = (2, 4) # 2pt on, 4pt off
def handle_font(self, size, name):
self.pen.fontsize = size
self.pen.fontname = name
def handle_font_characteristics(self, flags):
# TODO
if flags != 0:
sys.stderr.write("warning: font characteristics not supported yet\n" % op)
def handle_text(self, x, y, j, w, t):
self.shapes.append(TextShape(self.pen, x, y, j, w, t))
def handle_ellipse(self, x0, y0, w, h, filled=False):
if filled:
# xdot uses this to mean "draw a filled shape with an outline"
self.shapes.append(EllipseShape(self.pen, x0, y0, w, h, filled=True))
self.shapes.append(EllipseShape(self.pen, x0, y0, w, h))
def handle_image(self, x0, y0, w, h, path):
self.shapes.append(ImageShape(self.pen, x0, y0, w, h, path))
def handle_line(self, points):
self.shapes.append(LineShape(self.pen, points))
def handle_bezier(self, points, filled=False):
if filled:
# xdot uses this to mean "draw a filled shape with an outline"
self.shapes.append(BezierShape(self.pen, points, filled=True))
self.shapes.append(BezierShape(self.pen, points))
def handle_polygon(self, points, filled=False):
if filled:
# xdot uses this to mean "draw a filled shape with an outline"
self.shapes.append(PolygonShape(self.pen, points, filled=True))
self.shapes.append(PolygonShape(self.pen, points))
EOF = -1
SKIP = -2
class ParseError(Exception):
def __init__(self, msg=None, filename=None, line=None, col=None):
self.msg = msg
self.filename = filename
self.line = line
self.col = col
def __str__(self):
        return ':'.join([str(part) for part in (self.filename, self.line, self.col, self.msg) if part is not None])
class Scanner:
"""Stateless scanner."""
    # should be overridden by derived classes
tokens = []
symbols = {}
literals = {}
ignorecase = False
def __init__(self):
flags = re.DOTALL
if self.ignorecase:
flags |= re.IGNORECASE
self.tokens_re = re.compile(
'|'.join(['(' + regexp + ')' for type, regexp, test_lit in self.tokens]),
flags
)
def next(self, buf, pos):
if pos >= len(buf):
return EOF, '', pos
mo = self.tokens_re.match(buf, pos)
if mo:
text = mo.group()
type, regexp, test_lit = self.tokens[mo.lastindex - 1]
pos = mo.end()
if test_lit:
type = self.literals.get(text, type)
return type, text, pos
else:
c = buf[pos]
return self.symbols.get(c, None), c, pos + 1
class Token:
def __init__(self, type, text, line, col):
self.type = type
self.text = text
self.line = line
self.col = col
class Lexer:
    # should be overridden by derived classes
scanner = None
tabsize = 8
newline_re = re.compile(r'\r\n?|\n')
def __init__(self, buf = None, pos = 0, filename = None, fp = None):
if fp is not None:
try:
fileno = fp.fileno()
length = os.path.getsize(fp.name)
import mmap
except:
# read whole file into memory
buf = fp.read()
pos = 0
else:
# map the whole file into memory
if length:
# length must not be zero
buf = mmap.mmap(fileno, length, access = mmap.ACCESS_READ)
pos = os.lseek(fileno, 0, 1)
else:
buf = ''
pos = 0
if filename is None:
try:
filename = fp.name
except AttributeError:
filename = None
self.buf = buf
self.pos = pos
self.line = 1
self.col = 1
self.filename = filename
def next(self):
while True:
# save state
pos = self.pos
line = self.line
col = self.col
type, text, endpos = self.scanner.next(self.buf, pos)
assert pos + len(text) == endpos
self.consume(text)
type, text = self.filter(type, text)
self.pos = endpos
if type == SKIP:
continue
elif type is None:
msg = 'unexpected char '
if text >= ' ' and text <= '~':
msg += "'%s'" % text
else:
msg += "0x%X" % ord(text)
raise ParseError(msg, self.filename, line, col)
else:
break
return Token(type = type, text = text, line = line, col = col)
def consume(self, text):
# update line number
pos = 0
for mo in self.newline_re.finditer(text, pos):
self.line += 1
self.col = 1
pos = mo.end()
# update column number
while True:
tabpos = text.find('\t', pos)
if tabpos == -1:
break
self.col += tabpos - pos
self.col = ((self.col - 1)//self.tabsize + 1)*self.tabsize + 1
pos = tabpos + 1
self.col += len(text) - pos
class Parser:
def __init__(self, lexer):
self.lexer = lexer
self.lookahead = self.lexer.next()
def match(self, type):
if self.lookahead.type != type:
raise ParseError(
msg = 'unexpected token %r' % self.lookahead.text,
filename = self.lexer.filename,
line = self.lookahead.line,
col = self.lookahead.col)
def skip(self, type):
while self.lookahead.type != type:
self.consume()
def consume(self):
token = self.lookahead
self.lookahead = self.lexer.next()
return token
ID = 0
STR_ID = 1
HTML_ID = 2
EDGE_OP = 3
LSQUARE = 4
RSQUARE = 5
LCURLY = 6
RCURLY = 7
COMMA = 8
COLON = 9
SEMI = 10
EQUAL = 11
PLUS = 12
STRICT = 13
GRAPH = 14
DIGRAPH = 15
NODE = 16
EDGE = 17
SUBGRAPH = 18
class DotScanner(Scanner):
# token regular expression table
tokens = [
# whitespace and comments
(SKIP,
r'[ \t\f\r\n\v]+|'
r'//[^\r\n]*|'
r'/\*.*?\*/|'
r'#[^\r\n]*',
False),
# Alphanumeric IDs
(ID, r'[a-zA-Z_\x80-\xff][a-zA-Z0-9_\x80-\xff]*', True),
# Numeric IDs
(ID, r'-?(?:\.[0-9]+|[0-9]+(?:\.[0-9]*)?)', False),
# String IDs
(STR_ID, r'"[^"\\]*(?:\\.[^"\\]*)*"', False),
# HTML IDs
(HTML_ID, r'<[^<>]*(?:<[^<>]*>[^<>]*)*>', False),
# Edge operators
(EDGE_OP, r'-[>-]', False),
]
# symbol table
symbols = {
'[': LSQUARE,
']': RSQUARE,
'{': LCURLY,
'}': RCURLY,
',': COMMA,
':': COLON,
';': SEMI,
'=': EQUAL,
'+': PLUS,
}
# literal table
literals = {
'strict': STRICT,
'graph': GRAPH,
'digraph': DIGRAPH,
'node': NODE,
'edge': EDGE,
'subgraph': SUBGRAPH,
}
ignorecase = True
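# Illustrative sketch (not part of the original module): driving DotScanner
# directly. Scanner.next() returns (token_type, text, new_position); SKIP
# tokens (whitespace/comments) are normally filtered out by the Lexer.
def _demo_scan(buf='digraph g { a -> b; }'):
    scanner = DotScanner()
    pos, tokens = 0, []
    while True:
        type, text, pos = scanner.next(buf, pos)
        if type == EOF:
            break
        if type != SKIP:
            tokens.append((type, text))
    return tokens  # [(DIGRAPH, 'digraph'), (ID, 'g'), (LCURLY, '{'), ...]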
class DotLexer(Lexer):
scanner = DotScanner()
def filter(self, type, text):
# TODO: handle charset
if type == STR_ID:
text = text[1:-1]
# line continuations
text = text.replace('\\\r\n', '')
text = text.replace('\\\r', '')
text = text.replace('\\\n', '')
# quotes
text = text.replace('\\"', '"')
# layout engines recognize other escape codes (many non-standard)
# but we don't translate them here
type = ID
elif type == HTML_ID:
text = text[1:-1]
type = ID
return type, text
class DotParser(Parser):
def __init__(self, lexer):
Parser.__init__(self, lexer)
self.graph_attrs = {}
self.node_attrs = {}
self.edge_attrs = {}
def parse(self):
self.parse_graph()
self.match(EOF)
def parse_graph(self):
if self.lookahead.type == STRICT:
self.consume()
self.skip(LCURLY)
self.consume()
while self.lookahead.type != RCURLY:
self.parse_stmt()
self.consume()
def parse_subgraph(self):
id = None
if self.lookahead.type == SUBGRAPH:
self.consume()
if self.lookahead.type == ID:
id = self.lookahead.text
self.consume()
if self.lookahead.type == LCURLY:
self.consume()
while self.lookahead.type != RCURLY:
self.parse_stmt()
self.consume()
return id
def parse_stmt(self):
if self.lookahead.type == GRAPH:
self.consume()
attrs = self.parse_attrs()
self.graph_attrs.update(attrs)
self.handle_graph(attrs)
elif self.lookahead.type == NODE:
self.consume()
self.node_attrs.update(self.parse_attrs())
elif self.lookahead.type == EDGE:
self.consume()
self.edge_attrs.update(self.parse_attrs())
elif self.lookahead.type in (SUBGRAPH, LCURLY):
self.parse_subgraph()
else:
id = self.parse_node_id()
if self.lookahead.type == EDGE_OP:
self.consume()
node_ids = [id, self.parse_node_id()]
                while self.lookahead.type == EDGE_OP:
                    # consume each chained edge operator before the next node
                    self.consume()
                    node_ids.append(self.parse_node_id())
attrs = self.parse_attrs()
for i in range(0, len(node_ids) - 1):
self.handle_edge(node_ids[i], node_ids[i + 1], attrs)
elif self.lookahead.type == EQUAL:
self.consume()
self.parse_id()
else:
attrs = self.parse_attrs()
self.handle_node(id, attrs)
if self.lookahead.type == SEMI:
self.consume()
def parse_attrs(self):
attrs = {}
while self.lookahead.type == LSQUARE:
self.consume()
while self.lookahead.type != RSQUARE:
name, value = self.parse_attr()
attrs[name] = value
if self.lookahead.type == COMMA:
self.consume()
self.consume()
return attrs
def parse_attr(self):
name = self.parse_id()
if self.lookahead.type == EQUAL:
self.consume()
value = self.parse_id()
else:
value = 'true'
return name, value
def parse_node_id(self):
node_id = self.parse_id()
if self.lookahead.type == COLON:
self.consume()
port = self.parse_id()
if self.lookahead.type == COLON:
self.consume()
compass_pt = self.parse_id()
else:
compass_pt = None
else:
port = None
compass_pt = None
# XXX: we don't really care about port and compass point values when parsing xdot
return node_id
def parse_id(self):
self.match(ID)
id = self.lookahead.text
self.consume()
return id
def handle_graph(self, attrs):
pass
def handle_node(self, id, attrs):
pass
def handle_edge(self, src_id, dst_id, attrs):
pass
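# Subclassing sketch: DotParser deliberately leaves the handle_* hooks as
# no-ops; concrete parsers such as XDotParser below override them.  A
# hypothetical parser that merely counts node statements:
class _NodeCountParser(DotParser):
    def __init__(self, lexer):
        DotParser.__init__(self, lexer)
        self.node_count = 0
    def handle_node(self, id, attrs):
        self.node_count += 1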
class XDotParser(DotParser):
XDOTVERSION = '1.6'
def __init__(self, xdotcode):
lexer = DotLexer(buf = xdotcode)
DotParser.__init__(self, lexer)
self.nodes = []
self.edges = []
self.shapes = []
self.node_by_name = {}
self.top_graph = True
def handle_graph(self, attrs):
if self.top_graph:
# Check xdot version
try:
xdotversion = attrs['xdotversion']
except KeyError:
pass
else:
if float(xdotversion) > float(self.XDOTVERSION):
                    sys.stderr.write('warning: xdot version %s, but only %s is supported\n' % (xdotversion, self.XDOTVERSION))
# Parse bounding box
try:
bb = attrs['bb']
except KeyError:
return
if bb:
xmin, ymin, xmax, ymax = map(float, bb.split(","))
self.xoffset = -xmin
self.yoffset = -ymax
self.xscale = 1.0
self.yscale = -1.0
# FIXME: scale from points to pixels
self.width = max(xmax - xmin, 1)
self.height = max(ymax - ymin, 1)
self.top_graph = False
for attr in ("_draw_", "_ldraw_", "_hdraw_", "_tdraw_", "_hldraw_", "_tldraw_"):
if attr in attrs:
parser = XDotAttrParser(self, attrs[attr])
self.shapes.extend(parser.parse())
def handle_node(self, id, attrs):
try:
pos = attrs['pos']
except KeyError:
return
x, y = self.parse_node_pos(pos)
w = float(attrs.get('width', 0))*72
h = float(attrs.get('height', 0))*72
shapes = []
for attr in ("_draw_", "_ldraw_"):
if attr in attrs:
parser = XDotAttrParser(self, attrs[attr])
shapes.extend(parser.parse())
url = attrs.get('URL', None)
node = Node(id, x, y, w, h, shapes, url)
self.node_by_name[id] = node
if shapes:
self.nodes.append(node)
def handle_edge(self, src_id, dst_id, attrs):
try:
pos = attrs['pos']
except KeyError:
return
points = self.parse_edge_pos(pos)
shapes = []
for attr in ("_draw_", "_ldraw_", "_hdraw_", "_tdraw_", "_hldraw_", "_tldraw_"):
if attr in attrs:
parser = XDotAttrParser(self, attrs[attr])
shapes.extend(parser.parse())
if shapes:
src = self.node_by_name[src_id]
dst = self.node_by_name[dst_id]
self.edges.append(Edge(src, dst, points, shapes))
def parse(self):
DotParser.parse(self)
return Graph(self.width, self.height, self.shapes, self.nodes, self.edges)
def parse_node_pos(self, pos):
x, y = pos.split(",")
return self.transform(float(x), float(y))
def parse_edge_pos(self, pos):
points = []
for entry in pos.split(' '):
fields = entry.split(',')
try:
x, y = fields
except ValueError:
# TODO: handle start/end points
continue
else:
points.append(self.transform(float(x), float(y)))
return points
def transform(self, x, y):
# XXX: this is not the right place for this code
x = (x + self.xoffset)*self.xscale
y = (y + self.yoffset)*self.yscale
return x, y
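# Usage sketch (hypothetical, mirrors what DotWidget.set_xdotcode does below):
# feed xdot-format text straight into XDotParser to obtain a drawable Graph.
def _example_parse_xdot(xdotcode):
    parser = XDotParser(xdotcode)
    return parser.parse()  # Graph(width, height, shapes, nodes, edges)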
class Animation(object):
step = 0.03 # seconds
def __init__(self, dot_widget):
self.dot_widget = dot_widget
self.timeout_id = None
def start(self):
self.timeout_id = gobject.timeout_add(int(self.step * 1000), self.tick)
def stop(self):
self.dot_widget.animation = NoAnimation(self.dot_widget)
if self.timeout_id is not None:
gobject.source_remove(self.timeout_id)
self.timeout_id = None
def tick(self):
self.stop()
class NoAnimation(Animation):
def start(self):
pass
def stop(self):
pass
class LinearAnimation(Animation):
duration = 0.6
def start(self):
self.started = time.time()
Animation.start(self)
def tick(self):
t = (time.time() - self.started) / self.duration
self.animate(max(0, min(t, 1)))
return (t < 1)
def animate(self, t):
pass
class MoveToAnimation(LinearAnimation):
def __init__(self, dot_widget, target_x, target_y):
Animation.__init__(self, dot_widget)
self.source_x = dot_widget.x
self.source_y = dot_widget.y
self.target_x = target_x
self.target_y = target_y
def animate(self, t):
sx, sy = self.source_x, self.source_y
tx, ty = self.target_x, self.target_y
self.dot_widget.x = tx * t + sx * (1-t)
self.dot_widget.y = ty * t + sy * (1-t)
self.dot_widget.queue_draw()
class ZoomToAnimation(MoveToAnimation):
def __init__(self, dot_widget, target_x, target_y):
MoveToAnimation.__init__(self, dot_widget, target_x, target_y)
self.source_zoom = dot_widget.zoom_ratio
self.target_zoom = self.source_zoom
self.extra_zoom = 0
middle_zoom = 0.5 * (self.source_zoom + self.target_zoom)
distance = math.hypot(self.source_x - self.target_x,
self.source_y - self.target_y)
rect = self.dot_widget.get_allocation()
visible = min(rect.width, rect.height) / self.dot_widget.zoom_ratio
visible *= 0.9
if distance > 0:
desired_middle_zoom = visible / distance
self.extra_zoom = min(0, 4 * (desired_middle_zoom - middle_zoom))
def animate(self, t):
a, b, c = self.source_zoom, self.extra_zoom, self.target_zoom
self.dot_widget.zoom_ratio = c*t + b*t*(1-t) + a*(1-t)
self.dot_widget.zoom_to_fit_on_resize = False
MoveToAnimation.animate(self, t)
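# Sketch: LinearAnimation subclasses only implement animate(t) for t in
# [0, 1]; scheduling and timing are inherited.  A hypothetical linear zoom
# pulse towards 2x:
class _ZoomPulseAnimation(LinearAnimation):
    def animate(self, t):
        self.dot_widget.zoom_ratio = 1.0 + t
        self.dot_widget.queue_draw()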
class DragAction(object):
def __init__(self, dot_widget):
self.dot_widget = dot_widget
def on_button_press(self, event):
self.startmousex = self.prevmousex = event.x
self.startmousey = self.prevmousey = event.y
self.start()
def on_motion_notify(self, event):
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
deltax = self.prevmousex - x
deltay = self.prevmousey - y
self.drag(deltax, deltay)
self.prevmousex = x
self.prevmousey = y
def on_button_release(self, event):
self.stopmousex = event.x
self.stopmousey = event.y
self.stop()
def draw(self, cr):
pass
def start(self):
pass
def drag(self, deltax, deltay):
pass
def stop(self):
pass
def abort(self):
pass
class NullAction(DragAction):
def on_motion_notify(self, event):
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
dot_widget = self.dot_widget
item = dot_widget.get_url(x, y)
if item is None:
item = dot_widget.get_jump(x, y)
if item is not None:
dot_widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.HAND2))
dot_widget.set_highlight(item.highlight)
else:
dot_widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
dot_widget.set_highlight(None)
class PanAction(DragAction):
def start(self):
self.dot_widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
def drag(self, deltax, deltay):
self.dot_widget.x += deltax / self.dot_widget.zoom_ratio
self.dot_widget.y += deltay / self.dot_widget.zoom_ratio
self.dot_widget.queue_draw()
def stop(self):
self.dot_widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
abort = stop
class ZoomAction(DragAction):
def drag(self, deltax, deltay):
self.dot_widget.zoom_ratio *= 1.005 ** (deltax + deltay)
self.dot_widget.zoom_to_fit_on_resize = False
self.dot_widget.queue_draw()
def stop(self):
self.dot_widget.queue_draw()
class ZoomAreaAction(DragAction):
def drag(self, deltax, deltay):
self.dot_widget.queue_draw()
def draw(self, cr):
cr.save()
cr.set_source_rgba(.5, .5, 1.0, 0.25)
cr.rectangle(self.startmousex, self.startmousey,
self.prevmousex - self.startmousex,
self.prevmousey - self.startmousey)
cr.fill()
cr.set_source_rgba(.5, .5, 1.0, 1.0)
cr.set_line_width(1)
cr.rectangle(self.startmousex - .5, self.startmousey - .5,
self.prevmousex - self.startmousex + 1,
self.prevmousey - self.startmousey + 1)
cr.stroke()
cr.restore()
def stop(self):
x1, y1 = self.dot_widget.window2graph(self.startmousex,
self.startmousey)
x2, y2 = self.dot_widget.window2graph(self.stopmousex,
self.stopmousey)
self.dot_widget.zoom_to_area(x1, y1, x2, y2)
def abort(self):
self.dot_widget.queue_draw()
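# Sketch: new drag behaviours subclass DragAction and override start/drag/
# stop; button handling and mouse deltas come for free.  A hypothetical
# action that reports the drag distance on release:
class _MeasureDragAction(DragAction):
    def stop(self):
        dx = self.stopmousex - self.startmousex
        dy = self.stopmousey - self.startmousey
        sys.stderr.write('dragged %.1f px\n' % math.hypot(dx, dy))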
class DotWidget(gtk.DrawingArea):
"""PyGTK widget that draws dot graphs."""
__gsignals__ = {
'expose-event': 'override',
'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, gtk.gdk.Event))
}
filter = 'dot'
def __init__(self):
gtk.DrawingArea.__init__(self)
self.graph = Graph()
self.openfilename = None
self.set_flags(gtk.CAN_FOCUS)
self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
self.connect("button-press-event", self.on_area_button_press)
self.connect("button-release-event", self.on_area_button_release)
self.add_events(gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
self.connect("motion-notify-event", self.on_area_motion_notify)
self.connect("scroll-event", self.on_area_scroll_event)
self.connect("size-allocate", self.on_area_size_allocate)
self.connect('key-press-event', self.on_key_press_event)
self.last_mtime = None
gobject.timeout_add(1000, self.update)
self.x, self.y = 0.0, 0.0
self.zoom_ratio = 1.0
self.zoom_to_fit_on_resize = False
self.animation = NoAnimation(self)
self.drag_action = NullAction(self)
self.presstime = None
self.highlight = None
def set_filter(self, filter):
self.filter = filter
def run_filter(self, dotcode):
if not self.filter:
return dotcode
p = subprocess.Popen(
[self.filter, '-Txdot'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False,
universal_newlines=True
)
xdotcode, error = p.communicate(dotcode)
sys.stderr.write(error)
if p.returncode != 0:
dialog = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
message_format=error,
buttons=gtk.BUTTONS_OK)
dialog.set_title('Dot Viewer')
dialog.run()
dialog.destroy()
return None
return xdotcode
def set_dotcode(self, dotcode, filename=None):
self.openfilename = None
if isinstance(dotcode, unicode):
dotcode = dotcode.encode('utf8')
xdotcode = self.run_filter(dotcode)
if xdotcode is None:
return False
try:
self.set_xdotcode(xdotcode)
except ParseError as ex:
dialog = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
message_format=str(ex),
buttons=gtk.BUTTONS_OK)
dialog.set_title('Dot Viewer')
dialog.run()
dialog.destroy()
return False
else:
if filename is None:
self.last_mtime = None
else:
self.last_mtime = os.stat(filename).st_mtime
self.openfilename = filename
return True
def set_xdotcode(self, xdotcode):
parser = XDotParser(xdotcode)
self.graph = parser.parse()
self.zoom_image(self.zoom_ratio, center=True)
def reload(self):
if self.openfilename is not None:
try:
                fp = open(self.openfilename, 'rt')
self.set_dotcode(fp.read(), self.openfilename)
fp.close()
except IOError:
pass
def update(self):
if self.openfilename is not None:
current_mtime = os.stat(self.openfilename).st_mtime
if current_mtime != self.last_mtime:
self.last_mtime = current_mtime
self.reload()
return True
def do_expose_event(self, event):
cr = self.window.cairo_create()
# set a clip region for the expose event
cr.rectangle(
event.area.x, event.area.y,
event.area.width, event.area.height
)
cr.clip()
cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
cr.paint()
cr.save()
rect = self.get_allocation()
cr.translate(0.5*rect.width, 0.5*rect.height)
cr.scale(self.zoom_ratio, self.zoom_ratio)
cr.translate(-self.x, -self.y)
self.graph.draw(cr, highlight_items=self.highlight)
cr.restore()
self.drag_action.draw(cr)
return False
def get_current_pos(self):
return self.x, self.y
def set_current_pos(self, x, y):
self.x = x
self.y = y
self.queue_draw()
def set_highlight(self, items):
if self.highlight != items:
self.highlight = items
self.queue_draw()
def zoom_image(self, zoom_ratio, center=False, pos=None):
# Constrain zoom ratio to a sane range to prevent numeric instability.
zoom_ratio = min(zoom_ratio, 1E4)
zoom_ratio = max(zoom_ratio, 1E-6)
if center:
self.x = self.graph.width/2
self.y = self.graph.height/2
elif pos is not None:
rect = self.get_allocation()
x, y = pos
x -= 0.5*rect.width
y -= 0.5*rect.height
self.x += x / self.zoom_ratio - x / zoom_ratio
self.y += y / self.zoom_ratio - y / zoom_ratio
self.zoom_ratio = zoom_ratio
self.zoom_to_fit_on_resize = False
self.queue_draw()
def zoom_to_area(self, x1, y1, x2, y2):
rect = self.get_allocation()
width = abs(x1 - x2)
height = abs(y1 - y2)
        if width == 0 or height == 0:
            # degenerate selection; zoom one step instead of dividing by zero
self.zoom_ratio *= self.ZOOM_INCREMENT
else:
self.zoom_ratio = min(
float(rect.width)/float(width),
float(rect.height)/float(height)
)
self.zoom_to_fit_on_resize = False
self.x = (x1 + x2) / 2
self.y = (y1 + y2) / 2
self.queue_draw()
def zoom_to_fit(self):
rect = self.get_allocation()
rect.x += self.ZOOM_TO_FIT_MARGIN
rect.y += self.ZOOM_TO_FIT_MARGIN
rect.width -= 2 * self.ZOOM_TO_FIT_MARGIN
rect.height -= 2 * self.ZOOM_TO_FIT_MARGIN
zoom_ratio = min(
float(rect.width)/float(self.graph.width),
float(rect.height)/float(self.graph.height)
)
self.zoom_image(zoom_ratio, center=True)
self.zoom_to_fit_on_resize = True
ZOOM_INCREMENT = 1.25
ZOOM_TO_FIT_MARGIN = 12
def on_zoom_in(self, action):
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
def on_zoom_out(self, action):
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
def on_zoom_fit(self, action):
self.zoom_to_fit()
def on_zoom_100(self, action):
self.zoom_image(1.0)
POS_INCREMENT = 100
def on_key_press_event(self, widget, event):
if event.keyval == gtk.keysyms.Left:
self.x -= self.POS_INCREMENT/self.zoom_ratio
self.queue_draw()
return True
if event.keyval == gtk.keysyms.Right:
self.x += self.POS_INCREMENT/self.zoom_ratio
self.queue_draw()
return True
if event.keyval == gtk.keysyms.Up:
self.y -= self.POS_INCREMENT/self.zoom_ratio
self.queue_draw()
return True
if event.keyval == gtk.keysyms.Down:
self.y += self.POS_INCREMENT/self.zoom_ratio
self.queue_draw()
return True
if event.keyval in (gtk.keysyms.Page_Up,
gtk.keysyms.plus,
gtk.keysyms.equal,
gtk.keysyms.KP_Add):
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
self.queue_draw()
return True
if event.keyval in (gtk.keysyms.Page_Down,
gtk.keysyms.minus,
gtk.keysyms.KP_Subtract):
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
self.queue_draw()
return True
if event.keyval == gtk.keysyms.Escape:
self.drag_action.abort()
self.drag_action = NullAction(self)
return True
if event.keyval == gtk.keysyms.r:
self.reload()
return True
if event.keyval == gtk.keysyms.f:
win = widget.get_toplevel()
find_toolitem = win.uimanager.get_widget('/ToolBar/Find')
textentry = find_toolitem.get_children()
win.set_focus(textentry[0])
return True
if event.keyval == gtk.keysyms.q:
gtk.main_quit()
return True
if event.keyval == gtk.keysyms.p:
self.on_print()
return True
return False
print_settings = None
def on_print(self, action=None):
print_op = gtk.PrintOperation()
        if self.print_settings is not None:
print_op.set_print_settings(self.print_settings)
print_op.connect("begin_print", self.begin_print)
print_op.connect("draw_page", self.draw_page)
res = print_op.run(gtk.PRINT_OPERATION_ACTION_PRINT_DIALOG, self.parent.parent)
if res == gtk.PRINT_OPERATION_RESULT_APPLY:
            self.print_settings = print_op.get_print_settings()  # remember for next print
def begin_print(self, operation, context):
operation.set_n_pages(1)
return True
def draw_page(self, operation, context, page_nr):
cr = context.get_cairo_context()
rect = self.get_allocation()
cr.translate(0.5*rect.width, 0.5*rect.height)
cr.scale(self.zoom_ratio, self.zoom_ratio)
cr.translate(-self.x, -self.y)
self.graph.draw(cr, highlight_items=self.highlight)
def get_drag_action(self, event):
state = event.state
if event.button in (1, 2): # left or middle button
if state & gtk.gdk.CONTROL_MASK:
return ZoomAction
elif state & gtk.gdk.SHIFT_MASK:
return ZoomAreaAction
else:
return PanAction
return NullAction
def on_area_button_press(self, area, event):
self.animation.stop()
self.drag_action.abort()
action_type = self.get_drag_action(event)
self.drag_action = action_type(self)
self.drag_action.on_button_press(event)
self.presstime = time.time()
self.pressx = event.x
self.pressy = event.y
return False
def is_click(self, event, click_fuzz=4, click_timeout=1.0):
assert event.type == gtk.gdk.BUTTON_RELEASE
if self.presstime is None:
# got a button release without seeing the press?
return False
# XXX instead of doing this complicated logic, shouldn't we listen
# for gtk's clicked event instead?
deltax = self.pressx - event.x
deltay = self.pressy - event.y
return (time.time() < self.presstime + click_timeout
and math.hypot(deltax, deltay) < click_fuzz)
def on_click(self, element, event):
"""Override this method in subclass to process
click events. Note that element can be None
(click on empty space)."""
return False
def on_area_button_release(self, area, event):
self.drag_action.on_button_release(event)
self.drag_action = NullAction(self)
x, y = int(event.x), int(event.y)
if self.is_click(event):
el = self.get_element(x, y)
if self.on_click(el, event):
return True
if event.button == 1:
url = self.get_url(x, y)
if url is not None:
self.emit('clicked', unicode(url.url), event)
else:
jump = self.get_jump(x, y)
if jump is not None:
self.animate_to(jump.x, jump.y)
return True
if event.button == 1 or event.button == 2:
return True
return False
def on_area_scroll_event(self, area, event):
if event.direction == gtk.gdk.SCROLL_UP:
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT,
pos=(event.x, event.y))
return True
if event.direction == gtk.gdk.SCROLL_DOWN:
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT,
pos=(event.x, event.y))
return True
return False
def on_area_motion_notify(self, area, event):
self.drag_action.on_motion_notify(event)
return True
def on_area_size_allocate(self, area, allocation):
if self.zoom_to_fit_on_resize:
self.zoom_to_fit()
def animate_to(self, x, y):
self.animation = ZoomToAnimation(self, x, y)
self.animation.start()
def window2graph(self, x, y):
rect = self.get_allocation()
x -= 0.5*rect.width
y -= 0.5*rect.height
x /= self.zoom_ratio
y /= self.zoom_ratio
x += self.x
y += self.y
return x, y
def get_element(self, x, y):
x, y = self.window2graph(x, y)
return self.graph.get_element(x, y)
def get_url(self, x, y):
x, y = self.window2graph(x, y)
return self.graph.get_url(x, y)
def get_jump(self, x, y):
x, y = self.window2graph(x, y)
return self.graph.get_jump(x, y)
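# Sketch: embedders subclass DotWidget and override on_click (see its
# docstring above).  A hypothetical widget that reports clicked URLs:
class _UrlDotWidget(DotWidget):
    def on_click(self, element, event):
        if element is not None and getattr(element, 'url', None):
            sys.stderr.write('clicked %s\n' % element.url)
            return True
        return False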
class FindMenuToolAction(gtk.Action):
__gtype_name__ = "FindMenuToolAction"
def __init__(self, *args, **kw):
gtk.Action.__init__(self, *args, **kw)
self.set_tool_item_type(gtk.ToolItem)
class DotWindow(gtk.Window):
ui = '''
<ui>
<toolbar name="ToolBar">
<toolitem action="Open"/>
<toolitem action="Reload"/>
<toolitem action="Print"/>
<separator/>
<toolitem action="ZoomIn"/>
<toolitem action="ZoomOut"/>
<toolitem action="ZoomFit"/>
<toolitem action="Zoom100"/>
<separator/>
<toolitem name="Find" action="Find"/>
</toolbar>
</ui>
'''
base_title = 'Dot Viewer'
def __init__(self, widget=None):
gtk.Window.__init__(self)
self.graph = Graph()
window = self
window.set_title(self.base_title)
window.set_default_size(512, 512)
vbox = gtk.VBox()
window.add(vbox)
self.widget = widget or DotWidget()
# Create a UIManager instance
uimanager = self.uimanager = gtk.UIManager()
# Add the accelerator group to the toplevel window
accelgroup = uimanager.get_accel_group()
window.add_accel_group(accelgroup)
# Create an ActionGroup
actiongroup = gtk.ActionGroup('Actions')
self.actiongroup = actiongroup
# Create actions
actiongroup.add_actions((
('Open', gtk.STOCK_OPEN, None, None, None, self.on_open),
('Reload', gtk.STOCK_REFRESH, None, None, None, self.on_reload),
('Print', gtk.STOCK_PRINT, None, None, "Prints the currently visible part of the graph", self.widget.on_print),
('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget.on_zoom_in),
('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget.on_zoom_out),
('ZoomFit', gtk.STOCK_ZOOM_FIT, None, None, None, self.widget.on_zoom_fit),
('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget.on_zoom_100),
))
find_action = FindMenuToolAction("Find", None,
"Find a node by name", None)
actiongroup.add_action(find_action)
# Add the actiongroup to the uimanager
uimanager.insert_action_group(actiongroup, 0)
        # Add a UI description
uimanager.add_ui_from_string(self.ui)
# Create a Toolbar
toolbar = uimanager.get_widget('/ToolBar')
vbox.pack_start(toolbar, False)
vbox.pack_start(self.widget)
self.last_open_dir = "."
self.set_focus(self.widget)
# Add Find text search
find_toolitem = uimanager.get_widget('/ToolBar/Find')
self.textentry = gtk.Entry(max=20)
        self.textentry.set_icon_from_stock(gtk.ENTRY_ICON_PRIMARY, gtk.STOCK_FIND)
find_toolitem.add(self.textentry)
self.textentry.set_activates_default(True)
self.textentry.connect ("activate", self.textentry_activate, self.textentry);
self.textentry.connect ("changed", self.textentry_changed, self.textentry);
self.show_all()
def find_text(self, entry_text):
found_items = []
dot_widget = self.widget
regexp = re.compile(entry_text)
for node in dot_widget.graph.nodes:
if node.search_text(regexp):
found_items.append(node)
return found_items
def textentry_changed(self, widget, entry):
entry_text = entry.get_text()
dot_widget = self.widget
if not entry_text:
dot_widget.set_highlight(None)
return
found_items = self.find_text(entry_text)
dot_widget.set_highlight(found_items)
def textentry_activate(self, widget, entry):
entry_text = entry.get_text()
dot_widget = self.widget
if not entry_text:
dot_widget.set_highlight(None)
            return
found_items = self.find_text(entry_text)
dot_widget.set_highlight(found_items)
        if len(found_items) == 1:
dot_widget.animate_to(found_items[0].x, found_items[0].y)
def set_filter(self, filter):
self.widget.set_filter(filter)
def set_dotcode(self, dotcode, filename=None):
if self.widget.set_dotcode(dotcode, filename):
self.update_title(filename)
self.widget.zoom_to_fit()
def set_xdotcode(self, xdotcode, filename=None):
if self.widget.set_xdotcode(xdotcode):
self.update_title(filename)
self.widget.zoom_to_fit()
def update_title(self, filename=None):
if filename is None:
self.set_title(self.base_title)
else:
self.set_title(os.path.basename(filename) + ' - ' + self.base_title)
def open_file(self, filename):
try:
            fp = open(filename, 'rt')
self.set_dotcode(fp.read(), filename)
fp.close()
except IOError as ex:
dlg = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
message_format=str(ex),
buttons=gtk.BUTTONS_OK)
dlg.set_title(self.base_title)
dlg.run()
dlg.destroy()
def on_open(self, action):
chooser = gtk.FileChooserDialog(title="Open dot File",
action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL,
gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN,
gtk.RESPONSE_OK))
chooser.set_default_response(gtk.RESPONSE_OK)
chooser.set_current_folder(self.last_open_dir)
filter = gtk.FileFilter()
filter.set_name("Graphviz dot files")
filter.add_pattern("*.dot")
chooser.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("All files")
filter.add_pattern("*")
chooser.add_filter(filter)
if chooser.run() == gtk.RESPONSE_OK:
filename = chooser.get_filename()
self.last_open_dir = chooser.get_current_folder()
chooser.destroy()
self.open_file(filename)
else:
chooser.destroy()
def on_reload(self, action):
self.widget.reload()
class OptionParser(optparse.OptionParser):
def format_epilog(self, formatter):
# Prevent stripping the newlines in epilog message
# http://stackoverflow.com/questions/1857346/python-optparse-how-to-include-additional-info-in-usage-output
return self.epilog
def main():
parser = OptionParser(
usage='\n\t%prog [file]',
epilog='''
Shortcuts:
Up, Down, Left, Right scroll
PageUp, +, = zoom in
PageDown, - zoom out
R reload dot file
F find
Q quit
P print
Escape halt animation
Ctrl-drag zoom in/out
Shift-drag zooms an area
'''
)
parser.add_option(
'-f', '--filter',
type='choice', choices=('dot', 'neato', 'twopi', 'circo', 'fdp'),
dest='filter', default='dot',
help='graphviz filter: dot, neato, twopi, circo, or fdp [default: %default]')
parser.add_option(
'-n', '--no-filter',
action='store_const', const=None, dest='filter',
help='assume input is already filtered into xdot format (use e.g. dot -Txdot)')
(options, args) = parser.parse_args(sys.argv[1:])
if len(args) > 1:
parser.error('incorrect number of arguments')
win = DotWindow()
win.connect('destroy', gtk.main_quit)
win.set_filter(options.filter)
if len(args) == 0:
if not sys.stdin.isatty():
win.set_dotcode(sys.stdin.read())
else:
if args[0] == '-':
win.set_dotcode(sys.stdin.read())
else:
win.open_file(args[0])
gtk.main()
# Apache-Style Software License for ColorBrewer software and ColorBrewer Color
# Schemes, Version 1.1
#
# Copyright (c) 2002 Cynthia Brewer, Mark Harrower, and The Pennsylvania State
# University. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions as source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. The end-user documentation included with the redistribution, if any,
# must include the following acknowledgment:
#
# This product includes color specifications and designs developed by
# Cynthia Brewer (http://colorbrewer.org/).
#
# Alternately, this acknowledgment may appear in the software itself, if and
# wherever such third-party acknowledgments normally appear.
#
# 3. The name "ColorBrewer" must not be used to endorse or promote products
# derived from this software without prior written permission. For written
# permission, please contact Cynthia Brewer at cbrewer@psu.edu.
#
# 4. Products derived from this software may not be called "ColorBrewer",
# nor may "ColorBrewer" appear in their name, without prior written
# permission of Cynthia Brewer.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CYNTHIA
# BREWER, MARK HARROWER, OR THE PENNSYLVANIA STATE UNIVERSITY BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
brewer_colors = {
'accent3': [(127, 201, 127), (190, 174, 212), (253, 192, 134)],
'accent4': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153)],
'accent5': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176)],
'accent6': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176), (240, 2, 127)],
'accent7': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176), (240, 2, 127), (191, 91, 23)],
'accent8': [(127, 201, 127), (190, 174, 212), (253, 192, 134), (255, 255, 153), (56, 108, 176), (240, 2, 127), (191, 91, 23), (102, 102, 102)],
'blues3': [(222, 235, 247), (158, 202, 225), (49, 130, 189)],
'blues4': [(239, 243, 255), (189, 215, 231), (107, 174, 214), (33, 113, 181)],
'blues5': [(239, 243, 255), (189, 215, 231), (107, 174, 214), (49, 130, 189), (8, 81, 156)],
'blues6': [(239, 243, 255), (198, 219, 239), (158, 202, 225), (107, 174, 214), (49, 130, 189), (8, 81, 156)],
'blues7': [(239, 243, 255), (198, 219, 239), (158, 202, 225), (107, 174, 214), (66, 146, 198), (33, 113, 181), (8, 69, 148)],
'blues8': [(247, 251, 255), (222, 235, 247), (198, 219, 239), (158, 202, 225), (107, 174, 214), (66, 146, 198), (33, 113, 181), (8, 69, 148)],
'blues9': [(247, 251, 255), (222, 235, 247), (198, 219, 239), (158, 202, 225), (107, 174, 214), (66, 146, 198), (33, 113, 181), (8, 81, 156), (8, 48, 107)],
'brbg10': [(84, 48, 5), (0, 60, 48), (140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195), (199, 234, 229), (128, 205, 193), (53, 151, 143), (1, 102, 94)],
'brbg11': [(84, 48, 5), (1, 102, 94), (0, 60, 48), (140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195), (245, 245, 245), (199, 234, 229), (128, 205, 193), (53, 151, 143)],
'brbg3': [(216, 179, 101), (245, 245, 245), (90, 180, 172)],
'brbg4': [(166, 97, 26), (223, 194, 125), (128, 205, 193), (1, 133, 113)],
'brbg5': [(166, 97, 26), (223, 194, 125), (245, 245, 245), (128, 205, 193), (1, 133, 113)],
'brbg6': [(140, 81, 10), (216, 179, 101), (246, 232, 195), (199, 234, 229), (90, 180, 172), (1, 102, 94)],
'brbg7': [(140, 81, 10), (216, 179, 101), (246, 232, 195), (245, 245, 245), (199, 234, 229), (90, 180, 172), (1, 102, 94)],
'brbg8': [(140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195), (199, 234, 229), (128, 205, 193), (53, 151, 143), (1, 102, 94)],
'brbg9': [(140, 81, 10), (191, 129, 45), (223, 194, 125), (246, 232, 195), (245, 245, 245), (199, 234, 229), (128, 205, 193), (53, 151, 143), (1, 102, 94)],
'bugn3': [(229, 245, 249), (153, 216, 201), (44, 162, 95)],
'bugn4': [(237, 248, 251), (178, 226, 226), (102, 194, 164), (35, 139, 69)],
'bugn5': [(237, 248, 251), (178, 226, 226), (102, 194, 164), (44, 162, 95), (0, 109, 44)],
'bugn6': [(237, 248, 251), (204, 236, 230), (153, 216, 201), (102, 194, 164), (44, 162, 95), (0, 109, 44)],
'bugn7': [(237, 248, 251), (204, 236, 230), (153, 216, 201), (102, 194, 164), (65, 174, 118), (35, 139, 69), (0, 88, 36)],
'bugn8': [(247, 252, 253), (229, 245, 249), (204, 236, 230), (153, 216, 201), (102, 194, 164), (65, 174, 118), (35, 139, 69), (0, 88, 36)],
'bugn9': [(247, 252, 253), (229, 245, 249), (204, 236, 230), (153, 216, 201), (102, 194, 164), (65, 174, 118), (35, 139, 69), (0, 109, 44), (0, 68, 27)],
'bupu3': [(224, 236, 244), (158, 188, 218), (136, 86, 167)],
'bupu4': [(237, 248, 251), (179, 205, 227), (140, 150, 198), (136, 65, 157)],
'bupu5': [(237, 248, 251), (179, 205, 227), (140, 150, 198), (136, 86, 167), (129, 15, 124)],
'bupu6': [(237, 248, 251), (191, 211, 230), (158, 188, 218), (140, 150, 198), (136, 86, 167), (129, 15, 124)],
'bupu7': [(237, 248, 251), (191, 211, 230), (158, 188, 218), (140, 150, 198), (140, 107, 177), (136, 65, 157), (110, 1, 107)],
'bupu8': [(247, 252, 253), (224, 236, 244), (191, 211, 230), (158, 188, 218), (140, 150, 198), (140, 107, 177), (136, 65, 157), (110, 1, 107)],
'bupu9': [(247, 252, 253), (224, 236, 244), (191, 211, 230), (158, 188, 218), (140, 150, 198), (140, 107, 177), (136, 65, 157), (129, 15, 124), (77, 0, 75)],
'dark23': [(27, 158, 119), (217, 95, 2), (117, 112, 179)],
'dark24': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138)],
'dark25': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138), (102, 166, 30)],
'dark26': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138), (102, 166, 30), (230, 171, 2)],
'dark27': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138), (102, 166, 30), (230, 171, 2), (166, 118, 29)],
'dark28': [(27, 158, 119), (217, 95, 2), (117, 112, 179), (231, 41, 138), (102, 166, 30), (230, 171, 2), (166, 118, 29), (102, 102, 102)],
'gnbu3': [(224, 243, 219), (168, 221, 181), (67, 162, 202)],
'gnbu4': [(240, 249, 232), (186, 228, 188), (123, 204, 196), (43, 140, 190)],
'gnbu5': [(240, 249, 232), (186, 228, 188), (123, 204, 196), (67, 162, 202), (8, 104, 172)],
'gnbu6': [(240, 249, 232), (204, 235, 197), (168, 221, 181), (123, 204, 196), (67, 162, 202), (8, 104, 172)],
'gnbu7': [(240, 249, 232), (204, 235, 197), (168, 221, 181), (123, 204, 196), (78, 179, 211), (43, 140, 190), (8, 88, 158)],
'gnbu8': [(247, 252, 240), (224, 243, 219), (204, 235, 197), (168, 221, 181), (123, 204, 196), (78, 179, 211), (43, 140, 190), (8, 88, 158)],
'gnbu9': [(247, 252, 240), (224, 243, 219), (204, 235, 197), (168, 221, 181), (123, 204, 196), (78, 179, 211), (43, 140, 190), (8, 104, 172), (8, 64, 129)],
'greens3': [(229, 245, 224), (161, 217, 155), (49, 163, 84)],
'greens4': [(237, 248, 233), (186, 228, 179), (116, 196, 118), (35, 139, 69)],
'greens5': [(237, 248, 233), (186, 228, 179), (116, 196, 118), (49, 163, 84), (0, 109, 44)],
'greens6': [(237, 248, 233), (199, 233, 192), (161, 217, 155), (116, 196, 118), (49, 163, 84), (0, 109, 44)],
'greens7': [(237, 248, 233), (199, 233, 192), (161, 217, 155), (116, 196, 118), (65, 171, 93), (35, 139, 69), (0, 90, 50)],
'greens8': [(247, 252, 245), (229, 245, 224), (199, 233, 192), (161, 217, 155), (116, 196, 118), (65, 171, 93), (35, 139, 69), (0, 90, 50)],
'greens9': [(247, 252, 245), (229, 245, 224), (199, 233, 192), (161, 217, 155), (116, 196, 118), (65, 171, 93), (35, 139, 69), (0, 109, 44), (0, 68, 27)],
'greys3': [(240, 240, 240), (189, 189, 189), (99, 99, 99)],
'greys4': [(247, 247, 247), (204, 204, 204), (150, 150, 150), (82, 82, 82)],
'greys5': [(247, 247, 247), (204, 204, 204), (150, 150, 150), (99, 99, 99), (37, 37, 37)],
'greys6': [(247, 247, 247), (217, 217, 217), (189, 189, 189), (150, 150, 150), (99, 99, 99), (37, 37, 37)],
'greys7': [(247, 247, 247), (217, 217, 217), (189, 189, 189), (150, 150, 150), (115, 115, 115), (82, 82, 82), (37, 37, 37)],
'greys8': [(255, 255, 255), (240, 240, 240), (217, 217, 217), (189, 189, 189), (150, 150, 150), (115, 115, 115), (82, 82, 82), (37, 37, 37)],
'greys9': [(255, 255, 255), (240, 240, 240), (217, 217, 217), (189, 189, 189), (150, 150, 150), (115, 115, 115), (82, 82, 82), (37, 37, 37), (0, 0, 0)],
'oranges3': [(254, 230, 206), (253, 174, 107), (230, 85, 13)],
'oranges4': [(254, 237, 222), (253, 190, 133), (253, 141, 60), (217, 71, 1)],
'oranges5': [(254, 237, 222), (253, 190, 133), (253, 141, 60), (230, 85, 13), (166, 54, 3)],
'oranges6': [(254, 237, 222), (253, 208, 162), (253, 174, 107), (253, 141, 60), (230, 85, 13), (166, 54, 3)],
'oranges7': [(254, 237, 222), (253, 208, 162), (253, 174, 107), (253, 141, 60), (241, 105, 19), (217, 72, 1), (140, 45, 4)],
'oranges8': [(255, 245, 235), (254, 230, 206), (253, 208, 162), (253, 174, 107), (253, 141, 60), (241, 105, 19), (217, 72, 1), (140, 45, 4)],
'oranges9': [(255, 245, 235), (254, 230, 206), (253, 208, 162), (253, 174, 107), (253, 141, 60), (241, 105, 19), (217, 72, 1), (166, 54, 3), (127, 39, 4)],
'orrd3': [(254, 232, 200), (253, 187, 132), (227, 74, 51)],
'orrd4': [(254, 240, 217), (253, 204, 138), (252, 141, 89), (215, 48, 31)],
'orrd5': [(254, 240, 217), (253, 204, 138), (252, 141, 89), (227, 74, 51), (179, 0, 0)],
'orrd6': [(254, 240, 217), (253, 212, 158), (253, 187, 132), (252, 141, 89), (227, 74, 51), (179, 0, 0)],
'orrd7': [(254, 240, 217), (253, 212, 158), (253, 187, 132), (252, 141, 89), (239, 101, 72), (215, 48, 31), (153, 0, 0)],
'orrd8': [(255, 247, 236), (254, 232, 200), (253, 212, 158), (253, 187, 132), (252, 141, 89), (239, 101, 72), (215, 48, 31), (153, 0, 0)],
'orrd9': [(255, 247, 236), (254, 232, 200), (253, 212, 158), (253, 187, 132), (252, 141, 89), (239, 101, 72), (215, 48, 31), (179, 0, 0), (127, 0, 0)],
'paired10': [(166, 206, 227), (106, 61, 154), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28), (253, 191, 111), (255, 127, 0), (202, 178, 214)],
'paired11': [(166, 206, 227), (106, 61, 154), (255, 255, 153), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28), (253, 191, 111), (255, 127, 0), (202, 178, 214)],
'paired12': [(166, 206, 227), (106, 61, 154), (255, 255, 153), (177, 89, 40), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28), (253, 191, 111), (255, 127, 0), (202, 178, 214)],
'paired3': [(166, 206, 227), (31, 120, 180), (178, 223, 138)],
'paired4': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44)],
'paired5': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153)],
'paired6': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28)],
'paired7': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28), (253, 191, 111)],
'paired8': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28), (253, 191, 111), (255, 127, 0)],
'paired9': [(166, 206, 227), (31, 120, 180), (178, 223, 138), (51, 160, 44), (251, 154, 153), (227, 26, 28), (253, 191, 111), (255, 127, 0), (202, 178, 214)],
'pastel13': [(251, 180, 174), (179, 205, 227), (204, 235, 197)],
'pastel14': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228)],
'pastel15': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166)],
'pastel16': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166), (255, 255, 204)],
'pastel17': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166), (255, 255, 204), (229, 216, 189)],
'pastel18': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166), (255, 255, 204), (229, 216, 189), (253, 218, 236)],
'pastel19': [(251, 180, 174), (179, 205, 227), (204, 235, 197), (222, 203, 228), (254, 217, 166), (255, 255, 204), (229, 216, 189), (253, 218, 236), (242, 242, 242)],
'pastel23': [(179, 226, 205), (253, 205, 172), (203, 213, 232)],
'pastel24': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228)],
'pastel25': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228), (230, 245, 201)],
'pastel26': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228), (230, 245, 201), (255, 242, 174)],
'pastel27': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228), (230, 245, 201), (255, 242, 174), (241, 226, 204)],
'pastel28': [(179, 226, 205), (253, 205, 172), (203, 213, 232), (244, 202, 228), (230, 245, 201), (255, 242, 174), (241, 226, 204), (204, 204, 204)],
'piyg10': [(142, 1, 82), (39, 100, 25), (197, 27, 125), (222, 119, 174), (241, 182, 218), (253, 224, 239), (230, 245, 208), (184, 225, 134), (127, 188, 65), (77, 146, 33)],
'piyg11': [(142, 1, 82), (77, 146, 33), (39, 100, 25), (197, 27, 125), (222, 119, 174), (241, 182, 218), (253, 224, 239), (247, 247, 247), (230, 245, 208), (184, 225, 134), (127, 188, 65)],
'piyg3': [(233, 163, 201), (247, 247, 247), (161, 215, 106)],
'piyg4': [(208, 28, 139), (241, 182, 218), (184, 225, 134), (77, 172, 38)],
'piyg5': [(208, 28, 139), (241, 182, 218), (247, 247, 247), (184, 225, 134), (77, 172, 38)],
'piyg6': [(197, 27, 125), (233, 163, 201), (253, 224, 239), (230, 245, 208), (161, 215, 106), (77, 146, 33)],
'piyg7': [(197, 27, 125), (233, 163, 201), (253, 224, 239), (247, 247, 247), (230, 245, 208), (161, 215, 106), (77, 146, 33)],
'piyg8': [(197, 27, 125), (222, 119, 174), (241, 182, 218), (253, 224, 239), (230, 245, 208), (184, 225, 134), (127, 188, 65), (77, 146, 33)],
'piyg9': [(197, 27, 125), (222, 119, 174), (241, 182, 218), (253, 224, 239), (247, 247, 247), (230, 245, 208), (184, 225, 134), (127, 188, 65), (77, 146, 33)],
'prgn10': [(64, 0, 75), (0, 68, 27), (118, 42, 131), (153, 112, 171), (194, 165, 207), (231, 212, 232), (217, 240, 211), (166, 219, 160), (90, 174, 97), (27, 120, 55)],
'prgn11': [(64, 0, 75), (27, 120, 55), (0, 68, 27), (118, 42, 131), (153, 112, 171), (194, 165, 207), (231, 212, 232), (247, 247, 247), (217, 240, 211), (166, 219, 160), (90, 174, 97)],
'prgn3': [(175, 141, 195), (247, 247, 247), (127, 191, 123)],
'prgn4': [(123, 50, 148), (194, 165, 207), (166, 219, 160), (0, 136, 55)],
'prgn5': [(123, 50, 148), (194, 165, 207), (247, 247, 247), (166, 219, 160), (0, 136, 55)],
'prgn6': [(118, 42, 131), (175, 141, 195), (231, 212, 232), (217, 240, 211), (127, 191, 123), (27, 120, 55)],
'prgn7': [(118, 42, 131), (175, 141, 195), (231, 212, 232), (247, 247, 247), (217, 240, 211), (127, 191, 123), (27, 120, 55)],
'prgn8': [(118, 42, 131), (153, 112, 171), (194, 165, 207), (231, 212, 232), (217, 240, 211), (166, 219, 160), (90, 174, 97), (27, 120, 55)],
'prgn9': [(118, 42, 131), (153, 112, 171), (194, 165, 207), (231, 212, 232), (247, 247, 247), (217, 240, 211), (166, 219, 160), (90, 174, 97), (27, 120, 55)],
'pubu3': [(236, 231, 242), (166, 189, 219), (43, 140, 190)],
'pubu4': [(241, 238, 246), (189, 201, 225), (116, 169, 207), (5, 112, 176)],
'pubu5': [(241, 238, 246), (189, 201, 225), (116, 169, 207), (43, 140, 190), (4, 90, 141)],
'pubu6': [(241, 238, 246), (208, 209, 230), (166, 189, 219), (116, 169, 207), (43, 140, 190), (4, 90, 141)],
'pubu7': [(241, 238, 246), (208, 209, 230), (166, 189, 219), (116, 169, 207), (54, 144, 192), (5, 112, 176), (3, 78, 123)],
'pubu8': [(255, 247, 251), (236, 231, 242), (208, 209, 230), (166, 189, 219), (116, 169, 207), (54, 144, 192), (5, 112, 176), (3, 78, 123)],
'pubu9': [(255, 247, 251), (236, 231, 242), (208, 209, 230), (166, 189, 219), (116, 169, 207), (54, 144, 192), (5, 112, 176), (4, 90, 141), (2, 56, 88)],
'pubugn3': [(236, 226, 240), (166, 189, 219), (28, 144, 153)],
'pubugn4': [(246, 239, 247), (189, 201, 225), (103, 169, 207), (2, 129, 138)],
'pubugn5': [(246, 239, 247), (189, 201, 225), (103, 169, 207), (28, 144, 153), (1, 108, 89)],
'pubugn6': [(246, 239, 247), (208, 209, 230), (166, 189, 219), (103, 169, 207), (28, 144, 153), (1, 108, 89)],
'pubugn7': [(246, 239, 247), (208, 209, 230), (166, 189, 219), (103, 169, 207), (54, 144, 192), (2, 129, 138), (1, 100, 80)],
'pubugn8': [(255, 247, 251), (236, 226, 240), (208, 209, 230), (166, 189, 219), (103, 169, 207), (54, 144, 192), (2, 129, 138), (1, 100, 80)],
'pubugn9': [(255, 247, 251), (236, 226, 240), (208, 209, 230), (166, 189, 219), (103, 169, 207), (54, 144, 192), (2, 129, 138), (1, 108, 89), (1, 70, 54)],
'puor10': [(127, 59, 8), (45, 0, 75), (179, 88, 6), (224, 130, 20), (253, 184, 99), (254, 224, 182), (216, 218, 235), (178, 171, 210), (128, 115, 172), (84, 39, 136)],
'puor11': [(127, 59, 8), (84, 39, 136), (45, 0, 75), (179, 88, 6), (224, 130, 20), (253, 184, 99), (254, 224, 182), (247, 247, 247), (216, 218, 235), (178, 171, 210), (128, 115, 172)],
'puor3': [(241, 163, 64), (247, 247, 247), (153, 142, 195)],
'puor4': [(230, 97, 1), (253, 184, 99), (178, 171, 210), (94, 60, 153)],
'puor5': [(230, 97, 1), (253, 184, 99), (247, 247, 247), (178, 171, 210), (94, 60, 153)],
'puor6': [(179, 88, 6), (241, 163, 64), (254, 224, 182), (216, 218, 235), (153, 142, 195), (84, 39, 136)],
'puor7': [(179, 88, 6), (241, 163, 64), (254, 224, 182), (247, 247, 247), (216, 218, 235), (153, 142, 195), (84, 39, 136)],
'puor8': [(179, 88, 6), (224, 130, 20), (253, 184, 99), (254, 224, 182), (216, 218, 235), (178, 171, 210), (128, 115, 172), (84, 39, 136)],
'puor9': [(179, 88, 6), (224, 130, 20), (253, 184, 99), (254, 224, 182), (247, 247, 247), (216, 218, 235), (178, 171, 210), (128, 115, 172), (84, 39, 136)],
'purd3': [(231, 225, 239), (201, 148, 199), (221, 28, 119)],
'purd4': [(241, 238, 246), (215, 181, 216), (223, 101, 176), (206, 18, 86)],
'purd5': [(241, 238, 246), (215, 181, 216), (223, 101, 176), (221, 28, 119), (152, 0, 67)],
'purd6': [(241, 238, 246), (212, 185, 218), (201, 148, 199), (223, 101, 176), (221, 28, 119), (152, 0, 67)],
'purd7': [(241, 238, 246), (212, 185, 218), (201, 148, 199), (223, 101, 176), (231, 41, 138), (206, 18, 86), (145, 0, 63)],
'purd8': [(247, 244, 249), (231, 225, 239), (212, 185, 218), (201, 148, 199), (223, 101, 176), (231, 41, 138), (206, 18, 86), (145, 0, 63)],
'purd9': [(247, 244, 249), (231, 225, 239), (212, 185, 218), (201, 148, 199), (223, 101, 176), (231, 41, 138), (206, 18, 86), (152, 0, 67), (103, 0, 31)],
'purples3': [(239, 237, 245), (188, 189, 220), (117, 107, 177)],
'purples4': [(242, 240, 247), (203, 201, 226), (158, 154, 200), (106, 81, 163)],
'purples5': [(242, 240, 247), (203, 201, 226), (158, 154, 200), (117, 107, 177), (84, 39, 143)],
'purples6': [(242, 240, 247), (218, 218, 235), (188, 189, 220), (158, 154, 200), (117, 107, 177), (84, 39, 143)],
'purples7': [(242, 240, 247), (218, 218, 235), (188, 189, 220), (158, 154, 200), (128, 125, 186), (106, 81, 163), (74, 20, 134)],
'purples8': [(252, 251, 253), (239, 237, 245), (218, 218, 235), (188, 189, 220), (158, 154, 200), (128, 125, 186), (106, 81, 163), (74, 20, 134)],
'purples9': [(252, 251, 253), (239, 237, 245), (218, 218, 235), (188, 189, 220), (158, 154, 200), (128, 125, 186), (106, 81, 163), (84, 39, 143), (63, 0, 125)],
'rdbu10': [(103, 0, 31), (5, 48, 97), (178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (209, 229, 240), (146, 197, 222), (67, 147, 195), (33, 102, 172)],
'rdbu11': [(103, 0, 31), (33, 102, 172), (5, 48, 97), (178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (247, 247, 247), (209, 229, 240), (146, 197, 222), (67, 147, 195)],
'rdbu3': [(239, 138, 98), (247, 247, 247), (103, 169, 207)],
'rdbu4': [(202, 0, 32), (244, 165, 130), (146, 197, 222), (5, 113, 176)],
'rdbu5': [(202, 0, 32), (244, 165, 130), (247, 247, 247), (146, 197, 222), (5, 113, 176)],
'rdbu6': [(178, 24, 43), (239, 138, 98), (253, 219, 199), (209, 229, 240), (103, 169, 207), (33, 102, 172)],
'rdbu7': [(178, 24, 43), (239, 138, 98), (253, 219, 199), (247, 247, 247), (209, 229, 240), (103, 169, 207), (33, 102, 172)],
'rdbu8': [(178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (209, 229, 240), (146, 197, 222), (67, 147, 195), (33, 102, 172)],
'rdbu9': [(178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (247, 247, 247), (209, 229, 240), (146, 197, 222), (67, 147, 195), (33, 102, 172)],
'rdgy10': [(103, 0, 31), (26, 26, 26), (178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (224, 224, 224), (186, 186, 186), (135, 135, 135), (77, 77, 77)],
'rdgy11': [(103, 0, 31), (77, 77, 77), (26, 26, 26), (178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (255, 255, 255), (224, 224, 224), (186, 186, 186), (135, 135, 135)],
'rdgy3': [(239, 138, 98), (255, 255, 255), (153, 153, 153)],
'rdgy4': [(202, 0, 32), (244, 165, 130), (186, 186, 186), (64, 64, 64)],
'rdgy5': [(202, 0, 32), (244, 165, 130), (255, 255, 255), (186, 186, 186), (64, 64, 64)],
'rdgy6': [(178, 24, 43), (239, 138, 98), (253, 219, 199), (224, 224, 224), (153, 153, 153), (77, 77, 77)],
'rdgy7': [(178, 24, 43), (239, 138, 98), (253, 219, 199), (255, 255, 255), (224, 224, 224), (153, 153, 153), (77, 77, 77)],
'rdgy8': [(178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (224, 224, 224), (186, 186, 186), (135, 135, 135), (77, 77, 77)],
'rdgy9': [(178, 24, 43), (214, 96, 77), (244, 165, 130), (253, 219, 199), (255, 255, 255), (224, 224, 224), (186, 186, 186), (135, 135, 135), (77, 77, 77)],
'rdpu3': [(253, 224, 221), (250, 159, 181), (197, 27, 138)],
'rdpu4': [(254, 235, 226), (251, 180, 185), (247, 104, 161), (174, 1, 126)],
'rdpu5': [(254, 235, 226), (251, 180, 185), (247, 104, 161), (197, 27, 138), (122, 1, 119)],
'rdpu6': [(254, 235, 226), (252, 197, 192), (250, 159, 181), (247, 104, 161), (197, 27, 138), (122, 1, 119)],
'rdpu7': [(254, 235, 226), (252, 197, 192), (250, 159, 181), (247, 104, 161), (221, 52, 151), (174, 1, 126), (122, 1, 119)],
'rdpu8': [(255, 247, 243), (253, 224, 221), (252, 197, 192), (250, 159, 181), (247, 104, 161), (221, 52, 151), (174, 1, 126), (122, 1, 119)],
'rdpu9': [(255, 247, 243), (253, 224, 221), (252, 197, 192), (250, 159, 181), (247, 104, 161), (221, 52, 151), (174, 1, 126), (122, 1, 119), (73, 0, 106)],
'rdylbu10': [(165, 0, 38), (49, 54, 149), (215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 144), (224, 243, 248), (171, 217, 233), (116, 173, 209), (69, 117, 180)],
'rdylbu11': [(165, 0, 38), (69, 117, 180), (49, 54, 149), (215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 144), (255, 255, 191), (224, 243, 248), (171, 217, 233), (116, 173, 209)],
'rdylbu3': [(252, 141, 89), (255, 255, 191), (145, 191, 219)],
'rdylbu4': [(215, 25, 28), (253, 174, 97), (171, 217, 233), (44, 123, 182)],
'rdylbu5': [(215, 25, 28), (253, 174, 97), (255, 255, 191), (171, 217, 233), (44, 123, 182)],
'rdylbu6': [(215, 48, 39), (252, 141, 89), (254, 224, 144), (224, 243, 248), (145, 191, 219), (69, 117, 180)],
'rdylbu7': [(215, 48, 39), (252, 141, 89), (254, 224, 144), (255, 255, 191), (224, 243, 248), (145, 191, 219), (69, 117, 180)],
'rdylbu8': [(215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 144), (224, 243, 248), (171, 217, 233), (116, 173, 209), (69, 117, 180)],
'rdylbu9': [(215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 144), (255, 255, 191), (224, 243, 248), (171, 217, 233), (116, 173, 209), (69, 117, 180)],
'rdylgn10': [(165, 0, 38), (0, 104, 55), (215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 139), (217, 239, 139), (166, 217, 106), (102, 189, 99), (26, 152, 80)],
'rdylgn11': [(165, 0, 38), (26, 152, 80), (0, 104, 55), (215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 139), (255, 255, 191), (217, 239, 139), (166, 217, 106), (102, 189, 99)],
'rdylgn3': [(252, 141, 89), (255, 255, 191), (145, 207, 96)],
'rdylgn4': [(215, 25, 28), (253, 174, 97), (166, 217, 106), (26, 150, 65)],
'rdylgn5': [(215, 25, 28), (253, 174, 97), (255, 255, 191), (166, 217, 106), (26, 150, 65)],
'rdylgn6': [(215, 48, 39), (252, 141, 89), (254, 224, 139), (217, 239, 139), (145, 207, 96), (26, 152, 80)],
'rdylgn7': [(215, 48, 39), (252, 141, 89), (254, 224, 139), (255, 255, 191), (217, 239, 139), (145, 207, 96), (26, 152, 80)],
'rdylgn8': [(215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 139), (217, 239, 139), (166, 217, 106), (102, 189, 99), (26, 152, 80)],
'rdylgn9': [(215, 48, 39), (244, 109, 67), (253, 174, 97), (254, 224, 139), (255, 255, 191), (217, 239, 139), (166, 217, 106), (102, 189, 99), (26, 152, 80)],
'reds3': [(254, 224, 210), (252, 146, 114), (222, 45, 38)],
'reds4': [(254, 229, 217), (252, 174, 145), (251, 106, 74), (203, 24, 29)],
'reds5': [(254, 229, 217), (252, 174, 145), (251, 106, 74), (222, 45, 38), (165, 15, 21)],
'reds6': [(254, 229, 217), (252, 187, 161), (252, 146, 114), (251, 106, 74), (222, 45, 38), (165, 15, 21)],
'reds7': [(254, 229, 217), (252, 187, 161), (252, 146, 114), (251, 106, 74), (239, 59, 44), (203, 24, 29), (153, 0, 13)],
'reds8': [(255, 245, 240), (254, 224, 210), (252, 187, 161), (252, 146, 114), (251, 106, 74), (239, 59, 44), (203, 24, 29), (153, 0, 13)],
'reds9': [(255, 245, 240), (254, 224, 210), (252, 187, 161), (252, 146, 114), (251, 106, 74), (239, 59, 44), (203, 24, 29), (165, 15, 21), (103, 0, 13)],
'set13': [(228, 26, 28), (55, 126, 184), (77, 175, 74)],
'set14': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163)],
'set15': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0)],
'set16': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0), (255, 255, 51)],
'set17': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0), (255, 255, 51), (166, 86, 40)],
'set18': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0), (255, 255, 51), (166, 86, 40), (247, 129, 191)],
'set19': [(228, 26, 28), (55, 126, 184), (77, 175, 74), (152, 78, 163), (255, 127, 0), (255, 255, 51), (166, 86, 40), (247, 129, 191), (153, 153, 153)],
'set23': [(102, 194, 165), (252, 141, 98), (141, 160, 203)],
'set24': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195)],
'set25': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195), (166, 216, 84)],
'set26': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195), (166, 216, 84), (255, 217, 47)],
'set27': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195), (166, 216, 84), (255, 217, 47), (229, 196, 148)],
'set28': [(102, 194, 165), (252, 141, 98), (141, 160, 203), (231, 138, 195), (166, 216, 84), (255, 217, 47), (229, 196, 148), (179, 179, 179)],
'set310': [(141, 211, 199), (188, 128, 189), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98), (179, 222, 105), (252, 205, 229), (217, 217, 217)],
'set311': [(141, 211, 199), (188, 128, 189), (204, 235, 197), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98), (179, 222, 105), (252, 205, 229), (217, 217, 217)],
'set312': [(141, 211, 199), (188, 128, 189), (204, 235, 197), (255, 237, 111), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98), (179, 222, 105), (252, 205, 229), (217, 217, 217)],
'set33': [(141, 211, 199), (255, 255, 179), (190, 186, 218)],
'set34': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114)],
'set35': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211)],
'set36': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98)],
'set37': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98), (179, 222, 105)],
'set38': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98), (179, 222, 105), (252, 205, 229)],
'set39': [(141, 211, 199), (255, 255, 179), (190, 186, 218), (251, 128, 114), (128, 177, 211), (253, 180, 98), (179, 222, 105), (252, 205, 229), (217, 217, 217)],
'spectral10': [(158, 1, 66), (94, 79, 162), (213, 62, 79), (244, 109, 67), (253, 174, 97), (254, 224, 139), (230, 245, 152), (171, 221, 164), (102, 194, 165), (50, 136, 189)],
'spectral11': [(158, 1, 66), (50, 136, 189), (94, 79, 162), (213, 62, 79), (244, 109, 67), (253, 174, 97), (254, 224, 139), (255, 255, 191), (230, 245, 152), (171, 221, 164), (102, 194, 165)],
'spectral3': [(252, 141, 89), (255, 255, 191), (153, 213, 148)],
'spectral4': [(215, 25, 28), (253, 174, 97), (171, 221, 164), (43, 131, 186)],
'spectral5': [(215, 25, 28), (253, 174, 97), (255, 255, 191), (171, 221, 164), (43, 131, 186)],
'spectral6': [(213, 62, 79), (252, 141, 89), (254, 224, 139), (230, 245, 152), (153, 213, 148), (50, 136, 189)],
'spectral7': [(213, 62, 79), (252, 141, 89), (254, 224, 139), (255, 255, 191), (230, 245, 152), (153, 213, 148), (50, 136, 189)],
'spectral8': [(213, 62, 79), (244, 109, 67), (253, 174, 97), (254, 224, 139), (230, 245, 152), (171, 221, 164), (102, 194, 165), (50, 136, 189)],
'spectral9': [(213, 62, 79), (244, 109, 67), (253, 174, 97), (254, 224, 139), (255, 255, 191), (230, 245, 152), (171, 221, 164), (102, 194, 165), (50, 136, 189)],
'ylgn3': [(247, 252, 185), (173, 221, 142), (49, 163, 84)],
'ylgn4': [(255, 255, 204), (194, 230, 153), (120, 198, 121), (35, 132, 67)],
'ylgn5': [(255, 255, 204), (194, 230, 153), (120, 198, 121), (49, 163, 84), (0, 104, 55)],
'ylgn6': [(255, 255, 204), (217, 240, 163), (173, 221, 142), (120, 198, 121), (49, 163, 84), (0, 104, 55)],
'ylgn7': [(255, 255, 204), (217, 240, 163), (173, 221, 142), (120, 198, 121), (65, 171, 93), (35, 132, 67), (0, 90, 50)],
'ylgn8': [(255, 255, 229), (247, 252, 185), (217, 240, 163), (173, 221, 142), (120, 198, 121), (65, 171, 93), (35, 132, 67), (0, 90, 50)],
'ylgn9': [(255, 255, 229), (247, 252, 185), (217, 240, 163), (173, 221, 142), (120, 198, 121), (65, 171, 93), (35, 132, 67), (0, 104, 55), (0, 69, 41)],
'ylgnbu3': [(237, 248, 177), (127, 205, 187), (44, 127, 184)],
'ylgnbu4': [(255, 255, 204), (161, 218, 180), (65, 182, 196), (34, 94, 168)],
'ylgnbu5': [(255, 255, 204), (161, 218, 180), (65, 182, 196), (44, 127, 184), (37, 52, 148)],
'ylgnbu6': [(255, 255, 204), (199, 233, 180), (127, 205, 187), (65, 182, 196), (44, 127, 184), (37, 52, 148)],
'ylgnbu7': [(255, 255, 204), (199, 233, 180), (127, 205, 187), (65, 182, 196), (29, 145, 192), (34, 94, 168), (12, 44, 132)],
'ylgnbu8': [(255, 255, 217), (237, 248, 177), (199, 233, 180), (127, 205, 187), (65, 182, 196), (29, 145, 192), (34, 94, 168), (12, 44, 132)],
'ylgnbu9': [(255, 255, 217), (237, 248, 177), (199, 233, 180), (127, 205, 187), (65, 182, 196), (29, 145, 192), (34, 94, 168), (37, 52, 148), (8, 29, 88)],
'ylorbr3': [(255, 247, 188), (254, 196, 79), (217, 95, 14)],
'ylorbr4': [(255, 255, 212), (254, 217, 142), (254, 153, 41), (204, 76, 2)],
'ylorbr5': [(255, 255, 212), (254, 217, 142), (254, 153, 41), (217, 95, 14), (153, 52, 4)],
'ylorbr6': [(255, 255, 212), (254, 227, 145), (254, 196, 79), (254, 153, 41), (217, 95, 14), (153, 52, 4)],
'ylorbr7': [(255, 255, 212), (254, 227, 145), (254, 196, 79), (254, 153, 41), (236, 112, 20), (204, 76, 2), (140, 45, 4)],
'ylorbr8': [(255, 255, 229), (255, 247, 188), (254, 227, 145), (254, 196, 79), (254, 153, 41), (236, 112, 20), (204, 76, 2), (140, 45, 4)],
'ylorbr9': [(255, 255, 229), (255, 247, 188), (254, 227, 145), (254, 196, 79), (254, 153, 41), (236, 112, 20), (204, 76, 2), (153, 52, 4), (102, 37, 6)],
'ylorrd3': [(255, 237, 160), (254, 178, 76), (240, 59, 32)],
'ylorrd4': [(255, 255, 178), (254, 204, 92), (253, 141, 60), (227, 26, 28)],
'ylorrd5': [(255, 255, 178), (254, 204, 92), (253, 141, 60), (240, 59, 32), (189, 0, 38)],
'ylorrd6': [(255, 255, 178), (254, 217, 118), (254, 178, 76), (253, 141, 60), (240, 59, 32), (189, 0, 38)],
'ylorrd7': [(255, 255, 178), (254, 217, 118), (254, 178, 76), (253, 141, 60), (252, 78, 42), (227, 26, 28), (177, 0, 38)],
'ylorrd8': [(255, 255, 204), (255, 237, 160), (254, 217, 118), (254, 178, 76), (253, 141, 60), (252, 78, 42), (227, 26, 28), (177, 0, 38)],
}
if __name__ == '__main__':
main()
|
glaudsonml/kurgan-ai
|
tools/sqlmap/thirdparty/xdot/xdot.py
|
Python
|
apache-2.0
| 96,496
|
[
"FLEUR"
] |
aa158ee7357055a222880a0fcedc16694e51329fe6b48b9985abcd08c61d2c94
|
#! /usr/bin/env python
#
# Copyright (C) 2003-2021 ABINIT group
#
# Written by Gabriel Antonius in python (compatible v2.7).
# This is free software, and you are welcome to redistribute it
# under certain conditions (GNU General Public License,
# see ~abinit/COPYING or http://www.gnu.org/copyleft/gpl.txt).
#
# ABINIT is a project of the Universite Catholique de Louvain,
# Corning Inc. and other collaborators, see ~abinit/doc/developers/contributors.txt.
# Please read ~abinit/doc/biblio/generated_files/bib_acknow.html for suggested
# acknowledgments of the ABINIT effort.
#
# For more information, see https://www.abinit.org .
"""
This script can be run interactively,
but it is recommended to import it as a module:
>>> from merge_gkk_nc import merge_gkk_nc
>>> merge_gkk_nc(out_fname, fnames)
"""
from __future__ import print_function
import numpy as np
import netCDF4 as nc
__version__ = '1.0.0'
def merge_gkk_nc(out_fname, fnames):
"""
Merge a list of GKK<i>.nc files containing different elements of the same qpoint.
Arguments
---------
out_fname: Name for the merged file (will overwrite any existing file).
fnames: List of GKK<i>.nc files.
"""
if not fnames:
raise Exception('Empty list of files given for merge')
fname0 = fnames[0]
with nc.Dataset(out_fname, 'w') as dsout:
with nc.Dataset(fname0, 'r') as dsin:
nc_copy(dsin, dsout,
except_dimensions=['number_of_atoms_for_gkk', 'number_of_cartesian_directions_for_gkk'],
except_variables=['second_derivative_eigenenergies_actif'],
)
q0 = dsin.variables['current_q_point'][...]
natom = len(dsin.dimensions[u'number_of_atoms'])
ncart = len(dsin.dimensions[u'number_of_cartesian_directions'])
dsout.createDimension('number_of_atoms_for_gkk', natom)
dsout.createDimension('number_of_cartesian_directions_for_gkk', ncart)
gkk = dsout.createVariable('second_derivative_eigenenergies_actif', np.dtype('float64'),
('max_number_of_states', 'number_of_atoms_for_gkk',
'number_of_cartesian_directions_for_gkk', 'number_of_kpoints',
'product_mband_nsppol2'))
for i, fname in enumerate(fnames):
iatom = i // ncart
icart = i % ncart
with nc.Dataset(fname, 'r') as dsin:
# Check that the qpoints are the same
q = dsin.variables['current_q_point'][...]
if not all(np.isclose(q0, q)):
raise Exception('Cannot merge GKK.nc at different q-points.')
gkki = dsin.variables[u'second_derivative_eigenenergies_actif'][:,0,0,...]
gkk[:,iatom,icart,...] = gkki
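# Usage sketch for merge_gkk_nc: the file list must be ordered so that
# index i maps onto (iatom = i // ncart, icart = i % ncart), i.e. the
# three cartesian directions of atom 0 first, then atom 1, and so on.
# The file names below are hypothetical.
#
#   natom, ncart = 2, 3
#   fnames = ['run_GKK{}.nc'.format(i) for i in range(natom * ncart)]
#   merge_gkk_nc('merged_GKK.nc', fnames)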
def nc_copy(dsin, dsout, except_dimensions=None, except_variables=None):
    """
    Copy all dimensions and variables of one nc.Dataset instance into another.
    """
    # Copy dimensions (.items() rather than .iteritems(), so the helper
    # also runs under Python 3)
    for dname, dim in dsin.dimensions.items():
        if except_dimensions and dname in except_dimensions:
            continue
        dsout.createDimension(dname, len(dim))
    # Copy variables
    for vname, varin in dsin.variables.items():
        if except_variables and vname in except_variables:
            continue
        outVar = dsout.createVariable(vname, varin.datatype, varin.dimensions)
        outVar[...] = varin[...]
def interactive_merge_gkk_nc():
"""Get inputs from the user and run merge_gkk_nc."""
program_name = 'merge_gkk_nc'
description = """Merge several GKK<i>.nc files, belonging to the same q-point."""
    def get_user(s):
        # raw_input exists only on Python 2; fall back to input() on Python 3
        try:
            ask = raw_input
        except NameError:
            ask = input
        return ask(s.rstrip() + '\n').split('#')[0]
print(program_name)
print(len(program_name) * '-')
print(description + '\n')
ui = get_user('Enter a name for the output file in which to merge (will overwrite any existing file):')
out_fname = str(ui)
ui = get_user('Enter the number of files to merge:')
nfiles = int(ui)
fnames = list()
for i in range(nfiles):
ui = get_user('Enter the name of file {}:'.format(i+1))
fname = str(ui)
fnames.append(fname)
# Main execution
print('Executing...')
merge_gkk_nc(out_fname, fnames)
print('All done.')
# =========================================================================== #
# Run interactive program
# =========================================================================== #
if __name__ == '__main__':
interactive_merge_gkk_nc()
|
abinit/abinit
|
scripts/post_processing/merge_gkk_nc.py
|
Python
|
gpl-3.0
| 4,641
|
[
"ABINIT"
] |
67a6093a1c446430310689a22e0adaa0d47ad91136ff1591721a90ba6a59ea13
|
"""Utilities
This module contains some commonly used functions and classes.
"""
from __future__ import division
from bisect import bisect_right
from distutils.spawn import find_executable
from functools import wraps
from os import environ
from numpy import arcsin, ceil, floor, pi, round, sin, sqrt
from progressbar import ETA, Bar, Percentage, ProgressBar
from scipy.stats import norm
#: Error values used to indicate missing or bad data.
#: Code -999 is used if the reconstruction of a quantity failed.
#: Code -1 is used if that detector/sensor is not present.
ERR = [-1, -999]
#: Speed of light in vacuum in m / ns.
c = 0.299792458
def get_publicdb_base():
"""Get the HiSPARC Public Database base URL
This can be configured by setting the PUBLICDB_BASE environment
variable to the desired URL.
"""
return environ.get('PUBLICDB_BASE', 'http://data.hisparc.nl')
def pbar(iterable, length=None, show=True, **kwargs):
"""Get a new progressbar with our default widgets
    :param iterable: the iterable to loop over.
:param length: in case iterable is a generator, this should be its
expected length.
:param show: boolean, if False simply return the iterable.
:return: a new iterable which iterates over the same elements as
the input, but shows a progressbar if possible.
"""
if not show:
return iterable
if length is None:
try:
length = len(iterable)
except TypeError:
pass
if length:
pb = ProgressBar(max_value=length,
widgets=[Percentage(), Bar(), ETA()], **kwargs)
return pb(iterable)
else:
return iterable
def ceil_in_base(value, base):
"""Get nearest multiple of base above the value"""
return base * ceil(value / base)
def floor_in_base(value, base):
"""Get nearest multiple of base below the value"""
return base * floor(value / base)
def round_in_base(value, base):
"""Get nearest multiple of base to the value"""
return base * round(value / base)
def closest_in_list(value, items):
"""Get nearest item from a list of items to the value"""
return min(items, key=lambda x: abs(x - value))
def get_active_index(values, value):
"""Get the index where the value fits.
:param values: sorted list of values (e.g. list of timestamps).
:param value: value for which to find the position (e.g. a timestamp).
:return: index into the values list.
"""
idx = bisect_right(values, value, lo=0)
if idx == 0:
idx = 1
return idx - 1
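# Example: with values [10, 20, 30], 25 falls in the interval that starts
# at 20, so the active index is 1; a value before the first entry clamps
# to index 0:
#
#   get_active_index([10, 20, 30], 25)  # -> 1
#   get_active_index([10, 20, 30], 5)   # -> 0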
def gauss(x, n, mu, sigma):
"""Gaussian distribution
To be used for fitting where the integral is not 1.
"""
return n * norm.pdf(x, mu, sigma)
def norm_angle(angle):
"""Normalize an angle to the range [-pi, pi)
    We use the range from -pi up to but not including pi to represent
    angles.
"""
return (angle + pi) % (2 * pi) - pi
def angle_between(zenith1, azimuth1, zenith2, azimuth2):
"""Calculate the angle between two (zenith, azimuth) coordinates
Using the haversine formula,
from: https://www.movable-type.co.uk/scripts/latlong.html
:param zenith#: Zenith parts of the coordinates, in radians (0, pi/2).
:param azimuth#: Azimuth parts of the coordinates, in radians (-pi, pi).
:return: Angle between the two coordinates.
"""
dlat = zenith1 - zenith2
dlon = azimuth2 - azimuth1
a = (sin(dlat / 2) ** 2 + sin(zenith1) * sin(zenith2) * sin(dlon / 2) ** 2)
angle = 2 * arcsin(sqrt(a))
return angle
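# Quick numerical check of the formula above: two directions with equal
# zenith and opposite azimuths are separated by twice the zenith angle,
# e.g. angle_between(0.1, 0.0, 0.1, pi) evaluates to ~0.2 rad.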
def vector_length(x, y, z=0):
"""Length of a vector given by (x, y, z) coordinates
:param x,y,z: vector components.
:return: length of vector.
"""
return sqrt(x ** 2 + y ** 2 + z ** 2)
def distance_between(x1, y1, x2, y2):
"""Calculate the distance between two (x, y) coordinates
:param x#: x parts of the coordinates.
:param y#: y parts of the coordinates.
:return: distance between the two coordinates.
"""
return vector_length(x1 - x2, y1 - y2)
def make_relative(x):
"""Make first element the origin and make rest relative to it."""
return [xi - x[0] for xi in x]
def which(program):
"""Check if a command line program is available
An Exception is raised if the program is not available.
:param program: name or program to check for, e.g. 'wget'.
"""
path = find_executable(program)
if not path:
raise Exception('The program %s is not available.' % program)
def memoize(method):
"""Memoisation cache decorator
Source: https://stackoverflow.com/a/29954160/1033535
"""
@wraps(method)
def memoizer(self, *args, **kwargs):
# Prepare and get reference to cache
attr = "_memo_{name}".format(name=method.__name__)
if not hasattr(self, attr):
setattr(self, attr, {})
cache = getattr(self, attr)
# Actual caching
key = '{args}{kwargs}'.format(args=args, kwargs=sorted(kwargs.items()))
try:
return cache[key]
except KeyError:
cache[key] = method(self, *args, **kwargs)
return cache[key]
return memoizer
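# Usage sketch for the memoize decorator above (class, method, and helper
# names are illustrative only):
#
#   class Reconstruction(object):
#       @memoize
#       def zenith(self, t1, t2):
#           return slow_fit(t1, t2)  # body runs once per (t1, t2) pair
#
# The cached values live on the instance as the attribute _memo_zenith.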
|
HiSPARC/sapphire
|
sapphire/utils.py
|
Python
|
gpl-3.0
| 5,252
|
[
"Gaussian"
] |
5c23de6623d1ff12995c7b266c60656d8eceb6c5a0d88d70b92313de27a7c294
|
"""
Gaussian calculator for ASE written by:
Glen R. Jenness
University of Wisconsin - Madison
Based off of code written by:
Glen R. Jenness
Kuang Yu
Torsten Kerber, Ecole normale superieure de Lyon (*)
Paul Fleurat-Lessard, Ecole normale superieure de Lyon (*)
Martin Krupicka
(*) This work is supported by Award No. UK-C0017, made by King Abdullah
University of Science and Technology (KAUST), Saudi Arabia.
See accompanying license files for details.
"""
import os
import glob
import numpy as np
from ase.calculators.general import Calculator
"""
Gaussian has two generic classes of keywords: link0 and route.
Since both types of keywords have different input styles, we will
distinguish between both types, dividing each type into str's, int's
etc.
For more information on the Link0 commands see:
http://www.gaussian.com/g_tech/g_ur/k_link0.htm
For more information on the route section keywords, see:
http://www.gaussian.com/g_tech/g_ur/l_keywords09.htm
"""
link0_str_keys = ['chk',
'mem',
'rwf',
'int',
'd2e',
'lindaworkers',
'kjob',
'subst',
'save',
'nosave',
]
link0_int_keys = ['nprocshared',
'nproc',
]
# Multiplicity isn't really a route keyword, but we will put it here anyway
route_int_keys = ['multiplicity',
'cachesize',
'cbsextrapolate',
'constants',
]
route_str_keys = ['method',
'functional',
'basis',
'maxdisk',
'cphf',
'density',
'densityfit',
'ept',
'field',
'geom',
'guess',
'gvb',
'integral',
'irc',
'ircmax',
'name',
'nmr',
'nodensityfit',
'oniom',
'output',
'punch',
'scf',
'symmetry',
'td',
'units',
]
# This one is a little strange. Gaussian has several keywords where you just
# specify the keyword, but the keyword itself has several options.
# Ex: Opt, Opt=QST2, Opt=Conical, etc.
# These keywords are given here.
route_self_keys = ['opt',
'force',
'freq',
'complex',
'fmm',
'genchk',
'polar',
'prop',
'pseudo',
'restart',
'scan',
'scrf',
'sp',
'sparse',
'stable',
'volume',
]
route_float_keys = ['pressure',
'scale',
'temperature',
]
route_bool_keys = [
]
class Gaussian(Calculator):
"""
Gaussian calculator
"""
name = 'Gaussian'
def __init__(self, label='ase', ioplist=list(), basisfile=None,
directory=None, **kwargs):
Calculator.__init__(self)
# Form a set of dictionaries for each input variable type
self.link0_int_params = dict()
self.link0_str_params = dict()
self.route_str_params = dict()
self.route_int_params = dict()
self.route_float_params = dict()
self.route_bool_params = dict()
self.route_self_params = dict()
for key in link0_int_keys:
self.link0_int_params[key] = None
for key in link0_str_keys:
self.link0_str_params[key] = None
for key in route_str_keys:
self.route_str_params[key] = None
for key in route_int_keys:
self.route_int_params[key] = None
for key in route_float_keys:
self.route_float_params[key] = None
for key in route_bool_keys:
self.route_bool_params[key] = None
for key in route_self_keys:
self.route_self_params[key] = None
self.set(**kwargs)
self.atoms = None
self.positions = None
self.old_positions = None
self.old_link0_str_params = None
self.old_link0_int_params = None
self.old_route_str_params = None
self.old_route_int_params = None
self.old_route_float_params = None
self.old_route_bool_params = None
self.old_route_self_params = None
self.old_basisfile = None
self.old_label = None
self.old_ioplist = None
self.basisfile = basisfile
self.label = label
self.ioplist = list(ioplist)[:]
self.directory = directory
self.multiplicity = 1
self.converged = False
def set(self, **kwargs):
"""Assigns values to dictionary keys"""
for key in kwargs:
if key in self.link0_str_params:
self.link0_str_params[key] = kwargs[key]
elif key in self.link0_int_params:
self.link0_int_params[key] = kwargs[key]
elif key in self.route_str_params:
self.route_str_params[key] = kwargs[key]
elif key in self.route_int_params:
self.route_int_params[key] = kwargs[key]
elif key in self.route_float_params:
self.route_float_params[key] = kwargs[key]
elif key in self.route_bool_params:
self.route_bool_params[key] = kwargs[key]
elif key in self.route_self_params:
self.route_self_params[key] = kwargs[key]
def initialize(self, atoms):
if (self.route_int_params['multiplicity'] is None):
self.multiplicity = 1
else:
self.multiplicity = self.route_int_params['multiplicity']
# Set some default behavior
if (self.route_str_params['method'] is None):
self.route_str_params['method'] = 'hf'
if (self.route_str_params['basis'] is None):
self.route_str_params['basis'] = '6-31g*'
if (self.route_self_params['force'] is None):
self.route_self_params['force'] = 'force'
self.converged = None
def write_input(self, filename, atoms):
"""Writes the input file"""
inputfile = open(filename, 'w')
# First print the Link0 commands
for key, val in self.link0_str_params.items():
if val is not None:
inputfile.write('%%%s=%s\n' % (key, val))
for key, val in self.link0_int_params.items():
if val is not None:
inputfile.write('%%%s=%i\n' % (key, val))
# Print the route commands. By default we will always use "#p" to start.
route = '#p %s/%s' % (self.route_str_params['method'],
self.route_str_params['basis'])
# Add keywords and IOp options
# For the 'self' keywords, there are several suboptions available, and if more
# than 1 is given, then they are wrapped in ()'s and separated by a ','.
for key, val in self.route_self_params.items():
if val is not None:
if (val == key):
route += (' ' + val)
else:
if ',' in val:
route += ' %s(%s)' % (key, val)
else:
route += ' %s=%s' % (key, val)
for key, val in self.route_float_params.items():
if val is not None:
route += ' %s=%f' % (key, val)
        for key, val in self.route_int_params.items():
            # Compare strings with != ('is not' tests identity, not equality)
            if (val is not None) and (key != 'multiplicity'):
                route += ' %s=%i' % (key, val)
        for key, val in self.route_str_params.items():
            if (val is not None) and (key != 'method') and \
               (key != 'basis'):
                route += ' %s=%s' % (key, val)
for key, val in self.route_bool_params.items():
if val is not None:
route += ' %s=%s' % (key, val)
        if (self.ioplist):
            route += ' IOp('
            # Join the IOp entries with commas; enumerate() supplies the
            # index needed to skip the comma after the last entry (the
            # original compared the entry string itself against an index).
            for i, iop in enumerate(self.ioplist):
                route += (' ' + iop)
                if i != len(self.ioplist) - 1:
                    route += ','
            route += ')'
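        # Worked example (hypothetical settings): with method='b3lyp',
        # basis='6-31g*', opt='opt', ioplist=['6/7=3'] and the 'force'
        # default applied by initialize(), the assembled route reads
        # roughly "#p b3lyp/6-31g* opt force IOp( 6/7=3)"; the exact
        # keyword order follows dict iteration order.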
inputfile.write(route)
inputfile.write(' \n\n')
inputfile.write('Gaussian input prepared by ASE\n\n')
charge = sum(atoms.get_charges())
inputfile.write('%i %i\n' % (charge, self.multiplicity))
symbols = atoms.get_chemical_symbols()
coordinates = atoms.get_positions()
for i in range(len(atoms)):
inputfile.write('%-10s' % symbols[i])
for j in range(3):
inputfile.write('%20.10f' % coordinates[i, j])
inputfile.write('\n')
inputfile.write('\n')
if (self.route_str_params['basis'].lower() == 'gen'):
if (self.basisfile is None):
raise RuntimeError('Please set basisfile.')
elif (not os.path.isfile(self.basisfile)):
raise RuntimeError('Basis file %s does not exist.' \
% self.basisfile)
else:
f2 = open(self.basisfile, 'r')
inputfile.write(f2.read())
f2.close()
if atoms.get_pbc().any():
cell = atoms.get_cell()
line = str()
for v in cell:
line += 'TV %20.10f%20.10f%20.10f\n' % (v[0], v[1], v[2])
inputfile.write(line)
inputfile.write('\n\n')
inputfile.close()
def read_output(self, filename, quantity):
"""Reads the output file using GaussianReader"""
from ase.io.gaussian import read_gaussian_out
if (quantity == 'energy'):
return read_gaussian_out(filename, quantity='energy')
elif (quantity == 'forces'):
forces = read_gaussian_out(filename, quantity='forces')
return forces
elif (quantity == 'dipole'):
return read_gaussian_out(filename, quantity='dipole')
elif (quantity == 'version'):
return read_gaussian_out(filename, quantity='version')
def read_energy(self):
"""Reads and returns the energy"""
energy = self.read_output(self.label + '.log', 'energy')
return [energy, energy]
def read_forces(self, atoms):
"""Reads and returns the forces"""
forces = self.read_output(self.label + '.log', 'forces')
return forces
def read_dipole(self):
"""Reads and returns the dipole"""
dipole = self.read_output(self.label + '.log', 'dipole')
return dipole
def read_fermi(self):
"""No fermi energy, so return 0.0"""
return 0.0
def read_stress(self):
raise NotImplementedError
def update(self, atoms):
"""Updates and does a check to see if a calculation is required"""
if self.calculation_required(atoms, ['energy']):
if (self.atoms is None or
self.atoms.positions.shape != atoms.positions.shape):
self.clean()
if (self.directory is not None):
curdir = os.getcwd()
if not os.path.exists(self.directory):
os.makedirs(self.directory)
os.chdir(self.directory)
self.calculate(atoms)
os.chdir(curdir)
else:
self.calculate(atoms)
def calculation_required(self, atoms, quantities):
"""Checks if a calculation is required"""
if (self.positions is None or
(self.atoms != atoms) or
(self.link0_str_params != self.old_link0_str_params) or
(self.link0_int_params != self.old_link0_int_params) or
(self.route_str_params != self.old_route_str_params) or
(self.route_int_params != self.old_route_int_params) or
(self.route_float_params != self.old_route_float_params) or
(self.route_bool_params != self.old_route_bool_params) or
(self.route_self_params != self.old_route_self_params) or
(self.basisfile != self.old_basisfile) or
(self.label != self.old_label) or
(self.ioplist != self.old_ioplist)):
return True
return False
def clean(self):
"""Cleans up from a previous run"""
extensions = ['.chk', '.com', '.log']
for ext in extensions:
f = self.label + ext
try:
if (self.directory is not None):
os.remove(os.path.join(self.directory, f))
else:
os.remove(f)
except OSError:
pass
def get_command(self):
"""Return command string if program installed, otherwise None. """
command = None
if ('GAUSS_EXEDIR' in os.environ) \
and ('GAUSSIAN_COMMAND' in os.environ):
command = os.environ['GAUSSIAN_COMMAND']
return command
def run(self):
"""Runs Gaussian"""
command = self.get_command()
if command is None:
raise RuntimeError('GAUSS_EXEDIR or GAUSSIAN_COMMAND not set')
exitcode = os.system('%s < %s > %s'
% (command, self.label + '.com', self.label + '.log'))
if (exitcode != 0):
            raise RuntimeError('Gaussian exited with error code %i' % exitcode)
def calculate(self, atoms):
"""initializes calculation and runs Gaussian"""
self.initialize(atoms)
self.write_input(self.label + '.com', atoms)
self.run()
self.converged = self.read_convergence()
self.set_results(atoms)
    def read_convergence(self):
        """Determines if calculations converged"""
        converged = False
        gauss_dir = os.environ['GAUSS_EXEDIR']
        test = '(Enter ' + gauss_dir + '/l9999.exe)'
        f = open(self.label + '.log', 'r')
        lines = f.readlines()
        f.close()
        # Stop at the first match; the original reset the flag on every
        # non-matching line, so effectively only the last line was tested.
        for line in lines:
            if (line.rfind(test) > -1):
                converged = True
                break
        return converged
def set_results(self, atoms):
"""Sets results"""
self.read(atoms)
self.atoms = atoms.copy()
self.old_positions = atoms.get_positions().copy()
self.old_link0_str_params = self.link0_str_params.copy()
self.old_link0_int_params = self.link0_int_params.copy()
self.old_route_str_params = self.route_str_params.copy()
self.old_route_int_params = self.route_int_params.copy()
self.old_route_float_params = self.route_float_params.copy()
self.old_route_bool_params = self.route_bool_params.copy()
self.old_route_self_params = self.route_self_params.copy()
self.old_basisfile = self.basisfile
self.old_label = self.label
self.old_ioplist = self.ioplist[:]
def get_version(self):
return self.read_output(self.label + '.log', 'version')
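# Usage sketch (hypothetical label; requires a Gaussian installation with
# GAUSS_EXEDIR and GAUSSIAN_COMMAND set, see get_command() above; the
# molecule-building import path may differ between ASE versions):
#
#   from ase.build import molecule
#   atoms = molecule('H2O')
#   atoms.set_calculator(Gaussian(label='h2o', method='b3lyp',
#                                 basis='6-31g*', nprocshared=4))
#   e = atoms.get_potential_energy()  # writes h2o.com, runs, parses h2o.log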
|
alexei-matveev/ase-local
|
ase/calculators/gaussian.py
|
Python
|
gpl-2.0
| 15,357
|
[
"ASE",
"Gaussian"
] |
f856539b795fb08fa60b21cd91cea868aac6f0050870eaf9d0175dfb6cc540fb
|
# -*- coding:utf-8 -*-
# ##### BEGIN LGPL LICENSE BLOCK #####
# GEOS - Geometry Engine Open Source
# http:#geos.osgeo.org
#
# Copyright (C) 2011 Sandro Santilli <strk@kbt.io>
# Copyright (C) 2005 2006 Refractions Research Inc.
# Copyright (C) 2001-2002 Vivid Solutions Inc.
# Copyright (C) 1995 Olivier Devillers <Olivier.Devillers@sophia.inria.fr>
#
# This is free software you can redistribute and/or modify it under
# the terms of the GNU Lesser General Public Licence as published
# by the Free Software Foundation.
# See the COPYING file for more information.
#
# ##### END LGPL LICENSE BLOCK #####
# <pep8 compliant>
# ----------------------------------------------------------
# Partial port (version 3.7.0) by: Stephen Leger (s-leger)
#
# ----------------------------------------------------------
from .shared import (
logger,
Envelope,
GeomTypeId,
GeometryComponentFilter,
GeometryTransformer
)
from .algorithms import (
LineSegment,
LineIntersector,
ItemVisitor
)
from .index_quadtree import Quadtree
class DouglasPeuckerLineSimplifier():
"""
* Simplifies a linestring (sequence of points) using
* the standard Douglas-Peucker algorithm.
"""
def __init__(self, coords):
self.coords = coords
self.usePt = []
self.tolerance = 0
@staticmethod
def simplify(coords, tolerance):
dpls = DouglasPeuckerLineSimplifier(coords)
dpls.tolerance = tolerance
return dpls._simplify()
def _simplify(self):
nCoords = len(self.coords)
if nCoords == 0:
return self.coords
self.usePt = [True for i in range(nCoords)]
self.simplifySection(0, nCoords - 1)
        # S.L addition: remove first/last point of closed curves when applicable
if self.coords[0] == self.coords[-1] and nCoords > 2:
c = self.coords
            # find one valid point on each end
start = -2
end = 1
if self.usePt[end] and self.usePt[start]:
seg = LineSegment(c[start], c[end])
distance = seg.distance(c[0])
# remove first and last point
if distance < self.tolerance:
self.usePt[0] = False
self.usePt[-1] = False
newCoords = [coord for i, coord in enumerate(self.coords) if self.usePt[i]]
# close the ring
newCoords.append(newCoords[0])
return newCoords
return [coord for i, coord in enumerate(self.coords) if self.usePt[i]]
def simplifySection(self, i: int, j: int) -> None:
c = self.coords
if i + 1 == j:
return
seg = LineSegment(c[i], c[j])
maxDistance = -1.0
maxIndex = i
for k in range(i + 1, j):
distance = seg.distance(c[k])
if distance > maxDistance:
maxDistance = distance
maxIndex = k
if maxDistance <= self.tolerance:
for k in range(i + 1, j):
self.usePt[k] = False
else:
self.simplifySection(i, maxIndex)
self.simplifySection(maxIndex, j)
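# Usage sketch: simplifying a raw coordinate sequence (plain tuples are
# used here for illustration; the port may expect its own coordinate
# type with a compatible distance computation):
#
#   coords = [(0, 0), (1, 0.01), (2, 0), (3, 2)]
#   DouglasPeuckerLineSimplifier.simplify(coords, 0.1)
#   # -> [(0, 0), (2, 0), (3, 2)]  (the near-collinear point is dropped)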
class DPTransformer(GeometryTransformer):
def __init__(self, tolerance: float):
GeometryTransformer.__init__(self)
self.tolerance = tolerance
self.setSkipTransformedInvalidInteriorRings = True
def transformCoordinates(self, coords, parent):
newPts = DouglasPeuckerLineSimplifier.simplify(coords, self.tolerance)
return self._factory.coordinateSequenceFactory.create(newPts)
def transformPolygon(self, geom, parent):
roughGeom = GeometryTransformer.transformPolygon(self, geom, parent)
if parent.type_id == GeomTypeId.GEOS_MULTIPOLYGON:
return roughGeom
return self.createValidArea(roughGeom)
def transformMultiPolygon(self, geom, parent):
roughGeom = GeometryTransformer.transformMultiPolygon(self, geom, parent)
return self.createValidArea(roughGeom)
def createValidArea(self, roughAreaGeom):
"""
* Creates a valid area geometry from one that possibly has
* bad topology (i.e. self-intersections).
* Since buffer can handle invalid topology, but always returns
* valid geometry, constructing a 0-width buffer "corrects" the
* topology.
* Note this only works for area geometries, since buffer always returns
* areas. This also may return empty geometries, if the input
* has no actual area.
*
* @param roughAreaGeom an area geometry possibly containing
* self-intersections
* @return a valid area geometry
"""
return roughAreaGeom.buffer(0)
class DouglasPeukerSimplifier():
"""
* Simplifies a Geometry using the standard Douglas-Peucker algorithm.
*
* Ensures that any polygonal geometries returned are valid.
* Simple lines are not guaranteed to remain simple after simplification.
*
* Note that in general D-P does not preserve topology -
* e.g. polygons can be split, collapse to lines or disappear
* interiors can be created or disappear,
* and lines can cross.
* To simplify geometry while preserving topology use TopologyPreservingSimplifier.
* (However, using D-P is significantly faster).
"""
def __init__(self, geom):
self.geom = geom
"""
* Sets the distance tolerance for the simplification.
*
* All vertices in the simplified geometry will be within this
* distance of the original geometry.
* The tolerance value must be non-negative. A tolerance value
* of zero is effectively a no-op.
*
* @param distanceTolerance the approximation tolerance to use
"""
self.tolerance = 0
@staticmethod
def simplify(geom, tolerance: float):
dps = DouglasPeukerSimplifier(geom)
logger.debug("******************************\n")
logger.debug("DouglasPeukerSimplifier.simplify()\n")
logger.debug("******************************")
dps.tolerance = tolerance
return dps.getResultGeometry()
def getResultGeometry(self):
dpt = DPTransformer(self.tolerance)
return dpt.transform(self.geom)
class LineStringTransformer(GeometryTransformer):
def __init__(self, linestringMap):
"""
* @param nMap - reference to LinesMap instance.
"""
# LinesMap
self.linestringMap = linestringMap
def transformCoordinates(self, coords, parent):
if parent.type_id == GeomTypeId.GEOS_LINESTRING:
taggedLine = self.linestringMap.find(parent)
newCoords = taggedLine.resultCoordinates
logger.debug("LineStringTransformer.transformCoordinates(%s)", len(newCoords))
return newCoords
else:
# for anything else (e.g. points) just copy the coordinates
return GeometryTransformer.transformCoordinates(self, coords, parent)
class LineStringMapBuilderFilter(GeometryComponentFilter):
"""
* A filter to add linear geometries to the linestring map
* with the appropriate minimum size constraint.
* Closed {@link LineString}s (including {@link LinearRing}s
* have a minimum output size constraint of 4,
* to ensure the output is valid.
* For all other linestrings, the minimum size is 2 points.
*
    * This class populates the given LineString=>TaggedLineString map
    * with newly created TaggedLineString objects.
    * (In the original C++ users had to delete the map's values; in this
    * Python port the garbage collector takes care of that.)
"""
def __init__(self, linestringMap):
# LinesMap
self.linestringMap = linestringMap
def filter_ro(self, geom):
if geom.type_id == GeomTypeId.GEOS_LINESTRING:
if geom.isClosed:
minSize = 4
else:
minSize = 2
taggedLine = TaggedLineString(geom, minSize)
self.linestringMap.insert(geom, taggedLine)
else:
return
class LineSegmentVisitor(ItemVisitor):
def __init__(self, seg):
        ItemVisitor.__init__(self)
self.seg = seg
self.items = []
def visitItem(self, seg):
if Envelope.static_intersects(seg.p0, seg.p1, self.seg.p0, self.seg.p1):
self.items.append(seg)
class LineSegmentIndex():
"""
"""
def __init__(self):
self.index = Quadtree()
def add(self, line):
for seg in line.segs:
self.addSegment(seg)
def addSegment(self, seg):
env = Envelope(seg.p0, seg.p1)
self.index.insert(env, seg)
def remove(self, seg):
env = Envelope(seg.p0, seg.p1)
self.index.remove(env, seg)
def query(self, seg):
env = Envelope(seg.p0, seg.p1)
visitor = LineSegmentVisitor(seg)
self.index.visit(env, visitor)
# LineSegment
return visitor.items
class TaggedLineSegment(LineSegment):
"""
* A geom.LineSegment which is tagged with its location in a geom.Geometry.
*
* Used to index the segments in a geometry and recover the segment locations
* from the index.
"""
def __init__(self, p0, p1=None, parent=None, index: int=0):
if p1 is None:
# using another TaggedLineSegment
p0, p1, parent, index = p0.p0, p0.p1, p0.parent, p0.index
LineSegment.__init__(self, p0, p1)
self.parent = parent
self.index = index
class TaggedLineString():
"""
* Contains and owns a list of TaggedLineSegments
"""
def __init__(self, parent, minimumSize: int=2) -> None:
# Linestring
self.parent = parent
self.minimumSize = minimumSize
# TaggedLineSegments
self.segs = []
self.result = []
self.init()
def init(self) -> None:
coords = self.parent.coords
if len(coords) > 0:
for i in range(len(coords) - 1):
seg = TaggedLineSegment(coords[i], coords[i + 1], self.parent, i)
self.segs.append(seg)
@property
def resultCoordinates(self):
coords = self.extractCoordinates(self.result)
return self.parent._factory.coordinateSequenceFactory.create(coords)
def asLineString(self):
return self.parent._factory.createLineString(self.resultCoordinates)
def asLinearRing(self):
return self.parent._factory.createLinearRing(self.resultCoordinates)
@property
def resultSize(self) -> int:
res = len(self.result)
if res > 0:
res += 1
return res
def addToResult(self, seg):
self.result.append(seg)
def extractCoordinates(self, segs):
coords = [seg.p0 for seg in segs]
coords.append(segs[-1].p1)
return coords
class TaggedLineStringSimplifier():
"""
* Simplifies a TaggedLineString, preserving topology
* (in the sense that no new intersections are introduced).
* Uses the recursive Douglas-Peucker algorithm.
"""
def __init__(self, inputIndex, outputIndex) -> None:
self.inputIndex = inputIndex
self.outputIndex = outputIndex
self.li = LineIntersector()
# TaggedLineString
self.line = None
self.coords = None
self.tolerance = 0
def simplify(self, line) -> None:
"""
* Simplifies the given {@link TaggedLineString}
* using the distance tolerance specified.
*
* @param line the linestring to simplify
"""
self.line = line
self.coords = line.parent.coords
if len(self.coords) == 0:
logger.warning("TaggedLineStringSimplifier.simplify parent.coords == 0")
return
self.simplifySection(0, len(self.coords) - 1, 0)
logger.debug("TaggedLineStringSimplifier.simplify segs:%s result:%s", len(self.line.segs), self.line.resultSize)
def simplifySection(self, i: int, j: int, depth: int) -> None:
depth += 1
sectionIndex = [0, 0]
if i + 1 == j:
self.line.addToResult(self.line.segs[i])
# leave this segment in the input index, for efficiency
return
isValidToSimplify = True
"""
        * The following logic ensures that there are enough points in the
        * output line.
        * If there are already more points than the minimum, there is
        * nothing to check.
        * Otherwise, if in the worst case there wouldn't be enough points,
        * don't flatten this segment (which avoids the worst case scenario)
"""
if self.line.resultSize < self.line.minimumSize:
worstCaseSize = depth + 1
if worstCaseSize < self.line.minimumSize:
isValidToSimplify = False
furthestPtIndex, distance = self.findFurthestPoint(self.coords, i, j)
# flattening must be less than distanceTolerance
if distance > self.tolerance:
isValidToSimplify = False
candidateSeg = LineSegment(self.coords[i], self.coords[j])
sectionIndex[0] = i
sectionIndex[1] = j
if self.hasBadIntersection(self.line, sectionIndex, candidateSeg):
isValidToSimplify = False
if isValidToSimplify:
# TaggedLineSegment
newSeg = self.flatten(i, j)
self.line.addToResult(newSeg)
return
self.simplifySection(i, furthestPtIndex, depth)
self.simplifySection(furthestPtIndex, j, depth)
def findFurthestPoint(self, coords, i: int, j: int):
seg = LineSegment(coords[i], coords[j])
maxDist = -1.0
maxIndex = i
for k in range(i + 1, j):
midPt = coords[k]
distance = seg.distance(midPt)
if distance > maxDist:
maxDist = distance
maxIndex = k
return maxIndex, maxDist
def hasBadIntersection(self, parentLine, sectionIndex: list, candidateSeg) -> bool:
if self.hasBadOutputIntersection(candidateSeg):
return True
if self.hasBadInputIntersection(parentLine, sectionIndex, candidateSeg):
return True
return False
def hasBadInputIntersection(self, parentLine, sectionIndex: list, candidateSeg) -> bool:
querySegs = self.inputIndex.query(candidateSeg)
for seg in querySegs:
if self.hasInteriorIntersection(seg, candidateSeg):
if self.isInLineSection(parentLine, sectionIndex, seg):
continue
return True
return False
def hasBadOutputIntersection(self, candidateSeg) -> bool:
querySegs = self.outputIndex.query(candidateSeg)
for seg in querySegs:
if self.hasInteriorIntersection(seg, candidateSeg):
return True
return False
def hasInteriorIntersection(self, seg0, seg1) -> bool:
self.li.computeLinesIntersection(seg0.p0, seg0.p1, seg1.p0, seg1.p1)
return self.li.isInteriorIntersection
def flatten(self, start: int, end: int):
p0 = self.coords[start]
p1 = self.coords[end]
newSeg = TaggedLineSegment(p0, p1)
self.remove(self.line, start, end)
self.outputIndex.addSegment(newSeg)
return newSeg
def isInLineSection(self, parentLine, sectionIndex: list, seg) -> bool:
"""
* Tests whether a segment is in a section of a TaggedLineString
*
* @param line
* @param sectionIndex
* @param seg
* @return
"""
if seg.parent is not self.line.parent:
return False
segIndex = seg.index
if segIndex >= sectionIndex[0] and segIndex < sectionIndex[1]:
return True
return False
def remove(self, line, start: int, end: int) -> None:
"""
* Remove the segs in the section of the line
*
        * @param line
        * @param start
        * @param end
"""
for i in range(start, end):
seg = line.segs[i]
self.inputIndex.remove(seg)
class TaggedLinesSimplifier():
def __init__(self):
# LineSegmentIndex
self.inputIndex = LineSegmentIndex()
self.outputIndex = LineSegmentIndex()
self.taggedlineSimplifier = TaggedLineStringSimplifier(self.inputIndex, self.outputIndex)
@property
def tolerance(self):
return self.taggedlineSimplifier.tolerance
@tolerance.setter
def tolerance(self, tolerance):
self.taggedlineSimplifier.tolerance = tolerance
def simplify(self, linestrings, start, end) -> None:
"""
* Simplify a set of {@link TaggedLineString}s
* @param linestrings set of TaggedLineString(s)
* @param start: start index
* @param end: end index
"""
for i in range(start, end):
self.inputIndex.add(linestrings[i])
for i in range(start, end):
self.taggedlineSimplifier.simplify(linestrings[i])
class LinesMap(dict):
def __init__(self):
dict.__init__(self)
    def insert(self, geom, taggedLine):
        # find() keys on id(geom) internally, so pass the geometry itself;
        # passing id(geom) here would look up id(id(geom)) and never match.
        tl = self.find(geom)
        if tl is None:
            self[id(geom)] = taggedLine
    def find(self, geom):
        return self.get(id(geom))
class TopologyPreservingSimplifier():
"""
* Simplifies a geometry, ensuring that
* the result is a valid geometry having the
* same dimension and number of components as the input.
*
* The simplification uses a maximum distance difference algorithm
* similar to the one used in the Douglas-Peucker algorithm.
*
* In particular, if the input is an areal geometry
* ( Polygon or MultiPolygon )
*
* - The result has the same number of exteriors and interiors (rings) as the input,
* in the same order
* - The result rings touch at <b>no more</b> than the number of touching point in the input
* (although they may touch at fewer points)
*
"""
def __init__(self, geom):
self.geom = geom
"""
* Sets the distance tolerance for the simplification.
*
* All vertices in the simplified geometry will be within this
* distance of the original geometry.
* The tolerance value must be non-negative. A tolerance value
* of zero is effectively a no-op.
*
* @param distanceTolerance the approximation tolerance to use
"""
self.tolerance = 0
self.lineSimplifier = TaggedLinesSimplifier()
@staticmethod
def simplify(geom, tolerance):
tps = TopologyPreservingSimplifier(geom)
tps.lineSimplifier.tolerance = tolerance
return tps.getResultGeometry()
def getResultGeometry(self):
if self.geom.is_empty:
return self.geom.clone()
linestringMap = LinesMap()
lsmbf = LineStringMapBuilderFilter(linestringMap)
self.geom.apply_ro(lsmbf)
linestrings = list(linestringMap.values())
logger.debug("TopologyPreservingSimplifier.getResultGeometry linestrings:%s", len(linestrings))
self.lineSimplifier.simplify(linestrings, 0, len(linestrings))
trans = LineStringTransformer(linestringMap)
return trans.transform(self.geom)
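# Usage sketch: both entry points take a geometry from this port plus a
# distance tolerance (construction of the geometry itself is not shown):
#
#   fast = DouglasPeukerSimplifier.simplify(geom, 0.5)       # may alter topology
#   safe = TopologyPreservingSimplifier.simplify(geom, 0.5)  # preserves topology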
|
s-leger/archipack
|
pygeos/simplify.py
|
Python
|
gpl-3.0
| 20,253
|
[
"VisIt"
] |
e29930827cff6727d443f5060786094c4bf48eaca2b20cdbc9c22ecdebb085be
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import numpy as np
from psi4 import core
from psi4.driver import p4util
from psi4.driver import qcdb
from psi4.driver.p4util import solvers
from .augmented_hessian import ah_iteration
from .. import proc_util
def print_iteration(mtype, niter, energy, de, orb_rms, ci_rms, nci, norb, stype):
core.print_out("%s %2d: % 18.12f % 1.4e %1.2e %1.2e %3d %3d %s\n" %
(mtype, niter, energy, de, orb_rms, ci_rms, nci, norb, stype))
def mcscf_solver(ref_wfn):
# Build CIWavefunction
core.prepare_options_for_module("DETCI")
ciwfn = core.CIWavefunction(ref_wfn)
# Hush a lot of CI output
ciwfn.set_print(0)
# Begin with a normal two-step
step_type = 'Initial CI'
total_step = core.Matrix("Total step", ciwfn.get_dimension('OA'), ciwfn.get_dimension('AV'))
start_orbs = ciwfn.get_orbitals("ROT").clone()
ciwfn.set_orbitals("ROT", start_orbs)
# Grab da options
mcscf_orb_grad_conv = core.get_option("DETCI", "MCSCF_R_CONVERGENCE")
mcscf_e_conv = core.get_option("DETCI", "MCSCF_E_CONVERGENCE")
mcscf_max_macroiteration = core.get_option("DETCI", "MCSCF_MAXITER")
mcscf_type = core.get_option("DETCI", "MCSCF_TYPE")
mcscf_d_file = core.get_option("DETCI", "CI_FILE_START") + 3
mcscf_nroots = core.get_option("DETCI", "NUM_ROOTS")
mcscf_wavefunction_type = core.get_option("DETCI", "WFN")
mcscf_ndet = ciwfn.ndet()
mcscf_nuclear_energy = ciwfn.molecule().nuclear_repulsion_energy()
mcscf_steplimit = core.get_option("DETCI", "MCSCF_MAX_ROT")
mcscf_rotate = core.get_option("DETCI", "MCSCF_ROTATE")
# DIIS info
mcscf_diis_start = core.get_option("DETCI", "MCSCF_DIIS_START")
mcscf_diis_freq = core.get_option("DETCI", "MCSCF_DIIS_FREQ")
mcscf_diis_error_type = core.get_option("DETCI", "MCSCF_DIIS_ERROR_TYPE")
mcscf_diis_max_vecs = core.get_option("DETCI", "MCSCF_DIIS_MAX_VECS")
# One-step info
mcscf_target_conv_type = core.get_option("DETCI", "MCSCF_ALGORITHM")
mcscf_so_start_grad = core.get_option("DETCI", "MCSCF_SO_START_GRAD")
mcscf_so_start_e = core.get_option("DETCI", "MCSCF_SO_START_E")
mcscf_current_step_type = 'Initial CI'
# Start with SCF energy and other params
scf_energy = core.get_variable("HF TOTAL ENERGY")
eold = scf_energy
norb_iter = 1
converged = False
ah_step = False
qc_step = False
approx_integrals_only = True
    # Fake info to start with the initial diagonalization
ediff = 1.e-4
orb_grad_rms = 1.e-3
# Grab needed objects
diis_obj = solvers.DIIS(mcscf_diis_max_vecs)
mcscf_obj = ciwfn.mcscf_object()
# Execute the rotate command
for rot in mcscf_rotate:
if len(rot) != 4:
raise p4util.PsiException("Each element of the MCSCF rotate command requires 4 arguements (irrep, orb1, orb2, theta).")
irrep, orb1, orb2, theta = rot
if irrep > ciwfn.Ca().nirrep():
raise p4util.PsiException("MCSCF_ROTATE: Expression %s irrep number is larger than the number of irreps" %
(str(rot)))
if max(orb1, orb2) > ciwfn.Ca().coldim()[irrep]:
raise p4util.PsiException("MCSCF_ROTATE: Expression %s orbital number exceeds number of orbitals in irrep" %
(str(rot)))
theta = np.deg2rad(theta)
x = ciwfn.Ca().nph[irrep][:, orb1].copy()
y = ciwfn.Ca().nph[irrep][:, orb2].copy()
xp = np.cos(theta) * x - np.sin(theta) * y
yp = np.sin(theta) * x + np.cos(theta) * y
ciwfn.Ca().nph[irrep][:, orb1] = xp
ciwfn.Ca().nph[irrep][:, orb2] = yp
# Limited RAS functionality
if core.get_local_option("DETCI", "WFN") == "RASSCF" and mcscf_target_conv_type != "TS":
core.print_out("\n Warning! Only the TS algorithm for RASSCF wavefunction is currently supported.\n")
core.print_out(" Switching to the TS algorithm.\n\n")
mcscf_target_conv_type = "TS"
# Print out headers
if mcscf_type == "CONV":
mtype = " @MCSCF"
core.print_out("\n ==> Starting MCSCF iterations <==\n\n")
core.print_out(" Iter Total Energy Delta E Orb RMS CI RMS NCI NORB\n")
elif mcscf_type == "DF":
mtype = " @DF-MCSCF"
core.print_out("\n ==> Starting DF-MCSCF iterations <==\n\n")
core.print_out(" Iter Total Energy Delta E Orb RMS CI RMS NCI NORB\n")
else:
mtype = " @AO-MCSCF"
core.print_out("\n ==> Starting AO-MCSCF iterations <==\n\n")
core.print_out(" Iter Total Energy Delta E Orb RMS CI RMS NCI NORB\n")
# Iterate !
for mcscf_iter in range(1, mcscf_max_macroiteration + 1):
# Transform integrals, diagonalize H
ciwfn.transform_mcscf_integrals(approx_integrals_only)
nci_iter = ciwfn.diag_h(abs(ediff) * 1.e-2, orb_grad_rms * 1.e-3)
# After the first diag we need to switch to READ
ciwfn.set_ci_guess("DFILE")
ciwfn.form_opdm()
ciwfn.form_tpdm()
ci_grad_rms = core.get_variable("DETCI AVG DVEC NORM")
# Update MCSCF object
Cocc = ciwfn.get_orbitals("DOCC")
Cact = ciwfn.get_orbitals("ACT")
Cvir = ciwfn.get_orbitals("VIR")
opdm = ciwfn.get_opdm(-1, -1, "SUM", False)
tpdm = ciwfn.get_tpdm("SUM", True)
mcscf_obj.update(Cocc, Cact, Cvir, opdm, tpdm)
current_energy = core.get_variable("MCSCF TOTAL ENERGY")
orb_grad_rms = mcscf_obj.gradient_rms()
ediff = current_energy - eold
# Print iterations
print_iteration(mtype, mcscf_iter, current_energy, ediff, orb_grad_rms, ci_grad_rms,
nci_iter, norb_iter, mcscf_current_step_type)
eold = current_energy
if mcscf_current_step_type == 'Initial CI':
mcscf_current_step_type = 'TS'
# Check convergence
if (orb_grad_rms < mcscf_orb_grad_conv) and (abs(ediff) < abs(mcscf_e_conv)) and\
(mcscf_iter > 3) and not qc_step:
core.print_out("\n %s has converged!\n\n" % mtype);
converged = True
break
# Which orbital convergence are we doing?
if ah_step:
converged, norb_iter, step = ah_iteration(mcscf_obj, print_micro=False)
norb_iter += 1
if converged:
mcscf_current_step_type = 'AH'
else:
core.print_out(" !Warning. Augmented Hessian did not converge. Taking an approx step.\n")
step = mcscf_obj.approx_solve()
mcscf_current_step_type = 'TS, AH failure'
else:
step = mcscf_obj.approx_solve()
step_type = 'TS'
maxstep = step.absmax()
if maxstep > mcscf_steplimit:
core.print_out(' Warning! Maxstep = %4.2f, scaling to %4.2f\n' % (maxstep, mcscf_steplimit))
step.scale(mcscf_steplimit / maxstep)
xstep = total_step.clone()
total_step.add(step)
# Do or add DIIS
if (mcscf_iter >= mcscf_diis_start) and ("TS" in mcscf_current_step_type):
# Figure out DIIS error vector
if mcscf_diis_error_type == "GRAD":
error = core.Matrix.triplet(ciwfn.get_orbitals("OA"),
mcscf_obj.gradient(),
ciwfn.get_orbitals("AV"),
False, False, True)
else:
error = step
diis_obj.add(total_step, error)
if not (mcscf_iter % mcscf_diis_freq):
total_step = diis_obj.extrapolate()
mcscf_current_step_type = 'TS, DIIS'
# Build the rotation by continuous updates
if mcscf_iter == 1:
totalU = mcscf_obj.form_rotation_matrix(total_step)
else:
xstep.axpy(-1.0, total_step)
xstep.scale(-1.0)
Ustep = mcscf_obj.form_rotation_matrix(xstep)
totalU = core.Matrix.doublet(totalU, Ustep, False, False)
# Build the rotation directly (not recommended)
# orbs_mat = mcscf_obj.Ck(start_orbs, total_step)
# Finally rotate and set orbitals
orbs_mat = core.Matrix.doublet(start_orbs, totalU, False, False)
ciwfn.set_orbitals("ROT", orbs_mat)
# Figure out what the next step should be
if (orb_grad_rms < mcscf_so_start_grad) and (abs(ediff) < abs(mcscf_so_start_e)) and\
(mcscf_iter >= 2):
if mcscf_target_conv_type == 'AH':
approx_integrals_only = False
ah_step = True
elif mcscf_target_conv_type == 'OS':
approx_integrals_only = False
mcscf_current_step_type = 'OS, Prep'
break
else:
continue
#raise p4util.PsiException("")
# If we converged do not do onestep
if converged or (mcscf_target_conv_type != 'OS'):
one_step_iters = []
# If we are not converged load in Dvec and build iters array
else:
one_step_iters = range(mcscf_iter + 1, mcscf_max_macroiteration + 1)
dvec = ciwfn.D_vector()
dvec.init_io_files(True)
dvec.read(0, 0)
dvec.symnormalize(1.0, 0)
ci_grad = ciwfn.new_civector(1, mcscf_d_file + 1, True, True)
ci_grad.set_nvec(1)
ci_grad.init_io_files(True)
# Loop for onestep
for mcscf_iter in one_step_iters:
# Transform integrals and update the MCSCF object
ciwfn.transform_mcscf_integrals(ciwfn.H(), False)
ciwfn.form_opdm()
ciwfn.form_tpdm()
# Update MCSCF object
Cocc = ciwfn.get_orbitals("DOCC")
Cact = ciwfn.get_orbitals("ACT")
Cvir = ciwfn.get_orbitals("VIR")
opdm = ciwfn.get_opdm(-1, -1, "SUM", False)
tpdm = ciwfn.get_tpdm("SUM", True)
mcscf_obj.update(Cocc, Cact, Cvir, opdm, tpdm)
orb_grad_rms = mcscf_obj.gradient_rms()
# Warning! Does not work for SA-MCSCF
current_energy = mcscf_obj.current_total_energy()
current_energy += mcscf_nuclear_energy
core.set_variable("CI ROOT %d TOTAL ENERGY" % 1, current_energy)
core.set_variable("CURRENT ENERGY", current_energy)
docc_energy = mcscf_obj.current_docc_energy()
ci_energy = mcscf_obj.current_ci_energy()
# Compute CI gradient
ciwfn.sigma(dvec, ci_grad, 0, 0)
ci_grad.scale(2.0, 0)
ci_grad.axpy(-2.0 * ci_energy, dvec, 0, 0)
ci_grad_rms = ci_grad.norm(0)
orb_grad_rms = mcscf_obj.gradient().rms()
ediff = current_energy - eold
print_iteration(mtype, mcscf_iter, current_energy, ediff, orb_grad_rms, ci_grad_rms,
nci_iter, norb_iter, mcscf_current_step_type)
mcscf_current_step_type = 'OS'
eold = current_energy
if (orb_grad_rms < mcscf_orb_grad_conv) and (abs(ediff) < abs(mcscf_e_conv)):
core.print_out("\n %s has converged!\n\n" % mtype);
converged = True
break
# Take a step
converged, norb_iter, nci_iter, step = qc_iteration(dvec, ci_grad, ciwfn, mcscf_obj)
# Rotate integrals to new frame
total_step.add(step)
orbs_mat = mcscf_obj.Ck(ciwfn.get_orbitals("ROT"), step)
ciwfn.set_orbitals("ROT", orbs_mat)
core.print_out(mtype + " Final Energy: %20.15f\n" % current_energy)
# Die if we did not converge
if (not converged):
if core.get_global_option("DIE_IF_NOT_CONVERGED"):
raise p4util.PsiException("MCSCF: Iterations did not converge!")
else:
core.print_out("\nWarning! MCSCF iterations did not converge!\n\n")
# Print out CI vector information
if mcscf_target_conv_type == 'OS':
dvec.close_io_files()
ci_grad.close_io_files()
# For orbital invariant methods we transform the orbitals to the natural or
# semicanonical basis. Frozen doubly occupied and virtual orbitals are not
# modified.
if core.get_option("DETCI", "WFN") == "CASSCF":
# Do we diagonalize the opdm?
if core.get_option("DETCI", "NAT_ORBS"):
ciwfn.ci_nat_orbs()
else:
ciwfn.semicanonical_orbs()
        # Re-transform integrals and update CI coefficients, OPDM, and TPDM
ciwfn.transform_mcscf_integrals(approx_integrals_only)
nci_iter = ciwfn.diag_h(abs(ediff) * 1.e-2, orb_grad_rms * 1.e-3)
ciwfn.set_ci_guess("DFILE")
ciwfn.form_opdm()
ciwfn.form_tpdm()
proc_util.print_ci_results(ciwfn, "MCSCF", scf_energy, current_energy, print_opdm_no=True)
# Set final energy
core.set_variable("CURRENT ENERGY", core.get_variable("MCSCF TOTAL ENERGY"))
# What do we need to cleanup?
if core.get_option("DETCI", "MCSCF_CI_CLEANUP"):
ciwfn.cleanup_ci()
if core.get_option("DETCI", "MCSCF_DPD_CLEANUP"):
ciwfn.cleanup_dpd()
del diis_obj
del mcscf_obj
return ciwfn
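# Usage sketch (driver level; mcscf_solver is reached through Psi4's
# procedure routing rather than called directly -- option names follow
# the DETCI options read above):
#
#   import psi4
#   psi4.geometry("""
#   0 1
#   O
#   H 1 0.96
#   H 1 0.96 2 104.5
#   """)
#   psi4.set_options({'basis': '6-31g', 'mcscf_e_convergence': 1e-8})
#   psi4.energy('casscf')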
|
jH0ward/psi4
|
psi4/driver/procrouting/mcscf/mcscf_solver.py
|
Python
|
lgpl-3.0
| 14,155
|
[
"Psi4"
] |
f8a425e3ae4a2442245ac2b2945f8eb8831be03f63a5552cea7e2f1d1012db9b
|
import bpy, sys, os, time, tempfile
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from splinter import Browser
bl_info = {
"name": "sheepit",
"description": "Addon for uploading your project to sheepIt",
"author": "maximmaxim345",
"version": (0, 1),
"blender": (2, 78, 0),
"location": "Propertys > Render",
"warning": "Early alpha",
"support": "TESTING",
"category": "Render"
}
class SheepitProperties(bpy.types.PropertyGroup):
version_options = [
("blender278c", "2.78c", '', 'BLENDER', 0),
("blender278c-filmic", "2.78c Filmic", '', 'BLENDER', 1),
("blender277a", "2.77a", '', 'BLENDER', 2),
("blender276b", "2.76b", '', 'BLENDER', 3),
("blender275a", "2.75a", '', 'BLENDER', 4),
("blender274", "2.74", '', 'BLENDER', 5),
("blender273a", "2.73a", '', 'BLENDER', 6),
("blender272b", "2.72b", '', 'BLENDER', 7),
("blender271", "2.71", '', 'BLENDER', 8),
("blender270a", "2.70a", '', 'BLENDER', 9),
("blender269", "2.69", '', 'BLENDER', 10),
("blender268a", "2.68a", '', 'BLENDER', 11),
("blender267b", "2.67b", '', 'BLENDER', 12),
("blender266a", "2.66a", '', 'BLENDER', 13),
("blender265a", "2.65a", '', 'BLENDER', 14)
]
Version = bpy.props.EnumProperty(
items=version_options,
description="Executable to use",
default="blender278c"
)
Renderable_by_all_members = bpy.props.BoolProperty(
name="renderable by all members",
description = "By default every members can render your project. If you want to restrict the access to your project do not check this box. On the project administration page will be able to modify this settings and add specific members to renderers",
default = True
)
RenderMode = bpy.props.EnumProperty(
name = "Rendering mode",
description = "Chose rendering mode",
items = [("singleframe", "Single Frame", "Render only one Frame"),
("animation", "Animation", "Render Animation")
]
)
stillSplitting = bpy.props.IntProperty(
min=4,
max=64,
default=4,
name="Split in tiles",
description="To increase the render time alowed by frame you can split each frame in tiles. Tiles will act as layers and be put in top of each other to create the final frame. You are allow of 25 min per tile."
)
animationSplitting = bpy.props.IntProperty(
min=1,
max=64,
default=1,
description="To increase the render time alowed by frame you can split each frame in tiles. Tiles will act as layers and be put in top of each other to create the final frame. You are allow of 25 min per tile."
)
sendProject=bpy.props.BoolProperty(
name="Send Project",
description="Send Project to the Renderfarm"
)
class sendProject(bpy.types.Operator):
bl_label = "Send Project"
bl_idname = "sheepit.send"
def invoke(self, context, event):
return context.window_manager.invoke_props_dialog(self)
def draw(self, context):
layout = self.layout
layout.label("Save file and upload to Sheepit.")
layout.label("(This could take a couple of minutes)")
def execute(self, context):
if not bpy.data.is_saved:
self.report({"ERROR"}, "Save your file first.")
return {"CANCELLED"}
        if bpy.context.scene.camera is None:
self.report({"ERROR"}, "No camera on scene")
return {"CANCELLED"}
bpy.ops.wm.save_mainfile()
a = send(self)
return {a}
class editLogin(bpy.types.Operator):
bl_label = "Edit credentials"
bl_idname = "sheepit.editlogin"
signIn_login = bpy.props.StringProperty(
name="username",
description="Username"
)
signIn_password = bpy.props.StringProperty(
name="password",
description="Password",
subtype="PASSWORD"
)
def invoke(self, context, event):
return context.window_manager.invoke_props_dialog(self)
def execute(self, context):
self.report({'INFO'}, "saved")
saveLogin(self.signIn_login, self.signIn_password)
return {"FINISHED"}
def draw(self, context):
layout = self.layout
layout.prop(self, "signIn_login")
layout.prop(self, "signIn_password")
class sheepItAddon(bpy.types.Panel):
bl_label = "SheepIt! renderfarm"
bl_idname = "SHEEP_IT"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "render"
def draw(self, context):
layout = self.layout
scene = context.scene
column = layout.column()
column.label(text="Settings:")
column.prop(scene.sheepIt, "Version")
column.prop(scene.sheepIt, "Renderable_by_all_members")
row = column.row()
row.prop(scene.sheepIt, "RenderMode", expand=True)
if scene.sheepIt.RenderMode == "singleframe":
column.prop(scene, "frame_current", text="Frame")
column.prop(scene.sheepIt, "stillSplitting", slider=True)
else:
column.prop(scene, "frame_start")
column.prop(scene, "frame_end")
column.prop(scene.sheepIt, "animationSplitting", slider=True)
layout.operator("sheepit.send")
class sheepIt_preferences(bpy.types.AddonPreferences):
bl_idname = __name__
def draw(self, context):
layout = self.layout
scene = context.scene
layout.operator("sheepIt.editlogin")
def saveLogin(username, password):
with open(os.path.join(os.path.dirname( __file__ ), '..', '..', "presets/sheepit.config"), "w") as f:
f.write(username + "\n" + password)
def getLogin():
try:
with open(os.path.join(os.path.dirname( __file__ ), '..', '..', "presets/sheepit.config"), "r") as f:
login = f.read().split("\n")
return login[0], login[1]
except FileNotFoundError:
return "failed", "fileNotFound"
def send(self):
usrname, passwd = getLogin()
if usrname=="failed":
if passwd=="fileNotFound":
self.report({"ERROR"}, "you must enter your account in first. You can find them in the user preferences under the addon")
return "CANCELLED"
pjsname = "phantomjs-win.exe"
pjspath = os.path.dirname(os.path.abspath(__file__))+ "\\" + pjsname
browser = Browser("phantomjs", executable_path=pjspath, service_log_path=tempfile.gettempdir() + "\\l.log")
browser.visit("https://www.sheepit-renderfarm.com/index.php")
Button1 = browser.find_by_css("button.navbar-toggle")
Button1.first.click()
time.sleep(1)
Button2 = browser.find_by_css("a.dropdown-toggle.dropdown-form-toggle")
Button2.first.click()
usernameField = browser.find_by_id("login-header_login")
usernameField.type(usrname)
passwordField = browser.find_by_id("login-header_password")
passwordField.type(passwd)
browser.screenshot(name="A", suffix='.png')
signInButton = browser.find_by_id("login-header_submit")
signInButton.click()
time.sleep(1)
browser.screenshot(name="B", suffix='.png')
if browser.is_element_present_by_id("login_login"):
self.report({"ERROR"}, "Check your credentials. You can find them in the user preferences under the addon")
return "CANCELLED"
browser.visit("https://www.sheepit-renderfarm.com/jobs.php?mode=add")
browser.screenshot(name="C", suffix='.png')
if browser.is_element_present_by_text("Your current limit is 2 projects."):
self.report({"ERROR"}, "You allready have 2 Projects")
return "CANCELLED"
browser.attach_file("addjob_archive", bpy.context.blend_data.filepath)
browser.screenshot(name="C", suffix='.png')
sendButton = browser.find_by_value("Send this file")
sendButton.first.click()
browser.screenshot(name="D", suffix='.png')
exeVersion = browser.find_by_id("addjob_exe")
    exeVersion.first.select(bpy.context.scene.sheepIt.Version)
renderableByAll = browser.find_by_name("public_render")
if bpy.context.scene.sheepIt.Renderable_by_all_members:
renderableByAll.check()
else:
renderableByAll.uncheck()
browser.choose("addjob_change_type_0", bpy.context.scene.sheepIt.RenderMode)
if bpy.context.scene.sheepIt.RenderMode=="singleframe":
browser.execute_script("addjob_split_sample_range_value_0.value = " + str(bpy.context.scene.sheepIt.stillSplitting))
else:
browser.execute_script("addjob_split_animation_sample_range_value_0.value = " + str(bpy.context.scene.sheepIt.animationSplitting))
browser.screenshot(name="E", suffix='.png')
okButton = browser.find_by_id("addjob_submit_0")
okButton.first.click()
time.sleep(2)
browser.screenshot(name="F", suffix='.png')
self.report({'INFO'}, "Uploaded")
browser.quit()
return "FINISHED"
def register():
bpy.utils.register_module(__name__)
bpy.types.Scene.sheepIt = bpy.props.PointerProperty(type=SheepitPropertys)
def unregister():
bpy.utils.unregister_module(__name__)
del bpy.types.Scene.sheepIt
if __name__ == "__main__":
register()
|
maximmaxim345/Sheep-it-blender-plugin
|
__init__.py
|
Python
|
gpl-3.0
| 8,315
|
[
"VisIt"
] |
b35b86c7840531ed1af8605867dcba7c9c4af3b1a84ccf14adb009e655cbd7a3
|
from DIRAC import gLogger
from DIRAC.Core.Tornado.Server.TornadoService import TornadoService
from DIRAC.DataManagementSystem.Service.FTS3ManagerHandler import FTS3ManagerHandlerMixin
sLog = gLogger.getSubLogger(__name__)
class TornadoFTS3ManagerHandler(FTS3ManagerHandlerMixin, TornadoService):
"""Tornado handler for the FTS3Manager"""
log = sLog
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/Service/TornadoFTS3ManagerHandler.py
|
Python
|
gpl-3.0
| 361
|
[
"DIRAC"
] |
e64bbe6670e45a6562c22705900dfd31c33f9ebb3cfa3430ce1e22f2c280a76a
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-dms-add-file
# Author : Stuart Paterson
########################################################################
"""
Upload a file to the grid storage and register it in the File Catalog
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
from DIRAC import S_OK
import os
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... LFN Path SE [GUID]' % Script.scriptName,
'Arguments:',
' LFN: Logical File Name',
' Path: Local path to the file',
' SE: DIRAC Storage Element',
' GUID: GUID to use in the registration (optional)' ,
'',
' ++ OR ++',
'',
'Usage:',
' %s [option|cfgfile] ... LocalFile' % Script.scriptName,
'Arguments:',
' LocalFile: Path to local file containing all the above, i.e.:',
' lfn1 localfile1 SE [GUID1]',
' lfn2 localfile2 SE [GUID2]'] )
)
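# Illustrative invocations (the LFN, local path and SE names below are
# hypothetical, not from the original source):
#   dirac-dms-add-file LFN:/vo/user/a/alice/test.txt ./test.txt SOME-disk-SE
#   dirac-dms-add-file bulk_uploads.txt
# where bulk_uploads.txt contains one "lfn localfile SE [GUID]" entry per line.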
overwrite = False
def setOverwrite( arg ):
global overwrite
overwrite = True
return S_OK()
Script.registerSwitch( "f", "force", "Force overwrite of existing file", setOverwrite )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 1 or len( args ) > 4:
Script.showHelp()
def getDict( item_list ):
"""
From the input list, populate the dictionary
"""
lfn_dict = {}
lfn_dict['lfn'] = item_list[0].replace( 'LFN:', '' ).replace( 'lfn:', '' )
lfn_dict['localfile'] = item_list[1]
lfn_dict['SE'] = item_list[2]
guid = None
if len( item_list ) > 3:
guid = item_list[3]
lfn_dict['guid'] = guid
return lfn_dict
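# For example (hypothetical values),
#   getDict(["LFN:/vo/user/f.txt", "./f.txt", "SOME-SE"])
# returns:
#   {'lfn': '/vo/user/f.txt', 'localfile': './f.txt', 'SE': 'SOME-SE', 'guid': None}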
lfns = []
if len( args ) == 1:
inputFileName = args[0]
if os.path.exists( inputFileName ):
inputFile = open( inputFileName, 'r' )
for line in inputFile:
line = line.rstrip()
items = line.split()
items[0] = items[0].replace( 'LFN:', '' ).replace( 'lfn:', '' )
lfns.append( getDict( items ) )
inputFile.close()
else:
lfns.append( getDict( args ) )
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC import gLogger
import DIRAC
exitCode = 0
dm = DataManager()
for lfn in lfns:
if not os.path.exists( lfn['localfile'] ):
gLogger.error( "File %s must exist locally" % lfn['localfile'] )
exitCode = 1
continue
if not os.path.isfile( lfn['localfile'] ):
gLogger.error( "%s is not a file" % lfn['localfile'] )
exitCode = 2
continue
gLogger.notice( "\nUploading %s" % lfn['lfn'] )
res = dm.putAndRegister( lfn['lfn'], lfn['localfile'], lfn['SE'], lfn['guid'], overwrite = overwrite )
if not res['OK']:
exitCode = 3
gLogger.error( 'Error: failed to upload %s to %s' % ( lfn['lfn'], lfn['SE'] ) )
continue
else:
gLogger.notice( 'Successfully uploaded file to %s' % lfn['SE'] )
DIRAC.exit( exitCode )
|
Andrew-McNab-UK/DIRAC
|
DataManagementSystem/scripts/dirac-dms-add-file.py
|
Python
|
gpl-3.0
| 3,489
|
[
"DIRAC"
] |
a679714fb5f30187e9d9c636126431a4e445b44db12ed9fbac647b545a175aec
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General utility functions."""
_INTERNAL = False # pylint: disable=g-statement-before-imports
import os.path
if not _INTERNAL:
import cv2 # pylint: disable=g-import-not-at-top
import numpy as np
import skimage
import skimage.measure
import skimage.morphology
import tensorflow as tf
from tensorflow_addons import image as tfa_image
from tensorflow_addons.utils import types as tfa_types
# Small number added to near-zero quantities to avoid numerical instability.
_EPS = 1e-7
def _gaussian_kernel(kernel_size, sigma, n_channels,
dtype):
x = tf.range(-kernel_size // 2 + 1, kernel_size // 2 + 1, dtype=dtype)
g = tf.math.exp(-(tf.pow(x, 2) / (2 * tf.pow(tf.cast(sigma, dtype), 2))))
g_norm2d = tf.pow(tf.reduce_sum(g), 2)
g_kernel = tf.tensordot(g, g, axes=0) / g_norm2d
g_kernel = tf.expand_dims(g_kernel, axis=-1)
return tf.expand_dims(tf.tile(g_kernel, (1, 1, n_channels)), axis=-1)
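# Shape contract sketch (assumed example values): for a 3-channel image,
# _gaussian_kernel(21, 3.0, 3, tf.float32) returns a [21, 21, 3, 1] tensor,
# which is the filter layout expected by tf.nn.depthwise_conv2d in
# apply_blur() below.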
def apply_blur(im, sigma):
"""Applies a Gaussian blur to an image tensor."""
blur = _gaussian_kernel(21, sigma, im.shape[-1], im.dtype)
im = tf.nn.depthwise_conv2d(im, blur, [1, 1, 1, 1], 'SAME')
return im
def remove_flare(combined, flare, gamma = 2.2):
"""Subtracts flare from the image in linear space.
Args:
combined: gamma-encoded image of a flare-polluted scene.
flare: gamma-encoded image of the flare.
gamma: [value in linear domain] = [gamma-encoded value] ^ gamma.
Returns:
Gamma-encoded flare-free scene.
"""
# Avoid zero. Otherwise, the gradient of pow() below will be undefined when
# gamma < 1.
combined = tf.clip_by_value(combined, _EPS, 1.0)
flare = tf.clip_by_value(flare, _EPS, 1.0)
combined_linear = tf.pow(combined, gamma)
flare_linear = tf.pow(flare, gamma)
scene_linear = combined_linear - flare_linear
# Avoid zero. Otherwise, the gradient of pow() below will be undefined when
# gamma > 1.
scene_linear = tf.clip_by_value(scene_linear, _EPS, 1.0)
scene = tf.pow(scene_linear, 1.0 / gamma)
return scene
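# Worked scalar example (values are illustrative): with gamma = 2.2,
# combined = 0.5 and flare = 0.25,
#   combined_linear ~= 0.5**2.2  ~= 0.218
#   flare_linear    ~= 0.25**2.2 ~= 0.047
#   scene           ~= (0.218 - 0.047)**(1 / 2.2) ~= 0.447
# so the recovered scene is slightly darker than the flare-polluted input.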
def quantize_8(image):
"""Converts and quantizes an image to 2^8 discrete levels in [0, 1]."""
q8 = tf.image.convert_image_dtype(image, tf.uint8, saturate=True)
return tf.cast(q8, tf.float32) * (1.0 / 255.0)
def write_image(image, path, overwrite = True):
"""Writes an image represented by a tensor to a PNG or JPG file."""
if not os.path.basename(path):
raise ValueError(f'The given path doesn\'t represent a file: {path}')
if tf.io.gfile.exists(path):
if tf.io.gfile.isdir(path):
raise ValueError(f'The given path is an existing directory: {path}')
if not overwrite:
print(f'Not overwriting an existing file at {path}')
return False
tf.io.gfile.remove(path)
else:
tf.io.gfile.makedirs(os.path.dirname(path))
image_u8 = tf.image.convert_image_dtype(image, tf.uint8, saturate=True)
if path.lower().endswith('.png'):
encoded = tf.io.encode_png(image_u8)
elif path.lower().endswith('.jpg') or path.lower().endswith('.jpeg'):
encoded = tf.io.encode_jpeg(image_u8, progressive=True)
else:
raise ValueError(f'Unsupported image format: {os.path.basename(path)}')
with tf.io.gfile.GFile(path, 'wb') as f:
f.write(encoded.numpy())
return True
def _center_transform(t, height, width):
"""Modifies a homography such that the origin is at the image center.
The transform matrices are represented using 8-vectors, following the
`tensorflow_addons.image` package.
Args:
t: A [8]- or [B, 8]-tensor representing projective transform(s) defined
relative to the origin (0, 0).
height: Image height, in pixels.
width: Image width, in pixels.
Returns:
The same transform(s), but applied relative to the image center (width / 2,
height / 2) instead.
"""
center_to_origin = tfa_image.translations_to_projective_transforms(
[-width / 2, -height / 2])
origin_to_center = tfa_image.translations_to_projective_transforms(
[width / 2, height / 2])
t = tfa_image.compose_transforms([center_to_origin, t, origin_to_center])
return t
def scales_to_projective_transforms(scales, height,
width):
"""Returns scaling transform matrices for a batched input.
The scaling is applied relative to the image center, instead of (0, 0).
Args:
scales: 2-element tensor [sx, sy], or a [B, 2]-tensor representing a batch of
such inputs. `sx` and `sy` are the scaling ratio in x and y respectively.
height: Image height, in pixels.
width: Image width, in pixels.
Returns:
A [B, 8]-tensor representing the transform that can be passed to
`tensorflow_addons.image.transform`.
"""
scales = tf.convert_to_tensor(scales)
if tf.rank(scales) == 1:
scales = scales[None, :]
scales_x = tf.reshape(scales[:, 0], (-1, 1))
scales_y = tf.reshape(scales[:, 1], (-1, 1))
zeros = tf.zeros_like(scales_x)
transform = tf.concat(
[scales_x, zeros, zeros, zeros, scales_y, zeros, zeros, zeros], axis=-1)
return _center_transform(transform, height, width)
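# Minimal usage sketch (assumed values, not from the original code): build a
# [1, 8] center-relative scaling transform for a 64x64 image and apply it.
#   t = scales_to_projective_transforms([2.0, 2.0], 64, 64)
#   scaled = tfa_image.transform(image, t)  # image: [B, 64, 64, C]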
def shears_to_projective_transforms(shears, height,
width):
"""Returns shear transform matrices for a batched input.
The shear is applied relative to the image center, instead of (0, 0).
Args:
shears: 2-element tensor [sx, sy], or a [B, 2]-tensor representing a batch of
such inputs. `sx` and `sy` are the shear angle (in radians) in x and y
respectively.
height: Image height, in pixels.
width: Image width, in pixels.
Returns:
A [B, 8]-tensor representing the transform that can be passed to
`tensorflow_addons.image.transform`.
"""
shears = tf.convert_to_tensor(shears)
if tf.rank(shears) == 1:
shears = shears[None, :]
shears_x = tf.reshape(tf.tan(shears[:, 0]), (-1, 1))
shears_y = tf.reshape(tf.tan(shears[:, 1]), (-1, 1))
ones = tf.ones_like(shears_x)
zeros = tf.zeros_like(shears_x)
transform = tf.concat(
[ones, shears_x, zeros, shears_y, ones, zeros, zeros, zeros], axis=-1)
return _center_transform(transform, height, width)
def apply_affine_transform(image,
rotation = 0.,
shift_x = 0.,
shift_y = 0.,
shear_x = 0.,
shear_y = 0.,
scale_x = 1.,
scale_y = 1.,
interpolation = 'bilinear'):
"""Applies affine transform(s) on the input images.
The rotation, shear, and scaling transforms are applied relative to the image
center, instead of (0, 0). The transform parameters can either be scalars
(applied to all images in the batch) or [B]-tensors (applied to each image
individually).
Args:
image: Input images in [B, H, W, C] format.
rotation: Rotation angle in radians. Positive value rotates the image
counter-clockwise.
shift_x: Translation in x direction, in pixels.
shift_y: Translation in y direction, in pixels.
shear_x: Shear angle (radians) in x direction.
shear_y: Shear angle (radians) in y direction.
scale_x: Scaling factor in x direction.
scale_y: Scaling factor in y direction.
interpolation: Interpolation mode. Supported values: 'nearest', 'bilinear'.
Returns:
The transformed images in [B, H, W, C] format.
"""
height, width = image.shape[1:3]
rotation = tfa_image.angles_to_projective_transforms(rotation, height, width)
shear = shears_to_projective_transforms([shear_x, shear_y], height, width)
scaling = scales_to_projective_transforms([scale_x, scale_y], height, width)
translation = tfa_image.translations_to_projective_transforms(
[shift_x, shift_y])
t = tfa_image.compose_transforms([rotation, shear, scaling, translation])
transformed = tfa_image.transform(image, t, interpolation=interpolation)
return transformed
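# Usage sketch (parameter values are illustrative, not from the original
# code):
#   images = tf.random.uniform([4, 128, 128, 3])
#   warped = apply_affine_transform(
#       images, rotation=0.1, shift_x=5., shift_y=-3., scale_x=1.2,
#       scale_y=1.2)
#   # warped keeps the input's [4, 128, 128, 3] shape.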
def get_highlight_mask(im,
threshold = 0.99,
dtype = tf.float32):
"""Returns a binary mask indicating the saturated regions in the input image.
Args:
im: Image tensor with shape [H, W, C], or [B, H, W, C].
threshold: A pixel is considered saturated if its channel-averaged intensity
is above this value.
dtype: Expected output data type.
Returns:
A `dtype` tensor with shape [H, W, 1] or [B, H, W, 1].
"""
binary_mask = tf.reduce_mean(im, axis=-1, keepdims=True) > threshold
mask = tf.cast(binary_mask, dtype)
return mask
def refine_mask(mask, morph_size = 0.01):
"""Refines a mask by applying mophological operations.
Args:
mask: A float array of shape [H, W] or [B, H, W].
morph_size: Size of the morphological kernel relative to the long side of
the image.
Returns:
Refined mask of shape [H, W] or [B, H, W].
"""
mask_size = max(np.shape(mask))
kernel_radius = .5 * morph_size * mask_size
kernel = skimage.morphology.disk(np.ceil(kernel_radius))
opened = skimage.morphology.binary_opening(mask, kernel)
return opened
def _create_disk_kernel(kernel_size):
x = np.arange(kernel_size) - (kernel_size - 1) / 2
xx, yy = np.meshgrid(x, x)
rr = np.sqrt(xx**2 + yy**2)
kernel = np.float32(rr <= np.max(x)) + _EPS
kernel = kernel / np.sum(kernel)
return kernel
def blend_light_source(scene_input, scene_pred):
"""Adds suspected light source in the input to the flare-free image."""
binary_mask = get_highlight_mask(scene_input, dtype=tf.bool).numpy()
binary_mask = np.squeeze(binary_mask, axis=-1)
binary_mask = refine_mask(binary_mask)
labeled = skimage.measure.label(binary_mask)
properties = skimage.measure.regionprops(labeled)
max_diameter = 0
for p in properties:
max_diameter = max(max_diameter, p['equivalent_diameter'])
mask = np.float32(binary_mask)
kernel_size = round(1.5 * max_diameter)
if kernel_size > 0:
kernel = _create_disk_kernel(kernel_size)
mask = cv2.filter2D(mask, -1, kernel)
mask = np.clip(mask * 3.0, 0.0, 1.0)
mask_rgb = np.stack([mask] * 3, axis=-1)
else:
mask_rgb = 0
blend = scene_input * mask_rgb + scene_pred * (1 - mask_rgb)
return blend
def normalize_white_balance(im):
"""Normalizes the RGB channels so the image appears neutral in color.
Args:
im: Image tensor with shape [H, W, C], or [B, H, W, C].
Returns:
Image(s) with equal channel mean. (The channel mean may be different across
images for batched input.)
"""
channel_mean = tf.reduce_mean(im, axis=(-3, -2), keepdims=True)
max_of_mean = tf.reduce_max(channel_mean, axis=(-3, -2, -1), keepdims=True)
normalized = max_of_mean * im / (channel_mean + _EPS)
return normalized
def remove_background(im):
"""Removes the DC component in the background.
Args:
im: Image tensor with shape [H, W, C], or [B, H, W, C].
Returns:
Image(s) with DC background removed. The white level (maximum pixel value)
stays the same.
"""
im_min = tf.reduce_min(im, axis=(-3, -2), keepdims=True)
im_max = tf.reduce_max(im, axis=(-3, -2), keepdims=True)
return (im - im_min) * im_max / (im_max - im_min + _EPS)
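# Worked scalar sketch (illustrative numbers): if a channel spans
# [im_min, im_max] = [0.2, 0.8], every pixel x maps to
# (x - 0.2) * 0.8 / (0.6 + _EPS), so 0.2 -> 0.0 while 0.8 stays at ~0.8:
# the DC offset is removed but the white level is preserved.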
|
google-research/google-research
|
flare_removal/python/utils.py
|
Python
|
apache-2.0
| 11,734
|
[
"Gaussian"
] |
53c81dc3f5519a0cd8bd6be80c3e31c0c1cc00df73d0a31f16ec83edd56be040
|
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2007, 2009-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2009 Mads Kiilerich <mads@kiilerich.com>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2012 FELD Boris <lothiraldan@gmail.com>
# Copyright (c) 2013-2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Ricardo Gemignani <ricardo.gemignani@gmail.com>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2018 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016-2017 Moises Lopez <moylop260@vauxoo.com>
# Copyright (c) 2016 Brian C. Lane <bcl@redhat.com>
# Copyright (c) 2017-2018 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018 Bryce Guinta <bryce.guinta@protonmail.com>
# Copyright (c) 2018 Bryce Guinta <bryce.paul.guinta@gmail.com>
# Copyright (c) 2018 Ville Skyttä <ville.skytta@upcloud.com>
# Copyright (c) 2018 Brian Shaginaw <brian.shaginaw@warbyparker.com>
# Copyright (c) 2018 Caio Carrara <ccarrara@redhat.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""some functions that may be useful for various checkers
"""
import builtins
from functools import lru_cache, partial
import itertools
import numbers
import re
import sys
import string
from typing import Optional, Iterable, Tuple, Callable, Set, Union, Match, Dict, List
import _string # pylint: disable=wrong-import-position, wrong-import-order
import astroid
from astroid.exceptions import _NonDeducibleTypeHierarchy
from astroid import bases as _bases
from astroid import scoped_nodes
BUILTINS_NAME = builtins.__name__
COMP_NODE_TYPES = (
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
)
PY3K = sys.version_info[0] == 3
if not PY3K:
EXCEPTIONS_MODULE = "exceptions"
else:
EXCEPTIONS_MODULE = "builtins"
ABC_METHODS = {
"abc.abstractproperty",
"abc.abstractmethod",
"abc.abstractclassmethod",
"abc.abstractstaticmethod",
}
ITER_METHOD = "__iter__"
AITER_METHOD = "__aiter__"
NEXT_METHOD = "__next__"
GETITEM_METHOD = "__getitem__"
CLASS_GETITEM_METHOD = "__class_getitem__"
SETITEM_METHOD = "__setitem__"
DELITEM_METHOD = "__delitem__"
CONTAINS_METHOD = "__contains__"
KEYS_METHOD = "keys"
# Dictionary which maps the number of expected parameters a
# special method can have to a set of special methods.
# The following keys are used to denote the parameters restrictions:
#
# * None: variable number of parameters
# * number: exactly that number of parameters
# * tuple: these are the odd ones. Basically it means that the function
# can work with any number of arguments from that tuple,
# although it's best to implement it in order to accept
# all of them.
_SPECIAL_METHODS_PARAMS = {
None: ("__new__", "__init__", "__call__"),
0: (
"__del__",
"__repr__",
"__str__",
"__bytes__",
"__hash__",
"__bool__",
"__dir__",
"__len__",
"__length_hint__",
"__iter__",
"__reversed__",
"__neg__",
"__pos__",
"__abs__",
"__invert__",
"__complex__",
"__int__",
"__float__",
"__neg__",
"__pos__",
"__abs__",
"__complex__",
"__int__",
"__float__",
"__index__",
"__enter__",
"__aenter__",
"__getnewargs_ex__",
"__getnewargs__",
"__getstate__",
"__reduce__",
"__copy__",
"__unicode__",
"__nonzero__",
"__await__",
"__aiter__",
"__anext__",
"__fspath__",
),
1: (
"__format__",
"__lt__",
"__le__",
"__eq__",
"__ne__",
"__gt__",
"__ge__",
"__getattr__",
"__getattribute__",
"__delattr__",
"__delete__",
"__instancecheck__",
"__subclasscheck__",
"__getitem__",
"__missing__",
"__delitem__",
"__contains__",
"__add__",
"__sub__",
"__mul__",
"__truediv__",
"__floordiv__",
"__mod__",
"__divmod__",
"__lshift__",
"__rshift__",
"__and__",
"__xor__",
"__or__",
"__radd__",
"__rsub__",
"__rmul__",
"__rtruediv__",
"__rmod__",
"__rdivmod__",
"__rpow__",
"__rlshift__",
"__rrshift__",
"__rand__",
"__rxor__",
"__ror__",
"__iadd__",
"__isub__",
"__imul__",
"__itruediv__",
"__ifloordiv__",
"__imod__",
"__ilshift__",
"__irshift__",
"__iand__",
"__ixor__",
"__ior__",
"__ipow__",
"__setstate__",
"__reduce_ex__",
"__deepcopy__",
"__cmp__",
"__matmul__",
"__rmatmul__",
"__div__",
),
2: ("__setattr__", "__get__", "__set__", "__setitem__", "__set_name__"),
3: ("__exit__", "__aexit__"),
(0, 1): ("__round__",),
}
SPECIAL_METHODS_PARAMS = {
name: params
for params, methods in _SPECIAL_METHODS_PARAMS.items()
for name in methods # type: ignore
}
PYMETHODS = set(SPECIAL_METHODS_PARAMS)
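# Lookup examples derived from the mapping above:
#   SPECIAL_METHODS_PARAMS["__exit__"]   # -> 3
#   SPECIAL_METHODS_PARAMS["__round__"]  # -> (0, 1), i.e. either arity
#   "__iter__" in PYMETHODS              # -> True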
class NoSuchArgumentError(Exception):
pass
def is_inside_except(node):
"""Returns true if node is inside the name of an except handler."""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
return current and current is current.parent.name
def is_inside_lambda(node: astroid.node_classes.NodeNG) -> bool:
"""Return true if given node is inside lambda"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Lambda):
return True
parent = parent.parent
return False
def get_all_elements(
node: astroid.node_classes.NodeNG
) -> Iterable[astroid.node_classes.NodeNG]:
"""Recursively returns all atoms in nested lists and tuples."""
if isinstance(node, (astroid.Tuple, astroid.List)):
for child in node.elts:
for e in get_all_elements(child):
yield e
else:
yield node
def clobber_in_except(
node: astroid.node_classes.NodeNG
) -> Tuple[bool, Tuple[str, str]]:
"""Checks if an assignment node in an except handler clobbers an existing
variable.
Returns (True, args for W0623) if assignment clobbers an existing variable,
(False, None) otherwise.
"""
if isinstance(node, astroid.AssignAttr):
return True, (node.attrname, "object %r" % (node.expr.as_string(),))
if isinstance(node, astroid.AssignName):
name = node.name
if is_builtin(name):
return (True, (name, "builtins"))
stmts = node.lookup(name)[1]
if stmts and not isinstance(
stmts[0].assign_type(),
(astroid.Assign, astroid.AugAssign, astroid.ExceptHandler),
):
return True, (name, "outer scope (line %s)" % stmts[0].fromlineno)
return False, None
def is_super(node: astroid.node_classes.NodeNG) -> bool:
"""return True if the node is referencing the "super" builtin function
"""
if getattr(node, "name", None) == "super" and node.root().name == BUILTINS_NAME:
return True
return False
def is_error(node: astroid.node_classes.NodeNG) -> bool:
"""return true if the function does nothing but raising an exception"""
for child_node in node.get_children():
if isinstance(child_node, astroid.Raise):
return True
return False
builtins = builtins.__dict__.copy() # type: ignore
SPECIAL_BUILTINS = ("__builtins__",) # '__path__', '__file__')
def is_builtin_object(node: astroid.node_classes.NodeNG) -> bool:
"""Returns True if the given node is an object from the __builtin__ module."""
return node and node.root().name == BUILTINS_NAME
def is_builtin(name: str) -> bool:
"""return true if <name> could be considered as a builtin defined by python
"""
return name in builtins or name in SPECIAL_BUILTINS # type: ignore
def is_defined_in_scope(
var_node: astroid.node_classes.NodeNG,
varname: str,
scope: astroid.node_classes.NodeNG,
) -> bool:
if isinstance(scope, astroid.If):
for node in scope.body:
if (
isinstance(node, astroid.Assign)
and any(
isinstance(target, astroid.AssignName) and target.name == varname
for target in node.targets
)
) or (isinstance(node, astroid.Nonlocal) and varname in node.names):
return True
elif isinstance(scope, (COMP_NODE_TYPES, astroid.For)):
for ass_node in scope.nodes_of_class(astroid.AssignName):
if ass_node.name == varname:
return True
elif isinstance(scope, astroid.With):
for expr, ids in scope.items:
if expr.parent_of(var_node):
break
if ids and isinstance(ids, astroid.AssignName) and ids.name == varname:
return True
elif isinstance(scope, (astroid.Lambda, astroid.FunctionDef)):
if scope.args.is_argument(varname):
# If the name is found inside a default value
# of a function, then let the search continue
# in the parent's tree.
if scope.args.parent_of(var_node):
try:
scope.args.default_value(varname)
scope = scope.parent
is_defined_in_scope(var_node, varname, scope)
except astroid.NoDefault:
pass
return True
if getattr(scope, "name", None) == varname:
return True
elif isinstance(scope, astroid.ExceptHandler):
if isinstance(scope.name, astroid.AssignName):
ass_node = scope.name
if ass_node.name == varname:
return True
return False
def is_defined_before(var_node: astroid.node_classes.NodeNG) -> bool:
"""return True if the variable node is defined by a parent node (list,
set, dict, or generator comprehension, lambda) or in a previous sibling
node on the same line (statement_defining ; statement_using)
"""
varname = var_node.name
_node = var_node.parent
while _node:
if is_defined_in_scope(var_node, varname, _node):
return True
_node = _node.parent
# possibly multiple statements on the same line using semi colon separator
stmt = var_node.statement()
_node = stmt.previous_sibling()
lineno = stmt.fromlineno
while _node and _node.fromlineno == lineno:
for assign_node in _node.nodes_of_class(astroid.AssignName):
if assign_node.name == varname:
return True
for imp_node in _node.nodes_of_class((astroid.ImportFrom, astroid.Import)):
if varname in [name[1] or name[0] for name in imp_node.names]:
return True
_node = _node.previous_sibling()
return False
def is_default_argument(node: astroid.node_classes.NodeNG) -> bool:
"""return true if the given Name node is used in function or lambda
default argument's value
"""
parent = node.scope()
if isinstance(parent, (astroid.FunctionDef, astroid.Lambda)):
for default_node in parent.args.defaults:
for default_name_node in default_node.nodes_of_class(astroid.Name):
if default_name_node is node:
return True
return False
def is_func_decorator(node: astroid.node_classes.NodeNG) -> bool:
"""return true if the name is used in function decorator"""
parent = node.parent
while parent is not None:
if isinstance(parent, astroid.Decorators):
return True
if parent.is_statement or isinstance(
parent,
(astroid.Lambda, scoped_nodes.ComprehensionScope, scoped_nodes.ListComp),
):
break
parent = parent.parent
return False
def is_ancestor_name(
frame: astroid.node_classes.NodeNG, node: astroid.node_classes.NodeNG
) -> bool:
"""return True if `frame` is an astroid.Class node with `node` in the
subtree of its bases attribute
"""
try:
bases = frame.bases
except AttributeError:
return False
for base in bases:
if node in base.nodes_of_class(astroid.Name):
return True
return False
def assign_parent(node: astroid.node_classes.NodeNG) -> astroid.node_classes.NodeNG:
"""return the higher parent which is not an AssignName, Tuple or List node
"""
while node and isinstance(node, (astroid.AssignName, astroid.Tuple, astroid.List)):
node = node.parent
return node
def overrides_a_method(class_node: astroid.node_classes.NodeNG, name: str) -> bool:
"""return True if <name> is a method overridden from an ancestor"""
for ancestor in class_node.ancestors():
if name in ancestor and isinstance(ancestor[name], astroid.FunctionDef):
return True
return False
def check_messages(*messages: str) -> Callable:
"""decorator to store messages that are handled by a checker method"""
def store_messages(func):
func.checks_msgs = messages
return func
return store_messages
class IncompleteFormatString(Exception):
"""A format string ended in the middle of a format specifier."""
class UnsupportedFormatCharacter(Exception):
"""A format character in a format string is not one of the supported
format characters."""
def __init__(self, index):
Exception.__init__(self, index)
self.index = index
def parse_format_string(
format_string: str
) -> Tuple[Set[str], int, Dict[str, str], List[str]]:
"""Parses a format string, returning a tuple of (keys, num_args), where keys
is the set of mapping keys in the format string, and num_args is the number
of arguments required by the format string. Raises
IncompleteFormatString or UnsupportedFormatCharacter if a
parse error occurs."""
keys = set()
key_types = dict()
pos_types = []
num_args = 0
def next_char(i):
i += 1
if i == len(format_string):
raise IncompleteFormatString
return (i, format_string[i])
i = 0
while i < len(format_string):
char = format_string[i]
if char == "%":
i, char = next_char(i)
# Parse the mapping key (optional).
key = None
if char == "(":
depth = 1
i, char = next_char(i)
key_start = i
while depth != 0:
if char == "(":
depth += 1
elif char == ")":
depth -= 1
i, char = next_char(i)
key_end = i - 1
key = format_string[key_start:key_end]
# Parse the conversion flags (optional).
while char in "#0- +":
i, char = next_char(i)
# Parse the minimum field width (optional).
if char == "*":
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the precision (optional).
if char == ".":
i, char = next_char(i)
if char == "*":
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the length modifier (optional).
if char in "hlL":
i, char = next_char(i)
# Parse the conversion type (mandatory).
if PY3K:
flags = "diouxXeEfFgGcrs%a"
else:
flags = "diouxXeEfFgGcrs%"
if char not in flags:
raise UnsupportedFormatCharacter(i)
if key:
keys.add(key)
key_types[key] = char
elif char != "%":
num_args += 1
pos_types.append(char)
i += 1
return keys, num_args, key_types, pos_types
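# Example derived from the parser above:
#   parse_format_string("%(user)s has %d items")
#   # -> ({"user"}, 1, {"user": "s"}, ["d"])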
def split_format_field_names(format_string) -> Tuple[str, Iterable[Tuple[bool, str]]]:
try:
return _string.formatter_field_name_split(format_string)
except ValueError:
raise IncompleteFormatString()
def collect_string_fields(format_string) -> Iterable[Optional[str]]:
""" Given a format string, return an iterator
of all the valid format fields. It handles nested fields
as well.
"""
formatter = string.Formatter()
try:
parseiterator = formatter.parse(format_string)
for result in parseiterator:
if all(item is None for item in result[1:]):
# not a replacement format
continue
name = result[1]
nested = result[2]
yield name
if nested:
for field in collect_string_fields(nested):
yield field
except ValueError as exc:
# Probably the format string is invalid.
if exc.args[0].startswith("cannot switch from manual"):
# On Jython, parsing a string with both manual
# and automatic positions will fail with a ValueError,
# while on CPython it will simply return the fields,
# the validation being done in the interpreter (?).
# We're just returning two mixed fields in order
# to trigger the format-combined-specification check.
yield ""
yield "1"
return
raise IncompleteFormatString(format_string)
def parse_format_method_string(
format_string: str
) -> Tuple[List[Tuple[str, List[Tuple[bool, str]]]], int, int]:
"""
Parses a PEP 3101 format string, returning a tuple of
(keyword_arguments, implicit_pos_args_cnt, explicit_pos_args),
where keyword_arguments is the set of mapping keys in the format string, implicit_pos_args_cnt
is the number of arguments required by the format string and
explicit_pos_args is the number of arguments passed with the position.
"""
keyword_arguments = []
implicit_pos_args_cnt = 0
explicit_pos_args = set()
for name in collect_string_fields(format_string):
if name and str(name).isdigit():
explicit_pos_args.add(str(name))
elif name:
keyname, fielditerator = split_format_field_names(name)
if isinstance(keyname, numbers.Number):
# In Python 2 it will return long which will lead
# to different output between 2 and 3
explicit_pos_args.add(str(keyname))
keyname = int(keyname)
try:
keyword_arguments.append((keyname, list(fielditerator)))
except ValueError:
raise IncompleteFormatString()
else:
implicit_pos_args_cnt += 1
return keyword_arguments, implicit_pos_args_cnt, len(explicit_pos_args)
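# Example derived from the function above:
#   parse_format_method_string("{a} {} {1}")
#   # -> ([("a", [])], 1, 1)
# i.e. one keyword field, one implicit positional field, and one explicit
# positional field.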
def is_attr_protected(attrname: str) -> bool:
"""return True if attribute name is protected (start with _ and some other
details), False otherwise.
"""
return (
attrname[0] == "_"
and attrname != "_"
and not (attrname.startswith("__") and attrname.endswith("__"))
)
def node_frame_class(
node: astroid.node_classes.NodeNG
) -> Optional[astroid.node_classes.NodeNG]:
"""return klass node for a method node (or a staticmethod or a
classmethod), return null otherwise
"""
klass = node.frame()
while klass is not None and not isinstance(klass, astroid.ClassDef):
if klass.parent is None:
klass = None
else:
klass = klass.parent.frame()
return klass
def is_attr_private(attrname: str) -> Optional[Match[str]]:
"""Check that attribute name is private (at least two leading underscores,
at most one trailing underscore)
"""
regex = re.compile("^_{2,}.*[^_]+_?$")
return regex.match(attrname)
def get_argument_from_call(
call_node: astroid.Call, position: int = None, keyword: str = None
) -> astroid.Name:
"""Returns the specified argument from a function call.
:param astroid.Call call_node: Node representing a function call to check.
:param int position: position of the argument.
:param str keyword: the keyword of the argument.
:returns: The node representing the argument, None if the argument is not found.
:rtype: astroid.Name
:raises ValueError: if both position and keyword are None.
:raises NoSuchArgumentError: if no argument at the provided position or with
the provided keyword.
"""
if position is None and keyword is None:
raise ValueError("Must specify at least one of: position or keyword.")
if position is not None:
try:
return call_node.args[position]
except IndexError:
pass
if keyword and call_node.keywords:
for arg in call_node.keywords:
if arg.arg == keyword:
return arg.value
raise NoSuchArgumentError
def inherit_from_std_ex(node: astroid.node_classes.NodeNG) -> bool:
"""
Return true if the given class node is subclass of
exceptions.Exception.
"""
if (
node.name in ("Exception", "BaseException")
and node.root().name == EXCEPTIONS_MODULE
):
return True
if not hasattr(node, "ancestors"):
return False
return any(inherit_from_std_ex(parent) for parent in node.ancestors(recurs=True))
def error_of_type(handler: astroid.ExceptHandler, error_type) -> bool:
"""
Check if the given exception handler catches
the given error_type.
The *handler* parameter is a node, representing an ExceptHandler node.
The *error_type* can be an exception, such as AttributeError,
the name of an exception, or it can be a tuple of errors.
The function will return True if the handler catches any of the
given errors.
"""
def stringify_error(error):
if not isinstance(error, str):
return error.__name__
return error
if not isinstance(error_type, tuple):
error_type = (error_type,) # type: ignore
expected_errors = {stringify_error(error) for error in error_type} # type: ignore
if not handler.type:
return True
return handler.catch(expected_errors)
def decorated_with_property(node: astroid.FunctionDef) -> bool:
""" Detect if the given function node is decorated with a property. """
if not node.decorators:
return False
for decorator in node.decorators.nodes:
if not isinstance(decorator, astroid.Name):
continue
try:
if _is_property_decorator(decorator):
return True
except astroid.InferenceError:
pass
return False
def _is_property_decorator(decorator: astroid.Name) -> bool:
for infered in decorator.infer():
if isinstance(infered, astroid.ClassDef):
if infered.root().name == BUILTINS_NAME and infered.name == "property":
return True
for ancestor in infered.ancestors():
if (
ancestor.name == "property"
and ancestor.root().name == BUILTINS_NAME
):
return True
return False
def decorated_with(func: astroid.FunctionDef, qnames: Iterable[str]) -> bool:
"""Determine if the `func` node has a decorator with the qualified name `qname`."""
decorators = func.decorators.nodes if func.decorators else []
for decorator_node in decorators:
try:
if any(
i is not None and i.qname() in qnames for i in decorator_node.infer()
):
return True
except astroid.InferenceError:
continue
return False
@lru_cache(maxsize=1024)
def unimplemented_abstract_methods(
node: astroid.node_classes.NodeNG, is_abstract_cb: astroid.FunctionDef = None
) -> Dict[str, astroid.node_classes.NodeNG]:
"""
Get the unimplemented abstract methods for the given *node*.
A method can be considered abstract if the callback *is_abstract_cb*
returns a ``True`` value. The check defaults to verifying that
a method is decorated with abstract methods.
The function will work only for new-style classes. For old-style
classes, it will simply return an empty dictionary.
For the rest of them, it will return a dictionary of abstract method
names and their inferred objects.
"""
if is_abstract_cb is None:
is_abstract_cb = partial(decorated_with, qnames=ABC_METHODS)
visited = {} # type: Dict[str, astroid.node_classes.NodeNG]
try:
mro = reversed(node.mro())
except NotImplementedError:
# Old style class, it will not have a mro.
return {}
except astroid.ResolveError:
# Probably inconsistent hierarchy, don't try
# to figure this out here.
return {}
for ancestor in mro:
for obj in ancestor.values():
infered = obj
if isinstance(obj, astroid.AssignName):
infered = safe_infer(obj)
if not infered:
# Might be an abstract function,
# but since we don't have enough information
# in order to take this decision, we're taking
# the *safe* decision instead.
if obj.name in visited:
del visited[obj.name]
continue
if not isinstance(infered, astroid.FunctionDef):
if obj.name in visited:
del visited[obj.name]
if isinstance(infered, astroid.FunctionDef):
# It's critical to use the original name,
# since after inferring, an object can be something
# else than expected, as in the case of the
# following assignment.
#
# class A:
# def keys(self): pass
# __iter__ = keys
abstract = is_abstract_cb(infered)
if abstract:
visited[obj.name] = infered
elif not abstract and obj.name in visited:
del visited[obj.name]
return visited
def find_try_except_wrapper_node(
node: astroid.node_classes.NodeNG
) -> Union[astroid.ExceptHandler, astroid.TryExcept]:
"""Return the ExceptHandler or the TryExcept node in which the node is."""
current = node
ignores = (astroid.ExceptHandler, astroid.TryExcept)
while current and not isinstance(current.parent, ignores):
current = current.parent
if current and isinstance(current.parent, ignores):
return current.parent
return None
def is_from_fallback_block(node: astroid.node_classes.NodeNG) -> bool:
"""Check if the given node is from a fallback import block."""
context = find_try_except_wrapper_node(node)
if not context:
return False
if isinstance(context, astroid.ExceptHandler):
other_body = context.parent.body
handlers = context.parent.handlers
else:
other_body = itertools.chain.from_iterable(
handler.body for handler in context.handlers
)
handlers = context.handlers
has_fallback_imports = any(
isinstance(import_node, (astroid.ImportFrom, astroid.Import))
for import_node in other_body
)
ignores_import_error = _except_handlers_ignores_exception(handlers, ImportError)
return ignores_import_error or has_fallback_imports
def _except_handlers_ignores_exception(
handlers: astroid.ExceptHandler, exception
) -> bool:
func = partial(error_of_type, error_type=(exception,))
return any(map(func, handlers))
def get_exception_handlers(
node: astroid.node_classes.NodeNG, exception=Exception
) -> List[astroid.ExceptHandler]:
"""Return the collections of handlers handling the exception in arguments.
Args:
node (astroid.NodeNG): A node that is potentially wrapped in a try except.
exception (builtin.Exception or str): exception or name of the exception.
Returns:
list: the collection of handlers that are handling the exception or None.
"""
context = find_try_except_wrapper_node(node)
if isinstance(context, astroid.TryExcept):
return [
handler for handler in context.handlers if error_of_type(handler, exception)
]
return None
def is_node_inside_try_except(node: astroid.Raise) -> bool:
"""Check if the node is directly under a Try/Except statement.
(but not under an ExceptHandler!)
Args:
node (astroid.Raise): the node raising the exception.
Returns:
bool: True if the node is inside a try/except statement, False otherwise.
"""
context = find_try_except_wrapper_node(node)
return isinstance(context, astroid.TryExcept)
def node_ignores_exception(
node: astroid.node_classes.NodeNG, exception=Exception
) -> bool:
"""Check if the node is in a TryExcept which handles the given exception.
If the exception is not given, the function is going to look for bare
excepts.
"""
managing_handlers = get_exception_handlers(node, exception)
if not managing_handlers:
return False
return any(managing_handlers)
def class_is_abstract(node: astroid.ClassDef) -> bool:
"""return true if the given class node should be considered as an abstract
class
"""
for method in node.methods():
if method.parent.frame() is node:
if method.is_abstract(pass_is_abstract=False):
return True
return False
def _supports_protocol_method(value: astroid.node_classes.NodeNG, attr: str) -> bool:
try:
attributes = value.getattr(attr)
except astroid.NotFoundError:
return False
first = attributes[0]
if isinstance(first, astroid.AssignName):
if isinstance(first.parent.value, astroid.Const):
return False
return True
def is_comprehension(node: astroid.node_classes.NodeNG) -> bool:
comprehensions = (
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
)
return isinstance(node, comprehensions)
def _supports_mapping_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(
value, GETITEM_METHOD
) and _supports_protocol_method(value, KEYS_METHOD)
def _supports_membership_test_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, CONTAINS_METHOD)
def _supports_iteration_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, ITER_METHOD) or _supports_protocol_method(
value, GETITEM_METHOD
)
def _supports_async_iteration_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, AITER_METHOD)
def _supports_getitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, GETITEM_METHOD)
def _supports_setitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, SETITEM_METHOD)
def _supports_delitem_protocol(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol_method(value, DELITEM_METHOD)
def _is_abstract_class_name(name: str) -> bool:
lname = name.lower()
is_mixin = lname.endswith("mixin")
is_abstract = lname.startswith("abstract")
is_base = lname.startswith("base") or lname.endswith("base")
return is_mixin or is_abstract or is_base
def is_inside_abstract_class(node: astroid.node_classes.NodeNG) -> bool:
while node is not None:
if isinstance(node, astroid.ClassDef):
if class_is_abstract(node):
return True
name = getattr(node, "name", None)
if name is not None and _is_abstract_class_name(name):
return True
node = node.parent
return False
def _supports_protocol(
value: astroid.node_classes.NodeNG, protocol_callback: astroid.FunctionDef
) -> bool:
if isinstance(value, astroid.ClassDef):
if not has_known_bases(value):
return True
# classobj can only be iterable if it has an iterable metaclass
meta = value.metaclass()
if meta is not None:
if protocol_callback(meta):
return True
if isinstance(value, astroid.BaseInstance):
if not has_known_bases(value):
return True
if value.has_dynamic_getattr():
return True
if protocol_callback(value):
return True
# TODO: this is not needed in astroid 2.0, where we can
# check the type using a virtual base class instead.
if (
isinstance(value, _bases.Proxy)
and isinstance(value._proxied, astroid.BaseInstance)
and has_known_bases(value._proxied)
):
value = value._proxied
return protocol_callback(value)
return False
def is_iterable(value: astroid.node_classes.NodeNG, check_async: bool = False) -> bool:
if check_async:
protocol_check = _supports_async_iteration_protocol
else:
protocol_check = _supports_iteration_protocol
return _supports_protocol(value, protocol_check)
def is_mapping(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol(value, _supports_mapping_protocol)
def supports_membership_test(value: astroid.node_classes.NodeNG) -> bool:
supported = _supports_protocol(value, _supports_membership_test_protocol)
return supported or is_iterable(value)
def supports_getitem(value: astroid.node_classes.NodeNG) -> bool:
if isinstance(value, astroid.ClassDef):
if _supports_protocol_method(value, CLASS_GETITEM_METHOD):
return True
return _supports_protocol(value, _supports_getitem_protocol)
def supports_setitem(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol(value, _supports_setitem_protocol)
def supports_delitem(value: astroid.node_classes.NodeNG) -> bool:
return _supports_protocol(value, _supports_delitem_protocol)
# TODO(cpopa): deprecate these or leave them as aliases?
@lru_cache(maxsize=1024)
def safe_infer(
node: astroid.node_classes.NodeNG, context=None
) -> Optional[astroid.node_classes.NodeNG]:
"""Return the inferred value for the given node.
Return None if inference failed or if there is some ambiguity (more than
one node has been inferred).
"""
try:
inferit = node.infer(context=context)
value = next(inferit)
except astroid.InferenceError:
return None
try:
next(inferit)
return None # None if there is ambiguity on the inferred node
except astroid.InferenceError:
return None # there is some kind of ambiguity
except StopIteration:
return value
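# Usage sketch (assumes astroid is importable at runtime):
#   node = astroid.extract_node("1 + 2")
#   inferred = safe_infer(node)   # astroid.Const with inferred.value == 3
# Unlike node.infer(), safe_infer() returns None instead of raising when
# inference fails or is ambiguous.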
def has_known_bases(klass: astroid.ClassDef, context=None) -> bool:
"""Return true if all base classes of a class could be inferred."""
try:
return klass._all_bases_known
except AttributeError:
pass
for base in klass.bases:
result = safe_infer(base, context=context)
# TODO: check for A->B->A->B pattern in class structure too?
if (
not isinstance(result, astroid.ClassDef)
or result is klass
or not has_known_bases(result, context=context)
):
klass._all_bases_known = False
return False
klass._all_bases_known = True
return True
def is_none(node: astroid.node_classes.NodeNG) -> bool:
return (
node is None
or (isinstance(node, astroid.Const) and node.value is None)
or (isinstance(node, astroid.Name) and node.name == "None")
)
def node_type(node: astroid.node_classes.NodeNG) -> Optional[type]:
"""Return the inferred type for `node`
If there is more than one possible type, or if inferred type is Uninferable or None,
return None
"""
# check there is only one possible type for the assign node. Else we
# don't handle it for now
types = set()
try:
for var_type in node.infer():
if var_type == astroid.Uninferable or is_none(var_type):
continue
types.add(var_type)
if len(types) > 1:
return None
except astroid.InferenceError:
return None
return types.pop() if types else None
def is_registered_in_singledispatch_function(node: astroid.FunctionDef) -> bool:
"""Check if the given function node is a singledispatch function."""
singledispatch_qnames = (
"functools.singledispatch",
"singledispatch.singledispatch",
)
if not isinstance(node, astroid.FunctionDef):
return False
decorators = node.decorators.nodes if node.decorators else []
for decorator in decorators:
# func.register are function calls
if not isinstance(decorator, astroid.Call):
continue
func = decorator.func
if not isinstance(func, astroid.Attribute) or func.attrname != "register":
continue
try:
func_def = next(func.expr.infer())
except astroid.InferenceError:
continue
if isinstance(func_def, astroid.FunctionDef):
# pylint: disable=redundant-keyword-arg; some flow inference goes wrong here
return decorated_with(func_def, singledispatch_qnames)
return False
def get_node_last_lineno(node: astroid.node_classes.NodeNG) -> int:
"""
Get the last lineno of the given node. For a simple statement this will just be node.lineno,
but for a node that has child statements (e.g. a method) this will be the lineno of the last
child statement recursively.
"""
# 'finalbody' is always the last clause in a try statement, if present
if getattr(node, "finalbody", False):
return get_node_last_lineno(node.finalbody[-1])
# For if, while, and for statements 'orelse' is always the last clause.
# For try statements 'orelse' is the last in the absence of a 'finalbody'
if getattr(node, "orelse", False):
return get_node_last_lineno(node.orelse[-1])
# try statements have the 'handlers' last if there is no 'orelse' or 'finalbody'
if getattr(node, "handlers", False):
return get_node_last_lineno(node.handlers[-1])
# All compound statements have a 'body'
if getattr(node, "body", False):
return get_node_last_lineno(node.body[-1])
# Not a compound statement
return node.lineno
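# Example (illustrative): for the source
#   try:          # line 1
#       pass      # line 2
#   finally:      # line 3
#       pass      # line 4
# get_node_last_lineno() on the try node returns 4, because 'finalbody'
# is checked before 'orelse', 'handlers' and 'body'.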
def is_postponed_evaluation_enabled(node: astroid.node_classes.NodeNG) -> bool:
"""Check if the postponed evaluation of annotations is enabled"""
name = "annotations"
module = node.root()
stmt = module.locals.get(name)
return (
stmt
and isinstance(stmt[0], astroid.ImportFrom)
and stmt[0].modname == "__future__"
)
def is_subclass_of(child: astroid.ClassDef, parent: astroid.ClassDef) -> bool:
"""
Check if first node is a subclass of second node.
:param child: Node to check for subclass.
:param parent: Node to check for superclass.
:returns: True if child is derived from parent. False otherwise.
"""
if not all(isinstance(node, astroid.ClassDef) for node in (child, parent)):
return False
for ancestor in child.ancestors():
try:
if astroid.helpers.is_subtype(ancestor, parent):
return True
except _NonDeducibleTypeHierarchy:
continue
return False
|
ekwoodrich/python-dvrip
|
env/lib/python3.5/site-packages/pylint/checkers/utils.py
|
Python
|
mit
| 40,766
|
[
"Brian"
] |
99a5f551295bd0e25068629318c28eb6cd451bad8079c5ccecf2df2883aad9bf
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import copy
from monty.serialization import loadfn # , dumpfn
from pymatgen.command_line.critic2_caller import Critic2Output
from pymatgen.core.structure import Molecule, Structure, FunctionalGroups, Site
from pymatgen.analysis.graphs import *
from pymatgen.analysis.local_env import (
MinimumDistanceNN,
MinimumOKeeffeNN,
OpenBabelNN,
CutOffDictNN,
VoronoiNN,
CovalentBondNN
)
from pymatgen.util.testing import PymatgenTest
try:
from openbabel import openbabel as ob
except ImportError:
ob = None
try:
import networkx as nx
import networkx.algorithms.isomorphism as iso
except ImportError:
nx = None
__author__ = "Matthew Horton, Evan Spotte-Smith"
__version__ = "0.1"
__maintainer__ = "Matthew Horton"
__email__ = "mkhorton@lbl.gov"
__status__ = "Beta"
__date__ = "August 2017"
module_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
class StructureGraphTest(PymatgenTest):
def setUp(self):
self.maxDiff = None
# trivial example, simple square lattice for testing
structure = Structure(Lattice.tetragonal(5.0, 50.0), ["H"], [[0, 0, 0]])
self.square_sg = StructureGraph.with_empty_graph(
structure, edge_weight_name="", edge_weight_units=""
)
self.square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(1, 0, 0))
self.square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(-1, 0, 0))
self.square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, 1, 0))
self.square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, -1, 0))
# TODO: decorating still fails because the structure graph gives a CN of 8 for this square lattice
# self.square_sg.decorate_structure_with_ce_info()
# body-centered square lattice for testing
structure = Structure(
Lattice.tetragonal(5.0, 50.0), ["H", "He"], [[0, 0, 0], [0.5, 0.5, 0.5]]
)
self.bc_square_sg = StructureGraph.with_empty_graph(
structure, edge_weight_name="", edge_weight_units=""
)
self.bc_square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(1, 0, 0))
self.bc_square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(-1, 0, 0))
self.bc_square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, 1, 0))
self.bc_square_sg.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, -1, 0))
self.bc_square_sg.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(0, 0, 0))
self.bc_square_sg.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(-1, 0, 0))
self.bc_square_sg.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(-1, -1, 0))
self.bc_square_sg.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(0, -1, 0))
# body-centered square lattice for testing
# directions reversed, should be equivalent to bc_square
structure = Structure(
Lattice.tetragonal(5.0, 50.0), ["H", "He"], [[0, 0, 0], [0.5, 0.5, 0.5]]
)
self.bc_square_sg_r = StructureGraph.with_empty_graph(
structure, edge_weight_name="", edge_weight_units=""
)
self.bc_square_sg_r.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(1, 0, 0))
self.bc_square_sg_r.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(-1, 0, 0))
self.bc_square_sg_r.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, 1, 0))
self.bc_square_sg_r.add_edge(0, 0, from_jimage=(0, 0, 0), to_jimage=(0, -1, 0))
self.bc_square_sg_r.add_edge(0, 1, from_jimage=(0, 0, 0), to_jimage=(0, 0, 0))
self.bc_square_sg_r.add_edge(1, 0, from_jimage=(-1, 0, 0), to_jimage=(0, 0, 0))
self.bc_square_sg_r.add_edge(1, 0, from_jimage=(-1, -1, 0), to_jimage=(0, 0, 0))
self.bc_square_sg_r.add_edge(1, 0, from_jimage=(0, -1, 0), to_jimage=(0, 0, 0))
# MoS2 example, structure graph obtained from critic2
# (not ground state, from mp-1023924, single layer)
stdout_file = os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"test_files/critic2/MoS2_critic2_stdout.txt",
)
with open(stdout_file, "r") as f:
reference_stdout = f.read()
self.structure = Structure.from_file(
os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"test_files/critic2/MoS2.cif",
)
)
c2o = Critic2Output(self.structure, reference_stdout)
self.mos2_sg = c2o.structure_graph(
edge_weight="bond_length", edge_weight_units="Å", include_critical_points=False
)
latt = Lattice.cubic(4.17)
species = ["Ni", "O"]
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
self.NiO = Structure.from_spacegroup(
225, latt, species, coords
).get_primitive_structure()
# BCC example.
self.bcc = Structure(
Lattice.cubic(5.0), ["He", "He"], [[0, 0, 0], [0.5, 0.5, 0.5]]
)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_inappropriate_construction(self):
# Check inappropriate strategy
with self.assertRaises(ValueError):
StructureGraph.with_local_env_strategy(self.NiO, CovalentBondNN())
def test_properties(self):
self.assertEqual(self.mos2_sg.name, "bonds")
self.assertEqual(self.mos2_sg.edge_weight_name, "bond_length")
self.assertEqual(self.mos2_sg.edge_weight_unit, "Å")
self.assertEqual(self.mos2_sg.get_coordination_of_site(0), 6)
self.assertEqual(len(self.mos2_sg.get_connected_sites(0)), 6)
self.assertTrue(
isinstance(self.mos2_sg.get_connected_sites(0)[0].site, PeriodicSite)
)
self.assertEqual(str(self.mos2_sg.get_connected_sites(0)[0].site.specie), "S")
self.assertAlmostEqual(
self.mos2_sg.get_connected_sites(0, jimage=(0, 0, 100))[0].site.frac_coords[
2
],
100.303027,
)
# these two graphs should be equivalent
for n in range(len(self.bc_square_sg)):
self.assertEqual(
self.bc_square_sg.get_coordination_of_site(n),
self.bc_square_sg_r.get_coordination_of_site(n),
)
# test we're not getting duplicate connected sites
# thanks to Jack D. Sundberg for reporting this bug
# known example where this bug occurred due to edge weights not being
# bit-for-bit identical in otherwise identical edges
nacl_lattice = Lattice(
[
[3.48543625, 0.0, 2.01231756],
[1.16181208, 3.28610081, 2.01231756],
[0.0, 0.0, 4.02463512],
]
)
nacl = Structure(nacl_lattice, ["Na", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
nacl_graph = StructureGraph.with_local_env_strategy(
nacl, CutOffDictNN({("Cl", "Cl"): 5.0})
)
self.assertEqual(len(nacl_graph.get_connected_sites(1)), 12)
self.assertEqual(len(nacl_graph.graph.get_edge_data(1, 1)), 12)
@unittest.skipIf(not nx, "NetworkX not present. Skipping...")
def test_set_node_attributes(self):
self.square_sg.set_node_attributes()
specie = nx.get_node_attributes(self.square_sg.graph, "specie")
coords = nx.get_node_attributes(self.square_sg.graph, "coords")
properties = nx.get_node_attributes(self.square_sg.graph, "properties")
for i in range(len(self.square_sg.structure)):
self.assertEqual(str(specie[i]), str(self.square_sg.structure[i].specie))
self.assertEqual(coords[i][0], self.square_sg.structure[i].coords[0])
self.assertEqual(coords[i][1], self.square_sg.structure[i].coords[1])
self.assertEqual(coords[i][2], self.square_sg.structure[i].coords[2])
self.assertEqual(properties[i], self.square_sg.structure[i].properties)
def test_edge_editing(self):
square = copy.deepcopy(self.square_sg)
square.alter_edge(
0,
0,
to_jimage=(1, 0, 0),
new_weight=0.0,
new_edge_properties={"foo": "bar"},
)
new_edge = square.graph.get_edge_data(0, 0)[0]
self.assertEqual(new_edge["weight"], 0.0)
self.assertEqual(new_edge["foo"], "bar")
square.break_edge(0, 0, to_jimage=(1, 0, 0))
self.assertEqual(len(square.graph.get_edge_data(0, 0)), 3)
def test_insert_remove(self):
struct_copy = copy.deepcopy(self.square_sg.structure)
square_copy = copy.deepcopy(self.square_sg)
# Ensure that insert_node appropriately wraps Structure.insert()
struct_copy.insert(1, "O", [0.5, 0.5, 0.5])
square_copy.insert_node(1, "O", [0.5, 0.5, 0.5])
self.assertEqual(struct_copy, square_copy.structure)
# Test that removal is also equivalent between Structure and StructureGraph.structure
struct_copy.remove_sites([1])
square_copy.remove_nodes([1])
self.assertEqual(struct_copy, square_copy.structure)
square_copy.insert_node(
1,
"O",
[0.5, 0.5, 0.5],
edges=[{"from_index": 1, "to_index": 0, "to_jimage": (0, 0, 0)}],
)
self.assertEqual(square_copy.get_coordination_of_site(1), 1)
# Test that StructureGraph.graph is correctly updated
square_copy.insert_node(
1,
"H",
[0.5, 0.5, 0.75],
edges=[{"from_index": 1, "to_index": 2, "to_jimage": (0, 0, 0)}],
)
square_copy.remove_nodes([1])
self.assertEqual(square_copy.graph.number_of_nodes(), 2)
self.assertEqual(square_copy.graph.number_of_edges(), 5)
def test_substitute(self):
structure = Structure.from_file(
os.path.join(
os.path.dirname(__file__), "..", "..", "..", "test_files", "Li2O.cif"
)
)
molecule = FunctionalGroups["methyl"]
structure_copy = copy.deepcopy(structure)
structure_copy_graph = copy.deepcopy(structure)
sg = StructureGraph.with_local_env_strategy(structure, MinimumDistanceNN())
sg_copy = copy.deepcopy(sg)
# Ensure that strings and molecules lead to equivalent substitutions
sg.substitute_group(1, molecule, MinimumDistanceNN)
sg_copy.substitute_group(1, "methyl", MinimumDistanceNN)
self.assertEqual(sg, sg_copy)
# Ensure that the underlying structure has been modified as expected
structure_copy.substitute(1, "methyl")
self.assertEqual(structure_copy, sg.structure)
# Test inclusion of graph dictionary
graph_dict = {
(0, 1): {"weight": 0.5},
(0, 2): {"weight": 0.5},
(0, 3): {"weight": 0.5},
}
sg_with_graph = StructureGraph.with_local_env_strategy(
structure_copy_graph, MinimumDistanceNN()
)
sg_with_graph.substitute_group(
1, "methyl", MinimumDistanceNN, graph_dict=graph_dict
)
edge = sg_with_graph.graph.get_edge_data(11, 13)[0]
self.assertEqual(edge["weight"], 0.5)
def test_auto_image_detection(self):
sg = StructureGraph.with_empty_graph(self.structure)
sg.add_edge(0, 0)
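# With no to_jimage supplied, add_edge asks StructureGraph to infer the
# periodic image automatically; ref_edges below enumerates images that this
# inference can produce (it is kept for reference, not asserted directly).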
ref_edges = [
(0, 0, {"to_jimage": (-1, -1, 0)}),
(0, 0, {"to_jimage": (-1, 0, 0)}),
(0, 0, {"to_jimage": (0, -1, 0)}),
(0, 0, {"to_jimage": (0, 1, 0)}),
(0, 0, {"to_jimage": (1, 0, 0)}),
]
self.assertEqual(len(list(sg.graph.edges(data=True))), 6)
def test_str(self):
square_sg_str_ref = """Structure Graph
Structure:
Full Formula (H1)
Reduced Formula: H2
abc : 5.000000 5.000000 50.000000
angles: 90.000000 90.000000 90.000000
Sites (1)
# SP a b c
--- ---- --- --- ---
0 H 0 0 0
Graph: bonds
from to to_image
---- ---- ------------
0 0 (1, 0, 0)
0 0 (-1, 0, 0)
0 0 (0, 1, 0)
0 0 (0, -1, 0)
"""
mos2_sg_str_ref = """Structure Graph
Structure:
Full Formula (Mo1 S2)
Reduced Formula: MoS2
abc : 3.190316 3.190315 17.439502
angles: 90.000000 90.000000 120.000006
Sites (3)
# SP a b c
--- ---- -------- -------- --------
0 Mo 0.333333 0.666667 0.213295
1 S 0.666667 0.333333 0.303027
2 S 0.666667 0.333333 0.123562
Graph: bonds
from to to_image bond_length (A)
---- ---- ------------ ------------------
0 1 (-1, 0, 0) 2.417e+00
0 1 (0, 0, 0) 2.417e+00
0 1 (0, 1, 0) 2.417e+00
0 2 (0, 1, 0) 2.417e+00
0 2 (-1, 0, 0) 2.417e+00
0 2 (0, 0, 0) 2.417e+00
"""
# don't care about testing Py 2.7 unicode support,
# change Å to A
self.mos2_sg.graph.graph["edge_weight_units"] = "A"
self.assertStrContentEqual(str(self.square_sg), square_sg_str_ref)
self.assertStrContentEqual(str(self.mos2_sg), mos2_sg_str_ref)
def test_mul(self):
square_sg_mul = self.square_sg * (2, 1, 1)
square_sg_mul_ref_str = """Structure Graph
Structure:
Full Formula (H2)
Reduced Formula: H2
abc : 10.000000 5.000000 50.000000
angles: 90.000000 90.000000 90.000000
Sites (2)
# SP a b c
--- ---- --- --- ---
0 H 0 0 0
1 H 0.5 0 -0
Graph: bonds
from to to_image
---- ---- ------------
0 0 (0, 1, 0)
0 0 (0, -1, 0)
0 1 (0, 0, 0)
0 1 (-1, 0, 0)
1 1 (0, 1, 0)
1 1 (0, -1, 0)
"""
square_sg_mul_actual_str = str(square_sg_mul)
# only testing bonds portion,
# the c frac_coord of the second H can vary from
# 0 to -0 depending on machine precision
square_sg_mul_ref_str = "\n".join(square_sg_mul_ref_str.splitlines()[11:])
square_sg_mul_actual_str = "\n".join(square_sg_mul_actual_str.splitlines()[11:])
self.assertStrContentEqual(square_sg_mul_actual_str, square_sg_mul_ref_str)
# test sequential multiplication
sq_sg_1 = self.square_sg * (2, 2, 1)
sq_sg_1 = sq_sg_1 * (2, 2, 1)
sq_sg_2 = self.square_sg * (4, 4, 1)
self.assertEqual(
sq_sg_1.graph.number_of_edges(), sq_sg_2.graph.number_of_edges()
)
# TODO: the below test still gives 8 != 4
# self.assertEqual(self.square_sg.get_coordination_of_site(0), 4)
mos2_sg_mul = self.mos2_sg * (3, 3, 1)
for idx in mos2_sg_mul.structure.indices_from_symbol("Mo"):
self.assertEqual(mos2_sg_mul.get_coordination_of_site(idx), 6)
mos2_sg_premul = StructureGraph.with_local_env_strategy(
self.structure * (3, 3, 1), MinimumDistanceNN()
)
self.assertTrue(mos2_sg_mul == mos2_sg_premul)
# test 3D Structure
nio_sg = StructureGraph.with_local_env_strategy(self.NiO, MinimumDistanceNN())
nio_sg = nio_sg * 3
for n in range(len(nio_sg)):
self.assertEqual(nio_sg.get_coordination_of_site(n), 6)
@unittest.skipIf(
not (which("neato") and which("fdp")), "graphviz executables not present"
)
def test_draw(self):
# draw MoS2 graph
self.mos2_sg.draw_graph_to_file(
"MoS2_single.pdf", image_labels=True, hide_image_edges=False
)
mos2_sg = self.mos2_sg * (9, 9, 1)
mos2_sg.draw_graph_to_file("MoS2.pdf", algo="neato")
# draw MoS2 graph that's been successively multiplied
mos2_sg_2 = self.mos2_sg * (3, 3, 1)
mos2_sg_2 = mos2_sg_2 * (3, 3, 1)
mos2_sg_2.draw_graph_to_file(
"MoS2_twice_mul.pdf", algo="neato", hide_image_edges=True
)
# draw MoS2 graph that's generated from a pre-multiplied Structure
mos2_sg_premul = StructureGraph.with_local_env_strategy(
self.structure * (3, 3, 1), MinimumDistanceNN()
)
mos2_sg_premul.draw_graph_to_file(
"MoS2_premul.pdf", algo="neato", hide_image_edges=True
)
# draw graph for a square lattice
self.square_sg.draw_graph_to_file("square_single.pdf", hide_image_edges=False)
square_sg = self.square_sg * (5, 5, 1)
square_sg.draw_graph_to_file(
"square.pdf", algo="neato", image_labels=True, node_labels=False
)
# draw graph for a body-centered square lattice
self.bc_square_sg.draw_graph_to_file(
"bc_square_single.pdf", hide_image_edges=False
)
bc_square_sg = self.bc_square_sg * (9, 9, 1)
bc_square_sg.draw_graph_to_file(
"bc_square.pdf", algo="neato", image_labels=False
)
# draw graph for a body-centered square lattice defined in an alternative way
self.bc_square_sg_r.draw_graph_to_file(
"bc_square_r_single.pdf", hide_image_edges=False
)
bc_square_sg_r = self.bc_square_sg_r * (9, 9, 1)
bc_square_sg_r.draw_graph_to_file(
"bc_square_r.pdf", algo="neato", image_labels=False
)
# delete generated test files
test_files = (
"bc_square_r_single.pdf",
"bc_square_r.pdf",
"bc_square_single.pdf",
"bc_square.pdf",
"MoS2_premul.pdf",
"MOS2_single.pdf",
"MoS2_twice_mul.pdf",
"MoS2.pdf",
"square_single.pdf",
"square.pdf",
)
for test_file in test_files:
os.remove(test_file)
def test_to_from_dict(self):
d = self.mos2_sg.as_dict()
sg = StructureGraph.from_dict(d)
d2 = sg.as_dict()
self.assertDictEqual(d, d2)
def test_from_local_env_and_equality_and_diff(self):
nn = MinimumDistanceNN()
sg = StructureGraph.with_local_env_strategy(self.structure, nn)
self.assertEqual(sg.graph.number_of_edges(), 6)
nn2 = MinimumOKeeffeNN()
sg2 = StructureGraph.with_local_env_strategy(self.structure, nn2)
self.assertTrue(sg == sg2)
self.assertTrue(sg == self.mos2_sg)
# TODO: find better test case where graphs are different
diff = sg.diff(sg2)
self.assertEqual(diff["dist"], 0)
self.assertEqual(self.square_sg.get_coordination_of_site(0), 4)
def test_from_edges(self):
edges = {
(0, 0, (0, 0, 0), (1, 0, 0)): None,
(0, 0, (0, 0, 0), (-1, 0, 0)): None,
(0, 0, (0, 0, 0), (0, 1, 0)): None,
(0, 0, (0, 0, 0), (0, -1, 0)): None,
}
structure = Structure(Lattice.tetragonal(5.0, 50.0), ["H"], [[0, 0, 0]])
sg = StructureGraph.with_edges(structure, edges)
self.assertEqual(sg, self.square_sg)
def test_extract_molecules(self):
structure_file = os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"test_files/H6PbCI3N_mp-977013_symmetrized.cif",
)
s = Structure.from_file(structure_file)
nn = MinimumDistanceNN()
sg = StructureGraph.with_local_env_strategy(s, nn)
molecules = sg.get_subgraphs_as_molecules()
self.assertEqual(molecules[0].composition.formula, "H3 C1")
self.assertEqual(len(molecules), 1)
molecules = self.mos2_sg.get_subgraphs_as_molecules()
self.assertEqual(len(molecules), 0)
def test_types_and_weights_of_connections(self):
types = self.mos2_sg.types_and_weights_of_connections
self.assertEqual(len(types["Mo-S"]), 6)
self.assertAlmostEqual(types["Mo-S"][0], 2.416931678417331)
def test_weight_statistics(self):
weight_statistics = self.mos2_sg.weight_statistics
self.assertEqual(len(weight_statistics["all_weights"]), 6)
self.assertAlmostEqual(weight_statistics["min"], 2.4169314100201875)
self.assertAlmostEqual(weight_statistics["variance"], 0)
def test_types_of_coordination_environments(self):
types = self.mos2_sg.types_of_coordination_environments()
self.assertListEqual(types, ["Mo-S(6)", "S-Mo(3)"])
types_anonymous = self.mos2_sg.types_of_coordination_environments(
anonymous=True
)
self.assertListEqual(types_anonymous, ["A-B(3)", "A-B(6)"])
class MoleculeGraphTest(unittest.TestCase):
def setUp(self):
cyclohexene = Molecule.from_file(
os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"test_files/graphs/cyclohexene.xyz",
)
)
self.cyclohexene = MoleculeGraph.with_empty_graph(
cyclohexene, edge_weight_name="strength", edge_weight_units=""
)
self.cyclohexene.add_edge(0, 1, weight=1.0)
self.cyclohexene.add_edge(1, 2, weight=1.0)
self.cyclohexene.add_edge(2, 3, weight=2.0)
self.cyclohexene.add_edge(3, 4, weight=1.0)
self.cyclohexene.add_edge(4, 5, weight=1.0)
self.cyclohexene.add_edge(5, 0, weight=1.0)
self.cyclohexene.add_edge(0, 6, weight=1.0)
self.cyclohexene.add_edge(0, 7, weight=1.0)
self.cyclohexene.add_edge(1, 8, weight=1.0)
self.cyclohexene.add_edge(1, 9, weight=1.0)
self.cyclohexene.add_edge(2, 10, weight=1.0)
self.cyclohexene.add_edge(3, 11, weight=1.0)
self.cyclohexene.add_edge(4, 12, weight=1.0)
self.cyclohexene.add_edge(4, 13, weight=1.0)
self.cyclohexene.add_edge(5, 14, weight=1.0)
self.cyclohexene.add_edge(5, 15, weight=1.0)
butadiene = Molecule.from_file(
os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"test_files/graphs/butadiene.xyz",
)
)
self.butadiene = MoleculeGraph.with_empty_graph(
butadiene, edge_weight_name="strength", edge_weight_units=""
)
self.butadiene.add_edge(0, 1, weight=2.0)
self.butadiene.add_edge(1, 2, weight=1.0)
self.butadiene.add_edge(2, 3, weight=2.0)
self.butadiene.add_edge(0, 4, weight=1.0)
self.butadiene.add_edge(0, 5, weight=1.0)
self.butadiene.add_edge(1, 6, weight=1.0)
self.butadiene.add_edge(2, 7, weight=1.0)
self.butadiene.add_edge(3, 8, weight=1.0)
self.butadiene.add_edge(3, 9, weight=1.0)
ethylene = Molecule.from_file(
os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"test_files/graphs/ethylene.xyz",
)
)
self.ethylene = MoleculeGraph.with_empty_graph(
ethylene, edge_weight_name="strength", edge_weight_units=""
)
self.ethylene.add_edge(0, 1, weight=2.0)
self.ethylene.add_edge(0, 2, weight=1.0)
self.ethylene.add_edge(0, 3, weight=1.0)
self.ethylene.add_edge(1, 4, weight=1.0)
self.ethylene.add_edge(1, 5, weight=1.0)
self.pc = Molecule.from_file(
os.path.join(module_dir, "..", "..", "..", "test_files", "graphs", "PC.xyz")
)
self.pc_edges = [
[5, 10],
[5, 12],
[5, 11],
[5, 3],
[3, 7],
[3, 4],
[3, 0],
[4, 8],
[4, 9],
[4, 1],
[6, 1],
[6, 0],
[6, 2],
]
self.pc_frag1 = Molecule.from_file(
os.path.join(
module_dir, "..", "..", "..", "test_files", "graphs", "PC_frag1.xyz"
)
)
self.pc_frag1_edges = [[0, 2], [4, 2], [2, 1], [1, 3]]
self.tfsi = Molecule.from_file(
os.path.join(
module_dir, "..", "..", "..", "test_files", "graphs", "TFSI.xyz"
)
)
self.tfsi_edges = (
[14, 1],
[1, 4],
[1, 5],
[1, 7],
[7, 11],
[7, 12],
[7, 13],
[14, 0],
[0, 2],
[0, 3],
[0, 6],
[6, 8],
[6, 9],
[6, 10],
)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
del self.ethylene
del self.butadiene
del self.cyclohexene
@unittest.skipIf(not ob, "OpenBabel not present. Skipping...")
def test_construction(self):
edges_frag = {(e[0], e[1]): {"weight": 1.0} for e in self.pc_frag1_edges}
mol_graph = MoleculeGraph.with_edges(self.pc_frag1, edges_frag)
# dumpfn(mol_graph.as_dict(), os.path.join(module_dir,"pc_frag1_mg.json"))
ref_mol_graph = loadfn(os.path.join(module_dir, "pc_frag1_mg.json"))
self.assertEqual(mol_graph, ref_mol_graph)
self.assertEqual(mol_graph.graph.adj, ref_mol_graph.graph.adj)
for node in mol_graph.graph.nodes:
self.assertEqual(
mol_graph.graph.nodes[node]["specie"],
ref_mol_graph.graph.nodes[node]["specie"],
)
for ii in range(3):
self.assertEqual(
mol_graph.graph.nodes[node]["coords"][ii],
ref_mol_graph.graph.nodes[node]["coords"][ii],
)
edges_pc = {(e[0], e[1]): {"weight": 1.0} for e in self.pc_edges}
mol_graph = MoleculeGraph.with_edges(self.pc, edges_pc)
# dumpfn(mol_graph.as_dict(), os.path.join(module_dir,"pc_mg.json"))
ref_mol_graph = loadfn(os.path.join(module_dir, "pc_mg.json"))
self.assertEqual(mol_graph, ref_mol_graph)
self.assertEqual(mol_graph.graph.adj, ref_mol_graph.graph.adj)
for node in mol_graph.graph:
self.assertEqual(
mol_graph.graph.nodes[node]["specie"],
ref_mol_graph.graph.nodes[node]["specie"],
)
for ii in range(3):
self.assertEqual(
mol_graph.graph.nodes[node]["coords"][ii],
ref_mol_graph.graph.nodes[node]["coords"][ii],
)
mol_graph_edges = MoleculeGraph.with_edges(self.pc, edges=edges_pc)
mol_graph_strat = MoleculeGraph.with_local_env_strategy(self.pc, OpenBabelNN())
self.assertTrue(mol_graph_edges.isomorphic_to(mol_graph_strat))
# Check inappropriate strategy
with self.assertRaises(ValueError):
MoleculeGraph.with_local_env_strategy(self.pc, VoronoiNN())
def test_properties(self):
self.assertEqual(self.cyclohexene.name, "bonds")
self.assertEqual(self.cyclohexene.edge_weight_name, "strength")
self.assertEqual(self.cyclohexene.edge_weight_unit, "")
self.assertEqual(self.cyclohexene.get_coordination_of_site(0), 4)
self.assertEqual(self.cyclohexene.get_coordination_of_site(2), 3)
self.assertEqual(self.cyclohexene.get_coordination_of_site(15), 1)
self.assertEqual(len(self.cyclohexene.get_connected_sites(0)), 4)
self.assertTrue(
isinstance(self.cyclohexene.get_connected_sites(0)[0].site, Site)
)
self.assertEqual(
str(self.cyclohexene.get_connected_sites(0)[0].site.specie), "H"
)
@unittest.skipIf(not nx, "NetworkX not present. Skipping...")
def test_set_node_attributes(self):
self.ethylene.set_node_attributes()
specie = nx.get_node_attributes(self.ethylene.graph, "specie")
coords = nx.get_node_attributes(self.ethylene.graph, "coords")
properties = nx.get_node_attributes(self.ethylene.graph, "properties")
for i in range(len(self.ethylene.molecule)):
self.assertEqual(str(specie[i]), str(self.ethylene.molecule[i].specie))
self.assertEqual(coords[i][0], self.ethylene.molecule[i].coords[0])
self.assertEqual(coords[i][1], self.ethylene.molecule[i].coords[1])
self.assertEqual(coords[i][2], self.ethylene.molecule[i].coords[2])
self.assertEqual(properties[i], self.ethylene.molecule[i].properties)
def test_coordination(self):
molecule = Molecule(["C", "C"], [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
mg = MoleculeGraph.with_empty_graph(molecule)
self.assertEqual(mg.get_coordination_of_site(0), 0)
self.assertEqual(self.cyclohexene.get_coordination_of_site(0), 4)
def test_edge_editing(self):
self.cyclohexene.alter_edge(
0, 1, new_weight=0.0, new_edge_properties={"foo": "bar"}
)
new_edge = self.cyclohexene.graph.get_edge_data(0, 1)[0]
self.assertEqual(new_edge["weight"], 0.0)
self.assertEqual(new_edge["foo"], "bar")
self.cyclohexene.break_edge(0, 1)
self.assertTrue(self.cyclohexene.graph.get_edge_data(0, 1) is None)
# Replace the now-broken edge
self.cyclohexene.add_edge(0, 1, weight=1.0)
def test_insert_remove(self):
mol_copy = copy.deepcopy(self.ethylene.molecule)
eth_copy = copy.deepcopy(self.ethylene)
# Ensure that insert_node appropriately wraps Molecule.insert()
mol_copy.insert(1, "O", [0.5, 0.5, 0.5])
eth_copy.insert_node(1, "O", [0.5, 0.5, 0.5])
self.assertEqual(mol_copy, eth_copy.molecule)
# Test that removal is also equivalent between Molecule and MoleculeGraph.molecule
mol_copy.remove_sites([1])
eth_copy.remove_nodes([1])
self.assertEqual(mol_copy, eth_copy.molecule)
eth_copy.insert_node(
1,
"O",
[0.5, 0.5, 0.5],
edges=[{"from_index": 1, "to_index": 2}, {"from_index": 1, "to_index": 3}],
)
self.assertEqual(eth_copy.get_coordination_of_site(1), 2)
# Test that MoleculeGraph.graph is correctly updated
eth_copy.remove_nodes([1, 2])
self.assertEqual(eth_copy.graph.number_of_nodes(), 5)
self.assertEqual(eth_copy.graph.number_of_edges(), 2)
@unittest.skipIf(not nx, "NetworkX not present. Skipping...")
def test_split(self):
bonds = [(0, 1), (4, 5)]
alterations = {
(2, 3): {"weight": 1.0},
(0, 5): {"weight": 2.0},
(1, 2): {"weight": 2.0},
(3, 4): {"weight": 2.0},
}
# Perform retro-Diels-Alder reaction - turn product into reactants
reactants = self.cyclohexene.split_molecule_subgraphs(
bonds, allow_reverse=True, alterations=alterations
)
self.assertTrue(isinstance(reactants, list))
reactants = sorted(reactants, key=len)
# After alterations, reactants should be ethylene and butadiene
self.assertEqual(reactants[0], self.ethylene)
self.assertEqual(reactants[1], self.butadiene)
with self.assertRaises(MolGraphSplitError):
self.cyclohexene.split_molecule_subgraphs([(0, 1)])
# Test naive charge redistribution
hydroxide = Molecule(["O", "H"], [[0, 0, 0], [0.5, 0.5, 0.5]], charge=-1)
oh_mg = MoleculeGraph.with_empty_graph(hydroxide)
oh_mg.add_edge(0, 1)
new_mgs = oh_mg.split_molecule_subgraphs([(0, 1)])
for mg in new_mgs:
if str(mg.molecule[0].specie) == "O":
self.assertEqual(mg.molecule.charge, -1)
else:
self.assertEqual(mg.molecule.charge, 0)
# Test that remapping of nodes to atoms works correctly
diff_species = Molecule(
["C", "I", "Cl", "Br", "F"],
[
[0.8314, -0.2682, -0.9102],
[1.3076, 1.3425, -2.2038],
[-0.8429, -0.7410, -1.1554],
[1.9841, -1.7636, -1.2953],
[1.0098, 0.1231, 0.3916],
],
)
diff_spec_mg = MoleculeGraph.with_empty_graph(diff_species)
diff_spec_mg.add_edge(0, 1)
diff_spec_mg.add_edge(0, 2)
diff_spec_mg.add_edge(0, 3)
diff_spec_mg.add_edge(0, 4)
for i in range(1, 5):
bond = (0, i)
split_mgs = diff_spec_mg.split_molecule_subgraphs([bond])
for split_mg in split_mgs:
species = nx.get_node_attributes(split_mg.graph, "specie")
for j in range(len(split_mg.graph.nodes)):
atom = split_mg.molecule[j]
self.assertEqual(species[j], str(atom.specie))
@unittest.skipIf(not nx, "NetworkX not present. Skipping...")
def test_build_unique_fragments(self):
edges = {(e[0], e[1]): None for e in self.pc_edges}
mol_graph = MoleculeGraph.with_edges(self.pc, edges)
unique_fragment_dict = mol_graph.build_unique_fragments()
unique_fragments = []
for key in unique_fragment_dict:
for fragment in unique_fragment_dict[key]:
unique_fragments.append(fragment)
self.assertEqual(len(unique_fragments), 295)
nm = iso.categorical_node_match("specie", "ERROR")
for ii in range(295):
# Test that each fragment is unique
for jj in range(ii + 1, 295):
self.assertFalse(
nx.is_isomorphic(
unique_fragments[ii].graph,
unique_fragments[jj].graph,
node_match=nm,
)
)
# Test that each fragment correctly maps between Molecule and graph
self.assertEqual(
len(unique_fragments[ii].molecule),
len(unique_fragments[ii].graph.nodes),
)
species = nx.get_node_attributes(unique_fragments[ii].graph, "specie")
coords = nx.get_node_attributes(unique_fragments[ii].graph, "coords")
mol = unique_fragments[ii].molecule
for ss, site in enumerate(mol):
self.assertEqual(str(species[ss]), str(site.specie))
self.assertEqual(coords[ss][0], site.coords[0])
self.assertEqual(coords[ss][1], site.coords[1])
self.assertEqual(coords[ss][2], site.coords[2])
# Test that each fragment is connected
self.assertTrue(nx.is_connected(unique_fragments[ii].graph.to_undirected()))
def test_find_rings(self):
rings = self.cyclohexene.find_rings(including=[0])
self.assertEqual(
sorted(rings[0]), [(0, 5), (1, 0), (2, 1), (3, 2), (4, 3), (5, 4)]
)
no_rings = self.butadiene.find_rings()
self.assertEqual(no_rings, [])
def test_isomorphic(self):
ethylene = Molecule.from_file(
os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"test_files/graphs/ethylene.xyz",
)
)
# switch carbons
ethylene[0], ethylene[1] = ethylene[1], ethylene[0]
eth_copy = MoleculeGraph.with_edges(
ethylene,
{
(0, 1): {"weight": 2},
(1, 2): {"weight": 1},
(1, 3): {"weight": 1},
(0, 4): {"weight": 1},
(0, 5): {"weight": 1},
},
)
# The carbon-swapped graph should still be isomorphic to the original
self.assertTrue(self.ethylene.isomorphic_to(eth_copy))
# If they are equal, they must also be isomorphic
eth_copy = copy.deepcopy(self.ethylene)
self.assertTrue(self.ethylene.isomorphic_to(eth_copy))
self.assertFalse(self.butadiene.isomorphic_to(self.ethylene))
def test_substitute(self):
molecule = FunctionalGroups["methyl"]
molgraph = MoleculeGraph.with_edges(
molecule,
{(0, 1): {"weight": 1}, (0, 2): {"weight": 1}, (0, 3): {"weight": 1}},
)
eth_mol = copy.deepcopy(self.ethylene)
eth_str = copy.deepcopy(self.ethylene)
# Ensure that strings and molecules lead to equivalent substitutions
eth_mol.substitute_group(5, molecule, MinimumDistanceNN)
eth_str.substitute_group(5, "methyl", MinimumDistanceNN)
self.assertEqual(eth_mol, eth_str)
graph_dict = {
(0, 1): {"weight": 1.0},
(0, 2): {"weight": 1.0},
(0, 3): {"weight": 1.0},
}
eth_mg = copy.deepcopy(self.ethylene)
eth_graph = copy.deepcopy(self.ethylene)
# Check that MoleculeGraph input is handled properly
eth_graph.substitute_group(
5, molecule, MinimumDistanceNN, graph_dict=graph_dict
)
eth_mg.substitute_group(5, molgraph, MinimumDistanceNN)
self.assertEqual(eth_graph.graph.get_edge_data(5, 6)[0]["weight"], 1.0)
self.assertEqual(eth_mg, eth_graph)
def test_replace(self):
eth_copy_sub = copy.deepcopy(self.ethylene)
eth_copy_repl = copy.deepcopy(self.ethylene)
# First, perform a substitution as above
eth_copy_sub.substitute_group(5, "methyl", MinimumDistanceNN)
eth_copy_repl.replace_group(5, "methyl", MinimumDistanceNN)
# Test that replacement on a terminal atom is equivalent to substitution
self.assertEqual(eth_copy_repl.molecule, eth_copy_sub.molecule)
self.assertEqual(eth_copy_repl, eth_copy_sub)
# Methyl carbon should have coordination 4
self.assertEqual(eth_copy_repl.get_coordination_of_site(5), 4)
# Now swap one functional group for another
eth_copy_repl.replace_group(5, "amine", MinimumDistanceNN)
self.assertEqual(
["C", "C", "H", "H", "H", "N", "H", "H"],
[str(s) for s in eth_copy_repl.molecule.species],
)
self.assertEqual(len(eth_copy_repl.graph.nodes), 8)
# Amine nitrogen should have coordination 3
self.assertEqual(eth_copy_repl.get_coordination_of_site(5), 3)
def test_as_from_dict(self):
d = self.cyclohexene.as_dict()
mg = MoleculeGraph.from_dict(d)
d2 = mg.as_dict()
self.assertEqual(str(d), str(d2))
if __name__ == "__main__":
unittest.main()
|
gVallverdu/pymatgen
|
pymatgen/analysis/tests/test_graphs.py
|
Python
|
mit
| 38,581
|
[
"pymatgen"
] |
7e0f07b41c5a286ba8ea0a790fc8c194140ccec8c85408e8ff4eba8301f0ef87
|
import numpy as np
import ipdb
import matplotlib.pylab as plt
from os import system
plt.ioff()
# some global distances and RA/Dec's.
klyr_to_center = 27.2
lyr_per_pc = 3.2616
ra_gal_center = 266.4
dec_gal_center = -28.94
ra_gal_npole = 192.85
dec_gal_npole = 27.13
ra_ecl_npole = 270.0
dec_ecl_npole = 66.561
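# NOTE: these are (J2000) equatorial coordinates of the Galactic center,
# the north Galactic pole, and the north ecliptic pole. Dotting a star's
# sky direction against the first two (see dot_ra_dec below) projects its
# position onto a Galactic-plane view: x toward the Galactic center,
# y toward Galactic north.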
# some global font stuff
fontsize=20
yspacing = fontsize/20*0.15
textcolor = 'white'
charwidth=29
def main():
d_right, d_up, mag = get_star_data()
fig, ax = initialize_figure()
draw_galaxy(ax)
plot_stars(ax, d_right, d_up, mag, mag_min=-99, mag_max=3.0, color='#FFFFFC', psize=16, alpha=0.6)
plot_stars(ax, d_right, d_up, mag, mag_min=3.0, mag_max=6.0, color='#FFCC66', psize=5, alpha=0.3, stride=1)
add_local_info(ax)
convert_to_html_and_open(fig)
def draw_galaxy(ax):
from matplotlib.patches import Ellipse
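# The galaxy "glow" effect below is built by stacking several nearly
# transparent concentric ellipses (alpha ~ 0.04-0.1); their overlap
# approximates a soft radial falloff without using an actual image.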
# MW main disk
alpha=0.04
nell = 10
for w,h in zip(np.linspace(60,80,nell), np.linspace(1.0,2.0,nell)):
ax.add_patch(Ellipse(xy=(klyr_to_center,0), width=w, height=h, angle=0, color='white', alpha=alpha))
# MW bulge
alpha=0.06
nell = 5
for w,h in zip(1.3*np.linspace(4,8,nell), np.linspace(4,8,nell)):
ax.add_patch(Ellipse(xy=(klyr_to_center,0), width=w, height=h, angle=0, color='white', alpha=alpha))
text_rectangle(klyr_to_center+0.1, 0.3, charwidth-2, fontsize, textcolor, yspacing, 'The center of the Milky Way is about 30,000 light-years away, and the light we see from it today was emitted back when humans began farming.')
text_rectangle(klyr_to_center, -1.12, 4*charwidth, fontsize, '#995555', yspacing, 'more down this way.')
text_rectangle(klyr_to_center, -1.12-0.17, 4*charwidth, fontsize, '#995555', yspacing, 'zoom way out.')
# LMC
ra_lmc = 80.893
dec_lmc = -69.75611
dot_gal_center_lmc = dot_ra_dec(ra_gal_center, dec_gal_center, ra_lmc, dec_lmc)
dot_gal_npole_lmc = dot_ra_dec(ra_gal_npole, dec_gal_npole, ra_lmc, dec_lmc)
print 'lmc ',np.sqrt(1.-dot_gal_center_lmc**2.-dot_gal_npole_lmc**2.),dot_gal_center_lmc,dot_gal_npole_lmc
d_klyr_lmc = 163.
d_right_lmc = d_klyr_lmc*dot_gal_center_lmc
d_up_lmc = d_klyr_lmc*dot_gal_npole_lmc
diam_klyr_lmc = 14.
alpha=0.1
nell = 5
for w,h in zip(np.linspace(0.3*diam_klyr_lmc,diam_klyr_lmc,nell), np.linspace(0.3*diam_klyr_lmc,diam_klyr_lmc,nell)):
ax.add_patch(Ellipse(xy=(d_right_lmc,d_up_lmc), width=w, height=h, angle=0, color='white', alpha=alpha))
text_rectangle(d_right_lmc+0.1, d_up_lmc+1.6, charwidth, fontsize, textcolor, yspacing, "The Large Magellanic Cloud is a dwarf galaxy and one of the Milky Way's satellites. It's located about 160,000 light-years from Earth.")
text_rectangle(d_right_lmc+0.1, d_up_lmc+0.65, charwidth, fontsize, textcolor, yspacing, "Note that many of the objects shown on this page are even farther away than they appear to be. You just can't see the distances into and out of the screen.")
# SMC
ra_smc = 13.18666
dec_smc = -72.828
dot_gal_center_smc = dot_ra_dec(ra_gal_center, dec_gal_center, ra_smc, dec_smc)
dot_gal_npole_smc = dot_ra_dec(ra_gal_npole, dec_gal_npole, ra_smc, dec_smc)
print 'smc ',np.sqrt(1.-dot_gal_center_smc**2.-dot_gal_npole_smc**2.),dot_gal_center_smc,dot_gal_npole_smc
d_klyr_smc = 197.
d_right_smc = d_klyr_smc*dot_gal_center_smc
d_up_smc = d_klyr_smc*dot_gal_npole_smc
diam_klyr_smc = 7.
alpha=0.1
nell = 5
for w,h in zip(np.linspace(0.3*diam_klyr_smc,diam_klyr_smc,nell), np.linspace(0.3*diam_klyr_smc,diam_klyr_smc,nell)):
ax.add_patch(Ellipse(xy=(d_right_smc,d_up_smc), width=w, height=h, angle=0, color='white', alpha=alpha))
text_rectangle(d_right_smc+0.1, d_up_smc+0.4, charwidth, fontsize, textcolor, yspacing, "The Small Magellanic Cloud is about 200,000 light-years from Earth.")
text_rectangle(d_right_smc+0.1, d_up_smc-0.2, charwidth, fontsize, textcolor, yspacing, "The light we see from it originated right around the time that homo sapiens first appeared in Africa.")
# M31
ra_m31 = 10.6845
dec_m31 = 41.2691
dot_gal_center_m31 = dot_ra_dec(ra_gal_center, dec_gal_center, ra_m31, dec_m31)
dot_gal_npole_m31 = dot_ra_dec(ra_gal_npole, dec_gal_npole, ra_m31, dec_m31)
print 'm31 ',np.sqrt(1.-dot_gal_center_m31**2.-dot_gal_npole_m31**2.),dot_gal_center_m31,dot_gal_npole_m31
d_klyr_m31 = 2.54*1000.
d_right_m31 = d_klyr_m31*dot_gal_center_m31
d_up_m31 = d_klyr_m31*dot_gal_npole_m31
# M31 main disk
alpha=0.04
nell = 10
for w,h in zip(np.linspace(60,80,nell), 4.*np.linspace(1.1,2.4,nell)):
ax.add_patch(Ellipse(xy=(d_right_m31,d_up_m31), width=w, height=h, angle=-30, color='white', alpha=alpha))
# M31 bulge
alpha=0.05
nell = 5
for w,h in zip(1.4*1.3*np.linspace(4,8,nell), 1.4*np.linspace(4,8,nell)):
ax.add_patch(Ellipse(xy=(d_right_m31,d_up_m31), width=w, height=h, angle=-30, color='white', alpha=alpha))
#x0=-1.75
#y0=d_up_m31+2.2
x0=-2.32
y0=d_up_m31+1.2
text_rectangle(d_right_m31+x0, y0, charwidth+3, fontsize, textcolor, yspacing, "The Andromeda Galaxy is the nearest spiral galaxy to the Milky Way but is still very far away, about 2.5 million light-years. It is by far the most distant object visible to the naked eye, and the light we see from it was produced way back when hominid pre-humans were first learning to use stone tools.")
text_rectangle(d_right_m31+x0, y0-1.6, charwidth, fontsize, textcolor, yspacing, "It's incredible that, thanks to some fluke of physics and evolution, we can see this far with our eyeballs!")
x=d_right_m31+0.25; y=y0-1.6  # d_up_m31-1.1
text_rectangle(x, y, charwidth, fontsize, textcolor, yspacing, "Modern telescopes allow us to see much, much farther, but this is the end of the line for objects visible to the naked eye, and for this page. I promise that, no matter how much you zoom out, you will never never get to the Cosmic Microwave Background.")
# draw connecting lines
linestyle='--'
color='#660000'
lw=1.0
d_right_mc = 0.5*(d_right_lmc+d_right_smc)
d_up_mc = 0.5*(d_up_lmc+d_up_smc)
plt.plot([0,klyr_to_center],[0,0], linestyle, color=color,linewidth=lw)
plt.plot([klyr_to_center,d_right_lmc],[0,d_up_lmc], linestyle, color=color,linewidth=lw)
plt.plot([d_right_lmc, d_right_smc],[d_up_lmc, d_up_smc], linestyle, color=color,linewidth=lw)
plt.plot([d_right_smc, d_right_m31],[d_up_smc, d_up_m31], linestyle, color=color,linewidth=lw)
def add_local_info(ax):
from matplotlib.patches import Circle
lw = 1
ax.add_patch(Circle((0,0), radius=0.200, color='lightgray', fill=False, linewidth=lw, linestyle='-'))
ax.add_patch(Circle((0,0), radius=0.500, color='lightgray', fill=False, linewidth=lw, linestyle='-'))
plt.text(0,0.21, '200 lyr', color=textcolor, size=fontsize, horizontalalignment='center')
plt.text(0,0.51, '500 lyr', color=textcolor, size=fontsize, horizontalalignment='center')
coltmp = '#333399'
plt.text(0,-0.007, '.', color=coltmp, size=fontsize, horizontalalignment='center')
plt.text(0,-0.1, 'you', color=coltmp, size=fontsize, horizontalalignment='center')
coltmp = '#666666'
plt.text(0, -0.68-0.045, 'star positions from', color=coltmp, size=fontsize, horizontalalignment='center')
plt.text(0, -0.81-0.045, 'the Hipparcos satellite', color=coltmp, size=fontsize, horizontalalignment='center')
x0=-2.6; y0=0.4
text_rectangle(x0, y0, charwidth, fontsize, textcolor, yspacing, 'The stars you can see with your naked eye are typically several hundreds of light-years away.')
text_rectangle(x0, y0-0.55, charwidth, fontsize, '#FFFFFC', yspacing, 'The white stars show what you might see from a city,')
text_rectangle(x0, y0-0.85, charwidth, fontsize, '#FFFFFC', yspacing, 'the yellow stars from the country.')
text_rectangle(x0, -1, charwidth, fontsize, '#666666', yspacing, 'Zoom in and out to explore.')
text_rectangle(1.0, 0.06, 4*charwidth, fontsize, '#995555', yspacing, '> to the center of the Milky Way >')
text_rectangle(1.0, -0.14, 4*charwidth, fontsize, '#995555', yspacing, '> zoom out, drag right >')
def text_rectangle(x, y, charwidth, fontsize, color, yspacing, thetext):
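# A simple greedy word-wrapper: words are appended to the current line until
# its length exceeds `charwidth` (or the words run out); each finished line
# is drawn one `yspacing` step below the previous one. Lines can overshoot
# charwidth by up to one word, since the length check happens after appending.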
words = thetext.split(' ')
wordcount=0
line=''
nlines=-1
for word in words:
line += word
line += ' '
wordcount+=1
if (len(line)>charwidth) or (wordcount==len(words)):
nlines+=1
plt.text(x, y-nlines*yspacing, line, color=color, size=fontsize)
line=''
def convert_to_html_and_open(fig):
from mpld3 import fig_to_d3
html = fig_to_d3(fig)
file=open('index.html','w')
file.write('<style>body{background-color:#111111;color:#333333;font-size:10pt;font-family:sans-serif}')
file.write('a{color:#444444;text-decoration:none;}</style>')
file.write(html)
file.write("How Far Can You See? <a href=https://github.com/rkeisler/nakedeye target='_blank'>made in python/d3</a> by <a href=https://twitter.com/RyanKeisler target='_blank'>@RyanKeisler</a> using <a href=https://twitter.com/jakevdp target='_blank'>@jakevdp's</a> awesome <a href=https://github.com/jakevdp/mpld3 target='_blank'>mpld3</a> library. work in progress. <br> if you don't see a map of stars, try refreshing.")
file.close()
# I'm not sure why, but this makes it run much faster and more smoothly.
system("perl -pi -e 's/stroke-dasharray: 10,0/stroke-dasharray: 0,0/g' index.html")
system('open index.html')
def plot_stars(ax, d_right, d_up, mag, mag_min=0, mag_max=3.0,
color='#FFFF99', psize=6, alpha=0.3, stride=1):
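# Select stars in the (mag_min, mag_max] magnitude bin, optionally thin the
# sample by `stride`, and scatter-plot them at their projected positions.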
wh=np.where((mag>mag_min)&(mag<=mag_max))[0]
wh=wh[0:-1:stride]
plt.scatter(d_right[wh], d_up[wh],
alpha=alpha, s=psize,
color=color, linewidths=0)
def initialize_figure():
aratio = 1.9
sf = 6.5
minx=-2.9;maxx=2.9
miny=-0.6*(maxx-minx)/aratio;
maxy=0.4*(maxx-minx)/aratio;
fig = plt.figure(frameon=False, figsize=(aratio*sf,1.*sf))
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
ax.set_axis_bgcolor('k')
plt.xlim(minx,maxx)
plt.ylim(miny,maxy)
return fig, ax
def dot_ra_dec(ra0, dec0, ra, dec):
# ra/dec should be in degrees.
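# Both (RA, Dec) directions are converted to Cartesian unit vectors and the
# return value is their dot product, i.e. the cosine of the angular
# separation; multiplying by a star's distance gives the distance projected
# onto that axis.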
d2r = np.pi/180.
phi0 = ra0*d2r
th0 = (90.-dec0)*d2r
x0 = np.cos(phi0)*np.sin(th0)
y0 = np.sin(phi0)*np.sin(th0)
z0 = np.cos(th0)
phi = ra*d2r
th = (90.-dec)*d2r
x = np.cos(phi)*np.sin(th)
y = np.sin(phi)*np.sin(th)
z = np.cos(th)
dot = x*x0 + y*y0 + z*z0
return dot
def get_star_data(quick=True):
import cPickle as pickle
if quick: ra, dec, plx, e_plx, Vmag, d_kpc, d_klyr = pickle.load(open('get_star_data.pkl','r'))
else:
from astropy.io import fits
# from http://cdsarc.u-strasbg.fr/viz-bin/Cat?cat=I%2F239&target=http&
d=fits.open('I_239_hip_main.dat.gz.fits')[1].data
frac_err = 0.2
max_Vmag = 6.0
whok = np.where((np.abs(d['e_Plx']/d['Plx'])<frac_err)&(d['Plx']>0.)&(d['Vmag']<max_Vmag))[0]
whok2 = np.where((d['Plx']>0.)&(d['Vmag']<max_Vmag))[0]
print 1.*len(whok)/len(whok2) # efficiency of frac_err cut
ra=d['RAdeg'][whok]; dec=d['DEdeg'][whok]
plx=d['Plx'][whok]; e_plx=d['e_Plx'][whok]
Vmag = d['Vmag'][whok]
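# Hipparcos parallaxes ('Plx') are in milliarcseconds, so 1/plx is the
# distance in kiloparsecs; multiplying by lyr_per_pc then gives thousands
# of light-years, the working unit of the plot.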
d_kpc = 1./plx
d_klyr = d_kpc*lyr_per_pc
pickle.dump((ra, dec, plx, e_plx, Vmag, d_kpc, d_klyr), open('get_star_data.pkl','w'))
# i'm curious about the typical and max distances for various flux cuts.
wh=np.where((Vmag>-99.)&(Vmag<3.0))[0]; print np.median(d_klyr[wh]), np.max(d_klyr[wh])
wh=np.where((Vmag>3.0)&(Vmag<4.5))[0]; print np.median(d_klyr[wh]), np.max(d_klyr[wh])
wh=np.where((Vmag>4.5)&(Vmag<6.0))[0]; print np.median(d_klyr[wh]), np.max(d_klyr[wh])
dot_gal_center = dot_ra_dec(ra_gal_center, dec_gal_center, ra, dec)
dot_gal_npole = dot_ra_dec(ra_gal_npole, dec_gal_npole, ra, dec)
d_gal_center = d_klyr*dot_gal_center
d_gal_npole = d_klyr*dot_gal_npole
return d_gal_center, d_gal_npole, Vmag
|
rkeisler/nakedeye
|
nakedeye.py
|
Python
|
bsd-3-clause
| 12,337
|
[
"Galaxy"
] |
509bf435535011591f48bfdffe866e2061eb0af824d747b329be5662d401f56e
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
##################################################################################
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
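# A Python set literal of e-mail provider domains (free webmail plus
# disposable/temporary services), so membership checks are O(1). Presumably
# consumed elsewhere in OSRFramework when validating e-mail addresses.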
domains = {
"0-mail.com",
"007addict.com",
"020.co.uk",
"027168.com",
"0815.ru",
"0815.su",
"0clickemail.com",
"0sg.net",
"0wnd.net",
"0wnd.org",
"1033edge.com",
"10mail.org",
"10minutemail.co.za",
"10minutemail.com",
"11mail.com",
"123-m.com",
"123.com",
"123box.net",
"123india.com",
"123mail.cl",
"123mail.org",
"123qwe.co.uk",
"126.com",
"126.net",
"138mail.com",
"139.com",
"150mail.com",
"150ml.com",
"15meg4free.com",
"163.com",
"16mail.com",
"188.com",
"189.cn",
"1auto.com",
"1ce.us",
"1chuan.com",
"1colony.com",
"1coolplace.com",
"1email.eu",
"1freeemail.com",
"1fsdfdsfsdf.tk",
"1funplace.com",
"1internetdrive.com",
"1mail.ml",
"1mail.net",
"1me.net",
"1mum.com",
"1musicrow.com",
"1netdrive.com",
"1nsyncfan.com",
"1pad.de",
"1under.com",
"1webave.com",
"1webhighway.com",
"1zhuan.com",
"2-mail.com",
"20email.eu",
"20mail.in",
"20mail.it",
"20minutemail.com",
"212.com",
"21cn.com",
"247emails.com",
"24horas.com",
"2911.net",
"2980.com",
"2bmail.co.uk",
"2coolforyou.net",
"2d2i.com",
"2die4.com",
"2fdgdfgdfgdf.tk",
"2hotforyou.net",
"2mydns.com",
"2net.us",
"2prong.com",
"2trom.com",
"3000.it",
"30minutemail.com",
"30minutesmail.com",
"3126.com",
"321media.com",
"33mail.com",
"360.ru",
"37.com",
"3ammagazine.com",
"3dmail.com",
"3email.com",
"3g.ua",
"3mail.ga",
"3trtretgfrfe.tk",
"3xl.net",
"444.net",
"4email.com",
"4email.net",
"4gfdsgfdgfd.tk",
"4mg.com",
"4newyork.com",
"4warding.com",
"4warding.net",
"4warding.org",
"4x4fan.com",
"4x4man.com",
"50mail.com",
"5fm.za.com",
"5ghgfhfghfgh.tk",
"5iron.com",
"5star.com",
"60minutemail.com",
"6hjgjhgkilkj.tk",
"6ip.us",
"6mail.cf",
"6paq.com",
"702mail.co.za",
"74.ru",
"7mail.ga",
"7mail.ml",
"7tags.com",
"88.am",
"8848.net",
"888.nu",
"8mail.ga",
"8mail.ml",
"97rock.com",
"99experts.com",
"9ox.net",
"a-bc.net",
"a-player.org",
"a2z4u.net",
"a45.in",
"aaamail.zzn.com",
"aahlife.com",
"aamail.net",
"aapt.net.au",
"aaronkwok.net",
"abbeyroadlondon.co.uk",
"abcflash.net",
"abdulnour.com",
"aberystwyth.com",
"abolition-now.com",
"about.com",
"absolutevitality.com",
"abusemail.de",
"abv.bg",
"abwesend.de",
"abyssmail.com",
"ac20mail.in",
"academycougars.com",
"acceso.or.cr",
"access4less.net",
"accessgcc.com",
"accountant.com",
"acdcfan.com",
"acdczone.com",
"ace-of-base.com",
"acmecity.com",
"acmemail.net",
"acninc.net",
"acrobatmail.com",
"activatormail.com",
"activist.com",
"adam.com.au",
"add3000.pp.ua",
"addcom.de",
"address.com",
"adelphia.net",
"adexec.com",
"adfarrow.com",
"adinet.com.uy",
"adios.net",
"admin.in.th",
"administrativos.com",
"adoption.com",
"ados.fr",
"adrenalinefreak.com",
"adres.nl",
"advalvas.be",
"advantimo.com",
"aeiou.pt",
"aemail4u.com",
"aeneasmail.com",
"afreeinternet.com",
"africa-11.com",
"africamail.com",
"africamel.net",
"africanpartnersonline.com",
"afrobacon.com",
"ag.us.to",
"agedmail.com",
"agelessemail.com",
"agoodmail.com",
"ahaa.dk",
"ahk.jp",
"aichi.com",
"aim.com",
"aircraftmail.com",
"airforce.net",
"airforceemail.com",
"airpost.net",
"aiutamici.com",
"ajacied.com",
"ajaxapp.net",
"ak47.hu",
"aknet.kg",
"akphantom.com",
"albawaba.com",
"alecsmail.com",
"alex4all.com",
"alexandria.cc",
"algeria.com",
"algeriamail.com",
"alhilal.net",
"alibaba.com",
"alice.it",
"aliceadsl.fr",
"aliceinchainsmail.com",
"alivance.com",
"alive.cz",
"aliyun.com",
"allergist.com",
"allmail.net",
"alloymail.com",
"allracing.com",
"allsaintsfan.com",
"alltel.net",
"alpenjodel.de",
"alphafrau.de",
"alskens.dk",
"altavista.com",
"altavista.net",
"altavista.se",
"alternativagratis.com",
"alumni.com",
"alumnidirector.com",
"alvilag.hu",
"ama-trade.de",
"amail.com",
"amazonses.com",
"amele.com",
"america.hm",
"ameritech.net",
"amilegit.com",
"amiri.net",
"amiriindustries.com",
"amnetsal.com",
"amorki.pl",
"amrer.net",
"amuro.net",
"amuromail.com",
"ananzi.co.za",
"ancestry.com",
"andreabocellimail.com",
"andylau.net",
"anfmail.com",
"angelfan.com",
"angelfire.com",
"angelic.com",
"animail.net",
"animal.net",
"animalhouse.com",
"animalwoman.net",
"anjungcafe.com",
"anniefans.com",
"annsmail.com",
"ano-mail.net",
"anonmails.de",
"anonymbox.com",
"anonymous.to",
"anote.com",
"another.com",
"anotherdomaincyka.tk",
"anotherwin95.com",
"anti-ignorance.net",
"anti-social.com",
"antichef.com",
"antichef.net",
"antiqueemail.com",
"antireg.ru",
"antisocial.com",
"antispam.de",
"antispam24.de",
"antispammail.de",
"antongijsen.com",
"antwerpen.com",
"anymoment.com",
"anytimenow.com",
"aol.co.uk",
"aol.com",
"aol.de",
"aol.fr",
"aol.it",
"aol.jp",
"aon.at",
"apexmail.com",
"apmail.com",
"apollo.lv",
"aport.ru",
"aport2000.ru",
"apple.sib.ru",
"appraiser.net",
"approvers.net",
"aquaticmail.net",
"arabia.com",
"arabtop.net",
"arcademaster.com",
"archaeologist.com",
"archerymail.com",
"arcor.de",
"arcotronics.bg",
"arcticmail.com",
"argentina.com",
"arhaelogist.com",
"aristotle.org",
"army.net",
"armyspy.com",
"arnet.com.ar",
"art-en-ligne.pro",
"artistemail.com",
"artlover.com",
"artlover.com.au",
"artman-conception.com",
"as-if.com",
"asdasd.nl",
"asean-mail",
"asean-mail.com",
"asheville.com",
"asia-links.com",
"asia-mail.com",
"asia.com",
"asiafind.com",
"asianavenue.com",
"asiancityweb.com",
"asiansonly.net",
"asianwired.net",
"asiapoint.net",
"askaclub.ru",
"ass.pp.ua",
"assala.com",
"assamesemail.com",
"astroboymail.com",
"astrolover.com",
"astrosfan.com",
"astrosfan.net",
"asurfer.com",
"atheist.com",
"athenachu.net",
"atina.cl",
"atl.lv",
"atlas.cz",
"atlaswebmail.com",
"atlink.com",
"atmc.net",
"ato.check.com",
"atozasia.com",
"atrus.ru",
"att.net",
"attglobal.net",
"attymail.com",
"au.ru",
"auctioneer.net",
"aufeminin.com",
"aus-city.com",
"ausi.com",
"aussiemail.com.au",
"austin.rr.com",
"australia.edu",
"australiamail.com",
"austrosearch.net",
"autoescuelanerja.com",
"autograf.pl",
"automail.ru",
"automotiveauthority.com",
"autorambler.ru",
"aver.com",
"avh.hu",
"avia-tonic.fr",
"avtoritet.ru",
"awayonvacation.com",
"awholelotofamechi.com",
"awsom.net",
"axoskate.com",
"ayna.com",
"azazazatashkent.tk",
"azimiweb.com",
"azmeil.tk",
"bachelorboy.com",
"bachelorgal.com",
"backfliper.com",
"backpackers.com",
"backstreet-boys.com",
"backstreetboysclub.com",
"backtothefuturefans.com",
"backwards.com",
"badtzmail.com",
"bagherpour.com",
"bahrainmail.com",
"bakpaka.com",
"bakpaka.net",
"baldmama.de",
"baldpapa.de",
"ballerstatus.net",
"ballyfinance.com",
"balochistan.org",
"baluch.com",
"bangkok.com",
"bangkok2000.com",
"bannertown.net",
"baptistmail.com",
"baptized.com",
"barcelona.com",
"bareed.ws",
"barid.com",
"barlick.net",
"bartender.net",
"baseball-email.com",
"baseballmail.com",
"basketballmail.com",
"batuta.net",
"baudoinconsulting.com",
"baxomale.ht.cx",
"bboy.com",
"bboy.zzn.com",
"bcvibes.com",
"beddly.com",
"beeebank.com",
"beefmilk.com",
"beenhad.com",
"beep.ru",
"beer.com",
"beerandremotes.com",
"beethoven.com",
"beirut.com",
"belice.com",
"belizehome.com",
"belizemail.net",
"belizeweb.com",
"bell.net",
"bellair.net",
"bellsouth.net",
"berkscounty.com",
"berlin.com",
"berlin.de",
"berlinexpo.de",
"bestmail.us",
"betriebsdirektor.de",
"bettergolf.net",
"bharatmail.com",
"big1.us",
"big5mail.com",
"bigassweb.com",
"bigblue.net.au",
"bigboab.com",
"bigfoot.com",
"bigfoot.de",
"bigger.com",
"biggerbadder.com",
"bigmailbox.com",
"bigmir.net",
"bigpond.au",
"bigpond.com",
"bigpond.com.au",
"bigpond.net",
"bigpond.net.au",
"bigramp.com",
"bigstring.com",
"bikemechanics.com",
"bikeracer.com",
"bikeracers.net",
"bikerider.com",
"billsfan.com",
"billsfan.net",
"bimamail.com",
"bimla.net",
"bin-wieder-da.de",
"binkmail.com",
"bio-muesli.info",
"bio-muesli.net",
"biologyfan.com",
"birdfanatic.com",
"birdlover.com",
"birdowner.net",
"bisons.com",
"bitmail.com",
"bitpage.net",
"bizhosting.com",
"bk.ru",
"bkkmail.com",
"bla-bla.com",
"blackburnfans.com",
"blackburnmail.com",
"blackplanet.com",
"blader.com",
"bladesmail.net",
"blazemail.com",
"bleib-bei-mir.de",
"blink182.net",
"blockfilter.com",
"blogmyway.org",
"blondandeasy.com",
"bluebottle.com",
"bluehyppo.com",
"bluemail.ch",
"bluemail.dk",
"bluesfan.com",
"bluewin.ch",
"blueyonder.co.uk",
"blumail.org",
"blushmail.com",
"blutig.me",
"bmlsports.net",
"boardermail.com",
"boarderzone.com",
"boatracers.com",
"bobmail.info",
"bodhi.lawlita.com",
"bofthew.com",
"bol.com.br",
"bolando.com",
"bollywoodz.com",
"bolt.com",
"boltonfans.com",
"bombdiggity.com",
"bonbon.net",
"boom.com",
"bootmail.com",
"bootybay.de",
"bornagain.com",
"bornnaked.com",
"bossofthemoss.com",
"bostonoffice.com",
"boun.cr",
"bounce.net",
"bounces.amazon.com",
"bouncr.com",
"box.az",
"box.ua",
"boxbg.com",
"boxemail.com",
"boxformail.in",
"boxfrog.com",
"boximail.com",
"boyzoneclub.com",
"bradfordfans.com",
"brasilia.net",
"bratan.ru",
"brazilmail.com",
"brazilmail.com.br",
"breadtimes.press",
"breakthru.com",
"breathe.com",
"brefmail.com",
"brennendesreich.de",
"bresnan.net",
"brestonline.com",
"brew-master.com",
"brew-meister.com",
"brfree.com.br",
"briefemail.com",
"bright.net",
"britneyclub.com",
"brittonsign.com",
"broadcast.net",
"broadwaybuff.com",
"broadwaylove.com",
"brokeandhappy.com",
"brokenvalve.com",
"brujula.net",
"brunetka.ru",
"brusseler.com",
"bsdmail.com",
"bsnow.net",
"bspamfree.org",
"bt.com",
"btcc.org",
"btcmail.pw",
"btconnect.co.uk",
"btconnect.com",
"btinternet.com",
"btopenworld.co.uk",
"buerotiger.de",
"buffymail.com",
"bugmenot.com",
"bulgaria.com",
"bullsfan.com",
"bullsgame.com",
"bumerang.ro",
"bumpymail.com",
"bumrap.com",
"bund.us",
"bunita.net",
"bunko.com",
"burnthespam.info",
"burntmail.com",
"burstmail.info",
"buryfans.com",
"bushemail.com",
"business-man.com",
"businessman.net",
"businessweekmail.com",
"bust.com",
"busta-rhymes.com",
"busymail.com",
"busymail.com.com",
"busymail.comhomeart.com",
"butch-femme.net",
"butovo.net",
"buyersusa.com",
"buymoreplays.com",
"buzy.com",
"bvimailbox.com",
"byke.com",
"byom.de",
"byteme.com",
"c2.hu",
"c2i.net",
"c3.hu",
"c4.com",
"c51vsgq.com",
"cabacabana.com",
"cable.comcast.com",
"cableone.net",
"caere.it",
"cairomail.com",
"calcuttaads.com",
"calendar-server.bounces.google.com",
"calidifontain.be",
"californiamail.com",
"callnetuk.com",
"callsign.net",
"caltanet.it",
"camidge.com",
"canada-11.com",
"canada.com",
"canadianmail.com",
"canoemail.com",
"cantv.net",
"canwetalk.com",
"caramail.com",
"card.zp.ua",
"care2.com",
"careceo.com",
"careerbuildermail.com",
"carioca.net",
"cartelera.org",
"cartestraina.ro",
"casablancaresort.com",
"casema.nl",
"cash4u.com",
"cashette.com",
"casino.com",
"casualdx.com",
"cataloniamail.com",
"cataz.com",
"catcha.com",
"catchamail.com",
"catemail.com",
"catholic.org",
"catlover.com",
"catsrule.garfield.com",
"ccnmail.com",
"cd2.com",
"cek.pm",
"celineclub.com",
"celtic.com",
"center-mail.de",
"centermail.at",
"centermail.com",
"centermail.de",
"centermail.info",
"centermail.net",
"centoper.it",
"centralpets.com",
"centrum.cz",
"centrum.sk",
"centurylink.net",
"centurytel.net",
"certifiedmail.com",
"cfl.rr.com",
"cgac.es",
"cghost.s-a-d.de",
"chacuo.net",
"chaiyo.com",
"chaiyomail.com",
"chalkmail.net",
"chammy.info",
"chance2mail.com",
"chandrasekar.net",
"channelonetv.com",
"charityemail.com",
"charmedmail.com",
"charter.com",
"charter.net",
"chat.ru",
"chatlane.ru",
"chattown.com",
"chauhanweb.com",
"cheatmail.de",
"chechnya.conf.work",
"check.com",
"check.com12",
"check1check.com",
"cheeb.com",
"cheerful.com",
"chef.net",
"chefmail.com",
"chek.com",
"chello.nl",
"chemist.com",
"chequemail.com",
"cheshiremail.com",
"cheyenneweb.com",
"chez.com",
"chickmail.com",
"chil-e.com",
"childrens.md",
"childsavetrust.org",
"china.com",
"china.net.vg",
"chinalook.com",
"chinamail.com",
"chinesecool.com",
"chirk.com",
"chocaholic.com.au",
"chocofan.com",
"chogmail.com",
"choicemail1.com",
"chong-mail.com",
"chong-mail.net",
"christianmail.net",
"chronicspender.com",
"churchusa.com",
"cia-agent.com",
"cia.hu",
"ciaoweb.it",
"cicciociccio.com",
"cincinow.net",
"cirquefans.com",
"citeweb.net",
"citiz.net",
"citlink.net",
"city-of-bath.org",
"city-of-birmingham.com",
"city-of-brighton.org",
"city-of-cambridge.com",
"city-of-coventry.com",
"city-of-edinburgh.com",
"city-of-lichfield.com",
"city-of-lincoln.com",
"city-of-liverpool.com",
"city-of-manchester.com",
"city-of-nottingham.com",
"city-of-oxford.com",
"city-of-swansea.com",
"city-of-westminster.com",
"city-of-westminster.net",
"city-of-york.net",
"city2city.com",
"citynetusa.com",
"cityofcardiff.net",
"cityoflondon.org",
"ciudad.com.ar",
"ckaazaza.tk",
"claramail.com",
"classicalfan.com",
"classicmail.co.za",
"clear.net.nz",
"clearwire.net",
"clerk.com",
"clickforadate.com",
"cliffhanger.com",
"clixser.com",
"close2you.ne",
"close2you.net",
"clrmail.com",
"club-internet.fr",
"club4x4.net",
"clubalfa.com",
"clubbers.net",
"clubducati.com",
"clubhonda.net",
"clubmember.org",
"clubnetnoir.com",
"clubvdo.net",
"cluemail.com",
"cmail.net",
"cmail.org",
"cmail.ru",
"cmpmail.com",
"cmpnetmail.com",
"cnegal.com",
"cnnsimail.com",
"cntv.cn",
"codec.ro",
"codec.ro.ro",
"codec.roemail.ro",
"coder.hu",
"coid.biz",
"coldemail.info",
"coldmail.com",
"collectiblesuperstore.com",
"collector.org",
"collegebeat.com",
"collegeclub.com",
"collegemail.com",
"colleges.com",
"columbus.rr.com",
"columbusrr.com",
"columnist.com",
"comast.com",
"comast.net",
"comcast.com",
"comcast.net",
"comic.com",
"communityconnect.com",
"complxmind.com",
"comporium.net",
"comprendemail.com",
"compuserve.com",
"computer-expert.net",
"computer-freak.com",
"computer4u.com",
"computerconfused.com",
"computermail.net",
"computernaked.com",
"conexcol.com",
"cong.ru",
"conk.com",
"connect4free.net",
"connectbox.com",
"conok.com",
"consultant.com",
"consumerriot.com",
"contractor.net",
"contrasto.cu.cc",
"cookiemonster.com",
"cool.br",
"cool.fr.nf",
"coole-files.de",
"coolgoose.ca",
"coolgoose.com",
"coolkiwi.com",
"coollist.com",
"coolmail.com",
"coolmail.net",
"coolrio.com",
"coolsend.com",
"coolsite.net",
"cooooool.com",
"cooperation.net",
"cooperationtogo.net",
"copacabana.com",
"copper.net",
"copticmail.com",
"cornells.com",
"cornerpub.com",
"corporatedirtbag.com",
"correo.terra.com.gt",
"corrsfan.com",
"cortinet.com",
"cosmo.com",
"cotas.net",
"counsellor.com",
"countrylover.com",
"courriel.fr.nf",
"courrieltemporaire.com",
"cox.com",
"cox.net",
"coxinet.net",
"cpaonline.net",
"cracker.hu",
"craftemail.com",
"crapmail.org",
"crazedanddazed.com",
"crazy.ru",
"crazymailing.com",
"crazysexycool.com",
"crewstart.com",
"cristianemail.com",
"critterpost.com",
"croeso.com",
"crosshairs.com",
"crosswinds.net",
"crunkmail.com",
"crwmail.com",
"cry4helponline.com",
"cryingmail.com",
"cs.com",
"csinibaba.hu",
"cubiclink.com",
"cuemail.com",
"cumbriamail.com",
"curio-city.com",
"curryworld.de",
"curtsmail.com",
"cust.in",
"cute-girl.com",
"cuteandcuddly.com",
"cutekittens.com",
"cutey.com",
"cuvox.de",
"cww.de",
"cyber-africa.net",
"cyber-innovation.club",
"cyber-matrix.com",
"cyber-phone.eu",
"cyber-wizard.com",
"cyber4all.com",
"cyberbabies.com",
"cybercafemaui.com",
"cybercity-online.net",
"cyberdude.com",
"cyberforeplay.net",
"cybergal.com",
"cybergrrl.com",
"cyberinbox.com",
"cyberleports.com",
"cybermail.net",
"cybernet.it",
"cyberservices.com",
"cyberspace-asia.com",
"cybertrains.org",
"cyclefanz.com",
"cymail.net",
"cynetcity.com",
"d3p.dk",
"dabsol.net",
"dacoolest.com",
"dadacasa.com",
"daha.com",
"dailypioneer.com",
"dallas.theboys.com",
"dallasmail.com",
"dandikmail.com",
"dangerous-minds.com",
"dansegulvet.com",
"dasdasdascyka.tk",
"data54.com",
"date.by",
"daum.net",
"davegracey.com",
"dawnsonmail.com",
"dawsonmail.com",
"dayrep.com",
"dazedandconfused.com",
"dbzmail.com",
"dcemail.com",
"dcsi.net",
"ddns.org",
"deadaddress.com",
"deadlymob.org",
"deadspam.com",
"deafemail.net",
"deagot.com",
"deal-maker.com",
"dearriba.com",
"death-star.com",
"deepseafisherman.net",
"deforestationsucks.com",
"degoo.com",
"dejanews.com",
"delikkt.de",
"deliveryman.com",
"deneg.net",
"depechemode.com",
"deseretmail.com",
"desertmail.com",
"desertonline.com",
"desertsaintsmail.com",
"desilota.com",
"deskmail.com",
"deskpilot.com",
"despam.it",
"despammed.com",
"destin.com",
"detik.com",
"deutschland-net.com",
"devnullmail.com",
"devotedcouples.com",
"dezigner.ru",
"dfgh.net",
"dfwatson.com",
"dglnet.com.br",
"dgoh.org",
"di-ve.com",
"diamondemail.com",
"didamail.com",
"die-besten-bilder.de",
"die-genossen.de",
"die-optimisten.de",
"die-optimisten.net",
"die.life",
"diehardmail.com",
"diemailbox.de",
"digibel.be",
"digital-filestore.de",
"digitalforeplay.net",
"digitalsanctuary.com",
"digosnet.com",
"dingbone.com",
"diplomats.com",
"directbox.com",
"director-general.com",
"diri.com",
"dirtracer.com",
"dirtracers.com",
"discard.email",
"discard.ga",
"discard.gq",
"discardmail.com",
"discardmail.de",
"disciples.com",
"discofan.com",
"discovery.com",
"discoverymail.com",
"discoverymail.net",
"disign-concept.eu",
"disign-revelation.com",
"disinfo.net",
"dispomail.eu",
"disposable.com",
"disposableaddress.com",
"disposableemailaddresses.com",
"disposableinbox.com",
"dispose.it",
"dispostable.com",
"divismail.ru",
"divorcedandhappy.com",
"dm.w3internet.co.uk",
"dmailman.com",
"dmitrovka.net",
"dmitry.ru",
"dnainternet.net",
"dnsmadeeasy.com",
"doar.net",
"doclist.bounces.google.com",
"docmail.cz",
"docs.google.com",
"doctor.com",
"dodgeit.com",
"dodgit.com",
"dodgit.org",
"dodo.com.au",
"dodsi.com",
"dog.com",
"dogit.com",
"doglover.com",
"dogmail.co.uk",
"dogsnob.net",
"doityourself.com",
"domforfb1.tk",
"domforfb2.tk",
"domforfb3.tk",
"domforfb4.tk",
"domforfb5.tk",
"domforfb6.tk",
"domforfb7.tk",
"domforfb8.tk",
"domozmail.com",
"doneasy.com",
"donegal.net",
"donemail.ru",
"donjuan.com",
"dontgotmail.com",
"dontmesswithtexas.com",
"dontreg.com",
"dontsendmespam.de",
"doramail.com",
"dostmail.com",
"dotcom.fr",
"dotmsg.com",
"dotnow.com",
"dott.it",
"download-privat.de",
"dplanet.ch",
"dr.com",
"dragoncon.net",
"dragracer.com",
"drdrb.net",
"drivehq.com",
"dropmail.me",
"dropzone.com",
"drotposta.hu",
"dubaimail.com",
"dublin.com",
"dublin.ie",
"dump-email.info",
"dumpandjunk.com",
"dumpmail.com",
"dumpmail.de",
"dumpyemail.com",
"dunlopdriver.com",
"dunloprider.com",
"duno.com",
"duskmail.com",
"dustdevil.com",
"dutchmail.com",
"dvd-fan.net",
"dwp.net",
"dygo.com",
"dynamitemail.com",
"dyndns.org",
"e-apollo.lv",
"e-hkma.com",
"e-mail.com",
"e-mail.com.tr",
"e-mail.dk",
"e-mail.org",
"e-mail.ru",
"e-mail.ua",
"e-mailanywhere.com",
"e-mails.ru",
"e-tapaal.com",
"e-webtec.com",
"e4ward.com",
"earthalliance.com",
"earthcam.net",
"earthdome.com",
"earthling.net",
"earthlink.net",
"earthonline.net",
"eastcoast.co.za",
"eastlink.ca",
"eastmail.com",
"eastrolog.com",
"easy.com",
"easy.to",
"easypeasy.com",
"easypost.com",
"easytrashmail.com",
"eatmydirt.com",
"ebprofits.net",
"ec.rr.com",
"ecardmail.com",
"ecbsolutions.net",
"echina.com",
"ecolo-online.fr",
"ecompare.com",
"edmail.com",
"ednatx.com",
"edtnmail.com",
"educacao.te.pt",
"educastmail.com",
"eelmail.com",
"ehmail.com",
"einmalmail.de",
"einrot.com",
"einrot.de",
"eintagsmail.de",
"eircom.net",
"ekidz.com.au",
"elisanet.fi",
"elitemail.org",
"elsitio.com",
"eltimon.com",
"elvis.com",
"elvisfan.com",
"email-fake.gq",
"email-london.co.uk",
"email-value.com",
"email.biz",
"email.cbes.net",
"email.com",
"email.cz",
"email.ee",
"email.it",
"email.nu",
"email.org",
"email.ro",
"email.ru",
"email.si",
"email.su",
"email.ua",
"email.women.com",
"email2me.com",
"email2me.net",
"email4u.info",
"email60.com",
"emailacc.com",
"emailaccount.com",
"emailaddresses.com",
"emailage.ga",
"emailage.gq",
"emailasso.net",
"emailchoice.com",
"emailcorner.net",
"emailem.com",
"emailengine.net",
"emailengine.org",
"emailer.hubspot.com",
"emailforyou.net",
"emailgaul.com",
"emailgo.de",
"emailgroups.net",
"emailias.com",
"emailinfive.com",
"emailit.com",
"emaillime.com",
"emailmiser.com",
"emailoregon.com",
"emailpinoy.com",
"emailplanet.com",
"emailplus.org",
"emailproxsy.com",
"emails.ga",
"emails.incisivemedia.com",
"emails.ru",
"emailsensei.com",
"emailservice.com",
"emailsydney.com",
"emailtemporanea.com",
"emailtemporanea.net",
"emailtemporar.ro",
"emailtemporario.com.br",
"emailthe.net",
"emailtmp.com",
"emailto.de",
"emailuser.net",
"emailwarden.com",
"emailx.at.hm",
"emailx.net",
"emailxfer.com",
"emailz.ga",
"emailz.gq",
"emale.ru",
"ematic.com",
"embarqmail.com",
"emeil.in",
"emeil.ir",
"emil.com",
"eml.cc",
"eml.pp.ua",
"empereur.com",
"emptymail.com",
"emumail.com",
"emz.net",
"end-war.com",
"enel.net",
"enelpunto.net",
"engineer.com",
"england.com",
"england.edu",
"englandmail.com",
"epage.ru",
"epatra.com",
"ephemail.net",
"epiqmail.com",
"epix.net",
"epomail.com",
"epost.de",
"eposta.hu",
"eprompter.com",
"eqqu.com",
"eramail.co.za",
"eresmas.com",
"eriga.lv",
"ero-tube.org",
"eshche.net",
"esmailweb.net",
"estranet.it",
"ethos.st",
"etoast.com",
"etrademail.com",
"etranquil.com",
"etranquil.net",
"eudoramail.com",
"europamel.net",
"europe.com",
"europemail.com",
"euroseek.com",
"eurosport.com",
"evafan.com",
"evertonfans.com",
"every1.net",
"everyday.com.kh",
"everymail.net",
"everyone.net",
"everytg.ml",
"evopo.com",
"examnotes.net",
"excite.co.jp",
"excite.co.uk",
"excite.com",
"excite.it",
"execs.com",
"execs2k.com",
"executivemail.co.za",
"exemail.com.au",
"exg6.exghost.com",
"explodemail.com",
"express.net.ua",
"expressasia.com",
"extenda.net",
"extended.com",
"extremail.ru",
"eyepaste.com",
"eyou.com",
"ezagenda.com",
"ezcybersearch.com",
"ezmail.egine.com",
"ezmail.ru",
"ezrs.com",
"f-m.fm",
"f1fans.net",
"facebook-email.ga",
"facebook.com",
"facebookmail.com",
"facebookmail.gq",
"fadrasha.net",
"fadrasha.org",
"fahr-zur-hoelle.org",
"fake-email.pp.ua",
"fake-mail.cf",
"fake-mail.ga",
"fake-mail.ml",
"fakeinbox.com",
"fakeinformation.com",
"fakemailz.com",
"falseaddress.com",
"fan.com",
"fan.theboys.com",
"fannclub.com",
"fansonlymail.com",
"fansworldwide.de",
"fantasticmail.com",
"fantasymail.de",
"farang.net",
"farifluset.mailexpire.com",
"faroweb.com",
"fast-email.com",
"fast-mail.fr",
"fast-mail.org",
"fastacura.com",
"fastchevy.com",
"fastchrysler.com",
"fastem.com",
"fastemail.us",
"fastemailer.com",
"fastemailextractor.net",
"fastermail.com",
"fastest.cc",
"fastimap.com",
"fastkawasaki.com",
"fastmail.ca",
"fastmail.cn",
"fastmail.co.uk",
"fastmail.com",
"fastmail.com.au",
"fastmail.es",
"fastmail.fm",
"fastmail.gr",
"fastmail.im",
"fastmail.in",
"fastmail.jp",
"fastmail.mx",
"fastmail.net",
"fastmail.nl",
"fastmail.se",
"fastmail.to",
"fastmail.tw",
"fastmail.us",
"fastmailbox.net",
"fastmazda.com",
"fastmessaging.com",
"fastmitsubishi.com",
"fastnissan.com",
"fastservice.com",
"fastsubaru.com",
"fastsuzuki.com",
"fasttoyota.com",
"fastyamaha.com",
"fatcock.net",
"fatflap.com",
"fathersrightsne.org",
"fatyachts.com",
"fax.ru",
"fbi-agent.com",
"fbi.hu",
"fdfdsfds.com",
"fea.st",
"federalcontractors.com",
"feinripptraeger.de",
"felicity.com",
"felicitymail.com",
"female.ru",
"femenino.com",
"fepg.net",
"fetchmail.co.uk",
"fetchmail.com",
"fettabernett.de",
"feyenoorder.com",
"ffanet.com",
"fiberia.com",
"fibertel.com.ar",
"ficken.de",
"fificorp.com",
"fificorp.net",
"fightallspam.com",
"filipinolinks.com",
"filzmail.com",
"financefan.net",
"financemail.net",
"financier.com",
"findfo.com",
"findhere.com",
"findmail.com",
"findmemail.com",
"finebody.com",
"fineemail.com",
"finfin.com",
"finklfan.com",
"fire-brigade.com",
"fireman.net",
"fishburne.org",
"fishfuse.com",
"fivemail.de",
"fixmail.tk",
"fizmail.com",
"flashbox.5july.org",
"flashemail.com",
"flashmail.com",
"flashmail.net",
"fleckens.hu",
"flipcode.com",
"floridaemail.net",
"flytecrew.com",
"fmail.co.uk",
"fmailbox.com",
"fmgirl.com",
"fmguy.com",
"fnbmail.co.za",
"fnmail.com",
"folkfan.com",
"foodmail.com",
"footard.com",
"football.theboys.com",
"footballmail.com",
"foothills.net",
"for-president.com",
"force9.co.uk",
"forfree.at",
"forgetmail.com",
"fornow.eu",
"forpresident.com",
"fortuncity.com",
"fortunecity.com",
"forum.dk",
"fossefans.com",
"foxmail.com",
"fr33mail.info",
"francefans.com",
"francemel.fr",
"frapmail.com",
"free-email.ga",
"free-online.net",
"free-org.com",
"free.com.pe",
"free.fr",
"freeaccess.nl",
"freeaccount.com",
"freeandsingle.com",
"freebox.com",
"freedom.usa.com",
"freedomlover.com",
"freefanmail.com",
"freegates.be",
"freeghana.com",
"freelance-france.eu",
"freeler.nl",
"freemail.bozz.com",
"freemail.c3.hu",
"freemail.com.au",
"freemail.com.pk",
"freemail.de",
"freemail.et",
"freemail.gr",
"freemail.hu",
"freemail.it",
"freemail.lt",
"freemail.ms",
"freemail.nl",
"freemail.org.mk",
"freemail.ru",
"freemails.ga",
"freemeil.gq",
"freenet.de",
"freenet.kg",
"freeola.com",
"freeola.net",
"freeproblem.com",
"freesbee.fr",
"freeserve.co.uk",
"freeservers.com",
"freestamp.com",
"freestart.hu",
"freesurf.fr",
"freesurf.nl",
"freeuk.com",
"freeuk.net",
"freeukisp.co.uk",
"freeweb.org",
"freewebemail.com",
"freeyellow.com",
"freezone.co.uk",
"fresnomail.com",
"freudenkinder.de",
"freundin.ru",
"friction.net",
"friendlydevices.com",
"friendlymail.co.uk",
"friends-cafe.com",
"friendsfan.com",
"from-africa.com",
"from-america.com",
"from-argentina.com",
"from-asia.com",
"from-australia.com",
"from-belgium.com",
"from-brazil.com",
"from-canada.com",
"from-china.net",
"from-england.com",
"from-europe.com",
"from-france.net",
"from-germany.net",
"from-holland.com",
"from-israel.com",
"from-italy.net",
"from-japan.net",
"from-korea.com",
"from-mexico.com",
"from-outerspace.com",
"from-russia.com",
"from-spain.net",
"fromalabama.com",
"fromalaska.com",
"fromarizona.com",
"fromarkansas.com",
"fromcalifornia.com",
"fromcolorado.com",
"fromconnecticut.com",
"fromdelaware.com",
"fromflorida.net",
"fromgeorgia.com",
"fromhawaii.net",
"fromidaho.com",
"fromillinois.com",
"fromindiana.com",
"frominter.net",
"fromiowa.com",
"fromjupiter.com",
"fromkansas.com",
"fromkentucky.com",
"fromlouisiana.com",
"frommaine.net",
"frommaryland.com",
"frommassachusetts.com",
"frommiami.com",
"frommichigan.com",
"fromminnesota.com",
"frommississippi.com",
"frommissouri.com",
"frommontana.com",
"fromnebraska.com",
"fromnevada.com",
"fromnewhampshire.com",
"fromnewjersey.com",
"fromnewmexico.com",
"fromnewyork.net",
"fromnorthcarolina.com",
"fromnorthdakota.com",
"fromohio.com",
"fromoklahoma.com",
"fromoregon.net",
"frompennsylvania.com",
"fromrhodeisland.com",
"fromru.com",
"fromru.ru",
"fromsouthcarolina.com",
"fromsouthdakota.com",
"fromtennessee.com",
"fromtexas.com",
"fromthestates.com",
"fromutah.com",
"fromvermont.com",
"fromvirginia.com",
"fromwashington.com",
"fromwashingtondc.com",
"fromwestvirginia.com",
"fromwisconsin.com",
"fromwyoming.com",
"front.ru",
"frontier.com",
"frontiernet.net",
"frostbyte.uk.net",
"fsmail.net",
"ftc-i.net",
"ftml.net",
"fuckingduh.com",
"fudgerub.com",
"fullmail.com",
"funiran.com",
"funkfan.com",
"funky4.com",
"fuorissimo.com",
"furnitureprovider.com",
"fuse.net",
"fusemail.com",
"fut.es",
"fux0ringduh.com",
"fwnb.com",
"fxsmails.com",
"fyii.de",
"galamb.net",
"galaxy5.com",
"galaxyhit.com",
"gamebox.com",
"gamebox.net",
"gamegeek.com",
"games.com",
"gamespotmail.com",
"gamil.com",
"gamil.com.au",
"gamno.config.work",
"garbage.com",
"gardener.com",
"garliclife.com",
"gatwickemail.com",
"gawab.com",
"gay.com",
"gaybrighton.co.uk",
"gaza.net",
"gazeta.pl",
"gazibooks.com",
"gci.net",
"gdi.net",
"gee-wiz.com",
"geecities.com",
"geek.com",
"geek.hu",
"geeklife.com",
"gehensiemirnichtaufdensack.de",
"gelitik.in",
"gencmail.com",
"general-hospital.com",
"gentlemansclub.de",
"genxemail.com",
"geocities.com",
"geography.net",
"geologist.com",
"geopia.com",
"germanymail.com",
"get.pp.ua",
"get1mail.com",
"get2mail.fr",
"getairmail.cf",
"getairmail.com",
"getairmail.ga",
"getairmail.gq",
"getmails.eu",
"getonemail.com",
"getonemail.net",
"gfxartist.ru",
"gh2000.com",
"ghanamail.com",
"ghostmail.com",
"ghosttexter.de",
"giantmail.de",
"giantsfan.com",
"giga4u.de",
"gigileung.org",
"girl4god.com",
"girlsundertheinfluence.com",
"gishpuppy.com",
"givepeaceachance.com",
"glay.org",
"glendale.net",
"globalfree.it",
"globalpagan.com",
"globalsite.com.br",
"globetrotter.net",
"globo.com",
"globomail.com",
"gmail.co.za",
"gmail.com",
"gmail.com.au",
"gmail.com.br",
"gmail.ru",
"gmial.com",
"gmx.at",
"gmx.ch",
"gmx.co.uk",
"gmx.com",
"gmx.de",
"gmx.fr",
"gmx.li",
"gmx.net",
"gmx.us",
"gnwmail.com",
"go.com",
"go.ro",
"go.ru",
"go2.com.py",
"go2net.com",
"go4.it",
"gobrainstorm.net",
"gocollege.com",
"gocubs.com",
"godmail.dk",
"goemailgo.com",
"gofree.co.uk",
"gol.com",
"goldenmail.ru",
"goldmail.ru",
"goldtoolbox.com",
"golfemail.com",
"golfilla.info",
"golfmail.be",
"gonavy.net",
"gonuts4free.com",
"goodnewsmail.com",
"goodstick.com",
"google.com",
"googlegroups.com",
"googlemail.com",
"goosemoose.com",
"goplay.com",
"gorillaswithdirtyarmpits.com",
"gorontalo.net",
"gospelfan.com",
"gothere.uk.com",
"gotmail.com",
"gotmail.net",
"gotmail.org",
"gotomy.com",
"gotti.otherinbox.com",
"govolsfan.com",
"gportal.hu",
"grabmail.com",
"graduate.org",
"graffiti.net",
"gramszu.net",
"grandmamail.com",
"grandmasmail.com",
"graphic-designer.com",
"grapplers.com",
"gratisweb.com",
"great-host.in",
"greenmail.net",
"greensloth.com",
"groupmail.com",
"grr.la",
"grungecafe.com",
"gsrv.co.uk",
"gtemail.net",
"gtmc.net",
"gua.net",
"guerillamail.biz",
"guerillamail.com",
"guerrillamail.biz",
"guerrillamail.com",
"guerrillamail.de",
"guerrillamail.info",
"guerrillamail.net",
"guerrillamail.org",
"guerrillamailblock.com",
"guessmail.com",
"guju.net",
"gurlmail.com",
"gustr.com",
"guy.com",
"guy2.com",
"guyanafriends.com",
"gwhsgeckos.com",
"gyorsposta.com",
"gyorsposta.hu",
"h-mail.us",
"hab-verschlafen.de",
"hablas.com",
"habmalnefrage.de",
"hacccc.com",
"hackermail.com",
"hackermail.net",
"hailmail.net",
"hairdresser.com",
"hairdresser.net",
"haltospam.com",
"hamptonroads.com",
"handbag.com",
"handleit.com",
"hang-ten.com",
"hangglidemail.com",
"hanmail.net",
"happemail.com",
"happycounsel.com",
"happypuppy.com",
"harakirimail.com",
"haramamba.ru",
"hardcorefreak.com",
"hardyoungbabes.com",
"hartbot.de",
"hat-geld.de",
"hatespam.org",
"hawaii.rr.com",
"hawaiiantel.net",
"headbone.com",
"healthemail.net",
"heartthrob.com",
"heavynoize.net",
"heerschap.com",
"heesun.net",
"hehe.com",
"hello.hu",
"hello.net.au",
"hello.to",
"hellokitty.com",
"helter-skelter.com",
"hempseed.com",
"herediano.com",
"heremail.com",
"herono1.com",
"herp.in",
"herr-der-mails.de",
"hetnet.nl",
"hewgen.ru",
"hey.to",
"hhdevel.com",
"hideakifan.com",
"hidemail.de",
"hidzz.com",
"highmilton.com",
"highquality.com",
"highveldmail.co.za",
"hilarious.com",
"hinduhome.com",
"hingis.org",
"hiphopfan.com",
"hispavista.com",
"hitmail.com",
"hitmanrecords.com",
"hitthe.net",
"hkg.net",
"hkstarphoto.com",
"hmamail.com",
"hochsitze.com",
"hockeymail.com",
"hollywoodkids.com",
"home-email.com",
"home.de",
"home.nl",
"home.no.net",
"home.ro",
"home.se",
"homeart.com",
"homelocator.com",
"homemail.com",
"homenetmail.com",
"homeonthethrone.com",
"homestead.com",
"homeworkcentral.com",
"honduras.com",
"hongkong.com",
"hookup.net",
"hoopsmail.com",
"hopemail.biz",
"horrormail.com",
"host-it.com.sg",
"hot-mail.gq",
"hot-shop.com",
"hot-shot.com",
"hot.ee",
"hotbot.com",
"hotbox.ru",
"hotbrev.com",
"hotcoolmail.com",
"hotepmail.com",
"hotfire.net",
"hotletter.com",
"hotlinemail.com",
"hotmail.be",
"hotmail.ca",
"hotmail.ch",
"hotmail.co",
"hotmail.co.il",
"hotmail.co.jp",
"hotmail.co.nz",
"hotmail.co.uk",
"hotmail.co.za",
"hotmail.com",
"hotmail.com.ar",
"hotmail.com.au",
"hotmail.com.br",
"hotmail.com.mx",
"hotmail.com.tr",
"hotmail.de",
"hotmail.es",
"hotmail.fi",
"hotmail.fr",
"hotmail.it",
"hotmail.kg",
"hotmail.kz",
"hotmail.my",
"hotmail.nl",
"hotmail.ro",
"hotmail.roor",
"hotmail.ru",
"hotpop.com",
"hotpop3.com",
"hotvoice.com",
"housefan.com",
"housefancom",
"housemail.com",
"hsuchi.net",
"html.tou.com",
"hu2.ru",
"hughes.net",
"hulapla.de",
"humanoid.net",
"humanux.com",
"humn.ws.gy",
"humour.com",
"hunsa.com",
"hurting.com",
"hush.com",
"hushmail.com",
"hypernautica.com",
"i-connect.com",
"i-france.com",
"i-love-cats.com",
"i-mail.com.au",
"i-mailbox.net",
"i-p.com",
"i.am",
"i.am.to",
"i.amhey.to",
"i.ua",
"i12.com",
"i2828.com",
"i2pmail.org",
"iam4msu.com",
"iamawoman.com",
"iamfinallyonline.com",
"iamwaiting.com",
"iamwasted.com",
"iamyours.com",
"icestorm.com",
"ich-bin-verrueckt-nach-dir.de",
"ich-will-net.de",
"icloud.com",
"icmsconsultants.com",
"icq.com",
"icqmail.com",
"icrazy.com",
"icu.md",
"id-base.com",
"id.ru",
"ididitmyway.com",
"idigjesus.com",
"idirect.com",
"ieatspam.eu",
"ieatspam.info",
"ieh-mail.de",
"iespana.es",
"ifoward.com",
"ig.com.br",
"ignazio.it",
"ignmail.com",
"ihateclowns.com",
"ihateyoualot.info",
"iheartspam.org",
"iinet.net.au",
"ijustdontcare.com",
"ikbenspamvrij.nl",
"ilkposta.com",
"ilovechocolate.com",
"ilovegiraffes.net",
"ilovejesus.com",
"ilovelionking.com",
"ilovepokemonmail.com",
"ilovethemovies.com",
"ilovetocollect.net",
"ilse.nl",
"imaginemail.com",
"imail.org",
"imail.ru",
"imailbox.com",
"imails.info",
"imap-mail.com",
"imap.cc",
"imapmail.org",
"imel.org",
"imgof.com",
"imgv.de",
"immo-gerance.info",
"imneverwrong.com",
"imposter.co.uk",
"imstations.com",
"imstressed.com",
"imtoosexy.com",
"in-box.net",
"in2jesus.com",
"iname.com",
"inbax.tk",
"inbound.plus",
"inbox.com",
"inbox.lv",
"inbox.net",
"inbox.ru",
"inbox.si",
"inboxalias.com",
"inboxclean.com",
"inboxclean.org",
"incamail.com",
"includingarabia.com",
"incredimail.com",
"indeedemail.com",
"index.ua",
"indexa.fr",
"india.com",
"indiatimes.com",
"indo-mail.com",
"indocities.com",
"indomail.com",
"indosat.net.id",
"indus.ru",
"indyracers.com",
"inerted.com",
"inet.com",
"inet.net.au",
"info-media.de",
"info-radio.ml",
"info.com",
"info66.com",
"infoapex.com",
"infocom.zp.ua",
"infohq.com",
"infomail.es",
"infomart.or.jp",
"informaticos.com",
"infospacemail.com",
"infovia.com.ar",
"inicia.es",
"inmail.sk",
"inmail24.com",
"inmano.com",
"inmynetwork.tk",
"innocent.com",
"inonesearch.com",
"inorbit.com",
"inoutbox.com",
"insidebaltimore.net",
"insight.rr.com",
"inspectorjavert.com",
"instant-mail.de",
"instantemailaddress.com",
"instantmail.fr",
"instruction.com",
"instructor.net",
"insurer.com",
"interburp.com",
"interfree.it",
"interia.pl",
"interlap.com.ar",
"intermail.co.il",
"internet-club.com",
"internet-e-mail.com",
"internet-mail.org",
"internet-police.com",
"internetbiz.com",
"internetdrive.com",
"internetegypt.com",
"internetemails.net",
"internetmailing.net",
"internode.on.net",
"invalid.com",
"investormail.com",
"inwind.it",
"iobox.com",
"iobox.fi",
"iol.it",
"iol.pt",
"iowaemail.com",
"ip3.com",
"ip4.pp.ua",
"ip6.li",
"ip6.pp.ua",
"ipdeer.com",
"ipex.ru",
"ipoo.org",
"iportalexpress.com",
"iprimus.com.au",
"iqemail.com",
"irangate.net",
"iraqmail.com",
"ireland.com",
"irelandmail.com",
"irish2me.com",
"irj.hu",
"iroid.com",
"iscooler.com",
"isellcars.com",
"iservejesus.com",
"islamonline.net",
"islandemail.net",
"isleuthmail.com",
"ismart.net",
"isonfire.com",
"isp9.net",
"israelmail.com",
"ist-allein.info",
"ist-einmalig.de",
"ist-ganz-allein.de",
"ist-willig.de",
"italymail.com",
"itelefonica.com.br",
"itloox.com",
"itmom.com",
"ivebeenframed.com",
"ivillage.com",
"iwan-fals.com",
"iwi.net",
"iwmail.com",
"iwon.com",
"izadpanah.com",
"jabble.com",
"jahoopa.com",
"jakuza.hu",
"japan.com",
"jaydemail.com",
"jazzandjava.com",
"jazzfan.com",
"jazzgame.com",
"je-recycle.info",
"jeanvaljean.com",
"jerusalemmail.com",
"jesusanswers.com",
"jet-renovation.fr",
"jetable.com",
"jetable.de",
"jetable.fr.nf",
"jetable.net",
"jetable.org",
"jetable.pp.ua",
"jetemail.net",
"jewishmail.com",
"jfkislanders.com",
"jingjo.net",
"jippii.fi",
"jmail.co.za",
"jnxjn.com",
"job4u.com",
"jobbikszimpatizans.hu",
"joelonsoftware.com",
"joinme.com",
"jojomail.com",
"jokes.com",
"jordanmail.com",
"journalist.com",
"jourrapide.com",
"jovem.te.pt",
"joymail.com",
"jpopmail.com",
"jsrsolutions.com",
"jubiimail.dk",
"jump.com",
"jumpy.it",
"juniormail.com",
"junk1e.com",
"junkmail.com",
"junkmail.gq",
"juno.com",
"justemail.net",
"justicemail.com",
"justmail.de",
"justmailz.com",
"justmarriedmail.com",
"jwspamspy ",
"k.ro",
"kaazoo.com",
"kabissa.org",
"kaduku.net",
"kaffeeschluerfer.com",
"kaffeeschluerfer.de",
"kaixo.com",
"kalpoint.com",
"kansascity.com",
"kapoorweb.com",
"karachian.com",
"karachioye.com",
"karbasi.com",
"kasmail.com",
"kaspop.com",
"katamail.com",
"kayafmmail.co.za",
"kbjrmail.com",
"kcks.com",
"kebi.com",
"keftamail.com",
"keg-party.com",
"keinpardon.de",
"keko.com.ar",
"kellychen.com",
"keptprivate.com",
"keromail.com",
"kewpee.com",
"keyemail.com",
"kgb.hu",
"khosropour.com",
"kichimail.com",
"kickassmail.com",
"killamail.com",
"killergreenmail.com",
"killermail.com",
"killmail.com",
"killmail.net",
"kimo.com",
"kimsdisk.com",
"kinglibrary.net",
"kinki-kids.com",
"kismail.ru",
"kissfans.com",
"kitemail.com",
"kittymail.com",
"kitznet.at",
"kiwibox.com",
"kiwitown.com",
"klassmaster.com",
"klassmaster.net",
"klzlk.com",
"km.ru",
"kmail.com.au",
"knol-power.nl",
"koko.com",
"kolumbus.fi",
"kommespaeter.de",
"konkovo.net",
"konsul.ru",
"konx.com",
"korea.com",
"koreamail.com",
"kosino.net",
"koszmail.pl",
"kozmail.com",
"kpnmail.nl",
"kreditor.ru",
"krim.ws",
"krongthip.com",
"krovatka.net",
"krunis.com",
"ksanmail.com",
"ksee24mail.com",
"kube93mail.com",
"kukamail.com",
"kulturbetrieb.info",
"kumarweb.com",
"kurzepost.de",
"kuwait-mail.com",
"kuzminki.net",
"kyokodate.com",
"kyokofukada.net",
"l33r.eu",
"la.com",
"labetteraverouge.at",
"lackmail.ru",
"ladyfire.com",
"ladymail.cz",
"lagerlouts.com",
"lags.us",
"lahoreoye.com",
"lakmail.com",
"lamer.hu",
"land.ru",
"langoo.com",
"lankamail.com",
"laoeq.com",
"laposte.net",
"lass-es-geschehen.de",
"last-chance.pro",
"lastmail.co",
"latemodels.com",
"latinmail.com",
"latino.com",
"lavabit.com",
"lavache.com",
"law.com",
"lawlita.com",
"lawyer.com",
"lazyinbox.com",
"learn2compute.net",
"lebanonatlas.com",
"leeching.net",
"leehom.net",
"lefortovo.net",
"legalactions.com",
"legalrc.loan",
"legislator.com",
"legistrator.com",
"lenta.ru",
"leonlai.net",
"letsgomets.net",
"letterbox.com",
"letterboxes.org",
"letthemeatspam.com",
"levele.com",
"levele.hu",
"lex.bg",
"lexis-nexis-mail.com",
"lhsdv.com",
"lianozovo.net",
"libero.it",
"liberomail.com",
"lick101.com",
"liebt-dich.info",
"lifebyfood.com",
"link2mail.net",
"linkmaster.com",
"linktrader.com",
"linuxfreemail.com",
"linuxmail.org",
"lionsfan.com.au",
"liontrucks.com",
"liquidinformation.net",
"lissamail.com",
"list.ru",
"listomail.com",
"litedrop.com",
"literaturelover.com",
"littleapple.com",
"littleblueroom.com",
"live.at",
"live.be",
"live.ca",
"live.cl",
"live.cn",
"live.co.uk",
"live.co.za",
"live.com",
"live.com.ar",
"live.com.au",
"live.com.mx",
"live.com.my",
"live.com.pt",
"live.com.sg",
"live.de",
"live.dk",
"live.fr",
"live.hk",
"live.ie",
"live.in",
"live.it",
"live.jp",
"live.nl",
"live.no",
"live.ru",
"live.se",
"liveradio.tk",
"liverpoolfans.com",
"ljiljan.com",
"llandudno.com",
"llangollen.com",
"lmxmail.sk",
"lobbyist.com",
"localbar.com",
"localgenius.com",
"locos.com",
"login-email.ga",
"loh.pp.ua",
"lol.ovpn.to",
"lolfreak.net",
"lolito.tk",
"lolnetwork.net",
"london.com",
"loobie.com",
"looksmart.co.uk",
"looksmart.com",
"looksmart.com.au",
"lookugly.com",
"lopezclub.com",
"lortemail.dk",
"louiskoo.com",
"lov.ru",
"love.com",
"love.cz",
"loveable.com",
"lovecat.com",
"lovefall.ml",
"lovefootball.com",
"loveforlostcats.com",
"lovelygirl.net",
"lovemail.com",
"lover-boy.com",
"lovergirl.com",
"lovesea.gq",
"lovethebroncos.com",
"lovethecowboys.com",
"lovetocook.net",
"lovetohike.com",
"loveyouforever.de",
"lovingjesus.com",
"lowandslow.com",
"lr7.us",
"lr78.com",
"lroid.com",
"lubovnik.ru",
"lukop.dk",
"luso.pt",
"luukku.com",
"luv2.us",
"luvrhino.com",
"lvie.com.sg",
"lvwebmail.com",
"lycos.co.uk",
"lycos.com",
"lycos.es",
"lycos.it",
"lycos.ne.jp",
"lycos.ru",
"lycosemail.com",
"lycosmail.com",
"m-a-i-l.com",
"m-hmail.com",
"m21.cc",
"m4.org",
"m4ilweb.info",
"mac.com",
"macbox.com",
"macbox.ru",
"macfreak.com",
"machinecandy.com",
"macmail.com",
"mad.scientist.com",
"madcrazy.com",
"madcreations.com",
"madonnafan.com",
"madrid.com",
"maennerversteherin.com",
"maennerversteherin.de",
"maffia.hu",
"magicmail.co.za",
"mahmoodweb.com",
"mail-awu.de",
"mail-box.cz",
"mail-center.com",
"mail-central.com",
"mail-easy.fr",
"mail-filter.com",
"mail-me.com",
"mail-page.com",
"mail-temporaire.fr",
"mail-tester.com",
"mail.austria.com",
"mail.az",
"mail.be",
"mail.bg",
"mail.bulgaria.com",
"mail.by",
"mail.byte.it",
"mail.co.za",
"mail.com",
"mail.com.tr",
"mail.ee",
"mail.entrepeneurmag.com",
"mail.freetown.com",
"mail.gr",
"mail.hitthebeach.com",
"mail.htl22.at",
"mail.kmsp.com",
"mail.md",
"mail.mezimages.net",
"mail.misterpinball.de",
"mail.nu",
"mail.org.uk",
"mail.pf",
"mail.pharmacy.com",
"mail.pt",
"mail.r-o-o-t.com",
"mail.ru",
"mail.salu.net",
"mail.sisna.com",
"mail.spaceports.com",
"mail.svenz.eu",
"mail.theboys.com",
"mail.usa.com",
"mail.vasarhely.hu",
"mail.vu",
"mail.wtf",
"mail.zp.ua",
"mail114.net",
"mail15.com",
"mail1a.de",
"mail1st.com",
"mail2007.com",
"mail21.cc",
"mail2aaron.com",
"mail2abby.com",
"mail2abc.com",
"mail2actor.com",
"mail2admiral.com",
"mail2adorable.com",
"mail2adoration.com",
"mail2adore.com",
"mail2adventure.com",
"mail2aeolus.com",
"mail2aether.com",
"mail2affection.com",
"mail2afghanistan.com",
"mail2africa.com",
"mail2agent.com",
"mail2aha.com",
"mail2ahoy.com",
"mail2aim.com",
"mail2air.com",
"mail2airbag.com",
"mail2airforce.com",
"mail2airport.com",
"mail2alabama.com",
"mail2alan.com",
"mail2alaska.com",
"mail2albania.com",
"mail2alcoholic.com",
"mail2alec.com",
"mail2alexa.com",
"mail2algeria.com",
"mail2alicia.com",
"mail2alien.com",
"mail2allan.com",
"mail2allen.com",
"mail2allison.com",
"mail2alpha.com",
"mail2alyssa.com",
"mail2amanda.com",
"mail2amazing.com",
"mail2amber.com",
"mail2america.com",
"mail2american.com",
"mail2andorra.com",
"mail2andrea.com",
"mail2andy.com",
"mail2anesthesiologist.com",
"mail2angela.com",
"mail2angola.com",
"mail2ann.com",
"mail2anna.com",
"mail2anne.com",
"mail2anthony.com",
"mail2anything.com",
"mail2aphrodite.com",
"mail2apollo.com",
"mail2april.com",
"mail2aquarius.com",
"mail2arabia.com",
"mail2arabic.com",
"mail2architect.com",
"mail2ares.com",
"mail2argentina.com",
"mail2aries.com",
"mail2arizona.com",
"mail2arkansas.com",
"mail2armenia.com",
"mail2army.com",
"mail2arnold.com",
"mail2art.com",
"mail2artemus.com",
"mail2arthur.com",
"mail2artist.com",
"mail2ashley.com",
"mail2ask.com",
"mail2astronomer.com",
"mail2athena.com",
"mail2athlete.com",
"mail2atlas.com",
"mail2atom.com",
"mail2attitude.com",
"mail2auction.com",
"mail2aunt.com",
"mail2australia.com",
"mail2austria.com",
"mail2azerbaijan.com",
"mail2baby.com",
"mail2bahamas.com",
"mail2bahrain.com",
"mail2ballerina.com",
"mail2ballplayer.com",
"mail2band.com",
"mail2bangladesh.com",
"mail2bank.com",
"mail2banker.com",
"mail2bankrupt.com",
"mail2baptist.com",
"mail2bar.com",
"mail2barbados.com",
"mail2barbara.com",
"mail2barter.com",
"mail2basketball.com",
"mail2batter.com",
"mail2beach.com",
"mail2beast.com",
"mail2beatles.com",
"mail2beauty.com",
"mail2becky.com",
"mail2beijing.com",
"mail2belgium.com",
"mail2belize.com",
"mail2ben.com",
"mail2bernard.com",
"mail2beth.com",
"mail2betty.com",
"mail2beverly.com",
"mail2beyond.com",
"mail2biker.com",
"mail2bill.com",
"mail2billionaire.com",
"mail2billy.com",
"mail2bio.com",
"mail2biologist.com",
"mail2black.com",
"mail2blackbelt.com",
"mail2blake.com",
"mail2blind.com",
"mail2blonde.com",
"mail2blues.com",
"mail2bob.com",
"mail2bobby.com",
"mail2bolivia.com",
"mail2bombay.com",
"mail2bonn.com",
"mail2bookmark.com",
"mail2boreas.com",
"mail2bosnia.com",
"mail2boston.com",
"mail2botswana.com",
"mail2bradley.com",
"mail2brazil.com",
"mail2breakfast.com",
"mail2brian.com",
"mail2bride.com",
"mail2brittany.com",
"mail2broker.com",
"mail2brook.com",
"mail2bruce.com",
"mail2brunei.com",
"mail2brunette.com",
"mail2brussels.com",
"mail2bryan.com",
"mail2bug.com",
"mail2bulgaria.com",
"mail2business.com",
"mail2buy.com",
"mail2ca.com",
"mail2california.com",
"mail2calvin.com",
"mail2cambodia.com",
"mail2cameroon.com",
"mail2canada.com",
"mail2cancer.com",
"mail2capeverde.com",
"mail2capricorn.com",
"mail2cardinal.com",
"mail2cardiologist.com",
"mail2care.com",
"mail2caroline.com",
"mail2carolyn.com",
"mail2casey.com",
"mail2cat.com",
"mail2caterer.com",
"mail2cathy.com",
"mail2catlover.com",
"mail2catwalk.com",
"mail2cell.com",
"mail2chad.com",
"mail2champaign.com",
"mail2charles.com",
"mail2chef.com",
"mail2chemist.com",
"mail2cherry.com",
"mail2chicago.com",
"mail2chile.com",
"mail2china.com",
"mail2chinese.com",
"mail2chocolate.com",
"mail2christian.com",
"mail2christie.com",
"mail2christmas.com",
"mail2christy.com",
"mail2chuck.com",
"mail2cindy.com",
"mail2clark.com",
"mail2classifieds.com",
"mail2claude.com",
"mail2cliff.com",
"mail2clinic.com",
"mail2clint.com",
"mail2close.com",
"mail2club.com",
"mail2coach.com",
"mail2coastguard.com",
"mail2colin.com",
"mail2college.com",
"mail2colombia.com",
"mail2color.com",
"mail2colorado.com",
"mail2columbia.com",
"mail2comedian.com",
"mail2composer.com",
"mail2computer.com",
"mail2computers.com",
"mail2concert.com",
"mail2congo.com",
"mail2connect.com",
"mail2connecticut.com",
"mail2consultant.com",
"mail2convict.com",
"mail2cook.com",
"mail2cool.com",
"mail2cory.com",
"mail2costarica.com",
"mail2country.com",
"mail2courtney.com",
"mail2cowboy.com",
"mail2cowgirl.com",
"mail2craig.com",
"mail2crave.com",
"mail2crazy.com",
"mail2create.com",
"mail2croatia.com",
"mail2cry.com",
"mail2crystal.com",
"mail2cuba.com",
"mail2culture.com",
"mail2curt.com",
"mail2customs.com",
"mail2cute.com",
"mail2cutey.com",
"mail2cynthia.com",
"mail2cyprus.com",
"mail2czechrepublic.com",
"mail2dad.com",
"mail2dale.com",
"mail2dallas.com",
"mail2dan.com",
"mail2dana.com",
"mail2dance.com",
"mail2dancer.com",
"mail2danielle.com",
"mail2danny.com",
"mail2darlene.com",
"mail2darling.com",
"mail2darren.com",
"mail2daughter.com",
"mail2dave.com",
"mail2dawn.com",
"mail2dc.com",
"mail2dealer.com",
"mail2deanna.com",
"mail2dearest.com",
"mail2debbie.com",
"mail2debby.com",
"mail2deer.com",
"mail2delaware.com",
"mail2delicious.com",
"mail2demeter.com",
"mail2democrat.com",
"mail2denise.com",
"mail2denmark.com",
"mail2dennis.com",
"mail2dentist.com",
"mail2derek.com",
"mail2desert.com",
"mail2devoted.com",
"mail2devotion.com",
"mail2diamond.com",
"mail2diana.com",
"mail2diane.com",
"mail2diehard.com",
"mail2dilemma.com",
"mail2dillon.com",
"mail2dinner.com",
"mail2dinosaur.com",
"mail2dionysos.com",
"mail2diplomat.com",
"mail2director.com",
"mail2dirk.com",
"mail2disco.com",
"mail2dive.com",
"mail2diver.com",
"mail2divorced.com",
"mail2djibouti.com",
"mail2doctor.com",
"mail2doglover.com",
"mail2dominic.com",
"mail2dominica.com",
"mail2dominicanrepublic.com",
"mail2don.com",
"mail2donald.com",
"mail2donna.com",
"mail2doris.com",
"mail2dorothy.com",
"mail2doug.com",
"mail2dough.com",
"mail2douglas.com",
"mail2dow.com",
"mail2downtown.com",
"mail2dream.com",
"mail2dreamer.com",
"mail2dude.com",
"mail2dustin.com",
"mail2dyke.com",
"mail2dylan.com",
"mail2earl.com",
"mail2earth.com",
"mail2eastend.com",
"mail2eat.com",
"mail2economist.com",
"mail2ecuador.com",
"mail2eddie.com",
"mail2edgar.com",
"mail2edwin.com",
"mail2egypt.com",
"mail2electron.com",
"mail2eli.com",
"mail2elizabeth.com",
"mail2ellen.com",
"mail2elliot.com",
"mail2elsalvador.com",
"mail2elvis.com",
"mail2emergency.com",
"mail2emily.com",
"mail2engineer.com",
"mail2english.com",
"mail2environmentalist.com",
"mail2eos.com",
"mail2eric.com",
"mail2erica.com",
"mail2erin.com",
"mail2erinyes.com",
"mail2eris.com",
"mail2eritrea.com",
"mail2ernie.com",
"mail2eros.com",
"mail2estonia.com",
"mail2ethan.com",
"mail2ethiopia.com",
"mail2eu.com",
"mail2europe.com",
"mail2eurus.com",
"mail2eva.com",
"mail2evan.com",
"mail2evelyn.com",
"mail2everything.com",
"mail2exciting.com",
"mail2expert.com",
"mail2fairy.com",
"mail2faith.com",
"mail2fanatic.com",
"mail2fancy.com",
"mail2fantasy.com",
"mail2farm.com",
"mail2farmer.com",
"mail2fashion.com",
"mail2fat.com",
"mail2feeling.com",
"mail2female.com",
"mail2fever.com",
"mail2fighter.com",
"mail2fiji.com",
"mail2filmfestival.com",
"mail2films.com",
"mail2finance.com",
"mail2finland.com",
"mail2fireman.com",
"mail2firm.com",
"mail2fisherman.com",
"mail2flexible.com",
"mail2florence.com",
"mail2florida.com",
"mail2floyd.com",
"mail2fly.com",
"mail2fond.com",
"mail2fondness.com",
"mail2football.com",
"mail2footballfan.com",
"mail2found.com",
"mail2france.com",
"mail2frank.com",
"mail2frankfurt.com",
"mail2franklin.com",
"mail2fred.com",
"mail2freddie.com",
"mail2free.com",
"mail2freedom.com",
"mail2french.com",
"mail2freudian.com",
"mail2friendship.com",
"mail2from.com",
"mail2fun.com",
"mail2gabon.com",
"mail2gabriel.com",
"mail2gail.com",
"mail2galaxy.com",
"mail2gambia.com",
"mail2games.com",
"mail2gary.com",
"mail2gavin.com",
"mail2gemini.com",
"mail2gene.com",
"mail2genes.com",
"mail2geneva.com",
"mail2george.com",
"mail2georgia.com",
"mail2gerald.com",
"mail2german.com",
"mail2germany.com",
"mail2ghana.com",
"mail2gilbert.com",
"mail2gina.com",
"mail2girl.com",
"mail2glen.com",
"mail2gloria.com",
"mail2goddess.com",
"mail2gold.com",
"mail2golfclub.com",
"mail2golfer.com",
"mail2gordon.com",
"mail2government.com",
"mail2grab.com",
"mail2grace.com",
"mail2graham.com",
"mail2grandma.com",
"mail2grandpa.com",
"mail2grant.com",
"mail2greece.com",
"mail2green.com",
"mail2greg.com",
"mail2grenada.com",
"mail2gsm.com",
"mail2guard.com",
"mail2guatemala.com",
"mail2guy.com",
"mail2hades.com",
"mail2haiti.com",
"mail2hal.com",
"mail2handhelds.com",
"mail2hank.com",
"mail2hannah.com",
"mail2harold.com",
"mail2harry.com",
"mail2hawaii.com",
"mail2headhunter.com",
"mail2heal.com",
"mail2heather.com",
"mail2heaven.com",
"mail2hebe.com",
"mail2hecate.com",
"mail2heidi.com",
"mail2helen.com",
"mail2hell.com",
"mail2help.com",
"mail2helpdesk.com",
"mail2henry.com",
"mail2hephaestus.com",
"mail2hera.com",
"mail2hercules.com",
"mail2herman.com",
"mail2hermes.com",
"mail2hespera.com",
"mail2hestia.com",
"mail2highschool.com",
"mail2hindu.com",
"mail2hip.com",
"mail2hiphop.com",
"mail2holland.com",
"mail2holly.com",
"mail2hollywood.com",
"mail2homer.com",
"mail2honduras.com",
"mail2honey.com",
"mail2hongkong.com",
"mail2hope.com",
"mail2horse.com",
"mail2hot.com",
"mail2hotel.com",
"mail2houston.com",
"mail2howard.com",
"mail2hugh.com",
"mail2human.com",
"mail2hungary.com",
"mail2hungry.com",
"mail2hygeia.com",
"mail2hyperspace.com",
"mail2hypnos.com",
"mail2ian.com",
"mail2ice-cream.com",
"mail2iceland.com",
"mail2idaho.com",
"mail2idontknow.com",
"mail2illinois.com",
"mail2imam.com",
"mail2in.com",
"mail2india.com",
"mail2indian.com",
"mail2indiana.com",
"mail2indonesia.com",
"mail2infinity.com",
"mail2intense.com",
"mail2iowa.com",
"mail2iran.com",
"mail2iraq.com",
"mail2ireland.com",
"mail2irene.com",
"mail2iris.com",
"mail2irresistible.com",
"mail2irving.com",
"mail2irwin.com",
"mail2isaac.com",
"mail2israel.com",
"mail2italian.com",
"mail2italy.com",
"mail2jackie.com",
"mail2jacob.com",
"mail2jail.com",
"mail2jaime.com",
"mail2jake.com",
"mail2jamaica.com",
"mail2james.com",
"mail2jamie.com",
"mail2jan.com",
"mail2jane.com",
"mail2janet.com",
"mail2janice.com",
"mail2japan.com",
"mail2japanese.com",
"mail2jasmine.com",
"mail2jason.com",
"mail2java.com",
"mail2jay.com",
"mail2jazz.com",
"mail2jed.com",
"mail2jeffrey.com",
"mail2jennifer.com",
"mail2jenny.com",
"mail2jeremy.com",
"mail2jerry.com",
"mail2jessica.com",
"mail2jessie.com",
"mail2jesus.com",
"mail2jew.com",
"mail2jeweler.com",
"mail2jim.com",
"mail2jimmy.com",
"mail2joan.com",
"mail2joann.com",
"mail2joanna.com",
"mail2jody.com",
"mail2joe.com",
"mail2joel.com",
"mail2joey.com",
"mail2john.com",
"mail2join.com",
"mail2jon.com",
"mail2jonathan.com",
"mail2jones.com",
"mail2jordan.com",
"mail2joseph.com",
"mail2josh.com",
"mail2joy.com",
"mail2juan.com",
"mail2judge.com",
"mail2judy.com",
"mail2juggler.com",
"mail2julian.com",
"mail2julie.com",
"mail2jumbo.com",
"mail2junk.com",
"mail2justin.com",
"mail2justme.com",
"mail2k.ru",
"mail2kansas.com",
"mail2karate.com",
"mail2karen.com",
"mail2karl.com",
"mail2karma.com",
"mail2kathleen.com",
"mail2kathy.com",
"mail2katie.com",
"mail2kay.com",
"mail2kazakhstan.com",
"mail2keen.com",
"mail2keith.com",
"mail2kelly.com",
"mail2kelsey.com",
"mail2ken.com",
"mail2kendall.com",
"mail2kennedy.com",
"mail2kenneth.com",
"mail2kenny.com",
"mail2kentucky.com",
"mail2kenya.com",
"mail2kerry.com",
"mail2kevin.com",
"mail2kim.com",
"mail2kimberly.com",
"mail2king.com",
"mail2kirk.com",
"mail2kiss.com",
"mail2kosher.com",
"mail2kristin.com",
"mail2kurt.com",
"mail2kuwait.com",
"mail2kyle.com",
"mail2kyrgyzstan.com",
"mail2la.com",
"mail2lacrosse.com",
"mail2lance.com",
"mail2lao.com",
"mail2larry.com",
"mail2latvia.com",
"mail2laugh.com",
"mail2laura.com",
"mail2lauren.com",
"mail2laurie.com",
"mail2lawrence.com",
"mail2lawyer.com",
"mail2lebanon.com",
"mail2lee.com",
"mail2leo.com",
"mail2leon.com",
"mail2leonard.com",
"mail2leone.com",
"mail2leslie.com",
"mail2letter.com",
"mail2liberia.com",
"mail2libertarian.com",
"mail2libra.com",
"mail2libya.com",
"mail2liechtenstein.com",
"mail2life.com",
"mail2linda.com",
"mail2linux.com",
"mail2lionel.com",
"mail2lipstick.com",
"mail2liquid.com",
"mail2lisa.com",
"mail2lithuania.com",
"mail2litigator.com",
"mail2liz.com",
"mail2lloyd.com",
"mail2lois.com",
"mail2lola.com",
"mail2london.com",
"mail2looking.com",
"mail2lori.com",
"mail2lost.com",
"mail2lou.com",
"mail2louis.com",
"mail2louisiana.com",
"mail2lovable.com",
"mail2love.com",
"mail2lucky.com",
"mail2lucy.com",
"mail2lunch.com",
"mail2lust.com",
"mail2luxembourg.com",
"mail2luxury.com",
"mail2lyle.com",
"mail2lynn.com",
"mail2madagascar.com",
"mail2madison.com",
"mail2madrid.com",
"mail2maggie.com",
"mail2mail4.com",
"mail2maine.com",
"mail2malawi.com",
"mail2malaysia.com",
"mail2maldives.com",
"mail2mali.com",
"mail2malta.com",
"mail2mambo.com",
"mail2man.com",
"mail2mandy.com",
"mail2manhunter.com",
"mail2mankind.com",
"mail2many.com",
"mail2marc.com",
"mail2marcia.com",
"mail2margaret.com",
"mail2margie.com",
"mail2marhaba.com",
"mail2maria.com",
"mail2marilyn.com",
"mail2marines.com",
"mail2mark.com",
"mail2marriage.com",
"mail2married.com",
"mail2marries.com",
"mail2mars.com",
"mail2marsha.com",
"mail2marshallislands.com",
"mail2martha.com",
"mail2martin.com",
"mail2marty.com",
"mail2marvin.com",
"mail2mary.com",
"mail2maryland.com",
"mail2mason.com",
"mail2massachusetts.com",
"mail2matt.com",
"mail2matthew.com",
"mail2maurice.com",
"mail2mauritania.com",
"mail2mauritius.com",
"mail2max.com",
"mail2maxwell.com",
"mail2maybe.com",
"mail2mba.com",
"mail2me4u.com",
"mail2mechanic.com",
"mail2medieval.com",
"mail2megan.com",
"mail2mel.com",
"mail2melanie.com",
"mail2melissa.com",
"mail2melody.com",
"mail2member.com",
"mail2memphis.com",
"mail2methodist.com",
"mail2mexican.com",
"mail2mexico.com",
"mail2mgz.com",
"mail2miami.com",
"mail2michael.com",
"mail2michelle.com",
"mail2michigan.com",
"mail2mike.com",
"mail2milan.com",
"mail2milano.com",
"mail2mildred.com",
"mail2milkyway.com",
"mail2millennium.com",
"mail2millionaire.com",
"mail2milton.com",
"mail2mime.com",
"mail2mindreader.com",
"mail2mini.com",
"mail2minister.com",
"mail2minneapolis.com",
"mail2minnesota.com",
"mail2miracle.com",
"mail2missionary.com",
"mail2mississippi.com",
"mail2missouri.com",
"mail2mitch.com",
"mail2model.com",
"mail2moldova.commail2molly.com",
"mail2mom.com",
"mail2monaco.com",
"mail2money.com",
"mail2mongolia.com",
"mail2monica.com",
"mail2montana.com",
"mail2monty.com",
"mail2moon.com",
"mail2morocco.com",
"mail2morpheus.com",
"mail2mors.com",
"mail2moscow.com",
"mail2moslem.com",
"mail2mouseketeer.com",
"mail2movies.com",
"mail2mozambique.com",
"mail2mp3.com",
"mail2mrright.com",
"mail2msright.com",
"mail2museum.com",
"mail2music.com",
"mail2musician.com",
"mail2muslim.com",
"mail2my.com",
"mail2myboat.com",
"mail2mycar.com",
"mail2mycell.com",
"mail2mygsm.com",
"mail2mylaptop.com",
"mail2mymac.com",
"mail2mypager.com",
"mail2mypalm.com",
"mail2mypc.com",
"mail2myphone.com",
"mail2myplane.com",
"mail2namibia.com",
"mail2nancy.com",
"mail2nasdaq.com",
"mail2nathan.com",
"mail2nauru.com",
"mail2navy.com",
"mail2neal.com",
"mail2nebraska.com",
"mail2ned.com",
"mail2neil.com",
"mail2nelson.com",
"mail2nemesis.com",
"mail2nepal.com",
"mail2netherlands.com",
"mail2network.com",
"mail2nevada.com",
"mail2newhampshire.com",
"mail2newjersey.com",
"mail2newmexico.com",
"mail2newyork.com",
"mail2newzealand.com",
"mail2nicaragua.com",
"mail2nick.com",
"mail2nicole.com",
"mail2niger.com",
"mail2nigeria.com",
"mail2nike.com",
"mail2no.com",
"mail2noah.com",
"mail2noel.com",
"mail2noelle.com",
"mail2normal.com",
"mail2norman.com",
"mail2northamerica.com",
"mail2northcarolina.com",
"mail2northdakota.com",
"mail2northpole.com",
"mail2norway.com",
"mail2notus.com",
"mail2noway.com",
"mail2nowhere.com",
"mail2nuclear.com",
"mail2nun.com",
"mail2ny.com",
"mail2oasis.com",
"mail2oceanographer.com",
"mail2ohio.com",
"mail2ok.com",
"mail2oklahoma.com",
"mail2oliver.com",
"mail2oman.com",
"mail2one.com",
"mail2onfire.com",
"mail2online.com",
"mail2oops.com",
"mail2open.com",
"mail2ophthalmologist.com",
"mail2optometrist.com",
"mail2oregon.com",
"mail2oscars.com",
"mail2oslo.com",
"mail2painter.com",
"mail2pakistan.com",
"mail2palau.com",
"mail2pan.com",
"mail2panama.com",
"mail2paraguay.com",
"mail2paralegal.com",
"mail2paris.com",
"mail2park.com",
"mail2parker.com",
"mail2party.com",
"mail2passion.com",
"mail2pat.com",
"mail2patricia.com",
"mail2patrick.com",
"mail2patty.com",
"mail2paul.com",
"mail2paula.com",
"mail2pay.com",
"mail2peace.com",
"mail2pediatrician.com",
"mail2peggy.com",
"mail2pennsylvania.com",
"mail2perry.com",
"mail2persephone.com",
"mail2persian.com",
"mail2peru.com",
"mail2pete.com",
"mail2peter.com",
"mail2pharmacist.com",
"mail2phil.com",
"mail2philippines.com",
"mail2phoenix.com",
"mail2phonecall.com",
"mail2phyllis.com",
"mail2pickup.com",
"mail2pilot.com",
"mail2pisces.com",
"mail2planet.com",
"mail2platinum.com",
"mail2plato.com",
"mail2pluto.com",
"mail2pm.com",
"mail2podiatrist.com",
"mail2poet.com",
"mail2poland.com",
"mail2policeman.com",
"mail2policewoman.com",
"mail2politician.com",
"mail2pop.com",
"mail2pope.com",
"mail2popular.com",
"mail2portugal.com",
"mail2poseidon.com",
"mail2potatohead.com",
"mail2power.com",
"mail2presbyterian.com",
"mail2president.com",
"mail2priest.com",
"mail2prince.com",
"mail2princess.com",
"mail2producer.com",
"mail2professor.com",
"mail2protect.com",
"mail2psychiatrist.com",
"mail2psycho.com",
"mail2psychologist.com",
"mail2qatar.com",
"mail2queen.com",
"mail2rabbi.com",
"mail2race.com",
"mail2racer.com",
"mail2rachel.com",
"mail2rage.com",
"mail2rainmaker.com",
"mail2ralph.com",
"mail2randy.com",
"mail2rap.com",
"mail2rare.com",
"mail2rave.com",
"mail2ray.com",
"mail2raymond.com",
"mail2realtor.com",
"mail2rebecca.com",
"mail2recruiter.com",
"mail2recycle.com",
"mail2redhead.com",
"mail2reed.com",
"mail2reggie.com",
"mail2register.com",
"mail2rent.com",
"mail2republican.com",
"mail2resort.com",
"mail2rex.com",
"mail2rhodeisland.com",
"mail2rich.com",
"mail2richard.com",
"mail2ricky.com",
"mail2ride.com",
"mail2riley.com",
"mail2rita.com",
"mail2rob.com",
"mail2robert.com",
"mail2roberta.com",
"mail2robin.com",
"mail2rock.com",
"mail2rocker.com",
"mail2rod.com",
"mail2rodney.com",
"mail2romania.com",
"mail2rome.com",
"mail2ron.com",
"mail2ronald.com",
"mail2ronnie.com",
"mail2rose.com",
"mail2rosie.com",
"mail2roy.com",
"mail2rss.org",
"mail2rudy.com",
"mail2rugby.com",
"mail2runner.com",
"mail2russell.com",
"mail2russia.com",
"mail2russian.com",
"mail2rusty.com",
"mail2ruth.com",
"mail2rwanda.com",
"mail2ryan.com",
"mail2sa.com",
"mail2sabrina.com",
"mail2safe.com",
"mail2sagittarius.com",
"mail2sail.com",
"mail2sailor.com",
"mail2sal.com",
"mail2salaam.com",
"mail2sam.com",
"mail2samantha.com",
"mail2samoa.com",
"mail2samurai.com",
"mail2sandra.com",
"mail2sandy.com",
"mail2sanfrancisco.com",
"mail2sanmarino.com",
"mail2santa.com",
"mail2sara.com",
"mail2sarah.com",
"mail2sat.com",
"mail2saturn.com",
"mail2saudi.com",
"mail2saudiarabia.com",
"mail2save.com",
"mail2savings.com",
"mail2school.com",
"mail2scientist.com",
"mail2scorpio.com",
"mail2scott.com",
"mail2sean.com",
"mail2search.com",
"mail2seattle.com",
"mail2secretagent.com",
"mail2senate.com",
"mail2senegal.com",
"mail2sensual.com",
"mail2seth.com",
"mail2sevenseas.com",
"mail2sexy.com",
"mail2seychelles.com",
"mail2shane.com",
"mail2sharon.com",
"mail2shawn.com",
"mail2ship.com",
"mail2shirley.com",
"mail2shoot.com",
"mail2shuttle.com",
"mail2sierraleone.com",
"mail2simon.com",
"mail2singapore.com",
"mail2single.com",
"mail2site.com",
"mail2skater.com",
"mail2skier.com",
"mail2sky.com",
"mail2sleek.com",
"mail2slim.com",
"mail2slovakia.com",
"mail2slovenia.com",
"mail2smile.com",
"mail2smith.com",
"mail2smooth.com",
"mail2soccer.com",
"mail2soccerfan.com",
"mail2socialist.com",
"mail2soldier.com",
"mail2somalia.com",
"mail2son.com",
"mail2song.com",
"mail2sos.com",
"mail2sound.com",
"mail2southafrica.com",
"mail2southamerica.com",
"mail2southcarolina.com",
"mail2southdakota.com",
"mail2southkorea.com",
"mail2southpole.com",
"mail2spain.com",
"mail2spanish.com",
"mail2spare.com",
"mail2spectrum.com",
"mail2splash.com",
"mail2sponsor.com",
"mail2sports.com",
"mail2srilanka.com",
"mail2stacy.com",
"mail2stan.com",
"mail2stanley.com",
"mail2star.com",
"mail2state.com",
"mail2stephanie.com",
"mail2steve.com",
"mail2steven.com",
"mail2stewart.com",
"mail2stlouis.com",
"mail2stock.com",
"mail2stockholm.com",
"mail2stockmarket.com",
"mail2storage.com",
"mail2store.com",
"mail2strong.com",
"mail2student.com",
"mail2studio.com",
"mail2studio54.com",
"mail2stuntman.com",
"mail2subscribe.com",
"mail2sudan.com",
"mail2superstar.com",
"mail2surfer.com",
"mail2suriname.com",
"mail2susan.com",
"mail2suzie.com",
"mail2swaziland.com",
"mail2sweden.com",
"mail2sweetheart.com",
"mail2swim.com",
"mail2swimmer.com",
"mail2swiss.com",
"mail2switzerland.com",
"mail2sydney.com",
"mail2sylvia.com",
"mail2syria.com",
"mail2taboo.com",
"mail2taiwan.com",
"mail2tajikistan.com",
"mail2tammy.com",
"mail2tango.com",
"mail2tanya.com",
"mail2tanzania.com",
"mail2tara.com",
"mail2taurus.com",
"mail2taxi.com",
"mail2taxidermist.com",
"mail2taylor.com",
"mail2taz.com",
"mail2teacher.com",
"mail2technician.com",
"mail2ted.com",
"mail2telephone.com",
"mail2teletubbie.com",
"mail2tenderness.com",
"mail2tennessee.com",
"mail2tennis.com",
"mail2tennisfan.com",
"mail2terri.com",
"mail2terry.com",
"mail2test.com",
"mail2texas.com",
"mail2thailand.com",
"mail2therapy.com",
"mail2think.com",
"mail2tickets.com",
"mail2tiffany.com",
"mail2tim.com",
"mail2time.com",
"mail2timothy.com",
"mail2tina.com",
"mail2titanic.com",
"mail2toby.com",
"mail2todd.com",
"mail2togo.com",
"mail2tom.com",
"mail2tommy.com",
"mail2tonga.com",
"mail2tony.com",
"mail2touch.com",
"mail2tourist.com",
"mail2tracey.com",
"mail2tracy.com",
"mail2tramp.com",
"mail2travel.com",
"mail2traveler.com",
"mail2travis.com",
"mail2trekkie.com",
"mail2trex.com",
"mail2triallawyer.com",
"mail2trick.com",
"mail2trillionaire.com",
"mail2troy.com",
"mail2truck.com",
"mail2trump.com",
"mail2try.com",
"mail2tunisia.com",
"mail2turbo.com",
"mail2turkey.com",
"mail2turkmenistan.com",
"mail2tv.com",
"mail2tycoon.com",
"mail2tyler.com",
"mail2u4me.com",
"mail2uae.com",
"mail2uganda.com",
"mail2uk.com",
"mail2ukraine.com",
"mail2uncle.com",
"mail2unsubscribe.com",
"mail2uptown.com",
"mail2uruguay.com",
"mail2usa.com",
"mail2utah.com",
"mail2uzbekistan.com",
"mail2v.com",
"mail2vacation.com",
"mail2valentines.com",
"mail2valerie.com",
"mail2valley.com",
"mail2vamoose.com",
"mail2vanessa.com",
"mail2vanuatu.com",
"mail2venezuela.com",
"mail2venous.com",
"mail2venus.com",
"mail2vermont.com",
"mail2vickie.com",
"mail2victor.com",
"mail2victoria.com",
"mail2vienna.com",
"mail2vietnam.com",
"mail2vince.com",
"mail2virginia.com",
"mail2virgo.com",
"mail2visionary.com",
"mail2vodka.com",
"mail2volleyball.com",
"mail2waiter.com",
"mail2wallstreet.com",
"mail2wally.com",
"mail2walter.com",
"mail2warren.com",
"mail2washington.com",
"mail2wave.com",
"mail2way.com",
"mail2waycool.com",
"mail2wayne.com",
"mail2webmaster.com",
"mail2webtop.com",
"mail2webtv.com",
"mail2weird.com",
"mail2wendell.com",
"mail2wendy.com",
"mail2westend.com",
"mail2westvirginia.com",
"mail2whether.com",
"mail2whip.com",
"mail2white.com",
"mail2whitehouse.com",
"mail2whitney.com",
"mail2why.com",
"mail2wilbur.com",
"mail2wild.com",
"mail2willard.com",
"mail2willie.com",
"mail2wine.com",
"mail2winner.com",
"mail2wired.com",
"mail2wisconsin.com",
"mail2woman.com",
"mail2wonder.com",
"mail2world.com",
"mail2worship.com",
"mail2wow.com",
"mail2www.com",
"mail2wyoming.com",
"mail2xfiles.com",
"mail2xox.com",
"mail2yachtclub.com",
"mail2yahalla.com",
"mail2yemen.com",
"mail2yes.com",
"mail2yugoslavia.com",
"mail2zack.com",
"mail2zambia.com",
"mail2zenith.com",
"mail2zephir.com",
"mail2zeus.com",
"mail2zipper.com",
"mail2zoo.com",
"mail2zoologist.com",
"mail2zurich.com",
"mail3000.com",
"mail333.com",
"mail4trash.com",
"mail4u.info",
"mail8.com",
"mailandftp.com",
"mailandnews.com",
"mailas.com",
"mailasia.com",
"mailbidon.com",
"mailbiz.biz",
"mailblocks.com",
"mailbolt.com",
"mailbomb.net",
"mailboom.com",
"mailbox.as",
"mailbox.co.za",
"mailbox.gr",
"mailbox.hu",
"mailbox72.biz",
"mailbox80.biz",
"mailbr.com.br",
"mailbucket.org",
"mailc.net",
"mailcan.com",
"mailcat.biz",
"mailcatch.com",
"mailcc.com",
"mailchoose.co",
"mailcity.com",
"mailclub.fr",
"mailclub.net",
"mailde.de",
"mailde.info",
"maildrop.cc",
"maildrop.gq",
"maildx.com",
"mailed.ro",
"maileimer.de",
"mailexcite.com",
"mailexpire.com",
"mailfa.tk",
"mailfly.com",
"mailforce.net",
"mailforspam.com",
"mailfree.gq",
"mailfreeonline.com",
"mailfreeway.com",
"mailfs.com",
"mailftp.com",
"mailgate.gr",
"mailgate.ru",
"mailgenie.net",
"mailguard.me",
"mailhaven.com",
"mailhood.com",
"mailimate.com",
"mailin8r.com",
"mailinatar.com",
"mailinater.com",
"mailinator.com",
"mailinator.net",
"mailinator.org",
"mailinator.us",
"mailinator2.com",
"mailinblack.com",
"mailincubator.com",
"mailingaddress.org",
"mailingweb.com",
"mailisent.com",
"mailismagic.com",
"mailite.com",
"mailmate.com",
"mailme.dk",
"mailme.gq",
"mailme.ir",
"mailme.lv",
"mailme24.com",
"mailmetrash.com",
"mailmight.com",
"mailmij.nl",
"mailmoat.com",
"mailms.com",
"mailnator.com",
"mailnesia.com",
"mailnew.com",
"mailnull.com",
"mailops.com",
"mailorg.org",
"mailoye.com",
"mailpanda.com",
"mailpick.biz",
"mailpokemon.com",
"mailpost.zzn.com",
"mailpride.com",
"mailproxsy.com",
"mailpuppy.com",
"mailquack.com",
"mailrock.biz",
"mailroom.com",
"mailru.com",
"mailsac.com",
"mailscrap.com",
"mailseal.de",
"mailsent.net",
"mailserver.ru",
"mailservice.ms",
"mailshell.com",
"mailshuttle.com",
"mailsiphon.com",
"mailslapping.com",
"mailsnare.net",
"mailstart.com",
"mailstartplus.com",
"mailsurf.com",
"mailtag.com",
"mailtemp.info",
"mailto.de",
"mailtome.de",
"mailtothis.com",
"mailtrash.net",
"mailtv.net",
"mailtv.tv",
"mailueberfall.de",
"mailup.net",
"mailwire.com",
"mailworks.org",
"mailzi.ru",
"mailzilla.com",
"mailzilla.org",
"makemetheking.com",
"maktoob.com",
"malayalamtelevision.net",
"malayalapathram.com",
"male.ru",
"maltesemail.com",
"mamber.net",
"manager.de",
"manager.in.th",
"mancity.net",
"manlymail.net",
"mantrafreenet.com",
"mantramail.com",
"mantraonline.com",
"manutdfans.com",
"manybrain.com",
"marchmail.com",
"marfino.net",
"margarita.ru",
"mariah-carey.ml.org",
"mariahc.com",
"marijuana.com",
"marijuana.nl",
"marketing.lu",
"marketingfanatic.com",
"marketweighton.com",
"married-not.com",
"marriedandlovingit.com",
"marry.ru",
"marsattack.com",
"martindalemail.com",
"martinguerre.net",
"mash4077.com",
"masrawy.com",
"matmail.com",
"mauimail.com",
"mauritius.com",
"maximumedge.com",
"maxleft.com",
"maxmail.co.uk",
"mayaple.ru",
"mbox.com.au",
"mbx.cc",
"mchsi.com",
"mcrmail.com",
"me-mail.hu",
"me.com",
"meanpeoplesuck.com",
"meatismurder.net",
"medical.net.au",
"medmail.com",
"medscape.com",
"meetingmall.com",
"mega.zik.dj",
"megago.com",
"megamail.pt",
"megapoint.com",
"mehrani.com",
"mehtaweb.com",
"meine-dateien.info",
"meine-diashow.de",
"meine-fotos.info",
"meine-urlaubsfotos.de",
"meinspamschutz.de",
"mekhong.com",
"melodymail.com",
"meloo.com",
"meltmail.com",
"members.student.com",
"menja.net",
"merda.flu.cc",
"merda.igg.biz",
"merda.nut.cc",
"merda.usa.cc",
"merseymail.com",
"mesra.net",
"message.hu",
"message.myspace.com",
"messagebeamer.de",
"messages.to",
"messagez.com",
"metacrawler.com",
"metalfan.com",
"metaping.com",
"metta.lk",
"mexicomail.com",
"mezimages.net",
"mfsa.ru",
"miatadriver.com",
"mierdamail.com",
"miesto.sk",
"mighty.co.za",
"migmail.net",
"migmail.pl",
"migumail.com",
"miho-nakayama.com",
"mikrotamanet.com",
"millionaireintraining.com",
"millionairemail.com",
"milmail.com",
"milmail.com15",
"mindless.com",
"mindspring.com",
"minermail.com",
"mini-mail.com",
"minister.com",
"ministry-of-silly-walks.de",
"mintemail.com",
"misery.net",
"misterpinball.de",
"mit.tc",
"mittalweb.com",
"mixmail.com",
"mjfrogmail.com",
"ml1.net",
"mlanime.com",
"mlb.bounce.ed10.net",
"mm.st",
"mmail.com",
"mns.ru",
"mo3gov.net",
"moakt.com",
"mobico.ru",
"mobilbatam.com",
"mobileninja.co.uk",
"mochamail.com",
"modemnet.net",
"modernenglish.com",
"modomail.com",
"mohammed.com",
"mohmal.com",
"moldova.cc",
"moldova.com",
"moldovacc.com",
"mom-mail.com",
"momslife.com",
"moncourrier.fr.nf",
"monemail.com",
"monemail.fr.nf",
"money.net",
"mongol.net",
"monmail.fr.nf",
"monsieurcinema.com",
"montevideo.com.uy",
"monumentmail.com",
"moomia.com",
"moonman.com",
"moose-mail.com",
"mor19.uu.gl",
"mortaza.com",
"mosaicfx.com",
"moscowmail.com",
"mosk.ru",
"most-wanted.com",
"mostlysunny.com",
"motorcyclefan.net",
"motormania.com",
"movemail.com",
"movieemail.net",
"movieluver.com",
"mox.pp.ua",
"mozartmail.com",
"mozhno.net",
"mp3haze.com",
"mp4.it",
"mr-potatohead.com",
"mrpost.com",
"mrspender.com",
"mscold.com",
"msgbox.com",
"msn.cn",
"msn.com",
"msn.nl",
"msx.ru",
"mt2009.com",
"mt2014.com",
"mt2015.com",
"mt2016.com",
"mttestdriver.com",
"muehlacker.tk",
"multiplechoices",
"mundomail.net",
"munich.com",
"music.com",
"music.com19",
"music.maigate.ru",
"musician.com",
"musician.org",
"musicscene.org",
"muskelshirt.de",
"muslim.com",
"muslimemail.com",
"muslimsonline.com",
"mutantweb.com",
"mvrht.com",
"my.com",
"my10minutemail.com",
"mybox.it",
"mycabin.com",
"mycampus.com",
"mycard.net.ua",
"mycity.com",
"mycleaninbox.net",
"mycool.com",
"mydomain.com",
"mydotcomaddress.com",
"myfairpoint.net",
"myfamily.com",
"myfastmail.com",
"myfunnymail.com",
"mygo.com",
"myiris.com",
"myjazzmail.com",
"mymac.ru",
"mymacmail.com",
"mymail-in.net",
"mymail.ro",
"mynamedot.com",
"mynet.com",
"mynetaddress.com",
"mynetstore.de",
"myotw.net",
"myownemail.com",
"myownfriends.com",
"mypacks.net",
"mypad.com",
"mypartyclip.de",
"mypersonalemail.com",
"myphantomemail.com",
"myplace.com",
"myrambler.ru",
"myrealbox.com",
"myremarq.com",
"mysamp.de",
"myself.com",
"myspaceinc.net",
"myspamless.com",
"mystupidjob.com",
"mytemp.email",
"mytempemail.com",
"mytempmail.com",
"mythirdage.com",
"mytrashmail.com",
"myway.com",
"myworldmail.com",
"n2.com",
"n2baseball.com",
"n2business.com",
"n2mail.com",
"n2soccer.com",
"n2software.com",
"nabc.biz",
"nabuma.com",
"nafe.com",
"nagarealm.com",
"nagpal.net",
"nakedgreens.com",
"name.com",
"nameplanet.com",
"nanaseaikawa.com",
"nandomail.com",
"naplesnews.net",
"naseej.com",
"nate.com",
"nativestar.net",
"nativeweb.net",
"naui.net",
"naver.com",
"navigator.lv",
"navy.org",
"naz.com",
"nc.rr.com",
"nc.ru",
"nchoicemail.com",
"neeva.net",
"nekto.com",
"nekto.net",
"nekto.ru",
"nemra1.com",
"nenter.com",
"neo.rr.com",
"neomailbox.com",
"nepwk.com",
"nervhq.org",
"nervmich.net",
"nervtmich.net",
"net-c.be",
"net-c.ca",
"net-c.cat",
"net-c.com",
"net-c.es",
"net-c.fr",
"net-c.it",
"net-c.lu",
"net-c.nl",
"net-c.pl",
"net-pager.net",
"net-shopping.com",
"net.tf",
"net4b.pt",
"net4you.at",
"netaddres.ru",
"netaddress.ru",
"netbounce.com",
"netbroadcaster.com",
"netby.dk",
"netc.eu",
"netc.fr",
"netc.it",
"netc.lu",
"netc.pl",
"netcenter-vn.net",
"netcity.ru",
"netcmail.com",
"netcourrier.com",
"netexecutive.com",
"netexpressway.com",
"netfirms.com",
"netgenie.com",
"netian.com",
"netizen.com.ar",
"netkushi.com",
"netlane.com",
"netlimit.com",
"netmail.kg",
"netmails.com",
"netmails.net",
"netman.ru",
"netmanor.com",
"netmongol.com",
"netnet.com.sg",
"netnoir.net",
"netpiper.com",
"netposta.net",
"netradiomail.com",
"netralink.com",
"netscape.net",
"netscapeonline.co.uk",
"netspace.net.au",
"netspeedway.com",
"netsquare.com",
"netster.com",
"nettaxi.com",
"nettemail.com",
"netterchef.de",
"netti.fi",
"netvigator.com",
"netzero.com",
"netzero.net",
"netzidiot.de",
"netzoola.com",
"neue-dateien.de",
"neuf.fr",
"neuro.md",
"neustreet.com",
"neverbox.com",
"newap.ru",
"newarbat.net",
"newmail.com",
"newmail.net",
"newmail.ru",
"newsboysmail.com",
"newyork.com",
"newyorkcity.com",
"nextmail.ru",
"nexxmail.com",
"nfmail.com",
"ngs.ru",
"nhmail.com",
"nice-4u.com",
"nicebush.com",
"nicegal.com",
"nicholastse.net",
"nicolastse.com",
"niepodam.pl",
"nightimeuk.com",
"nightmail.com",
"nightmail.ru",
"nikopage.com",
"nikulino.net",
"nimail.com",
"nincsmail.hu",
"ninfan.com",
"nirvanafan.com",
"nm.ru",
"nmail.cf",
"nnh.com",
"nnov.ru",
"no-spam.ws",
"no4ma.ru",
"noavar.com",
"noblepioneer.com",
"nogmailspam.info",
"nomail.pw",
"nomail.xl.cx",
"nomail2me.com",
"nomorespamemails.com",
"nonpartisan.com",
"nonspam.eu",
"nonspammer.de",
"nonstopcinema.com",
"norika-fujiwara.com",
"norikomail.com",
"northgates.net",
"nospam.ze.tc",
"nospam4.us",
"nospamfor.us",
"nospammail.net",
"nospamthanks.info",
"notmailinator.com",
"notsharingmy.info",
"notyouagain.com",
"novogireevo.net",
"novokosino.net",
"nowhere.org",
"nowmymail.com",
"ntelos.net",
"ntlhelp.net",
"ntlworld.com",
"ntscan.com",
"null.net",
"nullbox.info",
"numep.ru",
"nur-fuer-spam.de",
"nurfuerspam.de",
"nus.edu.sg",
"nuvse.com",
"nwldx.com",
"nxt.ru",
"ny.com",
"nybce.com",
"nybella.com",
"nyc.com",
"nycmail.com",
"nz11.com",
"nzoomail.com",
"o-tay.com",
"o2.co.uk",
"o2.pl",
"oaklandas-fan.com",
"oath.com",
"objectmail.com",
"obobbo.com",
"oceanfree.net",
"ochakovo.net",
"odaymail.com",
"oddpost.com",
"odmail.com",
"odnorazovoe.ru",
"office-dateien.de",
"office-email.com",
"officedomain.com",
"offroadwarrior.com",
"oi.com.br",
"oicexchange.com",
"oikrach.com",
"ok.kz",
"ok.net",
"ok.ru",
"okbank.com",
"okhuman.com",
"okmad.com",
"okmagic.com",
"okname.net",
"okuk.com",
"oldbuthealthy.com",
"oldies1041.com",
"oldies104mail.com",
"ole.com",
"olemail.com",
"oligarh.ru",
"olympist.net",
"olypmall.ru",
"omaninfo.com",
"omen.ru",
"ondikoi.com",
"onebox.com",
"onenet.com.ar",
"oneoffemail.com",
"oneoffmail.com",
"onet.com.pl",
"onet.eu",
"onet.pl",
"onewaymail.com",
"oninet.pt",
"onlatedotcom.info",
"online.de",
"online.ie",
"online.ms",
"online.nl",
"online.ru",
"onlinecasinogamblings.com",
"onlinewiz.com",
"onmicrosoft.com",
"onmilwaukee.com",
"onobox.com",
"onvillage.com",
"oopi.org",
"op.pl",
"opayq.com",
"opendiary.com",
"openmailbox.org",
"operafan.com",
"operamail.com",
"opoczta.pl",
"optician.com",
"optonline.net",
"optusnet.com.au",
"orange.fr",
"orange.net",
"orbitel.bg",
"ordinaryamerican.net",
"orgmail.net",
"orthodontist.net",
"osite.com.br",
"oso.com",
"otakumail.com",
"otherinbox.com",
"our-computer.com",
"our-office.com",
"our.st",
"ourbrisbane.com",
"ourklips.com",
"ournet.md",
"outel.com",
"outgun.com",
"outlawspam.com",
"outlook.at",
"outlook.be",
"outlook.cl",
"outlook.co.id",
"outlook.co.il",
"outlook.co.nz",
"outlook.co.th",
"outlook.com",
"outlook.com.au",
"outlook.com.br",
"outlook.com.gr",
"outlook.com.pe",
"outlook.com.tr",
"outlook.com.vn",
"outlook.cz",
"outlook.de",
"outlook.dk",
"outlook.es",
"outlook.fr",
"outlook.hu",
"outlook.ie",
"outlook.in",
"outlook.it",
"outlook.jp",
"outlook.kr",
"outlook.lv",
"outlook.my",
"outlook.nl",
"outlook.ph",
"outlook.pt",
"outlook.sa",
"outlook.sg",
"outlook.sk",
"outloook.com",
"over-the-rainbow.com",
"ovi.com",
"ovpn.to",
"owlpic.com",
"ownmail.net",
"ozbytes.net.au",
"ozemail.com.au",
"ozz.ru",
"pacbell.net",
"pacific-ocean.com",
"pacific-re.com",
"pacificwest.com",
"packersfan.com",
"pagina.de",
"pagons.org",
"paidforsurf.com",
"pakistanmail.com",
"pakistanoye.com",
"palestinemail.com",
"pancakemail.com",
"pandawa.com",
"pandora.be",
"paradiseemail.com",
"paris.com",
"parkjiyoon.com",
"parrot.com",
"parsmail.com",
"partlycloudy.com",
"partybombe.de",
"partyheld.de",
"partynight.at",
"parvazi.com",
"passwordmail.com",
"pathfindermail.com",
"patmail.com",
"patra.net",
"pconnections.net",
"pcpostal.com",
"pcsrock.com",
"pcusers.otherinbox.com",
"peachworld.com",
"pechkin.ru",
"pediatrician.com",
"pekklemail.com",
"pemail.net",
"penpen.com",
"peoplepc.com",
"peopleweb.com",
"pepbot.com",
"perfectmail.com",
"perovo.net",
"perso.be",
"personal.ro",
"personales.com",
"petlover.com",
"petml.com",
"petr.ru",
"pettypool.com",
"pezeshkpour.com",
"pfui.ru",
"phayze.com",
"phone.net",
"photo-impact.eu",
"photographer.net",
"phpbb.uu.gl",
"phreaker.net",
"phus8kajuspa.cu.cc",
"physicist.net",
"pianomail.com",
"pickupman.com",
"picusnet.com",
"piercedallover.com",
"pigeonportal.com",
"pigmail.net",
"pigpig.net",
"pilotemail.com",
"pimagop.com",
"pinoymail.com",
"piracha.net",
"pisem.net",
"pjjkp.com",
"planet-mail.com",
"planet.nl",
"planetaccess.com",
"planetall.com",
"planetarymotion.net",
"planetdirect.com",
"planetearthinter.net",
"planetmail.com",
"planetmail.net",
"planetout.com",
"plasa.com",
"playersodds.com",
"playful.com",
"playstation.sony.com",
"plexolan.de",
"pluno.com",
"plus.com",
"plus.google.com",
"plusmail.com.br",
"pmail.net",
"pobox.com",
"pobox.hu",
"pobox.ru",
"pobox.sk",
"pochta.by",
"pochta.ru",
"pochta.ws",
"pochtamt.ru",
"poczta.fm",
"poczta.onet.pl",
"poetic.com",
"pokemail.net",
"pokemonpost.com",
"pokepost.com",
"polandmail.com",
"polbox.com",
"policeoffice.com",
"politician.com",
"politikerclub.de",
"polizisten-duzer.de",
"polyfaust.com",
"poofy.org",
"poohfan.com",
"pookmail.com",
"pool-sharks.com",
"poond.com",
"pop3.ru",
"popaccount.com",
"popmail.com",
"popsmail.com",
"popstar.com",
"populus.net",
"portableoffice.com",
"portugalmail.com",
"portugalmail.pt",
"portugalnet.com",
"positive-thinking.com",
"post.com",
"post.cz",
"post.sk",
"posta.net",
"posta.ro",
"posta.rosativa.ro.org",
"postaccesslite.com",
"postafiok.hu",
"postafree.com",
"postaweb.com",
"poste.it",
"postfach.cc",
"postinbox.com",
"postino.ch",
"postino.it",
"postmark.net",
"postmaster.co.uk",
"postmaster.twitter.com",
"postpro.net",
"pousa.com",
"powerdivas.com",
"powerfan.com",
"pp.inet.fi",
"praize.com",
"pray247.com",
"predprinimatel.ru",
"premium-mail.fr",
"premiumproducts.com",
"premiumservice.com",
"prepodavatel.ru",
"presidency.com",
"presnya.net",
"press.co.jp",
"prettierthanher.com",
"priest.com",
"primposta.com",
"primposta.hu",
"printesamargareta.ro",
"privacy.net",
"privatdemail.net",
"privy-mail.com",
"privymail.de",
"pro.hu",
"probemail.com",
"prodigy.net",
"prodigy.net.mx",
"professor.ru",
"progetplus.it",
"programist.ru",
"programmer.net",
"programozo.hu",
"proinbox.com",
"project2k.com",
"prokuratura.ru",
"prolaunch.com",
"promessage.com",
"prontomail.com",
"prontomail.compopulus.net",
"protestant.com",
"protonmail.com",
"proxymail.eu",
"prtnx.com",
"prydirect.info",
"psv-supporter.com",
"ptd.net",
"public-files.de",
"public.usa.com",
"publicist.com",
"pulp-fiction.com",
"punkass.com",
"puppy.com.my",
"purinmail.com",
"purpleturtle.com",
"put2.net",
"putthisinyourspamdatabase.com",
"pwrby.com",
"q.com",
"qatar.io",
"qatarmail.com",
"qdice.com",
"qip.ru",
"qmail.com",
"qprfans.com",
"qq.com",
"qrio.com",
"quackquack.com",
"quake.ru",
"quakemail.com",
"qualityservice.com",
"quantentunnel.de",
"qudsmail.com",
"quepasa.com",
"quickhosts.com",
"quickinbox.com",
"quickmail.nl",
"quickmail.ru",
"quicknet.nl",
"quickwebmail.com",
"quiklinks.com",
"quikmail.com",
"qv7.info",
"qwest.net",
"qwestoffice.net",
"r-o-o-t.com",
"r7.com",
"raakim.com",
"racedriver.com",
"racefanz.com",
"racingfan.com.au",
"racingmail.com",
"radicalz.com",
"radiku.ye.vc",
"radiologist.net",
"ragingbull.com",
"ralib.com",
"rambler.ru",
"ranmamail.com",
"rastogi.net",
"ratt-n-roll.com",
"rattle-snake.com",
"raubtierbaendiger.de",
"ravearena.com",
"ravefan.com",
"ravemail.co.za",
"ravemail.com",
"razormail.com",
"rccgmail.org",
"rcn.com",
"rcpt.at",
"realemail.net",
"realestatemail.net",
"reality-concept.club",
"reallyfast.biz",
"reallyfast.info",
"reallymymail.com",
"realradiomail.com",
"realtyagent.com",
"realtyalerts.ca",
"reborn.com",
"recode.me",
"reconmail.com",
"recursor.net",
"recycledmail.com",
"recycler.com",
"recyclermail.com",
"rediff.com",
"rediffmail.com",
"rediffmailpro.com",
"rednecks.com",
"redseven.de",
"redsfans.com",
"redwhitearmy.com",
"regbypass.com",
"reggaefan.com",
"reggafan.com",
"regiononline.com",
"registerednurses.com",
"regspaces.tk",
"reincarnate.com",
"relia.com",
"reliable-mail.com",
"religious.com",
"remail.ga",
"renren.com",
"repairman.com",
"reply.hu",
"reply.ticketmaster.com",
"represantive.com",
"representative.com",
"rescueteam.com",
"resgedvgfed.tk",
"resource.calendar.google.com",
"resumemail.com",
"retailfan.com",
"rexian.com",
"rezai.com",
"rhyta.com",
"richmondhill.com",
"rickymail.com",
"rin.ru",
"ring.by",
"riopreto.com.br",
"rklips.com",
"rmqkr.net",
"rn.com",
"ro.ru",
"roadrunner.com",
"roanokemail.com",
"rock.com",
"rocketmail.com",
"rocketship.com",
"rockfan.com",
"rodrun.com",
"rogers.com",
"rojname.com",
"rol.ro",
"rome.com",
"romymichele.com",
"roosh.com",
"rootprompt.org",
"rotfl.com",
"roughnet.com",
"royal.net",
"rpharmacist.com",
"rr.com",
"rrohio.com",
"rsub.com",
"rt.nl",
"rtrtr.com",
"ru.ru",
"rubyridge.com",
"runbox.com",
"rushpost.com",
"ruttolibero.com",
"rvshop.com",
"rxdoc.biz",
"s-mail.com",
"s0ny.net",
"sabreshockey.com",
"sacbeemail.com",
"saeuferleber.de",
"safarimail.com",
"safe-mail.net",
"safersignup.de",
"safetymail.info",
"safetypost.de",
"safrica.com",
"sagra.lu",
"sagra.lu.lu",
"sagra.lumarketing.lu",
"sags-per-mail.de",
"sailormoon.com",
"saint-mike.org",
"saintly.com",
"saintmail.net",
"sale-sale-sale.com",
"salehi.net",
"salesperson.net",
"samerica.com",
"samilan.net",
"samiznaetekogo.net",
"sammimail.com",
"sanchezsharks.com",
"sandelf.de",
"sanfranmail.com",
"sanook.com",
"sanriotown.com",
"santanmail.com",
"sapo.pt",
"sativa.ro.org",
"saturnfans.com",
"saturnperformance.com",
"saudia.com",
"savecougars.com",
"savelife.ml",
"saveowls.com",
"sayhi.net",
"saynotospams.com",
"sbcglbal.net",
"sbcglobal.com",
"sbcglobal.net",
"scandalmail.com",
"scanova.in",
"scanova.io",
"scarlet.nl",
"scfn.net",
"schafmail.de",
"schizo.com",
"schmusemail.de",
"schoolemail.com",
"schoolmail.com",
"schoolsucks.com",
"schreib-doch-mal-wieder.de",
"schrott-email.de",
"schweiz.org",
"sci.fi",
"science.com.au",
"scientist.com",
"scifianime.com",
"scotland.com",
"scotlandmail.com",
"scottishmail.co.uk",
"scottishtories.com",
"scottsboro.org",
"scrapbookscrapbook.com",
"scubadiving.com",
"seanet.com",
"search.ua",
"search417.com",
"searchwales.com",
"sebil.com",
"seckinmail.com",
"secret-police.com",
"secretarias.com",
"secretary.net",
"secretemail.de",
"secretservices.net",
"secure-mail.biz",
"secure-mail.cc",
"seductive.com",
"seekstoyboy.com",
"seguros.com.br",
"sekomaonline.com",
"selfdestructingmail.com",
"sellingspree.com",
"send.hu",
"sendmail.ru",
"sendme.cz",
"sendspamhere.com",
"senseless-entertainment.com",
"sent.as",
"sent.at",
"sent.com",
"sentrismail.com",
"serga.com.ar",
"servemymail.com",
"servermaps.net",
"services391.com",
"sesmail.com",
"sexmagnet.com",
"seznam.cz",
"sfr.fr",
"shahweb.net",
"shaniastuff.com",
"shared-files.de",
"sharedmailbox.org",
"sharewaredevelopers.com",
"sharklasers.com",
"sharmaweb.com",
"shaw.ca",
"she.com",
"shellov.net",
"shieldedmail.com",
"shieldemail.com",
"shiftmail.com",
"shinedyoureyes.com",
"shitaway.cf",
"shitaway.cu.cc",
"shitaway.ga",
"shitaway.gq",
"shitaway.ml",
"shitaway.tk",
"shitaway.usa.cc",
"shitmail.de",
"shitmail.me",
"shitmail.org",
"shitware.nl",
"shmeriously.com",
"shockinmytown.cu.cc",
"shootmail.com",
"shortmail.com",
"shortmail.net",
"shotgun.hu",
"showfans.com",
"showslow.de",
"shqiptar.eu",
"shuf.com",
"sialkotcity.com",
"sialkotian.com",
"sialkotoye.com",
"sibmail.com",
"sify.com",
"sigaret.net",
"silkroad.net",
"simbamail.fm",
"sina.cn",
"sina.com",
"sinamail.com",
"singapore.com",
"singles4jesus.com",
"singmail.com",
"singnet.com.sg",
"singpost.com",
"sinnlos-mail.de",
"sirindia.com",
"siteposter.net",
"skafan.com",
"skeefmail.com",
"skim.com",
"skizo.hu",
"skrx.tk",
"skunkbox.com",
"sky.com",
"skynet.be",
"slamdunkfan.com",
"slapsfromlastnight.com",
"slaskpost.se",
"slave-auctions.net",
"slickriffs.co.uk",
"slingshot.com",
"slippery.email",
"slipry.net",
"slo.net",
"slotter.com",
"sm.westchestergov.com",
"smap.4nmv.ru",
"smapxsmap.net",
"smashmail.de",
"smellfear.com",
"smellrear.com",
"smileyface.comsmithemail.net",
"sminkymail.com",
"smoothmail.com",
"sms.at",
"smtp.ru",
"snail-mail.net",
"snail-mail.ney",
"snakebite.com",
"snakemail.com",
"sndt.net",
"sneakemail.com",
"sneakmail.de",
"snet.net",
"sniper.hu",
"snkmail.com",
"snoopymail.com",
"snowboarding.com",
"snowdonia.net",
"so-simple.org",
"socamail.com",
"socceraccess.com",
"socceramerica.net",
"soccermail.com",
"soccermomz.com",
"social-mailer.tk",
"socialworker.net",
"sociologist.com",
"sofimail.com",
"sofort-mail.de",
"sofortmail.de",
"softhome.net",
"sogetthis.com",
"sogou.com",
"sohu.com",
"sokolniki.net",
"sol.dk",
"solar-impact.pro",
"solcon.nl",
"soldier.hu",
"solution4u.com",
"solvemail.info",
"songwriter.net",
"sonnenkinder.org",
"soodomail.com",
"soodonims.com",
"soon.com",
"soulfoodcookbook.com",
"soundofmusicfans.com",
"southparkmail.com",
"sovsem.net",
"sp.nl",
"space-bank.com",
"space-man.com",
"space-ship.com",
"space-travel.com",
"space.com",
"spaceart.com",
"spacebank.com",
"spacemart.com",
"spacetowns.com",
"spacewar.com",
"spainmail.com",
"spam.2012-2016.ru",
"spam4.me",
"spamail.de",
"spamarrest.com",
"spamavert.com",
"spambob.com",
"spambob.net",
"spambob.org",
"spambog.com",
"spambog.de",
"spambog.net",
"spambog.ru",
"spambooger.com",
"spambox.info",
"spambox.us",
"spamcannon.com",
"spamcannon.net",
"spamcero.com",
"spamcon.org",
"spamcorptastic.com",
"spamcowboy.com",
"spamcowboy.net",
"spamcowboy.org",
"spamday.com",
"spamdecoy.net",
"spameater.com",
"spameater.org",
"spamex.com",
"spamfree.eu",
"spamfree24.com",
"spamfree24.de",
"spamfree24.info",
"spamfree24.net",
"spamfree24.org",
"spamgoes.in",
"spamgourmet.com",
"spamgourmet.net",
"spamgourmet.org",
"spamherelots.com",
"spamhereplease.com",
"spamhole.com",
"spamify.com",
"spaminator.de",
"spamkill.info",
"spaml.com",
"spaml.de",
"spammotel.com",
"spamobox.com",
"spamoff.de",
"spamslicer.com",
"spamspot.com",
"spamstack.net",
"spamthis.co.uk",
"spamtroll.net",
"spankthedonkey.com",
"spartapiet.com",
"spazmail.com",
"speed.1s.fr",
"speedemail.net",
"speedpost.net",
"speedrules.com",
"speedrulz.com",
"speedy.com.ar",
"speedymail.org",
"sperke.net",
"spils.com",
"spinfinder.com",
"spiritseekers.com",
"spl.at",
"spoko.pl",
"spoofmail.de",
"sportemail.com",
"sportmail.ru",
"sportsmail.com",
"sporttruckdriver.com",
"spray.no",
"spray.se",
"spybox.de",
"spymac.com",
"sraka.xyz",
"srilankan.net",
"ssl-mail.com",
"st-davids.net",
"stade.fr",
"stalag13.com",
"standalone.net",
"starbuzz.com",
"stargateradio.com",
"starmail.com",
"starmail.org",
"starmedia.com",
"starplace.com",
"starspath.com",
"start.com.au",
"starting-point.com",
"startkeys.com",
"startrekmail.com",
"starwars-fans.com",
"stealthmail.com",
"stillchronic.com",
"stinkefinger.net",
"stipte.nl",
"stockracer.com",
"stockstorm.com",
"stoned.com",
"stones.com",
"stop-my-spam.pp.ua",
"stopdropandroll.com",
"storksite.com",
"streber24.de",
"streetwisemail.com",
"stribmail.com",
"strompost.com",
"strongguy.com",
"student.su",
"studentcenter.org",
"stuffmail.de",
"subnetwork.com",
"subram.com",
"sudanmail.net",
"sudolife.me",
"sudolife.net",
"sudomail.biz",
"sudomail.com",
"sudomail.net",
"sudoverse.com",
"sudoverse.net",
"sudoweb.net",
"sudoworld.com",
"sudoworld.net",
"sueddeutsche.de",
"suhabi.com",
"suisse.org",
"sukhumvit.net",
"sul.com.br",
"sunmail1.com",
"sunpoint.net",
"sunrise-sunset.com",
"sunsgame.com",
"sunumail.sn",
"suomi24.fi",
"super-auswahl.de",
"superdada.com",
"supereva.it",
"supergreatmail.com",
"supermail.ru",
"supermailer.jp",
"superman.ru",
"superposta.com",
"superrito.com",
"superstachel.de",
"surat.com",
"suremail.info",
"surf3.net",
"surfree.com",
"surfsupnet.net",
"surfy.net",
"surgical.net",
"surimail.com",
"survivormail.com",
"susi.ml",
"sviblovo.net",
"svk.jp",
"swbell.net",
"sweb.cz",
"swedenmail.com",
"sweetville.net",
"sweetxxx.de",
"swift-mail.com",
"swiftdesk.com",
"swingeasyhithard.com",
"swingfan.com",
"swipermail.zzn.com",
"swirve.com",
"swissinfo.org",
"swissmail.com",
"swissmail.net",
"switchboardmail.com",
"switzerland.org",
"sx172.com",
"sympatico.ca",
"syom.com",
"syriamail.com",
"t-online.de",
"t.psh.me",
"t2mail.com",
"tafmail.com",
"takoe.com",
"takoe.net",
"takuyakimura.com",
"talk21.com",
"talkcity.com",
"talkinator.com",
"talktalk.co.uk",
"tamb.ru",
"tamil.com",
"tampabay.rr.com",
"tangmonkey.com",
"tankpolice.com",
"taotaotano.com",
"tatanova.com",
"tattooedallover.com",
"tattoofanatic.com",
"tbwt.com",
"tcc.on.ca",
"tds.net",
"teacher.com",
"teachermail.net",
"teachers.org",
"teamdiscovery.com",
"teamtulsa.net",
"tech-center.com",
"tech4peace.org",
"techemail.com",
"techie.com",
"technisamail.co.za",
"technologist.com",
"technologyandstocks.com",
"techpointer.com",
"techscout.com",
"techseek.com",
"techsniper.com",
"techspot.com",
"teenagedirtbag.com",
"teewars.org",
"tele2.nl",
"telebot.com",
"telebot.net",
"telefonica.net",
"teleline.es",
"telenet.be",
"telepac.pt",
"telerymd.com",
"teleserve.dynip.com",
"teletu.it",
"teleworm.com",
"teleworm.us",
"telfort.nl",
"telfortglasvezel.nl",
"telinco.net",
"telkom.net",
"telpage.net",
"telstra.com",
"telstra.com.au",
"temp-mail.com",
"temp-mail.de",
"temp-mail.org",
"temp-mail.ru",
"temp.headstrong.de",
"tempail.com",
"tempe-mail.com",
"tempemail.biz",
"tempemail.co.za",
"tempemail.com",
"tempemail.net",
"tempinbox.co.uk",
"tempinbox.com",
"tempmail.eu",
"tempmail.it",
"tempmail.us",
"tempmail2.com",
"tempmaildemo.com",
"tempmailer.com",
"tempmailer.de",
"tempomail.fr",
"temporarioemail.com.br",
"temporaryemail.net",
"temporaryemail.us",
"temporaryforwarding.com",
"temporaryinbox.com",
"temporarymailaddress.com",
"tempthe.net",
"tempymail.com",
"temtulsa.net",
"tenchiclub.com",
"tenderkiss.com",
"tennismail.com",
"terminverpennt.de",
"terra.cl",
"terra.com",
"terra.com.ar",
"terra.com.br",
"terra.com.pe",
"terra.es",
"test.com",
"test.de",
"tfanus.com.er",
"tfbnw.net",
"tfz.net",
"tgasa.ru",
"tgma.ru",
"tgngu.ru",
"tgu.ru",
"thai.com",
"thaimail.com",
"thaimail.net",
"thanksnospam.info",
"thankyou2010.com",
"thc.st",
"the-african.com",
"the-airforce.com",
"the-aliens.com",
"the-american.com",
"the-animal.com",
"the-army.com",
"the-astronaut.com",
"the-beauty.com",
"the-big-apple.com",
"the-biker.com",
"the-boss.com",
"the-brazilian.com",
"the-canadian.com",
"the-canuck.com",
"the-captain.com",
"the-chinese.com",
"the-country.com",
"the-cowboy.com",
"the-davis-home.com",
"the-dutchman.com",
"the-eagles.com",
"the-englishman.com",
"the-fastest.net",
"the-fool.com",
"the-frenchman.com",
"the-galaxy.net",
"the-genius.com",
"the-gentleman.com",
"the-german.com",
"the-gremlin.com",
"the-hooligan.com",
"the-italian.com",
"the-japanese.com",
"the-lair.com",
"the-madman.com",
"the-mailinglist.com",
"the-marine.com",
"the-master.com",
"the-mexican.com",
"the-ministry.com",
"the-monkey.com",
"the-newsletter.net",
"the-pentagon.com",
"the-police.com",
"the-prayer.com",
"the-professional.com",
"the-quickest.com",
"the-russian.com",
"the-seasiders.com",
"the-snake.com",
"the-spaceman.com",
"the-stock-market.com",
"the-student.net",
"the-whitehouse.net",
"the-wild-west.com",
"the18th.com",
"thecoolguy.com",
"thecriminals.com",
"thedoghousemail.com",
"thedorm.com",
"theend.hu",
"theglobe.com",
"thegolfcourse.com",
"thegooner.com",
"theheadoffice.com",
"theinternetemail.com",
"thelanddownunder.com",
"thelimestones.com",
"themail.com",
"themillionare.net",
"theoffice.net",
"theplate.com",
"thepokerface.com",
"thepostmaster.net",
"theraces.com",
"theracetrack.com",
"therapist.net",
"thereisnogod.com",
"thesimpsonsfans.com",
"thestreetfighter.com",
"theteebox.com",
"thewatercooler.com",
"thewebpros.co.uk",
"thewizzard.com",
"thewizzkid.com",
"thexyz.ca",
"thexyz.cn",
"thexyz.com",
"thexyz.es",
"thexyz.fr",
"thexyz.in",
"thexyz.mobi",
"thexyz.net",
"thexyz.org",
"thezhangs.net",
"thirdage.com",
"thisgirl.com",
"thisisnotmyrealemail.com",
"thismail.net",
"thoic.com",
"thraml.com",
"thrott.com",
"throwam.com",
"throwawayemailaddress.com",
"thundermail.com",
"tibetemail.com",
"tidni.com",
"tilien.com",
"timein.net",
"timormail.com",
"tin.it",
"tipsandadvice.com",
"tiran.ru",
"tiscali.at",
"tiscali.be",
"tiscali.co.uk",
"tiscali.it",
"tiscali.lu",
"tiscali.se",
"tittbit.in",
"tizi.com",
"tkcity.com",
"tlcfan.com",
"tmail.ws",
"tmailinator.com",
"tmicha.net",
"toast.com",
"toke.com",
"tokyo.com",
"tom.com",
"toolsource.com",
"toomail.biz",
"toothfairy.com",
"topchat.com",
"topgamers.co.uk",
"topletter.com",
"topmail-files.de",
"topmail.com.ar",
"topranklist.de",
"topsurf.com",
"topteam.bg",
"toquedequeda.com",
"torba.com",
"torchmail.com",
"torontomail.com",
"tortenboxer.de",
"totalmail.com",
"totalmail.de",
"totalmusic.net",
"totalsurf.com",
"toughguy.net",
"townisp.com",
"tpg.com.au",
"tradermail.info",
"trainspottingfan.com",
"trash-amil.com",
"trash-mail.at",
"trash-mail.com",
"trash-mail.de",
"trash-mail.ga",
"trash-mail.ml",
"trash2009.com",
"trash2010.com",
"trash2011.com",
"trashdevil.com",
"trashdevil.de",
"trashemail.de",
"trashmail.at",
"trashmail.com",
"trashmail.de",
"trashmail.me",
"trashmail.net",
"trashmail.org",
"trashmailer.com",
"trashymail.com",
"trashymail.net",
"travel.li",
"trayna.com",
"trbvm.com",
"trbvn.com",
"trevas.net",
"trialbytrivia.com",
"trialmail.de",
"trickmail.net",
"trillianpro.com",
"trimix.cn",
"tritium.net",
"trjam.net",
"trmailbox.com",
"tropicalstorm.com",
"truckeremail.net",
"truckers.com",
"truckerz.com",
"truckracer.com",
"truckracers.com",
"trust-me.com",
"truth247.com",
"truthmail.com",
"tsamail.co.za",
"ttml.co.in",
"tulipsmail.net",
"tunisiamail.com",
"turboprinz.de",
"turboprinzessin.de",
"turkey.com",
"turual.com",
"tushino.net",
"tut.by",
"tvcablenet.be",
"tverskie.net",
"tverskoe.net",
"tvnet.lv",
"tvstar.com",
"twc.com",
"twcny.com",
"twentylove.com",
"twinmail.de",
"twinstarsmail.com",
"tx.rr.com",
"tycoonmail.com",
"tyldd.com",
"typemail.com",
"tyt.by",
"u14269.ml",
"u2club.com",
"ua.fm",
"uae.ac",
"uaemail.com",
"ubbi.com",
"ubbi.com.br",
"uboot.com",
"uggsrock.com",
"uk2.net",
"uk2k.com",
"uk2net.com",
"uk7.net",
"uk8.net",
"ukbuilder.com",
"ukcool.com",
"ukdreamcast.com",
"ukmail.org",
"ukmax.com",
"ukr.net",
"ukrpost.net",
"ukrtop.com",
"uku.co.uk",
"ultapulta.com",
"ultimatelimos.com",
"ultrapostman.com",
"umail.net",
"ummah.org",
"umpire.com",
"unbounded.com",
"underwriters.com",
"unforgettable.com",
"uni.de",
"uni.de.de",
"uni.demailto.de",
"unican.es",
"unihome.com",
"universal.pt",
"uno.ee",
"uno.it",
"unofree.it",
"unomail.com",
"unterderbruecke.de",
"uogtritons.com",
"uol.com.ar",
"uol.com.br",
"uol.com.co",
"uol.com.mx",
"uol.com.ve",
"uole.com",
"uole.com.ve",
"uolmail.com",
"uomail.com",
"upc.nl",
"upcmail.nl",
"upf.org",
"upliftnow.com",
"uplipht.com",
"uraniomail.com",
"ureach.com",
"urgentmail.biz",
"uroid.com",
"us.af",
"usa.com",
"usa.net",
"usaaccess.net",
"usanetmail.com",
"used-product.fr",
"userbeam.com",
"usermail.com",
"username.e4ward.com",
"userzap.com",
"usma.net",
"usmc.net",
"uswestmail.net",
"uymail.com",
"uyuyuy.com",
"uzhe.net",
"v-sexi.com",
"v8email.com",
"vaasfc4.tk",
"vahoo.com",
"valemail.net",
"valudeal.net",
"vampirehunter.com",
"varbizmail.com",
"vcmail.com",
"velnet.co.uk",
"velnet.com",
"velocall.com",
"veloxmail.com.br",
"venompen.com",
"verizon.net",
"verizonmail.com",
"verlass-mich-nicht.de",
"versatel.nl",
"verticalheaven.com",
"veryfast.biz",
"veryrealemail.com",
"veryspeedy.net",
"vfemail.net",
"vickaentb.tk",
"videotron.ca",
"viditag.com",
"viewcastmedia.com",
"viewcastmedia.net",
"vinbazar.com",
"violinmakers.co.uk",
"vip.126.com",
"vip.21cn.com",
"vip.citiz.net",
"vip.gr",
"vip.onet.pl",
"vip.qq.com",
"vip.sina.com",
"vipmail.ru",
"viralplays.com",
"virgilio.it",
"virgin.net",
"virginbroadband.com.au",
"virginmedia.com",
"virtual-mail.com",
"virtualactive.com",
"virtualguam.com",
"virtualmail.com",
"visitmail.com",
"visitweb.com",
"visto.com",
"visualcities.com",
"vivavelocity.com",
"vivianhsu.net",
"viwanet.ru",
"vjmail.com",
"vjtimail.com",
"vkcode.ru",
"vlcity.ru",
"vlmail.com",
"vnet.citiz.net",
"vnn.vn",
"vnukovo.net",
"vodafone.nl",
"vodafonethuis.nl",
"voila.fr",
"volcanomail.com",
"vollbio.de",
"volloeko.de",
"vomoto.com",
"voo.be",
"vorsicht-bissig.de",
"vorsicht-scharf.de",
"vote-democrats.com",
"vote-hillary.com",
"vote-republicans.com",
"vote4gop.org",
"votenet.com",
"vovan.ru",
"vp.pl",
"vpn.st",
"vr9.com",
"vsimcard.com",
"vubby.com",
"vyhino.net",
"w3.to",
"wahoye.com",
"walala.org",
"wales2000.net",
"walkmail.net",
"walkmail.ru",
"walla.co.il",
"wam.co.za",
"wanaboo.com",
"wanadoo.co.uk",
"wanadoo.es",
"wanadoo.fr",
"wapda.com",
"war-im-urlaub.de",
"warmmail.com",
"warpmail.net",
"warrior.hu",
"wasteland.rfc822.org",
"watchmail.com",
"waumail.com",
"wazabi.club",
"wbdet.com",
"wearab.net",
"web-contact.info",
"web-emailbox.eu",
"web-ideal.fr",
"web-mail.com.ar",
"web-mail.pp.ua",
"web-police.com",
"web.de",
"webaddressbook.com",
"webadicta.org",
"webave.com",
"webbworks.com",
"webcammail.com",
"webcity.ca",
"webcontact-france.eu",
"webdream.com",
"webemail.me",
"webemaillist.com",
"webinbox.com",
"webindia123.com",
"webjump.com",
"webm4il.info",
"webmail.bellsouth.net",
"webmail.blue",
"webmail.co.yu",
"webmail.co.za",
"webmail.fish",
"webmail.hu",
"webmail.lawyer",
"webmail.ru",
"webmail.wiki",
"webmails.com",
"webmailv.com",
"webname.com",
"webprogramming.com",
"webskulker.com",
"webstation.com",
"websurfer.co.za",
"webtopmail.com",
"webtribe.net",
"webuser.in",
"wee.my",
"weedmail.com",
"weekmail.com",
"weekonline.com",
"wefjo.grn.cc",
"weg-werf-email.de",
"wegas.ru",
"wegwerf-emails.de",
"wegwerfadresse.de",
"wegwerfemail.com",
"wegwerfemail.de",
"wegwerfmail.de",
"wegwerfmail.info",
"wegwerfmail.net",
"wegwerfmail.org",
"wegwerpmailadres.nl",
"wehshee.com",
"weibsvolk.de",
"weibsvolk.org",
"weinenvorglueck.de",
"welsh-lady.com",
"wesleymail.com",
"westnet.com",
"westnet.com.au",
"wetrainbayarea.com",
"wfgdfhj.tk",
"wh4f.org",
"whale-mail.com",
"whartontx.com",
"whatiaas.com",
"whatpaas.com",
"wheelweb.com",
"whipmail.com",
"whoever.com",
"wholefitness.com",
"whoopymail.com",
"whtjddn.33mail.com",
"whyspam.me",
"wickedmail.com",
"wickmail.net",
"wideopenwest.com",
"wildmail.com",
"wilemail.com",
"will-hier-weg.de",
"willhackforfood.biz",
"willselfdestruct.com",
"windowslive.com",
"windrivers.net",
"windstream.com",
"windstream.net",
"winemaven.info",
"wingnutz.com",
"winmail.com.au",
"winning.com",
"winrz.com",
"wir-haben-nachwuchs.de",
"wir-sind-cool.org",
"wirsindcool.de",
"witty.com",
"wiz.cc",
"wkbwmail.com",
"wmail.cf",
"wo.com.cn",
"woh.rr.com",
"wolf-web.com",
"wolke7.net",
"wollan.info",
"wombles.com",
"women-at-work.org",
"women-only.net",
"wonder-net.com",
"wongfaye.com",
"wooow.it",
"work4teens.com",
"worker.com",
"workmail.co.za",
"workmail.com",
"worldbreak.com",
"worldemail.com",
"worldmailer.com",
"worldnet.att.net",
"wormseo.cn",
"wosaddict.com",
"wouldilie.com",
"wovz.cu.cc",
"wow.com",
"wowgirl.com",
"wowmail.com",
"wowway.com",
"wp.pl",
"wptamail.com",
"wrestlingpages.com",
"wrexham.net",
"writeme.com",
"writemeback.com",
"writeremail.com",
"wronghead.com",
"wrongmail.com",
"wtvhmail.com",
"wwdg.com",
"www.com",
"www.e4ward.com",
"www.mailinator.com",
"www2000.net",
"wwwnew.eu",
"wx88.net",
"wxs.net",
"wyrm.supernews.com",
"x-mail.net",
"x-networks.net",
"x.ip6.li",
"x5g.com",
"xagloo.com",
"xaker.ru",
"xd.ae",
"xemaps.com",
"xents.com",
"xing886.uu.gl",
"xmail.com",
"xmaily.com",
"xmastime.com",
"xmenfans.com",
"xms.nl",
"xmsg.com",
"xoom.com",
"xoommail.com",
"xoxox.cc",
"xoxy.net",
"xpectmore.com",
"xpressmail.zzn.com",
"xs4all.nl",
"xsecurity.org",
"xsmail.com",
"xtra.co.nz",
"xtram.com",
"xuno.com",
"xww.ro",
"xy9ce.tk",
"xyz.am",
"xyzfree.net",
"xzapmail.com",
"y7mail.com",
"ya.ru",
"yada-yada.com",
"yaho.com",
"yahoo.ae",
"yahoo.at",
"yahoo.be",
"yahoo.ca",
"yahoo.ch",
"yahoo.cn",
"yahoo.co",
"yahoo.co.id",
"yahoo.co.il",
"yahoo.co.in",
"yahoo.co.jp",
"yahoo.co.kr",
"yahoo.co.nz",
"yahoo.co.th",
"yahoo.co.uk",
"yahoo.co.za",
"yahoo.com",
"yahoo.com.ar",
"yahoo.com.au",
"yahoo.com.br",
"yahoo.com.cn",
"yahoo.com.co",
"yahoo.com.hk",
"yahoo.com.is",
"yahoo.com.mx",
"yahoo.com.my",
"yahoo.com.ph",
"yahoo.com.ru",
"yahoo.com.sg",
"yahoo.com.tr",
"yahoo.com.tw",
"yahoo.com.vn",
"yahoo.cz",
"yahoo.de",
"yahoo.dk",
"yahoo.es",
"yahoo.fi",
"yahoo.fr",
"yahoo.gr",
"yahoo.hu",
"yahoo.ie",
"yahoo.in",
"yahoo.it",
"yahoo.jp",
"yahoo.net",
"yahoo.nl",
"yahoo.no",
"yahoo.pl",
"yahoo.pt",
"yahoo.ro",
"yahoo.ru",
"yahoo.se",
"yahoofs.com",
"yahoomail.com",
"yalla.com",
"yalla.com.lb",
"yalook.com",
"yam.com",
"yandex.com",
"yandex.mail",
"yandex.pl",
"yandex.ru",
"yandex.ua",
"yapost.com",
"yapped.net",
"yawmail.com",
"yclub.com",
"yeah.net",
"yebox.com",
"yeehaa.com",
"yehaa.com",
"yehey.com",
"yemenmail.com",
"yep.it",
"yepmail.net",
"yert.ye.vc",
"yesbox.net",
"yesey.net",
"yeswebmaster.com",
"ygm.com",
"yifan.net",
"ymail.com",
"ynnmail.com",
"yogamaven.com",
"yogotemail.com",
"yomail.info",
"yopmail.com",
"yopmail.fr",
"yopmail.net",
"yopmail.org",
"yopmail.pp.ua",
"yopolis.com",
"yopweb.com",
"youareadork.com",
"youmailr.com",
"youpy.com",
"your-house.com",
"your-mail.com",
"yourdomain.com",
"yourinbox.com",
"yourlifesucks.cu.cc",
"yourlover.net",
"yournightmare.com",
"yours.com",
"yourssincerely.com",
"yourteacher.net",
"yourwap.com",
"youthfire.com",
"youthpost.com",
"youvegotmail.net",
"yuuhuu.net",
"yuurok.com",
"yyhmail.com",
"z1p.biz",
"z6.com",
"z9mail.com",
"za.com",
"zahadum.com",
"zaktouni.fr",
"zcities.com",
"zdnetmail.com",
"zdorovja.net",
"zeeks.com",
"zeepost.nl",
"zehnminuten.de",
"zehnminutenmail.de",
"zensearch.com",
"zensearch.net",
"zerocrime.org",
"zetmail.com",
"zhaowei.net",
"zhouemail.510520.org",
"ziggo.nl",
"zing.vn",
"zionweb.org",
"zip.net",
"zipido.com",
"ziplip.com",
"zipmail.com",
"zipmail.com.br",
"zipmax.com",
"zippymail.info",
"zmail.pt",
"zmail.ru",
"zoemail.com",
"zoemail.net",
"zoemail.org",
"zoho.com",
"zomg.info",
"zonai.com",
"zoneview.net",
"zonnet.nl",
"zooglemail.com",
"zoominternet.net",
"zubee.com",
"zuvio.com",
"zuzzurello.com",
"zvmail.com",
"zwallet.com",
"zweb.in",
"zxcv.com",
"zxcvbnm.com",
"zybermail.com",
"zydecofan.com",
"zzn.com",
"zzom.co.uk",
"zzz.com",
}
|
i3visio/osrframework
|
osrframework/domains/email_providers.py
|
Python
|
agpl-3.0
| 128,746
|
[
"CASINO",
"COLUMBUS",
"Galaxy",
"MOOSE"
] |
7497036ab9e537ed822d73d8e4beb196ae9f63203e74077135d36c17d9f0dfd2
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# edituser - Edit a MiG user
# Copyright (C) 2003-2013 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Edit MiG user in user database and file system"""
import getopt
import os
import sys
from shared.useradm import init_user_adm, edit_user
def usage(name='edituser.py'):
"""Usage help"""
print """Edit existing user in MiG user database and file system.
Usage:
%(name)s [OPTIONS] -i USER_ID [FULL_NAME] [ORGANIZATION] [STATE] [COUNTRY] \
[EMAIL] [COMMENT] [PASSWORD]
Where OPTIONS may be one or more of:
-c CONF_FILE Use CONF_FILE as server configuration
-d DB_FILE Use DB_FILE as user data base file
-f Force operations to continue past errors
-h Show this help
-i CERT_DN CERT_DN of user to edit
-v Verbose output
"""\
% {'name': name}
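# A hypothetical invocation (the DN and name below are illustrative only):
#   ./edituser.py -i "/C=DK/O=DIKU/CN=Jane Doe" "Jane A. Doe"
# supplies only FULL_NAME; unset positional fields are skipped, so the
# remaining user attributes are left untouched.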
# ## Main ###
if '__main__' == __name__:
(args, app_dir, db_path) = init_user_adm()
conf_path = None
force = False
verbose = False
user_id = None
user_dict = {}
opt_args = 'c:d:fhi:v'
try:
(opts, args) = getopt.getopt(args, opt_args)
except getopt.GetoptError, err:
print 'Error: ', err.msg
usage()
sys.exit(1)
for (opt, val) in opts:
if opt == '-c':
conf_path = val
elif opt == '-d':
db_path = val
elif opt == '-f':
force = True
elif opt == '-h':
usage()
sys.exit(0)
elif opt == '-i':
user_id = val
elif opt == '-v':
verbose = True
else:
print 'Error: %s not supported!' % opt
if conf_path and not os.path.isfile(conf_path):
print 'Failed to read configuration file: %s' % conf_path
sys.exit(1)
if verbose:
if conf_path:
print 'using configuration in %s' % conf_path
else:
print 'using configuration from MIG_CONF (or default)'
if not user_id:
print 'Error: Existing user ID is required'
usage()
sys.exit(1)
if args:
try:
user_dict['full_name'] = args[0]
user_dict['organization'] = args[1]
user_dict['state'] = args[2]
user_dict['country'] = args[3]
user_dict['email'] = args[4]
except IndexError:
# Ignore missing optional arguments
pass
else:
print 'Please enter the new details for %s:' % user_id
print '[enter to skip field]'
user_dict['full_name'] = raw_input('Full Name: ').title()
user_dict['organization'] = raw_input('Organization: ')
user_dict['state'] = raw_input('State: ')
user_dict['country'] = raw_input('2-letter Country Code: ')
user_dict['email'] = raw_input('Email: ')
# Remove empty value fields
for (key, val) in user_dict.items():
if not val:
del user_dict[key]
if verbose:
print 'Update DB entry and dirs for %s: %s' % (user_id, user_dict)
try:
user = edit_user(user_id, user_dict, conf_path, db_path, force,
verbose)
except Exception, err:
print err
sys.exit(1)
print '%s\nchanged to\n%s\nin user database and file system' % \
(user_id, user['distinguished_name'])
print
print 'Please revoke/reissue any related certificates!'
|
heromod/migrid
|
mig/server/edituser.py
|
Python
|
gpl-2.0
| 4,234
|
[
"Brian"
] |
bba20b3abf94b59ce0d5abfdb1619fcbe51eb5aea11b23332fd8874af2504e51
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import print_function # Python 2/3 compatibility
__doc__ = """
This example simulates a number of pure Gaussian white noise signals, then fits
each one in terms of two regressors: a constant baseline, and a linear function
of time. The voxelwise t statistics associated with the baseline coefficient are
then computed.
"""
print(__doc__)
import numpy as np
from nipy.modalities.fmri.glm import GeneralLinearModel
dimt = 100
dimx = 10
dimy = 11
dimz = 12
# axis defines the "time direction"
y = np.random.randn(dimt, dimx * dimy * dimz)
axis = 0
X = np.array([np.ones(dimt), range(dimt)])
X = X.T ## the design matrix X must have dimt rows
mod = GeneralLinearModel(X)
mod.fit(y)
# Define a t contrast
tcon = mod.contrast([1, 0])
# Compute the t-stat
t = tcon.stat()
## t = tcon.stat(baseline=1) to test effects > 1
# Compute the p-value
p = tcon.p_value()
# Compute the z-score
z = tcon.z_score()
# Perform a F test without keeping the F stat
p = mod.contrast([[1, 0], [1, - 1]]).p_value()
print(np.shape(y))
print(np.shape(X))
print(np.shape(z))
|
bthirion/nipy
|
examples/labs/glm_lowlevel.py
|
Python
|
bsd-3-clause
| 1,211
|
[
"Gaussian"
] |
d76db03bd8464efd252b21ddad8eb4d0227b4d12bf465e9de2f20edefa829fd4
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Leaflet identification --- :mod:`MDAnalysis.analysis.leaflet`
==============================================================
This module implements the *LeafletFinder* algorithm, described in
[Michaud-Agrawal2011]_. It can identify the lipids in a bilayer of
arbitrary shape and topology, including planar and undulating bilayers
under periodic boundary conditions or vesicles.
One can use this information to identify
* the upper and lower leaflet of a *planar membrane* by comparing
the :meth:`~MDAnalysis.core.groups.AtomGroup.center_of_geometry` of
the leaflet groups, or
* the outer and inner leaflet of a *vesicle* by comparing histograms
of distances from the centre of geometry (or possibly simply the
:meth:`~MDAnalysis.core.groups.AtomGroup.radius_of_gyration`).
See example scripts in the MDAnalysisCookbook_ on how to use
:class:`LeafletFinder`. The function :func:`optimize_cutoff` implements a
(slow) heuristic method to find the best cut off for the LeafletFinder
algorithm.
.. _MDAnalysisCookbook: https://github.com/MDAnalysis/MDAnalysisCookbook/tree/master/examples
Algorithm
---------
1. build a graph of all phosphate distances < cutoff
2. identify the largest connected subgraphs
3. analyse first and second largest graph, which correspond to the leaflets
For further details see [Michaud-Agrawal2011]_.
Classes and Functions
---------------------
.. autoclass:: LeafletFinder
:members:
.. autofunction:: optimize_cutoff
"""
from __future__ import division, absolute_import
from six.moves import range
import warnings
import numpy as np
import networkx as NX
from .. import core
from . import distances
from .. import selections
class LeafletFinder(object):
"""Identify atoms in the same leaflet of a lipid bilayer.
This class implements the *LeafletFinder* algorithm [Michaud-Agrawal2011]_.
Parameters
----------
universe : Universe or str
:class:`MDAnalysis.Universe` or a file name (e.g., in PDB or
GRO format)
selection : AtomGroup or str
An AtomGroup instance or a
:meth:`Universe.select_atoms` selection string
for atoms that define the lipid head groups, e.g.
universe.atoms.PO4 or "name PO4" or "name P*"
cutoff : float (optional)
head group-defining atoms within a distance of `cutoff`
Angstroms are deemed to be in the same leaflet [15.0]
pbc : bool (optional)
take periodic boundary conditions into account [``False``]
sparse : bool (optional)
``None``: use fastest possible routine; ``True``: use slow
sparse matrix implementation (for large systems); ``False``:
use fast :func:`~MDAnalysis.lib.distances.distance_array`
implementation [``None``].
Example
-------
The components of the graph are stored in the list
:attr:`LeafletFinder.components`; the atoms in each component are numbered
consecutively, starting at 0. To obtain the atoms in the input structure
use :meth:`LeafletFinder.groups`::
L = LeafletFinder(PDB, 'name P*')
leaflet0 = L.groups(0)
leaflet1 = L.groups(1)
The residues can be accessed through the standard MDAnalysis mechanism::
leaflet0.residues
provides a :class:`~MDAnalysis.core.groups.ResidueGroup`
instance. Similarly, all atoms in the first leaflet are then ::
leaflet0.residues.atoms
"""
def __init__(self, universe, selectionstring, cutoff=15.0, pbc=False, sparse=None):
universe = core.universe.as_Universe(universe)
self.universe = universe
self.selectionstring = selectionstring
if isinstance(self.selectionstring, core.groups.AtomGroup):
self.selection = self.selectionstring
else:
self.selection = universe.select_atoms(self.selectionstring)
self.pbc = pbc
self.sparse = sparse
self._init_graph(cutoff)
def _init_graph(self, cutoff):
self.cutoff = cutoff
self.graph = self._get_graph()
self.components = self._get_components()
# The last two calls in _get_graph() and the single line in
# _get_components() are all that are needed to make the leaflet
# detection work.
def _get_graph(self):
"""Build graph from adjacency matrix at the given cutoff.
Automatically select between high and low memory usage versions of
contact_matrix."""
# could use self_distance_array to speed up but then need to deal with the sparse indexing
if self.pbc:
box = self.universe.trajectory.ts.dimensions
else:
box = None
coord = self.selection.positions
if self.sparse is False:
# only try distance array
try:
adj = distances.contact_matrix(coord, cutoff=self.cutoff, returntype="numpy", box=box)
except ValueError:
warnings.warn('N x N matrix too big, use sparse=True or sparse=None', category=UserWarning,
stacklevel=2)
raise
elif self.sparse is True:
# only try sparse
adj = distances.contact_matrix(coord, cutoff=self.cutoff, returntype="sparse", box=box)
else:
# use distance_array and fall back to sparse matrix
try:
# this works for small-ish systems and depends on system memory
adj = distances.contact_matrix(coord, cutoff=self.cutoff, returntype="numpy", box=box)
except ValueError:
# but use a sparse matrix method for larger systems for memory reasons
warnings.warn(
'N x N matrix too big - switching to sparse matrix method (works fine, but is currently rather '
'slow)',
category=UserWarning, stacklevel=2)
adj = distances.contact_matrix(coord, cutoff=self.cutoff, returntype="sparse", box=box)
return NX.Graph(adj)
def _get_components(self):
"""Return connected components (as sorted numpy arrays), sorted by size."""
return [np.sort(list(component)) for component in NX.connected_components(self.graph)]
def update(self, cutoff=None):
"""Update components, possibly with a different *cutoff*"""
if cutoff is None:
cutoff = self.cutoff
self._init_graph(cutoff)
def sizes(self):
"""Dict of component index with size of component."""
return dict(((idx, len(component)) for idx, component in enumerate(self.components)))
def groups(self, component_index=None):
"""Return a :class:`MDAnalysis.core.groups.AtomGroup` for *component_index*.
If no argument is supplied, then a list of all leaflet groups is returned.
.. SeeAlso:: :meth:`LeafletFinder.group` and :meth:`LeafletFinder.groups_iter`
"""
if component_index is None:
return list(self.groups_iter())
else:
return self.group(component_index)
def group(self, component_index):
"""Return a :class:`MDAnalysis.core.groups.AtomGroup` for *component_index*."""
# maybe cache this?
indices = [i for i in self.components[component_index]]
return self.selection[indices]
def groups_iter(self):
"""Iterator over all leaflet :meth:`groups`"""
for component_index in range(len(self.components)):
yield self.group(component_index)
def write_selection(self, filename, **kwargs):
"""Write selections for the leaflets to *filename*.
The format is typically determined by the extension of *filename*
(e.g. "vmd", "pml", or "ndx" for VMD, PyMol, or Gromacs).
See :class:`MDAnalysis.selections.base.SelectionWriter` for all
options.
"""
sw = selections.get_writer(filename, kwargs.pop('format', None))
with sw(filename, mode=kwargs.pop('mode', 'w'),
preamble="leaflets based on selection={selectionstring!r} cutoff={cutoff:f}\n".format(
**vars(self)),
**kwargs) as writer:
for i, ag in enumerate(self.groups_iter()):
name = "leaflet_{0:d}".format((i + 1))
writer.write(ag, name=name)
def __repr__(self):
return "<LeafletFinder({0!r}, cutoff={1:.1f} A) with {2:d} atoms in {3:d} groups>".format(
self.selectionstring, self.cutoff, self.selection.n_atoms,
len(self.components))
def optimize_cutoff(universe, selection, dmin=10.0, dmax=20.0, step=0.5,
max_imbalance=0.2, **kwargs):
r"""Find cutoff that minimizes number of disconnected groups.
Applies heuristics to find best groups:
1. at least two groups (assumes that there are at least 2 leaflets)
2. reject any solutions for which:
.. math::
\frac{|N_0 - N_1|}{N_0 + N_1} > \mathrm{max\_imbalance}
with :math:`N_i` being the number of lipids in group
:math:`i`. This heuristic picks groups with balanced numbers of
lipids.
Parameters
----------
universe : Universe
:class:`MDAnalysis.Universe` instance
selection : AtomGroup or str
AtomGroup or selection string as used for :class:`LeafletFinder`
dmin : float (optional)
dmax : float (optional)
step : float (optional)
scan cutoffs from `dmin` to `dmax` at stepsize `step` (in Angstroms)
max_imbalance : float (optional)
tuning parameter for the balancing heuristic [0.2]
kwargs : other keyword arguments
other arguments for :class:`LeafletFinder`
Returns
-------
(cutoff, N)
optimum cutoff and number of groups found
.. Note:: This function can die in various ways if really no
appropriate number of groups can be found; it ought to be
made more robust.
"""
kwargs.pop('cutoff', None) # not used, so we filter it
_sizes = []
for cutoff in np.arange(dmin, dmax, step):
LF = LeafletFinder(universe, selection, cutoff=cutoff, **kwargs)
# heuristic:
# 1) N > 1
# 2) no imbalance between large groups:
sizes = LF.sizes()
if len(sizes) < 2:
continue
n0 = float(sizes[0]) # sizes of two biggest groups ...
n1 = float(sizes[1]) # ... assumed to be the leaflets
imbalance = np.abs(n0 - n1) / (n0 + n1)
# print "sizes: %(sizes)r; imbalance=%(imbalance)f" % vars()
if imbalance > max_imbalance:
continue
_sizes.append((cutoff, len(LF.sizes())))
results = np.rec.fromrecords(_sizes, names="cutoff,N")
del _sizes
results.sort(order=["N", "cutoff"]) # sort ascending by N, then cutoff
return results[0] # (cutoff,N) with N>1 and shortest cutoff
|
kain88-de/mdanalysis
|
package/MDAnalysis/analysis/leaflet.py
|
Python
|
gpl-2.0
| 11,897
|
[
"Gromacs",
"MDAnalysis",
"PyMOL",
"VMD"
] |
bf55c4d5e6bedecfd972e495b7d8b602b6fe764e0c103b579c8ba94ef34efdba
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program does an image search.
Image search. Use histogram similarity functions.
"""
from __future__ import absolute_import, division, \
print_function, unicode_literals
# import ipdb; ipdb.set_trace() ; # debugging-------
import sys
import logging
import os
import numpy as np
import scipy.sparse as sp
import cv2
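# Creating and immediately destroying a dummy window appears to work around
# a segmentation fault in some OpenCV GUI backends (as the name suggests).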
cv2.namedWindow('GetArroundASegmentationFailure', 0)
cv2.destroyWindow('GetArroundASegmentationFailure')
import matplotlib.pyplot as plt
import ava.utl
import ava.cv.utl
def get_hue_histogram(img_hsv, min_saturation=0):
""" img: np.array, min_saturation int, -> historgram as np.array. """
assert img_hsv is not None, "img can't be null"
assert img_hsv.shape[2] == 3, 'Expecting 3 channel image'
h = img_hsv[:, :, 0]
s = img_hsv[:, :, 1]
v = img_hsv[:, :, 2]
h = h[..., np.newaxis]
s = s[..., np.newaxis]
saturation_mask = None
if min_saturation > 0:
ret, saturation_mask = cv2.threshold(s, min_saturation, 255, cv2.THRESH_BINARY)
hist = cv2.calcHist([img_hsv], [0], saturation_mask, [180], [0, 180])
return hist
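# Example usage (hypothetical; 'waves.jpg' stands in for any BGR image):
#   img_hsv = cv2.cvtColor(cv2.imread('waves.jpg'), cv2.COLOR_BGR2HSV)
#   hist = get_hue_histogram(img_hsv, min_saturation=65)
# yields a 180-bin hue histogram over sufficiently saturated pixels only.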
def get_hs_histogram(img_hsv, min_saturation=0):
""" img: np.array, min_saturation int, -> historgram as np.array. """
assert img_hsv is not None, "img can't be null"
assert img_hsv.shape[2] == 3, 'Expecting 3 channel image'
h = img_hsv[:, :, 0]
s = img_hsv[:, :, 1]
v = img_hsv[:, :, 2]
h = h[..., np.newaxis]
s = s[..., np.newaxis]
saturation_mask = None
if min_saturation > 0:
ret, saturation_mask = cv2.threshold(s, min_saturation, 255, cv2.THRESH_BINARY)
hist = cv2.calcHist([img_hsv], [0,1], saturation_mask, [180, 256], [0, 180, 0, 256])
return hist
def find_content(img_hsv, hist_sample):
""" img hsv, hist_sample as np.array, -> 1 channel distance """
src_img_cp = img_hsv
# normalize the sample histogram
cv2.normalize(hist_sample, hist_sample, 0, 179, cv2.NORM_MINMAX)
distance = cv2.calcBackProject([img_hsv], [0], hist_sample, [0, 180], 0.5)
print('---- back-projection distance map ----')
# show the distance
ava.cv.utl.show_image_wait_2(distance) # ------------
# smooth the back-projection with an elliptical (circular) kernel
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
cv2.filter2D(distance, -1, kernel, distance)
print('---- distance map after smoothing ----')
# show the smoothed distance
ava.cv.utl.show_image_wait_2(distance) # ------------
# threshold
ret, thresh = cv2.threshold(distance, 55, 180, cv2.THRESH_BINARY)
# thresh = cv2.merge([thresh, thresh, thresh])
# do the bitwise_and
#result = cv2.bitwise_and(src_img_cp, thresh)
return thresh
class ImageComparator(object):
def __init__(self):
super(ImageComparator, self).__init__()
self._ref_img = None
self._ref_img_color_reduced = None
self._ref_img_histogram = None
self._color_reduction_factor = 32
@property
def ref_img(self):
return self._ref_img
@ref_img.setter
def ref_img(self, img):
self._ref_img = img
self._ref_img_color_reduced = \
ava.cv.utl.color_reduce_2(img, self._color_reduction_factor)
ref_img_hsv = cv2.cvtColor(self._ref_img_color_reduced, cv2.COLOR_BGR2HSV)
self._ref_img_histogram = \
get_hs_histogram(ref_img_hsv)
@ref_img.deleter
def ref_img(self):
del(self._ref_img)
@property
def ref_img_color_reduced(self):
return self._ref_img_color_reduced
def compare(self, the_img):
the_img_color_reduced = \
ava.cv.utl.color_reduce_2(the_img, self._color_reduction_factor)
the_img_hsv = cv2.cvtColor(the_img_color_reduced, cv2.COLOR_BGR2HSV)
the_img_hist = get_hs_histogram(the_img_hsv)
# print('h1.type():', self._ref_img_histogram.type())
# print('h2.type():', the_img_hist.type())
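# Method 0 passed to compareHist is correlation (cv2.HISTCMP_CORREL):
# 1.0 means identical histograms, values near 0 mean unrelated ones.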
score = cv2.compareHist(self._ref_img_histogram, the_img_hist, 0)
return score
@ava.utl.time_this
def main(argv=None):
if argv is None:
argv = sys.argv
# logger
ava.utl.setup_logging()
logger = logging.getLogger(__name__).getChild('main')
logger.debug('starting main.')
img_root_path = '../images'
img_files = [
os.path.join(img_root_path, 'waves.jpg'),
os.path.join(img_root_path, 'beach.jpg'),
os.path.join(img_root_path, 'dog.jpg'),
os.path.join(img_root_path, 'polar.jpg'),
os.path.join(img_root_path, 'bear.jpg'),
os.path.join(img_root_path, 'lake.jpg'),
os.path.join(img_root_path, 'moose.jpg') ]
img_comparator = ImageComparator()
img_comparator.ref_img = cv2.imread(
os.path.join(img_root_path, 'waves.jpg'))
# compare
for img_file in img_files:
the_img = cv2.imread(img_file)
img_size = the_img.shape[0] * the_img.shape[1]
score = img_comparator.compare(the_img)
print(img_file + ', score: %6.4f' % (score))
ava.cv.utl.show_image_wait_2(img_comparator.ref_img_color_reduced)
sys.exit()
if __name__ == "__main__":
main()
|
neilhan/python_cv_learning
|
04-similar_images/run_me.py
|
Python
|
bsd-3-clause
| 5,260
|
[
"MOOSE"
] |
de0815bca999d55c913c710b73498379894cf57e9209d8e77cd0153005521a6f
|
# encoding: utf-8
import json
from vapory import *
from random import choice
from ase.io import read
from aces.input import getboxrange
import numpy as np
def drawRegions():
xlo,xhi,ylo,yhi,zlo,zhi=getboxrange()
f=open('regions.txt')
regions=json.loads(f.read())
f.close()
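# Expected regions.txt layout (inferred from the loop below): a JSON list of
# region dicts such as [{"type": "box", "dim": [[0, 0, "INF"], [5, 5, "INF"]]}],
# where an "INF" coordinate snaps that corner to the simulation box boundary.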
objs=[]
a=[1.0]
colors=np.array([
[0,0,1],[0,1,0],[1,0,0],[1,1,0],[1,0,1],[0,1,1],[.9,.5,.9],[2,1,1],[1,2,1],[1,1,2]
])
wall = Plane([0, 1, 0], -(yhi-ylo)/2,Texture( Pigment( 'color rgb', [1, 1, 1]),
Finish( 'phong', 0.8,
'reflection',0.0,
'metallic', 0.1,'ior',1.5,'diffuse', .5)))
"""
ground = Plane( [0, 0, -1], -(zhi-zlo)/2,
Texture( Pigment( 'color rgb', [1, 1, 1]),
Finish( 'phong', 0.8,
'reflection',0.7,'ambient',0.5,
'metallic', 0.8,'ior',1.5,'diffuse', .9)))
"""
ground = Plane( [0, 0, -1], -(zhi+zlo)/2,Texture(Pigment(""" gradient y
color_map {
[0, 0.25 color Gray color Gray]
[0.25, 0.50 color DimGray color LightGray]
[0.50, 0.75 color LightGray color Gray]
[0.75, 1 color Gray color Gray]
}
scale <1, 8, 1> turbulence 5""")))
light = LightSource([0, -50,-(zhi-zlo)/2-200], 'White shadowless')
lo=[xlo,ylo,zlo]
hi=[xhi,yhi,zhi]
k=0
for region in regions:
if region['type']=='box':
x=region['dim'][0]
y=region['dim'][1]
for i in range(3):
if x[i]=='INF':x[i]=lo[i]
if y[i]=='INF':y[i]=hi[i]
obj = Box(x,y, Pigment('color', 'rgbf',list(1*colors[k])+a),Finish('phong', 0.5,'ambient 0.7',
'reflection', 0.3,'metallic', 0.2 ),Interior('ior',1.2))
objs.append(obj)
k+=1
atoms=read('minimize/range',format='lammps')
balls=[]
for pos in atoms.positions:
ball=Sphere(np.array(pos)+np.array([xlo,ylo,zlo]),0.7,Pigment('color White', ),Finish('phong', 1,
'reflection', 0.1,'metallic', .1 ),Interior('ior',1.2)
)
balls.append(ball)
object=Union().add_args(objs+balls+['translate %f*x'%(-(xhi+xlo)/2),'translate %f*y'%(-(yhi+ylo)/2),'translate %f*z'%(-(zhi+zlo)/2)])
scene = Scene( Camera('orthographic',"location", [0, 0, -2], "look_at", [0, 0,0],'direction',[0,0,1],'sky',[0,0,-1],'scale 70'),objects = [ ground,light,object],included=["glass.inc","colors.inc","textures.inc"] )
scene.render('regions.png',width=800,height=600 ,antialiasing=.01,remove_temp=False)
|
vanceeasleaf/aces
|
aces/io/lammps/region.py
|
Python
|
gpl-2.0
| 2,626
|
[
"ASE",
"LAMMPS"
] |
a53e3998c506f2b2f05ec5debe18caa249fc11c98138c7d74bbe92b3edff7e6d
|
# Copyright (C) 2004-2008 Paul Cochrane
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Example of plotting multiple curves offset from each other with pyvisi
This is an example with simulated seismic data, and is a larger dataset
than seismicOffsetPlotExample.py
"""
import sys
numArgs = len(sys.argv)
if numArgs == 1:
ren_mod = "vtk"
else:
ren_mod = sys.argv[1]
# set up some data to plot
from numpy import *
# read in the data (being fortunate we know how much data there is)
fp = open('waves.dat')
t = zeros((100), dtype=float)
x = zeros((13), dtype=float)
data = zeros((100,13), dtype=float)
for i in range(100):
for j in range(13):
line = fp.readline()
arr = line.split()
t[i] = float(arr[0])
x[j] = float(arr[1])
data[i,j] = float(arr[2])
fp.close()
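# waves.dat layout (inferred from the read loop above): 1300 whitespace-
# separated lines of "t x value", with the 13 x offsets cycling fastest
# within each of the 100 time samples.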
# example code for how a user would write a script in pyvisi
from pyvisi import * # base level visualisation stuff
#from pyvisi.utils import * # pyvisi specific utils
# import the objects to render the scene using the specific renderer
if ren_mod == "gnuplot":
from pyvisi.renderers.gnuplot import * # gnuplot
elif ren_mod == "vtk":
from pyvisi.renderers.vtk import * # vtk
else:
raise ValueError, "Unknown renderer module"
# define the scene object
# a Scene is a container for all of the kinds of things you want to put
# into your plot for instance, images, meshes, arrow/vector/quiver plots,
# contour plots, spheres etc.
scene = Scene()
# create an OffsetPlot object
plot = OffsetPlot(scene)
# add some helpful info to the plot
plot.title = 'OffsetPlot example - waves.dat'
plot.xlabel = 't'
plot.ylabel = 'y'
# assign some data to the plot
plot.setData(t, data)
# render the scene to screen
scene.render(pause=True, interactive=True)
# save the scene to file
scene.save(fname="offsetPlot.png", format=PngImage())
# vim: expandtab shiftwidth=4:
|
paultcochrane/pyvisi
|
examples/offsetPlot.py
|
Python
|
gpl-2.0
| 2,577
|
[
"VTK"
] |
79b2c9b814dac5fa718086f13e07a3a94560ee21faaeed72b45f8308e81e5fa6
|
# Regular Scripted module import
from __main__ import vtk, qt, ctk, slicer
#
# SimpleWorkflow
#
class SimpleWorkflow:
def __init__(self, parent):
import string
parent.title = "Bender Simple Workflow"
parent.categories = ["", "Segmentation.Bender"]
parent.contributors = ["Julien Finet (Kitware), Johan Andruejol (Kitware)"]
parent.helpText = string.Template("""
Step by step workflow to reposition a labelmap. See <a href=\"$a/Documentation/$b.$c/Modules/SimpleWorkflow\">$a/Documentation/$b.$c/Modules/SimpleWorkflow</a> for more information.
""").substitute({ 'a':'http://public.kitware.com/Wiki/Bender', 'b':2, 'c':0 })
parent.acknowledgementText = """
This work is supported by Air Force Research Laboratory (AFRL)
"""
parent.index = 0
self.parent = parent
#
# SimpleWorkflow widget
#
class SimpleWorkflowWidget:
def __init__(self, parent = None):
if not parent:
self.setup()
self.parent.show()
else:
self.parent = parent
self.logic = None
self.labelmapNode = None
self.parent.show()
def setup(self):
self.IsSetup = True
self.Observations = []
import imp, sys, os, slicer, qt
loader = qt.QUiLoader()
moduleName = 'SimpleWorkflow'
scriptedModulesPath = eval('slicer.modules.%s.path' % moduleName.lower())
scriptedModulesPath = os.path.dirname(scriptedModulesPath)
path = os.path.join(scriptedModulesPath, 'Resources', 'UI', 'SimpleWorkflow.ui')
qfile = qt.QFile(path)
qfile.open(qt.QFile.ReadOnly)
widget = loader.load( qfile, self.parent )
self.layout = self.parent.layout()
self.widget = widget
self.layout.addWidget(widget)
self.WorkflowWidget = self.get('SimpleWorkflowWidget')
print(self.WorkflowWidget)
self.TitleLabel = self.get('TitleLabel')
# Global variables
self.StatusModifiedEvent = slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent
# Labelmap variables
# Compute Weight variables
self.volumeSkinningCreateOutputConnected = False
# Pose surface variables
self.poseSurfaceCreateOutputConnected = False
# Pose surface variables
self.poseLabelmapCreateOutputConnected = False
self.pages = { 0 : 'Adjust',
1 : 'Extract',
2 : 'Armature',
3 : 'Skinning',
4 : 'Weights',
5 : 'PoseArmature',
6 : 'PoseLabelmap'
}
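# The dict above maps each stacked-widget page index to the workflow step it
# implements; the Previous/Next buttons wired up below presumably step
# through these pages in order.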
# Load/Save icons
loadIcon = self.WorkflowWidget.style().standardIcon(qt.QStyle.SP_DialogOpenButton)
saveIcon = self.WorkflowWidget.style().standardIcon(qt.QStyle.SP_DialogSaveButton)
self.get('LabelmapVolumeNodeToolButton').icon = loadIcon
self.get('LabelmapColorNodeToolButton').icon = loadIcon
self.get('MergeLabelsOutputNodeToolButton').icon = saveIcon
self.get('MergeLabelsSaveToolButton').icon = saveIcon
self.get('BoneModelMakerOutputNodeToolButton').icon = saveIcon
self.get('BoneModelMakerSaveToolButton').icon = saveIcon
self.get('SkinModelMakerOutputNodeToolButton').icon = saveIcon
self.get('SkinModelMakerSaveToolButton').icon = saveIcon
self.get('ArmaturesArmatureSaveToolButton').icon = saveIcon
self.get('VolumeSkinningInputVolumeNodeToolButton').icon = loadIcon
self.get('VolumeSkinningOutputVolumeNodeToolButton').icon = saveIcon
self.get('VolumeSkinningSaveToolButton').icon = saveIcon
self.get('EditSkinnedVolumeNodeToolButton').icon = loadIcon
self.get('EditSkinnedVolumeNodeSaveToolButton').icon = saveIcon
self.get('EditSkinnedVolumeSaveToolButton').icon = saveIcon
self.get('EvalSurfaceWeightInputNodeToolButton').icon = loadIcon
self.get('EvalSurfaceWeightOutputNodeToolButton').icon = saveIcon
self.get('PoseArmatureArmatureNodeToolButton').icon = loadIcon
self.get('PoseArmatureArmatureNodeSaveToolButton').icon = saveIcon
self.get('PoseArmatureSaveToolButton').icon = saveIcon
self.get('PoseSurfaceInputNodeToolButton').icon = loadIcon
self.get('PoseSurfaceOutputNodeToolButton').icon = saveIcon
self.get('PoseLabelmapOutputNodeToolButton').icon = saveIcon
self.get('PoseLabelmapSaveToolButton').icon = saveIcon
# --------------------------------------------------------------------------
# Connections
# Workflow
self.get('NextPageToolButton').connect('clicked()', self.goToNext)
self.get('PreviousPageToolButton').connect('clicked()', self.goToPrevious)
# 0) Welcome
self.get('SettingsWorkflowComboBox').connect('currentIndexChanged(int)', self.setupWorkflow)
self.get('SettingsReloadPushButton').connect('clicked()', self.reloadModule)
# 1) Adjust Labelmap
# a) Labelmap
self.get('LabelmapVolumeNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.setupLabelmap)
self.get('LabelmapColorNodeComboBox').connect('nodeActivated(vtkMRMLNode*)', self.applyColorNode)
self.get('LabelmapVolumeNodeToolButton').connect('clicked()', self.loadLabelmapVolumeNode)
self.get('LabelmapColorNodeToolButton').connect('clicked()', self.loadLabelmapColorNode)
self.get('LabelMapApplyColorNodePushButton').connect('clicked()', self.applyColorNode)
self.get('LabelmapGoToModulePushButton').connect('clicked()', self.openLabelmapModule)
self.get('LPSRASTransformPushButton').connect('clicked()', self.runLPSRASTransform)
# 2) Model Maker
# a) Merge Labels
self.get('MergeLabelsInputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.setupMergeLabels)
self.get('MergeLabelsOutputNodeToolButton').connect('clicked()', self.saveMergeLabelsVolumeNode)
self.get('MergeLabelsApplyPushButton').connect('clicked(bool)', self.runMergeLabels)
self.get('MergeLabelsGoToModulePushButton').connect('clicked()', self.openMergeLabelsModule)
# b) Bone Model Maker
self.get('BoneLabelComboBox').connect('currentColorChanged(int)', self.setupBoneModelMakerLabels)
self.get('BoneModelMakerInputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.setupBoneModelMakerLabels)
self.get('BoneModelMakerOutputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.setupBoneModelMakerLabels)
self.get('BoneModelMakerOutputNodeToolButton').connect('clicked()', self.saveBoneModelMakerModelNode)
self.get('BoneModelMakerApplyPushButton').connect('clicked(bool)', self.runBoneModelMaker)
self.get('BoneModelMakerGoToModelsModulePushButton').connect('clicked()', self.openModelsModule)
self.get('BoneModelMakerGoToModulePushButton').connect('clicked()', self.openBoneModelMakerModule)
# c) Skin Model Maker
self.get('SkinModelMakerInputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.setupSkinModelMakerLabels)
self.get('SkinModelMakerOutputNodeToolButton').connect('clicked()', self.saveSkinModelMakerModelNode)
self.get('SkinModelMakerToggleVisiblePushButton').connect('clicked()', self.updateSkinNodeVisibility)
self.get('SkinModelMakerApplyPushButton').connect('clicked(bool)', self.runSkinModelMaker)
self.get('SkinModelMakerGoToModelsModulePushButton').connect('clicked()', self.openModelsModule)
self.get('SkinModelMakerGoToModulePushButton').connect('clicked()', self.openSkinModelMakerModule)
self.get('SkinLabelComboBox').connect('currentColorChanged(int)', self.setSkinModelMakerSkinLabel)
# b) Data
self.get('VisibleNodesComboBox').connect('checkedNodesChanged()', self.setNodesVisibility)
self.get('VisibleNodesComboBox').connect('nodeAdded(vtkMRMLNode*)', self.onNodeAdded)
# c) Volume Render
self.get('BoneLabelComboBox').connect('currentColorChanged(int)', self.setupVolumeRenderLabels)
self.get('SkinLabelComboBox').connect('currentColorChanged(int)', self.setupVolumeRenderLabels)
self.get('VolumeRenderInputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.setupVolumeRender)
self.get('VolumeRenderLabelsLineEdit').connect('editingFinished()', self.updateVolumeRenderLabels)
    self.get('VolumeRenderCheckBox').connect('toggled(bool)', self.runVolumeRender)
self.get('VolumeRenderCropCheckBox').connect('toggled(bool)', self.onCropVolumeRender)
self.get('VolumeRenderGoToModulePushButton').connect('clicked()', self.openVolumeRenderModule)
# 3) Armatures
self.get('ArmaturesPresetComboBox').connect('activated(int)', self.loadArmaturePreset)
    self.get('ArmaturesArmatureNodeComboBox').connect('nodeAdded(vtkMRMLNode*)', self.onArmatureNodeAdded)
self.get('ArmaturesArmatureNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.setCurrentArmatureModelNode)
self.get('ArmaturesToggleVisiblePushButton').connect('clicked()', self.updateSkinNodeVisibility)
self.get('ArmaturesArmatureSaveToolButton').connect('clicked()', self.saveArmatureNode)
self.get('ArmaturesGoToPushButton').connect('clicked()', self.openArmaturesModule)
# 4) Skinning
# a) Volume Skinning
self.get('VolumeSkinningInputVolumeNodeToolButton').connect('clicked()', self.loadSkinningInputVolumeNode)
self.get('VolumeSkinningOutputVolumeNodeToolButton').connect('clicked()', self.saveSkinningVolumeNode)
    self.get('VolumeSkinningApplyPushButton').connect('clicked(bool)', self.runVolumeSkinning)
self.get('VolumeSkinningGoToPushButton').connect('clicked()', self.openVolumeSkinningModule)
# b) Edit skinned volume
self.get('EditSkinnedVolumeNodeToolButton').connect('clicked()', self.loadEditSkinnedVolumeNode)
self.get('EditSkinnedVolumeNodeSaveToolButton').connect('clicked()', self.saveEditSkinnedVolumeNode)
self.get('EditSkinnedVolumeNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.editSkinnedVolumeParameterChanged)
self.get('EditSkinnedVolumeGoToEditorPushButton').connect('clicked()', self.openEditorModule)
# 5) Weights
# a) Armatures Weight
self.get('ComputeArmatureWeightInputVolumeNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.setDefaultPath)
self.get('ComputeArmatureWeightScaleFactorSpinBox').connect('valueChanged(double)', self.setDefaultPath)
    self.get('ComputeArmatureWeightApplyPushButton').connect('clicked(bool)', self.runComputeArmatureWeight)
self.get('ComputeArmatureWeightGoToPushButton').connect('clicked()', self.openComputeArmatureWeightModule)
# b) Eval Weight
self.get('EvalSurfaceWeightInputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.evalSurfaceWeightParameterChanged)
self.get('EvalSurfaceWeightWeightPathLineEdit').connect('currentPathChanged(QString)', self.evalSurfaceWeightParameterChanged)
self.get('EvalSurfaceWeightOutputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.evalSurfaceWeightParameterChanged)
self.get('EvalSurfaceWeightInputNodeToolButton').connect('clicked()', self.loadEvalSurfaceWeightInputNode)
self.get('EvalSurfaceWeightOutputNodeToolButton').connect('clicked()', self.saveEvalSurfaceWeightOutputNode)
self.get('EvalSurfaceWeightApplyPushButton').connect('clicked(bool)', self.runEvalSurfaceWeight)
self.get('EvalSurfaceWeightGoToPushButton').connect('clicked()', self.openEvalSurfaceWeight)
self.get('EvalSurfaceWeightWeightPathLineEdit').connect('currentPathChanged(QString)', self.setWeightDirectory)
    # 6) Pose Armature and Pose Surface
# a) Pose Armature
self.get('PoseArmatureArmatureNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.setPoseArmatureModelNode)
self.get('PoseArmatureArmatureNodeToolButton').connect('clicked()', self.loadArmatureNode)
self.get('PoseArmatureArmatureNodeSaveToolButton').connect('clicked()', self.savePoseArmatureArmatureNode)
self.get('PoseArmaturesGoToPushButton').connect('clicked()', self.openPosedArmatureModule)
# b) Pose Surface
self.get('PoseSurfaceInputNodeToolButton').connect('clicked()', self.loadPoseSurfaceInputNode)
self.get('PoseSurfaceOutputNodeToolButton').connect('clicked()', self.savePoseSurfaceOutputNode)
self.get('PoseSurfaceOutputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.poseSurfaceParameterChanged)
self.get('PoseSurfaceWeightInputPathLineEdit').connect('currentPathChanged(QString)', self.poseSurfaceParameterChanged)
self.get('PoseSurfaceInputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.poseSurfaceInputNodeChanged)
self.get('PoseSurfaceArmatureInputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.poseSurfaceParameterChanged)
self.get('PoseSurfaceApplyPushButton').connect('clicked(bool)', self.runPoseSurface)
self.get('PoseSurfaceApplyPushButton').connect('checkBoxToggled(bool)', self.autoRunPoseSurface)
self.get('PoseSurfaceGoToPushButton').connect('clicked()', self.openPoseSurfaceModule)
self.get('ComputeArmatureWeightOutputPathLineEdit').connect('currentPathChanged(QString)', self.setWeightDirectory)
# 7) Resample
self.get('PoseLabelmapInputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.poseLabelmapParameterChanged)
self.get('PoseLabelmapArmatureNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.poseLabelmapParameterChanged)
self.get('PoseLabelmapWeightPathLineEdit').connect('currentPathChanged(QString)', self.poseLabelmapParameterChanged)
self.get('PoseLabelmapOutputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.poseLabelmapParameterChanged)
self.get('PoseLabelmapOutputNodeToolButton').connect('clicked()', self.savePoseLabelmapOutputNode)
self.get('PoseLabelmapApplyPushButton').connect('clicked(bool)', self.runPoseLabelmap)
self.get('PoseLabelmapGoToPushButton').connect('clicked()', self.openPoseLabelmap)
self.get('PoseLabelmapInputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.createOutputPoseLabelmap)
self.get('BoneLabelsLineEdit').connect('textChanged(QString)', self.setupPoseLabelmap)
self.get('SkinLabelsLineEdit').connect('textChanged(QString)', self.setupPoseLabelmap)
# --------------------------------------------------------------------------
    # Initialize all the MRML-aware GUI elements. Setting the scene below
    # triggers many of the setup methods.
self.setupComboboxes()
self.widget.setMRMLScene(slicer.mrmlScene)
    # IsSetup can be used to prevent processing while the scene is being set;
    # other items might not have the scene set yet.
    self.IsSetup = False
# init pages after the scene is set.
self.initWelcomePage()
for page in self.pages.values():
initMethod = getattr(self, 'init' + page + 'Page')
initMethod()
# Init title
self.updateHeader()
# Workflow page
self.setupWorkflow(self.get('SettingsWorkflowComboBox').currentIndex)
self.get('AdvancedPropertiesWidget').setVisible(self.get('ExpandAdvancedPropertiesButton').isChecked())
  # Workflow
def updateHeader(self):
# title
title = self.WorkflowWidget.currentWidget().accessibleName
self.TitleLabel.setText('<h2>%i/%i<br>%s</h2>' % (self.WorkflowWidget.currentIndex + 1, self.WorkflowWidget.count, title))
# help
self.get('HelpCollapsibleButton').setText('Help')
self.get('HelpLabel').setText(self.WorkflowWidget.currentWidget().accessibleDescription)
# Hide the Status if not running
cliNode = self.get('CLIProgressBar').commandLineModuleNode()
if cliNode != None and not cliNode.IsBusy():
self.get('CLIProgressBar').setCommandLineModuleNode(0)
# previous
if self.WorkflowWidget.currentIndex > 0:
self.get('PreviousPageToolButton').setVisible(True)
previousIndex = self.WorkflowWidget.currentIndex - 1
previousWidget = self.WorkflowWidget.widget(previousIndex)
previous = previousWidget.accessibleName
      self.get('PreviousPageToolButton').setText('< %i/%i - %s' % (previousIndex + 1, self.WorkflowWidget.count, previous))
else:
self.get('PreviousPageToolButton').setVisible(False)
# next
if self.WorkflowWidget.currentIndex < self.WorkflowWidget.count - 1:
self.get('NextPageToolButton').setVisible(True)
nextIndex = self.WorkflowWidget.currentIndex + 1
nextWidget = self.WorkflowWidget.widget(nextIndex)
next = nextWidget.accessibleName
      self.get('NextPageToolButton').setText('%i/%i - %s >' % (nextIndex + 1, self.WorkflowWidget.count, next))
else:
self.get('NextPageToolButton').setVisible(False)
    self.get('NextPageToolButton').enabled = not self.isWorkflow(0)
# disable the refreshes to avoid flickering
self.WorkflowWidget.updatesEnabled = False
# initialize the module
    openMethod = getattr(self, 'open' + self.pages[self.WorkflowWidget.currentIndex] + 'Page')
openMethod()
workflowMode = self.get('SettingsWorkflowComboBox').currentIndex
# turn the widget in advanced mode to show all the GUI components
# so it takes as much space as possible.
self.get('SettingsWorkflowComboBox').currentIndex = 2
# expand all the collapsible group boxes to compute the minimum height.
collapsedGroupBox = []
for collapsibleGroupBox in self.WorkflowWidget.currentWidget().findChildren(ctk.ctkCollapsibleGroupBox):
collapsedGroupBox.append(collapsibleGroupBox.collapsed)
collapsibleGroupBox.collapsed = False
self.WorkflowWidget.maximumHeight = self.WorkflowWidget.currentWidget().sizeHint.height()
# restore the groupbox collapse mode
for collapsibleGroupBox in self.WorkflowWidget.currentWidget().findChildren(ctk.ctkCollapsibleGroupBox):
collapsibleGroupBox.collapsed = collapsedGroupBox.pop(0)
self.get('SettingsWorkflowComboBox').currentIndex = workflowMode
# validate to enable/disable next button
    validateMethod = getattr(self, 'validate' + self.pages[self.WorkflowWidget.currentIndex] + 'Page')
validateMethod()
self.WorkflowWidget.updatesEnabled = True
self.WorkflowWidget.resize(self.WorkflowWidget.width,
self.WorkflowWidget.currentWidget().sizeHint.height())
def goToPrevious(self):
self.WorkflowWidget.setCurrentIndex(self.WorkflowWidget.currentIndex - 1)
self.updateHeader()
def goToNext(self):
self.WorkflowWidget.setCurrentIndex(self.WorkflowWidget.currentIndex + 1)
self.updateHeader()
#----------------------------------------------------------------------------
# 0) Settings
#----------------------------------------------------------------------------
def initWelcomePage(self):
self.initData()
# Collapse DataProbe as it takes screen real estate
dataProbeCollapsibleWidget = self.findWidget(
slicer.util.mainWindow(), 'DataProbeCollapsibleWidget')
dataProbeCollapsibleWidget.checked = False
def openWelcomePage(self):
print('welcome')
def isWorkflow(self, level):
return self.get('SettingsWorkflowComboBox').currentIndex == level
# Helper function for setting the visibility of a list of widgets
def setWidgetsVisibility(self, widgets, level):
for widget in widgets:
workflow = widget.property('workflow')
if workflow != None:
        widget.setVisible(str(level) in workflow)
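  # A minimal sketch, not called by the workflow, of how a widget opts into
  # the modes that setWidgetsVisibility() reads above: the .ui file stores a
  # 'workflow' dynamic property listing the modes in which the widget should
  # be visible. This method and its widget argument are hypothetical.
  def tagWidgetForWorkflowExample(self, widget):
    # Show the widget in basic (0) and intermediate (1) modes only;
    # setWidgetsVisibility(widgets, 2) would then hide it.
    widget.setProperty('workflow', ['0', '1'])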
def setupWorkflow(self, level):
self.setWidgetsVisibility(self.getChildren(self.WorkflowWidget), level)
self.setWidgetsVisibility(self.getChildren(self.get('AdvancedTabWidget')), level)
# Validate the current page (to disable/enable the next page tool button if needed)
self.get('NextPageToolButton').enabled = True
    validateMethod = getattr(self, 'validate' + self.pages[self.WorkflowWidget.currentIndex] + 'Page')
validateMethod()
def setupComboboxes(self):
    # Add here the combo boxes that should only show labelmaps
    labelMapComboBoxes = ['MergeLabelsInputNodeComboBox', 'MergeLabelsOutputNodeComboBox',
                          'VolumeSkinningInputVolumeNodeComboBox', 'VolumeSkinningOutputVolumeNodeComboBox',
                          'EditSkinnedVolumeNodeComboBox',
                          'ComputeArmatureWeightInputVolumeNodeComboBox', 'ComputeArmatureWeightSkinnedVolumeVolumeNodeComboBox',
                          'PoseLabelmapInputNodeComboBox', 'PoseLabelmapOutputNodeComboBox']
    for combobox in labelMapComboBoxes:
      self.get(combobox).addAttribute('vtkMRMLScalarVolumeNode', 'LabelMap', '1')
def observeCLINode(self, cliNode, onCLINodeModified = None):
if cliNode != None and onCLINodeModified != None:
self.addObserver(cliNode, self.StatusModifiedEvent, onCLINodeModified)
self.get('CLIProgressBar').setCommandLineModuleNode(cliNode)
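  # Note: the CLI-based sections below all follow the same lifecycle:
  # getCLINode() fetches a reusable command-line-module node, observeCLINode()
  # hooks the status callback and the progress bar, slicer.cli.run() starts
  # the module asynchronously, and the matching on...CLIModified() callback
  # re-validates the section and removes its observer once the node reports
  # that it is no longer busy.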
#----------------------------------------------------------------------------
# b) Data
#----------------------------------------------------------------------------
def initData(self):
self.IgnoreSetNodesVisibility = False
self.get('VisibleNodesComboBox').sortFilterProxyModel().filterCaseSensitivity = qt.Qt.CaseInsensitive
self.get('VisibleNodesComboBox').sortFilterProxyModel().sort(0)
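    # The application-wide selection node records which (label)volumes are
    # shown in the slice viewers; observing it keeps the visibility
    # checkmarks in sync when the active volume changes outside this module.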
selectionNode = slicer.app.applicationLogic().GetSelectionNode()
self.addObserver(selectionNode, 'ModifiedEvent', self.onNodeModified)
def setNodesVisibility(self):
"""Set the visibility of nodes based on their check marks."""
visibleNodes = self.get('VisibleNodesComboBox').checkedNodes()
for node in visibleNodes:
self.setNodeVisibility(node, 1)
hiddenNodes = self.get('VisibleNodesComboBox').uncheckedNodes()
for node in hiddenNodes:
self.setNodeVisibility(node, 0)
def setNodeVisibility(self, node, visible):
"""Set the visiblity of a displayable node when the user checks it."""
    if self.IgnoreSetNodesVisibility:
return
selectionNode = slicer.app.applicationLogic().GetSelectionNode()
if (node.IsA('vtkMRMLScalarVolumeNode')):
if (not visible):
if (selectionNode.GetActiveVolumeID() == node.GetID()):
selectionNode.SetActiveVolumeID(None)
if (selectionNode.GetActiveLabelVolumeID() == node.GetID()):
selectionNode.SetActiveLabelVolumeID(None)
else:
if (node.GetLabelMap() == 0):
selectionNode.SetActiveVolumeID(node.GetID())
else:
selectionNode.SetActiveLabelVolumeID(node.GetID())
slicer.app.applicationLogic().PropagateVolumeSelection()
elif node.IsA('vtkMRMLArmatureNode'):
armatureLogic = slicer.modules.armatures.logic()
armatureLogic.SetArmatureVisibility(node, visible)
else:
displayNode = node.GetDisplayNode()
if displayNode != None:
displayNode.SetVisibility(visible)
def nodeVisibility(self, node):
"""Return true if the node is visible, false if it is hidden."""
selectionNode = slicer.app.applicationLogic().GetSelectionNode()
visible = False
if (node.IsA('vtkMRMLScalarVolumeNode')):
visible = (selectionNode.GetActiveVolumeID() == node.GetID() or
selectionNode.GetActiveLabelVolumeID() == node.GetID())
elif node.IsA('vtkMRMLArmatureNode'):
armatureLogic = slicer.modules.armatures.logic()
visible = armatureLogic.GetArmatureVisibility(node)
else:
displayNode = node.GetDisplayNode()
if (displayNode != None):
visible = displayNode.GetVisibility() == 1
return visible
def onNodeAdded(self, node):
"""Observe the node to synchronize its visibility with the checkmarks"""
self.addObserver(node, slicer.vtkMRMLDisplayableNode.DisplayModifiedEvent, self.onNodeModified)
self.onNodeModified(node, 'DisplayModifiedEvent')
def onNodeModified(self, node, event):
"""Update the node checkmark based on its visibility"""
# Selection node is a special case
if node.IsA('vtkMRMLSelectionNode'):
# check all the volumes to see which one is active
volumeNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLScalarVolumeNode')
volumeNodes.UnRegister(slicer.mrmlScene)
for i in range(0, volumeNodes.GetNumberOfItems()):
volumeNode = volumeNodes.GetItemAsObject(i)
self.onNodeModified(volumeNode, 'ModifiedEvent')
return
elif node.IsA('vtkMRMLArmatureNode'):
      # Hide the armature model node; it is not to be displayed.
armatureLogic = slicer.modules.armatures.logic()
modelNode = armatureLogic.GetArmatureModel(node)
if modelNode != None:
self.get('VisibleNodesComboBox').sortFilterProxyModel().hiddenNodeIDs = [modelNode.GetID()]
visible = self.nodeVisibility(node)
checkState = qt.Qt.Checked if visible else qt.Qt.Unchecked
self.IgnoreSetNodesVisibility = True
self.get('VisibleNodesComboBox').setCheckState(node, checkState)
self.IgnoreSetNodesVisibility = False
#----------------------------------------------------------------------------
# c) Volume Render
#----------------------------------------------------------------------------
def updateVolumeRender(self, volumeNode, event):
if volumeNode != self.get('VolumeRenderInputNodeComboBox').currentNode():
return
self.setupVolumeRender(volumeNode)
def setupVolumeRender(self, volumeNode):
self.removeObservers(self.updateVolumeRender)
if volumeNode == None:
return
displayNode = volumeNode.GetNthDisplayNodeByClass(0, 'vtkMRMLVolumeRenderingDisplayNode')
visible = False
if displayNode != None:
visible = displayNode.GetVisibility()
self.get('VolumeRenderCheckBox').setChecked(visible)
self.setupVolumeRenderLabels()
self.addObserver(volumeNode, 'ModifiedEvent', self.updateVolumeRender)
def setupVolumeRenderLabels(self):
""" Update the labels of the volume rendering
"""
labels = []
labels.append(self.get('BoneLabelComboBox').currentColor)
labels.append(self.get('SkinLabelComboBox').currentColor)
self.get('VolumeRenderLabelsLineEdit').setText(', '.join(str(val) for val in labels))
def getVolumeRenderLabels(self):
labels = self.get('VolumeRenderLabelsLineEdit').text.split(', ')
labels = filter(lambda x: x != '', labels)
return labels
def updateVolumeRenderLabels(self):
""" Update the LUT used to volume render the labelmap
"""
if not self.get('VolumeRenderCheckBox').isChecked():
return
volumeNode = self.get('VolumeRenderInputNodeComboBox').currentNode()
displayNode = volumeNode.GetNthDisplayNodeByClass(0, 'vtkMRMLVolumeRenderingDisplayNode')
volumePropertyNode = displayNode.GetVolumePropertyNode()
opacities = volumePropertyNode.GetScalarOpacity()
labels = self.getVolumeRenderLabels()
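    # Each scalar-opacity transfer-function node is (x, value, midpoint,
    # sharpness); only the opacity (index 1) is toggled below, and the
    # sharpness (index 3) is kept at 1 so label boundaries stay crisp.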
for i in range(opacities.GetSize()):
node = [0, 0, 0, 0]
opacities.GetNodeValue(i, node)
if (str(i) in labels) or (i != 0 and len(labels) == 0):
node[1] = 0.5
node[3] = 1
else:
node[1] = 0
node[3] = 1
opacities.SetNodeValue(i, node)
opacities.Modified()
def runVolumeRender(self, show):
"""Start/stop to volume render a volume"""
volumeNode = self.get('VolumeRenderInputNodeComboBox').currentNode()
displayNode = volumeNode.GetNthDisplayNodeByClass(0, 'vtkMRMLVolumeRenderingDisplayNode')
if not show:
if displayNode == None:
return
displayNode.SetVisibility(0)
else:
volumeRenderingLogic = slicer.modules.volumerendering.logic()
if displayNode == None:
displayNode = volumeRenderingLogic.CreateVolumeRenderingDisplayNode()
slicer.mrmlScene.AddNode(displayNode)
displayNode.UnRegister(volumeRenderingLogic)
volumeRenderingLogic.UpdateDisplayNodeFromVolumeNode(displayNode, volumeNode)
volumeNode.AddAndObserveDisplayNodeID(displayNode.GetID())
else:
volumeRenderingLogic.UpdateDisplayNodeFromVolumeNode(displayNode, volumeNode)
self.updateVolumeRenderLabels()
volumePropertyNode = displayNode.GetVolumePropertyNode()
volumeProperty = volumePropertyNode.GetVolumeProperty()
volumeProperty.SetShade(0)
displayNode.SetVisibility(1)
self.onCropVolumeRender(self.get('VolumeRenderCropCheckBox').checked)
def onCropVolumeRender(self, crop):
volumeNode = self.get('VolumeRenderInputNodeComboBox').currentNode()
if volumeNode == None:
return
displayNode = volumeNode.GetNthDisplayNodeByClass(0, 'vtkMRMLVolumeRenderingDisplayNode')
if displayNode == None:
return
roiNode = displayNode.GetROINode()
roiNode.SetDisplayVisibility(crop)
displayNode.SetCroppingEnabled(crop)
def openVolumeRenderModule(self):
self.openModule('VolumeRendering')
#----------------------------------------------------------------------------
# 1) Load inputs
#----------------------------------------------------------------------------
def initAdjustPage(self):
# Init color node combo box <=> make 'Generic Colors' labelmap visible
model = self.get('LabelmapColorNodeComboBox').sortFilterProxyModel()
visibleNodeIDs = []
visibleNodeIDs.append(slicer.mrmlScene.GetFirstNodeByName('GenericAnatomyColors').GetID())
model.visibleNodeIDs = visibleNodeIDs
# LPS <-> RAS Transform
transformMenu = qt.QMenu(self.get('LPSRASTransformPushButton'))
a = transformMenu.addAction('Left <-> Right')
a.setToolTip('Switch the volume orientation from Left to Right')
a.connect('triggered(bool)', self.runLRTransform)
a = transformMenu.addAction('Posterior <-> Anterior')
a.setToolTip('Switch the volume orientation from Posterior to Anterior')
a.connect('triggered(bool)', self.runPATransform)
a = transformMenu.addAction('Superior <-> Inferior')
a.setToolTip('Switch the volume orientation from Superior to Inferior')
a.connect('triggered(bool)', self.runSITransform)
    a = transformMenu.addAction('Center')
    a.setToolTip('Center volume on (0,0,0)')
    a.connect('triggered(bool)', self.runCenter)
    self.get('LPSRASTransformPushButton').setMenu(transformMenu)
self.get('LabelMapApplyColorNodePushButton').visible = False
self.initLabelmap()
def validateAdjustPage(self, validateSections = True):
if validateSections:
self.validateLabelmap()
valid = self.get('LabelmapCollapsibleGroupBox').property('valid')
self.get('NextPageToolButton').enabled = not self.isWorkflow(0) or valid
def openAdjustPage(self):
# Switch to 3D View only
slicer.app.layoutManager().setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutFourUpView)
#----------------------------------------------------------------------------
# a) Labelmap
def initLabelmap(self):
self.validateLabelmap()
def updateLabelmap(self, node, event):
volumeNode = self.get('LabelmapVolumeNodeComboBox').currentNode()
if node != volumeNode and node != volumeNode.GetDisplayNode():
return
self.setupLabelmap(volumeNode)
self.setupMergeLabels(volumeNode)
def setupLabelmap(self, volumeNode):
if volumeNode == None:
return
    # The labelmap color node must get its scene before the volume node
    # selector does; that way setCurrentNode() can work the first time.
self.get('LabelmapColorNodeComboBox').setCurrentNode(volumeNode.GetDisplayNode().GetColorNode())
self.addObserver(volumeNode, 'ModifiedEvent', self.updateLabelmap)
self.addObserver(volumeNode.GetDisplayNode(), 'ModifiedEvent', self.updateLabelmap)
def validateLabelmap(self):
volumeNode = self.get('LabelmapVolumeNodeComboBox').currentNode()
colorNode = self.get('LabelmapColorNodeComboBox').currentNode()
valid = volumeNode != None and colorNode != None
self.get('LabelmapCollapsibleGroupBox').setProperty('valid', valid)
if valid:
self.get('VolumeRenderInputNodeComboBox').setCurrentNode(
self.get('LabelmapVolumeNodeComboBox').currentNode())
self.validateAdjustPage(validateSections = False)
def loadLabelmapVolumeNode(self):
self.loadFile('Volume/Labelmap to reposition', 'VolumeFile', self.get('LabelmapVolumeNodeComboBox'))
def loadLabelmapColorNode(self):
self.loadFile('Tissue/Color file', 'ColorTableFile', self.get('LabelmapColorNodeComboBox'))
def applyColorNode(self):
volumeNode = self.get('LabelmapVolumeNodeComboBox').currentNode()
if volumeNode == None:
self.validateLabelmap()
return
colorNode = self.get('LabelmapColorNodeComboBox').currentNode()
volumesLogic = slicer.modules.volumes.logic()
wasModifying = volumeNode.StartModify()
    volumesLogic.SetVolumeAsLabelMap(volumeNode, colorNode != None) # a None color node means greyscale
labelmapDisplayNode = volumeNode.GetDisplayNode()
if colorNode != None:
labelmapDisplayNode.SetAndObserveColorNodeID(colorNode.GetID())
volumeNode.EndModify(wasModifying)
    # We can't just use a regular Qt signal/slot connection because the input
    # node might not be a labelmap at the time it becomes current, in which
    # case it would not show up in the combobox.
self.get('MergeLabelsInputNodeComboBox').setCurrentNode(volumeNode)
self.setupMergeLabels(volumeNode)
self.get('PoseLabelmapInputNodeComboBox').setCurrentNode(volumeNode)
self.validateLabelmap()
def openLabelmapModule(self):
self.openModule('Volumes')
# b) Transform
def runLPSRASTransform(self):
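    # Flip both the L-R and P-A axes, i.e. diag(-1, -1, 1, 1), which converts
    # between the LPS and RAS coordinate conventions.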
self.runTransform((-1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0))
def runLRTransform(self):
self.runTransform((-1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0))
def runPATransform(self):
self.runTransform((1.0, 0.0, 0.0, 0.0,
0.0, -1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0))
def runSITransform(self):
self.runTransform((1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, -1.0, 0.0,
0.0, 0.0, 0.0, 1.0))
def runTransform(self, matrix):
volumeNode = self.get('LabelmapVolumeNodeComboBox').currentNode()
if volumeNode == None:
return
transform = vtk.vtkMatrix4x4()
transform.DeepCopy(matrix)
volumeNode.ApplyTransformMatrix(transform)
volumeNode.Modified()
def runCenter(self):
volumeNode = self.get('LabelmapVolumeNodeComboBox').currentNode()
volumesLogic = slicer.modules.volumes.logic()
if volumesLogic != None and volumeNode != None:
volumesLogic.CenterVolume(volumeNode)
# need to refresh the views
self.reset3DViews()
self.resetSliceViews()
#----------------------------------------------------------------------------
# 2) Model Maker
#----------------------------------------------------------------------------
def initExtractPage(self):
self.initMergeLabels()
self.initBoneModelMakerLabels()
self.initSkinModelMakerLabels()
def validateExtractPage(self, validateSections = True):
if validateSections:
self.validateMergeLabels()
self.validateBoneModelMakerLabels()
self.validateSkinModelMakerLabels()
valid = self.get('SkinModelMakerCollapsibleGroupBox').property('valid')
self.get('NextPageToolButton').enabled = not self.isWorkflow(0) or valid
def openExtractPage(self):
if self.get('BoneModelMakerOutputNodeComboBox').currentNode() == None:
self.get('BoneModelMakerOutputNodeComboBox').addNode()
if self.get('SkinModelMakerOutputNodeComboBox').currentNode() == None:
self.get('SkinModelMakerOutputNodeComboBox').addNode()
#----------------------------------------------------------------------------
# a) Merge Labels
def initMergeLabels(self):
self.setupMergeLabels(self.get('MergeLabelsInputNodeComboBox').currentNode())
self.validateMergeLabels()
def updateMergeLabels(self, node, event):
volumeNode = self.get('MergeLabelsInputNodeComboBox').currentNode()
if volumeNode == None or (node.IsA('vtkMRMLScalarVolumeNode') and node != volumeNode):
return
elif node.IsA('vtkMRMLVolumeDisplayNode'):
if node != volumeNode.GetDisplayNode():
return
self.setupMergeLabels(volumeNode)
def setupMergeLabels(self, volumeNode):
if volumeNode == None or not volumeNode.GetLabelMap():
return
labelmapDisplayNode = volumeNode.GetDisplayNode()
self.removeObservers(self.updateMergeLabels)
colorNode = labelmapDisplayNode.GetColorNode()
if colorNode == None:
self.get('BoneLabelComboBox').setMRMLColorNode(None)
self.get('SkinLabelComboBox').setMRMLColorNode(None)
self.get('BoneLabelsLineEdit').setText('')
self.get('BoneLabelComboBox').setCurrentColor(None)
self.get('SkinLabelsLineEdit').setText('')
self.get('SkinLabelComboBox').setCurrentColor(None)
else:
self.get('BoneLabelComboBox').setMRMLColorNode(colorNode)
self.get('SkinLabelComboBox').setMRMLColorNode(colorNode)
boneLabels = self.searchLabels(colorNode, 'bone')
boneLabels.update(self.searchLabels(colorNode, 'vertebr'))
boneLabels.update(self.searchLabels(colorNode, 'mandible'))
boneLabels.update(self.searchLabels(colorNode, 'cartilage'))
      self.get('BoneLabelsLineEdit').setText(', '.join(str(val) for val in boneLabels.keys()))
boneLabel = self.bestLabel(boneLabels, ['bone', 'cancellous'])
self.get('BoneLabelComboBox').setCurrentColor(boneLabel)
skinLabels = self.searchLabels(colorNode, 'skin')
self.get('SkinLabelsLineEdit').setText(', '.join(str(val) for val in skinLabels.keys()))
skinLabel = self.bestLabel(skinLabels, ['skin'])
self.get('SkinLabelComboBox').setCurrentColor(skinLabel)
self.createMergeLabelsOutput(volumeNode)
self.addObserver(volumeNode, 'ModifiedEvent', self.updateMergeLabels)
self.addObserver(labelmapDisplayNode, 'ModifiedEvent', self.updateMergeLabels)
self.validateMergeLabels()
def validateMergeLabels(self):
cliNode = self.getCLINode(slicer.modules.changelabel)
valid = (cliNode.GetStatusString() == 'Completed')
self.get('MergeLabelsOutputNodeToolButton').enabled = valid
self.get('MergeLabelsSaveToolButton').enabled = valid
    self.get('MergeLabelsCollapsibleGroupBox').setProperty('valid', valid)
if valid:
self.get('VolumeRenderInputNodeComboBox').setCurrentNode(
self.get('MergeLabelsInputNodeComboBox').currentNode())
self.get('BoneModelMakerApplyPushButton').enabled = not self.isWorkflow(0) or valid
def searchLabels(self, colorNode, label):
""" Search the color node for all the labels that contain the word 'label'
"""
labels = {}
for index in range(colorNode.GetNumberOfColors()):
if label in colorNode.GetColorName(index).lower():
labels[index] = colorNode.GetColorName(index)
return labels
def bestLabel(self, labels, labelNames):
""" Return the label from a [index, colorName] map that fits the best the
label name
"""
bestLabels = labels
if (len(bestLabels) == 0):
return -1
labelIndex = 0
for labelName in labelNames:
newBestLabels = {}
for key in bestLabels.keys():
startswith = bestLabels[key].lower().startswith(labelName)
contains = labelName in bestLabels[key].lower()
if (labelIndex == 0 and startswith) or (labelIndex > 0 and contains):
newBestLabels[key] = bestLabels[key]
if len(newBestLabels) == 1:
return newBestLabels.keys()[0]
bestLabels = newBestLabels
labelIndex = labelIndex + 1
return bestLabels.keys()[0]
def createMergeLabelsOutput(self, node):
""" Make sure the output scalar volume node is a node with a -posed suffix.
Note that the merged volume is used only by the model makers. This
Should not be used by the PoseLabelmap filter.
"""
if node == None or self.IsSetup:
return
# Don't create the node if the name already contains "merged"
if node.GetName().lower().find('merged') != -1:
return
nodeName = '%s-merged' % node.GetName()
# make sure such node does not already exist.
mergedNode = self.getFirstNodeByNameAndClass(nodeName, 'vtkMRMLScalarVolumeNode')
if mergedNode == None:
newNode = self.get('MergeLabelsOutputNodeComboBox').addNode()
newNode.SetName(nodeName)
else:
self.get('MergeLabelsOutputNodeComboBox').setCurrentNode(mergedNode)
def mergeLabelsParameters(self):
boneLabels = self.get('BoneLabelsLineEdit').text
skinLabels = self.get('SkinLabelsLineEdit').text
parameters = {}
parameters["InputVolume"] = self.get('MergeLabelsInputNodeComboBox').currentNode()
parameters["OutputVolume"] = self.get('MergeLabelsOutputNodeComboBox').currentNode()
# That's my dream:
#parameters["InputLabelNumber"] = len(boneLabels.split(','))
#parameters["InputLabelNumber"] = len(skinLabels.split(','))
#parameters["InputLabel"] = boneLabels
#parameters["InputLabel"] = skinLabels
#parameters["OutputLabel"] = self.get('BoneLabelComboBox').currentColor
#parameters["OutputLabel"] = self.get('SkinLabelComboBox').currentColor
# But that's how it is done for now
parameters["InputLabelNumber"] = str(len(boneLabels.split(','))) + ', ' + str(len(skinLabels.split(',')))
parameters["InputLabel"] = boneLabels + ', ' + skinLabels
parameters["OutputLabel"] = str(self.get('BoneLabelComboBox').currentColor) + ', ' + str(self.get('SkinLabelComboBox').currentColor)
return parameters
def runMergeLabels(self, run):
if run:
cliNode = self.getCLINode(slicer.modules.changelabel)
parameters = self.mergeLabelsParameters()
self.get('MergeLabelsApplyPushButton').setChecked(True)
self.observeCLINode(cliNode, self.onMergeLabelsCLIModified)
cliNode = slicer.cli.run(slicer.modules.changelabel, cliNode, parameters, wait_for_completion = False)
else:
cliNode = self.observer(self.StatusModifiedEvent, self.onMergeLabelsCLIModified)
self.get('MergeLabelsApplyPushButton').enabled = False
cliNode.Cancel()
def onMergeLabelsCLIModified(self, cliNode, event):
if cliNode.GetStatusString() == 'Completed':
# apply label map
newNode = self.get('MergeLabelsOutputNodeComboBox').currentNode()
if newNode != None:
displayNode = newNode.GetDisplayNode()
if displayNode == None:
volumesLogic = slicer.modules.volumes.logic()
volumesLogic.SetVolumeAsLabelMap(newNode, 1)
displayNode = newNode.GetDisplayNode()
colorNode = self.get('LabelmapColorNodeComboBox').currentNode()
if displayNode != None and colorNode != None:
displayNode.SetAndObserveColorNodeID(colorNode.GetID())
self.validateMergeLabels()
if not cliNode.IsBusy():
self.get('MergeLabelsApplyPushButton').setChecked(False)
self.get('MergeLabelsApplyPushButton').enabled = True
print 'MergeLabels %s' % cliNode.GetStatusString()
self.removeObservers(self.onMergeLabelsCLIModified)
def saveMergeLabelsVolumeNode(self):
self.saveFile('Merged label volume', 'VolumeFile', '.mha', self.get('MergeLabelsOutputNodeComboBox'))
def openMergeLabelsModule(self):
self.openModule('ChangeLabel')
cliNode = self.getCLINode(slicer.modules.changelabel)
parameters = self.mergeLabelsParameters()
slicer.cli.setNodeParameters(cliNode, parameters)
#----------------------------------------------------------------------------
# b) Bone Model Maker
def initBoneModelMakerLabels(self):
self.validateBoneModelMakerLabels()
def setupBoneModelMakerLabels(self):
""" Update the labels of the bone model maker
"""
labels = []
labels.append(self.get('BoneLabelComboBox').currentColor)
self.get('BoneModelMakerLabelsLineEdit').setText(', '.join(str(val) for val in labels))
self.validateBoneModelMakerLabels()
def validateBoneModelMakerLabels(self):
cliNode = self.getCLINode(slicer.modules.modelmaker)
valid = cliNode.GetStatusString() == 'Completed'
self.get('BoneModelMakerOutputNodeToolButton').enabled = valid
self.get('BoneModelMakerSaveToolButton').enabled = valid
    self.get('BoneModelMakerCollapsibleGroupBox').setProperty('valid', valid)
self.get('SkinModelMakerApplyPushButton').enabled = not self.isWorkflow(0) or valid
def boneModelFromModelHierarchyNode(self, modelHierarchyNode):
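    # The Model Maker CLI parents its output models under the given model
    # hierarchy node; with the single bone label requested in
    # boneModelMakerParameters(), the first child is the bone model.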
models = vtk.vtkCollection()
modelHierarchyNode.GetChildrenModelNodes(models)
return models.GetItemAsObject(0)
def boneModelMakerParameters(self):
parameters = {}
parameters["InputVolume"] = self.get('BoneModelMakerInputNodeComboBox').currentNode()
parameters["ModelSceneFile"] = self.get('BoneModelMakerOutputNodeComboBox').currentNode()
parameters["Labels"] = self.get('BoneModelMakerLabelsLineEdit').text
parameters["Name"] = 'Bones'
parameters['GenerateAll'] = False
parameters["JointSmoothing"] = False
parameters["SplitNormals"] = True
parameters["PointNormals"] = True
parameters["SkipUnNamed"] = True
parameters["Decimate"] = self.get('BoneModelMakerDecimateSliderWidget').value
parameters["Smooth"] = 10
return parameters
def runBoneModelMaker(self, run):
if run:
cliNode = self.getCLINode(slicer.modules.modelmaker)
parameters = self.boneModelMakerParameters()
self.get('BoneModelMakerApplyPushButton').setChecked(True)
self.observeCLINode(cliNode, self.onBoneModelMakerCLIModified)
cliNode = slicer.cli.run(slicer.modules.modelmaker, cliNode, parameters, wait_for_completion = False)
else:
cliNode = self.observer(self.StatusModifiedEvent, self.onBoneModelMakerCLIModified)
self.get('BoneModelMakerApplyPushButton').enabled = False
cliNode.Cancel()
def onBoneModelMakerCLIModified(self, cliNode, event):
if cliNode.GetStatusString() == 'Completed':
self.reset3DViews()
modelNode = self.boneModelFromModelHierarchyNode(self.get('BoneModelMakerOutputNodeComboBox').currentNode())
self.get('EvalSurfaceWeightInputNodeComboBox').setCurrentNode(modelNode)
self.validateBoneModelMakerLabels()
if not cliNode.IsBusy():
self.get('BoneModelMakerApplyPushButton').setChecked(False)
self.get('BoneModelMakerApplyPushButton').enabled = True
print 'Bone ModelMaker %s' % cliNode.GetStatusString()
self.removeObservers(self.onBoneModelMakerCLIModified)
def saveBoneModelMakerModelNode(self):
modelNode = self.boneModelFromModelHierarchyNode(self.get('BoneModelMakerOutputNodeComboBox').currentNode())
self.saveNode('Bone model', 'ModelFile', '.vtk', modelNode)
def openModelsModule(self):
self.openModule('Models')
def openBoneModelMakerModule(self):
self.openModule('ModelMaker')
cliNode = self.getCLINode(slicer.modules.modelmaker)
parameters = self.boneModelMakerParameters()
slicer.cli.setNodeParameters(cliNode, parameters)
#----------------------------------------------------------------------------
# c) Skin Model Maker
def initSkinModelMakerLabels(self):
import SkinModelMaker
self.SkinModelMakerLogic = SkinModelMaker.SkinModelMakerLogic()
self.validateSkinModelMakerLabels()
def setupSkinModelMakerLabels(self, volumeNode):
""" Update the labels of the skin model maker
"""
if volumeNode == None:
return
labelmapDisplayNode = volumeNode.GetDisplayNode()
if labelmapDisplayNode == None:
return
colorNode = labelmapDisplayNode.GetColorNode()
    if colorNode == None:
      self.get('SkinModelMakerBackgroundLabelSpinBox').setValue(0)
    else:
      airLabels = self.searchLabels(colorNode, 'air')
      if len(airLabels) > 0:
        # the outside of the body is most likely the lowest 'air' label
        self.get('SkinModelMakerBackgroundLabelSpinBox').setValue(min(airLabels))
      else:
        # no air label found; the outside is most likely label 0
        self.get('SkinModelMakerBackgroundLabelSpinBox').setValue(0)
self.validateSkinModelMakerLabels()
def validateSkinModelMakerLabels(self):
cliNode = self.getCLINode(slicer.modules.grayscalemodelmaker)
valid = cliNode.GetStatusString() == 'Completed'
self.get('SkinModelMakerOutputNodeToolButton').enabled = valid
self.get('SkinModelMakerSaveToolButton').enabled = valid
self.get('SkinModelMakerToggleVisiblePushButton').enabled = valid
self.get('ArmaturesToggleVisiblePushButton').enabled = valid
    self.get('SkinModelMakerCollapsibleGroupBox').setProperty('valid', valid)
self.validateExtractPage(validateSections = False)
def skinModelMakerParameters(self):
parameters = {}
parameters["InputVolume"] = self.get('SkinModelMakerInputNodeComboBox').currentNode()
parameters["OutputGeometry"] = self.get('SkinModelMakerOutputNodeComboBox').currentNode()
parameters["BackgroundLabel"] = self.get('SkinModelMakerBackgroundLabelSpinBox').value
parameters["SkinLabel"] = self.get('SkinModelMakerSkinLabelLineEdit').text
parameters["Decimate"] = False
parameters["Spacing"] = '5,5,5'
return parameters
def runSkinModelMaker(self, run):
if run:
parameters = self.skinModelMakerParameters()
self.get('SkinModelMakerApplyPushButton').setChecked(True)
self.observeCLINode(self.SkinModelMakerLogic.GetCLINode(), self.onSkinModelMakerCLIModified)
self.SkinModelMakerLogic.CreateSkinModel(parameters, wait_for_completion = False)
else:
self.get('SkinModelMakerApplyPushButton').enabled = False
self.SkinModelMakerLogic.Cancel()
def onSkinModelMakerCLIModified(self, cliNode, event):
if cliNode.GetStatusString() == 'Completed':
# Set opacity
newNode = self.get('SkinModelMakerOutputNodeComboBox').currentNode()
newNodeDisplayNode = newNode.GetModelDisplayNode()
newNodeDisplayNode.SetOpacity(0.2)
# Set color
colorNode = self.get('SkinModelMakerInputNodeComboBox').currentNode().GetDisplayNode().GetColorNode()
color = [0, 0, 0]
      colorNode.GetLookupTable().GetColor(self.get('SkinLabelComboBox').currentColor, color)
newNodeDisplayNode.SetColor(color)
# Set Clip intersection ON
newNodeDisplayNode.SetSliceIntersectionVisibility(1)
# Reset camera
self.reset3DViews()
self.validateSkinModelMakerLabels()
if not cliNode.IsBusy():
self.get('SkinModelMakerApplyPushButton').setChecked(False)
self.get('SkinModelMakerApplyPushButton').enabled = True
print 'Skin ModelMaker %s' % cliNode.GetStatusString()
self.removeObservers(self.onSkinModelMakerCLIModified)
def saveSkinModelMakerModelNode(self):
self.saveFile('Skin model', 'ModelFile', '.vtk', self.get('SkinModelMakerOutputNodeComboBox'))
def openSkinModelMakerModule(self):
self.openModule('SkinModelMaker')
cliNode = self.getCLINode(slicer.modules.skinmodelmaker)
parameters = self.skinModelMakerParameters()
slicer.cli.setNodeParameters(cliNode, parameters)
def setSkinModelMakerSkinLabel(self):
self.get('SkinModelMakerSkinLabelLineEdit').text = self.get('SkinLabelComboBox').currentColor
def updateSkinNodeVisibility(self):
skinModel = self.get('SkinModelMakerOutputNodeComboBox').currentNode()
if skinModel != None:
skinModel.SetDisplayVisibility(not skinModel.GetDisplayVisibility())
#----------------------------------------------------------------------------
# 3) Rigging
#----------------------------------------------------------------------------
def initArmaturePage(self):
self.initArmature()
def validateArmaturePage(self, validateSections = True):
if validateSections:
self.validateArmature()
valid = self.get('ArmaturesCollapsibleGroupBox').property('valid')
self.get('NextPageToolButton').enabled = not self.isWorkflow(0) or valid
def openArmaturePage(self):
self.reset3DViews()
# Switch to 3D View only
manager = slicer.app.layoutManager()
manager.setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUp3DView)
#----------------------------------------------------------------------------
# 3.A) Armature
def initArmature(self):
presetComboBox = self.findWidget(slicer.modules.armatures.widgetRepresentation(), 'LoadArmatureFromModelComboBox')
for i in range(presetComboBox.count):
text = presetComboBox.itemText(i)
if text:
        self.get('ArmaturesPresetComboBox').addItem(text)
else:
self.get('ArmaturesPresetComboBox').insertSeparator(self.get('ArmaturesPresetComboBox').count)
self.get('ArmaturesPresetComboBox').setCurrentIndex(-1)
self.validateArmature()
def validateArmature(self):
valid = (self.get('ArmaturesArmatureNodeComboBox').currentNode() != None and
self.get('ArmaturesArmatureNodeComboBox').currentNode().GetAssociatedNode() != None)
self.get('ArmaturesCollapsibleGroupBox').setProperty('valid', valid)
self.validateArmaturePage(validateSections = False)
def setCurrentArmatureModelNode(self, armatureNode):
if armatureNode != None:
modelNode = armatureNode.GetAssociatedNode()
if modelNode != None:
self.get('VolumeSkinningAmartureNodeComboBox').setCurrentNode(modelNode)
else:
self.addObserver(armatureNode, 'ModifiedEvent', self.onArmatureNodeModified)
self.validateArmature()
def loadArmaturePreset(self, index):
if index == -1:
return
presetComboBox = self.findWidget(slicer.modules.armatures.widgetRepresentation(), 'LoadArmatureFromModelComboBox')
presetComboBox.setCurrentIndex(index)
self.get('ArmaturesPresetComboBox').setCurrentIndex(-1)
def onArmatureNodeAdded(self, armatureNode):
self.get('ArmaturesArmatureNodeComboBox').setCurrentNode(armatureNode)
name = 'armature'
if self.get('LabelmapVolumeNodeComboBox').currentNode() != None:
name = self.get('LabelmapVolumeNodeComboBox').currentNode().GetName() + '-armature'
name = slicer.mrmlScene.GenerateUniqueName(name)
armatureNode.SetName(name)
def onArmatureNodeModified(self, armatureNode, event):
    '''This method can be called when a previously (or still) current armature
    node is modified but did not yet have a model node at the time it was set
    current. It retries setting the armature model on the model node
    comboboxes.'''
self.removeObservers(self.onArmatureNodeModified)
if self.get('ArmaturesArmatureNodeComboBox').currentNode() == armatureNode:
self.setCurrentArmatureModelNode(armatureNode)
def loadArmatureNode(self):
self.loadFile('Armature', 'ArmatureFile', self.get('ArmaturesArmatureNodeComboBox'))
def saveArmatureNode(self):
armatureNode = self.get('ArmaturesArmatureNodeComboBox').currentNode()
modelNode = armatureNode.GetAssociatedNode()
self.saveNode('Armature', 'ModelFile', '.vtk', modelNode)
def openArmaturesModule(self):
    # Finally open the Armatures module
self.openModule('Armatures')
#----------------------------------------------------------------------------
# 4) Skinning
#----------------------------------------------------------------------------
def initSkinningPage(self):
self.initVolumeSkinning()
self.initEditSkinnedVolume()
def validateSkinningPage(self, validateSections = True):
if validateSections:
self.validateVolumeSkinning()
self.validateEditSkinnedVolume()
valid = self.get('EditSkinnedVolumeCollapsibleGroupBox').property('valid')
self.get('NextPageToolButton').enabled = not self.isWorkflow(0) or valid
def openSkinningPage(self):
# Switch to FourUp
slicer.app.layoutManager().setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutFourUpView)
# Create output if necessary
if not self.volumeSkinningCreateOutputConnected:
self.get('VolumeSkinningInputVolumeNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.createOutputSkinnedVolume)
self.volumeSkinningCreateOutputConnected = True
self.createOutputSkinnedVolume(self.get('VolumeSkinningInputVolumeNodeComboBox').currentNode())
#----------------------------------------------------------------------------
# a) Volume Skinning
def initVolumeSkinning(self):
self.validateVolumeSkinning()
def validateVolumeSkinning(self):
cliNode = self.getCLINode(slicer.modules.volumeskinning)
valid = cliNode.GetStatusString() == 'Completed'
self.get('VolumeSkinningOutputVolumeNodeToolButton').enabled = valid
self.get('VolumeSkinningSaveToolButton').enabled = valid
self.get('VolumeSkinningCollapsibleGroupBox').setProperty('valid', valid)
self.get('EditSkinnedVolumeGoToEditorPushButton').enabled = not self.isWorkflow(0) or valid
def volumeSkinningParameters(self):
parameters = {}
parameters["RestVolume"] = self.get('VolumeSkinningInputVolumeNodeComboBox').currentNode()
parameters["ArmaturePoly"] = self.get('VolumeSkinningAmartureNodeComboBox').currentNode()
parameters["SkinnedVolume"] = self.get('VolumeSkinningOutputVolumeNodeComboBox').currentNode()
#parameters["Padding"] = 1
#parameters["Debug"] = False
#parameters["ArmatureInRAS"] = False
return parameters
def runVolumeSkinning(self, run):
if run:
cliNode = self.getCLINode(slicer.modules.volumeskinning)
parameters = self.volumeSkinningParameters()
self.get('VolumeSkinningApplyPushButton').setChecked(True)
self.observeCLINode(cliNode, self.onVolumeSkinningCLIModified)
cliNode = slicer.cli.run(slicer.modules.volumeskinning, cliNode, parameters, wait_for_completion = False)
else:
cliNode = self.observer(self.StatusModifiedEvent, self.onVolumeSkinningCLIModified)
self.get('VolumeSkinningApplyPushButton').enabled = False
cliNode.Cancel()
def onVolumeSkinningCLIModified(self, cliNode, event):
if cliNode.GetStatusString() == 'Completed':
self.validateVolumeSkinning()
if not cliNode.IsBusy():
self.get('VolumeSkinningApplyPushButton').setChecked(False)
self.get('VolumeSkinningApplyPushButton').enabled = True
print 'VolumeSkinning %s' % cliNode.GetStatusString()
self.removeObservers(self.onVolumeSkinningCLIModified)
def loadSkinningInputVolumeNode(self):
self.loadLabelmapFile('Input volume', 'VolumeFile', self.get('VolumeSkinningInputVolumeNodeComboBox'))
def saveSkinningVolumeNode(self):
self.saveFile('Skinned volume', 'VolumeFile', '.mha', self.get('VolumeSkinningOutputVolumeNodeComboBox'))
def openVolumeSkinningModule(self):
self.openModule('VolumeSkinning')
cliNode = self.getCLINode(slicer.modules.volumeskinning)
parameters = self.volumeSkinningParameters()
slicer.cli.setNodeParameters(cliNode, parameters)
def createOutputSkinnedVolume(self, node):
if node == None:
return
nodeName = '%s-skinned' % node.GetName()
skinnedNode = self.getFirstNodeByNameAndClass(nodeName, 'vtkMRMLScalarVolumeNode')
if skinnedNode == None:
newNode = self.get('VolumeSkinningOutputVolumeNodeComboBox').addNode()
newNode.SetName(nodeName)
else:
self.get('VolumeSkinningOutputVolumeNodeComboBox').setCurrentNode(skinnedNode)
#----------------------------------------------------------------------------
# b) Edit skinned volume
def initEditSkinnedVolume(self):
self.validateEditSkinnedVolume()
def validateEditSkinnedVolume(self):
skinnedVolume = self.get('EditSkinnedVolumeNodeComboBox').currentNode()
canEdit = False
canSave = False
if skinnedVolume != None:
canEdit = skinnedVolume.GetDisplayNode() != None
canSave = canEdit and skinnedVolume.GetModifiedSinceRead()
self.get('EditSkinnedVolumeGoToEditorPushButton').enabled = canEdit
self.get('EditSkinnedVolumeNodeSaveToolButton').enabled = canSave
self.get('EditSkinnedVolumeSaveToolButton').enabled = canSave
valid = canEdit
self.get('EditSkinnedVolumeCollapsibleGroupBox').setProperty('valid', valid)
if valid:
self.get('VolumeRenderInputNodeComboBox').setCurrentNode(
self.get('EditSkinnedVolumeNodeComboBox').currentNode())
self.get('VolumeRenderLabelsLineEdit').text = ''
self.validateSkinningPage(validateSections = False)
def editSkinnedVolumeParameterChanged(self, skinnedVolume = None, event = None):
self.removeObservers(self.editSkinnedVolumeParameterChanged)
if skinnedVolume != None:
self.addObserver(skinnedVolume, 'ModifiedEvent', self.editSkinnedVolumeParameterChanged)
self.validateEditSkinnedVolume()
def loadEditSkinnedVolumeNode(self):
self.loadLabelmapFile('Skinning volume', 'VolumeFile', self.get('EditSkinnedVolumeNodeComboBox'))
def saveEditSkinnedVolumeNode(self):
self.saveFile('Skinned volume', 'VolumeFile', '.mha', self.get('EditSkinnedVolumeNodeComboBox'))
def openEditorModule(self):
self.removeObservers(self.editSkinnedVolumeParameterChanged)
self.openModule('Editor')
editorWidget = slicer.modules.editor.widgetRepresentation()
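    # Grab the Editor's master volume selector, assumed here to be its first
    # qMRMLNodeComboBox child, restrict it to labelmaps and select the
    # skinned volume; clicking the set button then loads it in the editor.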
masterVolumeNodeComboBox = editorWidget.findChild('qMRMLNodeComboBox')
masterVolumeNodeComboBox.addAttribute('vtkMRMLScalarVolumeNode', 'LabelMap', 1)
masterVolumeNodeComboBox.setCurrentNode(self.get('EditSkinnedVolumeNodeComboBox').currentNode())
setButton = editorWidget.findChild('QPushButton')
setButton.click()
#----------------------------------------------------------------------------
# 5) Weights
#----------------------------------------------------------------------------
def initWeightsPage(self):
self.initComputeArmatureWeight()
self.initEvalSurfaceWeight()
def setDefaultPath(self, *args):
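    # Build a default output directory for the weights from the input volume
    # name and the scale factor, e.g. '<volume>-weights-2x' under the user's
    # home directory.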
defaultName = 'weights-%sx' % self.get('ComputeArmatureWeightScaleFactorSpinBox').value
currentNode = self.get('ComputeArmatureWeightInputVolumeNodeComboBox').currentNode()
if currentNode != None:
defaultName = '%s-%s' % (currentNode.GetName(), defaultName)
defaultPath = qt.QDir.home().absoluteFilePath(defaultName)
self.get('ComputeArmatureWeightOutputPathLineEdit').setCurrentPath(defaultPath)
# observe the input volume node in case its name is changed
    self.removeObservers(self.setDefaultPath)
    if currentNode != None:
      self.addObserver(currentNode, 'ModifiedEvent', self.setDefaultPath)
def validateWeightsPage(self, validateSections = True):
if validateSections:
self.validateComputeArmatureWeight()
self.validateEvalSurfaceWeight()
valid = self.get('EvalSurfaceWeightCollapsibleGroupBox').property('valid')
self.get('NextPageToolButton').enabled = not self.isWorkflow(0) or valid
def openWeightsPage(self):
pass
#----------------------------------------------------------------------------
# a) Compute Armature Weight
def initComputeArmatureWeight(self):
self.validateComputeArmatureWeight()
def validateComputeArmatureWeight(self):
cliNode = self.getCLINode(slicer.modules.computearmatureweight)
valid = cliNode.GetStatusString() == 'Completed'
self.get('ComputeArmatureWeightCollapsibleGroupBox').setProperty('valid', valid)
self.get('EvalSurfaceWeightApplyPushButton').enabled = not self.isWorkflow(0) or valid
def computeArmatureWeightParameters(self):
parameters = {}
parameters["RestLabelmap"] = self.get('ComputeArmatureWeightInputVolumeNodeComboBox').currentNode()
parameters["ArmaturePoly"] = self.get('ComputeArmatureWeightAmartureNodeComboBox').currentNode()
parameters["SkinnedVolume"] = self.get('ComputeArmatureWeightSkinnedVolumeVolumeNodeComboBox').currentNode()
parameters["WeightDirectory"] = str(self.get('ComputeArmatureWeightOutputPathLineEdit').currentPath)
parameters["BackgroundValue"] = self.get('ComputeArmatureWeightBackgroundSpinBox').value
parameters["BoneLabel"] = self.get('ComputeArmatureWeightBoneSpinBox').value
parameters["Padding"] = self.get('ComputeArmatureWeightPaddingSpinBox').value
parameters["ScaleFactor"] = self.get('ComputeArmatureWeightScaleFactorSpinBox').value
parameters["MaximumParenthoodDistance"] = '4'
#parameters["FirstEdge"] = '0'
#parameters["LastEdge"] = '-1'
#parameters["BinaryWeight"] = False
#parameters["SmoothingIteration"] = '10'
#parameters["Debug"] = False
parameters["RunSequential"] = True
return parameters
def runComputeArmatureWeight(self, run):
if run:
cliNode = self.getCLINode(slicer.modules.computearmatureweight)
parameters = self.computeArmatureWeightParameters()
if not qt.QDir(parameters["WeightDirectory"]).exists():
        answer = qt.QMessageBox.question(0, 'Create directory?',
          'The path %s does not exist. Do you want to create it?' % parameters["WeightDirectory"],
          qt.QMessageBox.Yes | qt.QMessageBox.No | qt.QMessageBox.Cancel)
if answer == qt.QMessageBox.Yes:
qt.QDir(parameters["WeightDirectory"]).mkpath(parameters["WeightDirectory"])
else:
self.get('ComputeArmatureWeightApplyPushButton').setChecked(False)
return
self.get('ComputeArmatureWeightApplyPushButton').setChecked(True)
self.observeCLINode(cliNode, self.onComputeArmatureWeightCLIModified)
cliNode = slicer.cli.run(slicer.modules.computearmatureweight, cliNode, parameters, wait_for_completion = False)
else:
cliNode = self.observer(self.StatusModifiedEvent, self.onComputeArmatureWeightCLIModified)
self.get('ComputeArmatureWeightApplyPushButton').enabled = False
cliNode.Cancel()
def onComputeArmatureWeightCLIModified(self, cliNode, event):
if cliNode.GetStatusString() == 'Completed':
# add path if not already added (bug fixed in CTK #b277f5d4)
if self.get('ComputeArmatureWeightOutputPathLineEdit').findChild('QComboBox').findText(
self.get('ComputeArmatureWeightOutputPathLineEdit').currentPath) == -1:
self.get('ComputeArmatureWeightOutputPathLineEdit').addCurrentPathToHistory()
self.validateComputeArmatureWeight()
if not cliNode.IsBusy():
self.get('ComputeArmatureWeightApplyPushButton').setChecked(False)
self.get('ComputeArmatureWeightApplyPushButton').enabled = True
print 'ComputeArmatureWeight %s' % cliNode.GetStatusString()
self.removeObservers(self.onComputeArmatureWeightCLIModified)
def openComputeArmatureWeightModule(self):
self.openModule('ComputeArmatureWeight')
cliNode = self.getCLINode(slicer.modules.computearmatureweight)
parameters = self.computeArmatureWeightParameters()
slicer.cli.setNodeParameters(cliNode, parameters)
#----------------------------------------------------------------------------
# c) Eval Weight
def initEvalSurfaceWeight(self):
self.validateEvalSurfaceWeight()
def validateEvalSurfaceWeight(self):
cliNode = self.getCLINode(slicer.modules.evalsurfaceweight)
valid = cliNode.GetStatusString() == 'Completed'
self.get('EvalSurfaceWeightCollapsibleGroupBox').setProperty('valid', valid)
self.get('EvalSurfaceWeightOutputNodeToolButton').enabled = valid
self.validateWeightsPage(validateSections = False)
def evalSurfaceWeightParameterChanged(self):
self.get('EvalSurfaceWeightOutputNodeToolButton').enabled = False
def evalSurfaceWeightParameters(self):
parameters = {}
parameters["InputSurface"] = self.get('EvalSurfaceWeightInputNodeComboBox').currentNode()
parameters["OutputSurface"] = self.get('EvalSurfaceWeightOutputNodeComboBox').currentNode()
parameters["WeightDirectory"] = str(self.get('EvalSurfaceWeightWeightPathLineEdit').currentPath)
#parameters["IsSurfaceInRAS"] = False
#parameters["PrintDebug"] = False
return parameters
def runEvalSurfaceWeight(self, run):
if run:
cliNode = self.getCLINode(slicer.modules.evalsurfaceweight)
parameters = self.evalSurfaceWeightParameters()
self.get('EvalSurfaceWeightApplyPushButton').setChecked(True)
self.observeCLINode(cliNode, self.onEvalSurfaceWeightCLIModified)
cliNode = slicer.cli.run(slicer.modules.evalsurfaceweight, cliNode, parameters, wait_for_completion = False)
else:
cliNode = self.observer(self.StatusModifiedEvent, self.onEvalSurfaceWeightCLIModified)
self.get('EvalSurfaceWeightApplyPushButton').enabled = False
cliNode.Cancel()
def onEvalSurfaceWeightCLIModified(self, cliNode, event):
if cliNode.GetStatusString() == 'Completed':
self.validateEvalSurfaceWeight()
if self.get('PoseSurfaceApplyPushButton').checkState != qt.Qt.Unchecked:
# Pose the surface as soon as the weights are computed.
self.runPoseSurface(True)
if not cliNode.IsBusy():
self.get('EvalSurfaceWeightApplyPushButton').setChecked(False)
self.get('EvalSurfaceWeightApplyPushButton').enabled = True
print 'EvalSurfaceWeight %s' % cliNode.GetStatusString()
self.removeObservers(self.onEvalSurfaceWeightCLIModified)
def loadEvalSurfaceWeightInputNode(self):
self.loadFile('Model to eval', 'ModelFile', self.get('EvalSurfaceWeightInputNodeComboBox'))
def saveEvalSurfaceWeightOutputNode(self):
self.saveFile('Evaluated Model', 'ModelFile', '.vtk', self.get('EvalSurfaceWeightOutputNodeComboBox'))
def openEvalSurfaceWeight(self):
self.openModule('EvalSurfaceWeight')
    cliNode = self.getCLINode(slicer.modules.evalsurfaceweight)
    parameters = self.evalSurfaceWeightParameters()
slicer.cli.setNodeParameters(cliNode, parameters)
#----------------------------------------------------------------------------
# 6) Pose Armature & Pose surface
#----------------------------------------------------------------------------
def initPoseArmaturePage(self):
self.initPoseArmature()
self.initPoseSurface()
def validatePoseArmaturePage(self, validateSections = True):
if validateSections:
self.validatePoseArmature()
self.validatePoseSurface()
valid = self.get('PoseSurfaceCollapsibleGroupBox').property('valid')
self.get('NextPageToolButton').enabled = not self.isWorkflow(0) or valid
def openPoseArmaturePage(self):
# Create output if necessary
if not self.poseSurfaceCreateOutputConnected:
self.get('PoseSurfaceInputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.createOutputPoseSurface)
self.poseSurfaceCreateOutputConnected = True
self.createOutputPoseSurface(self.get('PoseSurfaceInputNodeComboBox').currentNode())
self.autoRunPoseSurface(self.get('PoseSurfaceApplyPushButton').checkState != qt.Qt.Unchecked)
armatureLogic = slicer.modules.armatures.logic()
if armatureLogic != None:
armatureLogic.SetActiveArmatureWidgetState(3) # 3 is Pose
self.poseSurfaceInputNodeChanged()
slicer.app.layoutManager().setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUp3DView)
#----------------------------------------------------------------------------
# a) Pose Armature
def initPoseArmature(self):
self.validatePoseArmature()
def validatePoseArmature(self):
valid = self.get('PoseArmatureArmatureNodeComboBox').currentNode() != None
self.get('PoseArmaturesCollapsibleGroupBox').setProperty('valid', valid)
self.get('PoseSurfaceApplyPushButton').enabled = not self.isWorkflow(0) or valid
def setPoseArmatureModelNode(self, armatureNode):
if armatureNode == None:
return
modelNode = armatureNode.GetAssociatedNode()
self.get('PoseSurfaceArmatureInputNodeComboBox').setCurrentNode(modelNode)
armatureLogic = slicer.modules.armatures.logic()
if armatureLogic != None and self.WorkflowWidget.currentIndex == 4:
armatureLogic.SetActiveArmature(armatureNode)
armatureLogic.SetActiveArmatureWidgetState(3) # 3 is Pose
self.validatePoseArmature()
def savePoseArmatureArmatureNode(self):
armatureNode = self.get('PoseArmatureArmatureNodeComboBox').currentNode()
modelNode = armatureNode.GetAssociatedNode()
self.saveNode('Armature', 'ModelFile', '.vtk', modelNode)
def openPosedArmatureModule(self):
self.openModule('Armatures')
#----------------------------------------------------------------------------
# b) Pose Surface
def initPoseSurface(self):
self.validatePoseSurface()
def validatePoseSurface(self):
cliNode = self.getCLINode(slicer.modules.posesurface)
valid = cliNode.GetStatusString() == 'Completed'
    self.get('PoseSurfaceOutputNodeToolButton').enabled = valid
self.get('PoseSurfaceCollapsibleGroupBox').setProperty('valid', valid)
self.validatePoseArmaturePage(validateSections = False)
def poseSurfaceParameterChanged(self):
self.get('PoseSurfaceOutputNodeToolButton').enabled = False
cliNode = self.getCLINode(slicer.modules.posesurface)
parameters = self.poseSurfaceParameters()
slicer.cli.setNodeParameters(cliNode, parameters)
def poseSurfaceInputNodeChanged(self):
"""Makes sure the weights are computed for the new input surface."""
surfaceNode = self.get('PoseSurfaceInputNodeComboBox').currentNode()
armatureModelNode = self.get('PoseSurfaceArmatureInputNodeComboBox').currentNode()
if surfaceNode != None and surfaceNode.GetPolyData() != None and armatureModelNode != None:
pointData = surfaceNode.GetPolyData().GetPointData()
transforms = armatureModelNode.GetPolyData().GetCellData().GetArray('Transforms')
if transforms:
if pointData.GetNumberOfArrays() < transforms.GetNumberOfTuples():
self.runEvalSurfaceWeight(True)
self.poseSurfaceParameterChanged()
def poseSurfaceParameters(self):
# Setup CLI node on input changed or apply changed
parameters = {}
parameters["ArmaturePoly"] = self.get('PoseSurfaceArmatureInputNodeComboBox').currentNode()
parameters["SurfaceInput"] = self.get('PoseSurfaceInputNodeComboBox').currentNode()
parameters["WeightDirectory"] = str(self.get('PoseSurfaceWeightInputPathLineEdit').currentPath)
parameters["OutputSurface"] = self.get('PoseSurfaceOutputNodeComboBox').currentNode()
parameters["MaximumParenthoodDistance"] = '4'
#parameters["IsSurfaceInRAS"] = False
#parameters["IsArmatureInRAS"] = False
parameters["LinearBlend"] = True # much faster
return parameters
def autoRunPoseSurface(self, autoRun):
cliNode = self.getCLINode(slicer.modules.posesurface)
if autoRun:
parameters = self.poseSurfaceParameters()
slicer.cli.setNodeParameters(cliNode, parameters)
cliNode.SetAutoRunMode(cliNode.AutoRunOnAnyInputEvent)
cliNode.SetAutoRun(autoRun)
self.observeCLINode(cliNode, self.onPoseSurfaceCLIModified)
else:
cliNode.SetAutoRun(autoRun)
def runPoseSurface(self, run):
if run:
cliNode = self.getCLINode(slicer.modules.posesurface)
parameters = self.poseSurfaceParameters()
slicer.cli.setNodeParameters(cliNode, parameters)
self.get('PoseSurfaceApplyPushButton').setChecked(True)
self.observeCLINode(cliNode, self.onPoseSurfaceCLIModified)
      cliNode = slicer.cli.run(slicer.modules.posesurface, cliNode, parameters, wait_for_completion = False)
else:
cliNode = self.observer(self.StatusModifiedEvent, self.onPoseSurfaceCLIModified)
self.get('PoseSurfaceApplyPushButton').enabled = False
if cliNode != None:
cliNode.Cancel()
def onPoseSurfaceCLIModified(self, cliNode, event):
if cliNode.GetStatusString() == 'Completed':
if self.get('PoseSurfaceInputNodeComboBox').currentNode() != self.get('PoseSurfaceOutputNodeComboBox').currentNode():
self.get('PoseSurfaceInputNodeComboBox').currentNode().GetDisplayNode().SetVisibility(0)
self.get('PoseSurfaceOutputNodeComboBox').currentNode().GetDisplayNode().SetOpacity(
self.get('PoseSurfaceInputNodeComboBox').currentNode().GetDisplayNode().GetOpacity())
self.get('PoseSurfaceOutputNodeComboBox').currentNode().GetDisplayNode().SetColor(
self.get('PoseSurfaceInputNodeComboBox').currentNode().GetDisplayNode().GetColor())
self.validatePoseSurface()
if not cliNode.IsBusy():
self.get('PoseSurfaceApplyPushButton').setChecked(False)
self.get('PoseSurfaceApplyPushButton').enabled = True
print 'PoseSurface %s' % cliNode.GetStatusString()
self.removeObservers(self.onPoseSurfaceCLIModified)
def loadPoseSurfaceInputNode(self):
self.loadFile('Model to pose', 'ModelFile', self.get('PoseSurfaceInputNodeComboBox'))
def savePoseSurfaceOutputNode(self):
self.saveFile('Posed model', 'ModelFile', '.vtk', self.get('PoseSurfaceOutputNodeComboBox'))
def openPoseSurfaceModule(self):
self.openModule('PoseSurface')
cliNode = self.getCLINode(slicer.modules.posesurface)
    parameters = self.poseSurfaceParameters()
slicer.cli.setNodeParameters(cliNode, parameters)
def setWeightDirectory(self, dir):
self.get('EvalSurfaceWeightWeightPathLineEdit').currentPath = dir
self.get('PoseSurfaceWeightInputPathLineEdit').currentPath = dir
self.get('PoseLabelmapWeightPathLineEdit').currentPath = dir
def createOutputPoseSurface(self, node):
if node == None:
return
nodeName = '%s-posed' % node.GetName()
posedNode = self.getFirstNodeByNameAndClass(nodeName, 'vtkMRMLModelNode')
if posedNode == None:
newNode = self.get('PoseSurfaceOutputNodeComboBox').addNode()
newNode.SetName(nodeName)
else:
self.get('PoseSurfaceOutputNodeComboBox').setCurrentNode(posedNode)
#----------------------------------------------------------------------------
# 7) Resample
#----------------------------------------------------------------------------
def initPoseLabelmapPage(self):
self.initPoseLabelmap()
def validatePoseLabelmapPage(self, validateSections = True):
if validateSections:
self.validatePoseLabelmap()
valid = self.get('ResampleCollapsibleGroupBox').property('valid')
self.get('NextPageToolButton').enabled = not self.isWorkflow(0) or valid
def openPoseLabelmapPage(self):
slicer.app.layoutManager().setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutFourUpView)
# Create output if necessary
if not self.poseLabelmapCreateOutputConnected:
self.get('PoseLabelmapInputNodeComboBox').connect('currentNodeChanged(vtkMRMLNode*)', self.createOutputPoseLabelmap)
self.poseLabelmapCreateOutputConnected = True
self.createOutputPoseLabelmap(self.get('PoseLabelmapInputNodeComboBox').currentNode())
#----------------------------------------------------------------------------
# a) Pose Labelmap
def initPoseLabelmap(self):
self.validatePoseLabelmap()
def validatePoseLabelmap(self):
cliNode = self.getCLINode(slicer.modules.poselabelmap)
valid = cliNode.GetStatusString() == 'Completed'
self.get('PoseLabelmapOutputNodeToolButton').enabled = valid
self.get('PoseLabelmapSaveToolButton').enabled = valid
self.get('ResampleCollapsibleGroupBox').setProperty('valid', valid)
if valid:
self.get('VolumeRenderInputNodeComboBox').setCurrentNode(
self.get('PoseLabelmapOutputNodeComboBox').currentNode())
self.get('VolumeRenderLabelsLineEdit').text = ''
self.validatePoseLabelmapPage(validateSections = False)
def poseLabelmapParameterChanged(self):
self.get('PoseLabelmapOutputNodeToolButton').enabled = False
self.get('PoseLabelmapSaveToolButton').enabled = False
#
def setupPoseLabelmap(self):
""" Update the labels of the poselabelmap module
"""
labels = []
labels.append(self.get('BoneLabelComboBox').currentColor)
self.get('BoneModelMakerLabelsLineEdit').setText(', '.join(str(val) for val in labels))
self.get('PoseLabelmapHighPrecedenceLabelsLineEdit').text = self.get('BoneLabelsLineEdit').text
self.get('PoseLabelmapLowPrecedenceLabelsLineEdit').text = self.get('SkinLabelsLineEdit').text
def poseLabelmapParameters(self):
parameters = {}
parameters["RestLabelmap"] = self.get('PoseLabelmapInputNodeComboBox').currentNode()
parameters["ArmaturePoly"] = self.get('PoseLabelmapArmatureNodeComboBox').currentNode()
parameters["WeightDirectory"] = str(self.get('PoseLabelmapWeightPathLineEdit').currentPath)
parameters["PosedLabelmap"] = self.get('PoseLabelmapOutputNodeComboBox').currentNode()
parameters["LinearBlend"] = False
parameters["Padding"] = self.get('PoseLabelmapPaddingSpinBox').value
parameters["MaximumParenthoodDistance"] = '4'
#parameters["MaximumRadius"] = '64'
#parameters["Debug"] = False
#parameters["IsArmatureInRAS"] = False
parameters["HighPrecedenceLabels"] = self.get('PoseLabelmapHighPrecedenceLabelsLineEdit').text
parameters["LowPrecedenceLabels"] = self.get('PoseLabelmapLowPrecedenceLabelsLineEdit').text
return parameters
def runPoseLabelmap(self, run):
if run:
cliNode = self.getCLINode(slicer.modules.poselabelmap)
parameters = self.poseLabelmapParameters()
self.get('PoseLabelmapApplyPushButton').setChecked(True)
self.observeCLINode(cliNode, self.onPoseLabelmapCLIModified)
cliNode = slicer.cli.run(slicer.modules.poselabelmap, cliNode, parameters, wait_for_completion = False)
else:
      cliNode = self.observer(self.StatusModifiedEvent, self.onPoseLabelmapCLIModified)
      self.get('PoseLabelmapApplyPushButton').enabled = False
      if cliNode != None:
        cliNode.Cancel()
def onPoseLabelmapCLIModified(self, cliNode, event):
if cliNode.GetStatusString() == 'Completed':
# apply color table to generated volume
newNode = self.get('PoseLabelmapOutputNodeComboBox').currentNode()
displayNode = newNode.GetDisplayNode()
if displayNode == None:
volumesLogic = slicer.modules.volumes.logic()
volumesLogic.SetVolumeAsLabelMap(newNode, 1)
displayNode = newNode.GetDisplayNode()
inputColorNode = self.get('PoseLabelmapInputNodeComboBox').currentNode().GetDisplayNode().GetColorNode()
if displayNode != None and inputColorNode != None:
displayNode.SetAndObserveColorNodeID(inputColorNode.GetID())
# hide the models that would hide the volume rendering
displayNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLModelDisplayNode')
displayNodes.UnRegister(displayNodes)
for i in range(0, displayNodes.GetNumberOfItems()):
displayNode = displayNodes.GetItemAsObject(i)
if (not displayNode.IsA('vtkMRMLAnnotationDisplayNode')):
displayNode.SetVisibility(0)
self.validatePoseLabelmap()
#enable volume rendering
self.get('ExpandAdvancedPropertiesButton').setChecked(True)
self.get('AdvancedTabWidget').setCurrentWidget(self.get('VolumeRenderingTab'))
self.get('VolumeRenderCollapsibleGroupBox').checked = True
self.get('VolumeRenderCheckBox').setChecked(True)
if not cliNode.IsBusy():
self.get('PoseLabelmapApplyPushButton').setChecked(False)
self.get('PoseLabelmapApplyPushButton').enabled = True
print 'PoseLabelmap %s' % cliNode.GetStatusString()
self.removeObservers(self.onPoseLabelmapCLIModified)
def savePoseLabelmapOutputNode(self):
self.saveFile('Posed labelmap', 'VolumeFile', '.mha', self.get('PoseLabelmapOutputNodeComboBox'))
def openPoseLabelmap(self):
self.openModule('PoseLabelmap')
cliNode = self.getCLINode(slicer.modules.poselabelmap)
parameters = self.poseLabelmapParameters()
slicer.cli.setNodeParameters(cliNode, parameters)
def createOutputPoseLabelmap(self, node):
if node == None:
return
nodeName = '%s-posed' % node.GetName()
posedNode = self.getFirstNodeByNameAndClass(nodeName, 'vtkMRMLScalarVolumeNode')
if posedNode == None:
newNode = self.get('PoseLabelmapOutputNodeComboBox').addNode()
newNode.SetName(nodeName)
else:
self.get('PoseLabelmapOutputNodeComboBox').setCurrentNode(posedNode)
# =================== END ==============
def get(self, objectName):
return self.findWidget(self.widget, objectName)
def getChildren(self, object):
'''Return the list of the children and grand children of a Qt object'''
children = object.children()
allChildren = list(children)
for child in children:
allChildren.extend( self.getChildren(child) )
return allChildren
def findWidget(self, widget, objectName):
if widget.objectName == objectName:
return widget
    for w in widget.children():
      resulting_widget = self.findWidget(w, objectName)
      if resulting_widget:
        return resulting_widget
    return None
def removeObservers(self, method):
for o, e, m, g, t in self.Observations:
if method == m:
o.RemoveObserver(t)
self.Observations.remove([o, e, m, g, t])
def addObserver(self, object, event, method, group = 'none'):
if object == None:
return
if self.hasObserver(object, event, method):
print 'already has observer'
return
tag = object.AddObserver(event, method)
self.Observations.append([object, event, method, group, tag])
def hasObserver(self, object, event, method):
for o, e, m, g, t in self.Observations:
if o == object and e == event and m == method:
return True
return False
def observer(self, event, method):
for o, e, m, g, t in self.Observations:
if e == event and m == method:
return o
return None
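  # Illustrative note (added commentary, not original API): each entry of
  # self.Observations is [object, event, method, group, tag], so a typical
  # life cycle pairs the two helpers above:
  #   self.addObserver(cliNode, self.StatusModifiedEvent, self.onPoseSurfaceCLIModified)
  #   ... the CLI runs and the callback fires ...
  #   self.removeObservers(self.onPoseSurfaceCLIModified)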
def getCLINode(self, cliModule):
""" Return the cli node to use for a given CLI module. Create the node in
    scene if needed. Return None in the case of a scripted module.
"""
cliNode = slicer.mrmlScene.GetFirstNodeByName(cliModule.title)
# Also check path to make sure the CLI isn't a scripted module
if (cliNode == None) and ('qt-scripted-modules' not in cliModule.path):
cliNode = slicer.cli.createNode(cliModule)
cliNode.SetName(cliModule.title)
return cliNode
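  # Hedged usage sketch: because the node is looked up by module title, repeated
  # calls reuse a single MRML CLI node, so parameters and status persist across
  # runs, as in the pattern used throughout this module:
  #   cliNode = self.getCLINode(slicer.modules.posesurface)
  #   slicer.cli.setNodeParameters(cliNode, self.poseSurfaceParameters())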
def loadLabelmapFile(self, title, fileType, nodeComboBox):
volumeNode = self.loadFile(title, fileType, nodeComboBox)
if volumeNode != None:
volumesLogic = slicer.modules.volumes.logic()
volumesLogic.SetVolumeAsLabelMap(volumeNode, 1)
nodeComboBox.setCurrentNode(volumeNode)
def loadFile(self, title, fileType, nodeComboBox):
manager = slicer.app.ioManager()
loadedNodes = vtk.vtkCollection()
properties = {}
res = manager.openDialog(fileType, slicer.qSlicerFileDialog.Read, properties, loadedNodes)
loadedNode = loadedNodes.GetItemAsObject(0)
if res == True:
nodeComboBox.setCurrentNode(loadedNode)
self.reset3DViews()
return loadedNode
def saveFile(self, title, fileType, fileSuffix, nodeComboBox):
self.saveNode(title, fileType, fileSuffix, nodeComboBox.currentNode())
def saveNode(self, title, fileType, fileSuffix, node):
manager = slicer.app.ioManager()
properties = {}
properties['nodeID'] = node.GetID()
properties['defaultFileName'] = node.GetName() + fileSuffix
manager.openDialog(fileType, slicer.qSlicerFileDialog.Write, properties)
def reset3DViews(self):
# Reset focal view around volumes
manager = slicer.app.layoutManager()
for i in range(0, manager.threeDViewCount):
manager.threeDWidget(i).threeDView().resetFocalPoint()
rendererCollection = manager.threeDWidget(i).threeDView().renderWindow().GetRenderers()
for i in range(0, rendererCollection.GetNumberOfItems()):
rendererCollection.GetItemAsObject(i).ResetCamera()
def resetSliceViews(self):
# Reset focal view around volumes
manager = slicer.app.layoutManager()
for i in manager.sliceViewNames():
manager.sliceWidget(i).sliceController().fitSliceToBackground()
def openModule(self, moduleName):
slicer.util.selectModule(moduleName)
def getFirstNodeByNameAndClass(self, name, className):
nodes = slicer.mrmlScene.GetNodesByClass(className)
nodes.UnRegister(nodes)
for i in range(0, nodes.GetNumberOfItems()):
node = nodes.GetItemAsObject(i)
if node.GetName() == name:
return node
return None
def reloadModule(self,moduleName="SimpleWorkflow"):
"""Generic reload method for any scripted module.
    ModuleWizard will substitute the correct default moduleName.
"""
import imp, sys, os, slicer, qt
widgetName = moduleName + "Widget"
# reload the source code
# - set source file path
# - load the module to the global space
filePath = eval('slicer.modules.%s.path' % moduleName.lower())
p = os.path.dirname(filePath)
if not sys.path.__contains__(p):
sys.path.insert(0,p)
fp = open(filePath, "r")
globals()[moduleName] = imp.load_module(
moduleName, fp, filePath, ('.py', 'r', imp.PY_SOURCE))
fp.close()
# rebuild the widget
# - find and hide the existing widget
# - create a new widget in the existing parent
parent = self.widget.parent()
for child in parent.children():
try:
child.hide()
except AttributeError:
pass
self.layout.removeWidget(self.widget)
self.widget.deleteLater()
self.widget = None
# Remove spacer items
item = parent.layout().itemAt(0)
while item:
parent.layout().removeItem(item)
item = parent.layout().itemAt(0)
# create new widget inside existing parent
globals()[widgetName.lower()] = eval(
'globals()["%s"].%s(parent)' % (moduleName, widgetName))
globals()[widgetName.lower()].setup()
# =================== END ==============
class SimpleWorkflowLogic:
"""Implement the logic to calculate label statistics.
Nodes are passed in as arguments.
Results are stored as 'statistics' instance variable.
"""
def __init__(self):
return
class Slicelet(object):
"""A slicer slicelet is a module widget that comes up in stand alone mode
implemented as a python class.
  This class provides common wrapper functionality used by all slicer slicelets.
"""
# TODO: put this in a SliceletLib
  # TODO: parse command line args
def __init__(self, widgetClass=None):
self.parent = qt.QFrame()
self.parent.setLayout( qt.QVBoxLayout() )
# TODO: should have way to pop up python interactor
self.buttons = qt.QFrame()
self.buttons.setLayout( qt.QHBoxLayout() )
self.parent.layout().addWidget(self.buttons)
self.addDataButton = qt.QPushButton("Add Data")
self.buttons.layout().addWidget(self.addDataButton)
self.addDataButton.connect("clicked()",slicer.app.ioManager().openAddDataDialog)
self.loadSceneButton = qt.QPushButton("Load Scene")
self.buttons.layout().addWidget(self.loadSceneButton)
self.loadSceneButton.connect("clicked()",slicer.app.ioManager().openLoadSceneDialog)
if widgetClass:
self.widget = widgetClass(self.parent)
self.widget.setup()
self.parent.show()
class SimpleWorkflowSlicelet(Slicelet):
""" Creates the interface when module is run as a stand alone gui app.
"""
def __init__(self):
super(SimpleWorkflowSlicelet,self).__init__(SimpleWorkflowWidget)
if __name__ == "__main__":
# TODO: need a way to access and parse command line arguments
# TODO: ideally command line args should handle --xml
import sys
print( sys.argv )
slicelet = SimpleWorkflowSlicelet()
|
ricortiz/BenderCraniosynostosis
|
Modules/Scripted/SimpleWorkflow/SimpleWorkflow.py
|
Python
|
apache-2.0
| 91,206
|
[
"VTK"
] |
e2bfdc45c4a2468b60ac0e3a31fc1263f77c071c0dcf143f270d2c137b4a087c
|
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import quandl as Quandl
import wbdata as wb
from scipy import stats
import runProcs
# get_ipython().magic('matplotlib inline')
# # Preliminaries
#
# Import country codes and country lists by income
# In[ ]:
# 1. Import country codes and organize
# 1.1 Import country codes and names from the country_codes file from Quandl's WB WDI documentation: https://www.quandl.com/data/WWDI/documentation/documentation
countryCodes = {}
try:
text_file = open('../txt/country_codes', 'r')
lines = text_file.readlines()
for line in lines:
split = line.split('|')
if len(split)>1:
if len(split[1])==4:
countryCodes[split[0]] = split[1][:-1]
except:
countryCodes = {
'Afghanistan': 'AFG',
'Africa': 'AFR',
'Albania': 'ALB',
'Algeria': 'DZA',
'American Samoa': 'ASM',
'Andorra': 'AND',
'Angola': 'AGO',
'Antigua and Barbuda': 'ATG',
'Arab World': 'ARB',
'Argentina': 'ARG',
'Armenia': 'ARM',
'Aruba': 'ABW',
'Australia': 'AUS',
'Austria': 'AUT',
'Azerbaijan': 'AZE',
'Bahamas, The': 'BHS',
'Bahrain': 'BHR',
'Bangladesh': 'BGD',
'Barbados': 'BRB',
'Belarus': 'BLR',
'Belgium': 'BEL',
'Belize': 'BLZ',
'Benin': 'BEN',
'Bermuda': 'BMU',
'Bhutan': 'BTN',
'Bolivia': 'BOL',
'Bosnia and Herzegovina': 'BIH',
'Botswana': 'BWA',
'Brazil': 'BRA',
'Brunei Darussalam': 'BRN',
'Bulgaria': 'BGR',
'Burkina Faso': 'BFA',
'Burundi': 'BDI',
'Cabo Verde': 'CPV',
'Cambodia': 'KHM',
'Cameroon': 'CMR',
'Canada': 'CAN',
'Caribbean small states': 'CSS',
'Cayman Islands': 'CYM',
'Central African Republic': 'CAF',
'Chad': 'TCD',
'Channel Islands': 'CHI',
'Chile': 'CHL',
'China': 'CHN',
'Colombia': 'COL',
'Comoros': 'COM',
'Congo, Dem. Rep.': 'COD',
'Congo, Rep.': 'COG',
'Costa Rica': 'CRI',
"Cote d'Ivoire": 'CIV',
'Croatia': 'HRV',
'Cuba': 'CUB',
'Curacao': 'CUW',
'Cyprus': 'CYP',
'Czech Republic': 'CZE',
'Denmark': 'DNK',
'Djibouti': 'DJI',
'Dominica': 'DMA',
'Dominican Republic': 'DOM',
'East Asia & Pacific (all income levels)': 'EAS',
'East Asia & Pacific (developing only)': 'EAP',
'East Asia and the Pacific (IFC classification)': 'CEA',
'Ecuador': 'ECU',
'Egypt, Arab Rep.': 'EGY',
'El Salvador': 'SLV',
'Equatorial Guinea': 'GNQ',
'Eritrea': 'ERI',
'Estonia': 'EST',
'Ethiopia': 'ETH',
'Euro area': 'EMU',
'Europe & Central Asia (all income levels)': 'ECS',
'Europe & Central Asia (developing only)': 'ECA',
'Europe and Central Asia (IFC classification)': 'CEU',
'European Union': 'EUU',
'Faeroe Islands': 'FRO',
'Fiji': 'FJI',
'Finland': 'FIN',
'France': 'FRA',
'French Polynesia': 'PYF',
'Gabon': 'GAB',
'Gambia, The': 'GMB',
'Georgia': 'GEO',
'Germany': 'DEU',
'Ghana': 'GHA',
'Greece': 'GRC',
'Greenland': 'GRL',
'Grenada': 'GRD',
'Guam': 'GUM',
'Guatemala': 'GTM',
'Guinea': 'GIN',
'Guinea-Bissau': 'GNB',
'Guyana': 'GUY',
'Haiti': 'HTI',
'Heavily indebted poor countries (HIPC)': 'HPC',
'High income': 'HIC',
'High income: OECD': 'OEC',
'High income: nonOECD': 'NOC',
'Honduras': 'HND',
'Hong Kong SAR, China': 'HKG',
'Hungary': 'HUN',
'Iceland': 'ISL',
'India': 'IND',
'Indonesia': 'IDN',
'Iran, Islamic Rep.': 'IRN',
'Iraq': 'IRQ',
'Ireland': 'IRL',
'Isle of Man': 'IMN',
'Israel': 'ISR',
'Italy': 'ITA',
'Jamaica': 'JAM',
'Japan': 'JPN',
'Jordan': 'JOR',
'Kazakhstan': 'KAZ',
'Kenya': 'KEN',
'Kiribati': 'KIR',
'Korea, Dem. Rep.': 'PRK',
'Korea, Rep.': 'KOR',
'Kosovo': 'KSV',
'Kuwait': 'KWT',
'Kyrgyz Republic': 'KGZ',
'Lao PDR': 'LAO',
'Latin America & Caribbean (all income levels)': 'LCN',
'Latin America & Caribbean (developing only)': 'LAC',
'Latin America and the Caribbean (IFC classification)': 'CLA',
'Latvia': 'LVA',
'Least developed countries: UN classification': 'LDC',
'Lebanon': 'LBN',
'Lesotho': 'LSO',
'Liberia': 'LBR',
'Libya': 'LBY',
'Liechtenstein': 'LIE',
'Lithuania': 'LTU',
'Low & middle income': 'LMY',
'Low income': 'LIC',
'Lower middle income': 'LMC',
'Luxembourg': 'LUX',
'Macao SAR, China': 'MAC',
'Macedonia, FYR': 'MKD',
'Madagascar': 'MDG',
'Malawi': 'MWI',
'Malaysia': 'MYS',
'Maldives': 'MDV',
'Mali': 'MLI',
'Malta': 'MLT',
'Marshall Islands': 'MHL',
'Mauritania': 'MRT',
'Mauritius': 'MUS',
'Mexico': 'MEX',
'Micronesia, Fed. Sts.': 'FSM',
'Middle East & North Africa (all income levels)': 'MEA',
'Middle East & North Africa (developing only)': 'MNA',
'Middle East and North Africa (IFC classification)': 'CME',
'Middle income': 'MIC',
'Moldova': 'MDA',
'Monaco': 'MCO',
'Mongolia': 'MNG',
'Montenegro': 'MNE',
'Morocco': 'MAR',
'Mozambique': 'MOZ',
'Myanmar': 'MMR',
'Namibia': 'NAM',
'Nepal': 'NPL',
'Netherlands': 'NLD',
'New Caledonia': 'NCL',
'New Zealand': 'NZL',
'Nicaragua': 'NIC',
'Niger': 'NER',
'Nigeria': 'NGA',
'North Africa': 'NAF',
'North America': 'NAC',
'Northern Mariana Islands': 'MNP',
'Norway': 'NOR',
'OECD members': 'OED',
'Oman': 'OMN',
'Other small states': 'OSS',
'Pacific island small states': 'PSS',
'Pakistan': 'PAK',
'Palau': 'PLW',
'Panama': 'PAN',
'Papua New Guinea': 'PNG',
'Paraguay': 'PRY',
'Peru': 'PER',
'Philippines': 'PHL',
'Poland': 'POL',
'Portugal': 'PRT',
'Puerto Rico': 'PRI',
'Qatar': 'QAT',
'Romania': 'ROU',
'Russian Federation': 'RUS',
'Rwanda': 'RWA',
'Samoa': 'WSM',
'San Marino': 'SMR',
'Sao Tome and Principe': 'STP',
'Saudi Arabia': 'SAU',
'Senegal': 'SEN',
'Serbia': 'SRB',
'Seychelles': 'SYC',
'Sierra Leone': 'SLE',
'Singapore': 'SGP',
'Sint Maarten (Dutch part)': 'SXM',
'Slovak Republic': 'SVK',
'Slovenia': 'SVN',
'Small states': 'SST',
'Solomon Islands': 'SLB',
'Somalia': 'SOM',
'South Africa': 'ZAF',
'South Asia': 'SAS',
'South Asia (IFC classification)': 'CSA',
'South Sudan': 'SSD',
'Spain': 'ESP',
'Sri Lanka': 'LKA',
'St. Kitts and Nevis': 'KNA',
'St. Lucia': 'LCA',
'St. Martin (French part)': 'MAF',
'St. Vincent and the Grenadines': 'VCT',
'Sub-Saharan Africa (IFC classification)': 'CAA',
'Sub-Saharan Africa (all income levels)': 'SSF',
'Sub-Saharan Africa (developing only)': 'SSA',
'Sub-Saharan Africa excluding South Africa': 'SXZ',
'Sub-Saharan Africa excluding South Africa and Nigeria': 'XZN',
'Sudan': 'SDN',
'Suriname': 'SUR',
'Swaziland': 'SWZ',
'Sweden': 'SWE',
'Switzerland': 'CHE',
'Syrian Arab Republic': 'SYR',
'Tajikistan': 'TJK',
'Tanzania': 'TZA',
'Thailand': 'THA',
'Timor-Leste': 'TLS',
'Togo': 'TGO',
'Tonga': 'TON',
'Trinidad and Tobago': 'TTO',
'Tunisia': 'TUN',
'Turkey': 'TUR',
'Turkmenistan': 'TKM',
'Turks and Caicos Islands': 'TCA',
'Tuvalu': 'TUV',
'Uganda': 'UGA',
'Ukraine': 'UKR',
'United Arab Emirates': 'ARE',
'United Kingdom': 'GBR',
'United States': 'USA',
'Upper middle income': 'UMC',
'Uruguay': 'URY',
'Uzbekistan': 'UZB',
'Vanuatu': 'VUT',
'Venezuela, RB': 'VEN',
'Vietnam': 'VNM',
'Virgin Islands (U.S.)': 'VIR',
'West Bank and Gaza': 'PSE',
'World': 'WLD',
'Yemen, Rep.': 'YEM',
'Zambia': 'ZMB',
'Zimbabwe': 'ZWE'}
# 1.2 Use wbdata to get lists of country codes by income groups
countriesIncomeAll = [i['id'] for i in wb.get_country(incomelevel=['LIC','MIC','HIC'],display=False)]
countriesIncomeH = [i['id'] for i in wb.get_country(incomelevel=['HIC'],display=False)]
countriesIncomeM = [i['id'] for i in wb.get_country(incomelevel=['MIC'],display=False)]
countriesIncomeL = [i['id'] for i in wb.get_country(incomelevel=['LIC'],display=False)]
# countriesIncomeOecd = [i['id'] for i in wb.get_country(incomelevel="OEC", display=False)]
countriesIncomeOecd = ['AUS','CAN','CHL','CZE','DNK','EST','HUN','ISL','ISR','JPN'
                      ,'KOR','NZL','NOR','POL','SVK','SVN','SWE','CHE','USA']
# # Import data from Quandl
# In[ ]:
# 2. Import data from Quandl
# 2.1 Money supply (LCU)
moneyDf = pd.DataFrame({})
for name,key in countryCodes.items():
try:
df = Quandl.get('WWDI/'+key+'_FM_LBL_MONY_CN',authtoken="QqLL1AFCjc31_MVo4qsU")
df.columns = [key]
moneyDf = pd.concat([moneyDf,df],axis=1)
except:
pass
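# Note (added commentary): each Quandl WWDI code is built as '<ISO3>_<indicator>',
# e.g. 'WWDI/USA_FM_LBL_MONY_CN' requests US money supply; countries lacking the
# series raise inside Quandl.get and are simply skipped.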
# 2.2 GDP deflator
deflatorDf = pd.DataFrame({})
for name,key in countryCodes.items():
try:
df = Quandl.get('WWDI/'+key+'_NY_GDP_DEFL_ZS',authtoken="QqLL1AFCjc31_MVo4qsU")
df.columns = [key]
deflatorDf = pd.concat([deflatorDf,df],axis=1)
except:
pass
# 2.3 Real GDP
gdpDf = pd.DataFrame({})
for name,key in countryCodes.items():
try:
df = Quandl.get('WWDI/'+key+'_NY_GDP_MKTP_KD',authtoken="QqLL1AFCjc31_MVo4qsU")
df.columns = [key]
gdpDf = pd.concat([gdpDf,df],axis=1)
except:
pass
# 2.4 Exchange rate relative to USD
exchangeDf = pd.DataFrame({})
for name,key in countryCodes.items():
try:
df = Quandl.get('WWDI/'+key+'_PA_NUS_FCRF',authtoken="QqLL1AFCjc31_MVo4qsU")
df.columns = [key]
exchangeDf = pd.concat([exchangeDf,df],axis=1)
except:
pass
# 2.5 Nominal interest rate (lending rate)
interestDf = pd.DataFrame({})
for name,key in countryCodes.items():
try:
df = Quandl.get('WWDI/'+key+'_FR_INR_LEND',authtoken="QqLL1AFCjc31_MVo4qsU")
df.columns = [key]
interestDf = pd.concat([interestDf,df],axis=1)
except:
pass
# # Create data sets: money, prices, and output
# In[ ]:
# 3. Create datasets for quantity theory without interest and exchange rates
# 3.1 Dataframes to use
dataFrames = [moneyDf, deflatorDf, gdpDf]
# 3.2 Identify the codes for countries with at least 10 years of consecutive data for each series
availableCodes = []
for code in countryCodes.values():
if all(code in frame for frame in dataFrames):
if any( all(np.isnan(x) for x in frame[code]) for frame in dataFrames):
print(code)
else:
availableCodes.append(code)
print('Number of countries: ',len(availableCodes))
# 3.3 Construct the dataset including the average growth rates of variables for each country
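# The average growth rate below is the geometric mean over a consecutive span
# of length count, i.e. g = (x_end/x_start)**(1/count) - 1, applied in turn to
# money, the GDP deflator, and real GDP.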
includedCodes = []
obs = []
mData = []
pData = []
yData = []
for c in availableCodes:
count = 0
ind = []
for i in dataFrames[0].index[1:]:
noneNan = all( not np.isnan(frame[c].loc[i]) for frame in dataFrames)
anyNan = any( np.isnan(frame[c].loc[i]) for frame in dataFrames)
if noneNan:
count+=1
ind.append(i)
elif anyNan and count>0:
break
if count >9:
m = (moneyDf[c].loc[ind[-1]]/moneyDf[c].loc[ind[0]])**(1/count)-1
p = (deflatorDf[c].loc[ind[-1]]/deflatorDf[c].loc[ind[0]])**(1/count)-1
y = (gdpDf[c].loc[ind[-1]]/gdpDf[c].loc[ind[0]])**(1/count)-1
includedCodes.append(c)
mData.append(np.around(m,5))
pData.append(np.around(p,5))
yData.append(np.around(y,5))
obs.append(count)
# 3.4 Identify the names of the countries that are included
includedNames = []
for c in includedCodes:
for name, code in countryCodes.items():
if c == code:
includedNames.append(name)
# 3.5 Create the main dataframe
qtyTheoryData = pd.DataFrame({'iso code':includedCodes,'observations':obs,'inflation':pData,'money growth':mData,'gdp growth':yData},index = includedNames)
qtyTheoryData = qtyTheoryData[['iso code','observations','inflation','money growth','gdp growth']]
qtyTheoryData = qtyTheoryData.sort_index()
# 3.6 Create dataframes organized by income levels
indexL=[]
indexM=[]
indexH=[]
indexOecd=[]
for country in qtyTheoryData.index:
code = qtyTheoryData['iso code'].loc[country]
if code in countriesIncomeL:
indexL.append(country)
if code in countriesIncomeM:
indexM.append(country)
if code in countriesIncomeH:
indexH.append(country)
if code in countriesIncomeOecd:
indexOecd.append(country)
# Drop countries with inf values
qtyTheoryData = qtyTheoryData.replace([np.inf, -np.inf], np.nan).dropna()
qtyTheoryDataL = qtyTheoryData.loc[indexL]
qtyTheoryDataM = qtyTheoryData.loc[indexM]
qtyTheoryDataH = qtyTheoryData.loc[indexH]
qtyTheoryDataOecd = qtyTheoryData.loc[indexOecd]
# 3.7 Export dataframes to csv
qtyTheoryData.to_csv('../csv/qtyTheoryData.csv',index=True,index_label='country')
qtyTheoryDataL.to_csv('../csv/qtyTheoryDataL.csv',index=True,index_label='country')
qtyTheoryDataM.to_csv('../csv/qtyTheoryDataM.csv',index=True,index_label='country')
qtyTheoryDataH.to_csv('../csv/qtyTheoryDataH.csv',index=True,index_label='country')
qtyTheoryDataOecd.to_csv('../csv/qtyTheoryDataOecd.csv',index=True,index_label='country')
# # Create data sets: money, prices, output, interest, and exchange rates
# In[ ]:
# 4. Create datasets for quantity theory with interest and exchange rates
# 4.1 Dataframes to use
dataFrames = [moneyDf, deflatorDf, gdpDf,interestDf,exchangeDf]
# 4.2 Identify the codes for countries with at least 10 years of consecutive data for each series
availableCodes = []
for code in countryCodes.values():
if all(code in frame for frame in dataFrames):
if any( all(np.isnan(x) for x in frame[code]) for frame in dataFrames):
print(code)
else:
availableCodes.append(code)
print('Number of countries: ',len(availableCodes))
# 4.3 Construct the dataset including the average growth rates of variables for each country
includedCodes = []
obs = []
mData = []
pData = []
yData = []
iData = []
eData = []
for c in availableCodes:
count = 0
ind = []
for i in dataFrames[0].index[1:]:
noneNan = all( not np.isnan(frame[c].loc[i]) for frame in dataFrames)
anyNan = any( np.isnan(frame[c].loc[i]) for frame in dataFrames)
if noneNan:
count+=1
ind.append(i)
elif anyNan and count>0:
break
if count >9:
m = (moneyDf[c].loc[ind[-1]]/moneyDf[c].loc[ind[0]])**(1/count)-1
p = (deflatorDf[c].loc[ind[-1]]/deflatorDf[c].loc[ind[0]])**(1/count)-1
y = (gdpDf[c].loc[ind[-1]]/gdpDf[c].loc[ind[0]])**(1/count)-1
e = (exchangeDf[c].loc[ind[-1]]/exchangeDf[c].loc[ind[0]])**(1/count)-1
rate = np.mean(interestDf[c].iloc[1:])/100
includedCodes.append(c)
mData.append(np.around(m,5))
pData.append(np.around(p,5))
yData.append(np.around(y,5))
eData.append(np.around(e,5))
iData.append(np.around(rate,5))
obs.append(count)
# 4.4 Identify the names of the countries that are included
includedNames = []
for c in includedCodes:
for name, code in countryCodes.items():
if c == code:
includedNames.append(name)
# 4.5 Create the main dataframe
qtyTheoryData = pd.DataFrame({'iso code':includedCodes,'observations':obs,'inflation':pData,'money growth':mData,'gdp growth':yData,'nominal interest rate':iData,'exchange rate depreciation':eData},index = includedNames)
qtyTheoryData = qtyTheoryData[['iso code','observations','inflation','money growth','gdp growth','nominal interest rate','exchange rate depreciation']]
qtyTheoryData = qtyTheoryData.sort_index()
# 4.6 Create dataframes organized by income levels
indexL=[]
indexM=[]
indexH=[]
indexOecd=[]
for country in qtyTheoryData.index:
code = qtyTheoryData['iso code'].loc[country]
if code in countriesIncomeL:
indexL.append(country)
if code in countriesIncomeM:
indexM.append(country)
if code in countriesIncomeH:
indexH.append(country)
if code in countriesIncomeOecd:
indexOecd.append(country)
# Drop countries with inf values
qtyTheoryData = qtyTheoryData.replace([np.inf, -np.inf], np.nan).dropna()
qtyTheoryDataL = qtyTheoryData.loc[indexL]
qtyTheoryDataM = qtyTheoryData.loc[indexM]
qtyTheoryDataH = qtyTheoryData.loc[indexH]
qtyTheoryDataOecd = qtyTheoryData.loc[indexOecd]
# 4.7 Export dataframes to csv
qtyTheoryData.to_csv('../csv/qtyTheoryOpenData.csv',index=True,index_label='country')
qtyTheoryDataL.to_csv('../csv/qtyTheoryOpenDataL.csv',index=True,index_label='country')
qtyTheoryDataM.to_csv('../csv/qtyTheoryOpenDataM.csv',index=True,index_label='country')
qtyTheoryDataH.to_csv('../csv/qtyTheoryOpenDataH.csv',index=True,index_label='country')
qtyTheoryDataOecd.to_csv('../csv/qtyTheoryOpenDataOecd.csv',index=True,index_label='country')
# In[ ]:
# 5. Export notebook to python script
# runProcs.exportNb('quantityTheoryData')
|
letsgoexploring/data
|
quantitytheory/python/quantityTheoryData.py
|
Python
|
mit
| 16,452
|
[
"BWA"
] |
40fc3e9fb838c952eb547d542efe394d7cdd34494161a60a406bcc69e2c3209a
|
#!/usr/bin/env python
"""
TextureCoordinates
"""
"""
Copyright 2001 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the LGPL. See http://www.fsf.org
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Revision: 1.3 $
$Date: 2001-05-31 17:48:55 $
Pearu Peterson
"""
from . import DataSetAttr
from . import common  # used by texture_coordinates_fromfile below
class TextureCoordinates(DataSetAttr.DataSetAttr):
"""Holds VTK Texture Coordinates.
Usage:
TextureCoordinates(<sequence of (1,2, or 3)-sequences> ,name = <string>)
Attributes:
coords
name
Public methods:
get_size()
to_string(format = 'ascii')
"""
def __init__(self,scalars,name=None):
self.name = self._get_name(name)
self.coords = self.get_n_seq_seq(scalars,self.default_value)
if not 1<=len(self.coords[0])<=3:
raise ValueError('texture coordinates dimension must be 1, 2, or 3 but got %s'%(len(self.coords[0])))
def to_string(self,format='ascii'):
t = self.get_datatype(self.coords)
ret = ['TEXTURE_COORDINATES %s %s %s'%(self.name,len(self.coords[0]),t),
self.seq_to_string(self.coords,format,t)]
return '\n'.join(ret)
def get_size(self):
return len(self.coords)
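# Hedged sketch (added commentary, not from the original file) of the ASCII
# block produced by TextureCoordinates.to_string() for 2-D coords named 'default':
#   TEXTURE_COORDINATES default 2 <datatype from get_datatype>
#   3 3
#   4 3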
def texture_coordinates_fromfile(f,n,sl):
assert len(sl)==3
dataname = sl[0].strip()
dim = eval(sl[1])
datatype = sl[2].strip().lower()
assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
arr = []
while len(arr)<dim*n:
arr += list(map(eval,common._getline(f).split(' ')))
assert len(arr)==dim*n
arr2 = []
for i in range(0,len(arr),dim):
arr2.append(arr[i:i+dim])
return TextureCoordinates(arr2,dataname)
if __name__ == "__main__":
print(TextureCoordinates([[3,3],[4,3],240,3,2]).to_string())
|
ddempsey/PyFEHM
|
pyvtk/TextureCoordinates.py
|
Python
|
lgpl-2.1
| 2,047
|
[
"VTK"
] |
1abad3043a12c9e2bdb1e9c9f7e8751b07b507463b5140451e8f38807fd6a4e4
|
#! /usr/bin/env python
import sys, os
from mpi4py import MPI
import subprocess as sub
import cPickle as pickle
import MDAnalysis as mda
import ShiftPred as sp
import md2nmr
simPath = '/local/jubio/oschill/iGRASP/IL-6/IL-6_ffcomp'
runName = '04_prod01_protein'
expDatFile = simPath + '/' + 'common_files/IL6_S2_exp.dat'
simulations = {}
skipFrames = 9
rerunMD2NMR = False
rerunShiftPred = True
shiftPredMethod = 'sparta+'
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
numprocs = comm.Get_size()
root = 0
# root distributes work
if rank == root:
    indices = [6, 12, 13, 14, 15, 21, 22, 23, 26, 31]
forcefields = ['amber03',
'amber03-star',
'amber99sb',
'amber99sb-star',
'amber99sb-ildn',
'amber99sb-star-ildn',
'amber99sbnmr1-ildn',
'charmm22star',
'charmm27',
'oplsaa',
'gromos54a7']
#indices = [15]
#forcefields = ['charmm27']
# create work items
workItems = []
for ff in forcefields:
for index in indices:
simName = "{:s}_2IL6_{:d}".format(ff, index)
workItems.append(simName)
# compute workload per process
totalWorkload = len(workItems)
workload = totalWorkload / numprocs
extraWork = totalWorkload % numprocs
# loop through processes
for proc in range(numprocs):
currentWorkload = workload
if proc < extraWork:
currentWorkload += 1
# pack work items per process
work = []
for w in range(currentWorkload):
work.append(workItems.pop())
# send work items
if proc != 0:
comm.send(work, dest=proc, tag=123)
else:
mywork = work
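# Illustrative example (hypothetical numbers): with 25 work items on 4
# processes, workload = 6 and extraWork = 1, so the ranks receive 7, 6, 6
# and 6 items respectively.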
# receive work from root
if rank != root:
work = comm.recv(source=0, tag=123)
else:
work = mywork
# report work
for proc in range(numprocs):
if rank == proc:
for w in work:
print rank, w
#print ""
sys.stdout.flush()
comm.Barrier()
# Creating simulation objects
for simName in work:
simulations[simName] = md2nmr.md2nmr(runName, path=simPath+'/'+simName, rerun=rerunMD2NMR, verbose=False)
# Predicting chemical shifts
shiftPredictions = {}
for i, key in enumerate(simulations.keys()):
simName = key
pickleFilename = "{}/{}/{}_shiftpred_{}_skip{}.dat".format(simPath, simName, runName, shiftPredMethod, skipFrames)
#print pickleFilename
if not rerunShiftPred and os.path.isfile(pickleFilename):
#print "unpickling average shifts"
# unpickle average shifts
loadFile = open(pickleFilename, 'rb')
shiftPredictions[key] = pickle.load(loadFile)
loadFile.close()
else:
shiftPredictions[key] = sp.ShiftPred(simulations[key].universe, method=shiftPredMethod)
shiftPredictions[key].predict(skip=skipFrames)
shiftPredictions[key].universe = None # unset universe, so we can pickle
# pickle average shifts
#print "pickling average shifts"
dumpFile = open(pickleFilename, 'wb')
pickle.dump(shiftPredictions[key], dumpFile, protocol=pickle.HIGHEST_PROTOCOL)
dumpFile.close()
# report number of frames
print "Pickled {} frames for {} ({}/{}) done".format(len(shiftPredictions[key].averageShifts.values()[0].shiftList), simName, i+1, len(simulations.keys()))
#print "rank {}, {}/{} done".format(rank, i+1, len(simulations.keys()))
print "Process {} done.".format(rank)
|
schilli/md2nmr
|
predict_shifts.py
|
Python
|
gpl-2.0
| 3,675
|
[
"MDAnalysis"
] |
c6ad1b279b62ce4bfe342545a7fa2aeabe62063cf97ca9f926f859570865da43
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
# Input
from signals.aux_functions import gaussian_bump
from inputs.sensors import Sensor, PerceptualSpace
from inputs.lag_structure import LagStructure
# nexa
from nexa.nexa import Nexa
# Visualization libraries
from visualization.sensor_clustering import visualize_cluster_matrix
from visualization.sensors import visualize_SLM_axis
from visualization.sensors import visualize_STDM_seaborn
def set_gaussian_bumps(base, distance, value, time, max_rate):
"""
A convenience function for setting the bumps
"""
    attenuation = 2
    center1 = 200
    center2 = center1 + distance
    # Create the gaussian bumps
gb1 = gaussian_bump(time, center1, max_rate, base, value, attenuation)
gb2 = gaussian_bump(time, center2, max_rate, base, value, attenuation)
# Add some noise
gb1 += np.random.rand(gb1.size)
gb2 += np.random.rand(gb2.size)
return gb1, gb2
def set_perceptual_space(gb1, gb2, dt):
"""
    A convenience function to set the perceptual space
"""
# lag_times = np.linspace(0, 800, 5) # Go two times the period
lag_times = np.arange(0, 1000, 200)
window_size = 200
weights = None
lag_structure = LagStructure(lag_times=lag_times, weights=weights, window_size=window_size)
sensor1 = Sensor(gb1, dt, lag_structure)
sensor2 = Sensor(gb2, dt, lag_structure)
sensors = [sensor1, sensor2]
perceptual_space = PerceptualSpace(sensors, lag_first=True)
return perceptual_space
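# Note on the lag structure above (added commentary): lag_times = np.arange(0,
# 1000, 200) gives the five lags [0, 200, 400, 600, 800]; assuming LagStructure
# reads these in samples (dt = 1.0 here), each sensor is viewed through five
# 200-sample windows.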
def parameter_swep_SLM(base, distance, value):
"""
    Sweeps parameters looking for the SLM
"""
Tmax = 1100
dt = 1.0
time = np.arange(0, Tmax, dt)
# First we define the parameters
max_rate = 450
Nspatial_clusters = 2 # Number of spatial clusters
Ntime_clusters = 4 # Number of time clusters
Nembedding = 2 # Dimension of the embedding space
    # Set the gaussian bumps
gb1, gb2 = set_gaussian_bumps(base, distance, value, time, max_rate)
# Get the perceptual_space
perceptual_space = set_perceptual_space(gb1, gb2, dt)
    # Let's do the plotting here
gs = mpl.gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(16, 12))
ax1 = fig.add_subplot(gs[:, 1])
# Now the nexa object
nexa_object = Nexa(perceptual_space, Nspatial_clusters,
Ntime_clusters, Nembedding)
# Visualize object
visualize_SLM_axis(nexa_object, ax=ax1)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(time, gb1)
ax1.set_ylim((0, max_rate + 20))
ax2 = fig.add_subplot(gs[1, 0])
ax2.plot(time, gb2)
ax2.set_ylim((0, max_rate + 20))
return fig
def parameter_swep_STDM(base, distance, value):
"""
    Sweeps parameters looking for the STDM
"""
Tmax = 1100
dt = 1.0
time = np.arange(0, Tmax, dt)
# First we define the parameters
max_rate = 450
Nspatial_clusters = 2 # Number of spatial clusters
Ntime_clusters = 4 # Number of time clusters
Nembedding = 2 # Dimension of the embedding space
    # Set the gaussian bumps
gb1, gb2 = set_gaussian_bumps(base, distance, value, time, max_rate)
# Get the perceptual_space
perceptual_space = set_perceptual_space(gb1, gb2, dt)
    # Let's do the plotting here
gs = mpl.gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(16, 12))
ax1 = fig.add_subplot(gs[:, 1])
# Now the nexa object
nexa_object = Nexa(perceptual_space, Nspatial_clusters,
Ntime_clusters, Nembedding)
# Visualize object
nexa_object.calculate_distance_matrix()
visualize_STDM_seaborn(nexa_object, ax=ax1)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(time, gb1)
ax1.set_ylim((0, max_rate + 20))
ax2 = fig.add_subplot(gs[1, 0])
ax2.plot(time, gb2)
ax2.set_ylim((0, max_rate + 20))
return fig
def parameter_swep_cluster(base, distance, value):
"""
    Sweeps parameters looking for the cluster matrix
"""
Tmax = 1100
dt = 1.0
time = np.arange(0, Tmax, dt)
# First we define the parameters
max_rate = 450
Nspatial_clusters = 2 # Number of spatial clusters
Ntime_clusters = 4 # Number of time clusters
Nembedding = 2 # Dimension of the embedding space
    # Set the gaussian bumps
gb1, gb2 = set_gaussian_bumps(base, distance, value, time, max_rate)
# Get the perceptual_space
perceptual_space = set_perceptual_space(gb1, gb2, dt)
    # Let's do the plotting here
gs = mpl.gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(16, 12))
ax1 = fig.add_subplot(gs[:, 1])
# Now the nexa object
nexa_object = Nexa(perceptual_space, Nspatial_clusters,
Ntime_clusters, Nembedding)
nexa_object.calculate_distance_matrix()
    nexa_object.calculate_embedding()
nexa_object.calculate_spatial_clustering()
# Visualize object
visualize_cluster_matrix(nexa_object, ax=ax1)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(time, gb1)
ax1.set_ylim((0, max_rate + 20))
ax2 = fig.add_subplot(gs[1, 0])
ax2.plot(time, gb2)
ax2.set_ylim((0, max_rate + 20))
return fig
def parameter_swep_cluster_SLM(base, distance, value):
"""
    Sweeps parameters looking for the cluster matrix and the SLM
"""
Tmax = 1100
dt = 1.0
time = np.arange(0, Tmax, dt)
# First we define the parameters
max_rate = 450
Nspatial_clusters = 2 # Number of spatial clusters
Ntime_clusters = 4 # Number of time clusters
Nembedding = 2 # Dimension of the embedding space
    # Set the gaussian bumps
gb1, gb2 = set_gaussian_bumps(base, distance, value, time, max_rate)
# Get the perceptual_space
perceptual_space = set_perceptual_space(gb1, gb2, dt)
    # Let's do the plotting here
gs = mpl.gridspec.GridSpec(2, 2)
fig = plt.figure(figsize=(16, 12))
# Now the nexa object
nexa_object = Nexa(perceptual_space, Nspatial_clusters,
Ntime_clusters, Nembedding)
nexa_object.calculate_distance_matrix()
    nexa_object.calculate_embedding()
nexa_object.calculate_spatial_clustering()
# Visualize the cluster on the right side
ax1 = fig.add_subplot(gs[:, 1])
visualize_cluster_matrix(nexa_object, ax=ax1)
# Visualize the SLE on the left side
ax2 = fig.add_subplot(gs[:, 0])
visualize_SLM_axis(nexa_object, ax=ax2)
return fig
def create_filename(directory, name, figure_format, base, distance, value):
"""
Creation of a unique filename
"""
filename = directory + name
filename += '-'
filename += '{:.2f}'.format(base)
filename += '-'
filename += '{:.2f}'.format(distance)
filename += '-'
filename += '{:.2f}'.format(value)
filename += figure_format
return filename
|
h-mayorquin/time_series_basic
|
parameter_exploration/parameter_swep.py
|
Python
|
bsd-3-clause
| 6,887
|
[
"Gaussian"
] |
ab92b454b17bbcdd43f4eb9de117bc20da10b9be6e4964b77042e759ac228b1c
|
from paraview.simple import *
import glob
import re
#liste = glob.glob('/home/bruno/CFDEM/bruno-PUBLIC-2.2.1/runv2-6/particle_only/loadingSquare/post/cylinder*.vtk')
liste = glob.glob('./post/couette*.vtk')
#liste.sort()
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
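# Illustrative example (hypothetical names): natural_sort compares embedded
# digit runs as integers rather than lexicographically, e.g.
#   natural_sort(['couette_10.vtk', 'couette_2.vtk', 'couette_1.vtk'])
#   -> ['couette_1.vtk', 'couette_2.vtk', 'couette_10.vtk']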
liste=natural_sort(liste)
#paraview.simple._DisableFirstRenderCameraReset()
#square = LegacyVTKReader( FileNames=[
#'/home/bruno/CFDEM/bruno-PUBLIC-2.2.1/runv2-6/particle_only/loadingSquare/post/square98000.vtk'] )
couette_ = STLReader( FileNames=['./initpost/couette_1000.stl'] )
square=LegacyVTKReader(FileNames=liste)
RenderView1 = GetRenderView()
RenderView1.CameraPosition = [-0.00032399967312812805, -0.0002795010805130005, 0.799224061999444]
RenderView1.CameraClippingRange = [0.5396848694000508, 0.5986885115576086]
RenderView1.CameraFocalPoint = [-0.00032399967312812805, -0.0002795010805130005, 0.23402100056409836]
RenderView1.CameraParallelScale = 0.1462853166497175
RenderView1.CenterOfRotation = [-0.00032399967312812805, -0.0002795010805130005, 0.23402100056409836]
DataRepresentation1 = Show()
DataRepresentation1.ConstantRadius = 0.0015
DataRepresentation1.EdgeColor = [0.0, 0.0, 0.5000076295109483]
DataRepresentation1.PointSpriteDefaultsInitialized = 1
DataRepresentation1.SelectionPointFieldDataArrayName = 'f'
DataRepresentation1.SelectionCellFieldDataArrayName = 'radius'
DataRepresentation1.ColorArrayName = ('POINT_DATA', 'radius')
DataRepresentation1.Texture = []
DataRepresentation1.AmbientColor = [0.0, 0.0, 0.0]
DataRepresentation1.Representation = 'Point Sprite'
DataRepresentation1.CubeAxesColor = [0.0, 0.0, 0.0]
DataRepresentation1.RadiusRange = [-0.10308, 0.102432]
DataRepresentation1.ScaleFactor = 0.020727699995040896
a1_radius_PVLookupTable = GetLookupTableForArray( "radius", 1, RGBPoints=[0.004000000189989805, 0.0, 0.0, 1.0, 0.004000000189989905, 1.0, 0.0, 0.0], VectorMode='Component', NanColor=[0.498039, 0.498039, 0.498039], ColorSpace='HSV', ScalarRangeInitialized=1.0 )
a1_radius_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.004000000189989805, 0.0, 0.5, 0.0, 0.004000000189989905, 1.0, 0.5, 0.0] )
Render()
|
blaisb/cfdemUtilities
|
paraview/couetteParticles.py
|
Python
|
lgpl-3.0
| 2,318
|
[
"ParaView",
"VTK"
] |
3eef35a793f00378f8afb759211c59de4029afce6571f2bfb905e36f9ecd39e0
|
#!/usr/bin/env python
import fileinput
import sys
import os
import re
import math
import json
import numpy as np
import subprocess
from optparse import OptionParser
# These kernels have guaranteed correct Jacobians
whitelisted_kernels = ['Diffusion', 'TimeDerivative']
# regular expressions to parse the PETSc debug output
MfdRE = re.compile("^Finite[ -]difference Jacobian \(user-defined state\)")
MhcRE = re.compile("^Hand-coded Jacobian \(user-defined state\)")
MdiffRE = re.compile("^Hand-coded minus finite[ -]difference Jacobian \(user-defined state\)")
rowRE = re.compile("row (\d+): ")
valRE = re.compile(" \((\d+), ([+.e\d-]+)\)")
# Get the real path of jacobian analyzer
if(os.path.islink(sys.argv[0])):
pathname = os.path.dirname(os.path.realpath(sys.argv[0]))
else:
pathname = os.path.dirname(sys.argv[0])
pathname = os.path.abspath(pathname)
# Borrowed from Peacock
def recursiveFindFile(current_path, p, executable):
if not os.path.exists(current_path):
return None
files = os.listdir(current_path)
split = current_path.split('/')
if len(split) > 2 and split[-2] == 'problems':
# if we're in the "problems" directory... hop over to this application's directory instead
the_file = recursiveFindFile('/'.join(split[:-2]) + '/' + split[-1], p, executable)
# Still didn't find it, we must keep looking up this path so fall through here
if the_file != None:
return the_file
for afile in files:
if p.match(afile) and ((not executable) or os.access(current_path+'/'+afile, os.X_OK)):
return current_path + '/' + afile
up_one = os.path.dirname(current_path)
if current_path != '/':
return recursiveFindFile(up_one, p, executable)
return None
# Borrowed from Peacock
def findExecutable(executable_option, method_option):
if executable_option and os.path.exists(executable_option):
return executable_option
else:
# search up directories until we find an executable, starting with the current directory
method = 'opt' # Find the optimized binary by default
if 'METHOD' in os.environ:
method = os.environ['METHOD']
if method_option:
method = method_option
p = re.compile('.+-'+method+'$')
executable = recursiveFindFile(os.getcwd(), p, True)
if not executable:
print 'Executable not found! Try specifying it using -e'
sys.exit(1)
return executable
#
# v
# sd1 kern1 kern2
# sd2 kern1
#
# u
# sd1 kern3
def analyze(dofdata, Mfd, Mhc, Mdiff) :
global options
diagonal_only = options.diagonal_only
dofs = dofdata['ndof']
nlvars = [var['name'] for var in dofdata['vars']]
numvars = len(nlvars)
# build analysis blocks (for now: one block per variable)
blocks = []
for var in dofdata['vars'] :
blockdofs = []
for subdomain in var['subdomains'] :
blockdofs.extend(subdomain['dofs'])
blocks.append(blockdofs)
nblocks = len(blocks)
# analysis results
fd = np.zeros((nblocks, nblocks))
hc = np.zeros((nblocks, nblocks))
norm = np.zeros((nblocks, nblocks))
# prepare block norms
for i in range(nblocks) :
for j in range(nblocks) :
# iterate over all DOFs in the current block and compute the block norm
for di in blocks[i] :
for dj in blocks[j] :
if abs(Mfd[di][dj]) > 1e60 or abs(Mhc[di][dj]) > 1e60:
fd [i][j] += 1e10
norm[i][j] += 1e20
continue
else :
fd [i][j] += Mfd[di][dj]**2
hc [i][j] += Mhc[di][dj]**2
norm[i][j] += Mdiff[di][dj]**2
fd = fd**0.5
hc = hc**0.5
norm = norm**0.5
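    # fd, hc and norm now hold, per variable block, the Frobenius norms of the
    # finite-difference Jacobian, the hand-coded Jacobian and their difference;
    # a norm[i][j] that is large relative to fd[i][j] flags a suspect block.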
all_good = True
e = 1e-4
for i in range(nblocks) :
printed = False
for j in range(nblocks) :
if i != j and diagonal_only :
continue
if norm[i][j] > e*fd[i][j] :
if not printed :
print "\nKernel for variable '%s':" % nlvars[i]
printed = True
all_good = False
if hc[i][j] == 0.0 :
problem = "needs to be implemented"
elif fd[i][j] == 0.0 :
problem = "should just return zero"
else :
err = math.fabs((hc[i][j]-fd[i][j])/fd[i][j])*100.0
if err > 20.0 :
problem = "is wrong (off by %.1f %%)" % err
elif err > 5.0 :
problem = "is questionable (off by %.2f %%)" % err
elif err > 1.0 :
problem = "is inexact (off by %.3f %%)" % err
else :
problem = "is slightly off (by %f %%)" % err
if i == j :
print " (%d,%d) On-diagonal Jacobian %s" % (i, j, problem)
else :
print " (%d,%d) Off-diagonal Jacobian for variable '%s' %s" % (i, j, nlvars[j], problem)
if all_good :
print "No errors detected. :-)"
# output parsed (but not processed) jacobian matrix data in gnuplot's nonuniform matrix format
def saveMatrixToFile(M, dofs, filename) :
    file = open(filename, "w")
    for i in range(dofs) :
        for j in range(dofs) :
            file.write("%d %d %f\n" % (i, j, M[i][j]))
        file.write("\n")
    file.close()
#
# Simple state machine parser for the MOOSE output
#
def parseOutput(output, dofdata) :
global options
write_matrices = options.write_matrices
dofs = dofdata['ndof']
state = 0
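    # State machine summary (added commentary): state 0 allocates fresh
    # matrices; state 1 waits for the finite-difference header; rows are then
    # read into Mfd (state 2), Mhc (state 3) and Mdiff (state 4) as each matrix
    # header appears; after the last row of Mdiff the step is analyzed and the
    # state resets to 0.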
for line in output.split('\n'):
#print state, line
#
# Read in PetSc matrices
#
if state == 0 :
Mfd = np.zeros((dofs, dofs))
Mhc = np.zeros((dofs, dofs))
Mdiff = np.zeros((dofs, dofs))
state = 1
if state == 1 :
m = MfdRE.match(line)
if m :
state = 2
continue
if state == 2 :
m = MhcRE.match(line)
if m :
state = 3
continue
if state == 3 :
m = MdiffRE.match(line)
if m :
state = 4
continue
# read data
if state >= 2 and state <= 4 :
m = rowRE.match(line)
vals = valRE.findall(line)
if m :
row = int(m.group(1))
for pair in vals :
if state == 2 :
Mfd[row, int(pair[0])] = float(pair[1])
if state == 3 :
Mhc[row, int(pair[0])] = float(pair[1])
if state == 4 :
Mdiff[row, int(pair[0])] = float(pair[1])
if state == 4 and row+1 == dofs :
state = 0
analyze(dofdata, Mfd, Mhc, Mdiff)
# dump parsed matrices in gnuplottable format
if write_matrices :
saveMatrixToFile(Mfd, dofs, "jacobian_finite_differenced.dat")
saveMatrixToFile(Mhc, dofs, "jacobian_hand_coded.dat")
saveMatrixToFile(Mdiff, dofs, "jacobians_diffed.dat")
# theoretically we could have multiple steps to analyze in the output
continue
if __name__ == '__main__':
usage = "Usage: %prog [options] [input_file]"
description = "Note: You can directly supply an input file without specifying any options. The correct thing will automatically happen."
parser = OptionParser(usage=usage, description=description)
parser.add_option("-e", "--executable", dest="executable",
help="The executable you would like to build an input file for. If not supplied an executable will be searched for. The searched for executable will default to the optimized version of the executable (if available).")
parser.add_option("-i", "--input-file", dest="input_file",
help="Input file you would like to open debug the jacobians on.")
parser.add_option("-m", "--method", dest="method",
help="Pass either opt, dbg or devel. Works the same as setting the $METHOD environment variable.")
parser.add_option("-r", "--resize-mesh", dest="resize_mesh", action="store_true", help="Perform resizing of generated meshs (to speed up the testing).")
parser.add_option("-s", "--mesh-size", dest="mesh_size", default=1, type="int", help="Set the mesh dimensions to this number of elements along each dimension (defaults to 1, requires -r option).")
parser.add_option("-D", "--on-diagonal-only", dest="diagonal_only", action="store_true", help="Test on-diagonal Jacobians only.")
parser.add_option("-d", "--debug", dest="debug", action="store_true", help="Output the command line used to run the application.")
parser.add_option("-w", "--write-matrices", dest="write_matrices", action="store_true", help="Output the Jacobian matrices in gnuplot format.")
parser.add_option("-n", "--no-auto-options", dest="noauto", action="store_true", help="Do not add automatic options to the invocation of the moose based application. Requres a specially prepared input file for debugging.")
(options, args) = parser.parse_args()
for arg in args:
if arg[-2:] == '.i':
options.input_file = arg
if options.input_file is None :
print 'Please specify an input file.'
sys.exit(1)
executable = findExecutable(options.executable, options.method)
basename = options.input_file[0:-2]
dofoutname = 'analyzerdofmap'
# common arguments for both debugging and dofmapping
moosebaseparams = [executable, '-i', options.input_file ]
if options.resize_mesh :
moosebaseparams.extend(['Mesh/nx=%d' % options.mesh_size, 'Mesh/ny=%d' % options.mesh_size, 'Mesh/nz=%d' % options.mesh_size])
# run to dump DOFs (this does not happen during the debug step)
dofmapfilename = basename + '_' + dofoutname + '.json'
if not options.noauto :
mooseparams = moosebaseparams[:]
mooseparams.extend(['Problem/solve=false', 'BCs/active=', 'Outputs/' + dofoutname+ '/type=DOFMap', 'Outputs/active=' + dofoutname, 'Outputs/file_base=' + basename + '_' + dofoutname])
if options.debug :
print "Running\n%s\n" % " ".join(mooseparams)
try:
child = subprocess.Popen(mooseparams, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
data = child.communicate()[0]
child.wait()
except:
print 'Error executing moose based application to gather DOF map\n'
sys.exit(1)
else :
print "Runing without automatic options DOF map '%s' will not be generated automatically!" % dofmapfilename
# analyze return code
if child.returncode == 1 :
# MOOSE failed with an unexpected error
print data
sys.exit(1)
elif child.returncode == -11 :
print "The moose application crashed with a segmentation fault (try recompiling)"
sys.exit(1)
# load and decode the DOF map data (for now we only care about one frame)
with open (dofmapfilename, "rt") as myfile :
dofjson = myfile.readlines()
dofdata = json.loads(dofjson[0].rstrip('\n'))
if options.debug :
print "DOF map output:\n%s\n" % dofdata
# for every DOF get the list of kernels contributing to it
dofkernels = [[] for i in range(dofdata['ndof'])]
kerneltypes = {}
for var in dofdata['vars'] :
for subdomain in var['subdomains'] :
kernels = [kernel for kernel in subdomain['kernels']]
# create lookup table from kernel name to kernel type
for kernel in kernels :
kerneltypes[kernel['name']] = kernel['type']
# list of active kernels contributing to a DOF
for dof in subdomain['dofs'] :
dofkernels[dof].extend([kernel['name'] for kernel in kernels if not kernel['name'] in dofkernels[dof]])
# get all unique kernel combinations occurring on the DOFs
combination_dofs = {}
for dof, kernels in enumerate(dofkernels) :
kernels.sort()
idx = tuple(kernels)
if idx in combination_dofs :
combination_dofs[idx].append(dof)
else :
combination_dofs[idx] = [dof]
#combinations = []
#for kernels in combination_dofs :
# print kernels
# build the parameter list for the jacobian debug run
mooseparams = moosebaseparams[:]
if not options.noauto :
mooseparams.extend([ '-snes_type', 'test', '-snes_test_display', '-mat_fd_type', 'ds', 'Executioner/solve_type=NEWTON', 'BCs/active='])
if options.debug :
print "Running\n%s\n" % " ".join(mooseparams)
else :
print 'Running input with executable %s ...\n' % executable
# run debug process to gather jacobian data
try:
child = subprocess.Popen(mooseparams, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
data = child.communicate()[0]
child.wait()
except:
print 'Error executing moose based application\n'
sys.exit(1)
# parse the raw output, which contains the PETSc debug information
parseOutput(data, dofdata)
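    # A hedged usage sketch (not part of the original script); the executable
    # and input-file names below are illustrative only:
    #
    #   ./analyzejacobian.py my_input.i                           # auto-detect the executable
    #   ./analyzejacobian.py -e ./my_app-opt -r -s 2 my_input.i   # resized 2x2x2 mesh
    #   ./analyzejacobian.py -D -w my_input.i                     # on-diagonal only, dump matrices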
|
jhbradley/moose
|
python/jacobiandebug/analyzejacobian.py
|
Python
|
lgpl-2.1
| 12,369
|
[
"MOOSE"
] |
40b5ad1a1bf2fbd9dd9bd477bc4b95961848960166a35a8f89e334be2d8d3a26
|
#: cX:decorator:compromise:CoffeeShop.py
# Coffee example with a compromise of basic
# combinations and decorators
class DrinkComponent:
def getDescription(self):
return self.__class__.__name__
def getTotalCost(self):
return self.__class__.cost
class Espresso(DrinkComponent):
cost = 0.75
class EspressoConPanna(DrinkComponent):
cost = 1.0
class Cappuccino(DrinkComponent):
cost = 1.0
class CafeLatte(DrinkComponent):
cost = 1.0
class CafeMocha(DrinkComponent):
cost = 1.25
class Decorator(DrinkComponent):
def __init__(self, drinkComponent):
self.component = drinkComponent
def getTotalCost(self):
return self.component.getTotalCost() + \
DrinkComponent.getTotalCost(self)
def getDescription(self):
return self.component.getDescription() + \
' ' + DrinkComponent.getDescription(self)
class ExtraEspresso(Decorator):
cost = 0.75
def __init__(self, drinkComponent):
Decorator.__init__(self, drinkComponent)
class Whipped(Decorator):
cost = 0.50
def __init__(self, drinkComponent):
Decorator.__init__(self, drinkComponent)
class Decaf(Decorator):
cost = 0.0
def __init__(self, drinkComponent):
Decorator.__init__(self, drinkComponent)
class Dry(Decorator):
cost = 0.0
def __init__(self, drinkComponent):
Decorator.__init__(self, drinkComponent)
class Wet(Decorator):
cost = 0.0
def __init__(self, drinkComponent):
Decorator.__init__(self, drinkComponent)
cappuccino = Cappuccino()
print cappuccino.getDescription() + ": $" + \
`cappuccino.getTotalCost()`
cafeMocha = Whipped(Decaf(CafeMocha()))
print cafeMocha.getDescription() + ": $" + \
`cafeMocha.getTotalCost()`
#<hr>
output = '''
Cappuccino: $1.0
CafeMocha Decaf Whipped: $1.75
'''
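# A hedged extension of the example above (not in the original file): because
# every decorator is itself a DrinkComponent, wrappers nest to any depth. Only
# classes already defined in this file are used.
wetCappuccino = ExtraEspresso(Wet(Cappuccino()))
print wetCappuccino.getDescription() + ": $" + \
      `wetCappuccino.getTotalCost()`
# Expected: Cappuccino Wet ExtraEspresso: $1.75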
|
tapomayukh/projects_in_python
|
sandbox_tapo/src/refs/TIPython/code/cX/decorator/compromise/CoffeeShop.py
|
Python
|
mit
| 1,746
|
[
"ESPResSo"
] |
28e8b5c75ac1a127eade8ba3ea5423b39dcd9b0b20117212390d220b83b0c5c8
|
# -*- coding: utf-8 -*-
"""Vendorized, partial version of scipy.constants which does not implement the
full codata formulations.
This was implemented to provide a consistent set of constants across scipy
versions; and to prevent the tests from failing when new CODATA formulations
come out.
"""
import math as _math
# mathematical constants
pi = _math.pi
pi_inv = 1.0/pi
golden = golden_ratio = 1.618033988749895
# SI prefixes
yotta = 1e24
zetta = 1e21
exa = 1e18
peta = 1e15
tera = 1e12
giga = 1e9
mega = 1e6
kilo = 1e3
hecto = 1e2
deka = 1e1
deci = 1e-1
centi = 1e-2
milli = 1e-3
micro = 1e-6
nano = 1e-9
pico = 1e-12
femto = 1e-15
atto = 1e-18
zepto = 1e-21
# binary prefixes
kibi = 2**10
mebi = 2**20
gibi = 2**30
tebi = 2**40
pebi = 2**50
exbi = 2**60
zebi = 2**70
yobi = 2**80
# physical constants
c = speed_of_light = 299792458.0
mu_0 = 4e-7*pi
epsilon_0 = 1.0 / (mu_0*c*c)
h = Planck = 6.62607004e-34
hbar = h / (2.0 * pi)
G = gravitational_constant = 6.67408e-11
g = 9.80665
g_sqrt = 3.1315571206669692  # _math.sqrt(g)
e = elementary_charge = 1.6021766208e-19
alpha = fine_structure = 0.0072973525664
N_A = Avogadro = 6.022140857e+23
k = Boltzmann = 1.38064852e-23
sigma = Stefan_Boltzmann = 5.670367e-08
Wien = 0.0028977729
Rydberg = 10973731.568508
# exact values from the 2019 SI redefinition; these deliberately override the
# CODATA 2014 values of k and N_A assigned above
k = 1.380649e-23
N_A = 6.02214076e23
R = gas_constant = N_A*k  # 8.31446261815324 exactly now, N_A*k
R_inv = 1.0/R
R2 = R*R
# mass in kg
gram = 1e-3
metric_ton = 1e3
grain = 64.79891e-6
lb = pound = 7000 * grain # avoirdupois
blob = slinch = pound * g / 0.0254 # lbf*s**2/in (added in 1.0.0)
slug = blob / 12 # lbf*s**2/foot (added in 1.0.0)
oz = ounce = pound / 16.0
stone = 14.0 * pound
long_ton = 2240.0 * pound
short_ton = 2000.0 * pound
troy_ounce = 480.0 * grain # only for metals / gems
troy_pound = 12.0 * troy_ounce
carat = 200e-6
m_e = electron_mass = 9.10938356e-31
m_p = proton_mass = 1.672621898e-27
m_n = neutron_mass = 1.674927471e-27
m_u = u = atomic_mass = 1.66053904e-27
# angle in rad
degree = pi / 180.0
arcmin = arcminute = degree / 60.0
arcsec = arcsecond = arcmin / 60.0
# time in second
minute = 60.0
hour = 60.0 * minute
hour_inv = 1.0/hour
day = 24.0 * hour
week = 7.0 * day
year = 365.0 * day
Julian_year = 365.25 * day
# length in meter
inch = 0.0254
inch_inv = 1.0/inch
foot = 12 * inch
foot_cubed = foot*foot*foot
foot_cubed_inv = 1.0/foot_cubed
yard = 3 * foot
mile = 1760 * yard
mil = 0.001*inch
pt = point = inch / 72 # typography
survey_foot = 1200.0 / 3937
survey_mile = 5280.0 * survey_foot
nautical_mile = 1852.0
fermi = 1e-15
angstrom = 1e-10
micron = 1e-6
au = astronomical_unit = 149597870691.0
light_year = Julian_year * c
parsec = au / arcsec
# pressure in pascal
atm = atmosphere = 101325.0
bar = 1e5
torr = mmHg = atm / 760
inchHg = mmHg*inch*1000
torr_inv = 1.0/torr
psi = pound * g / (inch * inch)
atm_inv = atmosphere_inv = 1.0/atm
torr_inv = mmHg_inv = 1.0/torr
psi_inv = 1.0/psi
# area in meter**2
hectare = 1e4
acre = 43560 * foot*foot
# volume in meter**3
litre = liter = 1e-3
gallon = gallon_US = 231.0 * inch*inch*inch # US
# pint = gallon_US / 8
fluid_ounce = fluid_ounce_US = gallon_US / 128
bbl = barrel = 42.0 * gallon_US # for oil
gallon_imp = 4.54609e-3 # UK
fluid_ounce_imp = gallon_imp / 160.0
# speed in meter per second
kmh = 1e3 / hour
mph = mile / hour
mach = speed_of_sound = 340.5  # approximate value at 15 degrees Celsius and 1 atm
knot = nautical_mile / hour
# temperature in kelvin
zero_Celsius = 273.15
degree_Fahrenheit = 1.0/1.8 # only for differences
# energy in joule
eV = electron_volt = elementary_charge # * 1 Volt
calorie = calorie_th = 4.184
calorie_IT = 4.1868
erg = 1e-7
Btu_th = pound * degree_Fahrenheit * calorie_th / gram
Btu = Btu_IT = pound * degree_Fahrenheit * calorie_IT / gram
ton_TNT = 1e9 * calorie_th
# Wh = watt_hour
# power in watt
hp = horsepower = 550.0 * foot * pound * g
# force in newton
dyn = dyne = 1e-5
lbf = pound_force = pound * g
kgf = kilogram_force = g # * 1 kg
deg2rad = 0.017453292519943295769  # Multiply an angle in degrees by this to get radians
rad2deg = 57.295779513082320877  # Multiply an angle in radians by this to get degrees
root_two = 1.4142135623730951
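# A brief usage sketch (not part of the vendorized module): conversions reduce
# to multiplying or dividing by these constants. Results shown are approximate.
#
#   >>> 100.0 * psi / atm       # 100 psi in atmospheres (~6.80)
#   >>> 60.0 * mph              # 60 mph in meters per second (~26.8)
#   >>> 25.0 + zero_Celsius     # 25 degrees Celsius in kelvin (298.15)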
|
CalebBell/fluids
|
fluids/constants/constants.py
|
Python
|
mit
| 4,181
|
[
"Avogadro"
] |
c6030e45da9ac1b7d7d77d2747516eb8ffa931513e7355413a76ec372cef7af1
|
"""
A setuptools based setup module. This file originally came from
https://github.com/pypa/sampleproject/blob/master/setup.py
See these pages for more details:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
To use this file to install this project as an editable package, change to this directory and run
pip install -e .
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Get the long description from the README file
with open(path.join(path.abspath(path.dirname(__file__)), 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='repo_template', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.1', # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='A Python module', # Required
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/carlsapp/repo-template/', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='Carl Sapp', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='CarlSapp@gmail.com', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='repo repository template', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages('src'), # Required
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[], # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
extras_require={ # Optional
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
package_data={ # Optional
'sample': ['package_data.dat'],
},
    # Although 'package_data' is the preferred approach, in some cases you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
entry_points={ # Optional
'console_scripts': [
'sample=sample:main',
],
},
)
|
carlsapp/RepoTemplate
|
Python/setup.py
|
Python
|
lgpl-3.0
| 6,761
|
[
"VisIt"
] |
08a7fe143881c59f8891c4c7688ba71e7c62c666ffc4abf252cee13b50cc5dd0
|
#######################################################################################
# Python-code: SAINT pre-processing from MaxQuant "Samples Report" output
# Author: Brent Kuenzi
#######################################################################################
# This program reads in a raw MaxQuant "Samples Report" output and a user generated
# bait file and autoformats it into prey and interaction files for SAINTexpress
# analysis
#######################################################################################
# Copyright (C) Brent Kuenzi.
# Permission is granted to copy, distribute and/or modify this document
# under the terms of the GNU Free Documentation License, Version 1.3
# or any later version published by the Free Software Foundation;
# with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
# A copy of the license is included in the section entitled "GNU
# Free Documentation License".
#######################################################################################
## REQUIRED INPUT ##
# 1) infile: MaxQuant "Samples Report" output
# 2) baitfile: SAINT formatted bait file generated in Galaxy
# 3) fasta_db: fasta database for use (defaults to SwissProt_HUMAN_2014_08.fasta)
# 4) prey: Y or N for generating a prey file
# 5) make_bait: String of bait names, assignment, and test or control boolean
#######################################################################################
import sys
import os
mq_file = sys.argv[1]
ins_path = "/galaxy-apostl-docker/tools/Moffitt_Tools/"
names_path = str(ins_path) + r"uniprot_names.txt"
cmd = (r"Rscript "+ str(ins_path) +"pre_process_protein_name_set.R " + str(mq_file) +
" " + str(names_path))
os.system(cmd)
infile = "./tukeys_output.txt"
# The MaxQuant "Samples Report" output.
prey = sys.argv[2]
# Y or N boolean from Galaxy.
fasta_db = sys.argv[3]
if fasta_db == "None":
fasta_db = str(ins_path) + "SwissProt_HUMAN_2014_08.fasta"
make_bait = sys.argv[6]
bait_bool = sys.argv[9]
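# Positional-argument layout, inferred from the sys.argv reads in this script
# (a hedged summary; the Galaxy tool wrapper is the authority on the order):
#   argv[1] MaxQuant "Samples Report"    argv[2] prey flag ('true'/'false')
#   argv[3] fasta database (or "None")   argv[4] inter.txt output name
#   argv[5] prey.txt output name         argv[6] bait string
#   argv[7] bait.txt output name         argv[9] bait_bool, argv[10] bait file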
def bait_create(baits, infile):
    # Takes the baits specified by the user, writes them into a bait file, and
    # checks that the requested baits are valid.
baits = make_bait.split()
i = 0
bait_file_tmp = open("bait.txt", "w")
order = []
bait_cache = []
while i < len(baits):
if baits[i+2] == "true":
T_C = "C"
else:
T_C = "T"
bait_line = baits[i] + "\t" + baits[i+1] + "\t" + T_C + "\n"
read_infile = open(infile, "r")
for input_line in read_infile :
input_line = input_line.replace("\"", "")
input_line = input_line.replace(r"Intensity.", "")
            # R coerces "-" into "."; change them back and remove "Intensity." from the bait names.
input_line = input_line.replace(r".", r"-")
temp = input_line.split()
if "mapped_protein" in str(temp):
if baits[i] in temp:
number_bait = temp.index(str(baits[i]))
number_bait = number_bait - 9
bait_cache.append((number_bait, str(bait_line)))
                    # Locates the bait names in the column names and stores the
                    # baits in the correct order in the cache; the "- 9" offset
                    # is because the baits start at the 9th column.
else:
print "Error: bad bait " + str(baits[i])
sys.exit()
else:
pass
i = i + 3
# Writes cache to Bait file.
bait_cache.sort()
for line in bait_cache:
bait_file_tmp.write(line[1])
bait_file_tmp.close()
if bait_bool == 'false':
bait_create(make_bait, infile)
baitfile = "bait.txt"
else:
bait_temp_file = open(sys.argv[10], 'r')
bait_cache = bait_temp_file.readlines()
bait_file_tmp = open("bait.txt", "wr")
for line in bait_cache:
bait_file_tmp.write(line)
bait_file_tmp.close()
baitfile = "bait.txt"
class ReturnValue1(object):
def __init__(self, sequence, gene):
self.seqlength = sequence
self.genename = gene
class ReturnValue2(object):
def __init__(self, getdata, getproteins, getheader):
self.data = getdata
self.proteins = getproteins
self.header = getheader
def main(MaxQuant_input, make_bait):
#bait_check(baitfile, MaxQuant_input)
make_inter(MaxQuant_input)
if prey == 'true':
make_prey(MaxQuant_input)
no_error_inter(MaxQuant_input)
os.rename('prey.txt', sys.argv[5])
elif prey == 'false':
if os.path.isfile('error proteins.txt') == True:
no_error_inter(MaxQuant_input)
pass
    else:
        sys.exit("Invalid Prey Argument: expected 'true' or 'false'")
os.rename('inter.txt', sys.argv[4])
os.rename("bait.txt", sys.argv[7])
def get_info(uniprot_accession_in):
# Get aa lengths and gene name.
error = open('error proteins.txt', 'a+')
data = open(fasta_db, 'r')
data_lines = data.readlines()
db_len = len(data_lines)
seqlength = 0
count = 0
for data_line in data_lines:
if ">sp" in data_line:
if uniprot_accession_in == data_line.split("|")[1]:
match = count + 1
if 'GN=' in data_line:
lst = data_line.split('GN=')
lst2 = lst[1].split(' ')
genename = lst2[0]
if 'GN=' not in data_line:
genename = 'NA'
while ">sp" not in data_lines[match]:
if match <= db_len:
seqlength = seqlength + len(data_lines[match].strip())
match = match + 1
else:
break
return ReturnValue1(seqlength, genename)
count = count + 1
if seqlength == 0:
error.write(uniprot_accession_in + '\t' + "Uniprot not in Fasta" + '\n')
        error.close()
seqlength = 'NA'
genename = 'NA'
return ReturnValue1(seqlength, genename)
def readtab(infile):
with open(infile, 'r') as input_file:
# Read in tab-delim text file.
output = []
for input_line in input_file:
input_line = input_line.strip()
temp = input_line.split('\t')
output.append(temp)
return output
def read_MaxQuant(MaxQuant_input):
# Get data, proteins and header from MaxQuant output.
dupes = readtab(MaxQuant_input)
header_start = 0
header = dupes[header_start]
for var_MQ in header:
var_MQ = var_MQ.replace(r"\"", "")
var_MQ = var_MQ.replace(r"Intensity.", r"")
var_MQ = var_MQ.replace(r".", r"-")
data = dupes[header_start+1:len(dupes)]
# Cut off blank line and END OF FILE.
proteins = []
for protein in data:
proteins.append(protein[0])
return ReturnValue2(data, proteins, header)
def make_inter(MaxQuant_input):
bait = readtab(baitfile)
data = read_MaxQuant(MaxQuant_input).data
header = read_MaxQuant(MaxQuant_input).header
proteins = read_MaxQuant(MaxQuant_input).proteins
bait_index = []
for bait_item in bait:
bait_index.append(header.index("mapped_protein") + 1)
# Find just the baits defined in bait file.
with open('inter.txt', 'w') as y:
a = 0; l = 0
for bb in bait:
for lst in data:
y.write(header[bait_index[l]] + '\t' + bb[1] + '\t' + proteins[a] + '\t'
+ lst[bait_index[l]] + '\n')
a += 1
if a == len(proteins):
a = 0; l += 1
def make_prey(MaxQuant_input):
proteins = read_MaxQuant(MaxQuant_input).proteins
output_file = open("prey.txt", 'w')
for a in proteins:
a = a.replace("\n", "")
# Remove \n for input into function.
a = a.replace("\r", "")
# Ditto for \r.
seq = get_info(a).seqlength
GN = get_info(a).genename
if seq != 'NA':
output_file.write(a+"\t"+str(seq)+ "\t" + str(GN) + "\n")
output_file.close()
def no_error_inter(MaxQuant_input):
# Remake inter file without protein errors from Uniprot.
err = readtab("error proteins.txt")
bait = readtab(baitfile)
data = read_MaxQuant(MaxQuant_input).data
header = read_MaxQuant(MaxQuant_input).header
header = [MQ_var.replace(r"\"", "") for MQ_var in header]
header = [MQ_var.replace(r"Intensity.", r"") for MQ_var in header]
header = [MQ_var.replace(r".", r"-") for MQ_var in header]
bait_index = []
for bait_item in bait:
bait_index.append(header.index(bait_item[0]))
proteins = read_MaxQuant(MaxQuant_input).proteins
errors = []
for e in err:
errors.append(e[0])
with open('inter.txt', 'w') as input_file:
l = 0; a = 0
for bb in bait:
for lst in data:
if proteins[a] not in errors:
input_file.write(header[bait_index[l]] + '\t' + bb[1] + '\t' + proteins[a] + '\t'
+ lst[bait_index[l]] + '\n')
a += 1
if a == len(proteins):
l += 1; a = 0
def bait_check(bait, MaxQuant_input):
# Check that bait names share header titles.
bait_in = readtab(bait)
header = read_MaxQuant(MaxQuant_input).header
for bait in bait_in:
if bait[0] not in header:
sys.exit("Bait must share header titles with MaxQuant output")
if __name__ == '__main__':
main(infile, make_bait)
|
bornea/APOSTL
|
SAINT_preprocessing/SAINT_preprocessing_v6_mq_pep.py
|
Python
|
gpl-2.0
| 9,916
|
[
"Galaxy"
] |
c969135747f6893dec7e1253b31793e6410ae982a531a594243133ad4fcf6f1a
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This plugin is used for gulp.js, the streaming build system.
The plugin uses gulp to drive the build. It requires a gulpfile.js in
the root of the source.
This plugin uses the common plugin keywords as well as those for "sources".
For more information check the 'plugins' topic for the former and the
'sources' topic for the latter.
Additionally, this plugin uses the following plugin-specific keywords:
- gulp-tasks:
(list)
A list of gulp tasks to run.
- node-engine:
(string)
The version of nodejs to use for the build.
"""
import logging
import os
import shutil
import snapcraft
from snapcraft import sources
from snapcraft.plugins import nodejs
logger = logging.getLogger(__name__)
class GulpPlugin(snapcraft.BasePlugin):
@classmethod
def schema(cls):
schema = super().schema()
node_properties = nodejs.NodePlugin.schema()['properties']
schema['properties']['gulp-tasks'] = {
'type': 'array',
            'minItems': 1,
'uniqueItems': True,
'items': {
'type': 'string'
},
'default': [],
}
schema['properties']['node-engine'] = node_properties['node-engine']
schema['required'].append('gulp-tasks')
# Inform Snapcraft of the properties associated with building. If these
# change in the YAML Snapcraft will consider the build step dirty.
schema['build-properties'].append('gulp-tasks')
# Inform Snapcraft of the properties associated with pulling. If these
# change in the YAML Snapcraft will consider the build step dirty.
schema['pull-properties'].append('node-engine')
return schema
def __init__(self, name, options, project):
super().__init__(name, options, project)
self._npm_dir = os.path.join(self.partdir, 'npm')
self._nodejs_tar = sources.Tar(nodejs.get_nodejs_release(
self.options.node_engine), self._npm_dir)
def pull(self):
super().pull()
os.makedirs(self._npm_dir, exist_ok=True)
self._nodejs_tar.download()
def clean_pull(self):
super().clean_pull()
# Remove the npm directory (if any)
if os.path.exists(self._npm_dir):
shutil.rmtree(self._npm_dir)
def build(self):
super().build()
self._nodejs_tar.provision(
self._npm_dir, clean_target=False, keep_tarball=True)
env = os.environ.copy()
env['PATH'] = '{}:{}'.format(
os.path.join(self._npm_dir, 'bin'), env['PATH'])
env['NPM_CONFIG_PREFIX'] = self._npm_dir
self.run(['npm', 'install', '-g', 'gulp-cli'], env=env)
if os.path.exists(os.path.join(self.builddir, 'package.json')):
self.run(['npm', 'install', '--only-development'], env=env)
self.run([
os.path.join(self._npm_dir, 'bin', 'gulp')] +
self.options.gulp_tasks, env=env)
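# A hedged usage sketch (not part of the plugin source): a snapcraft.yaml part
# built with this plugin might look like the following; the part name, task
# list and node-engine value are illustrative only.
#
#   parts:
#     webapp:
#       plugin: gulp
#       source: .
#       gulp-tasks: [build]
#       node-engine: "4.4.4"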
|
stgraber/snapcraft
|
snapcraft/plugins/gulp.py
|
Python
|
gpl-3.0
| 3,643
|
[
"GULP"
] |
9830f04ceca4d8d645872ddd3c58e86f46e0f8689ef7c73c4e561f3321862657
|
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import os
from collections import OrderedDict
from datetime import datetime
from commoncode.system import on_posix
from commoncode.functional import memoize
def is_link(location):
"""
Return True if `location` is a symbolic link.
"""
return location and os.path.islink(location)
def is_file(location):
"""
Return True if `location` is a file.
"""
return (location and os.path.isfile(location)
and not is_link(location) and not is_broken_link(location))
def is_dir(location):
"""
Return True if `location` is a directory.
"""
return (location and os.path.isdir(location) and not is_file(location)
and not is_link(location) and not is_broken_link(location))
def is_regular(location):
"""
Return True if `location` is regular. A regular location is a file or a
dir and not a special file or symlink.
"""
return location and (is_file(location) or is_dir(location))
def is_special(location):
"""
Return True if `location` is a special file . A special file is not a
regular file, i.e. anything such as a broken link, block file, fifo,
socket, character device or else.
"""
return not is_regular(location)
def is_broken_link(location):
"""
Return True if `location` is a broken link.
"""
# always false on windows, until Python supports junctions/links
if on_posix and is_link(location):
target = get_link_target(location)
target_loc = os.path.join(os.path.dirname(location), target)
return target and not os.path.exists(target_loc)
def get_link_target(location):
"""
Return the link target for `location` if this is a Link or an empty
string.
"""
target = ''
# always false on windows, until Python supports junctions/links
if on_posix and is_link(location):
try:
# return false on OSes not supporting links
target = os.readlink(location) # @UndefinedVariable
except UnicodeEncodeError: # @UnusedVariable
# location is unicode but readlink can fail in some cases
pass
return target
# Map of type checker function -> short type code
# The order of types check matters: link -> file -> directory -> special
TYPES = OrderedDict([(is_link, ('l', 'link',)),
(is_file, ('f', 'file',)),
(is_dir, ('d', 'directory',)),
(is_special, ('s', 'special',))])
def get_type(location, short=True):
"""
Return the type of the `location` or None if it does not exist.
Return the short form (single character) or long form if short=False
"""
if location:
for type_checker in TYPES:
tc = type_checker(location)
if tc:
short_form, long_form = TYPES[type_checker]
return short and short_form or long_form
def is_readable(location):
"""
Return True if the file at location has readable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.X_OK)
else:
return os.access(location, os.R_OK)
def is_writable(location):
"""
Return True if the file at location has writeable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.W_OK | os.X_OK)
else:
return os.access(location, os.R_OK | os.W_OK)
def is_executable(location):
"""
Return True if the file at location has executable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.W_OK | os.X_OK)
else:
return os.access(location, os.X_OK)
def is_rwx(location):
"""
Return True if the file at location has read, write and executable
permission set. Does not follow links.
"""
return is_readable(location) and is_writable(location) and is_executable(location)
def get_last_modified_date(location):
"""
    Return the last modified date stamp of a file in YYYYMMDD format. The date
of non-files (dir, links, special) is always an empty string.
"""
yyyymmdd = ''
if is_file(location):
utc_date = datetime.isoformat(
datetime.utcfromtimestamp(os.path.getmtime(location))
)
yyyymmdd = utc_date[:10]
return yyyymmdd
counting_functions = {
'file_count': lambda _: 1,
'file_size': os.path.getsize,
}
@memoize
def counter(location, counting_function):
"""
Return a count for a single file or a cumulative count for a directory
tree at `location`.
Get a callable from the counting_functions registry using the
`counting_function` string. Call this callable with a `location` argument
    to determine the count value for a single file. This allows memoization
with hashable arguments.
Only regular files and directories have a count. The count for a directory
is the recursive count sum of the directory file and directory
descendants.
Any other file type such as a special file or link has a zero size. Does
not follow links.
"""
if not (is_file(location) or is_dir(location)):
return 0
count = 0
if is_file(location):
count_fun = counting_functions[counting_function]
return count_fun(location)
elif is_dir(location):
count += sum(counter(os.path.join(location, p), counting_function)
for p in os.listdir(location))
return count
def get_file_count(location):
"""
Return the cumulative number of files in the directory tree at `location`
or 1 if `location` is a file. Only regular files are counted. Everything
    else has a zero count.
"""
return counter(location, 'file_count')
def get_size(location):
"""
Return the size in bytes of a file at `location` or if `location` is a
directory, the cumulative size of all files in this directory tree. Only
regular files have a size. Everything else has a zero size.
"""
return counter(location, 'file_size')
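# A short usage sketch (not part of the module); the paths are illustrative:
#
#   >>> get_type('/tmp', short=False)   # 'directory'
#   >>> get_type('/etc/hostname')       # 'f'
#   >>> get_file_count('/tmp')          # recursive count of regular files
#   >>> get_size('/tmp')                # cumulative size of regular files, in bytes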
|
vinodpanicker/scancode-toolkit
|
src/commoncode/filetype.py
|
Python
|
apache-2.0
| 7,643
|
[
"VisIt"
] |
2c1fc5eede266e5bca6bc8632a1f6245a6be9f3feacb2c8cd0ecba2f5cb169d2
|
"""
Petrophysically guided inversion (PGI): Linear example
======================================================
We do a comparison between the classic Tikhonov inversion
and our formulation of a petrophysically constrained inversion.
We explore it through the UBC linear example.
"""
#####################
# Tikhonov Inversion#
#####################
import discretize as Mesh
from SimPEG import (
simulation,
maps,
data_misfit,
directives,
optimization,
regularization,
inverse_problem,
inversion,
utils,
)
import numpy as np
import matplotlib.pyplot as plt
# Random seed for reproducibility
np.random.seed(1)
# Mesh
N = 100
mesh = Mesh.TensorMesh([N])
# Survey design parameters
nk = 20
jk = np.linspace(1.0, 60.0, nk)
p = -0.25
q = 0.25
# Physics
def g(k):
return np.exp(p * jk[k] * mesh.cell_centers_x) * np.cos(
np.pi * q * jk[k] * mesh.cell_centers_x
)
G = np.empty((nk, mesh.nC))
for i in range(nk):
G[i, :] = g(i)
# True model
mtrue = np.zeros(mesh.nC)
mtrue[mesh.cell_centers_x > 0.2] = 1.0
mtrue[mesh.cell_centers_x > 0.35] = 0.0
t = (mesh.cell_centers_x - 0.65) / 0.25
indx = np.abs(t) < 1
mtrue[indx] = -(((1 - t ** 2.0) ** 2.0)[indx])
# the blocky model below replaces the smoother model constructed above
mtrue = np.zeros(mesh.nC)
mtrue[mesh.cell_centers_x > 0.3] = 1.0
mtrue[mesh.cell_centers_x > 0.45] = -0.5
mtrue[mesh.cell_centers_x > 0.6] = 0
# SimPEG problem and survey
prob = simulation.LinearSimulation(mesh, G=G, model_map=maps.IdentityMap())
std = 0.01
survey = prob.make_synthetic_data(mtrue, relative_error=std, add_noise=True)
# Setup the inverse problem
reg = regularization.Tikhonov(mesh, alpha_s=1.0, alpha_x=1.0)
dmis = data_misfit.L2DataMisfit(data=survey, simulation=prob)
opt = optimization.ProjectedGNCG(maxIter=10, maxIterCG=50, tolCG=1e-4)
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)
directiveslist = [
directives.BetaEstimate_ByEig(beta0_ratio=1e-5),
directives.BetaSchedule(coolingFactor=10.0, coolingRate=2),
directives.TargetMisfit(),
]
inv = inversion.BaseInversion(invProb, directiveList=directiveslist)
m0 = np.zeros_like(mtrue)
mnormal = inv.run(m0)
#########################################
# Petrophysically constrained inversion #
#########################################
# fit a Gaussian Mixture Model with n components
# on the true model to simulate the laboratory
# petrophysical measurements
n = 3
clf = utils.WeightedGaussianMixture(
mesh=mesh,
n_components=n,
covariance_type="full",
max_iter=100,
n_init=3,
reg_covar=5e-4,
)
clf.fit(mtrue.reshape(-1, 1))
# Petrophyically constrained regularization
reg = utils.make_PGI_regularization(
gmmref=clf,
mesh=mesh,
alpha_s=1.0,
alpha_x=1.0,
)
# Optimization
opt = optimization.ProjectedGNCG(maxIter=10, maxIterCG=50, tolCG=1e-4)
opt.remember("xc")
# Setup new inverse problem
invProb = inverse_problem.BaseInvProblem(dmis, reg, opt)
# directives
Alphas = directives.AlphasSmoothEstimate_ByEig(alpha0_ratio=10.0, verbose=True)
beta = directives.BetaEstimate_ByEig(beta0_ratio=1e-6)
betaIt = directives.PGI_BetaAlphaSchedule(
verbose=True,
coolingFactor=2.0,
warmingFactor=1.0,
tolerance=0.1,
update_rate=1,
progress=0.2,
)
targets = directives.MultiTargetMisfits(verbose=True)
petrodir = directives.PGI_UpdateParameters()
addmref = directives.PGI_AddMrefInSmooth(verbose=True)
# Setup Inversion
inv = inversion.BaseInversion(
invProb, directiveList=[Alphas, beta, petrodir, targets, addmref, betaIt]
)
# Initial model same as for Tikhonov
mcluster = inv.run(m0)
# Final Plot
fig, axes = plt.subplots(1, 3, figsize=(12 * 1.2, 4 * 1.2))
for i in range(prob.G.shape[0]):
axes[0].plot(prob.G[i, :])
axes[0].set_title("Columns of matrix G")
axes[1].hist(mtrue, bins=20, linewidth=3.0, density=True, color="k")
axes[1].set_xlabel("Model value")
axes[1].set_xlabel("Occurence")
axes[1].hist(mnormal, bins=20, density=True, color="b")
axes[1].hist(mcluster, bins=20, density=True, color="r")
axes[1].legend(["Mtrue Hist.", "L2 Model Hist.", "PGI Model Hist."])
axes[2].plot(mesh.cell_centers_x, mtrue, color="black", linewidth=3)
axes[2].plot(mesh.cell_centers_x, mnormal, color="blue")
axes[2].plot(mesh.cell_centers_x, mcluster, "r-")
axes[2].plot(mesh.cell_centers_x, invProb.reg.objfcts[0].mref, "r--")
axes[2].legend(("True Model", "L2 Model", "PGI Model", "Learned Mref"))
axes[2].set_ylim([-2, 2])
plt.show()
|
simpeg/simpeg
|
examples/10-pgi/plot_inv_0_PGI_Linear_1D.py
|
Python
|
mit
| 4,423
|
[
"Gaussian"
] |
e80dd7cbc3318b0b0f040d35269b0b74e14b0922d79454e957633779ad5862a4
|
import neuroelectro.models as m
__author__ = 'shreejoy'
def update_data_table_stat(data_table_object):
"""adds intermediate fields to data table stat object based on concept map objects associated
with data table"""
data_table_stat = m.DataTableStat.objects.get_or_create(data_table = data_table_object)[0]
# assign curating users by looking at history concepts assoc with table
robot_user = m.get_robot_user()
user_list = data_table_object.get_curating_users()
if robot_user in user_list:
user_list.remove(robot_user)
existing_users = data_table_stat.curating_users.all()
for u in user_list:
if u in existing_users:
continue
else:
data_table_stat.curating_users.add(u)
# assign last curated on by looking at curating users curation times and getting most recent
concept_maps = data_table_object.get_concept_maps()
if len(concept_maps) == 0:
return
curated_on_dates = []
for cm in concept_maps:
curated_on = cm.history.latest().history_date
curated_on_dates.append(curated_on)
curated_on = max(curated_on_dates)
# update last curated on if different
    if data_table_stat.last_curated_on != curated_on:
data_table_stat.last_curated_on = curated_on
# count number of unique ncms, ecms, nedms associated with table
data_table_stat.num_ecms = m.EphysProp.objects.filter(ephysconceptmap__source__data_table = data_table_object).distinct().count()
data_table_stat.num_ncms = m.Neuron.objects.filter(neuronconceptmap__source__data_table = data_table_object).distinct().count()
data_table_stat.num_nedms = m.NeuronEphysDataMap.objects.filter(source__data_table = data_table_object).distinct().count()
# define times validated here as min num of times validated per neuron concept map
concept_maps = data_table_object.get_neuron_concept_maps()
times_validated_per_neuron = []
for cm in concept_maps:
tv = cm.times_validated
times_validated_per_neuron.append(tv)
if len(times_validated_per_neuron) > 0:
data_table_stat.times_validated = int(min(times_validated_per_neuron))
data_table_stat.save()
return data_table_stat
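# A hedged usage sketch (not part of the module): refreshing the stats for all
# tables, e.g. from a maintenance script. A DataTable model with a default
# manager is assumed here, mirroring the model access used above.
#
# for dt in m.DataTable.objects.all():
#     update_data_table_stat(dt)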
|
neuroelectro/neuroelectro_org
|
db_functions/update_data_table_stats.py
|
Python
|
gpl-2.0
| 2,245
|
[
"NEURON"
] |
e830493beb130315d002a603b8c5c8a17684e96474d4e89ce3588866223b2197
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-06-12 18:20:03
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-07-05 14:22:58
from __future__ import print_function, division, absolute_import
from marvin import config
from marvin.tools.cube import Cube
import pytest
import os
class TestMisc(object):
@pytest.mark.parametrize('mpl, drpver', [('MPL-4', 'v1_5_1')])
def test_custom_drpall(self, galaxy, mpl, drpver):
assert galaxy.drpall in config.drpall
cube = Cube(plateifu=galaxy.plateifu, release=mpl)
drpall = 'drpall-{0}.fits'.format(drpver)
assert cube._release == mpl
assert cube._drpver == drpver
assert os.path.exists(cube._drpall) is True
assert drpall in cube._drpall
assert galaxy.drpall in config.drpall
|
sdss/marvin
|
tests/misc/test_misc.py
|
Python
|
bsd-3-clause
| 895
|
[
"Brian",
"Galaxy"
] |
864b684f8cb1fe3eddd0b0f05726b588e419268a10618db0d6b2dfc231ababe8
|
import os
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import numpy as np
from spglib import (get_ir_reciprocal_mesh,
get_stabilized_reciprocal_mesh,
get_symmetry_dataset)
from vasp import read_vasp
data_dir = os.path.dirname(os.path.abspath(__file__))
result_ir_rec_mesh = (""" 0 0 0 0
1 1 0 0
2 2 0 0
1 -1 0 0
1 0 1 0
5 1 1 0
6 2 1 0
5 -1 1 0
2 0 2 0
6 1 2 0
10 2 2 0
6 -1 2 0
1 0 -1 0
5 1 -1 0
6 2 -1 0
5 -1 -1 0
1 0 0 1
5 1 0 1
6 2 0 1
5 -1 0 1
5 0 1 1
21 1 1 1
22 2 1 1
21 -1 1 1
6 0 2 1
22 1 2 1
26 2 2 1
22 -1 2 1
5 0 -1 1
21 1 -1 1
22 2 -1 1
21 -1 -1 1
2 0 0 2
6 1 0 2
10 2 0 2
6 -1 0 2
6 0 1 2
22 1 1 2
26 2 1 2
22 -1 1 2
10 0 2 2
26 1 2 2
42 2 2 2
26 -1 2 2
6 0 -1 2
22 1 -1 2
26 2 -1 2
22 -1 -1 2
1 0 0 -1
5 1 0 -1
6 2 0 -1
5 -1 0 -1
5 0 1 -1
21 1 1 -1
22 2 1 -1
21 -1 1 -1
6 0 2 -1
22 1 2 -1
26 2 2 -1
22 -1 2 -1
5 0 -1 -1
21 1 -1 -1
22 2 -1 -1
21 -1 -1 -1""", """ 0 0 0 0
1 1 0 0
2 2 0 0
1 -1 0 0
1 0 1 0
5 1 1 0
5 2 1 0
1 -1 1 0
2 0 2 0
5 1 2 0
2 2 2 0
5 -1 2 0
1 0 -1 0
1 1 -1 0
5 2 -1 0
5 -1 -1 0
16 0 0 1
17 1 0 1
18 2 0 1
17 -1 0 1
17 0 1 1
21 1 1 1
21 2 1 1
17 -1 1 1
18 0 2 1
21 1 2 1
18 2 2 1
21 -1 2 1
17 0 -1 1
17 1 -1 1
21 2 -1 1
21 -1 -1 1""")
result_ir_rec_mesh_distortion = (""" 0 0 0 0
1 1 0 0
1 -1 0 0
3 0 1 0
4 1 1 0
4 -1 1 0
6 0 2 0
7 1 2 0
7 -1 2 0
3 0 -1 0
4 1 -1 0
4 -1 -1 0
3 0 0 1
4 1 0 1
4 -1 0 1
15 0 1 1
16 1 1 1
16 -1 1 1
18 0 2 1
19 1 2 1
19 -1 2 1
15 0 -1 1
16 1 -1 1
16 -1 -1 1
6 0 0 2
7 1 0 2
7 -1 0 2
18 0 1 2
19 1 1 2
19 -1 1 2
30 0 2 2
31 1 2 2
31 -1 2 2
18 0 -1 2
19 1 -1 2
19 -1 -1 2
3 0 0 -1
4 1 0 -1
4 -1 0 -1
15 0 1 -1
16 1 1 -1
16 -1 1 -1
18 0 2 -1
19 1 2 -1
19 -1 2 -1
15 0 -1 -1
16 1 -1 -1
16 -1 -1 -1""", """ 0 0 0 0
1 1 0 0
1 -1 0 0
3 0 1 0
4 1 1 0
5 -1 1 0
6 0 2 0
7 1 2 0
8 -1 2 0
6 0 -2 0
8 1 -2 0
7 -1 -2 0
3 0 -1 0
5 1 -1 0
4 -1 -1 0""", """ 0 0 0 0
1 1 0 0
1 -1 0 0
3 0 1 0
4 1 1 0
4 -1 1 0
3 0 2 0
4 1 2 0
4 -1 2 0
0 0 -1 0
1 1 -1 0
1 -1 -1 0
12 0 0 1
13 1 0 1
13 -1 0 1
15 0 1 1
16 1 1 1
16 -1 1 1
15 0 2 1
16 1 2 1
16 -1 2 1
12 0 -1 1
13 1 -1 1
13 -1 -1 1
24 0 0 2
25 1 0 2
25 -1 0 2
27 0 1 2
28 1 1 2
28 -1 1 2
27 0 2 2
28 1 2 2
28 -1 2 2
24 0 -1 2
25 1 -1 2
25 -1 -1 2
12 0 0 -1
13 1 0 -1
13 -1 0 -1
15 0 1 -1
16 1 1 -1
16 -1 1 -1
15 0 2 -1
16 1 2 -1
16 -1 2 -1
12 0 -1 -1
13 1 -1 -1
13 -1 -1 -1""", """ 0 0 0 0
1 1 0 0
2 -1 0 0
3 0 1 0
4 1 1 0
5 -1 1 0
6 0 2 0
7 1 2 0
7 -1 2 0
3 0 -2 0
5 1 -2 0
4 -1 -2 0
0 0 -1 0
2 1 -1 0
1 -1 -1 0""")
class TestReciprocalMesh(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_get_ir_reciprocal_mesh(self):
file_and_mesh = (
[os.path.join(data_dir, "data", "cubic", "POSCAR-217"), [4, 4, 4]],
[os.path.join(data_dir, "data", "hexagonal", "POSCAR-182"),
[4, 4, 2]])
i = 0
for fname, mesh in file_and_mesh:
cell = read_vasp(fname)
ir_rec_mesh = get_ir_reciprocal_mesh(mesh, cell)
(mapping_table, grid_address) = ir_rec_mesh
# for gp, ga in zip(mapping_table, grid_address):
# print("%4d %3d %3d %3d" % (gp, ga[0], ga[1], ga[2]))
# print("")
data = np.loadtxt(StringIO(result_ir_rec_mesh[i]), dtype='intc')
np.testing.assert_equal(data[:, 0], mapping_table)
np.testing.assert_equal(data[:, 1:4], grid_address)
i += 1
def test_get_stabilized_reciprocal_mesh(self):
file_and_mesh = (
[os.path.join(data_dir, "data", "cubic", "POSCAR-217"), [4, 4, 4]],
[os.path.join(data_dir, "data", "hexagonal", "POSCAR-182"),
[4, 4, 2]])
i = 0
for fname, mesh in file_and_mesh:
cell = read_vasp(fname)
rotations = get_symmetry_dataset(cell)['rotations']
ir_rec_mesh = get_stabilized_reciprocal_mesh(mesh, rotations)
(mapping_table, grid_address) = ir_rec_mesh
data = np.loadtxt(StringIO(result_ir_rec_mesh[i]), dtype='intc')
np.testing.assert_equal(data[:, 0], mapping_table)
np.testing.assert_equal(data[:, 1:4], grid_address)
i += 1
def test_get_ir_reciprocal_mesh_distortion(self):
file_and_mesh = (
[os.path.join(data_dir, "data", "cubic", "POSCAR-217"), [3, 4, 4]],
[os.path.join(data_dir, "data", "hexagonal", "POSCAR-182"),
[3, 5, 1]])
i = 0
for is_shift in ([0, 0, 0], [0, 1, 0]):
for fname, mesh in file_and_mesh:
cell = read_vasp(fname)
ir_rec_mesh = get_ir_reciprocal_mesh(mesh, cell,
is_shift=is_shift)
(mapping_table, grid_address) = ir_rec_mesh
# for gp, ga in zip(mapping_table, grid_address):
# print("%4d %3d %3d %3d" % (gp, ga[0], ga[1], ga[2]))
# print("")
data = np.loadtxt(StringIO(result_ir_rec_mesh_distortion[i]),
dtype='intc')
np.testing.assert_equal(data[:, 0], mapping_table)
np.testing.assert_equal(data[:, 1:4], grid_address)
i += 1
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestReciprocalMesh)
unittest.TextTestRunner(verbosity=2).run(suite)
# unittest.main()
|
jochym/spglib
|
python/test/test_reciprocal_mesh.py
|
Python
|
bsd-3-clause
| 7,402
|
[
"VASP"
] |
39a2198e40d40e152b2ef42536c083420a550ee901cfccecd7173c17b674dd7f
|
from __future__ import (absolute_import, division, print_function)
import os
import sys
import netCDF4
import ruamel.yaml as yaml
from six import raise_from
from docopt import docopt
__all__ = [
'main',
'build'
]
__doc__ = """
Generate ncml based on a yaml file.
Usage:
yaml2ncml INFILE [--output=OUTFILE]
yaml2ncml (-h | --help | --version)
Examples:
yaml2ncml roms.yaml
yaml2ncml roms.yaml --output=roms.ncml
Arguments:
file yaml file.
Options:
-h --help Show this screen.
-v --version Show version.
"""
def str_att(name, value):
if isinstance(value, list):
try:
value = ','.join(value)
except TypeError as e:
raise_from(ValueError('Expected `str` got {!r}'.format(value)), e)
msg = ' <attribute name="{:s}" type="String" value="{:s}"/>\n'
return msg.format(name, value)
def header():
text = '<?xml version="1.0" encoding="UTF-8"?>\n<netcdf xmlns='
text += '"http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2">\n'
text += str_att('Conventions', 'CF-1.6, SGRID-0.1, ACDD-1.3')
text += str_att('cdm_data_type', 'Grid')
return text
def footer(text):
text += '</netcdf>\n'
return text
def add_global_atts(text, a):
d = a['dataset']
for key, value in d.items():
# Handle simple attribute pairs first.
if key in ['id', 'license', 'summary', 'title', 'project',
'naming_authority', 'references', 'acknowledgments']:
text += str_att(key, value)
elif key in ['creator', 'publisher']:
email = value.get("email", None)
if email:
text += str_att('_'.join([key, 'email']), email)
url = value.get("url", None)
if url:
text += str_att('_'.join([key, 'url']), url)
name = value.get("name", None)
if name:
text += str_att('_'.join([key, 'name']), name)
elif key in ['contributor']:
role = value.get("role", None)
text += str_att('_'.join([key, 'role']), role)
email = value.get("email", None)
if email:
text += str_att('_'.join([key, 'email']), email)
url = value.get("url", None)
if url:
text += str_att('_'.join([key, 'url']), url)
name = value.get("name", None)
if name:
text += str_att('_'.join([key, 'name']), name)
return text
def add_bed_coord(text, a):
ncfile = os.path.join(a['aggregation']['dir'],
a['aggregation']['sample_file'])
nc = netCDF4.Dataset(ncfile)
bed_coord_var = """<variable name="Nbed" shape="Nbed" type="double">
<attribute name="long_name" value="pseudo coordinate at seabed points"/>
<attribute name="standard_name" value="ocean_sigma_coordinate"/>
<attribute name="positive" value="up"/>
<attribute name="formula_terms" value="sigma: Nbed eta: zeta depth: h"/>
<values start="-1.0" increment="-0.01"/>
</variable>\n """
if 'Nbed' in nc.dimensions.keys():
text += bed_coord_var
return text
def add_var_atts(text, a):
ncfile = os.path.join(a['aggregation']['dir'],
a['aggregation']['sample_file'])
nc = netCDF4.Dataset(ncfile)
ncv = nc.variables
# Get a list of all variables more than 1D.
vars = [var for var, vart in ncv.items() if vart.ndim > 1]
# identify all the rho, u and v vars
rho_vars = [var for var in vars if 'eta_rho' in
ncv[var].dimensions and 'xi_rho' in ncv[var].dimensions]
u_vars = [var for var in vars if 'eta_u' in
ncv[var].dimensions and 'xi_u' in ncv[var].dimensions]
v_vars = [var for var in vars if 'eta_v' in
ncv[var].dimensions and 'xi_v' in ncv[var].dimensions]
vars_all = set(vars)
vars_include = set(a['variables']['include'])
vars_exclude = set(a['variables']['exclude'])
# include/exclude only variables that actually occur in variable list
vars_include = vars_all.intersection(vars_include)
vars_exclude = vars_all.intersection(vars_exclude)
# If there are variables excluded, exclude them and keep all rest.
# If no variables are excluded, take just the included variables
# If no variables are included or excluded, take all variables (leave
# list of variables unchanged)
if vars_exclude:
vars_display = vars_all - vars_all.intersection(vars_exclude)
else:
if vars_include:
vars_display = vars_all.intersection(vars_include)
else:
vars_display = vars_all
# remove some variables we never want (if they exist)
Tobc = set(['Tobc_in', 'Tobc_out'])
vars_display = vars_display - vars_display.intersection(Tobc)
vars_display = list(vars_display)
# add the variable attributes: S-grid stuff, display=T|F, ...
for var in vars:
text += '<variable name="{:s}">\n'.format(var)
        # only emit standard_name when the variable has a CF mapping
        if var in cf:
            text += str_att('standard_name', cf[var])
text += str_att('grid', 'grid')
if 'Nbed' in ncv[var].dimensions:
text += str_att('coordinates', ncv[var].coordinates+' Nbed')
if var in vars_display:
text += str_att('display', 'True')
else:
text += str_att('display', 'False')
text += str_att('coverage_content_type', 'modelResult')
if var in rho_vars:
text += str_att('location', 'face')
elif var in u_vars:
text += str_att('location', 'edge1')
elif var in v_vars:
text += str_att('location', 'edge2')
text += '</variable>\n\n'
# write standard_name for time coordinate variable
var = 'ocean_time'
    # emit the block only when a CF mapping exists, so a missing entry cannot
    # leave a half-written <variable> element in the output
    if var in ncv.keys() and var in cf:
        text += '\n<variable name="{:s}">\n'.format(var)
        text += str_att('standard_name', cf[var])
        text += '</variable>\n\n'
nc.close()
return text
def write_grid_var(text):
grid_var = """<variable name="grid" type="int">
<attribute name="cf_role" value="grid_topology"/>
<attribute name="topology_dimension" type="int" value="2"/>
<attribute name="node_dimensions" value="xi_psi eta_psi"/>
<attribute name="face_dimensions"
value="xi_rho: xi_psi (padding: both) eta_rho: eta_psi (padding: both)"/>
<attribute name="edge1_dimensions" value="xi_u: xi_psi eta_u: eta_psi (padding: both)"/>
<attribute name="edge2_dimensions" value="xi_v: xi_psi (padding: both) eta_v: eta_psi"/>
<attribute name="node_coordinates" value="lon_psi lat_psi"/>
<attribute name="face_coordinates" value="lon_rho lat_rho"/>
<attribute name="edge1_coordinates" value="lon_u lat_u"/>
<attribute name="edge2_coordinates" value="lon_v lat_v"/>
<attribute name="vertical_dimensions" value="s_rho: s_w (padding: none)"/>
</variable>\n""" # noqa
text += grid_var
return text
def add_aggregation_scan(text, a):
agg = a['aggregation']
text += '<aggregation dimName="{:s}" type="joinExisting">\n'.format(
agg['time_var'])
text += '<scan location="{:s}" regExp="{:s}" subdirs="false"/>\n</aggregation>\n'\
.format(agg['dir'], agg['pattern']) # noqa
return text
# Map ROMS variables to CF standard_names.
cf = dict(ocean_time='time',
zeta='sea_surface_height_above_geopotential_datum',
temp='sea_water_potential_temperature',
salt='sea_water_salinity',
u='sea_water_x_velocity',
v='sea_water_y_velocity',
ubar='barotropic_sea_water_x_velocity',
vbar='barotropic_sea_water_y_velocity',
Hwave='sea_surface_wave_significant_height',
bed_thickness='sediment_bed_thickness') #sediment_bed_thickness not in CF standard_names
def build(yml):
text = header()
text = add_global_atts(text, yml)
text = add_var_atts(text, yml)
text = write_grid_var(text)
text = add_bed_coord(text, yml)
text = add_aggregation_scan(text, yml)
text = footer(text)
return text
def main():
args = docopt(__doc__, version='0.6.0')
fname = args.get('INFILE')
fout = args.get('--output', None)
with open(fname, 'r') as stream:
yml = yaml.load(stream, Loader=yaml.RoundTripLoader)
text = build(yml)
if fout:
with open(fout, 'w') as f:
f.write("{:s}".format(text))
else:
sys.stdout.write(text)
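# A hedged sketch of the YAML layout this script expects, inferred from the
# keys read in build() and the add_* helpers; all values are illustrative:
#
#   dataset:
#     id: my_roms_run
#     title: Example ROMS aggregation
#     creator: {name: Jane Doe, email: jane@example.com}
#   aggregation:
#     dir: /data/roms
#     sample_file: ocean_his_0001.nc
#     time_var: ocean_time
#     pattern: 'ocean_his_.*\.nc$'
#   variables:
#     include: [temp, salt]
#     exclude: []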
|
ocefpaf/yaml2ncml
|
yaml2ncml/yaml2ncml.py
|
Python
|
mit
| 8,605
|
[
"NetCDF"
] |
06a61d9b087b2ff67419f923182aeb141c803e59d60d5d859bea1628645ab587
|
"""
Setup Pylp.
Copyright (C) 2017 The Pylp Authors.
This file is under the MIT License.
"""
from setuptools import setup, find_packages
from pylp import __version__ as version
setup(
name = "pylp",
version = version,
author = "Guillaume Gonnet",
author_email = "gonnet.guillaume97@gmail.com",
description = "A Python task runner inspired by gulp.js",
long_description = open("README.rst").read(),
license = "MIT",
keywords = "pylp build task runner gulp",
url = "https://github.com/pylp/pylp",
packages = find_packages(),
python_requires = ">=3.5",
entry_points = {
"console_scripts" : ["pylp = pylp.cli.cli:launch_cli",]
},
classifiers = [
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"Topic :: Software Development :: Build Tools",
"Framework :: AsyncIO",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.5",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
],
)
|
pylp/pylp
|
setup.py
|
Python
|
mit
| 1,014
|
[
"GULP"
] |
8b605b7360f6766975fe59f0eb6e304b46f61b0b5087d2fd6082b4fa5ef887e2
|
#
# @file TestSBase.py
# @brief SBase unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# $Id$
# $HeadURL$
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestSBase.cpp
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
def wrapString(s):
return s
pass
class TestSBase(unittest.TestCase):
global S
S = None
def setUp(self):
self.S = libsbml.Model(2,4)
if (self.S == None):
pass
pass
def tearDown(self):
self.S = None
pass
def test_SBase_CVTerms(self):
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_IS)
cv.addResource( "foo")
self.assert_( self.S.getNumCVTerms() == 0 )
self.assert_( self.S.getCVTerms() == None )
self.S.setMetaId( "_id")
self.S.addCVTerm(cv)
self.assert_( self.S.getNumCVTerms() == 1 )
self.assert_( self.S.getCVTerms() != None )
self.assert_( self.S.getCVTerm(0) != cv )
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_addCVTerms(self):
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_ENCODES)
cv.addResource( "foo")
self.S.setMetaId( "sbase1")
self.S.addCVTerm(cv)
self.assert_( self.S.getNumCVTerms() == 1 )
self.assert_( self.S.getCVTerms() != None )
res = self.S.getCVTerm(0).getResources()
self.assert_(( "foo" == res.getValue(0) ))
cv1 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv1.setBiologicalQualifierType(libsbml.BQB_IS)
cv1.addResource( "bar")
self.S.addCVTerm(cv1)
self.assert_( self.S.getNumCVTerms() == 2 )
cv2 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv2.setBiologicalQualifierType(libsbml.BQB_IS)
cv2.addResource( "bar1")
self.S.addCVTerm(cv2)
self.assert_( self.S.getNumCVTerms() == 2 )
res = self.S.getCVTerm(1).getResources()
self.assert_( res.getLength() == 2 )
self.assert_(( "bar" == res.getValue(0) ))
self.assert_(( "bar1" == res.getValue(1) ))
cv4 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv4.setBiologicalQualifierType(libsbml.BQB_IS)
cv4.addResource( "bar1")
self.S.addCVTerm(cv4)
self.assert_( self.S.getNumCVTerms() == 2 )
res = self.S.getCVTerm(1).getResources()
self.assert_( res.getLength() == 2 )
self.assert_(( "bar" == res.getValue(0) ))
self.assert_(( "bar1" == res.getValue(1) ))
cv5 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv5.setBiologicalQualifierType(libsbml.BQB_HAS_PART)
cv5.addResource( "bar1")
self.S.addCVTerm(cv5)
self.assert_( self.S.getNumCVTerms() == 2 )
res = self.S.getCVTerm(1).getResources()
self.assert_( res.getLength() == 2 )
self.assert_(( "bar" == res.getValue(0) ))
self.assert_(( "bar1" == res.getValue(1) ))
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv2 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv4 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes(self):
triple = libsbml.XMLTriple("p", "", "")
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
token4 = libsbml.XMLToken("This is my text")
node4 = libsbml.XMLNode(token4)
token5 = libsbml.XMLToken("This is additional text")
node5 = libsbml.XMLNode(token5)
token = libsbml.XMLToken(triple,att,ns)
node = libsbml.XMLNode(token)
node.addChild(node4)
self.S.setNotes(node)
self.assert_( self.S.isSetNotes() == True )
token1 = libsbml.XMLToken(triple,att,ns)
node1 = libsbml.XMLNode(token1)
node1.addChild(node5)
self.S.appendNotes(node1)
self.assert_( self.S.isSetNotes() == True )
node2 = self.S.getNotes()
self.assert_( node2.getNumChildren() == 2 )
self.assert_(( "p" == node2.getChild(0).getName() ))
self.assert_( node2.getChild(0).getNumChildren() == 1 )
self.assert_(( "p" == node2.getChild(1).getName() ))
self.assert_( node2.getChild(1).getNumChildren() == 1 )
chars1 = node2.getChild(0).getChild(0).getCharacters()
chars2 = node2.getChild(1).getChild(0).getCharacters()
self.assert_(( "This is my text" == chars1 ))
self.assert_(( "This is additional text" == chars2 ))
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes1(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("This is my text")
html_node = libsbml.XMLNode(html_token)
head_node = libsbml.XMLNode(head_token)
title_node = libsbml.XMLNode(title_token)
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("This is more text")
html_node1 = libsbml.XMLNode(html_token)
head_node1 = libsbml.XMLNode(head_token)
title_node1 = libsbml.XMLNode(title_token)
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
head_node.addChild(title_node)
html_node.addChild(head_node)
html_node.addChild(body_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
head_node1.addChild(title_node1)
html_node1.addChild(head_node1)
html_node1.addChild(body_node1)
self.S.setNotes(html_node)
self.S.appendNotes(html_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes2(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("This is my text")
html_node = libsbml.XMLNode(html_token)
head_node = libsbml.XMLNode(head_token)
title_node = libsbml.XMLNode(title_token)
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
body_token1 = libsbml.XMLToken(body_triple,att,ns)
text_token1 = libsbml.XMLToken("This is more text")
body_node1 = libsbml.XMLNode(body_token1)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
head_node.addChild(title_node)
html_node.addChild(head_node)
html_node.addChild(body_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
self.S.setNotes(html_node)
self.S.appendNotes(body_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes3(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("This is my text")
html_node = libsbml.XMLNode(html_token)
head_node = libsbml.XMLNode(head_token)
title_node = libsbml.XMLNode(title_token)
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
p_token1 = libsbml.XMLToken(p_triple,att,ns)
text_token1 = libsbml.XMLToken("This is more text")
p_node1 = libsbml.XMLNode(p_token1)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
head_node.addChild(title_node)
html_node.addChild(head_node)
html_node.addChild(body_node)
p_node1.addChild(text_node1)
self.S.setNotes(html_node)
self.S.appendNotes(p_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes4(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
body_token1 = libsbml.XMLToken(body_triple,att,ns)
text_token = libsbml.XMLToken("This is my text")
body_node = libsbml.XMLNode(body_token1)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("This is more text")
html_node1 = libsbml.XMLNode(html_token)
head_node1 = libsbml.XMLNode(head_token)
title_node1 = libsbml.XMLNode(title_token)
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
head_node1.addChild(title_node1)
html_node1.addChild(head_node1)
html_node1.addChild(body_node1)
self.S.setNotes(body_node)
self.S.appendNotes(html_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes5(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
html_triple = libsbml.XMLTriple("html", "", "")
head_triple = libsbml.XMLTriple("head", "", "")
title_triple = libsbml.XMLTriple("title", "", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
html_token = libsbml.XMLToken(html_triple,att,ns)
head_token = libsbml.XMLToken(head_triple,att)
title_token = libsbml.XMLToken(title_triple,att)
body_token = libsbml.XMLToken(body_triple,att)
p_token = libsbml.XMLToken(p_triple,att)
p_token1 = libsbml.XMLToken(p_triple,att,ns)
text_token = libsbml.XMLToken("This is my text")
p_node = libsbml.XMLNode(p_token1)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("This is more text")
html_node1 = libsbml.XMLNode(html_token)
head_node1 = libsbml.XMLNode(head_token)
title_node1 = libsbml.XMLNode(title_token)
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
head_node1.addChild(title_node1)
html_node1.addChild(head_node1)
html_node1.addChild(body_node1)
self.S.setNotes(p_node)
self.S.appendNotes(html_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "html" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child = child.getChild(1)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ html_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ head_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes6(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
body_token = libsbml.XMLToken(body_triple,att,ns)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("This is my text")
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("This is more text")
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
self.S.setNotes(body_node)
self.S.appendNotes(body_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes7(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
body_token = libsbml.XMLToken(body_triple,att,ns)
p_token1 = libsbml.XMLToken(p_triple,att,ns)
text_token = libsbml.XMLToken("This is my text")
p_token = libsbml.XMLToken(p_triple,att)
p_node = libsbml.XMLNode(p_token1)
text_node = libsbml.XMLNode(text_token)
text_token1 = libsbml.XMLToken("This is more text")
body_node1 = libsbml.XMLNode(body_token)
p_node1 = libsbml.XMLNode(p_token)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
p_node1.addChild(text_node1)
body_node1.addChild(p_node1)
self.S.setNotes(p_node)
self.S.appendNotes(body_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotes8(self):
att = libsbml.XMLAttributes()
ns = libsbml.XMLNamespaces()
ns.add( "http://www.w3.org/1999/xhtml", "")
body_triple = libsbml.XMLTriple("body", "", "")
p_triple = libsbml.XMLTriple("p", "", "")
body_token = libsbml.XMLToken(body_triple,att,ns)
p_token = libsbml.XMLToken(p_triple,att)
text_token = libsbml.XMLToken("This is my text")
body_node = libsbml.XMLNode(body_token)
p_node = libsbml.XMLNode(p_token)
text_node = libsbml.XMLNode(text_token)
p_token1 = libsbml.XMLToken(p_triple,att,ns)
text_token1 = libsbml.XMLToken("This is more text")
p_node1 = libsbml.XMLNode(p_token1)
text_node1 = libsbml.XMLNode(text_token1)
p_node.addChild(text_node)
body_node.addChild(p_node)
p_node1.addChild(text_node1)
self.S.setNotes(body_node)
self.S.appendNotes(p_node1)
notes = self.S.getNotes()
self.assert_(( "notes" == notes.getName() ))
self.assert_( notes.getNumChildren() == 1 )
child = notes.getChild(0)
self.assert_(( "body" == child.getName() ))
self.assert_( child.getNumChildren() == 2 )
child1 = child.getChild(0)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is my text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
child1 = child.getChild(1)
self.assert_(( "p" == child1.getName() ))
self.assert_( child1.getNumChildren() == 1 )
child1 = child1.getChild(0)
self.assert_(( "This is more text" == child1.getCharacters() ))
self.assert_( child1.getNumChildren() == 0 )
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_token1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ body_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ p_node1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ text_node1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_appendNotesString(self):
notes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>";
taggednewnotes = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" +
"</notes>")
taggednewnotes2 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
"</notes>")
newnotes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>";
newnotes2 = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>" + "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>";
newnotes3 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" + "</notes>")
newnotes4 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
"</notes>")
self.S.setNotes(notes)
self.assert_( self.S.isSetNotes() == True )
self.S.appendNotes(newnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(newnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes2 ))
self.S.setNotes(notes)
self.S.appendNotes(newnotes3)
notes3 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes3 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(newnotes4)
notes4 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes4 == taggednewnotes2 ))
pass
def test_SBase_appendNotesString1(self):
notes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" </body>\n" +
"</html>")
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</html>")
addnotes2 = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString2(self):
notes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" </body>\n" +
"</html>")
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is more test notes </p>\n" + "</body>\n")
addnotes2 = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString3(self):
notes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" </body>\n" +
"</html>")
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
taggednewnotes2 = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n";
addnotes2 = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" + "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>";
addnotes3 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" + "</notes>")
addnotes4 = wrapString("<notes>\n" + " <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes2 ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes3)
notes3 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes3 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes4)
notes4 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes4 == taggednewnotes2 ))
pass
def test_SBase_appendNotesString4(self):
notes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is a test note </p>\n" + "</body>")
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</html>")
addnotes2 = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString5(self):
notes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>";
taggednewnotes = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
addnotes = wrapString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</html>")
addnotes2 = wrapString("<notes>\n" +
" <html xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <head>\n" +
" <title/>\n" +
" </head>\n" +
" <body>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
" </html>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString6(self):
notes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is a test note </p>\n" + "</body>")
taggednewnotes = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
addnotes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is more test notes </p>\n" + "</body>")
addnotes2 = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString7(self):
notes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>";
taggednewnotes = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is a test note </p>\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
addnotes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is more test notes </p>\n" + "</body>")
addnotes2 = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes ))
pass
def test_SBase_appendNotesString8(self):
notes = wrapString("<body xmlns=\"http://www.w3.org/1999/xhtml\">\n" + " <p>This is a test note </p>\n" + "</body>")
taggednewnotes = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" +
" </body>\n" +
"</notes>")
taggednewnotes2 = wrapString("<notes>\n" +
" <body xmlns=\"http://www.w3.org/1999/xhtml\">\n" +
" <p>This is a test note </p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
" </body>\n" +
"</notes>")
addnotes = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>";
addnotes2 = "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" + "<p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>";
addnotes3 = wrapString("<notes>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes </p>\n" +
"</notes>")
addnotes4 = wrapString("<notes>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 1</p>\n" +
" <p xmlns=\"http://www.w3.org/1999/xhtml\">This is more test notes 2</p>\n" +
"</notes>")
self.S.setNotes(notes)
self.S.appendNotes(addnotes)
notes1 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes1 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes2)
notes2 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes2 == taggednewnotes2 ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes3)
notes3 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes3 == taggednewnotes ))
self.S.setNotes(notes)
self.S.appendNotes(addnotes4)
notes4 = self.S.getNotesString()
self.assert_( self.S.isSetNotes() == True )
self.assert_(( notes4 == taggednewnotes2 ))
pass
def test_SBase_getQualifiersFromResources(self):
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_ENCODES)
cv.addResource( "foo")
self.S.setMetaId( "sbase1")
self.S.addCVTerm(cv)
self.assert_( self.S.getResourceBiologicalQualifier( "foo") == libsbml.BQB_ENCODES )
cv1 = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
cv1.setModelQualifierType(libsbml.BQM_IS)
cv1.addResource( "bar")
self.S.addCVTerm(cv1)
self.assert_( self.S.getResourceModelQualifier( "bar") == libsbml.BQM_IS )
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv1 ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_setAnnotation(self):
token = libsbml.XMLToken("This is a test note")
node = libsbml.XMLNode(token)
self.S.setAnnotation(node)
self.assert_( self.S.isSetAnnotation() == True )
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
self.assert_(( "This is a test note" == t1.getChild(0).getCharacters() ))
if (self.S.getAnnotation() == node):
pass
self.S.setAnnotation(self.S.getAnnotation())
self.assert_(( "This is a test note" == self.S.getAnnotation().getChild(0).getCharacters() ))
self.S.setAnnotation(None)
self.assert_( self.S.isSetAnnotation() == False )
if (self.S.getAnnotation() != None):
pass
self.S.setAnnotation(node)
self.assert_( self.S.isSetAnnotation() == True )
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
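    # Character-reference handling: well-formed numeric references round-trip,
    # malformed ones come back with their ampersands escaped.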
token = libsbml.XMLToken("(CR) ¨ ¨ ¨ (NOT CR) &#; &#x; �a8; ¨ ¨")
node = libsbml.XMLNode(token)
self.S.setAnnotation(node)
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
s = t1.getChild(0).toXMLString()
expected = "(CR) ¨ ¨ ¨ (NOT CR) &#; &#x; &#00a8; &#0168 &#x00a8";
self.assert_(( expected == s ))
token = libsbml.XMLToken("& ' > < \" & ' > < "")
node = libsbml.XMLNode(token)
self.S.setAnnotation(node)
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
s2 = t1.getChild(0).toXMLString()
expected2 = "& ' > < " & ' > < "";
self.assert_(( expected2 == s2 ))
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_setAnnotationString(self):
annotation = "This is a test note";
taggedannotation = "<annotation>This is a test note</annotation>";
self.S.setAnnotation(annotation)
self.assert_( self.S.isSetAnnotation() == True )
if (( taggedannotation != self.S.getAnnotationString() )):
pass
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
self.assert_(( "This is a test note" == t1.getChild(0).getCharacters() ))
self.S.setAnnotation(self.S.getAnnotationString())
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
chars = self.S.getAnnotationString()
self.assert_(( taggedannotation == chars ))
self.S.setAnnotation( "")
self.assert_( self.S.isSetAnnotation() == False )
if (self.S.getAnnotationString() != None):
pass
self.S.setAnnotation(taggedannotation)
self.assert_( self.S.isSetAnnotation() == True )
if (( taggedannotation != self.S.getAnnotationString() )):
pass
t1 = self.S.getAnnotation()
self.assert_( t1.getNumChildren() == 1 )
t2 = t1.getChild(0)
self.assert_(( "This is a test note" == t2.getCharacters() ))
pass
def test_SBase_setMetaId(self):
metaid = "x12345";
self.S.setMetaId(metaid)
self.assert_(( metaid == self.S.getMetaId() ))
self.assertEqual( True, self.S.isSetMetaId() )
if (self.S.getMetaId() == metaid):
pass
self.S.setMetaId(self.S.getMetaId())
self.assert_(( metaid == self.S.getMetaId() ))
self.S.setMetaId("")
self.assertEqual( False, self.S.isSetMetaId() )
if (self.S.getMetaId() != None):
pass
pass
def test_SBase_setNotes(self):
c = libsbml.Model(1,2)
token = libsbml.XMLToken("This is a test note")
node = libsbml.XMLNode(token)
c.setNotes(node)
self.assert_( c.isSetNotes() == True )
if (c.getNotes() == node):
pass
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
self.assert_(( "This is a test note" == t1.getChild(0).getCharacters() ))
c.setNotes(c.getNotes())
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
chars = t1.getChild(0).getCharacters()
self.assert_(( "This is a test note" == chars ))
c.setNotes(None)
self.assert_( c.isSetNotes() == False )
if (c.getNotes() != None):
pass
c.setNotes(node)
self.assert_( c.isSetNotes() == True )
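    # Same character-reference round-trip as in test_SBase_setAnnotation,
    # exercised here through setNotes().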
token = libsbml.XMLToken("(CR) ¨ ¨ ¨ (NOT CR) &#; &#x; �a8; ¨ ¨")
node = libsbml.XMLNode(token)
c.setNotes(node)
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
s = t1.getChild(0).toXMLString()
expected = "(CR) ¨ ¨ ¨ (NOT CR) &#; &#x; &#00a8; &#0168 &#x00a8";
self.assert_(( expected == s ))
token = libsbml.XMLToken("& ' > < \" & ' > < "")
node = libsbml.XMLNode(token)
c.setNotes(node)
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
s2 = t1.getChild(0).toXMLString()
expected2 = "& ' > < " & ' > < "";
self.assert_(( expected2 == s2 ))
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_setNotesString(self):
c = libsbml.Model(1,2)
notes = "This is a test note";
taggednotes = "<notes>This is a test note</notes>";
c.setNotes(notes)
self.assert_( c.isSetNotes() == True )
if (( taggednotes != c.getNotesString() )):
pass
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
t2 = t1.getChild(0)
self.assert_(( "This is a test note" == t2.getCharacters() ))
c.setNotes(c.getNotesString())
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
chars = c.getNotesString()
self.assert_(( taggednotes == chars ))
c.setNotes("")
self.assert_( c.isSetNotes() == False )
if (c.getNotesString() != None):
pass
c.setNotes(taggednotes)
self.assert_( c.isSetNotes() == True )
if (( taggednotes != c.getNotesString() )):
pass
t1 = c.getNotes()
self.assert_( t1.getNumChildren() == 1 )
t2 = t1.getChild(0)
self.assert_(( "This is a test note" == t2.getCharacters() ))
pass
def test_SBase_unsetAnnotationWithCVTerms(self):
annt = wrapString("<annotation>\n" +
" <test:test xmlns:test=\"http://test.org/test\">this is a test node</test:test>\n" +
"</annotation>")
annt_with_cvterm = wrapString("<annotation>\n" +
" <test:test xmlns:test=\"http://test.org/test\">this is a test node</test:test>\n" +
" <rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\" " +
"xmlns:dc=\"http://purl.org/dc/elements/1.1/\" " +
"xmlns:dcterms=\"http://purl.org/dc/terms/\" " +
"xmlns:vCard=\"http://www.w3.org/2001/vcard-rdf/3.0#\" " +
"xmlns:bqbiol=\"http://biomodels.net/biology-qualifiers/\" " +
"xmlns:bqmodel=\"http://biomodels.net/model-qualifiers/\">\n" +
" <rdf:Description rdf:about=\"#_000001\">\n" +
" <bqbiol:is>\n" +
" <rdf:Bag>\n" +
" <rdf:li rdf:resource=\"http://www.geneontology.org/#GO:0005895\"/>\n" +
" </rdf:Bag>\n" +
" </bqbiol:is>\n" +
" </rdf:Description>\n" +
" </rdf:RDF>\n" +
"</annotation>")
self.S.setAnnotation(annt)
self.assert_( self.S.isSetAnnotation() == True )
self.assert_(( annt == self.S.getAnnotationString() ))
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
self.assert_( self.S.getAnnotation() == None )
self.S.setAnnotation(annt)
self.S.setMetaId( "_000001")
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_IS)
cv.addResource( "http://www.geneontology.org/#GO:0005895")
self.S.addCVTerm(cv)
self.assert_( self.S.isSetAnnotation() == True )
self.assert_(( annt_with_cvterm == self.S.getAnnotationString() ))
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
self.assert_( self.S.getAnnotation() == None )
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_unsetAnnotationWithModelHistory(self):
h = libsbml.ModelHistory()
c = libsbml.ModelCreator()
annt = wrapString("<annotation>\n" +
" <test:test xmlns:test=\"http://test.org/test\">this is a test node</test:test>\n" +
"</annotation>")
annt_with_modelhistory = wrapString("<annotation>\n" +
" <test:test xmlns:test=\"http://test.org/test\">this is a test node</test:test>\n" +
" <rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\" " +
"xmlns:dc=\"http://purl.org/dc/elements/1.1/\" " +
"xmlns:dcterms=\"http://purl.org/dc/terms/\" " +
"xmlns:vCard=\"http://www.w3.org/2001/vcard-rdf/3.0#\" " +
"xmlns:bqbiol=\"http://biomodels.net/biology-qualifiers/\" " +
"xmlns:bqmodel=\"http://biomodels.net/model-qualifiers/\">\n" +
" <rdf:Description rdf:about=\"#_000001\">\n" +
" <dc:creator>\n" +
" <rdf:Bag>\n" +
" <rdf:li rdf:parseType=\"Resource\">\n" +
" <vCard:N rdf:parseType=\"Resource\">\n" +
" <vCard:Family>Keating</vCard:Family>\n" +
" <vCard:Given>Sarah</vCard:Given>\n" +
" </vCard:N>\n" +
" <vCard:EMAIL>sbml-team@caltech.edu</vCard:EMAIL>\n" +
" </rdf:li>\n" +
" </rdf:Bag>\n" +
" </dc:creator>\n" +
" <dcterms:created rdf:parseType=\"Resource\">\n" +
" <dcterms:W3CDTF>2005-12-29T12:15:45+02:00</dcterms:W3CDTF>\n" +
" </dcterms:created>\n" +
" <dcterms:modified rdf:parseType=\"Resource\">\n" +
" <dcterms:W3CDTF>2005-12-30T12:15:45+02:00</dcterms:W3CDTF>\n" +
" </dcterms:modified>\n" +
" </rdf:Description>\n" +
" </rdf:RDF>\n" +
"</annotation>")
self.S.setAnnotation(annt)
self.assert_( self.S.isSetAnnotation() == True )
self.assert_(( annt == self.S.getAnnotationString() ))
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
self.assert_( self.S.getAnnotation() == None )
self.S.setAnnotation(annt)
self.S.setMetaId( "_000001")
c.setFamilyName("Keating")
c.setGivenName("Sarah")
c.setEmail("sbml-team@caltech.edu")
h.addCreator(c)
dc = libsbml.Date(2005,12,29,12,15,45,1,2,0)
h.setCreatedDate(dc)
dm = libsbml.Date(2005,12,30,12,15,45,1,2,0)
h.setModifiedDate(dm)
self.S.setModelHistory(h)
self.assert_( self.S.isSetAnnotation() == True )
self.assert_(( annt_with_modelhistory == self.S.getAnnotationString() ))
self.S.unsetAnnotation()
self.assert_( self.S.isSetAnnotation() == False )
self.assert_( self.S.getAnnotation() == None )
_dummyList = [ c ]; _dummyList[:] = []; del _dummyList
_dummyList = [ h ]; _dummyList[:] = []; del _dummyList
pass
def test_SBase_unsetCVTerms(self):
cv = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv.setBiologicalQualifierType(libsbml.BQB_ENCODES)
cv.addResource( "foo")
self.S.setMetaId( "sbase1")
self.S.addCVTerm(cv)
cv1 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv1.setBiologicalQualifierType(libsbml.BQB_IS)
cv1.addResource( "bar")
self.S.addCVTerm(cv1)
cv2 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv2.setBiologicalQualifierType(libsbml.BQB_IS)
cv2.addResource( "bar1")
self.S.addCVTerm(cv2)
cv4 = libsbml.CVTerm(libsbml.BIOLOGICAL_QUALIFIER)
cv4.setBiologicalQualifierType(libsbml.BQB_IS)
cv4.addResource( "bar1")
self.S.addCVTerm(cv4)
self.assert_( self.S.getNumCVTerms() == 2 )
self.S.unsetCVTerms()
self.assert_( self.S.getNumCVTerms() == 0 )
self.assert_( self.S.getCVTerms() == None )
_dummyList = [ cv ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv2 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ cv4 ]; _dummyList[:] = []; del _dummyList
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestSBase))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
alexholehouse/SBMLIntegrator
|
libsbml-5.0.0/src/bindings/python/test/sbml/TestSBase.py
|
Python
|
gpl-3.0
| 60,557
|
[
"VisIt"
] |
85bd37fb9491d506e9e8673753796e110592856e7364876c0fa29ca4b7c3486b
|
#!/usr/bin/env python
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import unittest
import logging
from parameters import Parameter
class TestParameter(unittest.TestCase):
def testMinimal(self):
opt = Parameter('foo')
self.assertEqual(opt.name, 'foo')
self.assertIsNone(opt.default)
self.assertIsNone(opt.value)
def testValue(self):
opt = Parameter('foo')
self.assertEqual(opt.name, 'foo')
self.assertIsNone(opt.default)
self.assertIsNone(opt.value)
        opt.value = 12345
        self.assertEqual(opt.value, 12345)
def testDefault(self):
opt = Parameter('foo', default=12345)
self.assertEqual(opt.name, 'foo')
self.assertEqual(opt.default, 12345)
self.assertEqual(opt.value, 12345)
opt.value = '12345'
self.assertEqual(opt.default, 12345)
self.assertEqual(opt.value, '12345')
opt = Parameter('bar', default=1980, vtype=int)
with self.assertLogs(level=logging.WARNING) as cm:
opt.default = 'nope'
self.assertEqual(len(cm.output), 1)
self.assertIn("'bar' must be of type (<class 'int'>,) but <class 'str'> provided.", cm.output[0])
def testAllow(self):
opt = Parameter('foo', allow=(1, 'two'))
self.assertIsNone(opt.default)
self.assertIsNone(opt.value)
opt.value = 1
self.assertEqual(opt.value, 1)
opt.value = 'two'
self.assertEqual(opt.value, 'two')
with self.assertLogs(level=logging.WARNING) as cm:
opt.value = 4
self.assertEqual(len(cm.output), 1)
self.assertIn("Attempting to set 'foo' to a value of 4 but only the following are allowed: (1, 'two')", cm.output[0])
def testType(self):
opt = Parameter('foo', vtype=int)
self.assertIsNone(opt.default)
self.assertIsNone(opt.value)
opt.value = 1
self.assertEqual(opt.value, 1)
with self.assertLogs(level=logging.WARNING) as cm:
opt.value = 's'
self.assertEqual(len(cm.output), 1)
self.assertIn("'foo' must be of type (<class 'int'>,) but <class 'str'> provided.", cm.output[0])
def testTypeWithAllow(self):
opt = Parameter('foo', vtype=int, allow=(1,2))
self.assertIsNone(opt.default)
self.assertIsNone(opt.value)
opt.value = 2
self.assertEqual(opt.value, 2)
opt.value = 1
self.assertEqual(opt.value, 1)
with self.assertLogs(level=logging.WARNING) as cm:
opt.value = 4
self.assertEqual(len(cm.output), 1)
self.assertIn("Attempting to set 'foo' to a value of 4 but only the following are allowed: (1, 2)", cm.output[0])
self.assertEqual(opt.value, 1)
def testAllowWithTypeError(self):
with self.assertRaises(TypeError) as e:
Parameter('foo', allow='wrong')
self.assertIn("The supplied 'allow' argument must be a 'tuple', but <class 'str'> was provided.", str(e.exception))
with self.assertRaises(TypeError) as e:
Parameter('foo', vtype=int, allow=(1,'2'))
self.assertIn("The supplied 'allow' argument must be a 'tuple' of (<class 'int'>,) items, but a <class 'str'> item was provided.", str(e.exception))
def testArray(self):
opt = Parameter('foo', default=(1,2), array=True)
self.assertEqual(opt._Parameter__array, True)
self.assertEqual(opt.value, (1,2))
with self.assertLogs(level=logging.WARNING) as cm:
opt.value = 4
self.assertIn("'foo' was defined as an array, which require <class 'tuple'> for assignment, but a <class 'int'> was provided.", cm.output[0])
opt.value = (3, 4)
self.assertEqual(opt.value, (3,4))
opt.value = ('1', )
self.assertEqual(opt.value, ('1',))
opt = Parameter('foo', vtype=int, array=True)
self.assertEqual(opt._Parameter__array, True)
self.assertIsNone(opt.value)
with self.assertLogs(level=logging.WARNING) as cm:
opt.value = 4
self.assertEqual(len(cm.output), 1)
self.assertIn("'foo' was defined as an array, which require <class 'tuple'> for assignment, but a <class 'int'> was provided.", cm.output[0])
with self.assertLogs(level=logging.WARNING) as cm:
opt.value = ('x', )
self.assertEqual(len(cm.output), 1)
self.assertIn("The values within 'foo' must be of type (<class 'int'>,) but <class 'str'> provided.", cm.output[0])
self.assertIsNone(opt.value)
opt.value = (1, )
self.assertEqual(opt.value, (1,))
def testSize(self):
opt = Parameter('foo', size=4)
self.assertEqual(opt._Parameter__array, True)
self.assertEqual(opt._Parameter__size, 4)
with self.assertLogs(level=logging.WARNING) as cm:
opt.value = (1,2,3)
self.assertIn("'foo' was defined as an array with length 4 but a value with length 3 was provided.", cm.output[0])
def testDoc(self):
opt = Parameter('foo', doc='This is foo, not bar.')
self.assertEqual(opt.doc, 'This is foo, not bar.')
opt = Parameter('foo', doc=u'This is foo, not bar.')
self.assertEqual(opt.doc, u'This is foo, not bar.')
with self.assertRaises(TypeError) as e:
Parameter('foo', doc=42)
self.assertIn("The supplied 'doc' argument must be a 'str', but <class 'int'> was provided.", str(e.exception))
def testName(self):
opt = Parameter('foo')
self.assertEqual(opt.name, 'foo')
opt = Parameter(u'foo')
self.assertEqual(opt.name, u'foo')
with self.assertRaises(TypeError) as e:
Parameter(42)
self.assertIn("The supplied 'name' argument must be a 'str', but <class 'int'> was provided.", str(e.exception))
def testRequired(self):
opt = Parameter('year', required=True)
self.assertEqual(opt.required, True)
with self.assertLogs(level=logging.WARNING) as cm:
retcode = opt.validate()
self.assertEqual(retcode, 1)
self.assertEqual(len(cm.output), 1)
self.assertIn("The Parameter 'year' is marked as required, but no value is assigned.", cm.output[0])
with self.assertRaises(TypeError) as e:
Parameter('year', required="wrong")
self.assertIn("The supplied 'required' argument must be a 'bool', but <class 'str'> was provided.", str(e.exception))
opt.value = 1980
self.assertEqual(opt.validate(), 0)
def testSetDefault(self):
opt = Parameter('year', default=1980)
self.assertEqual(opt.value, 1980)
self.assertEqual(opt.default, 1980)
opt.default = 1949
self.assertEqual(opt.value, 1980)
self.assertEqual(opt.default, 1949)
opt = Parameter('year')
self.assertEqual(opt.value, None)
opt.default = 1949
self.assertEqual(opt.value, 1949)
self.assertEqual(opt.default, 1949)
def testPrivate(self):
opt = Parameter('year')
self.assertEqual(opt.private, False)
opt = Parameter('year', private=True)
self.assertEqual(opt.private, True)
opt = Parameter('_year', private=False)
self.assertEqual(opt.private, False)
opt = Parameter('_year')
self.assertEqual(opt.private, True)
def testToString(self):
opt = Parameter('year')
s = str(opt)
self.assertIn('Value: None', s)
self.assertNotIn('Default', s)
self.assertNotIn('Type', s)
self.assertNotIn('Allow', s)
opt = Parameter('year', default=1980, vtype=int, allow=(1949, 1954, 1977, 1980))
opt.value = 1954
s = str(opt)
self.assertIn('Value: 1954', s)
self.assertIn('Default: 1980', s)
self.assertIn("Type(s): ('int',)", s)
self.assertIn('Allow: (1949, 1954, 1977, 1980)', s)
opt = Parameter('year', default=1980, doc="The best year.")
s = str(opt)
self.assertIn("best", s)
def testVerify(self):
opt = Parameter('year', verify=(lambda v: v > 1980, "The year must be greater than 1980."))
self.assertEqual(opt.value, None)
opt.value = 1990
self.assertEqual(opt.value, 1990)
with self.assertLogs(level=logging.WARNING) as cm:
opt.value = 1949
self.assertEqual(len(cm.output), 1)
self.assertIn("Verify function failed with the given value of 1949\nThe year must be greater than 1980.", cm.output[0])
with self.assertRaises(TypeError) as e:
Parameter('year', verify="wrong")
self.assertIn("The supplied 'verify' argument must be a 'tuple' with callable function and 'str' error message, but <class 'str'> was provided.", str(e.exception))
with self.assertRaises(TypeError) as e:
Parameter('year', verify=("wrong", 1, 2))
self.assertIn("The supplied 'verify' argument must be a 'tuple' with two items a callable function and 'str' error message, but <class 'tuple'> with 3 items was provided.", str(e.exception))
with self.assertRaises(TypeError) as e:
Parameter('year', verify=("wrong", "message"))
self.assertIn("The first item in the 'verify' argument tuple must be a callable function with a single argument, but <class 'str'> was provided", str(e.exception))
with self.assertRaises(TypeError) as e:
Parameter('year', verify=(lambda x,y: True, "message"))
self.assertIn("The first item in the 'verify' argument tuple must be a callable function with a single argument, but <class 'function'> was provided that has 2 arguments.", str(e.exception))
with self.assertRaises(TypeError) as e:
Parameter('year', verify=(lambda v: True, 42))
self.assertIn("The second item in the 'verify' argument tuple must be a string, but <class 'int'> was provided", str(e.exception))
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2, buffer=True)
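# A minimal usage sketch (not part of the test suite) of the Parameter API
# exercised above; the keyword arguments shown (default, vtype, allow, doc)
# are the ones the tests cover, while the concrete values are illustrative.
def _example_parameter_usage():
    year = Parameter('year', default=1980, vtype=int,
                     allow=(1949, 1954, 1977, 1980),
                     doc='The best year.')
    year.value = 1954   # accepted: an int contained in 'allow'
    year.value = 2000   # logs a warning and is ignored: not in 'allow'
    return year.value   # -> 1954, the last accepted assignment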
|
harterj/moose
|
python/parameters/test/test_Parameter.py
|
Python
|
lgpl-2.1
| 10,414
|
[
"MOOSE"
] |
1583e514367d1e6627cdfc6ad7ad13c986380571189c20d4ee8209fafbf1dfc1
|
# pylint: disable=missing-docstring
# pylint: disable=no-member
import datetime
import os
import pytz
from django.conf import settings
from lettuce import step, world
from mock import patch
from pytz import UTC
from splinter.exceptions import ElementDoesNotExist
from common import visit_scenario_item
from courseware.access import has_access
from courseware.tests.factories import BetaTesterFactory, InstructorFactory
from openedx.core.lib.tests.tools import assert_equal, assert_in, assert_true # pylint: disable=no-name-in-module
from student.tests.factories import UserFactory
TEST_COURSE_NAME = "test_course_a"
@step('I view the LTI and error is shown$')
def lti_is_not_rendered(_step):
# the error message is shown
assert world.is_css_present('.error_message', wait_time=0)
# the iframe is not present
assert not world.is_css_present('iframe', wait_time=0)
# the launch link is not present
assert not world.is_css_present('.link_lti_new_window', wait_time=0)
def check_lti_iframe_content(text):
# the test content is present inside the iframe
location = world.scenario_dict['LTI'].location.html_id()
iframe_name = 'ltiFrame-' + location
with world.browser.get_iframe(iframe_name) as iframe:
# iframe does not contain functions from terrain/ui_helpers.py
assert iframe.is_element_present_by_css('.result', wait_time=0)
assert (text == world.retry_on_exception(
lambda: iframe.find_by_css('.result')[0].text,
max_attempts=5
))
@step('I view the LTI and it is rendered in iframe$')
def lti_is_rendered_iframe(_step):
world.wait_for_present('iframe') # pylint: disable=no-member
assert world.is_css_present('iframe', wait_time=2) # pylint: disable=no-member
assert not world.is_css_present('.link_lti_new_window', wait_time=0) # pylint: disable=no-member
assert not world.is_css_present('.error_message', wait_time=0) # pylint: disable=no-member
# iframe is visible
assert world.css_visible('iframe') # pylint: disable=no-member
check_lti_iframe_content("This is LTI tool. Success.")
@step('I view the LTI but incorrect_signature warning is rendered$')
def incorrect_lti_is_rendered(_step):
assert world.is_css_present('iframe', wait_time=2)
assert not world.is_css_present('.link_lti_new_window', wait_time=0)
assert not world.is_css_present('.error_message', wait_time=0)
# the test content is present inside the iframe
check_lti_iframe_content("Wrong LTI signature")
@step('the course has correct LTI credentials with registered (.*)$')
def set_correct_lti_passport(_step, user='Instructor'):
coursenum = TEST_COURSE_NAME
metadata = {
'lti_passports': ["correct_lti_id:test_client_key:test_client_secret"]
}
i_am_registered_for_the_course(coursenum, metadata, user)
@step('the course has incorrect LTI credentials$')
def set_incorrect_lti_passport(_step):
coursenum = TEST_COURSE_NAME
metadata = {
'lti_passports': ["test_lti_id:test_client_key:incorrect_lti_secret_key"]
}
i_am_registered_for_the_course(coursenum, metadata)
@step(r'the course has an LTI component with (.*) fields(?:\:)?$') # , new_page is(.*), graded is(.*)
def add_correct_lti_to_course(_step, fields):
category = 'lti'
host = getattr(settings, 'LETTUCE_HOST', '127.0.0.1')
metadata = {
'lti_id': 'correct_lti_id',
'launch_url': 'http://{}:{}/correct_lti_endpoint'.format(host, settings.LTI_PORT),
}
if fields.strip() == 'incorrect_lti_id': # incorrect fields
metadata.update({
'lti_id': 'incorrect_lti_id'
})
elif fields.strip() == 'correct': # correct fields
pass
elif fields.strip() == 'no_launch_url':
metadata.update({
'launch_url': u''
})
else: # incorrect parameter
assert False
if _step.hashes:
metadata.update(_step.hashes[0])
world.scenario_dict['LTI'] = world.ItemFactory.create(
parent_location=world.scenario_dict['SECTION'].location,
category=category,
display_name='LTI',
metadata=metadata,
)
visit_scenario_item('LTI')
def create_course_for_lti(course, metadata):
# First clear the modulestore so we don't try to recreate
# the same course twice
# This also ensures that the necessary templates are loaded
world.clear_courses()
weight = 0.1
grading_policy = {
"GRADER": [
{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"weight": weight
},
]
}
# Create the course
# We always use the same org and display name,
# but vary the course identifier (e.g. 600x or 191x)
world.scenario_dict['COURSE'] = world.CourseFactory.create(
org='edx',
number=course,
display_name='Test Course',
metadata=metadata,
grading_policy=grading_policy,
)
# Add a section to the course to contain problems
world.scenario_dict['CHAPTER'] = world.ItemFactory.create(
parent_location=world.scenario_dict['COURSE'].location,
category='chapter',
display_name='Test Chapter',
)
world.scenario_dict['SECTION'] = world.ItemFactory.create(
parent_location=world.scenario_dict['CHAPTER'].location,
category='sequential',
display_name='Test Section',
metadata={'graded': True, 'format': 'Homework'})
@patch.dict('courseware.access.settings.FEATURES', {'DISABLE_START_DATES': False})
def i_am_registered_for_the_course(coursenum, metadata, user='Instructor'):
# Create user
if user == 'BetaTester':
# Create the course
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=5)
metadata.update({'days_early_for_beta': 5, 'start': tomorrow})
create_course_for_lti(coursenum, metadata)
course_descriptor = world.scenario_dict['COURSE']
# create beta tester
user = BetaTesterFactory(course_key=course_descriptor.id)
normal_student = UserFactory()
instructor = InstructorFactory(course_key=course_descriptor.id)
assert not has_access(normal_student, 'load', course_descriptor)
assert has_access(user, 'load', course_descriptor)
assert has_access(instructor, 'load', course_descriptor)
else:
metadata.update({'start': datetime.datetime(1970, 1, 1, tzinfo=UTC)})
create_course_for_lti(coursenum, metadata)
course_descriptor = world.scenario_dict['COURSE']
user = InstructorFactory(course_key=course_descriptor.id)
# Enroll the user in the course and log them in
if has_access(user, 'load', course_descriptor):
world.enroll_user(user, course_descriptor.id)
world.log_in(username=user.username, password='test')
def check_lti_popup(parent_window):
# You should now have 2 browser windows open, the original courseware and the LTI
windows = world.browser.windows
assert_equal(len(windows), 2)
# For verification, iterate through the window titles and make sure that
# both are there.
tabs = []
expected_tabs = [
u'LTI | Test Section | {course} Courseware | {platform}'.format(
course=TEST_COURSE_NAME,
platform=settings.PLATFORM_NAME
),
u'TEST TITLE'
]
for window in windows:
world.browser.switch_to_window(window)
tabs.append(world.browser.title)
assert_equal(tabs, expected_tabs)
# Now verify the contents of the LTI window (which is the 2nd window/tab)
# Note: The LTI opens in a new browser window, but Selenium sticks with the
# current window until you explicitly switch to the context of the new one.
world.browser.switch_to_window(windows[1])
url = world.browser.url
basename = os.path.basename(url)
pathname = os.path.splitext(basename)[0]
assert_equal(pathname, u'correct_lti_endpoint')
result = world.css_find('.result').first.text
assert_equal(result, u'This is LTI tool. Success.')
world.browser.driver.close() # Close the pop-up window
world.browser.switch_to_window(parent_window) # Switch to the main window again
def click_and_check_lti_popup():
parent_window = world.browser.current_window # Save the parent window
world.css_find('.link_lti_new_window').first.click()
check_lti_popup(parent_window)
@step('visit the LTI component')
def visit_lti_component(_step):
visit_scenario_item('LTI')
@step('I see LTI component (.*) with text "([^"]*)"$')
def see_elem_text(_step, elem, text):
selector_map = {
'progress': '.problem-progress',
'feedback': '.problem-feedback',
'module title': '.problem-header',
'button': '.link_lti_new_window',
'description': '.lti-description'
}
assert_in(elem, selector_map)
assert_true(world.css_has_text(selector_map[elem], text))
@step('I see text "([^"]*)"$')
def check_progress(_step, text):
assert world.browser.is_text_present(text)
@step('I see graph with total progress "([^"]*)"$')
def see_graph(_step, progress):
assert_equal(progress, world.css_find('#grade-detail-graph .overallGrade').first.text.split('\n')[1])
@step('I see in the gradebook table that "([^"]*)" is "([^"]*)"$')
def see_value_in_the_gradebook(_step, label, text):
table_selector = '.grade-table'
index = 0
table_headers = world.css_find('{0} thead th'.format(table_selector))
for i, element in enumerate(table_headers):
if element.text.strip() == label:
index = i
break
assert_true(world.css_has_text('{0} tbody td'.format(table_selector), text, index=index))
@step('I submit answer to LTI (.*) question$')
def click_grade(_step, version):
version_map = {
'1': {'selector': 'submit-button', 'expected_text': 'LTI consumer (edX) responded with XML content'},
'2': {'selector': 'submit-lti2-button', 'expected_text': 'LTI consumer (edX) responded with HTTP 200'},
}
assert_in(version, version_map)
location = world.scenario_dict['LTI'].location.html_id()
iframe_name = 'ltiFrame-' + location
with world.browser.get_iframe(iframe_name) as iframe:
css_ele = version_map[version]['selector']
css_loc = '#' + css_ele
world.wait_for_visible(css_loc)
world.css_click(css_loc)
assert iframe.is_text_present(version_map[version]['expected_text'])
@step('LTI provider deletes my grade and feedback$')
def click_delete_button(_step):
with world.browser.get_iframe(get_lti_frame_name()) as iframe:
iframe.find_by_name('submit-lti2-delete-button').first.click()
def get_lti_frame_name():
location = world.scenario_dict['LTI'].location.html_id()
return 'ltiFrame-' + location
@step('I see in iframe that LTI role is (.*)$')
def check_role(_step, role):
world.wait_for_present('iframe')
location = world.scenario_dict['LTI'].location.html_id()
iframe_name = 'ltiFrame-' + location
with world.browser.get_iframe(iframe_name) as iframe:
expected_role = 'Role: ' + role
role = world.retry_on_exception(
lambda: iframe.find_by_tag('h5').first.value,
max_attempts=5,
ignored_exceptions=ElementDoesNotExist
)
assert_equal(expected_role, role)
@step('I switch to (.*)$')
def switch_view(_step, view):
staff_status = world.css_find('#action-preview-select').first.value
if staff_status != view:
world.browser.select("select", view)
world.wait_for_ajax_complete()
assert_equal(world.css_find('#action-preview-select').first.value, view)
@step("in the LTI component I do not see (.*)$")
def check_lti_component_no_elem(_step, text):
selector_map = {
'a launch button': '.link_lti_new_window',
'an provider iframe': '.ltiLaunchFrame',
'feedback': '.problem-feedback',
'progress': '.problem-progress',
}
assert_in(text, selector_map)
assert_true(world.is_css_not_present(selector_map[text]))
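# An illustrative scenario (a sketch, not copied from an actual .feature
# file) showing how the step definitions above would be combined:
#
#   Scenario: LTI component renders in an iframe
#     Given the course has correct LTI credentials with registered Instructor
#     And the course has an LTI component with correct fields
#     Then I view the LTI and it is rendered in iframe
#     And I see in iframe that LTI role is Instructor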
|
ahmedaljazzar/edx-platform
|
lms/djangoapps/courseware/features/lti.py
|
Python
|
agpl-3.0
| 12,199
|
[
"VisIt"
] |
60e4c95ab4dfa03ae8a8a5b3bca8b146e1ff633210022b85fec4ec1ab0d5d215
|
#!/usr/bin/env python
import vtk
def main():
colors = vtk.vtkNamedColors()
lines = vtk.vtkLineSource()
# Create two points, P0 and P1
p0 = [1.0, 0.0, 0.0]
p1 = [5.0, 0.0, 0.0]
lines.SetResolution(11)
lines.SetPoint1(p0)
lines.SetPoint2(p1)
lines.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(lines.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(5)
actor.GetProperty().SetColor(colors.GetColor3d("Banana"))
StippledLine(actor, 0xA1A1, 2)
ren1 = vtk.vtkRenderer()
ren1.SetBackground(colors.GetColor3d("SlateGray"))
renWin = vtk.vtkRenderWindow()
renWin.SetSize(640, 480)
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(actor)
renWin.Render()
iren.Start()
def StippledLine(actor, lineStipplePattern, lineStippleRepeat):
tcoords = vtk.vtkDoubleArray()
image = vtk.vtkImageData()
texture = vtk.vtkTexture()
# Create texture
dimension = 16 * lineStippleRepeat
image.SetDimensions(dimension, 1, 1)
image.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 4)
image.SetExtent(0, dimension - 1, 0, 0, 0, 0)
on = 255
off = 0
i_dim = 0
while i_dim < dimension:
for i in range(0, 16):
mask = (1 << i)
bit = (lineStipplePattern & mask) >> i
value = bit
if value == 0:
for j in range(0, lineStippleRepeat):
image.SetScalarComponentFromFloat(i_dim, 0, 0, 0, on)
image.SetScalarComponentFromFloat(i_dim, 0, 0, 1, on)
image.SetScalarComponentFromFloat(i_dim, 0, 0, 2, on)
image.SetScalarComponentFromFloat(i_dim, 0, 0, 3, off)
i_dim += 1
else:
for j in range(0, lineStippleRepeat):
image.SetScalarComponentFromFloat(i_dim, 0, 0, 0, on)
image.SetScalarComponentFromFloat(i_dim, 0, 0, 1, on)
image.SetScalarComponentFromFloat(i_dim, 0, 0, 2, on)
image.SetScalarComponentFromFloat(i_dim, 0, 0, 3, on)
i_dim += 1
polyData = actor.GetMapper().GetInput()
# Create texture coordinates
tcoords.SetNumberOfComponents(1)
tcoords.SetNumberOfTuples(polyData.GetNumberOfPoints())
for i in range(0, polyData.GetNumberOfPoints()):
value = i * 0.5
tcoords.SetTypedTuple(i, [value])
polyData.GetPointData().SetTCoords(tcoords)
texture.SetInputData(image)
texture.InterpolateOff()
texture.RepeatOn()
actor.SetTexture(texture)
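# A small helper sketch (not part of the original example) that shows how
# the 16-bit stipple pattern is decoded above: bit i of lineStipplePattern
# selects whether texel block i is opaque ('#') or transparent ('.'), and
# each block is lineStippleRepeat texels wide.
def pattern_to_string(lineStipplePattern, lineStippleRepeat=1):
    bits = [(lineStipplePattern >> i) & 1 for i in range(16)]
    return ''.join(('#' if b else '.') * lineStippleRepeat for b in bits)
# e.g. pattern_to_string(0xA1A1) -> '#....#.##....#.#'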
if __name__ == '__main__':
main()
|
lorensen/VTKExamples
|
src/Python/Rendering/StippledLine.py
|
Python
|
apache-2.0
| 2,771
|
[
"VTK"
] |
0ec2a3f3007c7eb21584b9b79b4edcccd8aa0f5f214084fb9dea0e812c9c04a6
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# EHS (20 March 2013): This is the list of general functions.
# The list is a continuation of Rens's and Dominik's.
import shutil
import subprocess
import datetime
import random
import os
import gc
import re
import math
import sys
import types
import netCDF4 as nc
import numpy as np
import numpy.ma as ma
import pcraster as pcr
import logging
logger = logging.getLogger(__name__)
# file cache to minimize/reduce opening/closing files.
filecache = dict()
# Global variables:
MV = 1e20
smallNumber = 1E-39
# tuple of netcdf file suffixes (extensions) that can be used:
netcdf_suffixes = ('.nc4','.nc')
def checkVariableInNC(ncFile,varName):
logger.debug('Check whether the variable: '+str(varName)+' is defined in the file: '+str(ncFile))
if ncFile in filecache.keys():
f = filecache[ncFile]
#~ print "Cached: ", ncFile
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
#~ print "New: ", ncFile
varName = str(varName)
return varName in f.variables.keys()
def netcdf2PCRobjCloneWithoutTime(ncFile,varName,
cloneMapFileName = None,\
LatitudeLongitude = True,\
specificFillValue = None):
logger.debug('reading variable: '+str(varName)+' from the file: '+str(ncFile))
#
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
# Get netCDF file and variable name:
if ncFile in filecache.keys():
f = filecache[ncFile]
#~ print "Cached: ", ncFile
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
#~ print "New: ", ncFile
#print ncFile
#f = nc.Dataset(ncFile)
varName = str(varName)
if LatitudeLongitude == True:
try:
f.variables['lat'] = f.variables['latitude']
f.variables['lon'] = f.variables['longitude']
except:
pass
sameClone = True
# check whether clone and input maps have the same attributes:
if cloneMapFileName != None:
# get the attributes of cloneMap
attributeClone = getMapAttributesALL(cloneMapFileName)
cellsizeClone = attributeClone['cellsize']
rowsClone = attributeClone['rows']
colsClone = attributeClone['cols']
xULClone = attributeClone['xUL']
yULClone = attributeClone['yUL']
# get the attributes of input (netCDF)
cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
cellsizeInput = float(cellsizeInput)
rowsInput = len(f.variables['lat'])
colsInput = len(f.variables['lon'])
xULInput = f.variables['lon'][0]-0.5*cellsizeInput
yULInput = f.variables['lat'][0]+0.5*cellsizeInput
# check whether both maps have the same attributes
if cellsizeClone != cellsizeInput: sameClone = False
if rowsClone != rowsInput: sameClone = False
if colsClone != colsInput: sameClone = False
if xULClone != xULInput: sameClone = False
if yULClone != yULInput: sameClone = False
cropData = f.variables[varName][:,:] # still original data
factor = 1 # needed in regridData2FinerGrid
if sameClone == False:
# crop to cloneMap:
minX = min(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
xIdxSta = int(np.where(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
minY = min(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
yIdxSta = int(np.where(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
cropData = f.variables[varName][yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
if factor > 1: logger.debug('Resample: input cell size = '+str(float(cellsizeInput))+' ; output/clone cell size = '+str(float(cellsizeClone)))
# convert to PCR object and close f
if specificFillValue != None:
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(specificFillValue))
else:
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(f.variables[varName]._FillValue))
#~ # debug:
#~ pcr.report(outPCR,"tmp.map")
#~ print(varName)
#~ os.system('aguila tmp.map')
#f.close();
f = None ; cropData = None
# PCRaster object
return (outPCR)
def netcdf2PCRobjClone(ncFile,varName,dateInput,\
useDoy = None,
cloneMapFileName = None,\
LatitudeLongitude = True,\
specificFillValue = None):
#
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
# Get netCDF file and variable name:
#~ print ncFile
logger.debug('reading variable: '+str(varName)+' from the file: '+str(ncFile))
if ncFile in filecache.keys():
f = filecache[ncFile]
#~ print "Cached: ", ncFile
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
#~ print "New: ", ncFile
varName = str(varName)
if LatitudeLongitude == True:
try:
f.variables['lat'] = f.variables['latitude']
f.variables['lon'] = f.variables['longitude']
except:
pass
if varName == "evapotranspiration":
try:
f.variables['evapotranspiration'] = f.variables['referencePotET']
except:
pass
if varName == "kc": # the variable name in PCR-GLOBWB
try:
f.variables['kc'] = \
f.variables['Cropcoefficient'] # the variable name in the netcdf file
except:
pass
if varName == "interceptCapInput": # the variable name in PCR-GLOBWB
try:
f.variables['interceptCapInput'] = \
f.variables['Interceptioncapacity'] # the variable name in the netcdf file
except:
pass
if varName == "coverFractionInput": # the variable name in PCR-GLOBWB
try:
f.variables['coverFractionInput'] = \
f.variables['Coverfraction'] # the variable name in the netcdf file
except:
pass
if varName == "fracVegCover": # the variable name in PCR-GLOBWB
try:
f.variables['fracVegCover'] = \
f.variables['vegetation_fraction'] # the variable name in the netcdf file
except:
pass
if varName == "minSoilDepthFrac": # the variable name in PCR-GLOBWB
try:
f.variables['minSoilDepthFrac'] = \
f.variables['minRootDepthFraction'] # the variable name in the netcdf file
except:
pass
if varName == "maxSoilDepthFrac": # the variable name in PCR-GLOBWB
try:
f.variables['maxSoilDepthFrac'] = \
f.variables['maxRootDepthFraction'] # the variable name in the netcdf file
except:
pass
if varName == "arnoBeta": # the variable name in PCR-GLOBWB
try:
f.variables['arnoBeta'] = \
f.variables['arnoSchemeBeta'] # the variable name in the netcdf file
except:
pass
# date
date = dateInput
if useDoy == "Yes":
logger.debug('Finding the date based on the given climatology doy index (1 to 366, or index 0 to 365)')
idx = int(dateInput) - 1
elif useDoy == "month": # PS: WE NEED THIS ONE FOR NETCDF FILES that contain only 12 monthly values (e.g. cropCoefficientWaterNC).
logger.debug('Finding the date based on the given climatology month index (1 to 12, or index 0 to 11)')
# make sure that date is in the correct format
if isinstance(date, str) == True: date = \
datetime.datetime.strptime(str(date),'%Y-%m-%d')
idx = int(date.month) - 1
else:
# make sure that date is in the correct format
if isinstance(date, str) == True: date = \
datetime.datetime.strptime(str(date),'%Y-%m-%d')
date = datetime.datetime(date.year,date.month,date.day)
if useDoy == "yearly":
date = datetime.datetime(date.year,int(1),int(1))
if useDoy == "monthly":
date = datetime.datetime(date.year,date.month,int(1))
if useDoy == "yearly" or useDoy == "monthly" or useDoy == "daily_seasonal":
# if the desired year is not available, use the first year or the last year that is available
first_year_in_nc_file = findFirstYearInNCTime(f.variables['time'])
last_year_in_nc_file = findLastYearInNCTime(f.variables['time'])
#
if date.year < first_year_in_nc_file:
date = datetime.datetime(first_year_in_nc_file,date.month,date.day)
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(dateInput)+" is NOT available. "
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
msg += "\n"
logger.warning(msg)
if date.year > last_year_in_nc_file:
date = datetime.datetime(last_year_in_nc_file,date.month,date.day)
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(dateInput)+" is NOT available. "
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
msg += "\n"
logger.warning(msg)
try:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select ='exact')
msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is available. The 'exact' option is used while selecting netcdf time."
logger.debug(msg)
except:
msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'exact' option CANNOT be used while selecting netcdf time."
logger.debug(msg)
try:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select = 'before')
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'before' option is used while selecting netcdf time."
msg += "\n"
except:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select = 'after')
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'after' option is used while selecting netcdf time."
msg += "\n"
logger.warning(msg)
idx = int(idx)
logger.debug('Using the date index '+str(idx))
sameClone = True
# check whether clone and input maps have the same attributes:
if cloneMapFileName != None:
# get the attributes of cloneMap
attributeClone = getMapAttributesALL(cloneMapFileName)
cellsizeClone = attributeClone['cellsize']
rowsClone = attributeClone['rows']
colsClone = attributeClone['cols']
xULClone = attributeClone['xUL']
yULClone = attributeClone['yUL']
# get the attributes of input (netCDF)
cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
cellsizeInput = float(cellsizeInput)
rowsInput = len(f.variables['lat'])
colsInput = len(f.variables['lon'])
xULInput = f.variables['lon'][0]-0.5*cellsizeInput
yULInput = f.variables['lat'][0]+0.5*cellsizeInput
# check whether both maps have the same attributes
if cellsizeClone != cellsizeInput: sameClone = False
if rowsClone != rowsInput: sameClone = False
if colsClone != colsInput: sameClone = False
if xULClone != xULInput: sameClone = False
if yULClone != yULInput: sameClone = False
cropData = f.variables[varName][int(idx),:,:] # still original data
factor = 1 # needed in regridData2FinerGrid
if sameClone == False:
logger.debug('Crop to the clone map with lower left corner (x,y): '+str(xULClone)+' , '+str(yULClone))
# crop to cloneMap:
#~ xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
minX = min(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
xIdxSta = int(np.where(abs(f.variables['lon'][:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
#~ yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
minY = min(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
yIdxSta = int(np.where(abs(f.variables['lat'][:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
if factor > 1: logger.debug('Resample: input cell size = '+str(float(cellsizeInput))+' ; output/clone cell size = '+str(float(cellsizeClone)))
# convert to PCR object and close f
if specificFillValue != None:
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(specificFillValue))
else:
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(f.variables[varName]._FillValue))
#f.close();
f = None ; cropData = None
# PCRaster object
return (outPCR)
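# Usage sketch for netcdf2PCRobjClone (the file, variable, and clone-map
# names below are illustrative, not taken from this module):
#   precip = netcdf2PCRobjClone('forcing/precipitation.nc', 'precipitation',
#                               '2000-01-31', useDoy = None,
#                               cloneMapFileName = 'clone/landmask_05min.map')
# This reads the 2000-01-31 time slice, crops and (if needed) resamples it
# to the clone map grid -- only valid when cellsizeClone <= cellsizeInput --
# and returns the result as a PCRaster scalar field.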
def netcdf2PCRobjCloneJOYCE(ncFile,varName,dateInput,\
useDoy = None,
cloneMapFileName = None,\
LatitudeLongitude = True,\
specificFillValue = None):
#
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
# Get netCDF file and variable name:
#~ print ncFile
logger.debug('reading variable: '+str(varName)+' from the file: '+str(ncFile))
if ncFile in filecache.keys():
f = filecache[ncFile]
#~ print "Cached: ", ncFile
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
#~ print "New: ", ncFile
varName = str(varName)
if LatitudeLongitude == True:
try:
f.variables['lat'] = f.variables['latitude']
f.variables['lon'] = f.variables['longitude']
except:
pass
if varName == "evapotranspiration":
try:
f.variables['evapotranspiration'] = f.variables['referencePotET']
except:
pass
if varName == "kc": # the variable name in PCR-GLOBWB
try:
f.variables['kc'] = \
f.variables['Cropcoefficient'] # the variable name in the netcdf file
except:
pass
if varName == "interceptCapInput": # the variable name in PCR-GLOBWB
try:
f.variables['interceptCapInput'] = \
f.variables['Interceptioncapacity'] # the variable name in the netcdf file
except:
pass
if varName == "coverFractionInput": # the variable name in PCR-GLOBWB
try:
f.variables['coverFractionInput'] = \
f.variables['Coverfraction'] # the variable name in the netcdf file
except:
pass
if varName == "fracVegCover": # the variable name in PCR-GLOBWB
try:
f.variables['fracVegCover'] = \
f.variables['vegetation_fraction'] # the variable name in the netcdf file
except:
pass
if varName == "arnoBeta": # the variable name in PCR-GLOBWB
try:
f.variables['arnoBeta'] = \
f.variables['arnoSchemeBeta'] # the variable name in the netcdf file
except:
pass
# date
date = dateInput
if useDoy == "Yes":
logger.debug('Finding the date based on the given climatology doy index (1 to 366, or index 0 to 365)')
idx = int(dateInput) - 1
else:
# make sure that date is in the correct format
if isinstance(date, str) == True: date = \
datetime.datetime.strptime(str(date),'%Y-%m-%d')
date = datetime.datetime(date.year,date.month,date.day)
if useDoy == "month":
logger.debug('Finding the date based on the given climatology month index (1 to 12, or index 0 to 11)')
idx = int(date.month) - 1
if useDoy == "yearly":
date = datetime.datetime(date.year,int(1),int(1))
if useDoy == "monthly":
date = datetime.datetime(date.year,date.month,int(1))
if useDoy == "yearly" or useDoy == "monthly" or useDoy == "daily_seasonal":
# if the desired year is not available, use the first year or the last year that is available
first_year_in_nc_file = findFirstYearInNCTime(f.variables['time'])
last_year_in_nc_file = findLastYearInNCTime(f.variables['time'])
#
if date.year < first_year_in_nc_file:
date = datetime.datetime(first_year_in_nc_file,date.month,date.day)
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(dateInput)+" is NOT available. "
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
msg += "\n"
logger.warning(msg)
if date.year > last_year_in_nc_file:
date = datetime.datetime(last_year_in_nc_file,date.month,date.day)
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(dateInput)+" is NOT available. "
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is used."
msg += "\n"
logger.warning(msg)
try:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select ='exact')
msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is available. The 'exact' option is used while selecting netcdf time."
logger.debug(msg)
except:
msg = "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'exact' option CANNOT be used while selecting netcdf time."
logger.debug(msg)
try:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select = 'before')
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'before' option is used while selecting netcdf time."
msg += "\n"
except:
idx = nc.date2index(date, f.variables['time'], calendar = f.variables['time'].calendar, \
select = 'after')
msg = "\n"
msg += "WARNING related to the netcdf file: "+str(ncFile)+" ; variable: "+str(varName)+" !!!!!!"+"\n"
msg += "The date "+str(date.year)+"-"+str(date.month)+"-"+str(date.day)+" is NOT available. The 'after' option is used while selecting netcdf time."
msg += "\n"
logger.warning(msg)
idx = int(idx)
logger.debug('Using the date index '+str(idx))
cropData = f.variables[varName][int(idx),:,:].copy() # still original data
factor = 1 # needed in regridData2FinerGrid
# store latitudes and longitudes to a new variable
latitude = f.variables['lat']
longitude = f.variables['lon']
# check the orientation of the latitude and flip it if necessary
we_have_to_flip = False
if (latitude[0]- latitude[1]) < 0.0:
we_have_to_flip = True
latitude = np.flipud(latitude)
sameClone = True
# check whether clone and input maps have the same attributes:
if cloneMapFileName != None:
# get the attributes of cloneMap
attributeClone = getMapAttributesALL(cloneMapFileName)
cellsizeClone = attributeClone['cellsize']
rowsClone = attributeClone['rows']
colsClone = attributeClone['cols']
xULClone = attributeClone['xUL']
yULClone = attributeClone['yUL']
# get the attributes of input (netCDF)
cellsizeInput = latitude[0]- latitude[1]
cellsizeInput = float(cellsizeInput)
rowsInput = len(latitude)
colsInput = len(longitude)
xULInput = longitude[0]-0.5*cellsizeInput
yULInput = latitude[0] +0.5*cellsizeInput
# check whether both maps have the same attributes
if cellsizeClone != cellsizeInput: sameClone = False
if rowsClone != rowsInput: sameClone = False
if colsClone != colsInput: sameClone = False
if xULClone != xULInput: sameClone = False
if yULClone != yULInput: sameClone = False
# flip cropData if necessary: the latitude axis was flipped above, so the
# data rows must be reversed as well to keep them aligned with it
if we_have_to_flip:
cropData = cropData[::-1,:]
if sameClone == False:
logger.debug('Crop to the clone map with lower left corner (x,y): '+str(xULClone)+' , '+str(yULClone))
# crop to cloneMap:
minX = min(abs(longitude[:] - (xULClone + 0.5*cellsizeInput))) # ; print(minX)
xIdxSta = int(np.where(abs(longitude[:] - (xULClone + 0.5*cellsizeInput)) == minX)[0])
xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
minY = min(abs(latitude[:] - (yULClone - 0.5*cellsizeInput))) # ; print(minY)
yIdxSta = int(np.where(abs(latitude[:] - (yULClone - 0.5*cellsizeInput)) == minY)[0])
yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
cropData = cropData[yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
factor = int(round(float(cellsizeInput)/float(cellsizeClone)))
if factor > 1: logger.debug('Resample: input cell size = '+str(float(cellsizeInput))+' ; output/clone cell size = '+str(float(cellsizeClone)))
# convert to PCR object and close f
if specificFillValue != None:
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(specificFillValue))
else:
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(f.variables[varName]._FillValue))
#f.close();
f = None ; cropData = None
# PCRaster object
return (outPCR)
def netcdf2PCRobjCloneWindDist(ncFile,varName,dateInput,useDoy = None,
cloneMapFileName=None):
# EHS (02 SEP 2013): This is a special function made by Niko Wanders (for his DA framework).
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
# Get netCDF file and variable name:
f = nc.Dataset(ncFile)
varName = str(varName)
# date
date = dateInput
if useDoy == "Yes":
idx = dateInput - 1
else:
if isinstance(date, str) == True: date = \
datetime.datetime.strptime(str(date),'%Y-%m-%d')
date = datetime.datetime(date.year,date.month,date.day)
# time index (in the netCDF file)
nctime = f.variables['time'] # A netCDF time variable object.
idx = nc.date2index(date, nctime, calendar=nctime.calendar, \
select='exact')
idx = int(idx)
sameClone = True
# check whether clone and input maps have the same attributes:
if cloneMapFileName != None:
# get the attributes of cloneMap
attributeClone = getMapAttributesALL(cloneMapFileName)
cellsizeClone = attributeClone['cellsize']
rowsClone = attributeClone['rows']
colsClone = attributeClone['cols']
xULClone = attributeClone['xUL']
yULClone = attributeClone['yUL']
# get the attributes of input (netCDF)
cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
cellsizeInput = float(cellsizeInput)
rowsInput = len(f.variables['lat'])
colsInput = len(f.variables['lon'])
xULInput = f.variables['lon'][0]-0.5*cellsizeInput
yULInput = f.variables['lat'][0]+0.5*cellsizeInput
# check whether both maps have the same attributes
if cellsizeClone != cellsizeInput: sameClone = False
if rowsClone != rowsInput: sameClone = False
if colsClone != colsInput: sameClone = False
if xULClone != xULInput: sameClone = False
if yULClone != yULInput: sameClone = False
cropData = f.variables[varName][int(idx),:,:] # still original data
factor = 1 # needed in regridData2FinerGrid
if sameClone == False:
# crop to cloneMap:
xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
factor = int(float(cellsizeInput)/float(cellsizeClone))
# convert to PCR object and close f
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(0.0))
f.close();
f = None ; cropData = None
# PCRaster object
return (outPCR)
def netcdf2PCRobjCloneWind(ncFile,varName,dateInput,useDoy = None,
cloneMapFileName=None):
# EHS (02 SEP 2013): This is a special function made by Niko Wanders (for his DA framework).
# EHS (19 APR 2013): To convert netCDF (tss) file to PCR file.
# --- with clone checking
# Only works if cells are 'square'.
# Only works if cellsizeClone <= cellsizeInput
# Get netCDF file and variable name:
f = nc.Dataset(ncFile)
varName = str(varName)
# date
date = dateInput
if useDoy == "Yes":
idx = dateInput - 1
else:
if isinstance(date, str) == True: date = \
datetime.datetime.strptime(str(date),'%Y-%m-%d')
date = datetime.datetime(date.year,date.month,date.day, 0, 0)
# time index (in the netCDF file)
nctime = f.variables['time'] # A netCDF time variable object.
idx = nc.date2index(date, nctime, select="exact")
idx = int(idx)
sameClone = True
# check whether clone and input maps have the same attributes:
if cloneMapFileName != None:
# get the attributes of cloneMap
attributeClone = getMapAttributesALL(cloneMapFileName)
cellsizeClone = attributeClone['cellsize']
rowsClone = attributeClone['rows']
colsClone = attributeClone['cols']
xULClone = attributeClone['xUL']
yULClone = attributeClone['yUL']
# get the attributes of input (netCDF)
cellsizeInput = f.variables['lat'][0]- f.variables['lat'][1]
cellsizeInput = float(cellsizeInput)
rowsInput = len(f.variables['lat'])
colsInput = len(f.variables['lon'])
xULInput = f.variables['lon'][0]-0.5*cellsizeInput
yULInput = f.variables['lat'][0]+0.5*cellsizeInput
# check whether both maps have the same attributes
if cellsizeClone != cellsizeInput: sameClone = False
if rowsClone != rowsInput: sameClone = False
if colsClone != colsInput: sameClone = False
if xULClone != xULInput: sameClone = False
if yULClone != yULInput: sameClone = False
cropData = f.variables[varName][int(idx),:,:] # still original data
factor = 1 # needed in regridData2FinerGrid
if sameClone == False:
# crop to cloneMap:
xIdxSta = int(np.where(f.variables['lon'][:] == xULClone + 0.5*cellsizeInput)[0])
xIdxEnd = int(math.ceil(xIdxSta + colsClone /(cellsizeInput/cellsizeClone)))
yIdxSta = int(np.where(f.variables['lat'][:] == yULClone - 0.5*cellsizeInput)[0])
yIdxEnd = int(math.ceil(yIdxSta + rowsClone /(cellsizeInput/cellsizeClone)))
cropData = f.variables[varName][idx,yIdxSta:yIdxEnd,xIdxSta:xIdxEnd]
factor = int(float(cellsizeInput)/float(cellsizeClone))
# convert to PCR object and close f
outPCR = pcr.numpy2pcr(pcr.Scalar, \
regridData2FinerGrid(factor,cropData,MV), \
float(f.variables[varName]._FillValue))
f.close();
f = None ; cropData = None
# PCRaster object
return (outPCR)
def netcdf2PCRobj(ncFile,varName,dateInput):
# EHS (04 APR 2013): To convert netCDF (tss) file to PCR file.
# The cloneMap is globally defined (outside this method).
# Get netCDF file and variable name:
f = nc.Dataset(ncFile)
varName = str(varName)
# date
date = dateInput
if isinstance(date, str) == True: date = \
datetime.datetime.strptime(str(date),'%Y-%m-%d')
date = datetime.datetime(date.year,date.month,date.day)
# time index (in the netCDF file)
nctime = f.variables['time'] # A netCDF time variable object.
idx = nc.date2index(date, nctime, calendar=nctime.calendar, \
select='exact')
# convert to PCR object and close f
outPCR = pcr.numpy2pcr(pcr.Scalar,(f.variables[varName][idx].data), \
float(f.variables[varName]._FillValue))
f.close(); f = None ; del f
# PCRaster object
return (outPCR)
def makeDir(directoryName):
try:
os.makedirs(directoryName)
except OSError:
pass
def writePCRmapToDir(v,outFileName,outDir):
# v: inputMapFileName or floating values
# cloneMapFileName: If the inputMap and cloneMap have different clones,
# resampling will be done. Then,
fullFileName = getFullPath(outFileName,outDir)
pcr.report(v,fullFileName)
def readPCRmapClone(v,cloneMapFileName,tmpDir,absolutePath=None,isLddMap=False,cover=None,isNomMap=False):
# v: inputMapFileName or floating values
# cloneMapFileName: If the inputMap and cloneMap have different clones,
# resampling will be done.
logger.debug('read file/values: '+str(v))
if v == "None":
#~ PCRmap = str("None")
PCRmap = None # 29 July: changed this object's type (from the string "None" to None) as an experiment.
elif not re.match(r"[0-9.-]*$",v):
if absolutePath != None: v = getFullPath(v,absolutePath)
# print(v)
sameClone = isSameClone(v,cloneMapFileName)
if sameClone == True:
PCRmap = pcr.readmap(v)
else:
# resample using GDAL:
output = tmpDir+'temp.map'
warp = gdalwarpPCR(v,output,cloneMapFileName,tmpDir,isLddMap,isNomMap)
# read from temporary file and delete the temporary file:
PCRmap = pcr.readmap(output)
if isLddMap == True: PCRmap = pcr.ifthen(pcr.scalar(PCRmap) < 10., PCRmap)
if isLddMap == True: PCRmap = pcr.ldd(PCRmap)
if isNomMap == True: PCRmap = pcr.ifthen(pcr.scalar(PCRmap) > 0., PCRmap)
if isNomMap == True: PCRmap = pcr.nominal(PCRmap)
if os.path.isdir(tmpDir):
shutil.rmtree(tmpDir)
os.makedirs(tmpDir)
else:
PCRmap = pcr.spatial(pcr.scalar(float(v)))
if cover != None:
PCRmap = pcr.cover(PCRmap, cover)
co = None; cOut = None; err = None; warp = None
del co; del cOut; del err; del warp
stdout = None; del stdout
stderr = None; del stderr
return PCRmap
def readPCRmap(v):
# v : fileName or floating values
if not re.match(r"[0-9.-]*$", v):
PCRmap = pcr.readmap(v)
else:
PCRmap = pcr.scalar(float(v))
return PCRmap
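# Example (the map file name is illustrative): readPCRmap('0.25') returns
# the scalar value 0.25, while readPCRmap('landmask.map') reads a PCRaster
# map from disk; the regex above is what separates numbers from file names.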
def isSameClone(inputMapFileName,cloneMapFileName):
# reading inputMap:
attributeInput = getMapAttributesALL(inputMapFileName)
cellsizeInput = attributeInput['cellsize']
rowsInput = attributeInput['rows']
colsInput = attributeInput['cols']
xULInput = attributeInput['xUL']
yULInput = attributeInput['yUL']
# reading cloneMap:
attributeClone = getMapAttributesALL(cloneMapFileName)
cellsizeClone = attributeClone['cellsize']
rowsClone = attributeClone['rows']
colsClone = attributeClone['cols']
xULClone = attributeClone['xUL']
yULClone = attributeClone['yUL']
# check whether both maps have the same attributes?
sameClone = True
if cellsizeClone != cellsizeInput: sameClone = False
if rowsClone != rowsInput: sameClone = False
if colsClone != colsInput: sameClone = False
if xULClone != xULInput: sameClone = False
if yULClone != yULInput: sameClone = False
return sameClone
def gdalwarpPCR(input,output,cloneOut,tmpDir,isLddMap=False,isNominalMap=False):
# 19 Mar 2013 created by Edwin H. Sutanudjaja
# all input maps must be in PCRaster maps
#
# remove temporary files:
co = 'rm '+str(tmpDir)+'*.*'
cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull,'w'),shell=True).communicate()
#
# converting files to tif:
co = 'gdal_translate -ot Float64 '+str(input)+' '+str(tmpDir)+'tmp_inp.tif'
if isLddMap == True: co = 'gdal_translate -ot Int32 '+str(input)+' '+str(tmpDir)+'tmp_inp.tif'
if isNominalMap == True: co = 'gdal_translate -ot Int32 '+str(input)+' '+str(tmpDir)+'tmp_inp.tif'
cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull,'w'),shell=True).communicate()
#
# get the attributes of PCRaster map:
cloneAtt = getMapAttributesALL(cloneOut)
xmin = cloneAtt['xUL']
ymin = cloneAtt['yUL'] - cloneAtt['rows']*cloneAtt['cellsize']
xmax = cloneAtt['xUL'] + cloneAtt['cols']*cloneAtt['cellsize']
ymax = cloneAtt['yUL']
xres = cloneAtt['cellsize']
yres = cloneAtt['cellsize']
te = '-te '+str(xmin)+' '+str(ymin)+' '+str(xmax)+' '+str(ymax)+' '
tr = '-tr '+str(xres)+' '+str(yres)+' '
co = 'gdalwarp '+te+tr+ \
' -srcnodata -3.4028234663852886e+38 -dstnodata mv '+ \
str(tmpDir)+'tmp_inp.tif '+ \
str(tmpDir)+'tmp_out.tif'
cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull,'w'),shell=True).communicate()
#
co = 'gdal_translate -of PCRaster '+ \
str(tmpDir)+'tmp_out.tif '+str(output)
cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull,'w'),shell=True).communicate()
#
co = 'mapattr -c '+str(cloneOut)+' '+str(output)
cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull,'w'),shell=True).communicate()
#
#~ co = 'aguila '+str(output)
#~ print(co)
#~ cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull),shell=True).communicate()
#
co = 'rm '+str(tmpDir)+'tmp*.*'
cOut,err = subprocess.Popen(co, stdout=subprocess.PIPE,stderr=open(os.devnull,'w'),shell=True).communicate()
co = None; cOut = None; err = None
del co; del cOut; del err
stdout = None; del stdout
stderr = None; del stderr
n = gc.collect() ; del gc.garbage[:] ; n = None ; del n
def getFullPath(inputPath,absolutePath,completeFileName = True):
# 19 Mar 2013 created by Edwin H. Sutanudjaja
# Function: to get the full absolute path of a folder or a file
# replace all \ with /
inputPath = str(inputPath).replace("\\", "/")
absolutePath = str(absolutePath).replace("\\", "/")
# tuple of suffixes (extensions) that can be used:
suffix = ('/','_','.nc4','.map','.nc','.dat','.txt','.asc','.ldd','.tbl',\
'.001','.002','.003','.004','.005','.006',\
'.007','.008','.009','.010','.011','.012')
if inputPath.startswith('/') or (len(inputPath) > 1 and inputPath[1] == ":"):
fullPath = str(inputPath)
else:
if absolutePath.endswith('/'):
absolutePath = str(absolutePath)
else:
absolutePath = str(absolutePath)+'/'
fullPath = str(absolutePath)+str(inputPath)
if completeFileName:
if fullPath.endswith(suffix):
fullPath = str(fullPath)
else:
fullPath = str(fullPath)+'/'
return fullPath
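# Worked examples for getFullPath (all paths illustrative):
#   getFullPath('maps/dem.map', '/projects/run01')  -> '/projects/run01/maps/dem.map'
#   getFullPath('/data/dem.map', '/projects/run01') -> '/data/dem.map' (already absolute)
#   getFullPath('output', '/projects/run01')        -> '/projects/run01/output/'
# In the last case a trailing '/' is appended because 'output' does not end
# with one of the recognized suffixes in the tuple above.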
def findISIFileName(year,model,rcp,prefix,var):
histYears = [1951,1961,1971,1981,1991,2001]
sYears = [2011,2021,2031,2041,2051,2061,2071,2081,2091]
rcpStr = rcp
if year >= sYears[0]:
sYear = [i for i in range(len(sYears)) if year >= sYears[i]]
sY = sYears[sYear[-1]]
elif year < histYears[-1]:
sYear = [i for i in range(len(histYears)) if year >= histYears[i] ]
sY = histYears[sYear[-1]]
if year >= histYears[-1] and year < sYears[0]:
if model == 'HadGEM2-ES':
if year < 2005:
rcpStr = 'historical'
sY = 2001
eY = 2004
else:
rcpStr = rcp
sY = 2005
eY = 2010
if model == 'IPSL-CM5A-LR' or model == 'GFDL-ESM2M':
if year < 2006:
rcpStr = 'historical'
sY = 2001
eY = 2005
else:
rcpStr = rcp
sY = 2006
eY = 2010
else:
eY = sY + 9
if sY == 2091:
eY = 2099
if model == 'HadGEM2-ES':
if year < 2005:
rcpStr = 'historical'
if model == 'IPSL-CM5A-LR' or model == 'GFDL-ESM2M':
if year < 2006:
rcpStr = 'historical'
#print year,sY,eY
return "%s_%s_%s_%s_%i-%i.nc" %(var,prefix,model.lower(),rcpStr,sY,eY)
def get_random_word(wordLen):
word = ''
for i in range(wordLen):
word += random.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789')
return word
def isLastDayOfMonth(date):
if (date + datetime.timedelta(days=1 )).day == 1:
return True
else:
return False
def getMapAttributesALL(cloneMap,arcDegree=True):
cOut,err = subprocess.Popen(str('mapattr -p %s ' %(cloneMap)), stdout=subprocess.PIPE,stderr=open(os.devnull,'w'),shell=True).communicate()
if err != None or cOut == []:
print "Something is wrong with mapattr in virtualOS; maybe the clone map does not exist?"
sys.exit()
cellsize = float(cOut.split()[7])
if arcDegree == True: cellsize = round(cellsize * 360000.)/360000.
mapAttr = {'cellsize': float(cellsize) ,\
'rows' : float(cOut.split()[3]) ,\
'cols' : float(cOut.split()[5]) ,\
'xUL' : float(cOut.split()[17]),\
'yUL' : float(cOut.split()[19])}
co = None; cOut = None; err = None
del co; del cOut; del err
n = gc.collect() ; del gc.garbage[:] ; n = None ; del n
return mapAttr
def getMapAttributes(cloneMap,attribute,arcDegree=True):
cOut,err = subprocess.Popen(str('mapattr -p %s ' %(cloneMap)), stdout=subprocess.PIPE,stderr=open(os.devnull,'w'),shell=True).communicate()
#print cOut
if err != None or cOut == []:
print "Something is wrong with mapattr in virtualOS; maybe the clone map does not exist?"
sys.exit()
#print cOut.split()
co = None; err = None
del co; del err
n = gc.collect() ; del gc.garbage[:] ; n = None ; del n
if attribute == 'cellsize':
cellsize = float(cOut.split()[7])
if arcDegree == True: cellsize = round(cellsize * 360000.)/360000.
return cellsize
if attribute == 'rows':
return int(cOut.split()[3])
#return float(cOut.split()[3])
if attribute == 'cols':
return int(cOut.split()[5])
#return float(cOut.split()[5])
if attribute == 'xUL':
return float(cOut.split()[17])
if attribute == 'yUL':
return float(cOut.split()[19])
def getMapTotal(mapFile):
''' outputs the sum of all values in a map file '''
total, valid= pcr.cellvalue(pcr.maptotal(mapFile),1)
return total
def get_rowColAboveThreshold(map, threshold):
npMap = pcr.pcr2numpy(map, -9999)
(nr, nc) = np.shape(npMap)
for r in range(0, nr):
for c in range(0, nc):
if npMap[r, c] != -9999:
if np.abs(npMap[r, c]) > threshold:
return (r, c)
def getLastDayOfMonth(date):
''' returns the last day of the month for a given date '''
if date.month == 12:
return date.replace(day=31)
return date.replace(month=date.month + 1, day=1) - datetime.timedelta(days=1)
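# Example: getLastDayOfMonth(datetime.date(2013, 2, 10))
#          -> datetime.date(2013, 2, 28)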
def getMinMaxMean(mapFile,ignoreEmptyMap=False):
mn = pcr.cellvalue(pcr.mapminimum(mapFile),1)[0]
mx = pcr.cellvalue(pcr.mapmaximum(mapFile),1)[0]
nrValues = pcr.cellvalue(pcr.maptotal(pcr.scalar(pcr.defined(mapFile))), 1 ) [0] #/ getNumNonMissingValues(mapFile)
if nrValues == 0.0 and ignoreEmptyMap:
return 0.0,0.0,0.0
else:
return mn,mx,(getMapTotal(mapFile) / nrValues)
def getMapVolume(mapFile,cellareaFile):
''' returns the total volume: the sum of mapFile * cellareaFile over all grid cells '''
volume = mapFile * cellareaFile
return (getMapTotal(volume) / 1)
def secondsPerDay():
return float(3600 * 24)
def getValDivZero(x,y,y_lim=smallNumber,z_def= 0.):
#-returns the result of a division that possibly involves a zero
# denominator; in which case, a default value is substituted:
# x/y= z in case y > y_lim,
# x/y= z_def in case y <= y_lim, where y_lim -> 0.
# z_def is set to zero if not otherwise specified
return pcr.ifthenelse(y > y_lim,x/pcr.max(y_lim,y),z_def)
def getValFloatDivZero(x,y,y_lim,z_def= 0.):
#-returns the result of a division that possibly involves a zero
# denominator; in which case, a default value is substituted:
# x/y= z in case y > y_lim,
# x/y= z_def in case y <= y_lim, where y_lim -> 0.
# z_def is set to zero if not otherwise specified
if y > y_lim:
return x / max(y_lim,y)
else:
return z_def
def retrieveMapValue(pcrX,coordinates):
#-retrieves values from a map and returns an array conform the IDs stored in properties
nrRows= coordinates.shape[0]
x= np.ones((nrRows))* MV
tmpIDArray= pcr.pcr2numpy(pcrX,MV)
for iCnt in xrange(nrRows):
row,col= coordinates[iCnt,:]
if row != MV and col != MV:
x[iCnt]= tmpIDArray[row,col]
return x
def returnMapValue(pcrX,x,coord):
#-retrieves value from an array and update values in the map
if x.ndim == 1:
nrRows= 1
tempIDArray= pcr.pcr2numpy(pcrX,MV)
#print tempIDArray
temporary= tempIDArray
nrRows= coord.shape[0]
for iCnt in xrange(nrRows):
row,col= coord[iCnt,:]
if row != MV and col != MV:
tempIDArray[row,col]= (x[iCnt])
# print iCnt,row,col,x[iCnt]
pcrX= pcr.numpy2pcr(pcr.Scalar,tempIDArray,MV)
return pcrX
def getQAtBasinMouths(discharge, basinMouth):
temp = pcr.ifthenelse(basinMouth != 0 , discharge * secondsPerDay(),0.)
pcr.report(temp,"temp.map")
return (getMapTotal(temp) / 1e9)
def regridMapFile2FinerGrid (rescaleFac,coarse):
if rescaleFac ==1:
return coarse
return pcr.numpy2pcr(pcr.Scalar, regridData2FinerGrid(rescaleFac,pcr.pcr2numpy(coarse,MV),MV),MV)
def regridData2FinerGrid(rescaleFac,coarse,MV):
if rescaleFac ==1:
return coarse
nr,nc = np.shape(coarse)
fine= np.zeros(nr*nc*rescaleFac*rescaleFac).reshape(nr*rescaleFac,nc*rescaleFac) + MV
ii = -1
nrF,ncF = np.shape(fine)
for i in range(0 , nrF):
if i % rescaleFac == 0:
ii += 1
fine [i,:] = coarse[ii,:].repeat(rescaleFac)
nr = None; nc = None
del nr; del nc
nrF = None; ncF = None
del nrF; del ncF
n = gc.collect() ; del gc.garbage[:] ; n = None ; del n
return fine
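# A small worked example of regridData2FinerGrid with rescaleFac = 2:
#   coarse = np.array([[1., 2.],
#                      [3., 4.]])
#   regridData2FinerGrid(2, coarse, MV) ->
#       [[1., 1., 2., 2.],
#        [1., 1., 2., 2.],
#        [3., 3., 4., 4.],
#        [3., 3., 4., 4.]]
# i.e. each coarse cell is replicated into a rescaleFac x rescaleFac block
# (nearest-neighbour refinement; no interpolation is performed).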
def regridToCoarse(fine,fac,mode,missValue):
nr,nc = np.shape(fine)
coarse = np.zeros(nr/fac * nc / fac).reshape(nr/fac,nc/fac) + MV
nr,nc = np.shape(coarse)
for r in range(0,nr):
for c in range(0,nc):
ar = fine[r * fac : fac * (r+1),c * fac: fac * (c+1)]
m = np.ma.masked_values(ar,missValue)
if ma.count(m) == 0:
coarse[r,c] = MV
else:
if mode == 'average':
coarse [r,c] = ma.average(m)
elif mode == 'median':
coarse [r,c] = ma.median(m)
elif mode == 'sum':
coarse [r,c] = ma.sum(m)
elif mode =='min':
coarse [r,c] = ma.min(m)
elif mode == 'max':
coarse [r,c] = ma.max(m)
return coarse
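# Example: aggregating the 4x4 array from the previous sketch back to 2x2
# with fac = 2 and mode = 'average' takes the mean of every 2x2 block
# (masked-array operations skip cells equal to missValue):
#   regridToCoarse(fine, 2, 'average', MV) -> [[1., 2.], [3., 4.]]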
def waterBalanceCheck(fluxesIn,fluxesOut,preStorages,endStorages,processName,PrintOnlyErrors,dateStr,threshold=1e-5,landmask=None):
""" Returns the water balance for a list of input, output, and storage map files """
# modified by Edwin (22 Apr 2013)
inMap = pcr.spatial(pcr.scalar(0.0))
outMap = pcr.spatial(pcr.scalar(0.0))
dsMap = pcr.spatial(pcr.scalar(0.0))
for fluxIn in fluxesIn:
inMap += fluxIn
for fluxOut in fluxesOut:
outMap += fluxOut
for preStorage in preStorages:
dsMap += preStorage
for endStorage in endStorages:
dsMap -= endStorage
a,b,c = getMinMaxMean(inMap + dsMap- outMap)
if abs(a) > threshold or abs(b) > threshold:
if PrintOnlyErrors:
msg = "\n"
msg += "\n"
msg = "\n"
msg += "\n"
msg += "##############################################################################################################################################\n"
msg += "WARNING !!!!!!!! Water Balance Error %s Min %f Max %f Mean %f" %(processName,a,b,c)
msg += "\n"
msg += "##############################################################################################################################################\n"
msg += "\n"
msg += "\n"
msg += "\n"
logger.error(msg)
#~ pcr.report(inMap + dsMap - outMap,"wb.map")
#~ os.system("aguila wb.map")
#~ # for debugging:
#~ error = inMap + dsMap- outMap
#~ os.system('rm error.map')
#~ pcr.report(error,"error.map")
#~ os.system('aguila error.map')
#~ os.system('rm error.map')
#~ wb = inMap + dsMap - outMap
#~ maxWBError = pcr.cellvalue(pcr.mapmaximum(pcr.abs(wb)), 1, 1)[0]
#~ #return wb
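# Hedged usage sketch (the map names below are illustrative assumptions):
# over one time step the residual in + (S_pre - S_end) - out should vanish:
#   waterBalanceCheck([precipitation], [evaporation, runoff],
#                     [storageAtStart], [storageAtEnd],
#                     'soil water balance', True, currTimeStepStr)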
def waterBalance( fluxesIn, fluxesOut, deltaStorages, processName, PrintOnlyErrors, dateStr,threshold=1e-5):
""" Returns the water balance for a list of input, output, and storage map files and """
inMap = pcr.spatial(pcr.scalar(0.0))
dsMap = pcr.spatial(pcr.scalar(0.0))
outMap = pcr.spatial(pcr.scalar(0.0))
inflow = 0
outflow = 0
deltaS = 0
for fluxIn in fluxesIn:
inflow += getMapTotal(fluxIn)
inMap += fluxIn
for fluxOut in fluxesOut:
outflow += getMapTotal(fluxOut)
outMap += fluxOut
for deltaStorage in deltaStorages:
deltaS += getMapTotal(deltaStorage)
dsMap += deltaStorage
#if PrintOnlyErrors:
a,b,c = getMinMaxMean(inMap + dsMap- outMap)
# if abs(a) > 1e-5 or abs(b) > 1e-5:
# if abs(a) > 1e-4 or abs(b) > 1e-4:
if abs(a) > threshold or abs(b) > threshold:
print "WBError %s Min %f Max %f Mean %f" %(processName,a,b,c)
# if abs(inflow + deltaS - outflow) > 1e-5:
# print "Water balance Error for %s on %s: in = %f\tout=%f\tdeltaS=%f\tBalance=%f" \
# %(processName,dateStr,inflow,outflow,deltaS,inflow + deltaS - outflow)
#else:
# print "Water balance for %s: on %s in = %f\tout=%f\tdeltaS=%f\tBalance=%f" \
# %(processName,dateStr,inflow,outflow,deltaS,inflow + deltaS - outflow)
wb = inMap + dsMap - outMap
maxWBError = pcr.cellvalue(pcr.mapmaximum(pcr.abs(wb)), 1, 1)[0]
#if maxWBError > 0.001 / 1000:
#row = 0
#col = 0
#cellID = 1
#troubleCell = 0
#print "Water balance for %s on %s: %f mm !!! " %(processName,dateStr,maxWBError * 1000)
#pcr.report(wb,"%s-WaterBalanceError-%s" %(processName,dateStr))
#npWBMError = pcr2numpy(wb, -9999)
#(nr, nc) = np.shape(npWBMError)
#for r in range(0, nr):
#for c in range(0, nc):
## print r,c
#if npWBMError[r, c] != -9999.0:
#val = npWBMError[r, c]
#if math.fabs(val) > 0.0001 / 1000:
## print npWBMError[r,c]
#row = r
#col = c
#troubleCell = cellID
#cellID += 1
#print 'Water balance for %s on %s: %f mm row %i col %i cellID %i!!! ' % (
#processName,
#dateStr,
#maxWBError * 1000,
#row,
#col,
#troubleCell,
#)
return inMap + dsMap - outMap
def waterAbstractionAndAllocation(water_demand_volume,available_water_volume,allocation_zones,\
zone_area = None,
high_volume_treshold = 1000000.,
debug_water_balance = True,\
extra_info_for_water_balance_reporting = "",
ignore_small_values = True):
logger.debug("Allocation of abstraction.")
# demand volume in each cell (unit: m3)
    if ignore_small_values: # ignore small values to avoid rounding errors
cellVolDemand = pcr.rounddown(pcr.max(0.0, water_demand_volume))
else:
cellVolDemand = pcr.max(0.0, water_demand_volume)
# total demand volume in each zone/segment (unit: m3)
zoneVolDemand = pcr.areatotal(cellVolDemand, allocation_zones)
# total available water volume in each cell
    if ignore_small_values: # ignore small values to avoid rounding errors
cellAvlWater = pcr.rounddown(pcr.max(0.00, available_water_volume))
else:
cellAvlWater = pcr.max(0.00, available_water_volume)
    # total available water volume in each zone/segment (unit: m3)
    # - to minimize numerical errors, large and small cell volumes are summed separately
    if high_volume_treshold is not None:
# mask: 0 for small volumes ; 1 for large volumes (e.g. in lakes and reservoirs)
mask = pcr.cover(\
pcr.ifthen(cellAvlWater > high_volume_treshold, pcr.boolean(1)), pcr.boolean(0))
zoneAvlWater = pcr.areatotal(
pcr.ifthenelse(mask, 0.0, cellAvlWater), allocation_zones)
zoneAvlWater += pcr.areatotal(
pcr.ifthenelse(mask, cellAvlWater, 0.0), allocation_zones)
else:
zoneAvlWater = pcr.areatotal(cellAvlWater, allocation_zones)
# total actual water abstraction volume in each zone/segment (unit: m3)
# - limited to available water
zoneAbstraction = pcr.min(zoneAvlWater, zoneVolDemand)
# actual water abstraction volume in each cell (unit: m3)
cellAbstraction = getValDivZero(\
cellAvlWater, zoneAvlWater, smallNumber)*zoneAbstraction
cellAbstraction = pcr.min(cellAbstraction, cellAvlWater)
    if ignore_small_values: # ignore small values to avoid rounding errors
        cellAbstraction = pcr.rounddown(pcr.max(0.00, cellAbstraction))
    # to minimize numerical errors, large and small abstraction volumes are summed separately
    if high_volume_treshold is not None:
# mask: 0 for small volumes ; 1 for large volumes (e.g. in lakes and reservoirs)
mask = pcr.cover(\
pcr.ifthen(cellAbstraction > high_volume_treshold, pcr.boolean(1)), pcr.boolean(0))
zoneAbstraction = pcr.areatotal(
pcr.ifthenelse(mask, 0.0, cellAbstraction), allocation_zones)
zoneAbstraction += pcr.areatotal(
pcr.ifthenelse(mask, cellAbstraction, 0.0), allocation_zones)
else:
zoneAbstraction = pcr.areatotal(cellAbstraction, allocation_zones)
# allocation water to meet water demand (unit: m3)
cellAllocation = getValDivZero(\
cellVolDemand, zoneVolDemand, smallNumber)*zoneAbstraction
#~ # extraAbstraction to minimize numerical errors:
#~ zoneDeficitAbstraction = pcr.max(0.0,\
#~ pcr.areatotal(cellAllocation , allocation_zones) -\
#~ pcr.areatotal(cellAbstraction, allocation_zones))
#~ remainingCellAvlWater = pcr.max(0.0, cellAvlWater - cellAbstraction)
#~ cellAbstraction += zoneDeficitAbstraction * getValDivZero(\
#~ remainingCellAvlWater,
#~ pcr.areatotal(remainingCellAvlWater, allocation_zones),
#~ smallNumber)
#~ #
#~ # extraAllocation to minimize numerical errors:
#~ zoneDeficitAllocation = pcr.max(0.0,\
#~ pcr.areatotal(cellAbstraction, allocation_zones) -\
#~ pcr.areatotal(cellAllocation , allocation_zones))
#~ remainingCellDemand = pcr.max(0.0, cellVolDemand - cellAllocation)
#~ cellAllocation += zoneDeficitAllocation * getValDivZero(\
#~ remainingCellDemand,
#~ pcr.areatotal(remainingCellDemand, allocation_zones),
#~ smallNumber)
    if debug_water_balance and zone_area is not None:
zoneAbstraction = pcr.cover(pcr.areatotal(cellAbstraction, allocation_zones)/zone_area, 0.0)
zoneAllocation = pcr.cover(pcr.areatotal(cellAllocation , allocation_zones)/zone_area, 0.0)
waterBalanceCheck([zoneAbstraction],\
[zoneAllocation],\
[pcr.scalar(0.0)],\
[pcr.scalar(0.0)],\
'abstraction - allocation per zone/segment (PS: Error here may be caused by rounding error.)' ,\
True,\
extra_info_for_water_balance_reporting,threshold=1e-4)
return cellAbstraction, cellAllocation
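# Worked example (scalar arithmetic, added for illustration): one zone with
# two cells, demands 8 and 2 m3 (zone demand 10), availabilities 3 and 1 m3
# (zone availability 4):
#   zoneAbstraction = min(4, 10) = 4 m3
#   cellAbstraction = (3/4)*4 = 3.0 and (1/4)*4 = 1.0 m3  (proportional to supply)
#   cellAllocation  = (8/10)*4 = 3.2 and (2/10)*4 = 0.8 m3 (proportional to demand)
# so the zone totals of abstraction and allocation agree, as checked above.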
def findLastYearInNCFile(ncFile):
# open a netcdf file:
if ncFile in filecache.keys():
f = filecache[ncFile]
else:
f = nc.Dataset(ncFile)
filecache[ncFile] = f
# last datetime
last_datetime_year = findLastYearInNCTime(f.variables['time'])
return last_datetime_year
def findLastYearInNCTime(ncTimeVariable):
# last datetime
last_datetime = nc.num2date(ncTimeVariable[len(ncTimeVariable) - 1],\
ncTimeVariable.units,\
ncTimeVariable.calendar)
return last_datetime.year
def findFirstYearInNCTime(ncTimeVariable):
# first datetime
first_datetime = nc.num2date(ncTimeVariable[0],\
ncTimeVariable.units,\
ncTimeVariable.calendar)
return first_datetime.year
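# Hedged example (the units string is an assumption): for a time variable
# with units 'days since 1901-01-01' and calendar 'standard',
#   nc.num2date(0.0, 'days since 1901-01-01', 'standard').year -> 1901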
def cmd_line(command_line,using_subprocess = True):
msg = "Call: "+str(command_line)
logger.debug(msg)
co = command_line
if using_subprocess:
        cOut, err = subprocess.Popen(co, stdout=subprocess.PIPE, stderr=open(os.devnull, 'w'), shell=True).communicate()
else:
os.system(co)
|
edwinkost/edwin_simple_tools
|
netcdf_to_pcraster/virtualOS.py
|
Python
|
gpl-2.0
| 59,924
|
[
"NetCDF"
] |
69d45ccdff610b741bab2e7f3ed858c57c3943c195ff6760e707e5ab89d8d4fe
|
from director import asynctaskqueue as atq
from director import segmentation
from director import visualization as vis
import director.objectmodel as om
from director import propertyset
from director import pointpicker
from director import planplayback
from director.timercallback import TimerCallback
from director.simpletimer import SimpleTimer
from director import ikplanner
from director import callbacks
from director import robotsystem
from director import transformUtils
from director import affordanceitems
from director import vtkNumpy as vnp
from director.debugVis import DebugData
from director import vtkAll as vtk
from director import lcmUtils
import numpy as np
import copy
import pickle
import PythonQt
from PythonQt import QtCore, QtGui
import re
import inspect
try:
import drc as lcmdrc
HAVE_DRC_MESSAGES = True
except ImportError:
HAVE_DRC_MESSAGES = False
robotSystem = None
class ManipulationPlanItem(om.ObjectModelItem):
pass
class FootstepPlanItem(om.ObjectModelItem):
pass
class WalkingPlanItem(om.ObjectModelItem):
pass
def _splitCamelCase(name):
name = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1 \2', name)
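# e.g. _splitCamelCase('WaitForMultisenseLidar') -> 'Wait For Multisense Lidar'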
class AsyncTask(object):
    '''
    Base class for tasks executed by an AsyncTaskQueue. Subclasses override
    run(), which may be a generator that yields to return control to the
    queue; fail() aborts the task with a reason.
    '''
def __init__(self, **kwargs):
self.statusMessage = ''
self.failReason = ''
self.properties = propertyset.PropertySet()
self.properties.addProperty('Name', _splitCamelCase(self.__class__.__name__).lower())
for cls in reversed(inspect.getmro(self.__class__)):
if hasattr(cls, 'getDefaultProperties'):
cls.getDefaultProperties(self.properties)
for name, value in kwargs.iteritems():
self.properties.setProperty(_splitCamelCase(name).capitalize(), value)
def __call__(self):
return self.run()
def stop(self):
pass
def run(self):
pass
def fail(self, reason):
self.failReason = reason
raise atq.AsyncTaskQueue.FailException(reason)
def copy(self):
return copy.deepcopy(self)
class PrintTask(AsyncTask):
'''
Name: Print Task
Short Description: prints a string
Description:
This task prints a message string.
'''
printFunction = None
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Message', '<empty message>')
def run(self):
if self.printFunction:
self.printFunction(self.properties.message)
else:
print self.properties.message
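# Hedged usage sketch (added for illustration; assumes AsyncTaskQueue exposes
# addTask() and start(), which are defined outside this file):
#
#   taskQueue = atq.AsyncTaskQueue()
#   taskQueue.addTask(PrintTask(message='hello'))  # kwarg maps to 'Message'
#   taskQueue.addTask(DelayTask(delayTime=2.0))    # kwarg maps to 'Delay time'
#   taskQueue.start()
#
# A task's run() may be a generator; each yield returns control to the queue
# until the task completes or fail() raises FailException.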
class CallbackTask(AsyncTask):
def __init__(self, callback=None, **kwargs):
AsyncTask.__init__(self, **kwargs)
self.callback = callback
def run(self):
if self.callback:
yield self.callback()
class ExceptionTask(AsyncTask):
def run(self):
raise Exception('Task exception')
class UserPromptTask(AsyncTask):
promptsEnabled = True
promptFunction = None
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Message', 'continue?')
properties.addProperty('Always', False)
def showUserPrompt(self):
if self.promptFunction:
self.promptFunction(self, self.properties.message)
else:
self.showDialog()
def showDialog(self):
self.d = QtGui.QDialog()
buttons = QtGui.QDialogButtonBox()
buttons.addButton('Yes', QtGui.QDialogButtonBox.AcceptRole)
buttons.addButton('No', QtGui.QDialogButtonBox.RejectRole)
buttons.connect('accepted()', self.d.accept)
buttons.connect('rejected()', self.d.reject)
l = QtGui.QVBoxLayout(self.d)
l.addWidget(QtGui.QLabel(self.properties.message))
l.addWidget(buttons)
self.d.setAttribute(QtCore.Qt.WA_QuitOnClose, False)
self.d.show()
self.d.raise_()
self.d.connect('accepted()', self.accept)
self.d.connect('rejected()', self.reject)
def accept(self):
self.result = True
def reject(self):
self.result = False
def run(self):
if not self.promptsEnabled and not self.properties.getProperty('Always'):
return
self.result = None
self.showUserPrompt()
while self.result is None:
yield
if not self.result:
raise atq.AsyncTaskQueue.PauseException()
class CheckPlanInfo(UserPromptTask):
@staticmethod
def getDefaultProperties(properties):
UserPromptTask.getDefaultProperties(properties)
properties.setProperty('Message', 'Plan is invalid. Do you want to accept it anyway?')
def run(self):
if robotSystem.ikPlanner.lastManipPlan and max(robotSystem.ikPlanner.lastManipPlan.plan_info) <= 10 and min(robotSystem.ikPlanner.lastManipPlan.plan_info) >= 0:
return
else:
return UserPromptTask.run(self)
class DelayTask(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Delay time', 1.0, attributes=propertyset.PropertyAttributes(minimum=0.0, maximum=1e4, singleStep=0.1))
def run(self):
delayTime = self.properties.getProperty('Delay time')
t = SimpleTimer()
while True:
elapsed = t.elapsed()
if elapsed >= delayTime:
break
self.statusMessage = 'Waiting %.1f seconds' % (delayTime - elapsed)
yield
class PauseTask(AsyncTask):
def run(self):
raise atq.AsyncTaskQueue.PauseException()
class QuitTask(AsyncTask):
def run(self):
QtCore.QCoreApplication.instance().quit()
class WaitForMultisenseLidar(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Number of sweeps', 1, attributes=propertyset.PropertyAttributes(minimum=0, maximum=100))
def run(self):
self.multisenseDriver = robotSystem.multisenseDriver
currentRevolution = self.multisenseDriver.displayedRevolution
desiredRevolution = currentRevolution + self.properties.getProperty('Number of sweeps')
while self.multisenseDriver.displayedRevolution < desiredRevolution:
self.statusMessage = 'Waiting for multisense sweep'
yield
class SnapshotPointcloud(AsyncTask):
def run(self):
polyData = self.getPointCloud()
om.removeFromObjectModel(om.findObjectByName('pointcloud snapshot'))
vis.showPolyData(polyData, 'pointcloud snapshot', parent='segmentation', visible=False)
class SnapshotMultisensePointcloud(SnapshotPointcloud):
def getPointCloud(self):
return segmentation.getCurrentRevolutionData()
class SnapshotSelectedPointcloud(SnapshotPointcloud):
def getPointCloud(self):
obj = om.getActiveObject()
if obj and obj.getProperty('Name') == 'Multisense':
return SnapshotMultisensePointcloud().getPointCloud()
elif obj and obj.getProperty('Name') == 'stereo point cloud':
return SnapshotStereoPointcloud().getPointCloud()
elif obj and hasattr(obj, 'polyData'):
return obj.polyData
else:
self.fail('no pointcloud is selected')
class SnapshotStereoPointcloud(SnapshotPointcloud):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Remove Outliers', False)
def getPointCloud(self):
        return segmentation.getDisparityPointCloud(decimation=1, removeOutliers=self.properties.getProperty('Remove Outliers'))
class PointCloudAlgorithmBase(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Use pointcloud snapshot', True)
def getPointCloud(self):
if self.properties.getProperty('Use pointcloud snapshot'):
obj = om.findObjectByName('pointcloud snapshot')
if obj is None:
self.fail('pointcloud snapshot not found')
if not obj.polyData.GetNumberOfPoints():
self.fail('input pointcloud is empty')
return obj.polyData
else:
return SnapshotSelectedPointcloud().getPointCloud()
class FitDrill(PointCloudAlgorithmBase):
def run(self):
polyData = self.getPointCloud()
segmentation.findAndFitDrillBarrel(polyData)
class FindRotaryDrillByAnnotation(PointCloudAlgorithmBase):
def getAnnotationInputPoint(self):
obj = om.findObjectByName('rotary drill annotation')
if obj is None:
self.fail('user annotation not found')
return obj.annotationPoints[0]
def run(self):
point = self.getAnnotationInputPoint()
polyData = self.getPointCloud()
#segmentation.segmentDrillAuto(point, polyData)
om.removeFromObjectModel(om.findObjectByName('drill'))
segmentation.segmentDrillAlignedWithTable(point, polyData)
class WaitForAtlasBehavior(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Behavior name', '')
def run(self):
behaviorName = self.properties.getProperty('Behavior name')
assert behaviorName in robotSystem.atlasDriver.getBehaviorMap().values()
while robotSystem.atlasDriver.getCurrentBehaviorName() != behaviorName:
yield
class WaitForWalkExecutionBDI(AsyncTask):
def run(self):
self.statusMessage = 'Waiting for BDI walking to begin...'
while robotSystem.atlasDriver.getCurrentBehaviorName() != 'step':
yield
self.statusMessage = 'Waiting for BDI walk execution...'
while robotSystem.atlasDriver.getCurrentBehaviorName() != 'stand':
yield
class WaitForPlanExecution(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Timeout', 5.0, attributes=propertyset.PropertyAttributes(minimum=0.0, maximum=1e4, singleStep=0.1, decimals=2))
def promptUserForPlanRecommit(self):
prompt = UserPromptTask(message='Plan appears dropped. Recommit?')
return prompt.run()
def run(self):
def getMsg():
return robotSystem.atlasDriver.lastControllerStatusMessage
def isExecuting():
return getMsg().execution_status == lcmdrc.plan_status_t.EXECUTION_STATUS_EXECUTING
# wait for first status message
while not getMsg():
yield
if isExecuting():
raise Exception('error, invoked during plan execution and cannot guarantee safety.')
t = SimpleTimer()
lastPlanStartTime = getMsg().last_plan_start_utime
# wait for next plan to begin
self.statusMessage = 'Waiting for %s to begin...' % self.getTypeLabel()
while getMsg().last_plan_start_utime == lastPlanStartTime:
if t.elapsed() > self.properties.getProperty('Timeout'):
yield self.promptUserForPlanRecommit()
t.reset()
self.recommitPlan()
else:
yield
# wait for execution
self.statusMessage = 'Waiting for %s execution...' % self.getTypeLabel()
while getMsg().execution_status == lcmdrc.plan_status_t.EXECUTION_STATUS_EXECUTING:
if getMsg().plan_type != self.getType():
raise Exception('error, unexpected execution plan type: %s' % getMsg().plan_type)
yield
self.statusMessage = 'Waiting for recent robot state...'
while robotSystem.robotStateJointController.lastRobotStateMessage.utime < getMsg().last_plan_start_utime:
yield
class WaitForManipulationPlanExecution(WaitForPlanExecution):
def getType(self):
return lcmdrc.plan_status_t.MANIPULATING
def getTypeLabel(self):
return 'manipulation'
def recommitPlan(self):
lastPlan = robotSystem.manipPlanner.committedPlans.pop()
robotSystem.manipPlanner.commitManipPlan(lastPlan)
class WaitForWalkExecution(WaitForPlanExecution):
def getType(self):
return lcmdrc.plan_status_t.WALKING
def getTypeLabel(self):
return 'walking'
def recommitPlan(self):
lastPlan = robotSystem.footstepsDriver.committedPlans.pop()
robotSystem.footstepsDriver.commitFootstepPlan(lastPlan)
class UserSelectAffordanceCandidate(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Candidate name prefix', '')
properties.addProperty('New name', '')
properties.addProperty('Delete candidates', True)
def getCandidates(self):
namePrefix = self.properties.getProperty('Candidate name prefix')
matchStr = '^%s [0-9]+$' % namePrefix
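        # e.g. with prefix 'valve' this matches 'valve 0' or 'valve 12',
        # but not 'valve' itself or 'valve frame'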
return [obj for obj in om.getObjects() if re.match(matchStr, obj.getProperty('Name'))]
def selectCandidate(self, selectedObj, candidates):
if self.properties.getProperty('Delete candidates'):
for obj in candidates:
if obj != selectedObj:
om.removeFromObjectModel(obj)
newName = self.properties.getProperty('New name')
if newName:
selectedObj.rename(newName)
def run(self):
candidates = self.getCandidates()
if not candidates:
self.fail('no affordance candidates found')
om.clearSelection()
self.statusMessage = 'Please select affordance candidate: %s' % self.properties.getProperty('Candidate name prefix')
while True:
obj = om.getActiveObject()
if obj and obj in candidates:
break
else:
yield
self.selectCandidate(obj, candidates)
class TransformFrame(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Frame input name', '')
properties.addProperty('Frame output name', '')
properties.addProperty('Translation', [0.0, 0.0, 0.0], attributes=propertyset.PropertyAttributes(decimals=3, minimum=-1e5, maximum=1e5))
properties.addProperty('Rotation', [0.0, 0.0, 0.0], attributes=propertyset.PropertyAttributes(decimals=3, minimum=-360, maximum=360))
def getInputFrame(self):
name = self.properties.getProperty('Frame input name')
frame = om.findObjectByName(name)
if not isinstance(frame, vis.FrameItem):
self.fail('frame not found: %s' % name)
return frame
def run(self):
inputFrame = self.getInputFrame()
translation = self.properties.getProperty('Translation')
rpy = self.properties.getProperty('Rotation')
offset = transformUtils.frameFromPositionAndRPY(translation, rpy)
offset.PostMultiply()
offset.Concatenate(transformUtils.copyFrame(inputFrame.transform))
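        # with PostMultiply, the concatenated input frame is applied after the
        # offset, so Translation/Rotation are interpreted in the input frame's
        # local axes (an added note, inferred from VTK transform semantics)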
outputFrame = vis.updateFrame(offset, self.properties.getProperty('Frame output name'), scale=0.2, parent=inputFrame.parent())
if not hasattr(inputFrame, 'frameSync'):
inputFrame.frameSync = vis.FrameSync()
inputFrame.frameSync.addFrame(inputFrame)
inputFrame.frameSync.addFrame(outputFrame, ignoreIncoming=True)
class ComputeRobotFootFrame(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Pose name', 'EST_ROBOT_STATE')
properties.addProperty('Frame output name', 'robot foot frame')
def run(self):
poseName = self.properties.getProperty('Pose name')
if poseName == 'EST_ROBOT_STATE':
pose = robotSystem.robotStateJointController.q.copy()
else:
pose = robotSystem.ikPlanner.jointController.getPose(poseName)
robotModel = robotSystem.ikPlanner.getRobotModelAtPose(pose)
footFrame = robotSystem.footstepsDriver.getFeetMidPoint(robotModel)
vis.updateFrame(footFrame, self.properties.getProperty('Frame output name'), scale=0.2)
class FindAffordance(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Affordance name', '')
def run(self):
affordanceName = self.properties.getProperty('Affordance name')
obj = om.findObjectByName(affordanceName)
if not obj:
self.fail('could not find affordance: %s' % affordanceName)
class ProjectAffordanceToGround(PointCloudAlgorithmBase):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Affordance name', '')
properties.addProperty('Ground frame name', '')
properties.addProperty('Frame output name', '')
def getSelectedAffordance(self):
affordanceName = self.properties.getProperty('Affordance name')
if affordanceName:
obj = om.findObjectByName(affordanceName)
if not obj:
self.fail('could not find affordance: %s' % affordanceName)
else:
obj = om.getActiveObject()
if obj is None:
self.fail('no affordance is selected')
try:
frame = obj.getChildFrame()
except AttributeError:
frame = None
if frame is None:
self.fail('affordance does not have a frame')
return obj
def getGroundFrame(self):
frame = om.findObjectByName(self.properties.getProperty('Ground frame name'))
if not frame:
self.fail('could not find ground frame')
return frame
def run(self):
aff = self.getSelectedAffordance()
affFrame = aff.getChildFrame().transform
groundFrame = self.getGroundFrame().transform
projectedXYZ = np.hstack([affFrame.GetPosition()[0:2], groundFrame.GetPosition()[2]])
result = transformUtils.copyFrame(affFrame)
result.Translate(projectedXYZ - np.array(result.GetPosition()))
outputName = self.properties.getProperty('Frame output name')
outputName = outputName or '%s ground frame' % aff.getProperty('Name')
vis.updateFrame(result, outputName, scale=0.2)
class UserAnnotatePointCloud(PointCloudAlgorithmBase):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Annotation name', 'user annotation')
properties.addProperty('Number of points', 2)
def clearPicker(self):
self.picker.stop()
self.picker.clear()
self.picker = None
def onAnnotationAborted(self):
om.removeFromObjectModel(self.picker.annotationObj)
self.aborted = True
self.clearPicker()
def onAnnotationComplete(self, *pts):
self.picker.annotationObj.annotationPoints = pts
self.clearPicker()
def startAnnotationPicker(self):
view = robotSystem.view
polyData = self.getPointCloud()
self.picker = pointpicker.PointPicker(view, numberOfPoints=self.properties.getProperty('Number of points'), drawLines=True, callback=self.onAnnotationComplete, abortCallback=self.onAnnotationAborted)
self.picker.annotationName = self.properties.getProperty('Annotation name')
self.picker.annotationFolder = 'annotations'
self.picker.pickType = 'points' if polyData.GetNumberOfCells() == polyData.GetNumberOfVerts() else 'render'
self.aborted = False
self.picker.start()
def run(self):
self.startAnnotationPicker()
self.statusMessage = 'Annotate point cloud (shift+click) to select points'
while self.picker is not None:
yield
if self.aborted:
self.fail('user abort')
class FindHorizontalSurfaces(PointCloudAlgorithmBase):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Normal estimation search radius', 0.03, attributes=propertyset.PropertyAttributes(decimals=3, minimum=0.0, maximum=100))
properties.addProperty('Cluster tolerance', 0.02, attributes=propertyset.PropertyAttributes(decimals=3, minimum=0.0, maximum=10))
properties.addProperty('Min cluster size', 150, attributes=propertyset.PropertyAttributes(minimum=3, maximum=1e6))
properties.addProperty('Distance to plane threshold', 0.01, attributes=propertyset.PropertyAttributes(decimals=4, minimum=0.0, maximum=1))
properties.addProperty('Normals dot up range', [0.9, 1.0], attributes=propertyset.PropertyAttributes(decimals=2, minimum=0.0, maximum=1))
def run(self):
polyData = self.getPointCloud()
segmentation.findHorizontalSurfaces(polyData,
removeGroundFirst=True,
showClusters=True,
normalEstimationSearchRadius=self.properties.getProperty('Normal estimation search radius'),
clusterTolerance=self.properties.getProperty('Cluster tolerance'),
minClusterSize=self.properties.getProperty('Min cluster size'),
distanceToPlaneThreshold=self.properties.getProperty('Distance to plane threshold'),
normalsDotUpRange=self.properties.getProperty('Normals dot up range')
)
class SetNeckPitch(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Angle', 0, attributes=om.PropertyAttributes(minimum=-35, maximum=90))
def run(self):
robotSystem.neckDriver.setNeckPitch(self.properties.getProperty('Angle'))
class SetArmsPosition(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Posture group', 'General')
properties.addProperty('Posture name', 'handsdown both')
def run(self):
startPosture = robotSystem.robotStateJointController.q.copy()
side = None
pose = robotSystem.ikPlanner.getMergedPostureFromDatabase(startPosture, self.properties.getProperty('Posture group'), self.properties.getProperty('Posture name'), side)
plan = robotSystem.ikPlanner.computePostureGoal(startPosture, pose)
#_addPlanItem(plan, self.properties.getProperty('Posture name') + ' posture plan', ManipulationPlanItem)
class CloseHand(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Side', 0, attributes=om.PropertyAttributes(enumNames=['Left', 'Right']))
properties.addProperty('Mode', 0, attributes=om.PropertyAttributes(enumNames=['Basic', 'Pinch']))
properties.addProperty('Amount', 100, attributes=propertyset.PropertyAttributes(minimum=0, maximum=100))
properties.addProperty('Check status', False)
def getHandDriver(self, side):
assert side in ('left', 'right')
return robotSystem.lHandDriver if side == 'left' else robotSystem.rHandDriver
def run(self):
side = self.properties.getPropertyEnumValue('Side').lower()
self.getHandDriver(side).sendCustom(self.properties.getProperty('Amount'), 100, 100, self.properties.getProperty('Mode'))
if self.properties.getProperty('Check status'):
WaitForGraspingState(actionName='Grasp').run()
class OpenHand(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Side', 0, attributes=om.PropertyAttributes(enumNames=['Left', 'Right']))
properties.addProperty('Mode', 0, attributes=om.PropertyAttributes(enumNames=['Basic', 'Pinch']))
properties.addProperty('Amount', 100, attributes=propertyset.PropertyAttributes(minimum=0, maximum=100))
properties.addProperty('Check status', False)
def getHandDriver(self, side):
assert side in ('left', 'right')
return robotSystem.lHandDriver if side == 'left' else robotSystem.rHandDriver
def run(self):
side = self.properties.getPropertyEnumValue('Side').lower()
self.getHandDriver(side).sendOpen()
self.getHandDriver(side).sendCustom(100-self.properties.getProperty('Amount'), 100, 100, self.properties.getProperty('Mode'))
if self.properties.getProperty('Check status'):
WaitForGraspingState(actionName='Open').run()
class WaitForGraspingState(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Channel name', 'GRASPING_STATE')
properties.addProperty('Action name', 0, attributes=om.PropertyAttributes(enumNames=['Open', 'Grasp']))
# TODO: properties for timeout, responseMessageClass, expectedResponse
def run(self):
responseMessageClass = lcmdrc.boolean_t
grasping_state = lcmUtils.MessageResponseHelper(self.properties.getProperty('Channel name'), responseMessageClass).waitForResponse(timeout=7000)
if grasping_state is not None and self.properties.getPropertyEnumValue('Action name') == 'Open':
if grasping_state.data == 0:
# print "Hand opening successful"
self.statusMessage = "Hand opening successful"
else:
self.fail("Could not open hand")
elif grasping_state is not None and self.properties.getPropertyEnumValue('Action name') == 'Grasp':
if grasping_state.data == 1:
# print "Grasping successful"
self.statusMessage = "Grasping successful"
else:
self.fail("No object in hand")
else:
self.fail("Grasping state timeout")
class CommitFootstepPlan(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Plan name', '')
def run(self):
#planName = self.properties.getProperty('Plan name')
#plan = om.findObjectByName(planName)
#if not isinstance(plan, FootstepPlanItem):
# self.fail('could not find footstep plan')
#plan = plan.plan
plan = robotSystem.footstepsDriver.lastFootstepPlan
robotSystem.footstepsDriver.commitFootstepPlan(plan)
class CommitManipulationPlan(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Plan name', '')
def run(self):
planName = self.properties.getProperty('Plan name')
plan = om.findObjectByName(planName)
if not isinstance(plan, ManipulationPlanItem):
self.fail('could not find manipulation plan')
plan = plan.plan
robotSystem.manipPlanner.commitManipPlan(plan)
class RequestWalkingPlan(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Start pose name', 'EST_ROBOT_STATE')
properties.addProperty('Footstep plan name', '')
def run(self):
poseName = self.properties.getProperty('Start pose name')
if poseName == 'EST_ROBOT_STATE':
pose = robotSystem.robotStateJointController.q.copy()
else:
pose = robotSystem.ikPlanner.jointController.getPose(poseName)
planName = self.properties.getProperty('Footstep plan name')
plan = om.findObjectByName(planName)
if not isinstance(plan, FootstepPlanItem):
self.fail('could not find footstep plan: %s' % planName)
plan = plan.plan
robotSystem.footstepsDriver.sendWalkingPlanRequest(plan, pose, waitForResponse=True)
def _addPlanItem(plan, name, itemClass):
assert plan is not None
item = itemClass(name)
item.plan = plan
om.removeFromObjectModel(om.findObjectByName(name))
om.addToObjectModel(item, om.getOrCreateContainer('segmentation'))
return item
class RequestFootstepPlan(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Stance frame name', 'stance frame')
properties.addProperty('Start pose name', 'EST_ROBOT_STATE')
def run(self):
poseName = self.properties.getProperty('Start pose name')
if poseName == 'EST_ROBOT_STATE':
pose = robotSystem.robotStateJointController.q.copy()
else:
pose = robotSystem.ikPlanner.jointController.getPose(poseName)
goalFrame = om.findObjectByName(self.properties.getProperty('Stance frame name')).transform
request = robotSystem.footstepsDriver.constructFootstepPlanRequest(pose, goalFrame)
footstepPlan = robotSystem.footstepsDriver.sendFootstepPlanRequest(request, waitForResponse=True)
if not footstepPlan:
self.fail('failed to get a footstep plan response')
_addPlanItem(footstepPlan, self.properties.getProperty('Stance frame name') + ' footstep plan', FootstepPlanItem)
class PlanPostureGoal(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Posture group', 'General')
properties.addProperty('Posture name', 'arm up pregrasp')
properties.addProperty('Side', 1, attributes=om.PropertyAttributes(enumNames=['Default', 'Left', 'Right']))
def run(self):
startPosture = robotSystem.robotStateJointController.q.copy()
side = [None, 'left', 'right'][self.properties.getProperty('Side')]
pose = robotSystem.ikPlanner.getMergedPostureFromDatabase(startPosture, self.properties.getProperty('Posture group'), self.properties.getProperty('Posture name'), side)
plan = robotSystem.ikPlanner.computePostureGoal(startPosture, pose)
_addPlanItem(plan, self.properties.getProperty('Posture name') + ' posture plan', ManipulationPlanItem)
class PlanStandPosture(AsyncTask):
def run(self):
startPosture = robotSystem.robotStateJointController.q.copy()
plan = robotSystem.ikPlanner.computeStandPlan(startPosture)
_addPlanItem(plan, 'stand pose plan', ManipulationPlanItem)
class PlanNominalPosture(AsyncTask):
def run(self):
startPosture = robotSystem.robotStateJointController.q.copy()
plan = robotSystem.ikPlanner.computeNominalPlan(startPosture)
_addPlanItem(plan, 'nominal pose plan', ManipulationPlanItem)
class PlanReachToFrame(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Frame input name', '')
properties.addProperty('Side', 1, attributes=om.PropertyAttributes(enumNames=['Left', 'Right']))
def getInputFrame(self):
name = self.properties.getProperty('Frame input name')
frame = om.findObjectByName(name)
if not isinstance(frame, vis.FrameItem):
self.fail('frame not found: %s' % name)
return frame
def run(self):
side = self.properties.getPropertyEnumValue('Side').lower()
startPose = robotSystem.robotStateJointController.q.copy()
targetFrame = self.getInputFrame()
constraintSet = robotSystem.ikPlanner.planEndEffectorGoal(startPose, side, targetFrame, lockBase=False, lockBack=True)
endPose, info = constraintSet.runIk()
plan = constraintSet.runIkTraj()
_addPlanItem(plan, '%s reach plan' % targetFrame.getProperty('Name'), ManipulationPlanItem)
class FitWallFrameFromAnnotation(PointCloudAlgorithmBase):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Annotation input name', '')
def getAnnotationInput(self):
obj = om.findObjectByName(self.properties.getProperty('Annotation input name'))
if obj is None:
self.fail('user annotation not found')
return obj
def run(self):
polyData = self.getPointCloud()
annotation = self.getAnnotationInput()
annotationPoint = annotation.annotationPoints[0]
planePoints, normal = segmentation.applyLocalPlaneFit(polyData, annotationPoint, searchRadius=0.1, searchRadiusEnd=0.2)
viewDirection = segmentation.SegmentationContext.getGlobalInstance().getViewDirection()
if np.dot(normal, viewDirection) < 0:
normal = -normal
xaxis = normal
zaxis = [0, 0, 1]
yaxis = np.cross(zaxis, xaxis)
xaxis = np.cross(yaxis, zaxis)
xaxis /= np.linalg.norm(xaxis)
yaxis /= np.linalg.norm(yaxis)
t = transformUtils.getTransformFromAxes(xaxis, yaxis, zaxis)
t.PostMultiply()
t.Translate(annotationPoint)
polyData = annotation.polyData
polyData = segmentation.transformPolyData(polyData, t.GetLinearInverse())
annotation.setProperty('Visible', False)
om.removeFromObjectModel(om.findObjectByName('wall'))
obj = vis.showPolyData(polyData, 'wall')
obj.actor.SetUserTransform(t)
vis.showFrame(t, 'wall frame', scale=0.2, parent=obj)
class FitShelfItem(PointCloudAlgorithmBase):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Annotation input name', '')
properties.addProperty('Cluster tolerance', 0.02, attributes=propertyset.PropertyAttributes(decimals=3, minimum=0.0, maximum=10))
def getAnnotationInput(self):
obj = om.findObjectByName(self.properties.getProperty('Annotation input name'))
if obj is None:
self.fail('user annotation not found')
return obj
def run(self):
polyData = self.getPointCloud()
annotation = self.getAnnotationInput()
annotationPoint = annotation.annotationPoints[0]
mesh = segmentation.fitShelfItem(polyData, annotationPoint, clusterTolerance=self.properties.getProperty('Cluster tolerance'))
annotation.setProperty('Visible', False)
om.removeFromObjectModel(om.findObjectByName('shelf item'))
obj = vis.showPolyData(mesh, 'shelf item', color=[0,1,0])
t = transformUtils.frameFromPositionAndRPY(segmentation.computeCentroid(mesh), [0,0,0])
segmentation.makeMovable(obj, t)
class SpawnValveAffordance(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Radius', 0.195, attributes=om.PropertyAttributes(decimals=4, minimum=0, maximum=10))
properties.addProperty('Position', [0.7, -0.22, 1.21], attributes=om.PropertyAttributes(decimals=3, minimum=-1e4, maximum=1e4))
properties.addProperty('Rotation', [180, -90, 16], attributes=om.PropertyAttributes(decimals=2, minimum=-360, maximum=360))
    def getGroundFrame(self):
        # NOTE: returns the identity transform; the base-link code below is
        # left unreachable (kept for reference)
        return vtk.vtkTransform()
robotModel = robotSystem.robotStateModel
baseLinkFrame = robotModel.model.getLinkFrame(robotModel.model.getLinkNames()[0])
#baseLinkFrame.PostMultiply()
#baseLinkFrame.Translate(0,0,-baseLinkFrame.GetPosition()[2])
return baseLinkFrame
#return robotSystem.footstepsDriver.getFeetMidPoint(robotModel)
def computeValveFrame(self):
position = self.properties.getProperty('Position')
rpy = self.properties.getProperty('Rotation')
t = transformUtils.frameFromPositionAndRPY(position, rpy)
t.Concatenate(self.getGroundFrame())
return t
def run(self):
radius = self.properties.getProperty('Radius')
thickness = 0.03
folder = om.getOrCreateContainer('affordances')
frame = self.computeValveFrame()
d = DebugData()
d.addLine(np.array([0, 0, -thickness/2.0]), np.array([0, 0, thickness/2.0]), radius=radius)
mesh = d.getPolyData()
params = dict(radius=radius, length=thickness, xwidth=radius, ywidth=radius, zwidth=thickness, otdf_type='steering_cyl', friendly_name='valve')
affordance = vis.showPolyData(mesh, 'valve', color=[0.0, 1.0, 0.0], cls=affordanceitems.FrameAffordanceItem, parent=folder, alpha=1.0)
frame = vis.showFrame(frame, 'valve frame', parent=affordance, visible=False, scale=radius)
affordance.actor.SetUserTransform(frame.transform)
affordance.setAffordanceParams(params)
affordance.updateParamsFromActorTransform()
class SpawnDrillBarrelAffordance(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Position', [0.5, -0.22, 1.2], attributes=om.PropertyAttributes(decimals=3, minimum=-1e4, maximum=1e4))
properties.addProperty('Rotation', [0, 0, 0], attributes=om.PropertyAttributes(decimals=2, minimum=-360, maximum=360))
    def getGroundFrame(self):
        # NOTE: returns the identity transform; the code below is left unreachable
        return vtk.vtkTransform()
robotModel = robotSystem.robotStateModel
baseLinkFrame = robotModel.model.getLinkFrame(robotModel.model.getLinkNames()[0])
#baseLinkFrame.PostMultiply()
#baseLinkFrame.Translate(0,0,-baseLinkFrame.GetPosition()[2])
return baseLinkFrame
#return robotSystem.footstepsDriver.getFeetMidPoint(robotModel)
def computeAffordanceFrame(self):
position = self.properties.getProperty('Position')
rpy = self.properties.getProperty('Rotation')
t = transformUtils.frameFromPositionAndRPY(position, rpy)
t.Concatenate(self.getGroundFrame())
return t
def run(self):
folder = om.getOrCreateContainer('affordances')
frame = self.computeAffordanceFrame()
mesh = segmentation.getDrillBarrelMesh()
params = segmentation.getDrillAffordanceParams(np.array(frame.GetPosition()), [1,0,0], [0,1,0], [0,0,1], 'dewalt_barrel')
affordance = vis.showPolyData(mesh, 'drill', color=[0.0, 1.0, 0.0], cls=affordanceitems.FrameAffordanceItem, parent=folder)
frame = vis.showFrame(frame, 'drill frame', parent=affordance, visible=False, scale=0.2)
affordance.actor.SetUserTransform(frame.transform)
affordance.setAffordanceParams(params)
affordance.updateParamsFromActorTransform()
class SpawnDrillRotaryAffordance(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Position', [0.5, -0.22, 1.2], attributes=om.PropertyAttributes(decimals=3, minimum=-1e4, maximum=1e4))
properties.addProperty('Rotation', [0, 0, 0], attributes=om.PropertyAttributes(decimals=2, minimum=-360, maximum=360))
    def getGroundFrame(self):
        # NOTE: returns the identity transform; the code below is left unreachable
        return vtk.vtkTransform()
robotModel = robotSystem.robotStateModel
baseLinkFrame = robotModel.model.getLinkFrame(robotModel.model.getLinkNames()[0])
#baseLinkFrame.PostMultiply()
#baseLinkFrame.Translate(0,0,-baseLinkFrame.GetPosition()[2])
return baseLinkFrame
#return robotSystem.footstepsDriver.getFeetMidPoint(robotModel)
def computeAffordanceFrame(self):
position = self.properties.getProperty('Position')
rpy = self.properties.getProperty('Rotation')
t = transformUtils.frameFromPositionAndRPY(position, rpy)
t.Concatenate(self.getGroundFrame())
return t
def run(self):
folder = om.getOrCreateContainer('affordances')
frame = self.computeAffordanceFrame()
mesh = segmentation.getDrillMesh()
params = segmentation.getDrillAffordanceParams(np.array(frame.GetPosition()), [1,0,0], [0,1,0], [0,0,1])
affordance = vis.showPolyData(mesh, 'drill', color=[0.0, 1.0, 0.0], cls=affordanceitems.FrameAffordanceItem, parent=folder)
frame = vis.showFrame(frame, 'drill frame', parent=affordance, visible=False, scale=0.2)
affordance.actor.SetUserTransform(frame.transform)
affordance.setAffordanceParams(params)
affordance.updateParamsFromActorTransform()
class PlanGazeTrajectory(AsyncTask):
@staticmethod
def getDefaultProperties(properties):
properties.addProperty('Target frame name', '')
properties.addProperty('Annotation input name', '')
properties.addProperty('Side', 1, attributes=om.PropertyAttributes(enumNames=['Left', 'Right']))
properties.addProperty('Cone threshold degrees', 5.0, attributes=om.PropertyAttributes(decimals=1, minimum=0, maximum=360))
properties.addProperty('Palm offset', 0.0, attributes=om.PropertyAttributes(decimals=3, minimum=-1e4, maximum=1e4))
def getAnnotationInputPoints(self):
obj = om.findObjectByName(self.properties.getProperty('Annotation input name'))
if obj is None:
self.fail('user annotation not found')
return obj.annotationPoints
def appendPositionConstraintForTargetFrame(self, goalFrame, t):
positionConstraint, _ = self.ikPlanner.createPositionOrientationGraspConstraints(self.graspingHand, goalFrame, self.graspToHandLinkFrame)
positionConstraint.tspan = [t, t]
self.constraintSet.constraints.append(positionConstraint)
def initGazeConstraintSet(self, goalFrame):
# create constraint set
startPose = robotSystem.robotStateJointController.q.copy()
startPoseName = 'gaze_plan_start'
endPoseName = 'gaze_plan_end'
self.ikPlanner.addPose(startPose, startPoseName)
self.ikPlanner.addPose(startPose, endPoseName)
self.constraintSet = ikplanner.ConstraintSet(self.ikPlanner, [], startPoseName, endPoseName)
self.constraintSet.endPose = startPose
# add body constraints
bodyConstraints = self.ikPlanner.createMovingBodyConstraints(startPoseName, lockBase=True, lockBack=False, lockLeftArm=self.graspingHand=='right', lockRightArm=self.graspingHand=='left')
self.constraintSet.constraints.extend(bodyConstraints)
# add gaze constraint
self.graspToHandLinkFrame = self.ikPlanner.newPalmOffsetGraspToHandFrame(self.graspingHand, self.properties.getProperty('Palm offset'))
gazeConstraint = self.ikPlanner.createGazeGraspConstraint(self.graspingHand, goalFrame, self.graspToHandLinkFrame, coneThresholdDegrees=self.properties.getProperty('Cone threshold degrees'))
self.constraintSet.constraints.insert(0, gazeConstraint)
def getGazeTargetFrame(self):
frame = om.findObjectByName(self.properties.getProperty('Target frame name'))
if not frame:
self.fail('could not find ground frame')
return frame
def run(self):
self.ikPlanner = robotSystem.ikPlanner
side = self.properties.getPropertyEnumValue('Side').lower()
self.graspingHand = side
targetPoints = self.getAnnotationInputPoints()
gazeTargetFrame = self.getGazeTargetFrame()
self.initGazeConstraintSet(gazeTargetFrame)
numberOfSamples = len(targetPoints)
for i in xrange(numberOfSamples):
targetPos = targetPoints[i]
targetFrame = transformUtils.copyFrame(gazeTargetFrame.transform)
targetFrame.Translate(targetPos - np.array(targetFrame.GetPosition()))
self.appendPositionConstraintForTargetFrame(targetFrame, i+1)
gazeConstraint = self.constraintSet.constraints[0]
assert isinstance(gazeConstraint, ikplanner.ikconstraints.WorldGazeDirConstraint)
gazeConstraint.tspan = [1.0, numberOfSamples]
plan = self.constraintSet.runIkTraj()
_addPlanItem(plan, '%s gaze plan' % gazeTargetFrame.getProperty('Name'), ManipulationPlanItem)
|
patmarion/director
|
src/python/director/tasks/robottasks.py
|
Python
|
bsd-3-clause
| 42,912
|
[
"VTK"
] |
ebb08f08eadfdb58dc72a51aeae36caebbaa182cd756f3c9bfe5abb81122e622
|
#!/usr/bin/env python
'''
Input an XC functional which is not implemented in pyscf.
See also
* The definition of define_xc_ function in pyscf/dft/libxc.py
* pyscf/dft/libxc.py for API of function eval_xc;
* dft.numint.NumInt class for its methods eval_xc, hybrid_coeff and _xc_type.
These methods controls the XC functional evaluation;
* Example 24-custom_xc_functional.py to customize XC functionals using the
functionals provided by Libxc or XcFun library.
'''
from pyscf import gto
from pyscf import dft
mol = gto.M(
atom = '''
O 0. 0. 0.
H 0. -0.757 0.587
H 0. 0.757 0.587 ''',
basis = 'ccpvdz')
# half-half exact exchange and GGA functional
hybrid_coeff = 0.5
def eval_xc(xc_code, rho, spin=0, relativity=0, deriv=1, verbose=None):
# A fictitious XC functional to demonstrate the usage
rho0, dx, dy, dz = rho[:4]
gamma = (dx**2 + dy**2 + dz**2)
exc = .01 * rho0**2 + .02 * (gamma+.001)**.5
vrho = .01 * 2 * rho0
vgamma = .02 * .5 * (gamma+.001)**(-.5)
vlapl = None
vtau = None
vxc = (vrho, vgamma, vlapl, vtau)
fxc = None # 2nd order functional derivative
kxc = None # 3rd order functional derivative
# Mix with existing functionals
pbe_xc = dft.libxc.eval_xc('pbe,pbe', rho, spin, relativity, deriv,
verbose)
exc += pbe_xc[0] * 0.5
vrho += pbe_xc[1][0] * 0.5
vgamma += pbe_xc[1][1] * 0.5
return exc, vxc, fxc, kxc
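# Note (added for clarity): eval_xc returns (exc, vxc, fxc, kxc); for a GGA,
# vxc is packed as (vrho, vsigma, vlapl, vtau). fxc and kxc may be None when
# deriv < 2, as in this example.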
mf = dft.RKS(mol)
mf = mf.define_xc_(eval_xc, 'GGA', hyb=hybrid_coeff)
mf.verbose = 4
mf.kernel()
# half-and-half exact exchange in which 40% of the exchange is computed with
# the short-range part of the range-separated Coulomb operator (omega = 0.8)
beta = 0.2
rsh_coeff = (0.8, hybrid_coeff-beta, beta)
mf = dft.RKS(mol)
mf = mf.define_xc_(eval_xc, 'GGA', rsh=rsh_coeff)
mf.verbose = 4
mf.kernel()
|
gkc1000/pyscf
|
examples/dft/24-define_xc_functional.py
|
Python
|
apache-2.0
| 1,864
|
[
"PySCF"
] |
b11d352174c4be3e9408095ed6df91dbbc5f0ed6ce1738c96458d0e3e00bc947
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010-2017 Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Classes:
    FamilyPages - Family index page and individual Family pages
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from collections import defaultdict
from decimal import getcontext
import logging
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.lib import (EventType, Family)
from gramps.gen.plug.report import Bibliography
from gramps.plugins.lib.libhtml import Html
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.common import (get_first_letters, _KEYPERSON,
alphabet_navigation, sort_people,
primary_difference, first_letter,
FULLCLEAR, get_index_letter)
_ = glocale.translation.sgettext
LOG = logging.getLogger(".NarrativeWeb")
getcontext().prec = 8
#################################################
#
# creates the Family List Page and Family Pages
#
#################################################
class FamilyPages(BasePage):
"""
This class is responsible for displaying information about the 'Family'
database objects. It displays this information under the 'Families'
tab. It is told by the 'add_instances' call which 'Family's to display,
and remembers the list of Family. A single call to 'display_pages'
displays both the Family List (Index) page and all the Family
pages.
The base class 'BasePage' is initialised once for each page that is
displayed.
"""
def __init__(self, report):
"""
@param: report -- The instance of the main report class for
this report
"""
BasePage.__init__(self, report, title="")
self.family_dict = defaultdict(set)
self.person = None
self.familymappages = None
def display_pages(self, title):
"""
Generate and output the pages under the Family tab, namely the family
index and the individual family pages.
@param: title -- Is the title of the web page
"""
LOG.debug("obj_dict[Family]")
for item in self.report.obj_dict[Family].items():
LOG.debug(" %s", str(item))
with self.r_user.progress(_("Narrated Web Site Report"),
_("Creating family pages..."),
len(self.report.obj_dict[Family]) + 1
) as step:
self.familylistpage(self.report, title,
self.report.obj_dict[Family].keys())
for family_handle in self.report.obj_dict[Family]:
step()
self.familypage(self.report, title, family_handle)
def familylistpage(self, report, title, fam_list):
"""
Create a family index
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
        @param: fam_list -- The list of family handles to display
"""
BasePage.__init__(self, report, title)
output_file, sio = self.report.create_file("families")
familieslistpage, head, body = self.write_header(self._("Families"))
ldatec = 0
prev_letter = " "
# begin Family Division
with Html("div", class_="content", id="Relationships") as relationlist:
body += relationlist
# Families list page message
msg = self._("This page contains an index of all the "
"families/ relationships in the "
"database, sorted by their family name/ surname. "
"Clicking on a person’s "
"name will take you to their "
"family/ relationship’s page.")
relationlist += Html("p", msg, id="description")
        # go through all the families, and construct a dictionary of all the
        # people and the families they are involved in. Note that the people
        # in the list may be involved in OTHER families that are not listed
        # because they are not in the original family list.
pers_fam_dict = defaultdict(list)
for family_handle in fam_list:
family = self.r_db.get_family_from_handle(family_handle)
if family:
if family.get_change_time() > ldatec:
ldatec = family.get_change_time()
husband_handle = family.get_father_handle()
spouse_handle = family.get_mother_handle()
if husband_handle:
pers_fam_dict[husband_handle].append(family)
if spouse_handle:
pers_fam_dict[spouse_handle].append(family)
# add alphabet navigation
index_list = get_first_letters(self.r_db, pers_fam_dict.keys(),
_KEYPERSON, rlocale=self.rlocale)
alpha_nav = alphabet_navigation(index_list, self.rlocale)
if alpha_nav:
relationlist += alpha_nav
# begin families table and table head
with Html("table", class_="infolist relationships") as table:
relationlist += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
# set up page columns
trow.extend(
Html("th", trans, class_=colclass, inline=True)
for trans, colclass in [(self._("Letter"),
"ColumnRowLabel"),
(self._("Person"), "ColumnPartner"),
(self._("Family"), "ColumnPartner"),
(self._("Marriage"), "ColumnDate"),
(self._("Divorce"), "ColumnDate")]
)
tbody = Html("tbody")
table += tbody
# begin displaying index list
ppl_handle_list = sort_people(self.r_db, pers_fam_dict.keys(),
self.rlocale)
first = True
for (surname, handle_list) in ppl_handle_list:
if surname and not surname.isspace():
letter = get_index_letter(first_letter(surname),
index_list,
self.rlocale)
else:
letter = ' '
# get person from sorted database list
for person_handle in sorted(
handle_list, key=self.sort_on_name_and_grampsid):
person = self.r_db.get_person_from_handle(person_handle)
if person:
family_list = person.get_family_handle_list()
first_family = True
for family_handle in family_list:
get_family = self.r_db.get_family_from_handle
family = get_family(family_handle)
trow = Html("tr")
tbody += trow
tcell = Html("td", class_="ColumnRowLabel")
trow += tcell
if first or primary_difference(letter,
prev_letter,
self.rlocale):
first = False
prev_letter = letter
trow.attr = 'class="BeginLetter"'
ttle = self._("Families beginning with "
"letter ")
tcell += Html("a", letter, name=letter,
title=ttle + letter,
inline=True)
else:
tcell += ' '
tcell = Html("td", class_="ColumnPartner")
trow += tcell
if first_family:
trow.attr = 'class ="BeginFamily"'
tcell += self.new_person_link(
person_handle, uplink=self.uplink)
first_family = False
else:
tcell += ' '
tcell = Html("td", class_="ColumnPartner")
trow += tcell
tcell += self.family_link(
family.get_handle(),
self.report.get_family_name(family),
family.get_gramps_id(), self.uplink)
# family events; such as marriage and divorce
# events
fam_evt_ref_list = family.get_event_ref_list()
tcell1 = Html("td", class_="ColumnDate",
inline=True)
tcell2 = Html("td", class_="ColumnDate",
inline=True)
trow += (tcell1, tcell2)
if fam_evt_ref_list:
fam_evt_srt_ref_list = sorted(
fam_evt_ref_list,
key=self.sort_on_grampsid)
for evt_ref in fam_evt_srt_ref_list:
evt = self.r_db.get_event_from_handle(
evt_ref.ref)
if evt:
evt_type = evt.get_type()
if evt_type in [EventType.MARRIAGE,
EventType.DIVORCE]:
cell = self.rlocale.get_date(
evt.get_date_object())
if (evt_type ==
EventType.MARRIAGE):
tcell1 += cell
else:
tcell1 += ' '
if (evt_type ==
EventType.DIVORCE):
tcell2 += cell
else:
tcell2 += ' '
else:
tcell1 += ' '
tcell2 += ' '
first_family = False
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(familieslistpage, output_file, sio, ldatec)
def familypage(self, report, title, family_handle):
"""
Create a family page
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: family_handle -- The handle for the family to add
"""
family = report.database.get_family_from_handle(family_handle)
if not family:
return
BasePage.__init__(self, report, title, family.get_gramps_id())
ldatec = family.get_change_time()
self.bibli = Bibliography()
self.uplink = True
family_name = self.report.get_family_name(family)
self.page_title = family_name
self.familymappages = report.options["familymappages"]
output_file, sio = self.report.create_file(family.get_handle(), "fam")
familydetailpage, head, body = self.write_header(family_name)
        # begin FamilyDetail division
with Html("div", class_="content",
id="RelationshipDetail") as relationshipdetail:
body += relationshipdetail
# family media list for initial thumbnail
if self.create_media:
media_list = family.get_media_list()
# If Event pages are not being created, then we need to display
# the family event media here
if not self.inc_events:
for evt_ref in family.get_event_ref_list():
event = self.r_db.get_event_from_handle(evt_ref.ref)
media_list += event.get_media_list()
thumbnail = self.disp_first_img_as_thumbnail(media_list,
family)
if thumbnail:
relationshipdetail += thumbnail
self.person = None # no longer used
relationshipdetail += Html(
"h2", self.page_title, inline=True) + (
Html('sup') + (Html('small') +
self.get_citation_links(
family.get_citation_list())))
# display relationships
families = self.display_family_relationships(family, None)
if families is not None:
relationshipdetail += families
# display additional images as gallery
if self.create_media and media_list:
addgallery = self.disp_add_img_as_gallery(media_list, family)
if addgallery:
relationshipdetail += addgallery
# Narrative subsection
notelist = family.get_note_list()
if notelist:
relationshipdetail += self.display_note_list(notelist)
# display family LDS ordinance...
family_lds_ordinance_list = family.get_lds_ord_list()
if family_lds_ordinance_list:
relationshipdetail += self.display_lds_ordinance(family)
# get attribute list
attrlist = family.get_attribute_list()
if attrlist:
attrsection, attrtable = self.display_attribute_header()
self.display_attr_list(attrlist, attrtable)
relationshipdetail += attrsection
# source references
srcrefs = self.display_ind_sources(family)
if srcrefs:
relationshipdetail += srcrefs
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(familydetailpage, output_file, sio, ldatec)
|
jralls/gramps
|
gramps/plugins/webreport/family.py
|
Python
|
gpl-2.0
| 17,649
|
[
"Brian"
] |
cb938532f3568714a2fd14de0b6507398946ef6bba36a22ff16b1f338a69ad73
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple test file to load voxel data.
"""
import re
import os
import numpy
def read_itk_metaimage(filename, order=[2, 1, 0]):
"""
Read voxel data saved as a pair of .mhd/.raw files.
Assume the binary data is stored as uint8.
Additional documentation:
http://www.itk.org/Wiki/ITK/MetaIO/Documentation
Args:
        filename (str): Name of the file to load (with or without extension)
order (list, optional): Storage order in the .raw file.
"""
name = os.path.splitext(filename)[0]
with open(name + ".mhd", "r") as mhdfile:
s = mhdfile.read()
m = re.search('DimSize = ([0-9]*) ([0-9]*) ([0-9]*)', s)
shape = (int(m.group(order[0] + 1)), int(m.group(order[1] + 1)), int(m.group(order[2] + 1)))
m = re.search('ElementDataFile = (.*).raw', s)
my_dir = os.path.dirname(name)
assert os.path.join(my_dir, m.group(1)) == name
with open(name + ".raw", "rb") as rawfile:
volume = numpy.frombuffer(bytearray(rawfile.read()), dtype=numpy.uint8)
return volume.reshape(shape)
def plot_voxels(volume):
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from skimage import measure
# Use marching cubes to obtain the surface mesh
verts, faces = measure.marching_cubes(volume, 0.5)
# Display resulting triangular mesh using Matplotlib. This can also be done
# with mayavi (see skimage.measure.marching_cubes docstring).
fig = plt.figure(figsize=(10, 12))
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[faces])
ax.add_collection3d(mesh)
ax.set_xlim(0, volume.shape[0])
ax.set_ylim(0, volume.shape[1])
ax.set_zlim(0, volume.shape[2])
ax.set_aspect('equal')
plt.show()
if __name__ == "__main__":
filename = 'build/output.mhd'
volume = read_itk_metaimage(filename)
print("volume = {0}%".format(100 * volume.sum() / volume.size))
print(volume.shape)
plot_voxels(volume)
|
jdumas/geotools
|
voxmesh/visualize.py
|
Python
|
gpl-3.0
| 2,120
|
[
"Mayavi"
] |
387ef27e9934c33f63946a1ae53129cffeb5b2a48b55522b33b1638ca4d13090
|
"""
This script computes the projected correlation function from given files of
galaxy and random catalogues.
python3 compute_corr_projected.py <reg>
Args:
    reg: region, w1 or w4
Options:
    --igalaxies=1:1 (index range of galaxy catalogues)
    --irandoms=1:1  (index range of random catalogues)
Input:
    ../mocks/<reg>/mock_<reg>_<i>.txt
    ../rands/<reg>/rand_<reg>_<i>.txt
File format is:
    whitespace-separated columns; columns 1-3 and 6-7 (0-indexed) are read
Output:
    One file corr_projected_<i>.txt per galaxy catalogue, written to the
    directory given by -o, with:
    Column 1: rp
    Column 2: wp
"""
import os
import argparse
import json
import signal
import numpy as np
import h5py
import mockgallib as mock
signal.signal(signal.SIGINT, signal.SIG_DFL) # stop with ctrl-c
#
# Command-line options
#
parser = argparse.ArgumentParser()
parser.add_argument('reg', help='region w1 or w4')
parser.add_argument('--igalaxies', default='1:1',
help='index range of galaxy catalogues')
parser.add_argument('--irandoms', default='1:1',
help='index range of random catalogues')
parser.add_argument('--param', default='param.json',
help='parameter json file')
parser.add_argument('--rr', default='', help='precomputed RR filename')
parser.add_argument('-o', default='.', help='output directory')
parser.add_argument('--zmin', type=float, default=0.5, help='minimum redshift')
parser.add_argument('--zmax', type=float, default=1.2, help='maximum redshift')
arg = parser.parse_args()
igalaxies = arg.igalaxies.split(':')
irandoms = arg.irandoms.split(':')
#
# Read parameter file
#
print('# Parameter file: %s' % arg.param)
with open(arg.param, 'r') as f:
param = json.load(f)
omega_m = param['omega_m']
print('# Setting cosmology: omega_m= %.4f' % omega_m)
print('# redshift-range %f %f' % (arg.zmin, arg.zmax))
#
# Initialise
#
mock.set_loglevel(0)
mock.cosmology.set(omega_m)
mock.distance.init(1.2)
def read_catalogues(filebase, irange):
cats = mock.Catalogues()
for i in range(int(irange[0]), int(irange[1]) + 1):
filename = '%s%05d.txt' % (filebase, i)
a = np.loadtxt(filename, delimiter=' ', usecols=[1,2,3,6,7])
        cats.append(a, z_min=arg.zmin, z_max=arg.zmax)
return cats
galaxies = read_catalogues('../mocks/%s/mock_%s_' % (arg.reg, arg.reg), igalaxies)
randoms = read_catalogues('../rands/%s/rand_%s_' % (arg.reg, arg.reg), irandoms)
corr = mock.CorrelationFunction(rp_min=0.1, rp_max=60.0, nbin=24,
pi_max=60.0, pi_nbin=20,
ra_min=0.001388889, dec_min=0.0375)
rr = mock.corr.Hist2D(rp_min=0.1, rp_max=60.0, rp_nbin=24,
pi_max=60.0, pi_nbin=20)
rr.load(arg.rr)
corr.compute_corr_projected_with_rr(galaxies, randoms, rr)
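# For reference, the projected correlation function being computed is the
# standard line-of-sight integral of the 2D correlation function,
#
#     wp(rp) = 2 * \int_0^{pi_max} xi(rp, pi) dpi,
#
# evaluated here with pi_max = 60.0 and 20 pi bins, as configured above.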
i0 = int(igalaxies[0])
i1 = int(igalaxies[1]) + 1
for i in range(i0, i1):
    ii = i - i0
    rp = corr.rp_i(ii)
    wp = corr.wp_i(ii)
    nrow = len(rp)
filename = '%s/corr_projected_%05d.txt' % (arg.o, i)
with open(filename, 'w') as f:
for irow in range(nrow):
f.write('%e %e\n' % (rp[irow], wp[irow]))
print('%s written' % arg.o)
|
junkoda/mockgallib
|
script/compute_corr_projected.py
|
Python
|
gpl-3.0
| 3,057
|
[
"Galaxy"
] |
e8f9675438e45557e3d3063c8559262426da43fedfad8ed22aa6cac350fc4fc2
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
import shutil
import sys
import tempfile
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
import nova.policy
from nova import rpc
from nova import test
from nova import utils
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.tests import fake_network
LOG = logging.getLogger(__name__)
HOST = "testhost"
networks = [{'id': 0,
'uuid': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.0.2'},
{'id': 1,
'uuid': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
'label': 'test1',
'injected': False,
'multi_host': False,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.1.2'}]
fixed_ips = [{'id': 0,
'network_id': 0,
'address': '192.168.0.100',
'instance_id': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '192.168.1.100',
'instance_id': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []}]
flavor = {'id': 0,
'rxtx_cap': 3}
floating_ip_fields = {'id': 0,
'address': '192.168.10.100',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 0,
'project_id': None,
'auto_assigned': False}
vifs = [{'id': 0,
'address': 'DE:AD:BE:EF:00:00',
'uuid': '00000000-0000-0000-0000-0000000000000000',
'network_id': 0,
'instance_id': 0},
{'id': 1,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
'network_id': 1,
'instance_id': 0},
{'id': 2,
'address': 'DE:AD:BE:EF:00:02',
'uuid': '00000000-0000-0000-0000-0000000000000002',
'network_id': 2,
'instance_id': 0}]
class FlatNetworkTestCase(test.TestCase):
def setUp(self):
super(FlatNetworkTestCase, self).setUp()
self.tempdir = tempfile.mkdtemp()
self.flags(logdir=self.tempdir)
self.network = network_manager.FlatManager(host=HOST)
temp = utils.import_object('nova.network.minidns.MiniDNS')
self.network.instance_dns_manager = temp
self.network.instance_dns_domain = ''
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
def tearDown(self):
shutil.rmtree(self.tempdir)
super(FlatNetworkTestCase, self).tearDown()
def test_get_instance_nw_info(self):
fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
self.assertFalse(nw_info)
nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
for i, (nw, info) in enumerate(nw_info):
nid = i + 1
check = {'bridge': 'fake_br%d' % nid,
'cidr': '192.168.%s.0/24' % nid,
'cidr_v6': '2001:db8:0:%x::/64' % nid,
'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
'multi_host': False,
'injected': False,
'bridge_interface': None,
'vlan': None}
self.assertDictMatch(nw, check)
check = {'broadcast': '192.168.%d.255' % nid,
'dhcp_server': '192.168.%d.1' % nid,
'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
'gateway': '192.168.%d.1' % nid,
'gateway_v6': 'fe80::def',
'ip6s': 'DONTCARE',
'ips': 'DONTCARE',
'label': 'test%d' % nid,
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 0,
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'should_create_vlan': False,
'should_create_bridge': False}
self.assertDictMatch(info, check)
check = [{'enabled': 'DONTCARE',
'ip': '2001:db8:0:1::%x' % nid,
'netmask': 64,
'gateway': 'fe80::def'}]
self.assertDictListMatch(info['ip6s'], check)
num_fixed_ips = len(info['ips'])
check = [{'enabled': 'DONTCARE',
'ip': '192.168.%d.%03d' % (nid, ip_num + 99),
'netmask': '255.255.255.0',
'gateway': '192.168.%d.1' % nid}
for ip_num in xrange(1, num_fixed_ips + 1)]
self.assertDictListMatch(info['ips'], check)
def test_validate_networks(self):
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
requested_networks = [("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
"192.168.1.100")]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[1])
ip = fixed_ips[1].copy()
ip['instance_id'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_reserved(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, None, None, None, None, None)
self.assertEqual(1, len(nets))
network = nets[0]
self.assertEqual(3, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "192.168.0.100.1")]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "")]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, None)]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.instance_get(self.context,
1).AndReturn({'display_name': HOST,
'uuid': 'test-00001'})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'availability_zone': ''})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.101')
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[0])
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
networks[0]['id'])
def test_mini_dns_driver(self):
zone1 = "example.org"
zone2 = "example.com"
driver = self.network.instance_dns_manager
driver.create_entry("hostone", "10.0.0.1", "A", zone1)
driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
driver.delete_entry("hostone", zone1)
driver.modify_address("hostfour", "10.0.0.1", zone1)
driver.modify_address("hostthree", "10.0.0.1", zone1)
names = driver.get_entries_by_address("10.0.0.1", zone1)
self.assertEqual(len(names), 2)
self.assertIn('hostthree', names)
self.assertIn('hostfour', names)
names = driver.get_entries_by_address("10.0.0.5", zone2)
self.assertEqual(len(names), 1)
self.assertIn('hostfive', names)
addresses = driver.get_entries_by_name("hosttwo", zone1)
self.assertEqual(len(addresses), 1)
self.assertIn('10.0.0.2', addresses)
self.assertRaises(exception.InvalidInput,
driver.create_entry,
"hostname",
"10.10.10.10",
"invalidtype",
zone1)
def test_instance_dns(self):
fixedip = '192.168.0.101'
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'network_update')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.instance_get(self.context,
1).AndReturn({'display_name': HOST,
'uuid': 'test-00001'})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'availability_zone': ''})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(fixedip)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[0])
db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
networks[0]['id'])
instance_manager = self.network.instance_dns_manager
addresses = instance_manager.get_entries_by_name(HOST,
self.network.instance_dns_domain)
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip)
addresses = instance_manager.get_entries_by_name('test-00001',
self.network.instance_dns_domain)
self.assertEqual(len(addresses), 1)
self.assertEqual(addresses[0], fixedip)
class VlanNetworkTestCase(test.TestCase):
def setUp(self):
super(VlanNetworkTestCase, self).setUp()
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
def test_vpn_allocate_fixed_ip(self):
self.mox.StubOutWithMock(db, 'fixed_ip_associate')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
db.fixed_ip_associate(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
reserved=True).AndReturn('192.168.0.1')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
self.mox.ReplayAll()
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
self.network.allocate_fixed_ip(None, 0, network, vpn=True)
def test_vpn_allocate_fixed_ip_no_network_id(self):
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
network['id'] = None
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.FixedIpNotFoundForNetwork,
self.network.allocate_fixed_ip,
context_admin,
0,
network,
vpn=True)
def test_allocate_fixed_ip(self):
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get')
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}]})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.1')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
self.mox.ReplayAll()
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, 0, network)
def test_create_networks_too_big(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=4094, vlan_start=1)
def test_create_networks_too_many(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=100, vlan_start=1,
cidr='192.168.0.1/24', network_size=100)
def test_validate_networks(self):
def network_get(_context, network_id):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
requested_networks = [("bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
"192.168.1.100")]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
fixed_ips[1]['network_id'] = networks[1]['id']
fixed_ips[1]['instance_id'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(fixed_ips[1])
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "192.168.0.100.1")]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, "")]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [(1, None)]
db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_floating_ip_owned_by_project(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
# raises because floating_ip project_id is None
floating_ip = {'address': '10.0.0.1',
'project_id': None}
self.assertRaises(exception.NotAuthorized,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# raises because floating_ip project_id is not equal to ctxt project_id
floating_ip = {'address': '10.0.0.1',
'project_id': ctxt.project_id + '1'}
self.assertRaises(exception.NotAuthorized,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# does not raise (floating ip is owned by ctxt project)
floating_ip = {'address': '10.0.0.1',
'project_id': ctxt.project_id}
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
def test_allocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return {'address': '10.0.0.1'}
def fake2(*args, **kwargs):
return 25
def fake3(*args, **kwargs):
return 0
self.stubs.Set(self.network.db, 'floating_ip_allocate_address', fake1)
# this time should raise
self.stubs.Set(self.network.db, 'floating_ip_count_by_project', fake2)
self.assertRaises(exception.QuotaError,
self.network.allocate_floating_ip,
ctxt,
ctxt.project_id)
# this time should not
self.stubs.Set(self.network.db, 'floating_ip_count_by_project', fake3)
self.network.allocate_floating_ip(ctxt, ctxt.project_id)
def test_deallocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
def fake2(*args, **kwargs):
return {'address': '10.0.0.1', 'fixed_ip_id': 1}
def fake3(*args, **kwargs):
return {'address': '10.0.0.1', 'fixed_ip_id': None}
self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# this time should raise because floating ip is associated to fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpAssociated,
self.network.deallocate_floating_ip,
ctxt,
mox.IgnoreArg())
# this time should not raise
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
def test_associate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
# floating ip that's already associated
def fake2(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 1}
# floating ip that isn't associated
def fake3(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': None}
# fixed ip with remote host
def fake4(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'network_id': 'blah'}
def fake4_network(*args, **kwargs):
return {'multi_host': False, 'host': 'jibberjabber'}
# fixed ip with local host
def fake5(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'network_id': 'blahblah'}
def fake5_network(*args, **kwargs):
return {'multi_host': False, 'host': 'testhost'}
def fake6(*args, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
raise exception.ProcessExecutionError('',
'Cannot find device "em0"\n')
# raises because interface doesn't exist
self.stubs.Set(self.network.db,
'floating_ip_fixed_ip_associate',
fake1)
self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
self.stubs.Set(self.network.driver, 'bind_floating_ip', fake8)
self.assertRaises(exception.NoFloatingIpInterface,
self.network._associate_floating_ip,
ctxt,
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is already associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpAssociated,
self.network.associate_floating_ip,
ctxt,
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(rpc, 'cast', fake6)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_associate_floating_ip', fake7)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertTrue(self.local)
def test_floating_ip_init_host(self):
def get_all_by_host(_context, _host):
return [{'interface': 'foo',
'address': 'foo'},
{'interface': 'fakeiface',
'address': 'fakefloat',
'fixed_ip_id': 1},
{'interface': 'bar',
'address': 'bar',
'fixed_ip_id': 2}]
self.stubs.Set(self.network.db, 'floating_ip_get_all_by_host',
get_all_by_host)
def fixed_ip_get(_context, fixed_ip_id):
if fixed_ip_id == 1:
return {'address': 'fakefixed'}
raise exception.FixedIpNotFound()
self.stubs.Set(self.network.db, 'fixed_ip_get', fixed_ip_get)
self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
self.network.l3driver.add_floating_ip('fakefloat',
'fakefixed',
'fakeiface')
self.mox.ReplayAll()
self.network.init_host_floating_ips()
def test_disassociate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
# floating ip that isn't associated
def fake2(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': None}
# floating ip that is associated
def fake3(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 1}
# fixed ip with remote host
def fake4(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'network_id': 'blah'}
def fake4_network(*args, **kwargs):
return {'multi_host': False,
'host': 'jibberjabber'}
# fixed ip with local host
def fake5(*args, **kwargs):
return {'address': '10.0.0.1',
'pool': 'nova',
'interface': 'eth0',
'network_id': 'blahblah'}
def fake5_network(*args, **kwargs):
return {'multi_host': False, 'host': 'testhost'}
def fake6(*args, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is not associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpNotAssociated,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(rpc, 'cast', fake6)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertTrue(self.local)
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'instance_get')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
db.instance_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn({'security_groups':
[{'id': 0}],
'availability_zone': ''})
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn('192.168.0.101')
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks[0])
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
networks[0]['id'])
def test_ip_association_and_allocation_of_other_project(self):
"""Makes sure that we cannot deallocaate or disassociate
a public ip of other project"""
def network_get(_context, network_id):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
context1 = context.RequestContext('user', 'project1')
context2 = context.RequestContext('user', 'project2')
address = '1.2.3.4'
float_addr = db.floating_ip_create(context1.elevated(),
{'address': address,
'project_id': context1.project_id})
instance = db.instance_create(context1,
{'project_id': 'project1'})
fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
1, instance['id'])
# Associate the IP with non-admin user context
self.assertRaises(exception.NotAuthorized,
self.network.associate_floating_ip,
context2,
float_addr,
fix_addr)
# Deallocate address from other project
self.assertRaises(exception.NotAuthorized,
self.network.deallocate_floating_ip,
context2,
float_addr)
# Now Associates the address to the actual project
self.network.associate_floating_ip(context1, float_addr, fix_addr)
# Now try dis-associating from other project
self.assertRaises(exception.NotAuthorized,
self.network.disassociate_floating_ip,
context2,
float_addr)
# Clean up the ip addresses
self.network.disassociate_floating_ip(context1, float_addr)
self.network.deallocate_floating_ip(context1, float_addr)
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
db.floating_ip_destroy(context1.elevated(), float_addr)
db.fixed_ip_disassociate(context1.elevated(), fix_addr)
def test_deallocate_fixed(self):
"""Verify that release is called properly.
        Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't recur"""
def network_get(_context, network_id):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
def vif_get(_context, _vif_id):
return {'address': 'fake_mac'}
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['id'])
values = {'allocated': True,
'virtual_interface_id': 3}
db.fixed_ip_update(elevated, fix_addr, values)
fixed = db.fixed_ip_get_by_address(elevated, fix_addr)
network = db.network_get(elevated, fixed['network_id'])
self.flags(force_dhcp_release=True)
self.mox.StubOutWithMock(linux_net, 'release_dhcp')
linux_net.release_dhcp(network['bridge'], fixed['address'], 'fake_mac')
self.mox.ReplayAll()
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
fixed = db.fixed_ip_get_by_address(elevated, fix_addr)
self.assertFalse(fixed['allocated'])
def test_deallocate_fixed_no_vif(self):
"""Verify that deallocate doesn't raise when no vif is returned.
        Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't recur"""
def network_get(_context, network_id):
return networks[network_id]
self.stubs.Set(db, 'network_get', network_get)
def vif_get(_context, _vif_id):
return None
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['id'])
values = {'allocated': True,
'virtual_interface_id': 3}
db.fixed_ip_update(elevated, fix_addr, values)
self.flags(force_dhcp_release=True)
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
class CommonNetworkTestCase(test.TestCase):
def setUp(self):
super(CommonNetworkTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None):
return None
def test_remove_fixed_ip_from_instance(self):
manager = fake_network.FakeNetworkManager()
manager.remove_fixed_ip_from_instance(self.context, 99, HOST,
'10.0.0.1')
self.assertEquals(manager.deallocate_called, '10.0.0.1')
def test_remove_fixed_ip_from_instance_bad_input(self):
manager = fake_network.FakeNetworkManager()
self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
manager.remove_fixed_ip_from_instance,
self.context, 99, HOST, 'bad input')
def test_validate_cidrs(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
False, 1, 256, None, None, None,
None, None)
self.assertEqual(1, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/24' in cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/24',
False, 2, 128, None, None, None,
None, None)
self.assertEqual(2, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/25' in cidrs)
self.assertTrue('192.168.0.128/25' in cidrs)
def test_validate_cidrs_split_cidr_in_use_middle_of_range(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.0/24'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 4, 256, None, None, None,
None, None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/24' in cidrs)
def test_validate_cidrs_smaller_subnet_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.9/25'}])
self.mox.ReplayAll()
# ValueError: requested cidr (192.168.2.0/24) conflicts with
# existing smaller cidr
args = (None, 'fake', '192.168.2.0/24', False, 1, 256, None, None,
None, None, None)
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_smaller_cidr_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.0/25'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 4, 256, None, None, None, None,
None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/24' in cidrs)
def test_validate_cidrs_split_smaller_cidr_in_use2(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.2.9/29'}])
self.mox.ReplayAll()
nets = manager.create_networks(None, 'fake', '192.168.2.0/24',
False, 3, 32, None, None, None, None,
None)
self.assertEqual(3, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertTrue(exp_cidr in cidrs)
self.assertFalse('192.168.2.0/27' in cidrs)
def test_validate_cidrs_split_all_in_use(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
in_use = [{'id': 1, 'cidr': '192.168.2.9/29'},
{'id': 2, 'cidr': '192.168.2.64/26'},
{'id': 3, 'cidr': '192.168.2.128/26'}]
manager.db.network_get_all(ctxt).AndReturn(in_use)
self.mox.ReplayAll()
args = (None, 'fake', '192.168.2.0/24', False, 3, 64, None, None,
None, None, None)
# ValueError: Not enough subnets avail to satisfy requested num_
# networks - some subnets in requested range already
# in use
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
None, None, None)
# ValueError: network_size * num_networks exceeds cidr size
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_already_used(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
manager.db.network_get_all(ctxt).AndReturn([{'id': 1,
'cidr': '192.168.0.0/24'}])
self.mox.ReplayAll()
# ValueError: cidr already in use
args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
None, None, None)
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
None, None, None)
# ValueError: Not enough subnets avail to satisfy requested
# num_networks
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_partial(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(None, 'fake', '192.168.0.0/16',
False, 2, 256, None, None, None, None,
None)
returned_cidrs = [str(net['cidr']) for net in nets]
self.assertTrue('192.168.0.0/24' in returned_cidrs)
self.assertTrue('192.168.1.0/24' in returned_cidrs)
def test_validate_cidrs_conflict_existing_supernet(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}]
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None,
None, None, None)
# ValueError: requested cidr (192.168.0.0/24) conflicts
# with existing supernet
self.assertRaises(ValueError, manager.create_networks, *args)
def test_create_networks(self):
cidr = '192.168.0.0/24'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None,
None, None, None]
self.assertTrue(manager.create_networks(*args))
def test_create_networks_cidr_already_used(self):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
ctxt = mox.IgnoreArg()
fakecidr = [{'id': 1, 'cidr': '192.168.0.0/24'}]
manager.db.network_get_all(ctxt).AndReturn(fakecidr)
self.mox.ReplayAll()
args = [None, 'foo', '192.168.0.0/24', None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertRaises(ValueError, manager.create_networks, *args)
def test_create_networks_many(self):
cidr = '192.168.0.0/16'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None,
None, None, None]
self.assertTrue(manager.create_networks(*args))
def test_get_instance_uuids_by_ip_regex(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
        # Greedily get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '10.0.0.1'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
# Get instance 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '173.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[2]['instance_id'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.*'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_id'], _vifs[0]['instance_id'])
self.assertEqual(res[1]['instance_id'], _vifs[1]['instance_id'])
# Get instance 1 and 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '17..16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
self.assertEqual(res[1]['instance_id'], _vifs[2]['instance_id'])
def test_get_instance_uuids_by_ipv6_regex(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
        # Greedily get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*1034.*'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '2001:.*2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
# Get instance 2
ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[2]['instance_id'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*ef0[1,2]'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_id'], _vifs[0]['instance_id'])
self.assertEqual(res[1]['instance_id'], _vifs[1]['instance_id'])
# Get instance 1 and 2
ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
self.assertEqual(res[1]['instance_id'], _vifs[2]['instance_id'])
def test_get_instance_uuids_by_ip(self):
manager = fake_network.FakeNetworkManager()
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
# No regex for you!
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': '.*'})
self.assertFalse(res)
# Doesn't exist
ip = '10.0.0.1'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertFalse(res)
# Get instance 1
ip = '172.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id'])
# Get instance 2
ip = '173.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_id'], _vifs[2]['instance_id'])
def test_get_network(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all_by_uuids')
manager.db.network_get_all_by_uuids(
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
network = manager.get_network(fake_context, uuid)
self.assertEqual(network['uuid'], uuid)
def test_get_network_not_found(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all_by_uuids')
manager.db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.get_network, fake_context, uuid)
def test_get_all_networks(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all')
manager.db.network_get_all(mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
output = manager.get_all_networks(fake_context)
self.assertEqual(len(networks), 2)
self.assertEqual(output[0]['uuid'],
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
self.assertEqual(output[1]['uuid'],
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
def test_disassociate_network(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all_by_uuids')
manager.db.network_get_all_by_uuids(
mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(networks)
self.mox.ReplayAll()
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
manager.disassociate_network(fake_context, uuid)
def test_disassociate_network_not_found(self):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
self.mox.StubOutWithMock(manager.db, 'network_get_all_by_uuids')
manager.db.network_get_all_by_uuids(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn([])
self.mox.ReplayAll()
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.disassociate_network, fake_context, uuid)
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
network_manager.NetworkManager):
"""Dummy manager that implements RPCAllocateFixedIP"""
class RPCAllocateTestCase(test.TestCase):
"""Tests nova.network.manager.RPCAllocateFixedIP"""
def setUp(self):
super(RPCAllocateTestCase, self).setUp()
self.rpc_fixed = TestRPCFixedManager()
self.context = context.RequestContext('fake', 'fake')
def test_rpc_allocate(self):
"""Test to verify bug 855030 doesn't resurface.
        Makes sure _rpc_allocate_fixed_ip returns a value so the call
returns properly and the greenpool completes."""
address = '10.10.10.10'
def fake_allocate(*args, **kwargs):
return address
def fake_network_get(*args, **kwargs):
return {}
self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
'fake_instance',
'fake_network')
self.assertEqual(rval, address)
class TestFloatingIPManager(network_manager.FloatingIP,
network_manager.NetworkManager):
"""Dummy manager that implements FloatingIP"""
class AllocateTestCase(test.TestCase):
def test_allocate_for_instance(self):
address = "10.10.10.10"
self.flags(auto_assign_floating_ip=True)
self.compute = self.start_service('compute')
self.network = self.start_service('network')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
inst = db.instance_create(self.context, {'host': self.compute.host,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
nw_info = self.network.allocate_for_instance(self.context,
instance_id=inst['id'],
instance_uuid='',
host=inst['host'],
vpn=None,
rxtx_factor=3,
project_id=project_id)
self.assertEquals(1, len(nw_info))
fixed_ip = nw_info.fixed_ips()[0]['address']
self.assertTrue(utils.is_valid_ipv4(fixed_ip))
self.network.deallocate_for_instance(self.context,
instance_id=inst['id'],
fixed_ips=fixed_ip,
host=self.network.host,
project_id=project_id)
class FloatingIPTestCase(test.TestCase):
"""Tests nova.network.manager.FloatingIP"""
def setUp(self):
super(FloatingIPTestCase, self).setUp()
self.tempdir = tempfile.mkdtemp()
self.flags(logdir=self.tempdir)
self.network = TestFloatingIPManager()
temp = utils.import_object('nova.network.minidns.MiniDNS')
self.network.floating_dns_manager = temp
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
def tearDown(self):
shutil.rmtree(self.tempdir)
super(FloatingIPTestCase, self).tearDown()
def test_double_deallocation(self):
instance_ref = db.api.instance_create(self.context,
{"project_id": self.project_id})
        # Run deallocation twice to make it fail if it does not handle
        # instances without fixed networks.
        # If either call raises, it does not handle having no addresses.
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
def test_deallocation_deleted_instance(self):
instance_ref = db.api.instance_create(self.context,
{"project_id": self.project_id, "deleted": True})
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
def test_floating_dns_create_conflict(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.assertRaises(exception.FloatingIpDNSExists,
self.network.add_dns_entry, self.context,
address1, name1, "A", zone)
def test_floating_create_and_get(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertFalse(entries)
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEquals(len(entries), 2)
self.assertEquals(entries[0], name1)
self.assertEquals(entries[1], name2)
entries = self.network.get_dns_entries_by_name(self.context,
name1, zone)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], address1)
def test_floating_dns_delete(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
self.network.delete_dns_entry(self.context, name1, zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], name2)
self.assertRaises(exception.NotFound,
self.network.delete_dns_entry, self.context,
name1, zone)
def test_floating_dns_domains_public(self):
zone1 = "testzone"
domain1 = "example.org"
domain2 = "example.com"
address1 = '10.10.10.10'
entryname = 'testentry'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_public_dns_domain, self.context,
domain1, zone1)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
self.assertEquals(len(domains), 2)
self.assertEquals(domains[0]['domain'], domain1)
self.assertEquals(domains[1]['domain'], domain2)
self.assertEquals(domains[0]['project'], 'testproject')
self.assertEquals(domains[1]['project'], 'fakeproject')
self.network.add_dns_entry(self.context, address1, entryname,
'A', domain1)
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], address1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
# Verify that deleting the domain deleted the associated entry
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertFalse(entries)
def test_delete_all_by_ip(self):
domain1 = "example.org"
domain2 = "example.com"
address = "10.10.10.10"
name1 = "foo"
name2 = "bar"
def fake_domains(context):
return [{'domain': 'example.org', 'scope': 'public'},
{'domain': 'example.com', 'scope': 'public'},
{'domain': 'test.example.org', 'scope': 'public'}]
self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
for domain in domains:
self.network.add_dns_entry(self.context, address,
name1, "A", domain['domain'])
self.network.add_dns_entry(self.context, address,
name2, "A", domain['domain'])
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertEquals(len(entries), 2)
self.network._delete_all_entries_for_ip(self.context, address)
for domain in domains:
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertFalse(entries)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
class NetworkPolicyTestCase(test.TestCase):
def setUp(self):
super(NetworkPolicyTestCase, self).setUp()
nova.policy.reset()
nova.policy.init()
self.context = context.get_admin_context()
def tearDown(self):
super(NetworkPolicyTestCase, self).tearDown()
nova.policy.reset()
def _set_rules(self, rules):
nova.common.policy.set_brain(nova.common.policy.HttpBrain(rules))
def test_check_policy(self):
self.mox.StubOutWithMock(nova.policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
}
nova.policy.enforce(self.context, 'network:get_all', target)
self.mox.ReplayAll()
network_manager.check_policy(self.context, 'get_all')
class InstanceDNSTestCase(test.TestCase):
"""Tests nova.network.manager instance DNS"""
def setUp(self):
super(InstanceDNSTestCase, self).setUp()
self.tempdir = tempfile.mkdtemp()
self.flags(logdir=self.tempdir)
self.network = TestFloatingIPManager()
temp = utils.import_object('nova.network.minidns.MiniDNS')
self.network.instance_dns_manager = temp
temp = utils.import_object('nova.network.dns_driver.DNSDriver')
self.network.floating_dns_manager = temp
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
def tearDown(self):
shutil.rmtree(self.tempdir)
super(InstanceDNSTestCase, self).tearDown()
def test_dns_domains_private(self):
zone1 = 'testzone'
domain1 = 'example.org'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_private_dns_domain, self.context,
domain1, zone1)
self.network.create_private_dns_domain(context_admin, domain1, zone1)
domains = self.network.get_dns_domains(self.context)
self.assertEquals(len(domains), 1)
self.assertEquals(domains[0]['domain'], domain1)
self.assertEquals(domains[0]['availability_zone'], zone1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.TestCase):
"""Tests nova.network.ldapdns.LdapDNS"""
def setUp(self):
super(LdapDNSTestCase, self).setUp()
self.saved_ldap = sys.modules.get('ldap')
import nova.auth.fakeldap
sys.modules['ldap'] = nova.auth.fakeldap
temp = utils.import_object('nova.network.ldapdns.FakeLdapDNS')
self.driver = temp
self.driver.create_domain(domain1)
self.driver.create_domain(domain2)
def tearDown(self):
self.driver.delete_domain(domain1)
self.driver.delete_domain(domain2)
sys.modules['ldap'] = self.saved_ldap
super(LdapDNSTestCase, self).tearDown()
def test_ldap_dns_domains(self):
domains = self.driver.get_domains()
self.assertEqual(len(domains), 2)
self.assertIn(domain1, domains)
self.assertIn(domain2, domains)
def test_ldap_dns_create_conflict(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.driver.create_entry(name1, address1, "A", domain1)
self.assertRaises(exception.FloatingIpDNSExists,
self.driver.create_entry,
name1, address1, "A", domain1)
def test_ldap_dns_create_and_get(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertFalse(entries)
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEquals(len(entries), 2)
self.assertEquals(entries[0], name1)
self.assertEquals(entries[1], name2)
entries = self.driver.get_entries_by_name(name1, domain1)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], address1)
def test_ldap_dns_delete(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEquals(len(entries), 2)
self.driver.delete_entry(name1, domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
LOG.debug("entries: %s" % entries)
self.assertEquals(len(entries), 1)
self.assertEquals(entries[0], name2)
self.assertRaises(exception.NotFound,
self.driver.delete_entry,
name1, domain1)
|
sileht/deb-openstack-nova
|
nova/tests/network/test_manager.py
|
Python
|
apache-2.0
| 72,549
|
[
"FEFF"
] |
fc7adf3351b5726d79fad4f165024a3b8c4f76388539b95f1bb4aad60616e970
|
__author__ = "Amaral LAN"
__copyright__ = "Copyright 2017-2018, Amaral LAN"
__credits__ = ["Amaral LAN"]
__license__ = "GPL"
__version__ = "1.1"
__maintainer__ = "Amaral LAN"
__email__ = "amaral@northwestern.edu"
__status__ = "Production"
from bs4 import BeautifulSoup
from splinter import Browser
import pymongo
from my_settings import PUBLICATION_TYPES, FLAGS, URLS
from my_mongo_db_login import DB_LOGIN_INFO
if __name__ == "__main__":
connection = pymongo.MongoClient(DB_LOGIN_INFO['credentials'], DB_LOGIN_INFO['port'])
db = connection['amaral_cv_data']
print('Opened connection')
# Create session for retrieval of data
#
cross_ref_url = URLS['doi']
with Browser('chrome') as browser:
for pub_type in PUBLICATION_TYPES:
collection = db['publications' + '_' + pub_type.lower()]
print('\n\n', pub_type.upper())
paper_ids = []
for paper in collection.find():
paper_ids.append(paper['_id'])
print('There are {} papers in this group'.format(len(paper_ids)))
for paper_id in paper_ids:
update = False
if FLAGS['update_DOIs']:
update = True
else:
paper = collection.find_one({'_id': paper_id})
if 'doi' not in paper.keys():
update = True
if update:
title = paper['Title'].lower()
                    first_author = paper['Authors'].split()[0].replace("`", '').replace("'", '').replace(":", '').lower()
paper_code = paper['Year'] + ' ' + first_author + ' ' + title
print(paper_code)
if 'doi' not in paper.keys():
new_title = title
new_author = first_author
success_flag = False
flag = 0
while True:
browser.visit(cross_ref_url)
browser.fill('auth', new_author)
browser.fill('title', paper['Journal'])
browser.fill('atitle', new_title)
                            if paper['Volume']:
                                browser.fill('volume', paper['Volume'])
browser.fill('year', paper['Year'])
browser.find_by_name('view_records').click()
soup = BeautifulSoup(browser.html, 'html.parser')
item = soup.find('table', {'width': 600})
for child in item.findAll('tr'):
tmp = str( child.text )
if 'http' in tmp:
print(tmp)
collection.update_one( {'_id': paper_id}, {'$set': {'doi': tmp.strip()}} )
success_flag = True
if flag == 'title':
collection.update_one({'_id': paper_id}, {'$set': {'Title_doi': new_title}})
elif flag == 'author':
collection.update_one({'_id': paper_id}, {'$set': {'Author_doi': new_author}})
break
if success_flag:
break
else:
print(paper_code)
print('Not successful in getting doi')
a = input('Try with different title? [Y/n] ')
if a == 'n' or a == 'N':
a = input('Try with different author? [Y/n] ')
if a == 'n' or a == 'N':
a = input('Enter doi? [Y/n] ')
if a == 'n' or a == 'N':
break
else:
doi_input = input('Enter doi ')
collection.update_one({'_id': paper_id}, {'$set': {'doi': doi_input}})
success_flag = True
break
else:
new_author = input('Enter new name ')
flag = 'author'
else:
new_title = input('Enter new title ')
flag = 'title'
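# --- Illustrative sketch (not part of the original script) ------------------
# The loop above drives a browser through CrossRef's HTML search form. A
# minimal alternative, assuming network access, would query the public
# CrossRef REST API directly; this helper is a hedged sketch, not the
# author's code, and `requests` is a sketch-only dependency.
def lookup_doi(author, title, year):
    """Return the best-matching DOI from api.crossref.org, or None."""
    import requests
    params = {'query.bibliographic': '{} {} {}'.format(author, title, year),
              'rows': 1}
    r = requests.get('https://api.crossref.org/works', params=params, timeout=30)
    items = r.json().get('message', {}).get('items', [])
    return items[0].get('DOI') if items else None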
|
lamaral1968/maintaining_latex_cv
|
scrape_doi.py
|
Python
|
mit
| 4,816
|
[
"VisIt"
] |
0986f9917465f93195cbd63afa7298ae20995bf9f0cef509d8f6cedca9706fc9
|
# -*- coding: utf-8 -*-
# Vahid Moosavi 2014 10 23 9:04 pm
#sevamoo@gmail.com
#Chair For Computer Aided Architectural Design, ETH Zurich
# Future Cities Lab
#www.vahidmoosavi.com
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
import numexpr as ne
from time import time
import scipy.spatial as spdist
import tables as tb
import timeit
import sys
from sklearn.externals.joblib import Parallel, delayed
from sklearn.externals.joblib import load, dump
import tempfile
import shutil
import os
import itertools
from scipy.sparse import csr_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition import PCA
from sklearn import neighbors
from matplotlib.colors import LogNorm
from matplotlib import cm
import matplotlib
import pandas as pd
class SOM(object):
def __init__(self, name, Data, mapsize=None, norm_method='var', initmethod='pca'):
"""
name and data
"""
self.name = name
self.data_raw = Data
if norm_method == 'var':
Data = normalize(Data, method=norm_method)
self.data = Data
else:
self.data = Data
self.dim = Data.shape[1]
self.dlen = Data.shape[0]
self.set_topology(mapsize=mapsize)
self.set_algorithm(initmethod=initmethod)
self.calc_map_dist()
#Slow for large data sets
#self.set_data_labels()
#set SOM topology
def set_topology(self, mapsize = None, mapshape = 'planar', lattice = 'rect', mask = None, compname = None):
"""
all_mapshapes = ['planar','toroid','cylinder']
all_lattices = ['hexa','rect']
"""
self.mapshape = mapshape
self.lattice = lattice
#to set mask
if mask == None:
self.mask = np.ones([1,self.dim])
else:
self.mask = mask
#to set map size
if mapsize == None:
tmp = int(round(np.sqrt(self.dlen)))
self.nnodes = tmp
self.mapsize = [int(3./5*self.nnodes), int(2./5*self.nnodes)]
else:
if len(mapsize)==2:
if np.min(mapsize) == 1:
self.mapsize = [1, np.max(mapsize)]
else:
self.mapsize = mapsize
elif len(mapsize) == 1:
                self.mapsize = [1, mapsize[0]]
                print 'input was considered as node numbers'
                print 'map size is [{0},{1}]'.format(self.mapsize[0], self.mapsize[1])
self.nnodes = self.mapsize[0]*self.mapsize[1]
# to set component names
if compname == None:
try:
cc = list()
for i in range(0,self.dim):
cc.append ('Variable-'+ str(i+1))
self.compname = np.asarray(cc)[np.newaxis,:]
            except:
                print 'no data yet: please first set training data to the SOM'
else:
try:
dim = getattr(self,'dim')
if len(compname) == dim:
self.compname = np.asarray(compname)[np.newaxis,:]
else:
                    print 'compname should have the same size as the data dimension'
            except:
                print 'no data yet: please first set training data to the SOM'
#Set labels of the training data
# it should be in the format of a list of strings
def set_data_labels(self, dlabel = None):
if dlabel == None:
try:
dlen = (getattr(self,'dlen'))
cc = list()
for i in range(0,dlen):
cc.append ('dlabel-'+ str(i))
self.dlabel = np.asarray(cc)[:, np.newaxis]
            except:
                print 'no data yet: please first set training data to the SOM'
else:
try:
dlen = (getattr(self,'dlen'))
if dlabel.shape == (1,dlen):
self.dlabel = dlabel.T#[:,np.newaxis]
elif dlabel.shape == (dlen,1):
self.dlabel = dlabel
elif dlabel.shape == (dlen,):
self.dlabel = dlabel[:, np.newaxis]
else:
                    print 'wrong label format'
            except:
                print 'no data yet: please first set training data to the SOM'
#calculating the grid distance, which will be called during the training steps
#currently just works for planar grids
def calc_map_dist(self):
cd = getattr(self, 'nnodes')
UD2 = np.zeros((cd, cd))
for i in range(cd):
UD2[i,:] = grid_dist(self, i).reshape(1,cd)
self.UD2 = UD2
def set_algorithm(self, initmethod = 'pca', algtype = 'batch', neighborhoodmethod = 'gaussian', alfatype = 'inv', alfaini = .5, alfafinal = .005):
"""
initmethod = ['random', 'pca']
algos = ['seq','batch']
all_neigh = ['gaussian','manhatan','bubble','cut_gaussian','epanechicov' ]
alfa_types = ['linear','inv','power']
"""
self.initmethod = initmethod
self.algtype = algtype
self.alfaini = alfaini
self.alfafinal = alfafinal
self.neigh = neighborhoodmethod
###################################
#visualize map
def view_map(self, what = 'codebook', which_dim = 'all', pack= 'Yes', text_size = 2.8,save='No', save_dir = 'empty',grid='No',text='Yes'):
mapsize = getattr(self, 'mapsize')
if np.min(mapsize) >1:
if pack == 'No':
view_2d(self, text_size, which_dim = which_dim, what = what)
else:
# print 'hi'
view_2d_Pack(self, text_size, which_dim = which_dim,what = what,save = save, save_dir = save_dir, grid=grid,text=text)
elif np.min(mapsize) == 1:
view_1d(self, text_size, which_dim = which_dim, what = what)
################################################################################
# Initialize map codebook: Weight vectors of SOM
def init_map(self):
dim = 0
n_nod = 0
if getattr(self, 'initmethod')=='random':
#It produces random values in the range of min- max of each dimension based on a uniform distribution
mn = np.tile(np.min(getattr(self,'data'), axis =0), (getattr(self, 'nnodes'),1))
mx = np.tile(np.max(getattr(self,'data'), axis =0), (getattr(self, 'nnodes'),1))
setattr(self, 'codebook', mn + (mx-mn)*(np.random.rand(getattr(self, 'nnodes'), getattr(self, 'dim'))))
elif getattr(self, 'initmethod') == 'pca':
codebooktmp = lininit(self) #it is based on two largest eigenvalues of correlation matrix
setattr(self, 'codebook', codebooktmp)
else:
            print 'please select a correct initialization method'
print 'set a correct one in SOM. current SOM.initmethod: ', getattr(self, 'initmethod')
print "possible init methods:'random', 'pca'"
#Main loop of training
def train(self, trainlen = None, n_job = 1, shared_memory = 'no',verbose='on'):
t0 = time()
data = getattr(self, 'data')
nnodes = getattr(self, 'nnodes')
dlen = getattr(self, 'dlen')
dim = getattr(self, 'dim')
mapsize = getattr(self, 'mapsize')
mem = np.log10(dlen*nnodes*dim)
print 'data len is %d and data dimension is %d' % (dlen, dim)
print 'map size is %d, %d' %(mapsize[0], mapsize[1])
print 'array size in log10 scale' , mem
print 'number of jobs in parallel: ', n_job
#######################################
#initialization
if verbose=='on':
print
print 'initialization method = %s, initializing..' %getattr(self, 'initmethod')
print
t0 = time()
self.init_map()
if verbose=='on':
print 'initialization done in %f seconds' % round(time()-t0 , 3 )
########################################
#rough training
if verbose=='on':
print
batchtrain(self, njob = n_job, phase = 'rough', shared_memory = 'no',verbose=verbose)
if verbose=='on':
print
#######################################
#Finetuning
if verbose=='on':
print
batchtrain(self, njob = n_job, phase = 'finetune', shared_memory = 'no',verbose=verbose)
err = np.mean(getattr(self, 'bmu')[1])
if verbose=='on':
# or verbose == 'off':
# print
ts = round(time() - t0, 3)
print
print "Total time elapsed: %f secodns" %ts
print "final quantization error: %f" %err
#to project a data set to a trained SOM and find the index of bmu
#It is based on nearest neighborhood search module of scikitlearn, but it is not that fast.
def project_data(self, data):
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
clf = neighbors.KNeighborsClassifier(n_neighbors = 1)
labels = np.arange(0,codebook.shape[0])
clf.fit(codebook, labels)
# the codebook values are all normalized
#we can normalize the input data based on mean and std of original data
data = normalize_by(data_raw, data, method='var')
#data = normalize(data, method='var')
#plt.hist(data[:,2])
Predicted_labels = clf.predict(data)
return Predicted_labels
def predict_by(self, data, Target, K =5, wt= 'distance'):
"""
‘uniform’
"""
# here it is assumed that Target is the last column in the codebook
#and data has dim-1 columns
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
dim = codebook.shape[1]
ind = np.arange(0,dim)
indX = ind[ind != Target]
X = codebook[:,indX]
Y = codebook[:,Target]
n_neighbors = K
clf = neighbors.KNeighborsRegressor(n_neighbors, weights = wt)
clf.fit(X, Y)
# the codebook values are all normalized
#we can normalize the input data based on mean and std of original data
dimdata = data.shape[1]
if dimdata == dim:
            data[:,Target] = 0
data = normalize_by(data_raw, data, method='var')
data = data[:,indX]
elif dimdata == dim -1:
data = normalize_by(data_raw[:,indX], data, method='var')
#data = normalize(data, method='var')
Predicted_values = clf.predict(data)
Predicted_values = denormalize_by(data_raw[:,Target], Predicted_values)
return Predicted_values
def predict(self, X_test, K =5, wt= 'distance'):
"""
‘uniform’
"""
#Similar to SKlearn we assume that we have X_tr, Y_tr and X_test
# here it is assumed that Target is the last column in the codebook
#and data has dim-1 columns
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
dim = codebook.shape[1]
Target = data_raw.shape[1]-1
X_train = codebook[:,:Target]
Y_train= codebook[:,Target]
n_neighbors = K
clf = neighbors.KNeighborsRegressor(n_neighbors, weights = wt)
clf.fit(X_train, Y_train)
# the codebook values are all normalized
#we can normalize the input data based on mean and std of original data
X_test = normalize_by(data_raw[:,:Target], X_test, method='var')
Predicted_values = clf.predict(X_test)
Predicted_values = denormalize_by(data_raw[:,Target], Predicted_values)
return Predicted_values
def find_K_nodes(self, data, K =5):
from sklearn.neighbors import NearestNeighbors
# we find the k most similar nodes to the input vector
codebook = getattr(self, 'codebook')
neigh = NearestNeighbors(n_neighbors = K)
neigh.fit(codebook)
data_raw = getattr(self,'data_raw')
# the codebook values are all normalized
#we can normalize the input data based on mean and std of original data
data = normalize_by(data_raw, data, method='var')
return neigh.kneighbors(data)
def ind_to_xy(self, bm_ind):
msize = getattr(self, 'mapsize')
rows = msize[0]
cols = msize[1]
#bmu should be an integer between 0 to no_nodes
out = np.zeros((bm_ind.shape[0],3))
out[:,2] = bm_ind
        # row index (an earlier flipped variant, rows-1-bm_ind/cols, was dead
        # code because it was immediately overwritten, so it is dropped here)
        out[:,0] = bm_ind/cols
out[:,1] = bm_ind%cols
return out.astype(int)
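    # Worked example for ind_to_xy (illustrative, not part of the original):
    # for mapsize = [3, 4] (rows=3, cols=4), bm_ind = 5 gives row 5/4 = 1 and
    # col 5 % 4 = 1, i.e. the node in the second row, second column.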
def cluster(self,method='Kmeans',n_clusters=8):
import sklearn.cluster as clust
        km = clust.KMeans(n_clusters=n_clusters)
labels = km.fit_predict(denormalize_by(self.data_raw, self.codebook, n_method = 'var'))
setattr(self,'cluster_labels',labels)
return labels
def hit_map(self,data=None):
#First Step: show the hitmap of all the training data
# print 'None'
data_tr = getattr(self, 'data_raw')
proj = self.project_data(data_tr)
msz = getattr(self, 'mapsize')
coord = self.ind_to_xy(proj)
fig = plt.figure(figsize=(msz[1]/2,msz[0]/2))
ax = fig.add_subplot(111)
ax.xaxis.set_ticks([i for i in range(0,msz[1])])
ax.yaxis.set_ticks([i for i in range(0,msz[0])])
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.grid(True,linestyle='-', linewidth=.5)
a = plt.hist2d(coord[:,1], coord[:,0], bins=(msz[1],msz[0]),alpha=.0,norm = LogNorm(),cmap=cm.jet)
# clbar = plt.colorbar()
x = np.arange(.5,msz[1]+.5,1)
y = np.arange(.5,msz[0]+.5,1)
X, Y = np.meshgrid(x, y)
area = a[0].T*12
plt.scatter(X, Y, s=area, alpha=0.2,c='b',marker='o',cmap='jet',linewidths=3, edgecolor = 'r')
plt.scatter(X, Y, s=area, alpha=0.9,c='None',marker='o',cmap='jet',linewidths=3, edgecolor = 'r')
plt.xlim(0,msz[1])
plt.ylim(0,msz[0])
        if data is not None:
proj = self.project_data(data)
msz = getattr(self, 'mapsize')
coord = self.ind_to_xy(proj)
a = plt.hist2d(coord[:,1], coord[:,0], bins=(msz[1],msz[0]),alpha=.0,norm = LogNorm(),cmap=cm.jet)
# clbar = plt.colorbar()
x = np.arange(.5,msz[1]+.5,1)
y = np.arange(.5,msz[0]+.5,1)
X, Y = np.meshgrid(x, y)
area = a[0].T*50
plt.scatter(X, Y, s=area, alpha=0.2,c='b',marker='o',cmap='jet',linewidths=3, edgecolor = 'r')
plt.scatter(X, Y, s=area, alpha=0.9,c='None',marker='o',cmap='jet',linewidths=3, edgecolor = 'r')
plt.xlim(0,msz[1])
plt.ylim(0,msz[0])
# plt.show()
def hit_map_cluster_number(self,data=None):
if hasattr(self, 'cluster_labels'):
codebook = getattr(self, 'cluster_labels')
else:
print 'clustering based on default parameters...'
codebook = self.cluster()
msz = getattr(self, 'mapsize')
fig = plt.figure(figsize=(msz[1]/2.5,msz[0]/2.5))
ax = fig.add_subplot(111)
# ax.xaxis.set_ticklabels([])
# ax.yaxis.set_ticklabels([])
# ax.grid(True,linestyle='-', linewidth=.5)
        if data is None:
data_tr = getattr(self, 'data_raw')
proj = self.project_data(data_tr)
coord = self.ind_to_xy(proj)
cents = self.ind_to_xy(np.arange(0,msz[0]*msz[1]))
for i, txt in enumerate(codebook):
ax.annotate(txt, (cents[i,1],cents[i,0]),size=10, va="center")
        if data is not None:
proj = self.project_data(data)
coord = self.ind_to_xy(proj)
x = np.arange(.5,msz[1]+.5,1)
y = np.arange(.5,msz[0]+.5,1)
cents = self.ind_to_xy(proj)
# cents[:,1] = cents[:,1]+.2
print cents.shape
label = codebook[proj]
for i, txt in enumerate(label):
ax.annotate(txt, (cents[i,1],cents[i,0]),size=10, va="center")
plt.imshow(codebook.reshape(msz[0],msz[1])[::],alpha=.5)
# plt.pcolor(codebook.reshape(msz[0],msz[1])[::-1],alpha=.5,cmap='jet')
# plt.show()
def predict_Probability(self, data, Target, K =5):
# here it is assumed that Target is the last column in the codebook #and data has dim-1 columns
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
dim = codebook.shape[1]
ind = np.arange(0,dim)
indX = ind[ind != Target]
X = codebook[:,indX]
Y = codebook[:,Target]
n_neighbors = K
clf = neighbors.KNeighborsRegressor(n_neighbors, weights = 'distance')
clf.fit(X, Y)
# the codebook values are all normalized
#we can normalize the input data based on mean and std of original data
dimdata = data.shape[1]
if dimdata == dim:
            data[:,Target] = 0
data = normalize_by(data_raw, data, method='var')
data = data[:,indX]
elif dimdata == dim -1:
data = normalize_by(data_raw[:,indX], data, method='var')
#data = normalize(data, method='var')
weights,ind= clf.kneighbors(data, n_neighbors=K, return_distance=True)
weights = 1./weights
sum_ = np.sum(weights,axis=1)
weights = weights/sum_[:,np.newaxis]
labels = np.sign(codebook[ind,Target])
labels[labels>=0]=1
#for positives
pos_prob = labels.copy()
pos_prob[pos_prob<0]=0
pos_prob = pos_prob*weights
pos_prob = np.sum(pos_prob,axis=1)[:,np.newaxis]
#for negatives
neg_prob = labels.copy()
neg_prob[neg_prob>0]=0
neg_prob = neg_prob*weights*-1
neg_prob = np.sum(neg_prob,axis=1)[:,np.newaxis]
#Predicted_values = clf.predict(data)
#Predicted_values = denormalize_by(data_raw[:,Target], Predicted_values)
return np.concatenate((pos_prob,neg_prob),axis=1)
def node_Activation(self, data, wt= 'distance',Target = None):
"""
‘uniform’
"""
if Target == None:
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
clf = neighbors.KNeighborsClassifier(n_neighbors = getattr(self, 'nnodes'))
labels = np.arange(0,codebook.shape[0])
clf.fit(codebook, labels)
# the codebook values are all normalized
#we can normalize the input data based on mean and std of original data
data = normalize_by(data_raw, data, method='var')
weights,ind= clf.kneighbors(data)
weights = 1./weights
##Softmax function
S_ = np.sum(np.exp(weights),axis=1)[:,np.newaxis]
weights = np.exp(weights)/S_
# sum_ = np.sum(weights,axis=1)
# weights = weights/sum_[:,np.newaxis]
return weights , ind
#
def para_bmu_find(self, x, y, njb = 1):
dlen = x.shape[0]
Y2 = None
Y2 = np.einsum('ij,ij->i', y, y)
bmu = None
b = None
#here it finds BMUs for chunk of data in parallel
t_temp = time()
b = Parallel(n_jobs=njb, pre_dispatch='3*n_jobs')(delayed(chunk_based_bmu_find)\
(self, x[i*dlen // njb:min((i+1)*dlen // njb, dlen)],y, Y2) \
for i in xrange(njb))
#print 'bmu finding: %f seconds ' %round(time() - t_temp, 3)
t1 = time()
bmu = np.asarray(list(itertools.chain(*b))).T
#print 'bmu to array: %f seconds' %round(time() - t1, 3)
del b
return bmu
    #First finds the Voronoi set of each node. It needs to calculate a smaller matrix. Much faster compared to the classic batch training algorithm.
    # It is based on the algorithm implemented in the SOM Toolbox for Matlab by Helsinki University of Technology
def update_codebook_voronoi(self, training_data, bmu, H, radius):
#bmu has shape of 2,dlen, where first row has bmuinds
# we construct ud2 from precomputed UD2 : ud2 = UD2[bmu[0,:]]
nnodes = getattr(self, 'nnodes')
dlen = getattr(self ,'dlen')
dim = getattr(self, 'dim')
New_Codebook = np.empty((nnodes, dim))
inds = bmu[0].astype(int)
row = inds
col = np.arange(dlen)
val = np.tile(1,dlen)
P = csr_matrix( (val,(row,col)), shape=(nnodes,dlen) )
S = np.empty((nnodes, dim))
S = P.dot(training_data)
#assert( S.shape == (nnodes, dim))
#assert( H.shape == (nnodes, nnodes))
# H has nnodes*nnodes and S has nnodes*dim ---> Nominator has nnodes*dim
#print Nom
Nom = np.empty((nnodes,nnodes))
Nom = H.T.dot(S)
#assert( Nom.shape == (nnodes, dim))
nV = np.empty((1,nnodes))
nV = P.sum(axis = 1).reshape(1, nnodes)
#assert(nV.shape == (1, nnodes))
Denom = np.empty((nnodes,1))
Denom = nV.dot(H.T).reshape(nnodes, 1)
#assert( Denom.shape == (nnodes, 1))
New_Codebook = np.divide(Nom, Denom)
Nom = None
Denom = None
#assert (New_Codebook.shape == (nnodes,dim))
#setattr(som, 'codebook', New_Codebook)
return np.around(New_Codebook, decimals = 6)
# we will call this function in parallel for different number of jobs
def chunk_based_bmu_find(self, x, y, Y2):
dim = x.shape[1]
dlen = x.shape[0]
nnodes = y.shape[0]
bmu = np.empty((dlen,2))
    #it seems that small batches for large dlen are really faster:
    # that is because of ddata in loops and n_jobs. for large data it slows down due to memory needs in parallel
blen = min(50,dlen)
i0 = 0;
d = None
t = time()
while i0+1<=dlen:
Low = (i0)
High = min(dlen,i0+blen)
i0 = i0+blen
ddata = x[Low:High+1]
d = np.dot(y, ddata.T)
d *= -2
d += Y2.reshape(nnodes,1)
bmu[Low:High+1,0] = np.argmin(d, axis = 0)
bmu[Low:High+1,1] = np.min(d, axis = 0)
del ddata
d = None
return bmu
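# Note on the partial-distance trick above (added comment, not in the original):
# for the squared euclidean distance ||x - y||^2 = ||x||^2 + ||y||^2 - 2*x.y,
# the ||x||^2 term is identical for every codebook vector y, so dropping it
# does not change the argmin over nodes; batchtrain() adds X2 back afterwards
# to report the true quantization error.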
#Batch training which is called for rought training as well as finetuning
def batchtrain(self, njob = 1, phase = None, shared_memory = 'no', verbose='on'):
t0 = time()
nnodes = getattr(self, 'nnodes')
dlen = getattr(self, 'dlen')
dim = getattr(self, 'dim')
mapsize = getattr(self, 'mapsize')
#############################################
# seting the parameters
initmethod = getattr(self,'initmethod')
mn = np.min(mapsize)
if mn == 1:
mpd = float(nnodes*10)/float(dlen)
else:
mpd = float(nnodes)/float(dlen)
ms = max(mapsize[0],mapsize[1])
if mn == 1:
ms = ms/5.
#Based on somtoolbox, Matlab
#case 'train', sTrain.trainlen = ceil(50*mpd);
#case 'rough', sTrain.trainlen = ceil(10*mpd);
#case 'finetune', sTrain.trainlen = ceil(40*mpd);
if phase == 'rough':
#training length
trainlen = int(np.ceil(10*mpd))
#radius for updating
if initmethod == 'random':
# trainlen = int(np.ceil(15*mpd))
radiusin = max(1, np.ceil(ms/2.))
radiusfin = max(1, radiusin/8.)
elif initmethod == 'pca':
radiusin = max(1, np.ceil(ms/8.))
radiusfin = max(1, radiusin/4.)
elif phase == 'finetune':
        #training length
trainlen = int(np.ceil(40*mpd))
#radius for updating
if initmethod == 'random':
# trainlen = int(np.ceil(50*mpd))
radiusin = max(1, ms/8.) #from radius fin in rough training
radiusfin = max(1, radiusin/16.)
elif initmethod == 'pca':
radiusin = max(1, np.ceil(ms/8.)/4)
radiusfin = 1#max(1, ms/128)
radius = np.linspace(radiusin, radiusfin, trainlen)
##################################################
UD2 = getattr(self, 'UD2')
New_Codebook_V = np.empty((nnodes, dim))
New_Codebook_V = getattr(self, 'codebook')
#print 'data is in shared memory?', shared_memory
if shared_memory == 'yes':
data = getattr(self, 'data')
Data_folder = tempfile.mkdtemp()
data_name = os.path.join(Data_folder, 'data')
dump(data, data_name)
data = load(data_name, mmap_mode='r')
else:
data = getattr(self, 'data')
    #X2 is part of euclidean distance (x-y)^2 = x^2 + y^2 - 2xy that we use for each data row in bmu finding.
    #Since it is a fixed value we can skip it during bmu finding for each data point, but later we need it to calculate the quantization error
X2 = np.einsum('ij,ij->i', data, data)
if verbose=='on':
print '%s training...' %phase
print 'radius_ini: %f , radius_final: %f, trainlen: %d' %(radiusin, radiusfin, trainlen)
for i in range(trainlen):
        #in case of Gaussian neighborhood
H = np.exp(-1.0*UD2/(2.0*radius[i]**2)).reshape(nnodes, nnodes)
t1 = time()
bmu = None
bmu = self.para_bmu_find(data, New_Codebook_V, njb = njob)
if verbose=='on':
print
#updating the codebook
t2 = time()
        New_Codebook_V = self.update_codebook_voronoi(data, bmu, H, radius[i])
#print 'updating nodes: ', round (time()- t2, 3)
if verbose=='on':
print "epoch: %d ---> elapsed time: %f, quantization error: %f " %(i+1, round(time() - t1, 3),np.mean(np.sqrt(bmu[1] + X2)))
setattr(self, 'codebook', New_Codebook_V)
bmu[1] = np.sqrt(bmu[1] + X2)
setattr(self, 'bmu', bmu)
def grid_dist(self,bmu_ind):
"""
som and bmu_ind
depending on the lattice "hexa" or "rect" we have different grid distance
functions.
bmu_ind is a number between 0 and number of nodes-1. depending on the map size
bmu_coord will be calculated and then distance matrix in the map will be returned
"""
try:
lattice = getattr(self, 'lattice')
except:
lattice = 'hexa'
        print 'lattice not found! defaulting to hexa'
if lattice == 'rect':
return rect_dist(self,bmu_ind)
elif lattice == 'hexa':
try:
msize = getattr(self, 'mapsize')
rows = msize[0]
cols = msize[1]
except:
rows = 0.
cols = 0.
pass
#needs to be implemented
print 'to be implemented' , rows , cols
return np.zeros((rows,cols))
def rect_dist(self,bmu):
#the way we consider the list of nodes in a planar grid is that node0 is on top left corner,
#nodemapsz[1]-1 is top right corner and then it goes to the second row.
#no. of rows is map_size[0] and no. of cols is map_size[1]
try:
msize = getattr(self, 'mapsize')
rows = msize[0]
cols = msize[1]
except:
pass
#bmu should be an integer between 0 to no_nodes
if 0<=bmu<=(rows*cols):
c_bmu = int(bmu%cols)
r_bmu = int(bmu/cols)
else:
print 'wrong bmu'
#calculating the grid distance
if np.logical_and(rows>0 , cols>0):
r,c = np.arange(0, rows, 1)[:,np.newaxis] , np.arange(0,cols, 1)
dist2 = (r-r_bmu)**2 + (c-c_bmu)**2
return dist2.ravel()
else:
print 'please consider the above mentioned errors'
return np.zeros((rows,cols)).ravel()
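# Worked example for rect_dist (illustrative, not part of the original):
# with mapsize = [2, 3] and bmu = 4 we get r_bmu = 1, c_bmu = 1, so the
# returned squared grid distances, row by row, are [2, 1, 2, 1, 0, 1].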
def view_2d(self, text_size,which_dim='all', what = 'codebook'):
msz0, msz1 = getattr(self, 'mapsize')
if what == 'codebook':
if hasattr(self, 'codebook'):
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
codebook = denormalize_by(data_raw, codebook)
else:
print 'first initialize codebook'
if which_dim == 'all':
dim = getattr(self, 'dim')
indtoshow = np.arange(0,dim).T
ratio = float(dim)/float(dim)
ratio = np.max((.35,ratio))
sH, sV = 16,16*ratio*1
plt.figure(figsize=(sH,sV))
elif type(which_dim) == int:
dim = 1
indtoshow = np.zeros(1)
indtoshow[0] = int(which_dim)
sH, sV = 6,6
plt.figure(figsize=(sH,sV))
elif type(which_dim) == list:
max_dim = codebook.shape[1]
dim = len(which_dim)
ratio = float(dim)/float(max_dim)
#print max_dim, dim, ratio
ratio = np.max((.35,ratio))
indtoshow = np.asarray(which_dim).T
sH, sV = 16,16*ratio*1
plt.figure(figsize=(sH,sV))
no_row_in_plot = dim/6 + 1 #6 is arbitrarily selected
if no_row_in_plot <=1:
no_col_in_plot = dim
else:
no_col_in_plot = 6
axisNum = 0
compname = getattr(self, 'compname')
    norm = matplotlib.colors.Normalize(vmin = np.mean(codebook.flatten())-1*np.std(codebook.flatten()), vmax = np.mean(codebook.flatten())+1*np.std(codebook.flatten()), clip = True)
while axisNum <dim:
axisNum += 1
ax = plt.subplot(no_row_in_plot, no_col_in_plot, axisNum)
ind = int(indtoshow[axisNum-1])
mp = codebook[:,ind].reshape(msz0, msz1)
pl = plt.pcolor(mp[::-1],norm = norm)
# pl = plt.imshow(mp[::-1])
plt.title(compname[0][ind])
font = {'size' : text_size*sH/no_col_in_plot}
plt.rc('font', **font)
plt.axis('off')
plt.axis([0, msz0, 0, msz1])
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.colorbar(pl)
plt.show()
def view_2d_Pack(self, text_size,which_dim='all', what = 'codebook',save='No', grid='Yes', save_dir = 'empty',text='Yes'):
msz0, msz1 = getattr(self, 'mapsize')
if what == 'codebook':
if hasattr(self, 'codebook'):
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
codebook = denormalize_by(data_raw, codebook)
else:
print 'first initialize codebook'
if which_dim == 'all':
dim = getattr(self, 'dim')
indtoshow = np.arange(0,dim).T
ratio = float(dim)/float(dim)
ratio = np.max((.35,ratio))
sH, sV = 16,16*ratio*1
# plt.figure(figsize=(sH,sV))
elif type(which_dim) == int:
dim = 1
indtoshow = np.zeros(1)
indtoshow[0] = int(which_dim)
sH, sV = 6,6
# plt.figure(figsize=(sH,sV))
elif type(which_dim) == list:
max_dim = codebook.shape[1]
dim = len(which_dim)
ratio = float(dim)/float(max_dim)
#print max_dim, dim, ratio
ratio = np.max((.35,ratio))
indtoshow = np.asarray(which_dim).T
sH, sV = 16,16*ratio*1
# plt.figure(figsize=(sH,sV))
# plt.figure(figsize=(7,7))
    no_row_in_plot = dim/20 + 1 #20 is arbitrarily selected
if no_row_in_plot <=1:
no_col_in_plot = dim
else:
no_col_in_plot = 20
axisNum = 0
compname = getattr(self, 'compname')
h = .2
w= .001
fig = plt.figure(figsize=(no_col_in_plot*1.5*(1+w),no_row_in_plot*1.5*(1+h)))
# print no_row_in_plot, no_col_in_plot
norm = matplotlib.colors.Normalize(vmin = np.median(codebook.flatten())-1.5*np.std(codebook.flatten()), vmax = np.median(codebook.flatten())+1.5*np.std(codebook.flatten()), clip = False)
DD = pd.Series(data = codebook.flatten()).describe(percentiles=[.03,.05,.1,.25,.3,.4,.5,.6,.7,.8,.9,.95,.97])
norm = matplotlib.colors.Normalize(vmin = DD.ix['3%'], vmax = DD.ix['97%'], clip = False)
while axisNum <dim:
axisNum += 1
ax = fig.add_subplot(no_row_in_plot, no_col_in_plot, axisNum)
ind = int(indtoshow[axisNum-1])
mp = codebook[:,ind].reshape(msz0, msz1)
if grid=='Yes':
pl = plt.pcolor(mp[::-1])
elif grid=='No':
plt.imshow(mp[::-1],norm = None)
# plt.pcolor(mp[::-1])
plt.axis('off')
if text=='Yes':
plt.title(compname[0][ind])
font = {'size' : text_size}
plt.rc('font', **font)
plt.axis([0, msz0, 0, msz1])
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.xaxis.set_ticks([i for i in range(0,msz1)])
ax.yaxis.set_ticks([i for i in range(0,msz0)])
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.grid(True,linestyle='-', linewidth=0.5,color='k')
# plt.grid()
# plt.colorbar(pl)
# plt.tight_layout()
plt.subplots_adjust(hspace = h,wspace=w)
if what == 'cluster':
if hasattr(self, 'cluster_labels'):
codebook = getattr(self, 'cluster_labels')
else:
print 'clustering based on default parameters...'
codebook = self.cluster()
h = .2
w= .001
fig = plt.figure(figsize=(msz0/2,msz1/2))
ax = fig.add_subplot(1, 1, 1)
mp = codebook[:].reshape(msz0, msz1)
if grid=='Yes':
pl = plt.pcolor(mp[::-1])
elif grid=='No':
plt.imshow(mp[::-1])
# plt.pcolor(mp[::-1])
plt.axis('off')
if text=='Yes':
plt.title('clusters')
font = {'size' : text_size}
plt.rc('font', **font)
plt.axis([0, msz0, 0, msz1])
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.xaxis.set_ticks([i for i in range(0,msz1)])
ax.yaxis.set_ticks([i for i in range(0,msz0)])
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.grid(True,linestyle='-', linewidth=0.5,color='k')
plt.subplots_adjust(hspace = h,wspace=w)
if save == 'Yes':
if save_dir != 'empty':
# print save_dir
fig.savefig(save_dir,bbox_inches='tight', transparent=False, dpi=200)
else:
# print save_dir
add = '/Users/itadmin/Desktop/SOM.png'
fig.savefig(add,bbox_inches='tight', transparent=False, dpi=200)
plt.close(fig)
def view_1d(self, text_size, which_dim ='all', what = 'codebook'):
msz0, msz1 = getattr(self, 'mapsize')
if what == 'codebook':
if hasattr(self, 'codebook'):
codebook = getattr(self, 'codebook')
data_raw = getattr(self,'data_raw')
codebook = denormalize_by(data_raw, codebook)
else:
print 'first initialize codebook'
if which_dim == 'all':
dim = getattr(self, 'dim')
indtoshow = np.arange(0,dim).T
ratio = float(dim)/float(dim)
ratio = np.max((.35,ratio))
sH, sV = 16,16*ratio*1
plt.figure(figsize=(sH,sV))
elif type(which_dim) == int:
dim = 1
indtoshow = np.zeros(1)
indtoshow[0] = int(which_dim)
sH, sV = 6,6
plt.figure(figsize=(sH,sV))
elif type(which_dim) == list:
max_dim = codebook.shape[1]
dim = len(which_dim)
ratio = float(dim)/float(max_dim)
#print max_dim, dim, ratio
ratio = np.max((.35,ratio))
indtoshow = np.asarray(which_dim).T
sH, sV = 16,16*ratio*1
plt.figure(figsize=(sH,sV))
no_row_in_plot = dim/6 + 1 #6 is arbitrarily selected
if no_row_in_plot <=1:
no_col_in_plot = dim
else:
no_col_in_plot = 6
axisNum = 0
compname = getattr(self, 'compname')
while axisNum < dim:
axisNum += 1
ax = plt.subplot(no_row_in_plot, no_col_in_plot, axisNum)
ind = int(indtoshow[axisNum-1])
mp = codebook[:,ind]
plt.plot(mp,'-k',linewidth = 0.8)
#pl = plt.pcolor(mp[::-1])
plt.title(compname[0][ind])
font = {'size' : text_size*sH/no_col_in_plot}
plt.rc('font', **font)
#plt.axis('off')
#plt.axis([0, msz0, 0, msz1])
#ax.set_yticklabels([])
#ax.set_xticklabels([])
#plt.colorbar(pl)
plt.show()
def lininit(self):
#X = UsigmaWT
#XTX = Wsigma^2WT
#T = XW = Usigma #Transformed by W EigenVector, can be calculated by
#multiplication PC matrix by eigenval too
    #Further, we can get lower ranks by using just a few of the eigenvectors
    #T(2) = U(2)sigma(2) = XW(2) ---> 2 is the number of selected eigenvectors
    # This is how we initialize the map: just by using the first two eigenvalues and eigenvectors
    # Further, we create a linear combination of them in the new map by giving values from -1 to 1 in each
    #direction of the SOM map
    # it should be noted that here, X is the covariance matrix of the original data
msize = getattr(self, 'mapsize')
rows = msize[0]
cols = msize[1]
nnodes = getattr(self, 'nnodes')
if np.min(msize)>1:
coord = np.zeros((nnodes, 2))
for i in range(0,nnodes):
coord[i,0] = int(i/cols) #x
coord[i,1] = int(i%cols) #y
mx = np.max(coord, axis = 0)
mn = np.min(coord, axis = 0)
coord = (coord - mn)/(mx-mn)
coord = (coord - .5)*2
data = getattr(self, 'data')
me = np.mean(data, 0)
data = (data - me)
codebook = np.tile(me, (nnodes,1))
pca = RandomizedPCA(n_components=2) #Randomized PCA is scalable
#pca = PCA(n_components=2)
pca.fit(data)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.sqrt(np.einsum('ij,ij->i', eigvec, eigvec))
eigvec = ((eigvec.T/norms)*eigval).T; eigvec.shape
for j in range(nnodes):
for i in range(eigvec.shape[0]):
codebook[j,:] = codebook[j, :] + coord[j,i]*eigvec[i,:]
return np.around(codebook, decimals = 6)
elif np.min(msize) == 1:
coord = np.zeros((nnodes, 1))
for i in range(0,nnodes):
#coord[i,0] = int(i/cols) #x
coord[i,0] = int(i%cols) #y
mx = np.max(coord, axis = 0)
mn = np.min(coord, axis = 0)
#print coord
coord = (coord - mn)/(mx-mn)
coord = (coord - .5)*2
#print coord
data = getattr(self, 'data')
me = np.mean(data, 0)
data = (data - me)
codebook = np.tile(me, (nnodes,1))
pca = RandomizedPCA(n_components=1) #Randomized PCA is scalable
#pca = PCA(n_components=2)
pca.fit(data)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.sqrt(np.einsum('ij,ij->i', eigvec, eigvec))
eigvec = ((eigvec.T/norms)*eigval).T; eigvec.shape
for j in range(nnodes):
for i in range(eigvec.shape[0]):
codebook[j,:] = codebook[j, :] + coord[j,i]*eigvec[i,:]
return np.around(codebook, decimals = 6)
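# Illustrative check for lininit (added sketch, not part of the original):
# the initialized codebook has one row per node, the data's dimensionality,
# and lies in the span of the leading principal components, e.g.
#   som = SOM('demo', np.random.rand(500, 5), mapsize=[10, 12])
#   cb = lininit(som)
#   assert cb.shape == (som.nnodes, som.dim)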
def normalize(data, method='var'):
#methods = ['var','range','log','logistic','histD','histC']
#status = ['done', 'undone']
me = np.mean(data, axis = 0)
st = np.std(data, axis = 0)
if method == 'var':
me = np.mean(data, axis = 0)
st = np.std(data, axis = 0)
n_data = (data-me)/st
return n_data
def normalize_by(data_raw, data, method='var'):
#methods = ['var','range','log','logistic','histD','histC']
#status = ['done', 'undone']
# to have the mean and std of the original data, by which SOM is trained
me = np.mean(data_raw, axis = 0)
st = np.std(data_raw, axis = 0)
if method == 'var':
n_data = (data-me)/st
return n_data
def denormalize_by(data_by, n_vect, n_method = 'var'):
#based on the normalization
if n_method == 'var':
me = np.mean(data_by, axis = 0)
st = np.std(data_by, axis = 0)
vect = n_vect* st + me
return vect
else:
        print 'data was not normalized before; returning it unchanged'
return n_vect
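# Round-trip sketch for the normalization helpers (illustrative only):
#   raw = np.random.rand(100, 3)
#   n = normalize_by(raw, raw, method='var')       # zero mean, unit std per column
#   back = denormalize_by(raw, n, n_method='var')  # recovers raw up to float error
#   assert np.allclose(raw, back)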
##Function to show hits
#som_labels = sm.project_data(Tr_Data)
#S = pd.DataFrame(data=som_labels,columns= ['label'])
#a = S['label'].value_counts()
#a = a.sort_index()
#a = pd.DataFrame(data=a.values, index=a.index,columns=['label'])
#d = pd.DataFrame(data= range(msz0*msz1),columns=['node_ID'])
#c = d.join(a,how='outer')
#c.fillna(value=0,inplace=True)
#hits = c.values[:,1]
#hits = hits
#nodeID = np.arange(msz0*msz1)
#c_bmu = nodeID%msz1
#r_bmu = msz0 - nodeID/msz1
#fig, ax = plt.subplots()
#plt.axis([0, msz0, 0, msz1])
#ax.scatter(r_bmu, c_bmu, s=hits/2)
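# --- Illustrative end-to-end usage (added sketch, not in the original) ------
# if __name__ == '__main__':
#     Data = np.random.rand(1000, 4)
#     sm = SOM('demo', Data, norm_method='var', initmethod='pca')
#     sm.train(n_job=1, verbose='on')
#     print 'mean quantization error:', np.mean(sm.bmu[1])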
|
sniemi/EuclidVisibleInstrument
|
sandbox/SOMPY.py
|
Python
|
bsd-2-clause
| 41,421
|
[
"Gaussian"
] |
d6c7c3723c9c63a8260d005a760ae540266a10ae782cf86171d0de0acd978d9f
|
# from the Python Standard Library
import os, re, socket, sys, subprocess
# from Twisted
from twisted.internet import defer, threads, reactor
from twisted.internet.protocol import DatagramProtocol
from twisted.python.procutils import which
from twisted.python import log
try:
import resource
def increase_rlimits():
# We'd like to raise our soft resource.RLIMIT_NOFILE, since certain
# systems (OS-X, probably solaris) start with a relatively low limit
# (256), and some unit tests want to open up more sockets than this.
# Most linux systems start with both hard and soft limits at 1024,
# which is plenty.
# unfortunately the values to pass to setrlimit() vary widely from
# one system to another. OS-X reports (256, HUGE), but the real hard
# limit is 10240, and accepts (-1,-1) to mean raise it to the
# maximum. Cygwin reports (256, -1), then ignores a request of
# (-1,-1): instead you have to guess at the hard limit (it appears to
# be 3200), so using (3200,-1) seems to work. Linux reports a
# sensible (1024,1024), then rejects (-1,-1) as trying to raise the
# maximum limit, so you could set it to (1024,1024) but you might as
# well leave it alone.
try:
current = resource.getrlimit(resource.RLIMIT_NOFILE)
except AttributeError:
# we're probably missing RLIMIT_NOFILE
return
if current[0] >= 1024:
# good enough, leave it alone
return
try:
if current[1] > 0 and current[1] < 1000000:
# solaris reports (256, 65536)
resource.setrlimit(resource.RLIMIT_NOFILE,
(current[1], current[1]))
else:
# this one works on OS-X (bsd), and gives us 10240, but
# it doesn't work on linux (on which both the hard and
# soft limits are set to 1024 by default).
resource.setrlimit(resource.RLIMIT_NOFILE, (-1,-1))
new = resource.getrlimit(resource.RLIMIT_NOFILE)
if new[0] == current[0]:
# probably cygwin, which ignores -1. Use a real value.
resource.setrlimit(resource.RLIMIT_NOFILE, (3200,-1))
except ValueError:
log.msg("unable to set RLIMIT_NOFILE: current value %s"
% (resource.getrlimit(resource.RLIMIT_NOFILE),))
except:
# who knows what. It isn't very important, so log it and continue
log.err()
except ImportError:
def _increase_rlimits():
# TODO: implement this for Windows. Although I suspect the
# solution might be "be running under the iocp reactor and
# make this function be a no-op".
pass
# pyflakes complains about two 'def FOO' statements in the same scope,
# since one might be shadowing the other. This hack appeases pyflakes.
increase_rlimits = _increase_rlimits
def get_local_addresses_async(target="198.41.0.4"): # A.ROOT-SERVERS.NET
"""
Return a Deferred that fires with a list of IPv4 addresses (as dotted-quad
strings) that are currently configured on this host, sorted in descending
order of how likely we think they are to work.
@param target: we want to learn an IP address they could try using to
connect to us; The default value is fine, but it might help if you
pass the address of a host that you are actually trying to be
reachable to.
"""
addresses = []
local_ip = get_local_ip_for(target)
if local_ip:
addresses.append(local_ip)
if sys.platform == "cygwin":
d = _cygwin_hack_find_addresses(target)
else:
d = _find_addresses_via_config()
def _collect(res):
for addr in res:
if addr != "0.0.0.0" and not addr in addresses:
addresses.append(addr)
return addresses
d.addCallback(_collect)
return d
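# Illustrative usage (added note, not part of the original module): the
# function returns a Deferred, so callers attach a callback, e.g.
#
#     d = get_local_addresses_async()
#     d.addCallback(lambda addrs: log.msg("local addresses: %s" % (addrs,)))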
def get_local_ip_for(target):
"""Find out what our IP address is for use by a given target.
@return: the IP address as a dotted-quad string which could be used by
to connect to us. It might work for them, it might not. If
there is no suitable address (perhaps we don't currently have an
externally-visible interface), this will return None.
"""
try:
target_ipaddr = socket.gethostbyname(target)
except socket.gaierror:
# DNS isn't running, or somehow we encountered an error
# note: if an interface is configured and up, but nothing is
# connected to it, gethostbyname("A.ROOT-SERVERS.NET") will take 20
# seconds to raise socket.gaierror . This is synchronous and occurs
# for each node being started, so users of
# test.common.SystemTestMixin (like test_system) will see something
# like 120s of delay, which may be enough to hit the default trial
# timeouts. For that reason, get_local_addresses_async() was changed
# to default to the numerical ip address for A.ROOT-SERVERS.NET, to
# avoid this DNS lookup. This also makes node startup fractionally
# faster.
return None
udpprot = DatagramProtocol()
port = reactor.listenUDP(0, udpprot)
try:
udpprot.transport.connect(target_ipaddr, 7)
localip = udpprot.transport.getHost().host
except socket.error:
# no route to that host
localip = None
port.stopListening() # note, this returns a Deferred
return localip
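# --- Illustrative sketch (not part of the original module) ------------------
# The same UDP-connect trick without Twisted, shown only to clarify the
# technique above: connect() on a UDP socket sends no packets, it merely
# selects a local route, whose source address we then read back.
def _example_local_ip_stdlib(target_ipaddr="198.41.0.4"):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect((target_ipaddr, 7))
        return s.getsockname()[0]
    except socket.error:
        return None
    finally:
        s.close()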
# k: result of sys.platform, v: which kind of IP configuration reader we use
_platform_map = {
"linux-i386": "linux", # redhat
"linux-ppc": "linux", # redhat
"linux2": "linux", # debian
"win32": "win32",
"irix6-n32": "irix",
"irix6-n64": "irix",
"irix6": "irix",
"openbsd2": "bsd",
"openbsd3": "bsd",
"openbsd4": "bsd",
"darwin": "bsd", # Mac OS X
"freebsd4": "bsd",
"freebsd5": "bsd",
"freebsd6": "bsd",
"freebsd7": "bsd",
"freebsd8": "bsd",
"freebsd9": "bsd",
"netbsd1": "bsd",
"netbsd2": "bsd",
"netbsd3": "bsd",
"netbsd4": "bsd",
"netbsd5": "bsd",
"netbsd6": "bsd",
"sunos5": "sunos",
"cygwin": "cygwin",
}
class UnsupportedPlatformError(Exception):
pass
# Wow, I'm really amazed at how much mileage we've gotten out of calling
# the external route.exe program on windows...  It appears to work on all
# versions so far.  Still, the real system calls would be much preferred...
# ... thus wrote Greg Smith in time immemorial...
_win32_path = 'route.exe'
_win32_args = ('print',)
_win32_re = re.compile('^\s*\d+\.\d+\.\d+\.\d+\s.+\s(?P<address>\d+\.\d+\.\d+\.\d+)\s+(?P<metric>\d+)\s*$', flags=re.M|re.I|re.S)
# These work in Redhat 6.x and Debian 2.2 potato
_linux_path = '/sbin/ifconfig'
_linux_re = re.compile('^\s*inet [a-zA-Z]*:?(?P<address>\d+\.\d+\.\d+\.\d+)\s.+$', flags=re.M|re.I|re.S)
# NetBSD 1.4 (submitted by Rhialto), Darwin, Mac OS X
_netbsd_path = '/sbin/ifconfig'
_netbsd_args = ('-a',)
_netbsd_re = re.compile('^\s+inet [a-zA-Z]*:?(?P<address>\d+\.\d+\.\d+\.\d+)\s.+$', flags=re.M|re.I|re.S)
# Irix 6.5
_irix_path = '/usr/etc/ifconfig'
# Solaris 2.x
_sunos_path = '/usr/sbin/ifconfig'
# k: platform string as provided in the value of _platform_map
# v: tuple of (path_to_tool, args, regex,)
_tool_map = {
"linux": (_linux_path, (), _linux_re,),
"win32": (_win32_path, _win32_args, _win32_re,),
"cygwin": (_win32_path, _win32_args, _win32_re,),
"bsd": (_netbsd_path, _netbsd_args, _netbsd_re,),
"irix": (_irix_path, _netbsd_args, _netbsd_re,),
"sunos": (_sunos_path, _netbsd_args, _netbsd_re,),
}
def _find_addresses_via_config():
return threads.deferToThread(_synchronously_find_addresses_via_config)
def _synchronously_find_addresses_via_config():
# originally by Greg Smith, hacked by Zooko to conform to Brian's API
platform = _platform_map.get(sys.platform)
if not platform:
raise UnsupportedPlatformError(sys.platform)
(pathtotool, args, regex,) = _tool_map[platform]
# If pathtotool is a fully qualified path then we just try that.
# If it is merely an executable name then we use Twisted's
# "which()" utility and try each executable in turn until one
# gives us something that resembles a dotted-quad IPv4 address.
if os.path.isabs(pathtotool):
return _query(pathtotool, args, regex)
else:
exes_to_try = which(pathtotool)
for exe in exes_to_try:
try:
addresses = _query(exe, args, regex)
except Exception:
addresses = []
if addresses:
return addresses
return []
def _query(path, args, regex):
env = {'LANG': 'en_US.UTF-8'}
p = subprocess.Popen([path] + list(args), stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
(output, err) = p.communicate()
addresses = []
outputsplit = output.split('\n')
for outline in outputsplit:
m = regex.match(outline)
if m:
addr = m.groupdict()['address']
if addr not in addresses:
addresses.append(addr)
return addresses
def _cygwin_hack_find_addresses(target):
addresses = []
for h in [target, "localhost", "127.0.0.1",]:
try:
addr = get_local_ip_for(h)
if addr not in addresses:
addresses.append(addr)
except socket.gaierror:
pass
return defer.succeed(addresses)
|
drewp/tahoe-lafs
|
src/allmydata/util/iputil.py
|
Python
|
gpl-2.0
| 9,668
|
[
"Brian"
] |
5a0de7e4228e40fbe5420b1006eb15a87b3211582cdc4ecaa4946ab45bdb316d
|
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Utilities for obtaining speech utterances for objects."""
__id__ = "$Id:$"
__version__ = "$Revision:$"
__date__ = "$Date:$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
import urllib.parse, urllib.request, urllib.error
from gi.repository import Atspi, Atk
from . import debug
from . import generator
from . import messages
from . import object_properties
from . import settings
from . import settings_manager
from . import sound
from . import text_attribute_names
class Pause:
"""A dummy class to indicate we want to insert a pause into an
utterance."""
def __init__(self):
pass
PAUSE = [Pause()]
class LineBreak:
"""A dummy class to indicate we want to break an utterance into
separate calls to speak."""
def __init__(self):
pass
LINE_BREAK = [LineBreak()]
# [[[WDW - general note -- for all the _generate* methods, it would be great if
# we could return an empty array if we can determine the method does not
# apply to the object. This would allow us to reduce the number of strings
# needed in formatting.py.]]]
# The prefix to use for the individual generator methods
#
METHOD_PREFIX = "_generate"
DEFAULT = "default"
UPPERCASE = "uppercase"
HYPERLINK = "hyperlink"
SYSTEM = "system"
STATE = "state" # Candidate for sound
VALUE = "value" # Candidate for sound
voiceType = {
DEFAULT: settings.DEFAULT_VOICE,
UPPERCASE: settings.UPPERCASE_VOICE,
HYPERLINK: settings.HYPERLINK_VOICE,
SYSTEM: settings.SYSTEM_VOICE,
STATE: settings.SYSTEM_VOICE, # Users may prefer DEFAULT_VOICE here
VALUE: settings.SYSTEM_VOICE, # Users may prefer DEFAULT_VOICE here
}
_settingsManager = settings_manager.getManager()
class SpeechGenerator(generator.Generator):
"""Takes accessible objects and produces a string to speak for
those objects. See the generateSpeech method, which is the primary
entry point. Subclasses can feel free to override/extend the
speechGenerators instance field as they see fit."""
# pylint: disable-msg=W0142
def __init__(self, script):
generator.Generator.__init__(self, script, "speech")
def _addGlobals(self, globalsDict):
"""Other things to make available from the formatting string.
"""
generator.Generator._addGlobals(self, globalsDict)
globalsDict['voice'] = self.voice
globalsDict['play'] = self.play
def play(self, key):
"""Returns an array containing a sound.Sound instance.
        The key can be a value to be used to look up a filename in the
settings.py:sounds dictionary (e.g., a pyatspi.ROLE_* value)
or just the name of an audio file to use.
"""
sounds = _settingsManager.getSetting('sounds')
try:
soundBite = sound.Sound(sounds[key])
except:
if isinstance(key, str):
soundBite = sound.Sound(key)
else:
soundBite = None
return [soundBite]
def generateSpeech(self, obj, **args):
return self.generate(obj, **args)
#####################################################################
# #
# Name, role, and label information #
# #
#####################################################################
def _generateName(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the name of the object. If the object is directly
displaying any text, that text will be treated as the name.
Otherwise, the accessible name of the object will be used. If
there is no accessible name, then the description of the
object will be used. This method will return an empty array
if nothing can be found. [[[WDW - I wonder if we should just
have _generateName, _generateDescription,
_generateDisplayedText, etc., that don't do any fallback.
Then, we can allow the formatting to do the fallback (e.g.,
'displayedText or name or description'). [[[JD to WDW - I
        needed a _generateDescription for whereAmI. :-) See below.]]]
"""
try:
role = args.get('role', obj.getRole())
except (LookupError, RuntimeError):
debug.println(debug.LEVEL_FINE, "Error getting role for: %s" % obj)
role = None
if role == pyatspi.ROLE_LAYERED_PANE:
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
else:
acss = self.voice(SYSTEM)
else:
acss = self.voice(DEFAULT)
result = generator.Generator._generateName(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateLabel(self, obj, **args):
"""Returns the label for an object as an array of strings for use by
speech and braille. The label is determined by the displayedLabel
method of the script utility, and an empty array will be returned if
no label can be found.
"""
acss = self.voice(DEFAULT)
result = generator.Generator._generateLabel(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateLabelOrName(self, obj, **args):
"""Returns the label as an array of strings for speech and braille.
If the label cannot be found, the name will be used instead.
If the name cannot be found, an empty array will be returned.
"""
result = []
acss = self.voice(DEFAULT)
result.extend(self._generateLabel(obj, **args))
if not result:
if obj.name and (len(obj.name)):
result.append(obj.name)
result.extend(acss)
return result
def _generatePlaceholderText(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the 'placeholder' text. This is typically text that
serves as a functional label and is found in a text widget until
that widget is given focus at which point the text is removed,
the assumption being that the user was able to see the text prior
to giving the widget focus.
"""
acss = self.voice(DEFAULT)
result = generator.Generator._generatePlaceholderText(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateDescription(self, obj, **args):
"""Returns an array of strings fo use by speech and braille that
represent the description of the object, if that description
is different from that of the name and label.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
acss = self.voice(SYSTEM)
result = generator.Generator._generateDescription(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateReadOnly(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the read only state of this object, but only if it
is read only (i.e., it is a text area that cannot be edited).
"""
acss = self.voice(SYSTEM)
result = generator.Generator._generateReadOnly(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateTextRole(self, obj, **args):
"""A convenience method to prevent the pyatspi.ROLE_PARAGRAPH role
from being spoken. In the case of a pyatspi.ROLE_PARAGRAPH
role, an empty array will be returned. In all other cases, the
role name will be returned as an array of strings (and
possibly voice and audio specifications). Note that a 'role'
attribute in args will override the accessible role of the
obj. [[[WDW - I wonder if this should be moved to
_generateRoleName. Or, maybe make a 'do not speak roles' attribute
of a speech generator that we can update and the user can
override.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
role = args.get('role', obj.getRole())
if role != pyatspi.ROLE_PARAGRAPH:
result.extend(self._generateRoleName(obj, **args))
return result
def _generateRoleName(self, obj, **args):
"""Returns the role name for the object in an array of strings (and
possibly voice and audio specifications), with the exception
that the pyatspi.ROLE_UNKNOWN role will yield an empty array.
Note that a 'role' attribute in args will override the
accessible role of the obj.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
role = args.get('role', obj.getRole())
doNotPresent = [pyatspi.ROLE_UNKNOWN,
pyatspi.ROLE_FILLER,
pyatspi.ROLE_EXTENDED]
# egg-list-box, e.g. privacy panel in gnome-control-center
if obj.parent and obj.parent.getRole() == pyatspi.ROLE_LIST_BOX:
doNotPresent.append(obj.getRole())
if _settingsManager.getSetting('speechVerbosityLevel') \
== settings.VERBOSITY_LEVEL_BRIEF:
doNotPresent.extend([pyatspi.ROLE_ICON, pyatspi.ROLE_CANVAS])
if role not in doNotPresent:
result.append(self.getLocalizedRoleName(obj, role))
result.extend(acss)
return result
def getRoleName(self, obj, **args):
"""Returns the role name for the object in an array of strings (and
possibly voice and audio specifications), with the exception
that the pyatspi.ROLE_UNKNOWN role will yield an empty array.
Note that a 'role' attribute in args will override the
accessible role of the obj. This is provided mostly as a
method for scripts to call.
"""
return self._generateRoleName(obj, **args)
@staticmethod
def getLocalizedRoleName(obj, role=None):
"""Returns the localized name of the given Accessible object; the name
is suitable to be spoken.
Arguments:
- obj: an Accessible object
- role: an optional pyatspi role to use instead
"""
if not isinstance(role, pyatspi.Role):
try:
return obj.getLocalizedRoleName()
except:
return ''
if not role:
return ''
nonlocalized = Atspi.role_get_name(role)
atkRole = Atk.role_for_name(nonlocalized)
return Atk.role_get_localized_name(atkRole)
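    # Illustrative note (annotation, not in the original module): for a
    # push button, Atspi.role_get_name(pyatspi.ROLE_PUSH_BUTTON) yields
    # the non-localized string "push button"; Atk.role_for_name() maps
    # that back to an Atk role so Atk.role_get_localized_name() can
    # return the translated role name for the current locale.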
def _generateUnrelatedLabels(self, obj, **args):
"""Returns, as an array of strings (and possibly voice
specifications), all the labels which are underneath the obj's
hierarchy and which are not in a label for or labelled by
relation.
"""
result = []
acss = self.voice(DEFAULT)
labels = self._script.utilities.unrelatedLabels(obj)
for label in labels:
name = self._generateName(label, **args)
result.extend(name)
if result:
result.extend(acss)
return result
def _generateEmbedded(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) used especially for handling embedded objects.
This either is the label or name of the object or the name of
the application for the object.
"""
acss = self.voice(DEFAULT)
result = self._generateLabelOrName(obj, **args)
if not result:
try:
result.append(obj.getApplication().name)
except:
pass
if result:
result.extend(acss)
return result
#####################################################################
# #
# State information #
# #
#####################################################################
def _generateCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
acss = self.voice(STATE)
result = generator.Generator._generateCheckedState(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateExpandableState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the expanded/collapsed state of an object, such as a
tree node. If the object is not expandable, an empty array
will be returned.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
acss = self.voice(STATE)
result = generator.Generator._generateExpandableState(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateMenuItemCheckedState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the menu item, only if it is
        checked. Otherwise, an empty array will be returned.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
acss = self.voice(STATE)
result = generator.Generator.\
_generateMenuItemCheckedState(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateMultiselectableState(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the multiselectable state of
the object. This is typically for check boxes. If the object
is not multiselectable, an empty array will be returned.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(STATE)
if obj.getState().contains(pyatspi.STATE_MULTISELECTABLE):
# Translators: "multi-select" refers to a web form list
# in which more than one item can be selected at a time.
#
result.append(self._script.formatting.getString(
mode='speech',
stringType='multiselect'))
result.extend(acss)
return result
def _generateRadioState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
acss = self.voice(STATE)
result = generator.Generator._generateRadioState(self, obj, **args)
if result:
result.extend(acss)
return result
def _generateToggleState(self, obj, **args):
"""Returns an array of strings for use by speech and braille that
represent the checked state of the object. This is typically
for check boxes. [[[WDW - should we return an empty array if
we can guarantee we know this thing is not checkable?]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
acss = self.voice(STATE)
result = generator.Generator._generateToggleState(self, obj, **args)
if result:
result.extend(acss)
return result
#####################################################################
# #
# Link information #
# #
#####################################################################
def _generateLinkInfo(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the protocol of the URI of
the link associated with obj.
"""
result = []
acss = self.voice(HYPERLINK)
# Get the URI for the link of interest and parse it. The parsed
# URI is returned as a tuple containing six components:
# scheme://netloc/path;parameters?query#fragment.
#
link_uri = self._script.utilities.uri(obj)
if not link_uri:
# [[[TODO - JD: For some reason, this is failing for certain
# links. The current whereAmI code says, "It might be an anchor.
# Try to speak the text." and passes things off to whereAmI's
# _speakText method. That won't work in the new world order.
# Therefore, for now, I will hack in some code to do that
# work here so that the before and after end results match.]]]
#
result.extend(self._generateLabel(obj))
result.extend(self._generateRoleName(obj))
result.append(self._script.utilities.displayedText(obj))
else:
link_uri_info = urllib.parse.urlparse(link_uri)
if link_uri_info[0] in ["ftp", "ftps", "file"]:
fileName = link_uri_info[2].split('/')
result.append(messages.LINK_TO_FILE \
% {"uri" : link_uri_info[0],
"file" : fileName[-1]})
else:
linkOutput = messages.LINK_WITH_PROTOCOL % link_uri_info[0]
text = self._script.utilities.displayedText(obj)
if not text:
# If there's no text for the link, expose part of the
# URI to the user.
#
text = self._script.utilities.linkBasename(obj)
if text:
linkOutput += " " + text
result.append(linkOutput)
if obj.childCount and obj[0].getRole() == pyatspi.ROLE_IMAGE:
result.extend(self._generateRoleName(obj[0]))
if result:
result.extend(acss)
return result
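    # For reference (annotation): urllib.parse.urlparse() splits a URI into
    # (scheme, netloc, path, params, query, fragment). For
    # "ftp://host/pub/file.txt", link_uri_info[0] is "ftp" and
    # link_uri_info[2].split('/')[-1] is "file.txt", so the branch above
    # speaks messages.LINK_TO_FILE with uri="ftp" and file="file.txt".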
def _generateSiteDescription(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that describe the site (same or different)
pointed to by the URI of the link associated with obj.
"""
result = []
acss = self.voice(HYPERLINK)
link_uri = self._script.utilities.uri(obj)
if link_uri:
link_uri_info = urllib.parse.urlparse(link_uri)
else:
return result
doc_uri = self._script.utilities.documentFrameURI()
if doc_uri:
doc_uri_info = urllib.parse.urlparse(doc_uri)
if link_uri_info[1] == doc_uri_info[1]:
if link_uri_info[2] == doc_uri_info[2]:
result.append(messages.LINK_SAME_PAGE)
else:
result.append(messages.LINK_SAME_SITE)
else:
# check for different machine name on same site
#
linkdomain = link_uri_info[1].split('.')
docdomain = doc_uri_info[1].split('.')
if len(linkdomain) > 1 and len(docdomain) > 1 \
and linkdomain[-1] == docdomain[-1] \
and linkdomain[-2] == docdomain[-2]:
result.append(messages.LINK_SAME_SITE)
else:
result.append(messages.LINK_DIFFERENT_SITE)
if result:
result.extend(acss)
return result
def _generateFileSize(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the size (Content-length) of
the file pointed to by the URI of the link associated with
obj.
"""
result = []
acss = self.voice(HYPERLINK)
sizeString = ""
uri = self._script.utilities.uri(obj)
if not uri:
return result
try:
x = urllib.request.urlopen(uri)
try:
sizeString = x.info()['Content-length']
except KeyError:
pass
except (ValueError, urllib.error.URLError, OSError):
pass
if sizeString:
size = int(sizeString)
if size < 10000:
result.append(messages.fileSizeBytes(size))
elif size < 1000000:
result.append(messages.FILE_SIZE_KB % (float(size) * .001))
elif size >= 1000000:
result.append(messages.FILE_SIZE_MB % (float(size) * .000001))
if result:
result.extend(acss)
return result
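    # Worked example (annotation, hypothetical sizes): a Content-length of
    # "2500000" is spoken via messages.FILE_SIZE_MB % 2.5, "250000" via
    # messages.FILE_SIZE_KB % 250.0, and anything below 10000 bytes via
    # messages.fileSizeBytes().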
#####################################################################
# #
# Image information #
# #
#####################################################################
def _generateImage(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
        specifications) that represent the image on the object, if
it exists. Otherwise, an empty array is returned.
"""
result = []
acss = self.voice(DEFAULT)
try:
image = obj.queryImage()
except:
pass
else:
args['role'] = pyatspi.ROLE_IMAGE
result.extend(self.generate(obj, **args))
result.extend(acss)
return result
#####################################################################
# #
# Table interface information #
# #
#####################################################################
def _generateNewRowHeader(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the row header for an object
that is in a table, if it exists and if it is different from
the previous row header. Otherwise, an empty array is
returned. The previous row header is determined by looking at
the row header for the 'priorObj' attribute of the args
dictionary. The 'priorObj' is typically set by Orca to be the
previous object with focus.
"""
result = []
acss = self.voice(DEFAULT)
if obj:
priorObj = args.get('priorObj', None)
try:
priorParent = priorObj.parent
except:
priorParent = None
if (obj.getRole() == pyatspi.ROLE_TABLE_CELL) \
or (obj.parent and obj.parent.getRole() == pyatspi.ROLE_TABLE):
try:
table = priorParent.queryTable()
except:
table = None
if table \
and ((priorObj.getRole() == pyatspi.ROLE_TABLE_CELL) \
or (priorObj.getRole() == pyatspi.ROLE_TABLE)):
index = self._script.utilities.cellIndex(priorObj)
oldRow = table.getRowAtIndex(index)
else:
oldRow = -1
try:
table = obj.parent.queryTable()
except:
pass
else:
index = self._script.utilities.cellIndex(obj)
newRow = table.getRowAtIndex(index)
if (newRow >= 0) \
and (index != newRow) \
and ((newRow != oldRow) \
or (obj.parent != priorParent)):
result = self._generateRowHeader(obj, **args)
if result:
result.extend(acss)
return result
def _generateNewColumnHeader(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the column header for an object
that is in a table, if it exists and if it is different from
the previous column header. Otherwise, an empty array is
returned. The previous column header is determined by looking
at the column header for the 'priorObj' attribute of the args
dictionary. The 'priorObj' is typically set by Orca to be the
previous object with focus.
"""
result = []
acss = self.voice(DEFAULT)
if obj and not args.get('readingRow', False):
priorObj = args.get('priorObj', None)
try:
priorParent = priorObj.parent
except:
priorParent = None
if (obj.getRole() == pyatspi.ROLE_TABLE_CELL) \
or (obj.parent and obj.parent.getRole() == pyatspi.ROLE_TABLE):
try:
table = priorParent.queryTable()
except:
table = None
if table \
and ((priorObj.getRole() == pyatspi.ROLE_TABLE_CELL) \
or (priorObj.getRole() == pyatspi.ROLE_TABLE)):
index = self._script.utilities.cellIndex(priorObj)
oldCol = table.getColumnAtIndex(index)
else:
oldCol = -1
try:
table = obj.parent.queryTable()
except:
pass
else:
index = self._script.utilities.cellIndex(obj)
newCol = table.getColumnAtIndex(index)
if (newCol >= 0) \
and (index != newCol) \
and ((newCol != oldCol) \
or (obj.parent != priorParent)):
result = self._generateColumnHeader(obj, **args)
if result:
result.extend(acss)
return result
def _generateRealTableCell(self, obj, **args):
"""Orca has a feature to automatically read an entire row of a table
        as the user arrows up/down the rows. This leads to complexity in
the code. This method is used to return an array of strings
(and possibly voice and audio specifications) for a single table
cell itself. The string, 'blank', is added for empty cells.
"""
result = []
acss = self.voice(DEFAULT)
oldRole = self._overrideRole('REAL_ROLE_TABLE_CELL', args)
result.extend(self.generate(obj, **args))
self._restoreRole(oldRole, args)
if not result and _settingsManager.getSetting('speakBlankLines') \
and not args.get('readingRow', False):
result.append(messages.BLANK)
if result:
result.extend(acss)
return result
def _generateUnselectedCell(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
        specifications) if this is an icon within a layered pane or a
table cell within a table or a tree table and the item is
focused but not selected. Otherwise, an empty array is
returned. [[[WDW - I wonder if this string should be moved to
settings.py.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(STATE)
        # If this is an icon within a layered pane or a table cell
# within a table or a tree table and the item is focused but not
# selected, let the user know. See bug #486908 for more details.
#
checkIfSelected = False
objRole, parentRole, state = None, None, None
if obj:
objRole = obj.getRole()
state = obj.getState()
if obj.parent:
parentRole = obj.parent.getRole()
if objRole == pyatspi.ROLE_TABLE_CELL \
and (parentRole == pyatspi.ROLE_TREE_TABLE \
or parentRole == pyatspi.ROLE_TABLE):
checkIfSelected = True
# If we met the last set of conditions, but we got here by
# moving left or right on the same row, then don't announce the
# selection state to the user. See bug #523235 for more details.
#
lastKey, mods = self._script.utilities.lastKeyAndModifiers()
if checkIfSelected and lastKey in ["Left", "Right"]:
checkIfSelected = False
if objRole == pyatspi.ROLE_ICON \
and parentRole == pyatspi.ROLE_LAYERED_PANE:
checkIfSelected = True
if checkIfSelected \
and state and not state.contains(pyatspi.STATE_SELECTED):
result.append(object_properties.STATE_UNSELECTED_TABLE_CELL)
result.extend(acss)
return result
def _generateColumn(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) reflecting the column number of a cell.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
col = -1
if obj.parent.getRole() == pyatspi.ROLE_TABLE_CELL:
obj = obj.parent
parent = obj.parent
try:
table = parent.queryTable()
except:
if args.get('guessCoordinates', False):
col = self._script.pointOfReference.get('lastColumn', -1)
else:
index = self._script.utilities.cellIndex(obj)
col = table.getColumnAtIndex(index)
if col >= 0:
result.append(messages.TABLE_COLUMN % (col + 1))
if result:
result.extend(acss)
return result
def _generateRow(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) reflecting the row number of a cell.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
row = -1
if obj.parent.getRole() == pyatspi.ROLE_TABLE_CELL:
obj = obj.parent
parent = obj.parent
try:
table = parent.queryTable()
except:
if args.get('guessCoordinates', False):
row = self._script.pointOfReference.get('lastRow', -1)
else:
index = self._script.utilities.cellIndex(obj)
row = table.getRowAtIndex(index)
if row >= 0:
result.append(messages.TABLE_ROW % (row + 1))
if result:
result.extend(acss)
return result
def _generateColumnAndRow(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) reflecting the position of the cell in terms
of its column number, the total number of columns, its row,
and the total number of rows.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
if obj.parent.getRole() == pyatspi.ROLE_TABLE_CELL:
obj = obj.parent
parent = obj.parent
try:
table = parent.queryTable()
except:
table = None
else:
index = self._script.utilities.cellIndex(obj)
col = table.getColumnAtIndex(index)
row = table.getRowAtIndex(index)
result.append(messages.TABLE_COLUMN_DETAILED \
% {"index" : (col + 1),
"total" : table.nColumns})
result.append(messages.TABLE_ROW_DETAILED \
% {"index" : (row + 1),
"total" : table.nRows})
if result:
result.extend(acss)
return result
def _generateEndOfTableIndicator(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) indicating that this cell is the last cell
in the table.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
if _settingsManager.getSetting('speechVerbosityLevel') \
== settings.VERBOSITY_LEVEL_VERBOSE:
if obj.getRole() == pyatspi.ROLE_TABLE_CELL:
cell = obj
else:
cell = self._script.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_TABLE_CELL], [pyatspi.ROLE_FRAME])
try:
table = cell.parent.queryTable()
except:
pass
else:
index = self._script.utilities.cellIndex(cell)
row = table.getRowAtIndex(index)
col = table.getColumnAtIndex(index)
if row + 1 == table.nRows and col + 1 == table.nColumns:
result.append(messages.TABLE_END)
if result:
result.extend(acss)
return result
#####################################################################
# #
# Terminal information #
# #
#####################################################################
def _generateTerminal(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) used especially for handling terminal objects.
This either is the name of the frame the terminal is in or the
displayed label of the terminal. [[[WDW - it might be nice
to return an empty array if this is not a terminal.]]]
"""
result = []
acss = self.voice(DEFAULT)
title = None
frame = self._script.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_FRAME], [])
if frame:
title = frame.name
if not title:
title = self._script.utilities.displayedLabel(obj)
result.append(title)
if result:
result.extend(acss)
return result
#####################################################################
# #
# Text interface information #
# #
#####################################################################
def _generateCurrentLineText(self, obj, **args):
"""Returns an array of strings for use by speech and braille
that represents the current line of text, if
this is a text object. [[[WDW - consider returning an empty
array if this is not a text object.]]]
"""
acss = self.voice(DEFAULT)
result = generator.Generator._generateCurrentLineText(self, obj, **args)
if result and result[0]:
if result[0] == "\n":
result[0] = messages.BLANK
result.extend(acss)
return result
def _getCharacterAttributes(self,
obj,
text,
textOffset,
lineIndex,
keys=["style", "weight", "underline"]):
"""Helper function that returns a string containing the
given attributes from keys for the given character.
"""
attribStr = ""
defaultAttributes = text.getDefaultAttributes()
keyList, attributesDictionary = \
self._script.utilities.stringToKeysAndDict(defaultAttributes)
charAttributes = text.getAttributes(textOffset)
if charAttributes[0]:
keyList, charDict = \
self._script.utilities.stringToKeysAndDict(charAttributes[0])
for key in keyList:
attributesDictionary[key] = charDict[key]
if attributesDictionary:
for key in keys:
localizedKey = text_attribute_names.getTextAttributeName(key)
if key in attributesDictionary:
attribute = attributesDictionary[key]
localizedValue = \
text_attribute_names.getTextAttributeName(attribute)
if attribute:
# If it's the 'weight' attribute and greater than 400,
# just speak it as bold, otherwise speak the weight.
#
if key == "weight":
if int(attribute) > 400:
attribStr += " %s" % messages.BOLD
elif key == "underline":
if attribute != "none":
attribStr += " %s" % localizedKey
elif key == "style":
if attribute != "normal":
attribStr += " %s" % localizedValue
else:
attribStr += " "
attribStr += (localizedKey + " " + localizedValue)
# Also check to see if this is a hypertext link.
#
if self._script.utilities.linkIndex(obj, textOffset) >= 0:
attribStr += " %s" % messages.LINK
return attribStr
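    # Worked example (annotation, hypothetical attribute values): a
    # character with style="italic", weight="700" and underline="single"
    # yields " italic bold underline": style contributes its localized
    # value, any weight above 400 collapses to messages.BOLD, and
    # underline contributes its localized key name.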
def _getTextInformation(self, obj):
"""Returns [textContents, startOffset, endOffset, selected] as
follows:
A. if no text on the current line is selected, the current line
B. if text is selected, the selected text
C. if the current line is blank/empty, 'blank'
Also sets up a 'textInformation' attribute in
self._script.generatorCache to prevent computing this
information repeatedly while processing a single event.
"""
try:
return self._script.generatorCache['textInformation']
except:
pass
textObj = obj.queryText()
caretOffset = textObj.caretOffset
textContents = ""
selected = False
nSelections = textObj.getNSelections()
[current, other] = self._script.utilities.hasTextSelections(obj)
if current or other:
selected = True
[textContents, startOffset, endOffset] = \
self._script.utilities.allSelectedText(obj)
else:
# Get the line containing the caret
#
[line, startOffset, endOffset] = textObj.getTextAtOffset(
textObj.caretOffset,
pyatspi.TEXT_BOUNDARY_LINE_START)
if len(line):
# Check for embedded object characters. If we find any,
# expand the text. TODO - JD: This expansion doesn't
# include the role information; just the text. However,
# the handling of roles should probably be dealt with as
# a formatting string. We have not yet worked out how to
# do this with Gecko (primary user of embedded object
# characters). Until we do, this expansion is better than
# presenting the actual embedded object character.
#
if self._script.EMBEDDED_OBJECT_CHARACTER in line:
line = self._script.utilities.expandEOCs(
obj, startOffset, endOffset)
line = self._script.utilities.adjustForRepeats(line)
textContents = line
else:
char = textObj.getTextAtOffset(caretOffset,
pyatspi.TEXT_BOUNDARY_CHAR)
if char[0] == "\n" and startOffset == caretOffset \
and _settingsManager.getSetting('speakBlankLines'):
textContents = (messages.BLANK)
self._script.generatorCache['textInformation'] = \
[textContents, startOffset, endOffset, selected]
return self._script.generatorCache['textInformation']
def _generateTextContent(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
        specifications) containing the text content. This relies on
        _getTextInformation, which caches its result for the current event.
"""
try:
text = obj.queryText()
except NotImplementedError:
return []
result = []
acss = self.voice(DEFAULT)
[line, startOffset, endOffset, selected] = \
self._getTextInformation(obj)
# The empty string seems to be messing with using 'or' in
# formatting strings.
#
if line:
result.append(line)
result.extend(acss)
return result
def _generateTextContentWithAttributes(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) containing the text content, obtained from the
        'textInformation' value, with character attribute information
        mixed in. This relies on _getTextInformation, which caches its
        result for the current event.
"""
try:
text = obj.queryText()
except NotImplementedError:
return []
acss = self.voice(DEFAULT)
[line, startOffset, endOffset, selected] = \
self._getTextInformation(obj)
newLine = ""
lastAttribs = None
textOffset = startOffset
for i in range(0, len(line)):
attribs = self._getCharacterAttributes(obj, text, textOffset, i)
if attribs and attribs != lastAttribs:
if newLine:
newLine += " ; "
newLine += attribs
newLine += " "
lastAttribs = attribs
newLine += line[i]
textOffset += 1
attribs = self._getCharacterAttributes(obj,
text,
startOffset,
0,
["paragraph-style"])
if attribs:
if newLine:
newLine += " ; "
newLine += attribs
result = [newLine]
result.extend(acss)
return result
def _generateAnyTextSelection(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that says if any of the text for the entire
object is selected. [[[WDW - I wonder if this string should be
moved to settings.py.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
[line, startOffset, endOffset, selected] = \
self._getTextInformation(obj)
if selected:
result.append(messages.TEXT_SELECTED)
result.extend(acss)
return result
def _generateAllTextSelection(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that says if all the text for the entire
object is selected. [[[WDW - I wonder if this string should be
moved to settings.py.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
try:
textObj = obj.queryText()
except:
pass
else:
noOfSelections = textObj.getNSelections()
if noOfSelections == 1:
[string, startOffset, endOffset] = \
textObj.getTextAtOffset(0, pyatspi.TEXT_BOUNDARY_LINE_START)
if startOffset == 0 and endOffset == len(string):
result = [messages.TEXT_SELECTED]
result.extend(acss)
return result
def generateTextIndentation(self, obj, **args):
return self._generateTextIndentation(obj, **args)
def _generateTextIndentation(self, obj, **args):
"""Speaks a summary of the number of spaces and/or tabs at the
beginning of the given line.
        Arguments:
        - obj: the text object. The line to check for spaces and tabs is
          taken from args['alreadyFocused'] if set, otherwise from the
          line at the caret.
        """
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
acss = self.voice(SYSTEM)
if not _settingsManager.getSetting('enableSpeechIndentation'):
return []
line = args.get('alreadyFocused', "")
if not line:
[line, caretOffset, startOffset] = \
self._script.getTextLineAtCaret(obj)
# For the purpose of speaking the text indentation, replace
        # occurrences of the non-breaking space character with spaces.
line = line.replace("\u00a0", " ")
spaceCount = 0
tabCount = 0
utterance = ""
offset = 0
while True:
while (offset < len(line)) and line[offset] == ' ':
spaceCount += 1
offset += 1
if spaceCount:
utterance += "%s " % messages.spacesCount(spaceCount)
while (offset < len(line)) and line[offset] == '\t':
tabCount += 1
offset += 1
if tabCount:
utterance += "%s " % messages.tabsCount(tabCount)
if not (spaceCount or tabCount):
break
spaceCount = tabCount = 0
result = [utterance]
if result and result[0]:
result.extend(acss)
return result
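    # Worked example (annotation): for the line "    \t\tdef f():" the loop
    # above first counts a run of 4 spaces, then a run of 2 tabs, building
    # an utterance like "4 spaces 2 tabs " from messages.spacesCount() and
    # messages.tabsCount() before stopping at the first non-blank character.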
#####################################################################
# #
# Tree interface information #
# #
#####################################################################
def _generateNewNodeLevel(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represents the tree node level of the
object, or an empty array if the object is not a tree node or
        if the node level is not different from that of the 'priorObj'
        attribute of the args dictionary. The 'priorObj'
is typically set by Orca to be the previous object with
focus.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
oldLevel = self._script.utilities.nodeLevel(args.get('priorObj', None))
newLevel = self._script.utilities.nodeLevel(obj)
if (oldLevel != newLevel) and (newLevel >= 0):
result.extend(self._generateNodeLevel(obj, **args))
result.extend(acss)
return result
#####################################################################
# #
# Value interface information #
# #
#####################################################################
    def _generatePercentage(self, obj, **args):
        """Returns an array of strings (and possibly voice and audio
        specifications) that represents the percentage value of the
        object. This is typically for progress bars. [[[WDW - we
        should consider returning an empty array if there is no
        value.]]]
        """
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
try:
value = obj.queryValue()
except NotImplementedError:
pass
else:
percentValue = \
(value.currentValue
/ (value.maximumValue - value.minimumValue)) \
* 100.0
result.append(messages.percentage(percentValue))
if result:
result.extend(acss)
return result
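    # Worked example (annotation, hypothetical values): a progress bar with
    # currentValue=25, minimumValue=0 and maximumValue=200 is announced as
    # messages.percentage(12.5), i.e. 25 / (200 - 0) * 100.0.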
#####################################################################
# #
# Hierarchy and related dialog information #
# #
#####################################################################
def _generateNewRadioButtonGroup(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represents the radio button group label
of the object, or an empty array if the object has no such
        label or if the radio button group is not different from that
        of the 'priorObj' attribute of the args dictionary. The
'priorObj' is typically set by Orca to be the previous object
with focus.
"""
# [[[TODO: WDW - hate duplicating code from _generateRadioButtonGroup
# but don't want to call it because it will make the same
# AT-SPI method calls.]]]
#
result = []
acss = self.voice(DEFAULT)
priorObj = args.get('priorObj', None)
if obj and obj.getRole() == pyatspi.ROLE_RADIO_BUTTON:
radioGroupLabel = None
inSameGroup = False
relations = obj.getRelationSet()
for relation in relations:
if (not radioGroupLabel) \
and (relation.getRelationType() \
== pyatspi.RELATION_LABELLED_BY):
radioGroupLabel = relation.getTarget(0)
if (not inSameGroup) \
and (relation.getRelationType() \
== pyatspi.RELATION_MEMBER_OF):
for i in range(0, relation.getNTargets()):
target = relation.getTarget(i)
if target == priorObj:
inSameGroup = True
break
if (not inSameGroup) and radioGroupLabel:
result.append(self._script.utilities.\
displayedText(radioGroupLabel))
result.extend(acss)
return result
def _generateNumberOfChildren(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represents the number of children the
object has. [[[WDW - can we always return an empty array if
this doesn't apply?]]] [[[WDW - I wonder if this string should
be moved to settings.py.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
childNodes = self._script.utilities.childNodes(obj)
children = len(childNodes)
if children:
result.append(messages.itemCount(children))
result.extend(acss)
return result
def _generateNoShowingChildren(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that says if this object has no showing
        children (e.g., it's an empty table or list).
[[[WDW - can we always return an empty array if this doesn't
apply?]]] [[[WDW - I wonder if this string should be moved to
settings.py.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
hasItems = False
for child in obj:
state = child.getState()
if state.contains(pyatspi.STATE_SHOWING):
hasItems = True
break
if not hasItems:
result.append(messages.ZERO_ITEMS)
result.extend(acss)
return result
    def _generateNoChildren(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that says if this object has no children at
        all (e.g., it's an empty table or list). [[[WDW
- can we always return an empty array if this doesn't
apply?]]] [[[WDW - I wonder if this string should be moved to
settings.py.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
if not obj.childCount:
result.append(messages.ZERO_ITEMS)
result.extend(acss)
return result
def _generateFocusedItem(self, obj, **args):
result = []
role = args.get('role', obj.getRole())
if role != pyatspi.ROLE_LIST:
return result
try:
s = obj.querySelection()
except NotImplementedError:
isFocused = \
lambda x: x and x.getState().contains(pyatspi.STATE_FOCUSED)
items = pyatspi.utils.findAllDescendants(obj, isFocused)
else:
items = [s.getSelectedChild(i) for i in range(s.nSelectedChildren)]
if not items and obj.childCount:
items.append(obj[0])
items = list(map(self._generateName, items))
for item in items:
result.extend(item)
return result
def _generateSelectedItemCount(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
        specifications) indicating how many items are selected in this
        container and the position of the current item. This object will be an icon
panel or a layered pane.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
container = obj
if not 'Selection' in pyatspi.listInterfaces(container):
container = obj.parent
if not 'Selection' in pyatspi.listInterfaces(container):
return []
result = []
acss = self.voice(SYSTEM)
childCount = container.childCount
selectedCount = len(self._script.utilities.selectedChildren(container))
result.append(messages.selectedItemsCount(selectedCount, childCount))
result.extend(acss)
result.append(self._script.formatting.getString(
mode='speech',
stringType='iconindex') \
% {"index" : obj.getIndexInParent() + 1,
"total" : childCount})
result.extend(acss)
return result
def _generateSelectedItems(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) containing the names of all the selected items.
This object will be an icon panel or a layered pane.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
container = obj
if not 'Selection' in pyatspi.listInterfaces(container):
container = obj.parent
if not 'Selection' in pyatspi.listInterfaces(container):
return []
selectedItems = self._script.utilities.selectedChildren(container)
return list(map(self._generateLabelAndName, selectedItems))
def _generateUnfocusedDialogCount(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that says how many unfocused alerts and
dialogs are associated with the application for this object.
[[[WDW - I wonder if this string should be moved to
settings.py.]]]
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
# If this application has more than one unfocused alert or
# dialog window, then speak '<m> unfocused dialogs'
# to let the user know.
#
try:
alertAndDialogCount = \
self._script.utilities.unfocusedAlertAndDialogCount(obj)
except:
alertAndDialogCount = 0
if alertAndDialogCount > 0:
result.append(messages.dialogCountSpeech(alertAndDialogCount))
result.extend(acss)
return result
def _generateAncestors(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the text of the ancestors for
the object. This is typically used to present the context for
an object (e.g., the names of the window, the panels, etc.,
that the object is contained in). If the 'priorObj' attribute
of the args dictionary is set, only the differences in
ancestry between the 'priorObj' and the current obj will be
computed. The 'priorObj' is typically set by Orca to be the
previous object with focus.
"""
result = []
priorObj = args.get('priorObj', None)
commonAncestor = self._script.utilities.commonAncestor(priorObj, obj)
try:
role = commonAncestor.getRole()
except:
pass
else:
if role == pyatspi.ROLE_COMBO_BOX:
return []
skipRoles = args.get('skipRoles', [])
stopAtRoles = args.get('stopAtRoles', [])
stopAtRoles.append(pyatspi.ROLE_APPLICATION)
if obj != commonAncestor:
parent = obj.parent
while parent and not parent in [commonAncestor, parent.parent]:
parentRole = parent.getRole()
if parentRole in stopAtRoles:
break
if parentRole not in skipRoles \
and not self._script.utilities.isLayoutOnly(parent):
result.append(self.generate(parent, formatType='focused'))
parent = parent.parent
result.reverse()
return result
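    # Illustrative note (annotation): moving focus between two buttons in
    # the same dialog makes that dialog the commonAncestor, so only the
    # ancestors between the new button and the dialog (panels, group
    # boxes, ...) are regenerated here; the window itself is not
    # re-announced.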
def _generateOldAncestors(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the text of the ancestors for
the object being left."""
return []
def _generateNewAncestors(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the text of the ancestors for
the object. This is typically used to present the context for
an object (e.g., the names of the window, the panels, etc.,
that the object is contained in). If the 'priorObj' attribute
of the args dictionary is set, only the differences in
ancestry between the 'priorObj' and the current obj will be
computed. Otherwise, no ancestry will be computed. The
'priorObj' is typically set by Orca to be the previous object
with focus.
"""
result = []
if args.get('priorObj', None):
result = self._generateAncestors(obj, **args)
return result
def _generateParentRoleName(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) containing the role name of the parent of obj.
"""
if args.get('role', obj.getRole()) == pyatspi.ROLE_ICON \
and args.get('formatType', None) \
in ['basicWhereAmI', 'detailedWhereAmI']:
return [object_properties.ROLE_ICON_PANEL]
elif obj.parent.getRole() == pyatspi.ROLE_TABLE_CELL:
obj = obj.parent
return self._generateRoleName(obj.parent)
def _generateToolbar(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) containing the name and role of the toolbar
which contains obj.
"""
result = []
ancestor = self._script.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_TOOL_BAR], [pyatspi.ROLE_FRAME])
if ancestor:
result.extend(self._generateLabelAndName(ancestor))
result.extend(self._generateRoleName(ancestor))
return result
def _generatePositionInGroup(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the relative position of an
object in a group.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
position = -1
total = -1
try:
relations = obj.getRelationSet()
except:
relations = []
for relation in relations:
if relation.getRelationType() == pyatspi.RELATION_MEMBER_OF:
total = 0
for i in range(0, relation.getNTargets()):
target = relation.getTarget(i)
if target.getState().contains(pyatspi.STATE_SHOWING):
total += 1
if target == obj:
position = total
if position >= 0:
# Adjust the position because the relations tend to be given
# in the reverse order.
position = total - position + 1
result.append(self._script.formatting.getString(
mode='speech',
stringType='groupindex') \
% {"index" : position,
"total" : total})
result.extend(acss)
return result
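    # Worked example (annotation): with three SHOWING radio buttons in one
    # MEMBER_OF relation, an obj counted first (position == 1) ends up
    # announced with index 3 of total 3, because position is flipped to
    # total - position + 1 to compensate for the reversed target order.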
def _generatePositionInList(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the relative position of an
object in a list.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText') \
or not (_settingsManager.getSetting('enablePositionSpeaking') \
or args.get('forceList', False)):
return []
result = []
acss = self.voice(SYSTEM)
position = -1
index = 0
total = 0
name = self._generateName(obj)
# TODO - JD: There might be a better way to do this (e.g. pass
# roles in maybe?).
#
role = args.get('role', obj.getRole())
if role == pyatspi.ROLE_COMBO_BOX:
obj = obj[0]
elif role in [pyatspi.ROLE_PAGE_TAB,
pyatspi.ROLE_MENU,
pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_CHECK_MENU_ITEM,
pyatspi.ROLE_RADIO_MENU_ITEM]:
obj = obj.parent
elif role == pyatspi.ROLE_LIST_ITEM:
parent = obj.parent
for relation in obj.getRelationSet():
if relation.getRelationType() == \
pyatspi.RELATION_NODE_CHILD_OF:
# childNodes assumes that we have an accessible table
# interface to work with. If we don't, it will fail. So
# don't set the parent until verifying the interface we
# expect actually exists.
#
target = relation.getTarget(0)
try:
target.parent.queryTable()
except:
pass
else:
parent = target
break
obj = parent
else:
obj = obj.parent
# We want to return the position relative to this hierarchical
# level and not the entire list. If the object in question
# uses the NODE_CHILD_OF relationship, we need to use it instead
# of the childCount.
#
childNodes = self._script.utilities.childNodes(obj)
total = len(childNodes)
for i in range(0, total):
childName = self._generateName(childNodes[i])
if childName == name:
position = i+1
break
if not total:
for child in obj:
nextName = self._generateName(child)
state = child.getState()
if not nextName or nextName[0] in ["", "Empty", "separator"] \
or not state.contains(pyatspi.STATE_VISIBLE):
continue
index += 1
total += 1
if nextName == name:
position = index
if (_settingsManager.getSetting('enablePositionSpeaking') \
or args.get('forceList', False)) \
and position >= 0:
result.append(self._script.formatting.getString(
mode='speech',
stringType='groupindex') \
% {"index" : position,
"total" : total})
result.extend(acss)
return result
def _generateDefaultButton(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the default button in a dialog.
This method should initially be called with a top-level window.
"""
result = []
button = self._script.utilities.defaultButton(obj)
if button and button.getState().contains(pyatspi.STATE_SENSITIVE):
name = self._generateName(button)
if name:
result.append(messages.DEFAULT_BUTTON_IS % name[0])
return result
def generateDefaultButton(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the default button of the window
containing the object.
"""
return self._generateDefaultButton(obj, **args)
def _generateStatusBar(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the status bar of a window.
This method should initially be called with a top-level window.
"""
result = []
statusBar = self._script.utilities.statusBar(obj)
if statusBar:
name = self._generateName(statusBar)
if name:
result.extend(name)
else:
for child in statusBar:
result.extend(self._generateName(child))
return result
def generateStatusBar(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the status bar of the window
containing the object.
"""
return self._generateStatusBar(obj, **args)
def generateTitle(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
        specifications) that represent the title of the window
        containing the object, along with information associated with
any unfocused dialog boxes.
"""
result = []
acss = self.voice(DEFAULT)
frame, dialog = self._script.utilities.frameAndDialog(obj)
if frame:
result.append(self._generateLabelAndName(frame))
if dialog:
result.append(self._generateLabelAndName(dialog))
alertAndDialogCount = \
self._script.utilities.unfocusedAlertAndDialogCount(obj)
if alertAndDialogCount > 0:
dialogs = [messages.dialogCountSpeech(alertAndDialogCount)]
dialogs.extend(acss)
result.append(dialogs)
return result
#####################################################################
# #
# Keyboard shortcut information #
# #
#####################################################################
def _generateAccelerator(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the accelerator for the object,
or an empty array if no accelerator can be found.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
[mnemonic, shortcut, accelerator] = \
self._script.utilities.mnemonicShortcutAccelerator(obj)
if accelerator:
result.append(accelerator)
result.extend(acss)
return result
def _generateMnemonic(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the mnemonic for the object, or
an empty array if no mnemonic can be found.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
if _settingsManager.getSetting('enableMnemonicSpeaking') \
or args.get('forceMnemonic', False):
[mnemonic, shortcut, accelerator] = \
self._script.utilities.mnemonicShortcutAccelerator(obj)
if mnemonic:
mnemonic = mnemonic[-1] # we just want a single character
if not mnemonic and shortcut:
mnemonic = shortcut
if mnemonic:
result = [mnemonic]
result.extend(acss)
return result
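    # Worked example (annotation, hypothetical values): if
    # mnemonicShortcutAccelerator() returns ["Alt+f", "f", "Ctrl+Q"], the
    # spoken mnemonic is "f" (mnemonic[-1] keeps only the final
    # character); the shortcut is substituted only when no mnemonic
    # exists.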
#####################################################################
# #
# Tutorial information #
# #
#####################################################################
def _generateTutorial(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the tutorial for the object.
The tutorial will only be generated if the user has requested
tutorials, and will then be generated according to the
tutorial generator. A tutorial can be forced by setting the
'forceTutorial' attribute of the args dictionary to True.
"""
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
result = []
acss = self.voice(SYSTEM)
alreadyFocused = args.get('alreadyFocused', False)
forceTutorial = args.get('forceTutorial', False)
result.extend(self._script.tutorialGenerator.getTutorial(
obj,
alreadyFocused,
forceTutorial))
if args.get('role', obj.getRole()) == pyatspi.ROLE_ICON \
and args.get('formatType', 'unfocused') == 'basicWhereAmI':
frame, dialog = self._script.utilities.frameAndDialog(obj)
if frame:
result.extend(self._script.tutorialGenerator.getTutorial(
frame,
alreadyFocused,
forceTutorial))
if result and result[0]:
result.extend(acss)
return result
#####################################################################
# #
# Other things for prosody and voice selection #
# #
#####################################################################
def _generatePause(self, obj, **args):
return PAUSE
def _generateLineBreak(self, obj, **args):
return LINE_BREAK
def voice(self, key=None, **args):
"""Returns an array containing a voice. The key is a value
to be used to look up the voice in the settings.py:voices
dictionary. Other arguments can be passed in for future
decision making.
"""
voicename = voiceType.get(key) or voiceType.get(DEFAULT)
voices = _settingsManager.getSetting('voices')
rv = voices.get(voicename)
if rv and rv.get('established') == False:
rv.pop('established')
return [rv]
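    # Usage sketch (annotation, hypothetical settings): voice(SYSTEM) looks
    # up voiceType[SYSTEM], falling back to voiceType[DEFAULT], in the
    # 'voices' setting and returns a one-element list containing that
    # ACSS; the _generate* methods above attach it to their utterances
    # with result.extend(acss).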
|
h4ck3rm1k3/orca-sonar
|
src/orca/speech_generator.py
|
Python
|
lgpl-2.1
| 74,935
|
[
"ORCA"
] |
7354eadaff2365e72a5aa8133f57b0b61b1e1501bb09abf4fca460f94d4d494d
|
# Copyright 2022 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import gpflow
from gpflow.config import default_float
def test_update_vgp_data() -> None:
rng = np.random.default_rng(20220223)
sample = lambda *shape: tf.convert_to_tensor(rng.standard_normal(shape), dtype=default_float())
n_inputs = 2
n_outputs = 1
n_data_1 = 3
n_data_2 = 2
X_1 = tf.Variable(sample(n_data_1, n_inputs), shape=(None, n_inputs), trainable=False)
Y_1 = tf.Variable(sample(n_data_1, n_outputs), shape=(None, n_outputs), trainable=False)
model = gpflow.models.VGP(
(X_1, Y_1),
gpflow.kernels.SquaredExponential(),
gpflow.likelihoods.Gaussian(),
num_latent_gps=n_outputs,
)
gpflow.optimizers.Scipy().minimize(
model.training_loss_closure(),
variables=model.trainable_variables,
options=dict(maxiter=25),
compile=True,
)
X_test = tf.constant(tf.convert_to_tensor(X_1))
mean_before, var_before = model.predict_f(X_test)
X_2 = sample(n_data_2, n_inputs)
Y_2 = sample(n_data_2, n_outputs)
gpflow.models.vgp.update_vgp_data(
model, (tf.concat([X_1, X_2], axis=0), tf.concat([Y_1, Y_2], axis=0))
)
(
mean_after,
var_after,
) = model.predict_f(X_test)
np.testing.assert_allclose(mean_before, mean_after, atol=1e-5)
np.testing.assert_allclose(var_before, var_after, atol=1e-6)
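# Note (annotation): the assertions above pin down the contract of
# gpflow.models.vgp.update_vgp_data -- growing the (X, Y) variables with
# extra rows must leave the already-optimized posterior predictions at the
# original inputs approximately unchanged, rather than resetting the
# variational distribution.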
|
GPflow/GPflow
|
tests/gpflow/models/test_vgp.py
|
Python
|
apache-2.0
| 1,991
|
[
"Gaussian"
] |
b6ae1c9a69267ede599cd4e928840ec20cc2b954c8e0790707fd083e608f3dee
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import decorators
from telemetry.core import util
from core import find_dependencies
class FindDependenciesTest(unittest.TestCase):
@decorators.Disabled('chromeos') # crbug.com/818230
def testFindPythonDependencies(self):
try:
dog_object_path = os.path.join(
util.GetUnittestDataDir(),
'dependency_test_dir', 'dog', 'dog', 'dog_object.py')
cat_module_path = os.path.join(
util.GetUnittestDataDir(),
'dependency_test_dir', 'other_animals', 'cat', 'cat')
cat_module_init_path = os.path.join(cat_module_path, '__init__.py')
cat_object_path = os.path.join(cat_module_path, 'cat_object.py')
dependencies = set(
p for p in find_dependencies.FindPythonDependencies(dog_object_path))
self.assertEquals(dependencies, {
dog_object_path, cat_module_path, cat_module_init_path,
cat_object_path
})
except ImportError: # crbug.com/559527
pass
@decorators.Disabled('chromeos') # crbug.com/818230
def testFindPythonDependenciesWithNestedImport(self):
try:
moose_module_path = os.path.join(
util.GetUnittestDataDir(),
'dependency_test_dir', 'other_animals', 'moose', 'moose')
moose_object_path = os.path.join(moose_module_path, 'moose_object.py')
horn_module_path = os.path.join(moose_module_path, 'horn')
horn_module_init_path = os.path.join(horn_module_path, '__init__.py')
horn_object_path = os.path.join(horn_module_path, 'horn_object.py')
self.assertEquals(
set(p for p in
find_dependencies.FindPythonDependencies(moose_object_path)),
{moose_object_path,
horn_module_path, horn_module_init_path, horn_object_path})
except ImportError: # crbug.com/559527
pass
|
chromium/chromium
|
tools/perf/core/find_dependencies_unittest.py
|
Python
|
bsd-3-clause
| 2,010
|
[
"MOOSE"
] |
59d04bc3e366849d95df9e603ff08ddb5425b3152ce6f4c3532d1ec7eedfe0e6
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from psi4 import core
from psi4.driver import constants
def print_sapt_var(name, value, short=False, start_spacer=" "):
"""
Converts the incoming value as hartree to a correctly formatted Psi print format.
"""
vals = (name, value * 1000, value * constants.hartree2kcalmol, value * constants.hartree2kJmol)
if short:
return start_spacer + "%-20s % 15.8f [mEh]" % vals[:2]
else:
return start_spacer + "%-20s % 15.8f [mEh] % 15.8f [kcal/mol] % 15.8f [kJ/mol]" % vals
def print_sapt_hf_summary(data, name, short=False, delta_hf=False):
ret = " %s Results\n" % name
ret += " " + "-" * 97 + "\n"
# Elst
ret += print_sapt_var("Electrostatics", data["Elst10,r"]) + "\n"
ret += print_sapt_var(" Elst10,r", data["Elst10,r"]) + "\n"
ret += "\n"
core.set_variable("SAPT ELST ENERGY", data["Elst10,r"])
# Exchange
ret += print_sapt_var("Exchange", data["Exch10"]) + "\n"
ret += print_sapt_var(" Exch10", data["Exch10"]) + "\n"
ret += print_sapt_var(" Exch10(S^2)", data["Exch10(S^2)"]) + "\n"
ret += "\n"
core.set_variable("SAPT EXCH ENERGY", data["Exch10"])
ind = data["Ind20,r"] + data["Exch-Ind20,r"]
ind_ab = data["Ind20,r (A<-B)"] + data["Exch-Ind20,r (A<-B)"]
ind_ba = data["Ind20,r (A->B)"] + data["Exch-Ind20,r (A->B)"]
ret += print_sapt_var("Induction", ind) + "\n"
ret += print_sapt_var(" Ind20,r", data["Ind20,r"]) + "\n"
ret += print_sapt_var(" Exch-Ind20,r", data["Exch-Ind20,r"]) + "\n"
ret += print_sapt_var(" Induction (A<-B)", ind_ab) + "\n"
ret += print_sapt_var(" Induction (A->B)", ind_ba) + "\n"
ret += "\n"
core.set_variable("SAPT IND ENERGY", ind)
if delta_hf:
total_sapt = (data["Elst10,r"] + data["Exch10"] + ind)
sapt_hf_delta = delta_hf - total_sapt
core.set_variable("SAPT(DFT) Delta HF", sapt_hf_delta)
ret += print_sapt_var("%-21s" % "Total SAPT", total_sapt, start_spacer=" ") + "\n"
ret += print_sapt_var("%-21s" % "Total HF", delta_hf, start_spacer=" ") + "\n"
ret += print_sapt_var("%-21s" % "Delta HF", sapt_hf_delta, start_spacer=" ") + "\n"
ret += " " + "-" * 97 + "\n"
return ret
else:
# Dispersion
disp = data["Disp20"] + data["Exch-Disp20,u"]
ret += print_sapt_var("Dispersion", disp) + "\n"
ret += print_sapt_var(" Disp20", data["Disp20,u"]) + "\n"
ret += print_sapt_var(" Exch-Disp20", data["Exch-Disp20,u"]) + "\n"
ret += "\n"
core.set_variable("SAPT DISP ENERGY", disp)
# Total energy
total = data["Elst10,r"] + data["Exch10"] + ind + disp
ret += print_sapt_var("Total %-15s" % name, total, start_spacer=" ") + "\n"
core.set_variable("SAPT0 TOTAL ENERGY", total)
core.set_variable("SAPT TOTAL ENERGY", total)
core.set_variable("CURRENT ENERGY", total)
ret += " " + "-" * 97 + "\n"
return ret
def print_sapt_dft_summary(data, name, short=False):
ret = " %s Results\n" % name
ret += " " + "-" * 97 + "\n"
# Elst
ret += print_sapt_var("Electrostatics", data["Elst10,r"]) + "\n"
ret += print_sapt_var(" Elst1,r", data["Elst10,r"]) + "\n"
ret += "\n"
core.set_variable("SAPT ELST ENERGY", data["Elst10,r"])
# Exchange
ret += print_sapt_var("Exchange", data["Exch10"]) + "\n"
ret += print_sapt_var(" Exch1", data["Exch10"]) + "\n"
ret += print_sapt_var(" Exch1(S^2)", data["Exch10(S^2)"]) + "\n"
ret += "\n"
core.set_variable("SAPT EXCH ENERGY", data["Exch10"])
# Induction
ind = data["Ind20,r"] + data["Exch-Ind20,r"]
ind_ab = data["Ind20,r (A<-B)"] + data["Exch-Ind20,r (A<-B)"]
ind_ba = data["Ind20,r (A->B)"] + data["Exch-Ind20,r (A->B)"]
if "Delta HF Correction" in list(data):
ind += data["Delta HF Correction"]
ret += print_sapt_var("Induction", ind) + "\n"
ret += print_sapt_var(" Ind2,r", data["Ind20,r"]) + "\n"
ret += print_sapt_var(" Exch-Ind2,r", data["Exch-Ind20,r"]) + "\n"
ret += print_sapt_var(" Induction (A<-B)", ind_ab) + "\n"
ret += print_sapt_var(" Induction (A->B)", ind_ba) + "\n"
if "Delta HF Correction" in list(data):
ret += print_sapt_var(" delta HF,r (2)", data["Delta HF Correction"]) + "\n"
ret += "\n"
core.set_variable("SAPT IND ENERGY", ind)
# Dispersion
disp = data["Disp20"] + data["Exch-Disp20,u"]
ret += print_sapt_var("Dispersion", disp) + "\n"
ret += print_sapt_var(" Disp2,r", data["Disp20"]) + "\n"
ret += print_sapt_var(" Disp2,u", data["Disp20,u"]) + "\n"
ret += print_sapt_var(" Exch-Disp2,u", data["Exch-Disp20,u"]) + "\n"
ret += "\n"
core.set_variable("SAPT DISP ENERGY", disp)
# Total energy
total = data["Elst10,r"] + data["Exch10"] + ind + disp
ret += print_sapt_var("Total %-15s" % name, total, start_spacer=" ") + "\n"
core.set_variable("SAPT(DFT) TOTAL ENERGY", total)
core.set_variable("SAPT TOTAL ENERGY", total)
core.set_variable("CURRENT ENERGY", total)
ret += " " + "-" * 97 + "\n"
return ret
|
CDSherrill/psi4
|
psi4/driver/procrouting/sapt/sapt_util.py
|
Python
|
lgpl-3.0
| 6,071
|
[
"Psi4"
] |
8ae0dd1d8928dcd2ae5b5b8f3358595888cfc7a7c5d7b3c68f02903e41ceba2b
|
# pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2018 pywws contributors
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Upload weather data to Open Weather Map.
`Open Weather Map`_ is a Latvian-based IT company seeking to provide
affordable weather data.
* Create account: https://home.openweathermap.org/users/sign_up
* API: https://openweathermap.org/stations
* Additional dependency: http://docs.python-requests.org/
* Example ``weather.ini`` configuration::
[openweathermap]
api key = b1b15e88fa797225412429c1c50c122a1
external id = SW1Aweather
station name = Buck House
lat = 51.501
long = -0.142
alt = 10
station id = 583436dd9643a9000196b8d6
[logged]
services = ['openweathermap', 'underground']
Configuring pywws to use Open Weather Map is a bit more complicated than
with other services. Start by running the module to set some default
values in weather.ini (with no other pywws software running)::
python -m pywws.service.openweathermap data_dir
After signing up and logging in to Open Weather Map visit the `API keys
page`_ and copy your default key to the ``api key`` entry in
weather.ini. The ``external id`` field is a single word name to identify
your station. You could use a name based on your post code, or maybe
your id from Weather Underground or CWOP. The ``station name`` is a
longer, human readable, name. I'm not sure what use Open Weather Map
makes of either of these. ``lat`` and ``long`` should be set to the
latitude and longitude of your station (in degrees) and ``alt`` to its
altitude in metres.
After setting (or changing) the above fields you need to "register" your
station with Open Weather Map. This is done by running the module with
the ``-r`` flag::
python -m pywws.service.openweathermap -r -v data_dir
If you already have any stations registered with Open Weather Map this
will show you their details. You can select one of these existing
stations or register a new one. pywws then sends the configuration
values from weather.ini to Open Weather Map.
If this succeeds then Open Weather Map will have allocated a ``station
id`` value which pywws stores in weather.ini. All this complication is
to allow more than one station to be attached to one user's account.
.. _Open Weather Map: https://openweathermap.org/
.. _API keys page: https://home.openweathermap.org/api_keys
"""
from __future__ import absolute_import, unicode_literals
from contextlib import contextmanager
from datetime import timedelta
import json
import logging
import os
import sys
import requests
import pywws.service
__docformat__ = "restructuredtext en"
service_name = os.path.splitext(os.path.basename(__file__))[0]
logger = logging.getLogger(__name__)
class ToService(pywws.service.CatchupDataService):
config = {
'api key' : ('', True, None),
'station id' : ('', True, 'station_id'),
'external id' : ('', False, None),
'station name': ('', False, None),
'lat' : ('', False, None),
'long' : ('', False, None),
'alt' : ('', False, None),
}
logger = logger
service_name = service_name
template = """
#live#
#idx "'dt' : %s,"#
#temp_out "'temperature': %.1f,"#
#wind_ave "'wind_speed' : %.1f,"#
#wind_gust "'wind_gust' : %.1f,"#
#wind_dir "'wind_deg' : %.0f," "" "winddir_degrees(x)"#
#rel_pressure "'pressure' : %.1f,"#
#hum_out "'humidity' : %.d,"#
#calc "rain_hour(data)" "'rain_1h': %.1f,"#
#calc "rain_24hr(data)" "'rain_24h': %.1f,"#
#calc "dew_point(data['temp_out'], data['hum_out'])" "'dew_point': %.1f,"#
"""
@contextmanager
def session(self):
with requests.Session() as session:
session.headers.update({'Content-Type': 'application/json'})
session.params.update({'appid': self.params['api key']})
yield session
def upload_data(self, session, prepared_data={}):
url = 'https://api.openweathermap.org/data/3.0/measurements'
try:
rsp = session.post(url, json=[prepared_data], timeout=60)
except Exception as ex:
return False, repr(ex)
if rsp.status_code != 204:
return False, 'http status: {:d} {:s}'.format(
rsp.status_code, rsp.text)
return True, 'OK'
def register(self):
import pprint
self.check_params('external id', 'station name', 'lat', 'long', 'alt')
url = 'https://api.openweathermap.org/data/3.0/stations'
data = {
'external_id': self.params['external id'],
'name' : self.params['station name'],
'latitude' : float(self.params['lat']),
'longitude' : float(self.params['long']),
'altitude' : float(self.params['alt']),
}
station_id = self.params['station id']
idx = -1
with self.session() as session:
# get current stations
try:
rsp = session.get(url, timeout=60)
except Exception as ex:
print('exception', repr(ex))
return
stations = rsp.json()
if stations:
print('The following stations are registered to your account')
for i, station in enumerate(stations):
if station['id'] == station_id:
idx = i
print('Number:', i, '\t\t\t\t<- current station')
else:
print('Number:', i)
pprint.pprint(station)
if sys.version_info[0] >= 3:
input_ = input
else:
input_ = raw_input
i = input_('Please enter number of station to use, or N' +
' to register a new station: ')
if i in ('N', 'n'):
idx = -1
station_id = None
else:
idx = int(i)
station_id = stations[idx]['id']
for i, station in enumerate(stations):
if i == idx:
continue
yn = input_('Would you like to delete station number' +
' {} and all its data (Y/N)? '.format(i))
if yn in ('Y', 'y'):
try:
session.delete(
url + '/' + station['id'], timeout=60)
except Exception as ex:
print('exception', repr(ex))
return
if station_id:
# update existing station
            logger.debug('Updating station id ' + station_id)
url += '/' + station_id
try:
rsp = session.put(url, json=data, timeout=60)
except Exception as ex:
print('exception', repr(ex))
return
rsp = rsp.json()
logger.debug('response: ' + repr(rsp))
else:
# create new station
logger.debug('Creating new station')
try:
rsp = session.post(url, json=data, timeout=60)
except Exception as ex:
print('exception', repr(ex))
return
rsp = rsp.json()
logger.debug('response: ' + repr(rsp))
for key in 'id', 'ID':
if key in rsp:
self.context.params.set(
service_name, 'station id', rsp[key])
if __name__ == "__main__":
sys.exit(pywws.service.main(ToService))
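# --- Editor's note: illustrative only, values are made up ---
# A single rendered measurement, as posted to the /measurements endpoint by
# upload_data() above, would look roughly like:
# [{'dt': 1545372000, 'temperature': 11.2, 'wind_speed': 3.1,
#   'wind_gust': 5.8, 'wind_deg': 230, 'pressure': 1013.4, 'humidity': 87,
#   'rain_1h': 0.3, 'rain_24h': 2.1, 'dew_point': 9.1}]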
|
3v1n0/pywws
|
src/pywws/service/openweathermap.py
|
Python
|
gpl-2.0
| 8,542
|
[
"VisIt"
] |
6fe38a1ad2223c71eb97933ba81c7fc2c689ee2830fb7780c4a1162e80c205c0
|
"""Example implementation of using a marshmallow Schema for both request input
and output with a `use_schema` decorator.
Run the app:
$ python examples/schema_example.py
Try the following with httpie (a cURL-like utility, http://httpie.org):
$ pip install httpie
$ http GET :5001/users/
$ http GET :5001/users/42
$ http POST :5001/users/ username=brian first_name=Brian last_name=May
$ http PATCH :5001/users/42 username=freddie
$ http GET :5001/users/ limit==1
"""
import functools
from flask import Flask, request, jsonify
import random
from marshmallow import Schema, fields, post_dump
from webargs.flaskparser import parser, use_kwargs
app = Flask(__name__)
##### Fake database and models #####
class Model:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def update(self, **kwargs):
self.__dict__.update(kwargs)
@classmethod
def insert(cls, db, **kwargs):
collection = db[cls.collection]
new_id = None
if 'id' in kwargs: # for setting up fixtures
new_id = kwargs.pop('id')
else: # find a new id
found_id = False
while not found_id:
new_id = random.randint(1, 9999)
if new_id not in collection:
found_id = True
new_record = cls(id=new_id, **kwargs)
collection[new_id] = new_record
return new_record
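    # Editor's note: the loop above retries random ids until an unused one
    # turns up, which is fine for this in-memory demo; a real store would
    # use an auto-increment key or UUID instead.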
class User(Model):
collection = 'users'
db = {'users': {}}
##### use_schema #####
def use_schema(schema, list_view=False, locations=None):
"""View decorator for using a marshmallow schema to
(1) parse a request's input and
    (2) serialize the view's output to a JSON response.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
use_args_wrapper = parser.use_args(schema, locations=locations)
# Function wrapped with use_args
func_with_args = use_args_wrapper(func)
ret = func_with_args(*args, **kwargs)
# Serialize and jsonify the return value
return jsonify(schema.dump(ret, many=list_view).data)
return wrapped
return decorator
##### Schemas #####
class UserSchema(Schema):
id = fields.Int(dump_only=True)
username = fields.Str()
first_name = fields.Str()
last_name = fields.Str()
class Meta:
strict = True
@post_dump(pass_many=True)
def wrap_with_envelope(self, data, many):
return {'data': data}
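    # Editor's note (illustrative): with this envelope, a dumped user
    # serializes to {'data': {'id': 42, 'username': 'fred', ...}} and a
    # list view to {'data': [...]}.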
##### Routes #####
@app.route('/users/<int:user_id>', methods=['GET', 'PATCH'])
@use_schema(UserSchema())
def user_detail(reqargs, user_id):
user = db['users'].get(user_id)
if not user:
return jsonify({'message': 'User not found'}), 404
if request.method == 'PATCH' and reqargs:
user.update(**reqargs)
return user
# You can add additional arguments with use_kwargs
@app.route('/users/', methods=['GET', 'POST'])
@use_kwargs({'limit': fields.Int(missing=10, location='query')})
@use_schema(UserSchema(), list_view=True)
def user_list(reqargs, limit):
users = db['users'].values()
if request.method == 'POST':
User.insert(db=db, **reqargs)
return list(users)[:limit]
# Return validation errors as JSON
@app.errorhandler(422)
def handle_validation_error(err):
exc = err.data['exc']
return jsonify({'errors': exc.messages}), 422
if __name__ == "__main__":
User.insert(db=db, id=42, username='fred', first_name='Freddie', last_name='Mercury')
app.run(port=5001, debug=True)
|
Basis/webargs
|
examples/schema_example.py
|
Python
|
mit
| 3,583
|
[
"Brian"
] |
ed39a6955109eba558ca00e6a35a0159c336a049f8bb99ce37b97f670d04acc4
|
from dateutil.relativedelta import relativedelta
from django.test import TestCase, tag
from edc_appointment.models import Appointment
from edc_base import get_utcnow
from edc_facility.import_holidays import import_holidays
from edc_visit_schedule.site_visit_schedules import site_visit_schedules
from ..constants import SCHEDULED
from ..model_mixins import PreviousVisitError
from ..visit_sequence import VisitSequence, VisitSequenceError
from .helper import Helper
from .models import SubjectVisit
from .visit_schedule import visit_schedule1, visit_schedule2
class DisabledVisitSequence(VisitSequence):
def enforce_sequence(self):
return None
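# Editor's note: making enforce_sequence a no-op lets the tests create visits
# out of order without tripping the model-level check, so that
# VisitSequence.enforce_sequence can then be exercised directly on a saved
# visit's appointment.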
class TestPreviousVisit(TestCase):
helper_cls = Helper
def setUp(self):
import_holidays()
SubjectVisit.visit_sequence_cls = VisitSequence
self.subject_identifier = '12345'
self.helper = self.helper_cls(
subject_identifier=self.subject_identifier)
site_visit_schedules._registry = {}
site_visit_schedules.register(visit_schedule=visit_schedule1)
site_visit_schedules.register(visit_schedule=visit_schedule2)
self.helper.consent_and_put_on_schedule()
def tearDown(self):
SubjectVisit.visit_sequence_cls = VisitSequence
def test_visit_sequence_enforcer_on_first_visit_in_sequence(self):
appointments = Appointment.objects.all().order_by('timepoint_datetime')
SubjectVisit.visit_sequence_cls = DisabledVisitSequence
visit = SubjectVisit.objects.create(
appointment=appointments[0],
report_datetime=get_utcnow() - relativedelta(months=10),
reason=SCHEDULED)
visit_sequence = VisitSequence(appointment=visit.appointment)
try:
visit_sequence.enforce_sequence()
except VisitSequenceError as e:
self.fail(f'VisitSequenceError unexpectedly raised. Got \'{e}\'')
def test_visit_sequence_enforcer_without_first_visit_in_sequence(self):
appointments = Appointment.objects.all().order_by('timepoint_datetime')
SubjectVisit.visit_sequence_cls = DisabledVisitSequence
visit = SubjectVisit.objects.create(
appointment=appointments[1],
report_datetime=get_utcnow() - relativedelta(months=10),
reason=SCHEDULED)
visit_sequence = VisitSequence(appointment=visit.appointment)
self.assertRaises(VisitSequenceError, visit_sequence.enforce_sequence)
def test_requires_previous_visit_thru_model(self):
"""Asserts requires previous visit to exist on create.
"""
appointments = Appointment.objects.all().order_by('timepoint_datetime')
SubjectVisit.objects.create(
appointment=appointments[0],
report_datetime=get_utcnow() - relativedelta(months=10),
reason=SCHEDULED)
self.assertRaises(
PreviousVisitError, SubjectVisit.objects.create,
appointment=appointments[2],
report_datetime=get_utcnow() - relativedelta(months=8),
reason=SCHEDULED)
SubjectVisit.objects.create(
appointment=appointments[1],
report_datetime=get_utcnow() - relativedelta(months=10),
reason=SCHEDULED)
self.assertRaises(
PreviousVisitError, SubjectVisit.objects.create,
appointment=appointments[3],
report_datetime=get_utcnow() - relativedelta(months=8),
reason=SCHEDULED)
|
botswana-harvard/edc-visit-tracking
|
edc_visit_tracking/tests/test_visit_sequence.py
|
Python
|
gpl-2.0
| 3,499
|
[
"VisIt"
] |
bb2c0b5bd144f535502a4e9d2404c2d270b95dc806328d3861771679a9ad9d62
|
""" WMSHistory corrector for the group and ingroup shares
"""
__RCSID__ = "$Id$"
import datetime
import time as nativetime
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.WorkloadManagementSystem.private.correctors.BaseCorrector import BaseCorrector
from DIRAC.Core.Utilities import Time
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC.Core.Security import CS
class WMSHistoryCorrector( BaseCorrector ):
_GLOBAL_MAX_CORRECTION = 'MaxGlobalCorrection'
_SLICE_TIME_SPAN = 'TimeSpan'
_SLICE_WEIGHT = 'Weight'
_SLICE_MAX_CORRECTION = 'MaxCorrection'
def initialize( self ):
self.__log = gLogger.getSubLogger( "WMSHistoryCorrector" )
self.__reportsClient = ReportsClient()
self.__usageHistory = {}
self.__slices = {}
self.__lastHistoryUpdate = 0
self.__globalCorrectionFactor = 5
self._fillSlices()
return S_OK()
# def _applyHistoryCorrections( self, entityShares, baseSection = "" ):
# if baseSection not in self.__historyForCorrections or not self.__historyForCorrections[ baseSection ]:
# return entityShares
def _fillSlices( self ):
self.__log.info( "Filling time slices..." )
self.__slices = {}
    self.__globalCorrectionFactor = self.getCSOption( self._GLOBAL_MAX_CORRECTION, 5 )
result = self.getCSSections()
if not result[ 'OK' ]:
self.__log.error( "Cound not get configured time slices", result[ 'Message' ] )
return
timeSlices = result[ 'Value' ]
for timeSlice in timeSlices:
self.__slices[ timeSlice ] = {}
for key, defaultValue in ( ( self._SLICE_TIME_SPAN, 604800 ),
( self._SLICE_WEIGHT, 1 ),
( self._SLICE_MAX_CORRECTION, 3 ) ):
self.__slices[ timeSlice ][ key ] = self.getCSOption( "%s/%s" % ( timeSlice, key ), defaultValue )
#Weight has to be normalized to sum 1
weightSum = 0
for timeSlice in self.__slices:
weightSum += self.__slices[ timeSlice ][ self._SLICE_WEIGHT ]
for timeSlice in self.__slices:
self.__slices[ timeSlice ][ self._SLICE_WEIGHT ] /= float( weightSum )
self.__log.info( "Found %s time slices" % len( self.__slices ) )
def updateHistoryKnowledge( self ):
updatePeriod = self.getCSOption( 'UpdateHistoryPeriod', 900 )
now = nativetime.time()
if self.__lastHistoryUpdate + updatePeriod > now:
self.__log.verbose( "Skipping history update. Last update was less than %s secs ago" % updatePeriod)
return
self.__lastHistoryUpdate = now
self.__log.info( "Updating history knowledge" )
self.__usageHistory = {}
for timeSlice in self.__slices:
result = self._getUsageHistoryForTimeSpan( self.__slices[ timeSlice ][ self._SLICE_TIME_SPAN ],
self.getGroup() )
if not result[ 'OK' ]:
self.__usageHistory = {}
self.__log.error( "Could not get history for slice", "%s: %s" % ( timeSlice, result[ 'Message' ] ) )
return
self.__usageHistory[ timeSlice ] = result[ 'Value' ]
self.__log.info( "Got history for slice %s (%s entities in slice)" % ( timeSlice, len( self.__usageHistory[ timeSlice ] ) ) )
self.__log.info( "Updated history knowledge" )
def _getUsageHistoryForTimeSpan( self, timeSpan, groupToUse = "" ):
reportCondition = { 'Status' : [ 'Running' ] }
if not groupToUse:
reportGrouping = 'UserGroup'
else:
reportGrouping = 'User'
reportCondition = { 'UserGroup' : groupToUse }
now = Time.dateTime()
result = self.__reportsClient.getReport( 'WMSHistory', 'AverageNumberOfJobs',
now - datetime.timedelta( seconds = timeSpan ), now,
reportCondition, reportGrouping,
{ 'lastSeconds' : timeSpan } )
if not result[ 'OK' ]:
self.__log.error( "Cannot get history from Accounting", result[ 'Message' ] )
return result
data = result['Value'].get( 'data', [] )
if not data:
message = "Empty history data from Accounting"
self.__log.error( message )
return S_ERROR( message )
#Map the usernames to DNs
if groupToUse:
mappedData = {}
for userName in data:
result = CS.getDNForUsername( userName )
if not result[ 'OK' ]:
self.__log.error( "User does not have any DN assigned", "%s :%s" % ( userName, result[ 'Message' ] ) )
continue
for userDN in result[ 'Value' ]:
mappedData[ userDN ] = data[ userName ]
data = mappedData
return S_OK( data )
def __normalizeShares( self, entityShares ):
totalShare = 0.0
normalizedShares = {}
#Normalize shares
for entity in entityShares:
totalShare += entityShares[ entity ]
self.__log.verbose( "Total share for given entities is %.3f" % totalShare )
for entity in entityShares:
normalizedShare = entityShares[ entity ] / totalShare
normalizedShares[ entity ] = normalizedShare
self.__log.verbose( "Normalized share for %s: %.3f" % ( entity, normalizedShare ) )
return normalizedShares
def applyCorrection( self, entitiesExpectedShare ):
#Normalize expected shares
normalizedShares = self.__normalizeShares( entitiesExpectedShare )
if not self.__usageHistory:
self.__log.verbose( "No history knowledge available. Correction is 1 for all entities" )
return entitiesExpectedShare
entitiesSliceCorrections = dict( [ ( entity, [] ) for entity in entitiesExpectedShare ] )
for timeSlice in self.__usageHistory:
self.__log.verbose( "Calculating correction for time slice %s" % timeSlice )
sliceTotal = 0.0
sliceHistory = self.__usageHistory[ timeSlice ]
for entity in entitiesExpectedShare:
if entity in sliceHistory:
sliceTotal += sliceHistory[ entity ]
self.__log.verbose( "Usage for %s: %.3f" % ( entity, sliceHistory[ entity ] ) )
self.__log.verbose( "Total usage for slice %.3f" % sliceTotal )
if sliceTotal == 0.0:
self.__log.verbose( "Slice usage is 0, skeeping slice" )
continue
maxSliceCorrection = self.__slices[ timeSlice ][ self._SLICE_MAX_CORRECTION ]
minSliceCorrection = 1.0/maxSliceCorrection
for entity in entitiesExpectedShare:
if entity in sliceHistory:
normalizedSliceUsage = sliceHistory[ entity ] / sliceTotal
self.__log.verbose( "Entity %s is present in slice %s (normalized usage %.2f)" % ( entity,
timeSlice,
normalizedSliceUsage ) )
sliceCorrectionFactor = normalizedShares[ entity ] / normalizedSliceUsage
sliceCorrectionFactor = min( sliceCorrectionFactor, maxSliceCorrection )
sliceCorrectionFactor = max( sliceCorrectionFactor, minSliceCorrection )
sliceCorrectionFactor *= self.__slices[ timeSlice ][ self._SLICE_WEIGHT ]
else:
self.__log.verbose( "Entity %s is not present in slice %s" % ( entity, timeSlice ) )
sliceCorrectionFactor = maxSliceCorrection
self.__log.verbose( "Slice correction factor for entity %s is %.3f" % ( entity, sliceCorrectionFactor ) )
entitiesSliceCorrections[ entity ].append( sliceCorrectionFactor )
correctedEntityShare = {}
maxGlobalCorrectionFactor = self.__globalCorrectionFactor
minGlobalCorrectionFactor = 1.0/maxGlobalCorrectionFactor
for entity in entitiesSliceCorrections:
entityCorrectionFactor = 0.0
slicesCorrections = entitiesSliceCorrections[ entity ]
if not slicesCorrections:
self.__log.verbose( "Entity does not have any correction %s" % entity )
correctedEntityShare[ entity ] = entitiesExpectedShare[ entity ]
else:
for cF in entitiesSliceCorrections[ entity ]:
entityCorrectionFactor += cF
entityCorrectionFactor = min( entityCorrectionFactor, maxGlobalCorrectionFactor )
entityCorrectionFactor = max( entityCorrectionFactor, minGlobalCorrectionFactor )
correctedShare = entitiesExpectedShare[ entity ] * entityCorrectionFactor
correctedEntityShare[ entity ] = correctedShare
self.__log.verbose( "Final correction factor for entity %s is %.3f\n Final share is %.3f" % ( entity,
entityCorrectionFactor,
correctedShare ) )
self.__log.verbose( "Initial shares:\n %s" % "\n ".join( [ "%s : %.2f" % ( en, entitiesExpectedShare[ en ] ) \
for en in entitiesExpectedShare ] ) )
self.__log.verbose( "Corrected shares:\n %s" % "\n ".join( [ "%s : %.2f" % ( en, correctedEntityShare[ en ] ) \
for en in correctedEntityShare ] ) )
return correctedEntityShare
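# --- Editor's note: worked example with made-up numbers ---
# One slice, Weight 1.0, MaxCorrection 3; expected shares A=0.75, B=0.25;
# observed usage A=50 jobs, B=150 jobs, so normalized usage A=0.25, B=0.75.
# Raw factors: A = 0.75/0.25 = 3.0 (at the slice cap), B = 0.25/0.75 = 0.33
# (at the 1/3 floor). After the global clamp (default 5 .. 1/5) the corrected
# shares are A = 0.75*3.0 = 2.25 and B = 0.25*0.33 = 0.083, i.e. the
# under-served entity is boosted and the over-used one is throttled.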
|
vmendez/DIRAC
|
WorkloadManagementSystem/private/correctors/WMSHistoryCorrector.py
|
Python
|
gpl-3.0
| 9,228
|
[
"DIRAC"
] |
dc2b4983e0c033e5ea8062053105302e5125b3f71df26f91f250659edd29438d
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import glob
import os
from spack import *
class SspaceStandard(Package):
"""SSPACE standard is a stand-alone program for scaffolding pre-assembled
contigs using NGS paired-read data
Note: A manual download is required for SSPACE-Standard.
Spack will search your current directory for the download file.
Alternatively, add this file to a mirror so that Spack can find it.
For instructions on how to set up a mirror, see
https://spack.readthedocs.io/en/latest/mirrors.html"""
homepage = "https://www.baseclear.com/genomics/bioinformatics/basetools/SSPACE"
url = "file://{0}/41SSPACE-STANDARD-3.0_linux-x86_64.tar.gz".format(os.getcwd())
manual_download = True
version('3.0', '7e171b4861b9d514e80aafc3d9cdf554')
depends_on('perl+threads', type=('build', 'run'))
depends_on('perl-perl4-corelibs', type=('build', 'run'))
def install(self, spec, prefix):
rootscript = 'SSPACE_Standard_v{0}.pl'.format(self.version)
scripts = [rootscript]
scripts.extend(glob.glob('tools/*.pl'))
scripts.extend(glob.glob('bwa/*.pl'))
for s in scripts:
filter_file('/usr/bin/perl', '/usr/bin/env perl',
s, string=True)
filter_file('require "getopts.pl";', 'use Getopt::Std;',
s, string=True)
filter_file('&Getopts(', 'getopts(', s, string=True)
install_tree('bin', prefix.bin)
install_tree('bowtie', prefix.bowtie)
install_tree('bwa', prefix.bwa)
install_tree('dotlib', prefix.dotlib)
install_tree('tools', prefix.tools)
install(rootscript, prefix)
def setup_run_environment(self, env):
env.set('SSPACE_HOME', self.prefix)
env.prepend_path('PATH', self.prefix)
|
LLNL/spack
|
var/spack/repos/builtin/packages/sspace-standard/package.py
|
Python
|
lgpl-2.1
| 2,017
|
[
"BWA",
"Bowtie"
] |
7a9dd9f7aade51773ae5ec11ef35a9445f5e1da432b7cd4ff64e4743f3728460
|
from __future__ import (absolute_import, division, print_function)
import unittest
from paraview.simple import *
class PVPythonTest(unittest.TestCase):
def test_PVPython(self):
self.assertEqual(GetParaViewVersion().major, 5)
if __name__ == '__main__':
unittest.main()
|
ScreamingUdder/mantid
|
Framework/PythonInterface/test/python/mantid/PVPythonTest.py
|
Python
|
gpl-3.0
| 288
|
[
"ParaView"
] |
4a3d2d69b96ff6f80e5370d5cc7a1d8d9a95b102b612d870842ea1f32b173f3f
|