| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
from itertools import takewhile
import re
from StringIO import StringIO
import shutil
import tempfile
import time
from unittest import skipUnless
from urllib import urlencode
import django
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, Group, Permission
from django.contrib.gis.geos import fromstr, Point
from django.db import connection as dj_connection
from django.http import HttpRequest, QueryDict
from django.test import TestCase
from django.test.utils import override_settings
from pthelma import timeseries
from enhydris.hcore.forms import TimeseriesDataForm
from enhydris.hcore.models import (
EventType, FileType, GentityEvent, GentityFile, Instrument,
InstrumentType, IntervalType, Organization, PoliticalDivision, Station,
StationType, Timeseries, TimeZone, UnitOfMeasurement, UserProfile,
Variable, WaterBasin, WaterDivision)
from enhydris.hcore.views import ALLOWED_TO_EDIT, StationListBaseView
try:
# Experimental Selenium support. Enhydris does not require Selenium
# to be installed or configured. If it is, it runs the selenium tests;
# otherwise the tests are skipped. If you want to run these tests, pip
# install django_selenium_clean and add to your settings.py the snippet at
# https://github.com/aptiko/django-selenium-clean#executing-the-test
from django_selenium_clean import selenium, SeleniumTestCase, PageElement
from selenium.webdriver.common.by import By
except ImportError:
selenium = False
# Create some dummy stuff to allow the Selenium tests to compile (even if
# they are always skipped).
class Dummy:
XPATH = ''
ID = ''
def __init__(*args, **kwargs):
pass
SeleniumTestCase = TestCase
PageElement = Dummy
By = Dummy()
def create_test_data():
user1 = User.objects.create_user('admin', 'anthony@itia.ntua.gr',
'topsecret')
user1.is_active = True
user1.is_superuser = True
user1.is_staff = True
user1.save()
organization1 = Organization.objects.create(
name="We're rich and we fancy it SA")
organization2 = Organization.objects.create(
name="We're poor and dislike it Ltd")
organization3 = Organization.objects.create(
name="We get all your money and enjoy it plc")
water_division1 = WaterDivision.objects.create(
name="North Syldavia Basins")
water_division2 = WaterDivision.objects.create(
name="South Syldavia Basins")
# water_division3
WaterDivision.objects.create(name="East Syldavia Basins")
# water_division4
WaterDivision.objects.create(name="West Syldavia Basins")
water_basin1 = WaterBasin.objects.create(name="Arachthos")
water_basin2 = WaterBasin.objects.create(name="Pinios")
water_basin3 = WaterBasin.objects.create(name="Greyflood")
# Political divisions
# +-> Greece +-> Epirus +-> Preveza
# | | +-> Arta
# | +-> Thessaly +-> Karditsa
# | +-> Magnisia
# +-> Middle Earth +-> Eriador +-> Arthedain
# | +-> Cardolan
# +-> Gondor +-> Lamedon
# +-> Lebbenin
pd = PoliticalDivision
pd_greece = pd.objects.create(name="Greece")
pd_epirus = pd.objects.create(name="Epirus", parent=pd_greece)
pd_thessaly = pd.objects.create(name="Thessaly", parent=pd_greece)
# pd_preveza
pd.objects.create(name="Preveza", parent=pd_epirus)
pd_arta = pd.objects.create(name="Arta", parent=pd_epirus)
pd_karditsa = pd.objects.create(name="Karditsa", parent=pd_thessaly)
# pd_magnisia
pd.objects.create(name="Magnisia", parent=pd_thessaly)
pd_middleearth = pd.objects.create(name="Middle Earth")
pd_eriador = pd.objects.create(name="Eriador", parent=pd_middleearth)
pd_gondor = pd.objects.create(name="Gondor", parent=pd_middleearth)
# pd_arthedain
pd.objects.create(name="Arthedain", parent=pd_eriador)
pd_cardolan = pd.objects.create(name="Cardolan", parent=pd_eriador)
# pd_lamedon
pd.objects.create(name="Lamedon", parent=pd_gondor)
# pd_lebbenin
pd.objects.create(name="Lebbenin", parent=pd_gondor)
stype1 = StationType.objects.create(descr="Important")
stype2 = StationType.objects.create(descr="Unimportant")
stype3 = StationType.objects.create(descr="Even less significant")
# filetype1
FileType.objects.create(mime_type='image/jpeg')
variable1 = Variable.objects.create(descr='Rainfall')
variable2 = Variable.objects.create(descr='Temperature')
unit_of_measurement1 = UnitOfMeasurement.objects.create(
descr='millimeter', symbol='mm')
unit_of_measurement1.variables = [variable1]
unit_of_measurement1.save()
unit_of_measurement2 = UnitOfMeasurement.objects.create(
descr='Degrees Celsius', symbol=u'\u00b0C')
unit_of_measurement2.variables = [variable2]
unit_of_measurement2.save()
timezone1 = TimeZone.objects.create(code='EET', utc_offset=120)
# interval_type1
IntervalType.objects.create(
descr='Sum', value='SUM', descr_alt='Sum')
# interval_type2
IntervalType.objects.create(
descr='Average value', value='AVERAGE', descr_alt='Average value')
# interval_type3
IntervalType.objects.create(
descr='Minimum', value='MINIMUM', descr_alt='Minimum')
# interval_type4
IntervalType.objects.create(
descr='Maximum', value='MAXIMUM', descr_alt='Maximum')
# interval_type5
IntervalType.objects.create(
descr='Vector average', value='VECTOR_AVERAGE',
descr_alt='Vector average')
station1 = Station.objects.create(
name='Komboti',
approximate=False,
is_automatic=False,
copyright_holder="We're poor and dislike it Ltd",
copyright_years='2013',
owner=organization2,
water_division=water_division1,
water_basin=water_basin1,
political_division=pd_arta,
point=Point(x=21.06071, y=39.09518, srid=4326),
srid=4326)
station1.stype = [stype1]
station1.save()
station2 = Station.objects.create(
name='Agios Athanasios',
approximate=False,
is_automatic=False,
copyright_holder="We're poor and dislike it Ltd",
copyright_years='2013',
owner=organization2,
water_division=water_division2,
water_basin=water_basin2,
political_division=pd_karditsa,
point=Point(x=21.60121, y=39.22440, srid=4326),
srid=4326)
station2.stype = [stype1, stype2]
station2.save()
station3 = Station.objects.create(
name='Tharbad',
approximate=False,
is_automatic=False,
copyright_holder="Isaac Newton",
copyright_years='1687',
owner=organization1,
water_division=water_division2,
water_basin=water_basin3,
political_division=pd_cardolan,
point=Point(x=-176.48368, y=0.19377, srid=4326),
srid=4326)
station3.stype = [stype2]
station3.save()
# Station 4 has no time series
station4 = Station.objects.create(
name='Lefkada',
approximate=False,
is_automatic=False,
copyright_holder='Alice Brown',
copyright_years='2014',
owner=organization3)
station4.stype = [stype3]
station4.save()
# timeseries1
Timeseries.objects.create(
unit_of_measurement=unit_of_measurement1,
gentity=station1,
time_zone=timezone1,
variable=variable1,
name='Rain')
# timeseries2
Timeseries.objects.create(
unit_of_measurement=unit_of_measurement2,
gentity=station1,
time_zone=timezone1,
variable=variable2,
name='Air temperature')
# timeseries3
Timeseries.objects.create(
unit_of_measurement=unit_of_measurement1,
gentity=station2,
time_zone=timezone1,
variable=variable1,
name='Rain')
# timeseries4
Timeseries.objects.create(
unit_of_measurement=unit_of_measurement2,
gentity=station2,
time_zone=timezone1,
variable=variable2,
name='Air temperature')
# timeseries5
Timeseries.objects.create(
unit_of_measurement=unit_of_measurement2,
gentity=station3,
time_zone=timezone1,
variable=variable2,
name='Temperature',
remarks='This is an extremely important time series, just because it '
'is hugely significant and markedly outstanding.')
# EventType
EventType.objects.create(descr="WAR: World Is A Ghetto")
class SearchTestCase(TestCase):
def setUp(self):
create_test_data()
def get_queryset(self, query_string):
view = StationListBaseView()
view.request = HttpRequest()
view.request.method = 'GET'
view.request.GET = QueryDict(query_string)
return view.get_queryset()
def test_invalid_sort_terms_view_call(self):
# Request for host.domain/?sort=999.9
response = self.client.get(reverse('station_list') + '?sort=999.9')
i = response.content.index
# Sorting falls back to the default ['name'] term.
# Check that test stations 'Komboti' and 'Tharbad' appear in
# alphabetical order.
self.assertLess(i('Komboti'), i('Tharbad'))
# Order for host.domain/?sort=name&sort=999.9
response = self.client.get(
reverse('station_list') + '?sort=name&sort=999.9')
i = response.content.index
# Sorting falls back to the default ['name'] term.
# Check that test stations 'Komboti' and 'Tharbad' appear in
# alphabetical order.
self.assertLess(i('Komboti'), i('Tharbad'))
def test_valid_sort_terms_view_call(self):
# Request for host.domain/?sort=water_division&sort=name
response = self.client.get(
reverse('station_list') + '?sort=water_division&sort=name')
i = response.content.index
# Check that the test stations appear ordered by
# ['water_division', 'name']: Komboti, Agios Athanasios, Tharbad.
self.assertTrue(i('Komboti') < i('Agios Athanasios') < i('Tharbad'))
def test_search_in_timeseries_remarks(self):
# Search for something that exists
queryset = self.get_queryset(urlencode({
'q': 'extremely important time series'}))
self.assertEqual(queryset.count(), 1)
self.assertEqual(queryset[0].name, 'Tharbad')
# Search for something that doesn't exist
queryset = self.get_queryset(urlencode({
'q': 'this should not exist anywhere'}))
self.assertEqual(queryset.count(), 0)
def test_search_by_owner(self):
queryset = self.get_queryset(urlencode({'q': 'owner:RiCh'}))
self.assertEqual(queryset.count(), 1)
self.assertEqual(queryset[0].owner.organization.name,
"We're rich and we fancy it SA")
queryset = self.get_queryset(urlencode({'owner': 'poor'}))
self.assertEqual(queryset.count(), 2)
self.assertEqual(queryset[0].owner.organization.name,
"We're poor and dislike it Ltd")
queryset = self.get_queryset(urlencode({'owner': 'nonexistent'}))
self.assertEqual(queryset.count(), 0)
def test_search_by_type(self):
# The following will find both "Important" and "Unimportant" stations,
# because the string "important" is included in "Unimportant".
queryset = self.get_queryset(urlencode({'q': 'type:Important'}))
queryset = queryset.distinct()
self.assertEqual(queryset.count(), 3)
queryset = self.get_queryset(urlencode({'type': 'Unimportant'}))
queryset = queryset.order_by('name')
self.assertEqual(queryset.count(), 2)
self.assertEqual(queryset[0].name, 'Agios Athanasios')
self.assertEqual(queryset[1].name, 'Tharbad')
queryset = self.get_queryset(urlencode({'type': 'Nonexistent'}))
self.assertEqual(queryset.count(), 0)
def test_search_by_water_division(self):
queryset = self.get_queryset(urlencode({'q': 'water_division:north'}))
self.assertEqual(queryset.count(), 1)
self.assertEqual(queryset[0].name, 'Komboti')
queryset = self.get_queryset(urlencode({'q': 'water_division:south'}))
queryset = queryset.order_by('name')
self.assertEqual(queryset.count(), 2)
self.assertEqual(queryset[0].name, 'Agios Athanasios')
self.assertEqual(queryset[1].name, 'Tharbad')
queryset = self.get_queryset(urlencode({'q': 'water_division:east'}))
self.assertEqual(queryset.count(), 0)
def test_search_by_water_basin(self):
queryset = self.get_queryset(urlencode({'q': 'water_basin:arachthos'}))
self.assertEqual(queryset.count(), 1)
self.assertEqual(queryset[0].name, 'Komboti')
queryset = self.get_queryset(urlencode({'water_basin': 'greyflood'}))
queryset = queryset.order_by('name')
self.assertEqual(queryset.count(), 1)
self.assertEqual(queryset[0].name, 'Tharbad')
queryset = self.get_queryset(urlencode({'water_basin': 'nonexistent'}))
self.assertEqual(queryset.count(), 0)
def test_search_by_variable(self):
queryset = self.get_queryset(urlencode({'q': 'variable:rain'}))
queryset = queryset.order_by('name')
self.assertEqual(queryset.count(), 2)
self.assertEqual(queryset[0].name, 'Agios Athanasios')
self.assertEqual(queryset[1].name, 'Komboti')
queryset = self.get_queryset(urlencode({'q': 'variable:temperature'}))
queryset = queryset.order_by('name')
self.assertEqual(queryset.count(), 3)
self.assertEqual(queryset[0].name, 'Agios Athanasios')
self.assertEqual(queryset[1].name, 'Komboti')
self.assertEqual(queryset[2].name, 'Tharbad')
queryset = self.get_queryset(urlencode({'q': 'variable:nonexistent'}))
self.assertEqual(queryset.count(), 0)
def test_search_by_gentityId(self):
station_id = Station.objects.get(name='Komboti').id
queryset = self.get_queryset(urlencode({'gentityId': str(station_id)}))
self.assertEqual(queryset.count(), 1)
self.assertEqual(queryset[0].name, 'Komboti')
queryset = self.get_queryset(urlencode({'gentityId': '98765'}))
self.assertEqual(queryset.count(), 0)
def test_search_by_ts_only(self):
queryset = self.get_queryset('')
self.assertEqual(queryset.count(), 4)
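# Station 4 has no time series (see create_test_data), so "ts_only:"
# should exclude it, leaving three of the four stations.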
queryset = self.get_queryset(urlencode({'q': 'ts_only:'}))
self.assertEqual(queryset.count(), 3)
def test_search_by_political_division(self):
queryset = self.get_queryset(
urlencode({'political_division': 'Cardolan'}))
self.assertEqual(queryset.count(), 1)
self.assertEqual(queryset[0].name, 'Tharbad')
queryset = self.get_queryset(
urlencode({'political_division': 'Arthedain'}))
self.assertEqual(queryset.count(), 0)
queryset = self.get_queryset(
urlencode({'political_division': 'Karditsa'}))
self.assertEqual(queryset.count(), 1)
self.assertEqual(queryset[0].name, 'Agios Athanasios')
queryset = self.get_queryset(
urlencode({'political_division': 'Arta'}))
self.assertEqual(queryset.count(), 1)
self.assertEqual(queryset[0].name, 'Komboti')
queryset = self.get_queryset(
urlencode({'political_division': 'Epirus'}))
self.assertEqual(queryset.count(), 1)
self.assertEqual(queryset[0].name, 'Komboti')
queryset = self.get_queryset(
urlencode({'political_division': 'Greece'}))
queryset = queryset.order_by('name')
self.assertEqual(queryset.count(), 2)
self.assertEqual(queryset[0].name, 'Agios Athanasios')
self.assertEqual(queryset[1].name, 'Komboti')
queryset = self.get_queryset(
urlencode({'political_division': 'Middle Earth'}))
self.assertEqual(queryset.count(), 1)
self.assertEqual(queryset[0].name, 'Tharbad')
class RandomMediaRoot(override_settings):
"""
Override MEDIA_ROOT to a temporary directory.
Specifying "@RandomMediaRoot()" as a decorator is the same as
"@override_settings(MEDIA_ROOT=tempfile.mkdtemp())", except that in the
end it removes the temporary directory.
"""
def __init__(self):
self.tmpdir = tempfile.mkdtemp()
super(RandomMediaRoot, self).__init__(MEDIA_ROOT=self.tmpdir)
def disable(self):
super(RandomMediaRoot, self).disable()
shutil.rmtree(self.tmpdir)
class GentityFileTestCase(TestCase):
def setUp(self):
create_test_data()
@RandomMediaRoot()
def test_gentity_file(self):
# Upload a gentity file
gentity_id = Station.objects.get(name='Komboti').id
r = self.client.login(username='admin', password='topsecret')
self.assertTrue(r)
self.assertEqual(GentityFile.objects.filter(gentity__id=gentity_id
).count(), 0)
filetype_id = FileType.objects.get(mime_type='image/jpeg').id
with tempfile.TemporaryFile(suffix='.jpg') as tmpfile:
tmpfile.write('Irrelevant data\n')
tmpfile.seek(0)
response = self.client.post(reverse('gentityfile_add'),
{'gentity': gentity_id,
'date': '',
'file_type': filetype_id,
'descr': 'A description',
'remarks': '',
'descr_alt': 'An alt description',
'remarks_alt': '',
'content': tmpfile,
})
self.assertEqual(response.status_code, 302)
self.assertEqual(GentityFile.objects.filter(gentity__id=gentity_id
).count(), 1)
# Now try to download that gentity file
gentity_file_id = GentityFile.objects.all()[0].id
response = self.client.get(reverse('gentityfile_dl',
kwargs={'gf_id': gentity_file_id}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, 'Irrelevant data\n')
class TsTestCase(TestCase):
"""Test timeseries data upload/download code."""
def setUp(self):
# Create the dependencies of the timeseries.
self.stype = StationType.objects.create(descr='stype')
self.stype.save()
self.organization = Organization.objects.create(name='org')
self.organization.save()
self.var = Variable.objects.create(descr='var')
self.var.save()
self.unit = UnitOfMeasurement.objects.create(symbol='+')
self.unit.variables.add(self.var)
self.unit.save()
self.tz = TimeZone.objects.create(code='UTC', utc_offset='0')
self.tz.save()
self.station = Station.objects.create(
name='station',
owner=self.organization,
approximate=False,
is_automatic=True,
point=fromstr('POINT(24.67890 38.12345)'),
srid=4326,
altitude=219.22)
self.station.save()
self.ts = Timeseries(name="tstest", gentity=self.station,
time_zone=self.tz,
unit_of_measurement=self.unit,
variable=self.var)
self.ts.save()
self.user = User.objects.create_user('test', 'test@test.com',
'test')
self.user.save()
def tearDown(self):
self.stype.delete()
self.organization.delete()
self.var.delete()
self.unit.delete()
self.tz.delete()
self.ts.delete()
self.user.delete()
def test_timeseries_data(self):
# Upload
with open("enhydris/hcore/tests/tsdata.hts", "r") as f:
file_dict = {'data': SimpleUploadedFile(f.name, f.read())}
post_dict = {'gentity': self.station.pk, 'variable': self.var.pk,
'unit_of_measurement': self.unit.pk,
'time_zone': self.tz.pk
}
form = TimeseriesDataForm(post_dict, file_dict, instance=self.ts)
self.assertTrue(form.is_valid())
ts = form.save()
ts.save()
pts = timeseries.Timeseries(ts.id)
pts.read_from_db(dj_connection)
self.assertEqual(len(pts.items()), 12872)
# Download
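# The download URL optionally takes start and end dates:
# /timeseries/d/<id>/download/[<start_date>]/[<end_date>]/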
def nrecords():
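# A .hts file begins with a header terminated by a blank line;
# subtract the header lines plus the blank line from the total
# line count to get the number of data records.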
lines = response.content.splitlines()
linecount = len(lines)
headerlinecount = sum([1 for x in takewhile(lambda x: x != '',
lines)]) + 1
return linecount - headerlinecount
if not settings.ENHYDRIS_TSDATA_AVAILABLE_FOR_ANONYMOUS_USERS:
self.client.login(username='test', password='test')
url = "/timeseries/d/{}/download/".format(self.ts.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.splitlines()[0].strip(), 'Version=2')
self.assertEqual(nrecords(), 12872)
url = "/timeseries/d/{}/download/?version=3".format(self.ts.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
ats = timeseries.Timeseries()
ats.read_file(StringIO(response.content))
self.assertAlmostEqual(ats.location['abscissa'], 24.67890, places=6)
self.assertAlmostEqual(ats.location['ordinate'], 38.12345, places=6)
self.assertEqual(ats.location['srid'], 4326)
self.assertAlmostEqual(ats.location['altitude'], 219.22, places=2)
self.assertTrue(ats.location['asrid'] is None)
self.assertEqual(nrecords(), 12872)
url = "/timeseries/d/{}/download/1960-11-04/".format(self.ts.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(nrecords(), 12870)
url = "/timeseries/d/{}/download/1960-11-04/1960-11-08T08:00/".format(
self.ts.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(nrecords(), 4)
url = "/timeseries/d/{}/download//1960-11-08T08:00/".format(self.ts.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(nrecords(), 6)
url = "/timeseries/d/{}/download//1960-11-08T08:00:00/".format(
self.ts.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(nrecords(), 6)
url = "/timeseries/d/{}/download/1950-02-02/1960-11-08T08:00/".format(
self.ts.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(nrecords(), 6)
url = "/timeseries/d/{}/download/1950-02-02/1960-01-01T08:00/".format(
self.ts.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(nrecords(), 0)
url = "/timeseries/d/{}/download/1998-02-02//".format(
self.ts.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(nrecords(), 0)
self.client.logout()
@override_settings(ENHYDRIS_USERS_CAN_ADD_CONTENT=True)
class OpenVTestCase(TestCase):
"""
Test that the site behaves as expected when ENHYDRIS_USERS_CAN_ADD_CONTENT
is set to True.
"""
def setUp(self):
# Create the editors group
permitted = ["eventtype", "filetype", "garea", "gentityaltcode",
"gentityaltcodetype", "gentityevent", "gentityfile",
"gline", "instrument", "instrumenttype", "overseer",
"politicaldivision", "station", "stationtype",
"timeseries", "timestep", "timezone", "unitofmeasurement",
"userprofile", "variable", "waterbasin", "waterdivision",
"person", "organization", "gentitygenericdatatype"]
editors = Group(name='editors')
editors.save()
for x in ('add', 'change', 'delete'):
for y in permitted:
editors.permissions.add(
Permission.objects.get(codename=x + '_' + y,
content_type__app_label='hcore'))
# create user and add him to editors group. this'll be the
# creator/owner to check permissions
self.user = User.objects.create_user('opentest', 'opentest@test.com',
'opentest')
# another user who won't have permissions over the tested objects to
# verify that permission handling works as expected.
self.user2 = User.objects.create_user('fail', 'fail@faildom.com',
'fail')
self.editors = Group.objects.get(name='editors')
self.editors.user_set.add(self.user)
self.editors.user_set.add(self.user2)
# create a station, instrument and timeseries to check permissions
self.stype = StationType.objects.create(descr='stype')
self.itype = InstrumentType.objects.create(descr='itype')
self.organization = Organization.objects.create(name='org')
self.var = Variable.objects.create(descr='var')
self.unit = UnitOfMeasurement.objects.create(symbol='+')
self.unit.variables.add(self.var)
self.tz = TimeZone.objects.create(code='UTC', utc_offset='0')
self.station = Station.objects.create(name='station',
owner=self.organization,
approximate=False,
is_automatic=True)
self.ts = Timeseries(name="tstest", gentity=self.station,
time_zone=self.tz, unit_of_measurement=self.unit,
variable=self.var)
self.ts.save()
def tearDown(self):
self.user.delete()
self.stype.delete()
self.organization.delete()
self.var.delete()
self.unit.delete()
self.tz.delete()
self.ts.delete()
def testStatusCode(self):
"""Test that the response status code is correct"""
self.pages = ['/stations/add/',
'/timeseries/add/',
'/instrument/add/',
]
# check that anonymous users cannot see the forms
for page_url in self.pages:
page = self.client.get(page_url)
self.assertEqual(
page.status_code, 302,
"Status code for page '%s' was %s instead of %s" %
(page_url, page.status_code, 302))
self.assertRedirects(
page, '/accounts/login/?next=%s' % page_url, status_code=302,
target_status_code=200)
self.assertEqual(self.client.login(username='opentest',
password='opentest'), True)
# check that logged in users can see the forms
for page_url in self.pages:
page = self.client.get(page_url)
self.assertEqual(
page.status_code, 200,
"Status code for page '%s' was %s instead of %s" %
(page_url, page.status_code, 200))
self.client.logout()
def testStationPermissions(self):
"""
Check that edit forms honour the permissions.
"""
self.assertEqual(self.client.login(username='opentest',
password='opentest'), True)
post_data = {
'name': 'station_test',
'stype': self.stype.pk,
'owner': self.organization.pk,
'creator': self.user.pk,
'copyright_holder': 'Copyright Holder',
'copyright_years': '1990-2011',
'Overseer-TOTAL_FORMS': '1',
'Instrument-TOTAL_FORMS': '1',
'Timeseries-TOTAL_FORMS': '1',
'Timeseries-INITIAL_FORMS': '0',
'Overseer-INITIAL_FORMS': '0',
'Instrument-INITIAL_FORMS': '0'
}
# create new station as a logged in user. this should work
url = "/stations/add/"
resp = self.client.post(url, post_data)
self.assertEqual(resp.status_code, 302)
s = Station.objects.get(name='station_test')
# edit my station. this should work
url = "/stations/edit/%s/" % str(s.pk)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, "station_edit.html")
# delete my station. this should work
url = "/stations/delete/%s/" % str(s.pk)
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(Station.objects.filter(name='station_test').count(),
0)
# try to edit a random station. this should fail
url = "/stations/edit/%s/" % str(self.station.pk)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 403)
# try to delete a random station. this should fail
url = "/stations/delete/%s/" % str(self.station.pk)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 403)
# recreate again for further tests
url = "/stations/add/"
resp = self.client.post(url, post_data)
self.assertEqual(resp.status_code, 302)
s = Station.objects.get(name='station_test')
self.client.logout()
# login as another user to check 403 perms
self.assertEqual(self.client.login(username='fail', password='fail'),
True)
# edit station. this shouldn't work
url = "/stations/edit/%s/" % str(s.pk)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 403)
# delete station. this shouldn't work
url = "/stations/delete/%s/" % str(s.pk)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 403)
# add user to maintainers and check if it's fixed.
s.maintainers.add(self.user2)
s.save()
# edit maintaining station. this should work
url = "/stations/edit/%s/" % str(s.pk)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, "station_edit.html")
# delete maintaining station. this shouldn't work
url = "/stations/delete/%s/" % str(s.pk)
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 403)
self.client.logout()
s.delete()
def testTimeseriesPermissions(self):
"""
Check that edit forms honour the permissions.
"""
self.assertEqual(self.client.login(username='opentest',
password='opentest'), True)
post_data = {
'name': 'station_test',
'stype': self.stype.pk,
'owner': self.organization.pk,
'creator': self.user.pk,
'copyright_holder': 'Copyright Holder',
'copyright_years': '1990-2011',
'Overseer-TOTAL_FORMS': '1',
'Instrument-TOTAL_FORMS': '1',
'Timeseries-INITIAL_FORMS': '0',
'Timeseries-TOTAL_FORMS': '1',
'Overseer-INITIAL_FORMS': '0',
'Instrument-INITIAL_FORMS': '0',
}
# create new station as a logged in user. this should work
url = "/stations/add/"
resp = self.client.post(url, post_data)
self.assertEqual(resp.status_code, 302)
s = Station.objects.get(name='station_test')
post_data = {
'name': 'timeseries_test',
'gentity': s.pk,
'time_zone': self.tz.pk,
'variable': self.var.pk,
'unit_of_measurement': self.unit.pk
}
# create new timeseries
url = "/timeseries/add/"
resp = self.client.post(url, post_data)
self.assertEqual(resp.status_code, 302)
t = Timeseries.objects.get(name="timeseries_test")
# edit my timeseries. this should work
url = "/timeseries/edit/%s/" % str(t.pk)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, "timeseries_edit.html")
# delete my timeseries. this should work
url = "/timeseries/delete/%s/" % str(t.pk)
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(Timeseries.objects.filter(
name='timeseries_test').count(), 0)
# recreate deleted timeseries for further tests
url = "/timeseries/add/"
resp = self.client.post(url, post_data)
t = Timeseries.objects.get(name="timeseries_test")
self.assertEqual(resp.status_code, 302)
self.client.logout()
# login as another user to check 403 perms
self.assertEqual(self.client.login(username='fail', password='fail'),
True)
# edit the timeseries. this shouldn't work
url = "/timeseries/edit/%s/" % str(t.pk)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 403)
# delete the timeseries. this shouldn't work
url = "/timeseries/delete/%s/" % str(t.pk)
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 403)
# add user to maintainers and check if it's fixed.
s.maintainers.add(self.user2)
s.save()
# edit maintaining timeseries. this should work
url = "/timeseries/edit/%s/" % str(t.pk)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, "timeseries_edit.html")
# delete maintaining timeseries, this should work
url = "/timeseries/delete/%s/" % str(t.pk)
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(Timeseries.objects.filter(
name='timeseries_test').count(), 0)
s.delete()
def testInstrumentPermissions(self):
"""
Check that edit forms honour the permissions.
"""
self.assertEqual(self.client.login(username='opentest',
password='opentest'), True)
post_data = {
'name': 'station_test',
'stype': self.stype.pk,
'owner': self.organization.pk,
'creator': self.user.pk,
'copyright_holder': 'Copyright Holder',
'copyright_years': '1990-2011',
'Overseer-TOTAL_FORMS': '1',
'Instrument-TOTAL_FORMS': '1',
'Timeseries-INITIAL_FORMS': '0',
'Timeseries-TOTAL_FORMS': '1',
'Overseer-INITIAL_FORMS': '0',
'Instrument-INITIAL_FORMS': '0',
}
# create new station as a logged in user. this should work
url = "/stations/add/"
resp = self.client.post(url, post_data)
self.assertEqual(resp.status_code, 302)
s = Station.objects.get(name='station_test')
post_data = {
'name': 'instrument_test',
'station': s.pk,
'type': self.itype.pk
}
# create new instrument
url = "/instrument/add/"
resp = self.client.post(url, post_data)
self.assertEqual(resp.status_code, 302)
i = Instrument.objects.get(name="instrument_test")
# edit my instrument. this should work
url = "/instrument/edit/%s/" % str(i.pk)
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, "instrument_edit.html")
# delete my instrument. this should work
url = "/instrument/delete/%s/" % str(i.pk)
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(Instrument.objects.filter(
name='instrument_test').count(), 0)
# recreate deleted instrument for further tests
url = "/instrument/add/"
resp = self.client.post(url, post_data)
self.assertEqual(resp.status_code, 302)
i = Instrument.objects.get(name="instrument_test")
self.client.logout()
# login as another user to check 403 perms
self.assertEqual(self.client.login(username='fail', password='fail'),
True)
# edit the instrument. this shouldn't work
url = "/instrument/edit/%s/" % str(i.pk)
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 403)
# delete the instrument. this shouldn't work
url = "/instrument/delete/%s/" % str(i.pk)
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 403)
# add user to maintainers and check if it's fixed.
s.maintainers.add(self.user2)
s.save()
# edit maintaining instrument. this should work
url = "/instrument/edit/%s/" % str(i.pk)
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, "instrument_edit.html")
# delete maintaining instrument. this should work
url = "/instrument/delete/%s/" % str(i.pk)
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertEqual(Instrument.objects.filter(
name='instrument_test').count(), 0)
def testGenericModelCreation(self):
"""
Test the generic model forms
"""
self.assertEqual(self.client.login(username='opentest',
password='opentest'), True)
for model in ALLOWED_TO_EDIT:
url = "/add/%s/?_popup=1" % model
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200, "Error in page %s." % url)
self.assertTemplateUsed(resp, "model_add.html")
self.client.logout()
class RegisterTestCase(TestCase):
"""
Test that "Register" link appears depending on REGISTRATION_OPEN setting.
"""
@override_settings(REGISTRATION_OPEN=False)
def test_register_link_absent(self):
response = self.client.get('/')
self.assertNotContains(response, 'Register')
@override_settings(REGISTRATION_OPEN=True)
def test_register_link_present(self):
response = self.client.get('/')
self.assertContains(response, 'Register')
class StationTestCase(TestCase):
def setUp(self):
create_test_data()
@override_settings(ENHYDRIS_USERS_CAN_ADD_CONTENT=True)
def test_add_station(self):
"""
Test that the add station form appears properly.
"""
r = self.client.login(username='admin', password='topsecret')
self.assertTrue(r)
response = self.client.get('/stations/add/')
self.assertEqual(response.status_code, 200)
@override_settings(ENHYDRIS_USERS_CAN_ADD_CONTENT=True)
def test_station_invalid_SRID_submission(self):
r = self.client.login(username='admin', password='topsecret')
self.assertTrue(r)
post_data = {
'name': 'station_test',
'stype': StationType.objects.get(descr="Important").id,
'owner': Organization.objects.get(
name="We're rich and we fancy it SA").id,
'copyright_holder': 'Copyright Holder',
'copyright_years': '1990-2011',
'ordinate': '17',
'abscissa': '25',
'srid': '210',
}
response = self.client.post('/stations/add/', post_data)
self.assertFormError(response, 'form', 'srid', 'Invalid SRID')
class ProfileTestCase(TestCase):
def test_profile(self):
# Create a user
self.auser = User.objects.create_user(
username='auser', email='irrelevant@example.com',
password='topsecret')
self.auser.save()
profile = UserProfile.objects.get(user=self.auser)
profile.fname = 'A'
profile.lname = 'User'
profile.address = 'Nowhere'
profile.email_is_public = True
profile.save()
# Create a second user
self.buser = User.objects.create_user(
username='buser', email='irrelevant_indeed@example.com',
password='topsecret')
self.buser.save()
# View the first user's profile
response = self.client.get('/profile/auser/')
self.assertContains(response, 'irrelevant@example.com')
# Prepare the post data that we will be attempting to post -
# essentially this sets email_is_public to False.
post_data = {'user': self.auser.id, 'fname': 'A', 'lname': 'User',
'address': 'Nowhere', 'organization': 'UN',
'email_is_public': False}
# Try to modify first user's profile anonymously - should deny
response = self.client.post('/profile/edit/', post_data)
self.assertEqual(response.status_code, 200)
# Try to modify first user's profile as second user - should deny
r = self.client.login(username='buser', password='topsecret')
self.assertTrue(r)
response = self.client.post('/profile/edit/', post_data)
self.assertEqual(response.status_code, 200)
self.client.logout()
# Try to modify first user's profile as first user - should accept.
# Also check that email_is_public makes a difference.
r = self.client.login(username='auser', password='topsecret')
self.assertTrue(r)
response = self.client.get('/profile/auser/')
self.assertContains(response, 'irrelevant@example.com')
response = self.client.post('/profile/edit/', post_data)
self.assertEqual(response.status_code, 302)
response = self.client.get('/profile/auser/')
self.assertNotContains(response, 'irrelevant@example.com')
self.client.logout()
@override_settings(
EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend')
class ResetPasswordTestCase(TestCase):
def test_reset_password(self):
# Create a user
self.auser = User.objects.create_user(
username='auser', email='irrelevant@example.com',
password='topsecret1')
self.auser.save()
# Ooops... I thought my password was topsecret2, but apparently I
# forgot it...
r = self.client.login(username='auser', password='topsecret2')
self.assertFalse(r)
# No problem, let me submit the password reset form
response = self.client.get('/accounts/password/reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/accounts/password/reset/',
{'email': 'irrelevant@example.com'})
self.assertEqual(response.status_code, 302)
# Did I receive an email?
self.assertEqual(len(django.core.mail.outbox), 1)
# Get the link from the email
m = re.search('http://[^/]+(\S+)', django.core.mail.outbox[0].body)
reset_link = m.group(1)
# Visit the link and submit the form
response = self.client.get(reset_link)
self.assertEqual(response.status_code, 200)
response = self.client.post(reset_link,
{'new_password1': 'topsecret2',
'new_password2': 'topsecret2'})
self.assertEqual(response.status_code, 302)
# Cool, now let me log in
r = self.client.login(username='auser', password='topsecret2')
self.assertTrue(r)
class GentityEventTestCase(TestCase):
def setUp(self):
create_test_data()
def test_add_gentity_event(self):
# Gentity and Station models share the same id
gentity_id = Station.objects.get(name='Komboti').id
# Before uploading, there should be 0 events in the database
self.assertEquals(GentityEvent.objects.filter(gentity__id=gentity_id
).count(), 0)
# Login user as admin
r = self.client.login(username='admin', password='topsecret')
self.assertTrue(r)
# Post data to the 'gentityevent_add' form
response = self.client.post(
reverse('gentityevent_add'),
{'date': '10/06/2015',
'gentity': gentity_id,
'type': EventType.objects.get(descr="WAR: World Is A Ghetto").id,
'user': User.objects.get(username='admin').id}
)
# Check that the form redirects to the correct station detail page
self.assertRedirects(response, reverse('station_detail',
kwargs={'pk': gentity_id}))
# Check that the database now contains one event
self.assertEquals(GentityEvent.objects.filter(gentity__id=gentity_id
).count(), 1)
@skipUnless(selenium, 'Selenium is missing or unconfigured')
class CoordinatesTestCase(SeleniumTestCase):
# Elements in "Edit Station" view
label_ordinate = PageElement(By.XPATH, '//label[@for="id_ordinate"]')
field_ordinate = PageElement(By.ID, 'id_ordinate')
label_abscissa = PageElement(By.XPATH, '//label[@for="id_abscissa"]')
field_abscissa = PageElement(By.ID, 'id_abscissa')
field_srid = PageElement(By.ID, 'id_srid')
field_altitude = PageElement(By.ID, 'id_altitude')
field_asrid = PageElement(By.ID, 'id_asrid')
field_approximate = PageElement(By.ID, 'id_approximate')
button_coordinates = PageElement(By.ID, 'btnCoordinates')
field_stype = PageElement(By.ID, 'id_stype')
stype_option_2 = PageElement(By.XPATH,
'//select[@id="id_stype"]/option[2]')
field_owner = PageElement(By.ID, 'id_owner')
owner_option_2 = PageElement(By.XPATH,
'//select[@id="id_owner"]/option[2]')
field_copyright_holder = PageElement(By.ID, 'id_copyright_holder')
field_copyright_years = PageElement(By.ID, 'id_copyright_years')
button_submit = PageElement(By.XPATH, '//button[@type="submit"]')
# Elements in "View Station" view
button_edit = PageElement(
By.XPATH, '//a[starts-with(@class, "btn") and '
'starts-with(@href, "/stations/edit/")]')
def setUp(self):
create_test_data()
@override_settings(DEBUG=True)
def test_coordinates(self):
# Login
r = selenium.login(username='admin', password='topsecret')
self.assertTrue(r)
# Go to the add new station page and check that the simple view is
# active
selenium.get(self.live_server_url + '/stations/add/')
self.label_ordinate.wait_until_contains("Latitude")
self.assertEqual(self.label_ordinate.text, "Latitude")
self.assertEqual(self.label_abscissa.text, "Longitude")
self.assertFalse(self.field_srid.is_displayed())
self.assertFalse(self.field_asrid.is_displayed())
self.assertFalse(self.field_approximate.is_displayed())
# Switch to the advanced view and check it's ok
self.button_coordinates.click()
self.label_ordinate.wait_until_contains("Ordinate")
self.assertEqual(self.label_ordinate.text, "Ordinate")
self.assertEqual(self.label_abscissa.text, "Abscissa")
self.assertTrue(self.field_srid.is_displayed())
self.assertTrue(self.field_asrid.is_displayed())
self.assertTrue(self.field_approximate.is_displayed())
self.assertEqual(self.field_srid.get_attribute('value'), '4326')
# Go back to the simple view and check it's ok
self.button_coordinates.click()
self.label_ordinate.wait_until_contains("Latitude")
self.assertEqual(self.label_ordinate.text, "Latitude")
self.assertEqual(self.label_abscissa.text, "Longitude")
self.assertFalse(self.field_srid.is_displayed())
self.assertFalse(self.field_asrid.is_displayed())
self.assertFalse(self.field_approximate.is_displayed())
# Enter a latitude and longitude and other data and submit
self.field_ordinate.send_keys('37.97522')
self.field_abscissa.send_keys('23.73700')
self.stype_option_2.click()
self.owner_option_2.click()
self.field_copyright_holder.send_keys('Alice')
self.field_copyright_years.send_keys('2015')
self.button_submit.click()
# Wait for the response, then go to edit the station and check that
# it's the simple view
self.button_edit.wait_until_is_displayed()
self.button_edit.click()
self.label_ordinate.wait_until_contains("Latitude")
self.assertEqual(self.label_ordinate.text, "Latitude")
self.assertEqual(self.label_abscissa.text, "Longitude")
self.assertFalse(self.field_srid.is_displayed())
self.assertFalse(self.field_asrid.is_displayed())
self.assertFalse(self.field_approximate.is_displayed())
self.assertEqual(self.field_ordinate.get_attribute('value'),
'37.97522')
self.assertEqual(self.field_abscissa.get_attribute('value'),
'23.737')
# Switch to the advanced view
self.button_coordinates.click()
self.label_ordinate.wait_until_contains("Ordinate")
self.assertEqual(self.label_ordinate.text, "Ordinate")
self.assertEqual(self.label_abscissa.text, "Abscissa")
self.assertTrue(self.field_srid.is_displayed())
self.assertTrue(self.field_asrid.is_displayed())
self.assertTrue(self.field_approximate.is_displayed())
self.assertEqual(self.field_srid.get_attribute('value'), '4326')
# Enter some advanced data and submit
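# SRID 2100 is the Greek Grid (GGRS87), a projected system; with a
# non-4326 SRID the coordinates can no longer be shown as plain
# latitude/longitude.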
self.field_ordinate.clear()
self.field_ordinate.send_keys('4202810.33')
self.field_abscissa.clear()
self.field_abscissa.send_keys('476751.84')
self.field_srid.clear()
self.field_srid.send_keys('2100')
self.button_submit.click()
# Go to the edit page again, and check that the advanced view shows
self.button_edit.wait_until_is_displayed()
self.button_edit.click()
self.label_ordinate.wait_until_is_displayed()
time.sleep(1) # Wait for JavaScript to take action
self.assertEqual(self.label_ordinate.text, "Ordinate")
self.assertEqual(self.label_abscissa.text, "Abscissa")
self.assertTrue(self.field_srid.is_displayed())
self.assertTrue(self.field_asrid.is_displayed())
self.assertTrue(self.field_approximate.is_displayed())
self.assertEqual(self.field_srid.get_attribute('value'), '2100')
self.assertEqual(self.field_ordinate.get_attribute('value'),
'4202810.33')
self.assertEqual(self.field_abscissa.get_attribute('value'),
'476751.84')
# It should be impossible to change to the simple view
self.assertFalse(self.button_coordinates.is_displayed())
@skipUnless(selenium, 'Selenium is missing or unconfigured')
class ListStationsVisibleOnMapTestCase(SeleniumTestCase):
button_limit_to_map = PageElement(By.ID, 'limit-to-map')
td_komboti = PageElement(By.XPATH, '//td[text()="Komboti"]')
td_agios_athanasios = PageElement(By.XPATH,
'//td[text()="Agios Athanasios"]')
td_tharbad = PageElement(By.XPATH, '//td[text()="Tharbad"]')
def setUp(self):
create_test_data()
def test_list_stations_visible_on_map(self):
# Visit site and wait until three stations are shown
selenium.get(self.live_server_url)
self.td_komboti.wait_until_is_displayed()
self.td_agios_athanasios.wait_until_is_displayed()
self.td_tharbad.wait_until_is_displayed()
# Zoom the map to an area that covers only two of these stations.
# The co-ordinates below are 21, 39, 22, 40 in srid=3857.
selenium.execute_script("""
enhydris.map.zoomToExtent([2337700, 4721700, 2449000, 4865900]);
""")
# Click on "List stations visible on map"
self.button_limit_to_map.click()
# Now only two stations should be displayed
self.td_komboti.wait_until_is_displayed()
self.td_agios_athanasios.wait_until_is_displayed()
self.assertFalse(self.td_tharbad.exists())
| (code above) | ellak-monades-aristeias/enhydris | enhydris/hcore/tests/test_views.py | Python | agpl-3.0 | 53,671 | ["VisIt"] | 0a02ae6686a7450bf79b5c7e3a73302b235f0d000cf219c309c56465f4c67e53 |
from django.db import models
from django.apps import apps
from django.core.exceptions import ValidationError
from edc_base.model.validators import (date_not_before_study_start, date_not_future,
datetime_not_future, datetime_not_before_study_start)
from edc_constants.choices import POS_NEG_UNTESTED_REFUSAL, YES_NO_NA, POS_NEG, YES_NO
from edc_constants.constants import NO, YES, POS, NEG
from edc_registration.models import RegisteredSubject
from .enrollment_helper import EnrollmentHelper
class EnrollmentMixin(models.Model):
"""Base Model for antenal enrollment"""
registered_subject = models.OneToOneField(RegisteredSubject)
report_datetime = models.DateTimeField(
verbose_name="Report date",
validators=[
datetime_not_before_study_start,
datetime_not_future, ],
help_text='')
enrollment_hiv_status = models.CharField(
max_length=15,
null=True,
editable=False,
help_text='Auto-filled by enrollment helper')
date_at_32wks = models.DateField(
null=True,
editable=False,
help_text='Auto-filled by enrollment helper')
is_eligible = models.BooleanField(
editable=False)
pending_ultrasound = models.BooleanField(
editable=False)
is_diabetic = models.CharField(
verbose_name='Are you diabetic?',
choices=YES_NO,
help_text='INELIGIBLE if YES',
max_length=3)
will_breastfeed = models.CharField(
verbose_name='Are you willing to breast-feed your child for 6 months?',
choices=YES_NO,
help_text='INELIGIBLE if NO',
max_length=3)
will_remain_onstudy = models.CharField(
verbose_name="Are you willing to remain in the study for the child's first three year of life",
choices=YES_NO,
help_text='INELIGIBLE if NO',
max_length=3)
current_hiv_status = models.CharField(
verbose_name="What is your current HIV status?",
choices=POS_NEG_UNTESTED_REFUSAL,
max_length=30,
help_text=("if POS or NEG, ask for documentation."))
evidence_hiv_status = models.CharField(
verbose_name="(Interviewer) Have you seen evidence of the HIV result?",
max_length=15,
null=True,
blank=False,
choices=YES_NO_NA,
help_text=("evidence = clinic and/or IDCC records. check regimes/drugs. If NO, more criteria required."))
week32_test = models.CharField(
verbose_name="Have you tested for HIV before or during this pregnancy?",
choices=YES_NO,
default=NO,
max_length=3)
week32_test_date = models.DateField(
verbose_name="Date of HIV Test",
validators=[date_not_future],
null=True,
blank=True)
week32_result = models.CharField(
verbose_name="What was your result?",
choices=POS_NEG,
max_length=15,
null=True,
blank=True)
evidence_32wk_hiv_status = models.CharField(
verbose_name="(Interviewer) Have you seen evidence of the result from HIV test on or before this pregnancy?",
max_length=15,
null=True,
blank=False,
choices=YES_NO_NA,
help_text=("evidence = clinic and/or IDCC records. check regimes/drugs."))
will_get_arvs = models.CharField(
verbose_name="(Interviewer) If HIV+ve, do records show that participant is taking, is prescribed,"
"or will be prescribed ARVs (if newly diagnosed) during pregnancy?",
choices=YES_NO_NA,
null=True,
blank=False,
max_length=15,
help_text=("If found POS by RAPID TEST. Then answer YES, can take them OFF STUDY at birth visit if there were"
" not on therapy for atleast 4 weeks."))
rapid_test_done = models.CharField(
verbose_name="Was a rapid test processed?",
choices=YES_NO_NA,
null=True,
blank=False,
max_length=15,
help_text=(
'Remember, rapid test is for NEG, UNTESTED, UNKNOWN and Don\'t want to answer.'))
rapid_test_date = models.DateField(
verbose_name="Date of rapid test",
null=True,
validators=[
date_not_before_study_start,
date_not_future],
blank=True)
rapid_test_result = models.CharField(
verbose_name="What is the rapid test result?",
choices=POS_NEG,
max_length=15,
null=True,
blank=True)
unenrolled = models.TextField(
verbose_name="Reason not enrolled",
max_length=350,
null=True,
editable=False)
def __str__(self):
return "{0} {1}".format(
self.registered_subject.subject_identifier,
self.registered_subject.first_name)
def save(self, *args, **kwargs):
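# Derive the auto-filled fields (EDD by LMP, gestational age, HIV
# status, date at 32 weeks, eligibility) from the enrollment helper
# before saving.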
enrollment_helper = EnrollmentHelper(instance_antenatal=self)
# if not enrollment_helper.validate_rapid_test():
# raise ValidationError('Ensure a rapid test id done for this subject.')
self.edd_by_lmp = enrollment_helper.evaluate_edd_by_lmp
self.ga_lmp_enrollment_wks = enrollment_helper.evaluate_ga_lmp(
self.report_datetime.date())
self.enrollment_hiv_status = enrollment_helper.enrollment_hiv_status
self.date_at_32wks = enrollment_helper.date_at_32wks
if not self.ultrasound:
self.pending_ultrasound = enrollment_helper.pending
self.is_eligible = self.antenatal_criteria(enrollment_helper)
self.unenrolled = self.unenrolled_error_messages()
super(EnrollmentMixin, self).save(*args, **kwargs)
def antenatal_criteria(self, enrollment_helper):
"""Returns True if basic criteria is met for enrollment."""
if self.pending_ultrasound:
basic_criteria = False
else:
lmp_to_use = self.ga_lmp_enrollment_wks if self.ga_lmp_enrollment_wks else self.ultrasound.ga_confirmed
basic_criteria = (lmp_to_use >= 16 and lmp_to_use <= 36 and
enrollment_helper.no_chronic_conditions() and self.will_breastfeed == YES and
self.will_remain_onstudy == YES and
(self.ultrasound.pass_antenatal_enrollment if self.ultrasound else True) and
(self.delivery.keep_on_study if self.delivery else True))
if basic_criteria and self.enrollment_hiv_status == POS and self.will_get_arvs == YES:
return True
elif basic_criteria and self.enrollment_hiv_status == NEG:
return True
else:
return False
def get_registration_datetime(self):
return self.report_datetime
@property
def ultrasound(self):
MaternalUltraSoundInitial = apps.get_model(
'td_maternal', 'MaternalUltraSoundInitial')
try:
return MaternalUltraSoundInitial.objects.get(
maternal_visit__appointment__registered_subject=self.registered_subject)
except MaternalUltraSoundInitial.DoesNotExist:
return None
@property
def delivery(self):
MaternalLabourDel = apps.get_model('td_maternal', 'MaternalLabourDel')
try:
return MaternalLabourDel.objects.get(
registered_subject=self.registered_subject)
except MaternalLabourDel.DoesNotExist:
return None
@property
def subject_identifier(self):
return self.registered_subject.subject_identifier
def get_subject_identifier(self):
return self.registered_subject.subject_identifier
class Meta:
abstract = True
| (code above) | botswana-harvard/tshilo-dikotla | td_maternal/models/enrollment_mixin.py | Python | gpl-2.0 | 7,736 | ["VisIt"] | f18d146d5472c68f9c3608365fda6d5da2befa8e659f5d5220068b9d83f8a5ca |
"""
Restricted open-shell Kohn-Sham
"""
import numpy as np
from frankenstein.sgscf import rks, rhf, rohf
from pyscf.dft import uks as pyuks
def get_E(mf):
h1e = mf.h1e
dmm = mf.rdm1m
dmt = mf.rdm1t
vhfm = mf.vhfm
vhft = mf.vhft
e1m = sum([np.trace(h1e@dmm[s]) for s in [0,1]])
em_scf = e1m + vhfm.ecoul + vhfm.exc
e1t = sum([np.trace(h1e@dmt[s]) for s in [0,1]])
et_scf = e1t + vhft.ecoul + vhft.exc
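# Spin-purified energy, E = 2*E_mixed - E_triplet (presumably the
# standard ROKS sum rule for the singlet excited state).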
e_scf = em_scf*2. - et_scf
mf.em_tot = em_scf + mf.e_nuc
mf.et_tot = et_scf + mf.e_nuc
return e_scf
def update_hf(mf):
""" Compute rdm1 and fock
"""
mol = mf.mol
h1e = mf.h1e
dmm_last = mf.rdm1m
dmt_last = mf.rdm1t
mf.rdm1c, mf.rdm1s = mf.make_rdm1()
mf.rdm1m = mf.rdm1c + mf.rdm1s
mf.rdm1t = np.asarray([mf.rdm1c+np.sum(mf.rdm1s,axis=0), mf.rdm1c])
mf.vhfm = mf.get_veff(mol, dm=mf.rdm1m, dm_last=dmm_last,
vhf_last=mf.vhfm)
mf.vhft = mf.get_veff(mol, dm=mf.rdm1t, dm_last=dmt_last,
vhf_last=mf.vhft)
mf.vhfc = np.sum(mf.vhfm,axis=0) - 0.5*np.sum(mf.vhft,axis=0)
mf.vhfs = 2*mf.vhfm - mf.vhft[0]
class ROKS(pyuks.UKS):
# methods for initialization
get_init_guess_DC = rohf.get_init_guess_DC
get_optimizer = rohf.get_optimizer
get_init_guess_ov = rohf.get_init_guess_ov
get_init_guess_ov_zeros = rohf.get_init_guess_ov_zeros
preproc_mo_coeff = rohf.preproc_mo_coeff
# methods for output
print_info = rohf.print_info
# methods for SCF
get_exp_ov = rohf.get_exp_ov
update_mo_coeff = rohf.update_mo_coeff
save_new_origin = rohf.save_new_origin
back_to_origin = rohf.back_to_origin
get_occm = rohf.get_occm
get_occt = rohf.get_occt
make_rdm1 = rohf.make_rdm1
get_E = get_E
update_hf = update_hf
get_grad_E = rohf.get_grad_E
get_S01 = rohf.get_S01
update_all = rohf.update_all
# methods for err/conv check
get_diis_err = rohf.get_diis_err
check_conv = rohf.check_conv
# methods for post-processing
dumpmoe = rohf.dumpmoe
post_proc = rohf.post_proc
save_chkfile = rohf.save_chkfile
analyze = rohf.analyze
# methods for GDM
get_value_gdm = rohf.get_value_gdm
get_grad_gdm = rohf.get_grad_gdm
pseudo_cano_ro = rohf.pseudo_cano_ro
get_prec = rohf.get_prec
get_prec_Q = rohf.get_prec_Q
parallel_transport_gdm = rohf.parallel_transport_gdm
regularize_step_gdm = rohf.regularize_step_gdm
update_gdm = rohf.update_gdm
# methods for DIIS
get_fock_diis = rohf.get_fock_diis
get_err_diis = rohf.get_err_diis
update_diis = rohf.update_diis
def __init__(self, mol):
pyuks.UKS.__init__(self, mol)
rhf.scf_common_init(self)
self.converged = False
self.h1e = None
self.s1e = None
self.mo_coeff0 = None # RHF reference
self.mo_coeff_old = None
self.mo_coeff = None
self.rdm1 = 0.
self.rdm1c = 0.
self.rdm1s = 0.
self.vhfc = 0.
self.vhfs = 0.
self.rdm1m = 0.
self.rdm1t = 0.
self.vhfm = 0.
self.vhft = 0.
self.de_scf = 0.
self.S01 = None
self.err_diis = float("INF")
self.grad_E = None
self.e_mp2 = None
def kernel(self, dm0=None, mo_coeff0=None, ov0=None):
self.converged = rks.kernel(self, dm0=dm0, mo_coeff0=mo_coeff0, ov0=ov0)
@property
def name(self):
return str(self.__class__).split(".")[-1].split("'")[0]
@property
def nao(self):
return self.mol.nao_nr()
@property
def nmo(self):
return self.mo_coeff0.shape[1]
@property
def no(self):
return self.mol.nelectron//2
@property
def nv(self):
return self.nao - self.no
@property
def icsv(self):
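# Orbital index lists for [core, active (the HOMO/LUMO pair),
# virtual], as consumed by mo_coeff_core/mo_coeff_act/mo_coeff_vir.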
no = self.no
nmo = self.nmo
return [list(range(no-1)),[no-1,no],list(range(no+1,nmo))]
@property
def ncsv(self):
return [len(self.icsv[s]) for s in [0,1,2]]
@property
def nco(self):
return len(self.icsv[0])
@property
def nso(self):
return len(self.icsv[1])
@property
def ncv(self):
return len(self.icsv[2])
@property
def ov_size_list(self):
no, na, nv = self.ncsv
return [no*na, no*nv, na*nv, 1]
@property
def ov_shift(self):
return np.cumsum(self.ov_size_list, dtype=int)
@property
def ov_size(self):
return sum(self.ov_size_list)
@property
def e_nuc(self):
return self.mol.energy_nuc()
@property
def mo_coeff_core(self):
return self.mo_coeff[:,:self.ncsv[0]]
@property
def mo_coeff_act(self):
nco = self.ncsv[0]
return [self.mo_coeff[:,nco], self.mo_coeff[:,nco+1]]
@property
def mo_coeff_vir(self):
no, na = self.ncsv[:2]
return self.mo_coeff[:,no+na:]
@property
def err_grad_E(self):
grad_E = self.get_grad_E() if self.grad_E is None else self.grad_E
return np.mean(grad_E**2.)**0.5
@property
def fockc(self):
return self.h1e + self.vhfc
@property
def focks(self):
return self.h1e + self.vhfs
@property
def fockm(self):
return self.h1e + self.vhfm
@property
def fockt(self):
return self.h1e + self.vhft
if __name__ == "__main__":
import sys
try:
geom = sys.argv[1]
basis = sys.argv[2]
xc = sys.argv[3]
ii = int(sys.argv[4])
aa = int(sys.argv[5])
except (IndexError, ValueError):
print("Usage: geom, basis, xc, ii, aa")
sys.exit(1)
from frankenstein.tools.pyscf_utils import get_pymol
pymol = get_pymol(geom, basis, verbose=3)
rmf = rks.RKS(pymol)
rmf.xc = xc
rmf.kernel()
mf = ROKS(pymol)
mf.orb_swap = [[[rmf.no-1-ii,rmf.no-1+aa]], []]
mf.xc = xc
mf.method = "diis"
mf.mom_start = 1
mf.kernel(mo_coeff0=rmf.mo_coeff.copy())
mf.analyze(C0=rmf.mo_coeff)
eex = (mf.e_tot - rmf.e_tot) * 27.211399
print("Eex (%s) = %.3f eV" % (xc, eex))
for orth in [0,1,-1,2]:
mu, f = rmf.get_tdm(mf, orth=orth)
mu2 = np.sum(mu**2.)
print(("orth = %2d %.6f %10.6f "+"%.4f "*3)%(orth,f,mu2,*mu))
| (code above) | hongzhouye/frankenstein | sgscf/roks.py | Python | bsd-3-clause | 6,266 | ["PyMOL", "PySCF"] | ae4fba801ee47e08215fb838f5fa53e458a6f2dbcc0ff1c68b8ab3e5f11bef32 |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementing a Gaussian ENN in JAX.
This model came out of brainstorming with who/benvanroy, who/iosband.
Each linear unit in the neural net is augmented:
Wx + b --> Wx + b + c Z, for Z ~ N(0, 1)
You are adding a learnable noise scale c for each linear unit's bias.
This is an implementation framing that network as an ENN.
"""
from typing import Callable, Sequence
from enn import base
from enn.networks import indexers
import haiku as hk
import haiku.experimental as hke
import jax
import jax.numpy as jnp
def _is_linear_bias(context: hke.ParamContext):
return (context.full_name.endswith('/b')
and isinstance(context.module, hk.Linear))
def make_enn_creator(init_scale: float = 1.):
"""Make enn_creator initializing c unit to init_scale."""
custom_init = lambda shape, dtype: init_scale * jnp.ones(shape, dtype)
def enn_creator(next_creator, shape, dtype, init, context):
"""Create gaussian enn linear layer."""
# TODO(author2): How to import hk._src.base types correctly?
if _is_linear_bias(context): # Append gaussian bias term
standard_bias = next_creator(shape, dtype, init)
gaussian_bias = next_creator(shape, dtype, custom_init)
return standard_bias, gaussian_bias
else: # Return the usual creator
return next_creator(shape, dtype, init)
return enn_creator
def enn_getter(next_getter, value, context):
"""Get variables for gaussian enn linear layer."""
# TODO(author2): How to import hk._src.base types correctly?
if _is_linear_bias(context):
standard_bias = next_getter(value[0])
gaussian_bias = next_getter(value[1])
noise = jax.random.normal(hk.next_rng_key(), standard_bias.shape)
return standard_bias + gaussian_bias * noise
else:
return next_getter(value)
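# Editor's note: enn_creator and enn_getter act as a pair. The creator
# registers two parameters wherever a hk.Linear bias would live (the usual
# bias plus a learnable noise scale), and the getter recombines them at
# apply time as b + c * Z with Z ~ N(0, 1) drawn from the module's rng key.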
class GaussianNoiseEnn(base.EpistemicNetwork):
"""GaussianNoiseEnn from callable module."""
def __init__(self,
module_ctor: Callable[[], hk.Module],
init_scale: float = 1.):
"""GaussianNoiseEnn from callable module."""
enn_creator = make_enn_creator(init_scale=init_scale)
def net_fn(inputs: base.Array) -> base.Array:
with hke.custom_getter(enn_getter), hke.custom_creator(enn_creator):
output = module_ctor()(inputs) # pytype: disable=not-callable
return output
# TODO(author2): Note that the GaussianENN requires a rng_key in place of an
# index. Therefore we do *not* hk.without_apply_rng.
transformed = hk.transform(net_fn)
super().__init__(
apply=lambda params, x, z: transformed.apply(params, z, x),
init=lambda rng, x, z: transformed.init(rng, x),
indexer=indexers.PrngIndexer(),
)
class GaussianNoiseMLP(base.EpistemicNetwork):
"""Gaussian Enn on a standard MLP."""
def __init__(self, output_sizes: Sequence[int], init_scale: float = 1.):
"""Gaussian Enn on a standard MLP."""
enn = GaussianNoiseEnn(lambda: hk.nets.MLP(output_sizes), init_scale)
super().__init__(enn.apply, enn.init, enn.indexer)
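# Editor's sketch (an assumption, not part of the original file): minimal
# usage of GaussianNoiseMLP, assuming the enn/haiku/jax imports above resolve.
if __name__ == '__main__':
  key = jax.random.PRNGKey(0)
  enn = GaussianNoiseMLP(output_sizes=[16, 1], init_scale=1.)
  x = jnp.ones([4, 8])           # dummy batch: 4 inputs with 8 features
  z = enn.indexer(key)           # the epistemic index is itself a PRNG key
  params = enn.init(key, x, z)
  out = enn.apply(params, x, z)  # each fresh z resamples the Gaussian bias noise
  print(out.shape)               # expected: (4, 1)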
|
deepmind/enn
|
enn/networks/gaussian_enn.py
|
Python
|
apache-2.0
| 3,742
|
[
"Gaussian"
] |
3d242c509f6ad7f6c43a500fb4a453264015745060cd6c1a6423e165fb5dc37e
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import gtk
import mock
from stoqlib.gui.editors.baseeditor import BaseEditorSlave, BaseEditor
from stoqlib.gui.test.uitestutils import GUITest
from stoqlib.gui.events import EditorCreateEvent
class _TestEditorSlave(BaseEditorSlave):
model_type = object
on_confirm_count = 0
on_cancel_count = 0
def on_confirm(self):
self.on_confirm_count += 1
def on_cancel(self):
self.on_cancel_count += 1
def attach_slave(self, holder, slave):
# mimic attach slave behaviour
self.slaves[holder] = slave
class _TempModel(object):
def __init__(self, name):
self.name = name
class _TestEditor(BaseEditor):
model_type = _TempModel
gladefile = 'HolderTemplate'
def create_model(self, store):
return _TempModel('new model')
class TestBaseEditorSlave(GUITest):
"""Tests for :class:`stoqlib.editors.baseeditor.BaseEditorSlave`"""
def setUp(self):
super(TestBaseEditorSlave, self).setUp()
self.slave_a = _TestEditorSlave(self.store, object())
self.slave_b = _TestEditorSlave(self.store, object())
self.slave_c = _TestEditorSlave(self.store, object())
self.slave_d = _TestEditorSlave(self.store, object())
self.slaves = [self.slave_a, self.slave_b, self.slave_c, self.slave_d]
# This will generate the following:
# [A [B [C, D]]]
self.slave_b.attach_slave('C', self.slave_c)
self.slave_b.attach_slave('D', self.slave_d)
self.slave_a.attach_slave('B', self.slave_b)
def test_confirm(self):
# none of the slaves should have on_confirm, on_cancel called yet
for slave in self.slaves:
self.assertEqual(slave.on_confirm_count, 0)
self.assertEqual(slave.on_cancel_count, 0)
self.slave_a.confirm()
# now on_confirm should be called once
for slave in self.slaves:
self.assertEqual(slave.on_confirm_count, 1)
self.assertEqual(slave.on_cancel_count, 0)
def test_cancel(self):
# none of the slaves should have on_confirm, on_cancel called yet
for slave in self.slaves:
self.assertEqual(slave.on_confirm_count, 0)
self.assertEqual(slave.on_cancel_count, 0)
self.slave_a.cancel()
# now on_cancel should be called once
for slave in self.slaves:
self.assertEqual(slave.on_confirm_count, 0)
self.assertEqual(slave.on_cancel_count, 1)
def test_validate_confirm(self):
# test each time making one slave return False on validate_confirm.
for slave in self.slaves:
_old_validate_confirm = slave.validate_confirm
slave.validate_confirm = lambda: False
self.slave_a.confirm()
slave.validate_confirm = _old_validate_confirm
# on_confirm should not get called on any slave here
for slave in self.slaves:
self.assertEqual(slave.on_confirm_count, 0)
self.assertEqual(slave.on_cancel_count, 0)
class TestBaseEditor(GUITest):
def test_event_with_model(self):
obj = _TempModel(name='existing model')
self._callcount = 0
def _callback(editor, model, store, visual_mode):
self._callcount += 1
self.assertEqual(model.name, 'existing model')
EditorCreateEvent.connect(_callback)
_TestEditor(self.store, obj)
self.assertEqual(self._callcount, 1)
EditorCreateEvent.disconnect(_callback)
def test_event_without_model(self):
self._callcount = 0
def _callback(editor, model, store, visual_mode):
self._callcount += 1
self.assertEqual(model.name, 'new model')
EditorCreateEvent.connect(_callback)
_TestEditor(self.store, None)
self.assertEqual(self._callcount, 1)
EditorCreateEvent.disconnect(_callback)
@mock.patch('stoqlib.gui.editors.baseeditor.yesno')
def test_cancel(self, yesno):
yesno.return_value = False
sellable = self.create_sellable()
# Flush the store so any modifications to sellable will mark it as dirty
self.store.flush()
editor = _TestEditor(self.store, None)
self.assertTrue(editor.cancel())
self.assertEqual(yesno.call_count, 0)
# Make a modification so the store has pending changes
sellable.description = u'Other description'
self.assertTrue(editor.cancel())
self.assertEqual(yesno.call_count, 0)
# Set need_cancel_confirmation to trigger the yesno
editor.need_cancel_confirmation = True
self.assertFalse(editor.cancel())
yesno.assert_called_once_with(
"If you cancel this dialog all changes will be lost. "
"Are you sure?", gtk.RESPONSE_NO, "Cancel", "Don't cancel")
|
tiagocardosos/stoq
|
stoqlib/gui/test/test_baseeditor.py
|
Python
|
gpl-2.0
| 5,747
|
[
"VisIt"
] |
1de7e2e59546099b88665d2b16f8da0a4cc35758f5ec609a8bd38a96b03e0de0
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import logging
import os
import pyauto_functional # Must be imported before pyauto
import pyauto
class ThemesTest(pyauto.PyUITest):
"""TestCase for Themes."""
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
"""
while True:
raw_input('Hit <enter> to dump info.. ')
self.pprint(self.GetThemeInfo())
def _SetThemeAndVerify(self, crx_file, theme_name):
"""Set theme and verify infobar appears and the theme name is correct.
Args:
crx_file: Path to .crx file to be set as theme.
theme_name: String to be compared to GetThemeInfo()['name'].
"""
# Starting infobar count is the number of non-themes infobars.
infobars = self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars']
infobar_count = 0
for infobar in infobars:
if not (('text' in infobar) and
infobar['text'].startswith('Installed theme')):
infobar_count += 1
self.SetTheme(crx_file)
# Verify infobar shows up.
self.assertTrue(self.WaitForInfobarCount(infobar_count + 1))
self.assertTrue(self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars'])
# Verify theme name is correct.
self.assertEqual(theme_name, self.GetThemeInfo()['name'])
def testSetTheme(self):
"""Verify theme install."""
self.assertFalse(self.GetThemeInfo()) # Verify there's no theme at startup
crx_file = os.path.abspath(
os.path.join(self.DataDir(), 'extensions', 'theme.crx'))
self._SetThemeAndVerify(crx_file, 'camo theme')
def testThemeInFullScreen(self):
"""Verify theme can be installed in FullScreen mode."""
self.ApplyAccelerator(pyauto.IDC_FULLSCREEN)
self.assertFalse(self.GetThemeInfo()) # Verify there's no theme at startup
crx_file = os.path.abspath(
os.path.join(self.DataDir(), 'extensions', 'theme.crx'))
self._SetThemeAndVerify(crx_file, 'camo theme')
def testThemeReset(self):
"""Verify theme reset."""
crx_file = os.path.abspath(
os.path.join(self.DataDir(), 'extensions', 'theme.crx'))
self.SetTheme(crx_file)
self.assertTrue(self.ResetToDefaultTheme())
self.assertFalse(self.GetThemeInfo())
def testThemeUndo(self):
"""Verify theme undo."""
crx_file = os.path.abspath(
os.path.join(self.DataDir(), 'extensions', 'theme.crx'))
self._SetThemeAndVerify(crx_file, 'camo theme')
# Undo theme install.
infobars = self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars']
for index, infobar in enumerate(infobars):
if (('text' in infobar) and
infobar['text'].startswith('Installed theme')):
theme_index = index
break
self.PerformActionOnInfobar('cancel', infobar_index=theme_index)
self.assertFalse(self.GetThemeInfo())
def testThemeOverInstall(self):
"""Verify that can install a theme over an existing theme."""
crx_file = os.path.abspath(
os.path.join(self.DataDir(), 'extensions', 'theme.crx'))
self._SetThemeAndVerify(crx_file, 'camo theme')
# Install a different theme.
crx_file = os.path.abspath(
os.path.join(self.DataDir(), 'extensions', 'theme2.crx'))
self._SetThemeAndVerify(crx_file, 'snowflake theme')
def _ReturnCrashingThemes(self, themes, group_size, urls):
"""Install the given themes in groups of group_size and return the
group of themes that crashes (if any).
Note: restarts the browser at the beginning of the function.
Args:
themes: A list of themes to install.
group_size: The number of themes to install at one time.
urls: The list of urls to visit.
Returns:
Group of themes that crashed (if any).
"""
self.RestartBrowser()
curr_theme = 0
num_themes = len(themes)
while curr_theme < num_themes:
logging.debug('New group of %d themes.' % group_size)
group_end = curr_theme + group_size
this_group = themes[curr_theme:group_end]
# Apply each theme in this group.
for theme in this_group:
logging.debug('Applying theme: %s' % theme)
self.SetTheme(theme)
for url in urls:
self.NavigateToURL(url)
def _LogAndReturnCrashing():
logging.debug('Crashing themes: %s' % this_group)
return this_group
# Assert that there is at least 1 browser window.
try:
num_browser_windows = self.GetBrowserWindowCount()
except:  # any failure here is taken to mean the browser crashed
return _LogAndReturnCrashing()
else:
if not num_browser_windows:
return _LogAndReturnCrashing()
curr_theme = group_end
# None of the themes crashed.
return None
def Runner(self):
"""Apply themes; verify that theme has been applied and browser doesn't
crash.
This does not get run automatically. To run:
python themes.py themes.ThemesTest.Runner
Note: this test requires that a directory of crx files called 'themes'
exists in the data directory.
"""
themes_dir = os.path.join(self.DataDir(), 'themes')
urls_file = os.path.join(self.DataDir(), 'urls.txt')
assert os.path.exists(themes_dir), \
'The dir "%s" must exist' % os.path.abspath(themes_dir)
group_size = 20
num_urls_to_visit = 100
with open(urls_file) as f:
  urls = [l.rstrip() for l in f.readlines()[:num_urls_to_visit]]
failed_themes = glob.glob(os.path.join(themes_dir, '*.crx'))
while failed_themes and group_size:
failed_themes = self._ReturnCrashingThemes(failed_themes, group_size,
urls)
group_size = group_size // 2
self.assertFalse(failed_themes,
'Theme(s) in failing group: %s' % failed_themes)
if __name__ == '__main__':
pyauto_functional.Main()
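# Editor's sketch (an assumption, not part of the original test): Runner and
# _ReturnCrashingThemes narrow a failing set by repeatedly taking the first
# group that crashes and halving the group size. The same schedule, with an
# arbitrary predicate standing in for the browser, looks like this:
def _bisect_crashers(items, is_bad_group, group_size=20):
  while items and group_size:
    found = None
    for start in range(0, len(items), group_size):
      group = items[start:start + group_size]
      if is_bad_group(group):  # stands in for "the browser crashed"
        found = group
        break
    items = found or []
    group_size //= 2
  return items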
|
keishi/chromium
|
chrome/test/functional/themes.py
|
Python
|
bsd-3-clause
| 5,976
|
[
"VisIt"
] |
439c49b303493391fd3bf726850e572d3fd559ccb14114e933911b3216baa840
|
#!/usr/bin/env python
#
# This code was copied from the data generation program of Tencent Alchemy
# project (https://github.com/tencent-alchemy).
#
#
# Copyright 2019 Tencent America LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
from pyscf import lib
from pyscf.lib import logger
from pyscf.grad import uks as uks_grad
from pyscf.df.grad import rhf as df_rhf_grad
def get_veff(ks_grad, mol=None, dm=None):
'''Coulomb + XC functional
'''
if mol is None: mol = ks_grad.mol
if dm is None: dm = ks_grad.base.make_rdm1()
t0 = (logger.process_clock(), logger.perf_counter())
mf = ks_grad.base
ni = mf._numint
if ks_grad.grids is not None:
grids = ks_grad.grids
else:
grids = mf.grids
if grids.coords is None:
grids.build(with_non0tab=True)
if mf.nlc != '':
raise NotImplementedError
# Enabling range-separated hybrids
omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin)
mem_now = lib.current_memory()[0]
max_memory = max(2000, ks_grad.max_memory*.9-mem_now)
if ks_grad.grid_response:
exc, vxc = uks_grad.get_vxc_full_response(
ni, mol, grids, mf.xc, dm,
max_memory=max_memory, verbose=ks_grad.verbose)
logger.debug1(ks_grad, 'sum(grids response) %s', exc.sum(axis=0))
else:
exc, vxc = uks_grad.get_vxc(
ni, mol, grids, mf.xc, dm,
max_memory=max_memory, verbose=ks_grad.verbose)
t0 = logger.timer(ks_grad, 'vxc', *t0)
if abs(hyb) < 1e-10:
vj = ks_grad.get_j(mol, dm)
vxc += vj[0] + vj[1]
if ks_grad.auxbasis_response:
e1_aux = vj.aux
else:
vj, vk = ks_grad.get_jk(mol, dm)
if ks_grad.auxbasis_response:
vk_aux = vk.aux * hyb
vk *= hyb
if abs(omega) > 1e-10: # For range separated Coulomb operator
raise NotImplementedError  # RSH gradients; the code below is currently unreachable
vk_lr = ks_grad.get_k(mol, dm, omega=omega)
vk += vk_lr * (alpha - hyb)
if ks_grad.auxbasis_response:
vk_aux += vk_lr.aux * (alpha - hyb)
vxc += vj[0] + vj[1] - vk
if ks_grad.auxbasis_response:
e1_aux = vj.aux - vk_aux
if ks_grad.auxbasis_response:
logger.debug1(ks_grad, 'sum(auxbasis response) %s', e1_aux.sum(axis=0))
vxc = lib.tag_array(vxc, exc1_grid=exc, aux=e1_aux)
else:
vxc = lib.tag_array(vxc, exc1_grid=exc)
return vxc
class Gradients(uks_grad.Gradients):
def __init__(self, mf):
# Whether to include the response of DF auxiliary basis when computing
# nuclear gradients of J/K matrices
self.auxbasis_response = True
uks_grad.Gradients.__init__(self, mf)
get_jk = df_rhf_grad.get_jk
def get_j(self, mol=None, dm=None, hermi=0):
return self.get_jk(mol, dm, with_k=False)[0]
def get_k(self, mol=None, dm=None, hermi=0):
return self.get_jk(mol, dm, with_j=False)[1]
get_veff = get_veff
def extra_force(self, atom_id, envs):
if self.auxbasis_response:
e1 = uks_grad.Gradients.extra_force(self, atom_id, envs)
return e1 + envs['vhf'].aux[atom_id]
else:
return 0
Grad = Gradients
if __name__ == '__main__':
from pyscf import gto
from pyscf import dft
mol = gto.Mole()
mol.atom = [
['O' , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
mol.basis = '631g'
mol.charge = 1
mol.spin = 1
mol.build()
mf = dft.UKS(mol).density_fit()
mf.conv_tol = 1e-12
e0 = mf.scf()
g = Gradients(mf).set(auxbasis_response=False)
print(lib.finger(g.kernel()) - -0.12092643506961044)  # prints ~0 when it matches the reference value (same for the checks below)
g = Gradients(mf)
print(lib.finger(g.kernel()) - -0.12092884149543644)
# O -0.0000000000 0.0000000000 0.0533109212
# H -0.0000000000 0.0675360271 -0.0266615265
# H 0.0000000000 -0.0675360271 -0.0266615265
g.grid_response = True
# O -0.0000000000 0.0000000000 0.0533189584
# H -0.0000000000 0.0675362403 -0.0266594792
# H 0.0000000000 -0.0675362403 -0.0266594792
print(lib.finger(g.kernel()) - -0.12093220332146028)
mf.xc = 'b3lypg'
e0 = mf.kernel()
g = Gradients(mf)
print(lib.finger(g.kernel()) - -0.1020433598546214)
# O -0.0000000000 -0.0000000000 0.0397385108
# H -0.0000000000 0.0587977564 -0.0198734952
# H 0.0000000000 -0.0587977564 -0.0198734952
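# Editor's note (an assumption, not part of the original file): a standard
# sanity check for these analytic gradients is a central finite difference on
# the total energy, e.g. displacing one Cartesian coordinate by +/- 1e-4 Bohr
# and comparing (E(+) - E(-)) / (2e-4) with the matching entry of g.kernel().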
|
sunqm/pyscf
|
pyscf/df/grad/uks.py
|
Python
|
apache-2.0
| 5,117
|
[
"PySCF"
] |
62c8da9330baefb65ed3b9ecaeb9da06080c28a2d7204a37fe99ba4a536b01ab
|
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.core.mail.message import EmailMessage
from django.db import models
from what_apps.people.models import Group, Role, RoleInGroup, RoleHierarchy
class BlastMessage(models.Model):
'''
This is the model whose fields will be a part of the Blast Form
'''
subject = models.CharField(max_length=60)
message = models.TextField()
role = models.ForeignKey('people.Role')
group = models.ForeignKey('people.Group')
send_to_higher_roles = models.BooleanField(default=True)
creator = models.ForeignKey('auth.User', related_name='blasts_sent')
created = models.DateTimeField(auto_now_add=True)
sent = models.DateTimeField(blank=True, null=True)
def get_email_address(self):
return "%s__%s@blasts.slashrootcafe.com" % (self.group.name, self.role.name) #TODO: Unhardcode slashrootcafe.com?
def prepare(self):
return self.subject, self.message, self.creator.email, self.populate_targets()
def populate_targets(self):
user_emails = set()
if self.send_to_higher_roles:
roles = self.role.get_higher_roles(self.group, include_self=True)
roles_in_groups = RoleInGroup.objects.filter(group=self.group, role__in=roles)
users = User.objects.filter(what_groups__role__in=roles_in_groups).distinct()
else:
role_in_group = RoleInGroup.objects.get(role=self.role, group=self.group)
users = role_in_group.users.all()
for user in users:
user_emails.add(user.email)
return user_emails
def send_blast(self):
preparation_tuple = self.prepare()
blast_email_object = EmailMessage(*preparation_tuple, headers={'Reply-To': self.get_email_address()})
return blast_email_object.send()
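# Editor's note (not part of the original file): get_email_address() yields
# addresses of the form "<group>__<role>@blasts.slashrootcafe.com"; e.g. a
# blast for a hypothetical group "kitchen" and role "manager" would use the
# Reply-To address "kitchen__manager@blasts.slashrootcafe.com".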
|
SlashRoot/WHAT
|
what_apps/email_blast/models.py
|
Python
|
mit
| 1,927
|
[
"BLAST"
] |
82c06f2ce05ed97aa6b07389d0b4426ae894c9ac121e35430adf1842a1c233ba
|
import unittest
import json
import os
import igraph as ig
import networkx as nx
from py2cytoscape import util
import scipy as sp
import tempfile
# Utilities
def compare_edge_sets(nx_edges, cy_edges):
edge_set = set()
for cyedge in cy_edges:
source = cyedge['data']['source']
target = cyedge['data']['target']
edge = (int(source), int(target))
edge_set.add(edge)
return edge_set.difference(nx_edges)
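# Editor's note (not part of the original file): compare_edge_sets() reads
# Cytoscape.js edges shaped like {'data': {'source': '1', 'target': '2'}} and
# returns those missing from nx_edges, so e.g.
# compare_edge_sets({(1, 2)}, [{'data': {'source': '1', 'target': '2'}}])
# evaluates to an empty set.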
class NetworkConversionTests(unittest.TestCase):
def setUp(self):
self.cur_dir = os.path.dirname(os.path.realpath(__file__))
pass
def test_networkx_emptynetwork(self):
g = nx.Graph()
cyjs_g = util.from_networkx(g)
print('\n---------- Empty Test Start -----------\n')
print(json.dumps(cyjs_g, indent=4))
self.assertIsNotNone(cyjs_g)
self.assertIsNotNone(cyjs_g['data'])
self.assertEqual(0, len(cyjs_g['elements']['nodes']))
self.assertEqual(0, len(cyjs_g['elements']['edges']))
def test_networkx_edge_attribute(self):
print('\n---------- Edge Att Test Start -----------\n')
g = nx.Graph()
g.add_edge(1, 2, interaction='itr1', score=0.1)
original_edge = g[1][2]
print(original_edge.keys())
cyjs = util.from_networkx(g)
print(json.dumps(cyjs, indent=4))
# There is only one edge, so this should be OK...
edge = cyjs['elements']['edges'][0]
print(json.dumps(edge, indent=4))
self.assertEqual('itr1', cyjs['elements']['edges'][0]['data']['interaction'])
def test_networkx_empty_edge_attribute(self):
print('\n---------- Edge Att Test 2 Start -----------\n')
g = nx.scale_free_graph(5)
cyjs = util.from_networkx(g)
# print(json.dumps(cyjs, indent=4))
# Inspect the first edge (the scale-free graph has several).
edge = cyjs['elements']['edges'][0]
print(json.dumps(edge, indent=4))
self.assertEqual(3, len(edge['data']))
def test_networkx_digraph_edge_attr(self):
print('\n---------- Digraph Edge Att Test Start -----------\n')
g = nx.DiGraph()
g.add_path([0, 1, 2, 3, 4])
eb = nx.edge_betweenness(g)
nx.set_edge_attributes(g, 'eb', eb)
cyjs = util.from_networkx(g)
print(json.dumps(cyjs, indent=4))
# Inspect the first edge of the path.
edge = cyjs['elements']['edges'][0]
self.assertEqual(3, len(edge['data']))
def test_networkx_multidigraph_edge_attr(self):
print('\n---------- Multi-Digraph Edge Att Test Start -----------\n')
g = nx.MultiDiGraph()
g.add_node(1)
g.add_node(2)
g.add_node(3)
g.add_edge(1, 2)
g.add_edge(1, 2, attr_dict={'foo': 'bar'})
g.add_edge(1, 2)
g.add_edge(1, 3)
edges = g.edges(data=True, keys=True)
for edge in edges:
print(edge)
cyjs = util.from_networkx(g)
print(json.dumps(cyjs, indent=4))
edge = cyjs['elements']['edges'][0]
self.assertTrue(3 <= len(edge['data']))
def test_networkx_ba(self):
g = nx.barabasi_albert_graph(100, 3)
nodes = g.nodes()
edges = g.edges()
g.graph['name'] = 'ba test'
cyjs_g = util.from_networkx(g)
print('\n---------- BA graph Test Start -----------\n')
self.assertIsNotNone(cyjs_g)
self.assertIsNotNone(cyjs_g['data'])
self.assertEqual('ba test', cyjs_g['data']['name'])
self.assertEqual(len(nodes), len(cyjs_g['elements']['nodes']))
self.assertEqual(len(edges), len(cyjs_g['elements']['edges']))
diff = compare_edge_sets(set(edges), cyjs_g['elements']['edges'])
self.assertEqual(0, len(diff))
def test_networkx_matrix(self):
print('\n---------- Matrix Test Start -----------\n')
g = nx.barabasi_albert_graph(30, 2)
nodes = g.nodes()
edges = g.edges()
print(edges)
mx1 = nx.adjacency_matrix(g)
fp = tempfile.NamedTemporaryFile()
file_name = fp.name
sp.savetxt(file_name, mx1.toarray(), fmt='%d')
# Load it back to matrix
mx2 = sp.loadtxt(file_name)
fp.close()
g2 = nx.from_numpy_matrix(mx2)
cyjs_g = util.from_networkx(g2)
#print(json.dumps(cyjs_g, indent=4))
self.assertIsNotNone(cyjs_g)
self.assertIsNotNone(cyjs_g['data'])
self.assertEqual(len(nodes), len(cyjs_g['elements']['nodes']))
self.assertEqual(len(edges), len(cyjs_g['elements']['edges']))
# Make sure all edges are reproduced
print(set(edges))
diff = compare_edge_sets(set(edges), cyjs_g['elements']['edges'])
self.assertEqual(0, len(diff))
# def test_networkx_gml(self):
# g = nx.read_gml(self.cur_dir + '/data/galFiltered.gml')
# g.graph['name'] = 'gml_test'
#
# cyjs_g = util.from_networkx(g)
#
# print('\n---------- GML Test Start -----------\n')
# # print(json.dumps(cyjs_g, indent=4))
# self.assertIsNotNone(cyjs_g)
#
# net_data = cyjs_g['data']
# self.assertIsNotNone(net_data)
# self.assertEqual('gml_test', net_data['name'])
# self.assertEqual(331, len(cyjs_g['elements']['nodes']))
# self.assertEqual(362, len(cyjs_g['elements']['edges']))
#
# nodes = cyjs_g['elements']['nodes']
# node0 = nodes[0]
# self.assertEqual(type("1"), type(node0['data']['id']))
def test_networkx_scale_free(self):
g = nx.scale_free_graph(100)
edge_count = g.number_of_edges()
g.graph['name'] = 'scale_free_test'
cyjs_g = util.from_networkx(g)
print('\n---------- Scale free network Test Start -----------\n')
print('Edge count = ' + str(edge_count))
# print(json.dumps(cyjs_g, indent=4))
self.assertIsNotNone(cyjs_g)
net_data = cyjs_g['data']
self.assertIsNotNone(net_data)
self.assertEqual('scale_free_test', net_data['name'])
self.assertEqual(100, len(cyjs_g['elements']['nodes']))
self.assertEqual(edge_count, len(cyjs_g['elements']['edges']))
nodes = cyjs_g['elements']['nodes']
node0 = nodes[0]
self.assertEqual(type("1"), type(node0['data']['id']))
def test_networkx_parse_network(self):
f = open(self.cur_dir + '/data/galFiltered.json', 'r')
jsonData = json.load(f)
j_nodes = jsonData['elements']['nodes']
j_edges = jsonData['elements']['edges']
print('\n---------- JSON Loading Test Start -----------\n')
# print(json.dumps(jsonData, indent=4))
g = util.to_networkx(jsonData)
nodes = g.nodes()
edges = g.edges()
self.assertEqual('Yeast Network Sample', g.graph['name'])
self.assertEqual('Sample network created by JSON export.', g.graph['description'])
self.assertEqual(4, len(g.graph['numberList']))
self.assertEqual(len(j_nodes), len(nodes))
self.assertEqual(len(j_edges), len(edges))
edge_set = set(list(map(lambda x: (int(x[0]), int(x[1])), edges)))
self.assertEqual(0, len(compare_edge_sets(edge_set, j_edges)))
def test_networkx_roundtrip(self):
print('\n---------- NetworkX Data Roundtrip Test Start -----------\n')
g = nx.newman_watts_strogatz_graph(100, 3, 0.5)
nodes = g.nodes()
edges = g.edges()
# Add some attributes
g.graph['name'] = 'original'
g.graph['density'] = nx.density(g)
nx.set_node_attributes(g, 'betweenness', nx.betweenness_centrality(g))
nx.set_node_attributes(g, 'degree', nx.degree(g))
nx.set_node_attributes(g, 'closeness', nx.closeness_centrality(g))
nx.set_edge_attributes(g, 'eb', nx.edge_betweenness(g))
cyjs1 = util.from_networkx(g)
g2 = util.to_networkx(cyjs1)
self.assertEqual(len(g2.nodes()), len(nodes))
self.assertEqual(len(g2.edges()), len(edges))
edge_set = set(list(map(lambda x: (int(x[0]), int(x[1])), g2.edges())))
self.assertEqual(0, len(edge_set.difference(set(edges))))
node_original = g.node[1]
node_generated = g2.node['1']
print(node_original)
print(node_generated)
self.assertEqual(node_original['degree'], node_generated['degree'])
self.assertEqual(node_original['betweenness'], node_generated['betweenness'])
self.assertEqual(node_original['closeness'], node_generated['closeness'])
def test_from_igraph(self):
print('---------- From igraph object to Cytoscape.js -----------\n')
empty = ig.Graph()
cyjs = util.from_igraph(empty)
print(json.dumps(cyjs, indent=4))
self.assertIsNotNone(cyjs)
self.assertIsNotNone(cyjs['data'])
self.assertEqual(0, len(cyjs['elements']['nodes']))
self.assertEqual(0, len(cyjs['elements']['edges']))
def test_to_igraph(self):
print('---------- Cytoscape.js to igraph object -----------\n')
from py2cytoscape.util.util_igraph import to_igraph
f = open(self.cur_dir + '/data/galFiltered.json', 'r')
network = json.load(f)
g = to_igraph(network)
self.assertIsNotNone(g)
nodes = g.vs
edges = g.es
self.assertEqual(len(network['elements']['nodes']), len(nodes))
self.assertEqual(len(network['elements']['edges']), len(edges))
self.assertEqual('Yeast Network Sample', g['name'])
self.assertEqual('Yeast Sample', g['shared_name'])
net_names = g.attributes()
print(net_names)
self.assertEqual(type([]), type(g['numberList']))
self.assertEqual(type([]), type(g['floatList']))
self.assertEqual(type(u""), type(g['description']))
self.assertEqual(10, len(net_names))
na_names = g.vs.attribute_names()
self.assertEqual(31, len(na_names))
ea_names = g.es.attribute_names()
self.assertEqual(8, len(ea_names))
def test_from_igraph_random(self):
print('---------- From igraph random network object to Cytoscape.js -----------\n')
ba_graph = ig.Graph.Barabasi(100, 3)
ba_graph['name'] = 'Barabasi'
ba_graph['number_attr'] = 12345
ba_graph.vs['degree'] = ba_graph.degree()
ba_graph.vs['bw'] = ba_graph.betweenness()
ba_graph.es['ebw'] = ba_graph.edge_betweenness()
cyjs = util.from_igraph(ba_graph)
# print(json.dumps(cyjs, indent=4))
self.assertIsNotNone(cyjs)
self.assertIsNotNone(cyjs['data'])
self.assertEqual('Barabasi', cyjs['data']['name'])
self.assertEqual(12345, cyjs['data']['number_attr'])
cyjs_nodes = cyjs['elements']['nodes']
cyjs_edges = cyjs['elements']['edges']
self.assertEqual(len(ba_graph.vs), len(cyjs_nodes))
self.assertEqual(len(ba_graph.es), len(cyjs_edges))
self.assertEqual(ba_graph.vs[0]['degree'], cyjs_nodes[0]['data']['degree'])
self.assertEqual(ba_graph.vs[0]['bw'], cyjs_nodes[0]['data']['bw'])
# Test edge
target_edge = ba_graph.es[10]
edge0 = None
for e in cyjs_edges:
if e['data']['source'] == str(target_edge.source) \
and e['data']['target'] == str(target_edge.target):
edge0 = e
break
self.assertIsNotNone(edge0)
self.assertEqual(target_edge['ebw'], edge0['data']['ebw'])
if __name__ == '__main__':
unittest.main()
|
scholer/py2cytoscape
|
tests/test_util.py
|
Python
|
mit
| 11,636
|
[
"Cytoscape"
] |
a0968c7facdd43694c089820f30cb73b143b251d3de3a9c4151432404d10fa19
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Miscellaneous utility functions.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import abc
import collections
import copy
import functools
import inspect
import os
import os.path
import sys
import tempfile
import time
import warnings
import numpy as np
import numpy.ma as ma
import iris
import iris.exceptions
def broadcast_weights(weights, array, dims):
"""
Broadcast a weights array to the shape of another array.
Each dimension of the weights array must correspond to a dimension
of the other array.
.. deprecated:: 1.6
Please use :func:`~iris.util.broadcast_to_shape()`.
Args:
* weights (:class:`numpy.ndarray`-like):
An array of weights to broadcast.
* array (:class:`numpy.ndarray`-like):
An array whose shape is the target shape for *weights*.
* dims (:class:`list` :class:`tuple` etc.):
A sequence of dimension indices, specifying which dimensions of
*array* are represented in *weights*. The order the dimensions
are given in is not important, but the order of the dimensions
in *weights* should be the same as the relative ordering of the
corresponding dimensions in *array*. For example, if *array* is
4d with dimensions (ntime, nlev, nlat, nlon) and *weights*
provides latitude-longitude grid weightings then *dims* could be
set to [2, 3] or [3, 2] but *weights* must have shape
(nlat, nlon) since the latitude dimension comes before the
longitude dimension in *array*.
"""
warnings.warn('broadcast_weights() is deprecated and will be removed '
'in a future release. Consider converting existing code '
'to use broadcast_to_shape() as a replacement.',
stacklevel=2)
# Create a shape array, which *weights* can be re-shaped to, allowing
# them to be broadcast with *array*.
weights_shape = np.ones(array.ndim)
for dim in dims:
if dim is not None:
weights_shape[dim] = array.shape[dim]
# Broadcast the arrays together.
return np.broadcast_arrays(weights.reshape(weights_shape), array)[0]
def broadcast_to_shape(array, shape, dim_map):
"""
Broadcast an array to a given shape.
Each dimension of the array must correspond to a dimension in the
given shape. Striding is used to repeat the array until it matches
the desired shape, returning repeated views on the original array.
If you need to write to the resulting array, make a copy first.
Args:
* array (:class:`numpy.ndarray`-like)
An array to broadcast.
* shape (:class:`list`, :class:`tuple` etc.):
The shape the array should be broadcast to.
* dim_map (:class:`list`, :class:`tuple` etc.):
A mapping of the dimensions of *array* to their corresponding
element in *shape*. *dim_map* must be the same length as the
number of dimensions in *array*. Each element of *dim_map*
corresponds to a dimension of *array* and its value provides
the index in *shape* which the dimension of *array* corresponds
to, so the first element of *dim_map* gives the index of *shape*
that corresponds to the first dimension of *array* etc.
Examples:
Broadcasting an array of shape (2, 3) to the shape (5, 2, 6, 3)
where the first dimension of the array corresponds to the second
element of the desired shape and the second dimension of the array
corresponds to the fourth element of the desired shape::
a = np.array([[1, 2, 3], [4, 5, 6]])
b = broadcast_to_shape(a, (5, 2, 6, 3), (1, 3))
Broadcasting an array of shape (48, 96) to the shape (96, 48, 12)::
# a is an array of shape (48, 96)
result = broadcast_to_shape(a, (96, 48, 12), (1, 0))
"""
if len(dim_map) != array.ndim:
# We must check for this condition here because we cannot rely on
# getting an error from numpy if the dim_map argument is not the
# correct length, we might just get a segfault.
raise ValueError('dim_map must have an entry for every '
'dimension of the input array')
def _broadcast_helper(a):
strides = [0] * len(shape)
for idim, dim in enumerate(dim_map):
if shape[dim] != a.shape[idim]:
# We'll get garbage values if the dimensions of array are not
# those indicated by shape.
raise ValueError('shape and array are not compatible')
strides[dim] = a.strides[idim]
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
array_view = _broadcast_helper(array)
if ma.isMaskedArray(array):
if array.mask is ma.nomask:
# Degenerate masks can be applied as-is.
mask_view = array.mask
else:
# Mask arrays need to be handled in the same way as the data array.
mask_view = _broadcast_helper(array.mask)
array_view = ma.array(array_view, mask=mask_view)
return array_view
def delta(ndarray, dimension, circular=False):
"""
Calculates the difference between values along a given dimension.
Args:
* ndarray:
The array over which to do the difference.
* dimension:
The dimension over which to do the difference on ndarray.
* circular:
If not False then return n results in the requested dimension
with the delta between the last and first element included in
the result; otherwise the result will be of length n-1 (where n
is the length of ndarray in the given dimension's direction)
If circular is numeric then the value of circular will be added
to the last element of the given dimension if the last element
is negative, otherwise the value of circular will be subtracted
from the last element.
The example below illustrates the process::
original array -180, -90, 0, 90
delta (with circular=360): 90, 90, 90, -270+360
.. note::
The difference algorithm implemented is forward difference:
>>> import numpy as np
>>> import iris.util
>>> original = np.array([-180, -90, 0, 90])
>>> iris.util.delta(original, 0)
array([90, 90, 90])
>>> iris.util.delta(original, 0, circular=360)
array([90, 90, 90, 90])
"""
if circular is not False:
_delta = np.roll(ndarray, -1, axis=dimension)
last_element = [slice(None, None)] * ndarray.ndim
last_element[dimension] = slice(-1, None)
if not isinstance(circular, bool):
result = np.where(ndarray[last_element] >= _delta[last_element])[0]
_delta[last_element] -= circular
_delta[last_element][result] += 2*circular
np.subtract(_delta, ndarray, _delta)
else:
_delta = np.diff(ndarray, axis=dimension)
return _delta
def describe_diff(cube_a, cube_b, output_file=None):
"""
Prints the differences that prevent compatibility between two cubes, as
defined by :meth:`iris.cube.Cube.is_compatible()`.
Args:
* cube_a:
An instance of :class:`iris.cube.Cube` or
:class:`iris.cube.CubeMetadata`.
* cube_b:
An instance of :class:`iris.cube.Cube` or
:class:`iris.cube.CubeMetadata`.
* output_file:
A :class:`file` or file-like object to receive output. Defaults to
sys.stdout.
.. seealso::
:meth:`iris.cube.Cube.is_compatible()`
.. note::
Compatibility does not guarantee that two cubes can be merged.
Instead, this function is designed to provide a verbose description
of the differences in metadata between two cubes. Determining whether
two cubes will merge requires additional logic that is beyond the
scope of this function.
"""
if output_file is None:
output_file = sys.stdout
if cube_a.is_compatible(cube_b):
output_file.write('Cubes are compatible\n')
else:
common_keys = set(cube_a.attributes).intersection(cube_b.attributes)
for key in common_keys:
if np.any(cube_a.attributes[key] != cube_b.attributes[key]):
output_file.write('"%s" cube_a attribute value "%s" is not '
'compatible with cube_b '
'attribute value "%s"\n'
% (key,
cube_a.attributes[key],
cube_b.attributes[key]))
if cube_a.name() != cube_b.name():
output_file.write('cube_a name "%s" is not compatible '
'with cube_b name "%s"\n'
% (cube_a.name(), cube_b.name()))
if cube_a.units != cube_b.units:
output_file.write(
'cube_a units "%s" are not compatible with cube_b units "%s"\n'
% (cube_a.units, cube_b.units))
if cube_a.cell_methods != cube_b.cell_methods:
output_file.write('Cell methods\n%s\nand\n%s\nare not compatible\n'
% (cube_a.cell_methods, cube_b.cell_methods))
def guess_coord_axis(coord):
"""
Returns a "best guess" axis name of the coordinate.
Heuristic categorisation of the coordinate into either label
'T', 'Z', 'Y', 'X' or None.
Args:
* coord:
The :class:`iris.coords.Coord`.
Returns:
'T', 'Z', 'Y', 'X', or None.
"""
axis = None
if coord.standard_name in ('longitude', 'grid_longitude',
'projection_x_coordinate'):
axis = 'X'
elif coord.standard_name in ('latitude', 'grid_latitude',
'projection_y_coordinate'):
axis = 'Y'
elif (coord.units.is_convertible('hPa')
or coord.attributes.get('positive') in ('up', 'down')):
axis = 'Z'
elif coord.units.is_time_reference():
axis = 'T'
return axis
def rolling_window(a, window=1, step=1, axis=-1):
"""
Make an ndarray with a rolling window of the last dimension
Args:
* a : array_like
Array to add rolling window to
Kwargs:
* window : int
Size of rolling window
* step : int
Size of step between rolling windows
* axis : int
Axis to take the rolling window over
Returns:
Array that is a view of the original array with an added dimension
of the size of the given window at axis + 1.
Examples::
>>> x = np.arange(10).reshape((2, 5))
>>> rolling_window(x, 3)
array([[[0, 1, 2], [1, 2, 3], [2, 3, 4]],
[[5, 6, 7], [6, 7, 8], [7, 8, 9]]])
Calculate rolling mean of last dimension::
>>> np.mean(rolling_window(x, 3), -1)
array([[ 1., 2., 3.],
[ 6., 7., 8.]])
"""
# NOTE: The implementation of this function originates from
# https://github.com/numpy/numpy/pull/31#issuecomment-1304851 04/08/2011
if window < 1:
raise ValueError("`window` must be at least 1.")
if window > a.shape[axis]:
raise ValueError("`window` is too long.")
if step < 1:
raise ValueError("`step` must be at least 1.")
axis = axis % a.ndim
num_windows = (a.shape[axis] - window + step) // step
shape = a.shape[:axis] + (num_windows, window) + a.shape[axis + 1:]
strides = (a.strides[:axis] + (step * a.strides[axis], a.strides[axis]) +
a.strides[axis + 1:])
rw = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
if ma.isMaskedArray(a):
mask = ma.getmaskarray(a)
strides = (mask.strides[:axis] +
(step * mask.strides[axis], mask.strides[axis]) +
mask.strides[axis + 1:])
rw = ma.array(rw, mask=np.lib.stride_tricks.as_strided(
mask, shape=shape, strides=strides))
return rw
def array_equal(array1, array2):
"""
Returns whether two arrays have the same shape and elements.
This provides the same functionality as :func:`numpy.array_equal` but with
additional support for arrays of strings.
"""
array1, array2 = np.asarray(array1), np.asarray(array2)
if array1.shape != array2.shape:
eq = False
else:
eq = bool(np.asarray(array1 == array2).all())
return eq
def approx_equal(a, b, max_absolute_error=1e-10, max_relative_error=1e-10):
"""
Returns whether two numbers are almost equal, allowing for the
finite precision of floating point numbers.
"""
# Deal with numbers close to zero
if abs(a - b) < max_absolute_error:
return True
# Ensure we get consistent results if "a" and "b" are supplied in the
# opposite order.
max_ab = max([a, b], key=abs)
relative_error = abs(a - b) / max_ab
return relative_error < max_relative_error
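# Editor's note (not part of the original function): with the defaults,
# approx_equal(1.0, 1.0 + 1e-12) is True (caught by the absolute tolerance),
# while approx_equal(1e8, 1e8 + 1) is False since the relative error ~1e-8
# exceeds max_relative_error.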
def between(lh, rh, lh_inclusive=True, rh_inclusive=True):
"""
Provides a convenient way of defining a 3-element inequality such as
``a < number < b``.
Arguments:
* lh
The left hand element of the inequality
* rh
The right hand element of the inequality
Keywords:
* lh_inclusive - boolean
Affects the left hand comparison operator to use in the inequality.
True for ``<=`` false for ``<``. Defaults to True.
* rh_inclusive - boolean
Same as lh_inclusive but for right hand operator.
For example::
between_3_and_6 = between(3, 6)
for i in range(10):
print(i, between_3_and_6(i))
between_3_and_6 = between(3, 6, rh_inclusive=False)
for i in range(10):
print(i, between_3_and_6(i))
"""
if lh_inclusive and rh_inclusive:
return lambda c: lh <= c <= rh
elif lh_inclusive and not rh_inclusive:
return lambda c: lh <= c < rh
elif not lh_inclusive and rh_inclusive:
return lambda c: lh < c <= rh
else:
return lambda c: lh < c < rh
def reverse(array, axes):
"""
Reverse the array along the given axes.
Args:
* array
The array to reverse
* axes
A single value or array of values of axes to reverse
::
>>> import numpy as np
>>> a = np.arange(24).reshape(2, 3, 4)
>>> print(a)
[[[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
<BLANKLINE>
[[12 13 14 15]
[16 17 18 19]
[20 21 22 23]]]
>>> print(reverse(a, 1))
[[[ 8 9 10 11]
[ 4 5 6 7]
[ 0 1 2 3]]
<BLANKLINE>
[[20 21 22 23]
[16 17 18 19]
[12 13 14 15]]]
>>> print(reverse(a, [1, 2]))
[[[11 10 9 8]
[ 7 6 5 4]
[ 3 2 1 0]]
<BLANKLINE>
[[23 22 21 20]
[19 18 17 16]
[15 14 13 12]]]
"""
index = [slice(None, None)] * array.ndim
axes = np.array(axes, ndmin=1)
if axes.ndim != 1:
raise ValueError('Reverse was expecting a single axis or a 1d array '
'of axes, got %r' % axes)
if np.min(axes) < 0 or np.max(axes) > array.ndim-1:
raise ValueError('An axis value out of range for the number of '
'dimensions from the given array (%s) was received. '
'Got: %r' % (array.ndim, axes))
for axis in axes:
index[axis] = slice(None, None, -1)
return array[tuple(index)]
def monotonic(array, strict=False, return_direction=False):
"""
Return whether the given 1d array is monotonic.
Note that the array must not contain missing data.
Kwargs:
* strict (boolean)
Flag to enable strict monotonic checking
* return_direction (boolean)
Flag to change return behaviour to return
(monotonic_status, direction). Direction will be 1 for positive
or -1 for negative. The direction is meaningless if the array is
not monotonic.
Returns:
* monotonic_status (boolean)
Whether the array was monotonic.
If the return_direction flag was given then the returned value
will be:
``(monotonic_status, direction)``
"""
if array.ndim != 1 or len(array) <= 1:
raise ValueError('The array to check must be 1 dimensional and have '
'more than 1 element.')
if ma.isMaskedArray(array) and ma.count_masked(array) != 0:
raise ValueError('The array to check contains missing data.')
# Identify the directions of the largest/most-positive and
# smallest/most-negative steps.
d = np.diff(array)
sign_max_d = np.sign(np.max(d))
sign_min_d = np.sign(np.min(d))
if strict:
monotonic = sign_max_d == sign_min_d and sign_max_d != 0
else:
monotonic = (sign_min_d < 0 and sign_max_d <= 0) or \
(sign_max_d > 0 and sign_min_d >= 0) or \
(sign_min_d == sign_max_d == 0)
if return_direction:
if sign_max_d == 0:
direction = sign_min_d
else:
direction = sign_max_d
return monotonic, direction
return monotonic
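# Editor's note (not part of the original function): for example,
# monotonic(np.array([1, 2, 2, 3])) is True, the strict=True variant is
# False (the repeated 2 breaks strictness), and with return_direction=True
# the call returns (True, 1) for this increasing array.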
def column_slices_generator(full_slice, ndims):
"""
Given a full slice full of tuples, return a dictionary mapping old
data dimensions to new and a generator which gives the successive
slices needed to index correctly (across columns).
This routine deals with the special functionality for tuple based
indexing e.g. [0, (3, 5), :, (1, 6, 8)] by first providing a slice
which takes the non tuple slices out first i.e. [0, :, :, :] then
subsequently iterates through each of the tuples taking out the
appropriate slices i.e. [(3, 5), :, :] followed by [:, :, (1, 6, 8)]
This routine was developed because numpy does not support the direct
approach of [(3, 5), :, (1, 6, 8)] for column-based indexing.
"""
list_of_slices = []
# Map current dimensions to new dimensions, or None
dimension_mapping = {None: None}
_count_current_dim = 0
for i, i_key in enumerate(full_slice):
if isinstance(i_key, (int, np.integer)):
dimension_mapping[i] = None
else:
dimension_mapping[i] = _count_current_dim
_count_current_dim += 1
# Get all of the dimensions for which a tuple of indices was provided
# (1d numpy.ndarrays are treated in the same way as tuples in this case)
is_tuple_style_index = lambda key: isinstance(key, tuple) or \
(isinstance(key, np.ndarray) and key.ndim == 1)
tuple_indices = [i for i, key in enumerate(full_slice)
if is_tuple_style_index(key)]
# stg1: Take a copy of the full_slice specification, turning all tuples
# into a full slice
if tuple_indices != list(range(len(full_slice))):
first_slice = list(full_slice)
for tuple_index in tuple_indices:
first_slice[tuple_index] = slice(None, None)
# turn first_slice back into a tuple ready for indexing
first_slice = tuple(first_slice)
list_of_slices.append(first_slice)
data_ndims = max(dimension_mapping.values())
if data_ndims is not None:
data_ndims += 1
# stg2 iterate over each of the tuples
for tuple_index in tuple_indices:
# Create a list with the indices to span the whole data array that we
# currently have
spanning_slice_with_tuple = [slice(None, None)] * data_ndims
# Replace the slice(None, None) with our current tuple
spanning_slice_with_tuple[dimension_mapping[tuple_index]] = \
full_slice[tuple_index]
# if we just have [(0, 1)] turn it into [(0, 1), ...] as this is
# Numpy's syntax.
if len(spanning_slice_with_tuple) == 1:
spanning_slice_with_tuple.append(Ellipsis)
spanning_slice_with_tuple = tuple(spanning_slice_with_tuple)
list_of_slices.append(spanning_slice_with_tuple)
# return the dimension mapping and a generator of slices
return dimension_mapping, iter(list_of_slices)
def _build_full_slice_given_keys(keys, ndim):
"""
Given the keys passed to a __getitem__ call, build an equivalent
tuple of keys which span ndims.
"""
# Ensure that we always have a tuple of keys
if not isinstance(keys, tuple):
keys = tuple([keys])
# catch the case where an extra Ellipsis has been provided which can be
# discarded iff len(keys)-1 == ndim
if len(keys)-1 == ndim and \
Ellipsis in filter(lambda obj:
not isinstance(obj, np.ndarray), keys):
keys = list(keys)
is_ellipsis = [key is Ellipsis for key in keys]
keys.pop(is_ellipsis.index(True))
keys = tuple(keys)
# for ndim >= 1 appending a ":" to the slice specification is allowable,
# remove this now
if len(keys) > ndim and ndim != 0 and keys[-1] == slice(None, None):
keys = keys[:-1]
if len(keys) > ndim:
raise IndexError('More slices requested than dimensions. Requested '
'%r, but there were only %s dimensions.' %
(keys, ndim))
# For each dimension get the slice which has been requested.
# If no slice provided, then default to the whole dimension
full_slice = [slice(None, None)] * ndim
for i, key in enumerate(keys):
if key is Ellipsis:
# replace any subsequent Ellipsis objects in keys with
# slice(None, None) as per Numpy
keys = keys[:i] + tuple([slice(None, None) if key is Ellipsis
else key for key in keys[i:]])
# iterate over the remaining keys in reverse to fill in
# the gaps from the right hand side
for j, key in enumerate(keys[:i:-1]):
full_slice[-j-1] = key
# we've finished with i now so stop the iteration
break
else:
full_slice[i] = key
# remove any tuples on dimensions, turning them into numpy array's for
# consistent behaviour
full_slice = tuple([np.array(key, ndmin=1) if isinstance(key, tuple)
else key for key in full_slice])
return full_slice
def _wrap_function_for_method(function, docstring=None):
"""
Returns a wrapper function modified to be suitable for use as a
method.
The wrapper function renames the first argument as "self" and allows
an alternative docstring, thus allowing the built-in help(...)
routine to display appropriate output.
"""
# Generate the Python source for the wrapper function.
# NB. The first argument is replaced with "self".
args, varargs, varkw, defaults = inspect.getargspec(function)
if defaults is None:
basic_args = ['self'] + args[1:]
default_args = []
simple_default_args = []
else:
cutoff = -len(defaults)
basic_args = ['self'] + args[1:cutoff]
default_args = ['%s=%r' % pair
for pair in zip(args[cutoff:], defaults)]
simple_default_args = args[cutoff:]
var_arg = [] if varargs is None else ['*' + varargs]
var_kw = [] if varkw is None else ['**' + varkw]
arg_source = ', '.join(basic_args + default_args + var_arg + var_kw)
simple_arg_source = ', '.join(basic_args + simple_default_args +
var_arg + var_kw)
source = ('def %s(%s):\n return function(%s)' %
(function.__name__, arg_source, simple_arg_source))
# Compile the wrapper function
# NB. There's an outstanding bug with "exec" where the locals and globals
# dictionaries must be the same if we're to get closure behaviour.
my_locals = {'function': function}
exec(source, my_locals, my_locals)
# Update the docstring if required, and return the modified function
wrapper = my_locals[function.__name__]
if docstring is None:
wrapper.__doc__ = function.__doc__
else:
wrapper.__doc__ = docstring
return wrapper
class _MetaOrderedHashable(abc.ABCMeta):
"""
A metaclass that ensures that non-abstract subclasses of _OrderedHashable
without an explicit __init__ method are given a default __init__ method
with the appropriate method signature.
Also, an _init method is provided to allow subclasses with their own
__init__ constructors to initialise their values via an explicit method
signature.
NB. This metaclass is used to construct the _OrderedHashable class as well
as all its subclasses.
"""
def __new__(cls, name, bases, namespace):
# We only want to modify concrete classes that have defined the
# "_names" property.
if '_names' in namespace and \
not isinstance(namespace['_names'], abc.abstractproperty):
args = ', '.join(namespace['_names'])
# Ensure the class has a constructor with explicit arguments.
if '__init__' not in namespace:
# Create a default __init__ method for the class
method_source = ('def __init__(self, %s):\n '
'self._init_from_tuple((%s,))' % (args, args))
exec(method_source, namespace)
# Ensure the class has a "helper constructor" with explicit
# arguments.
if '_init' not in namespace:
# Create a default _init method for the class
method_source = ('def _init(self, %s):\n '
'self._init_from_tuple((%s,))' % (args, args))
exec(method_source, namespace)
return super(_MetaOrderedHashable, cls).__new__(
cls, name, bases, namespace)
@functools.total_ordering
class _OrderedHashable(six.with_metaclass(_MetaOrderedHashable,
collections.Hashable)):
"""
Convenience class for creating "immutable", hashable, and ordered classes.
Instance identity is defined by the specific list of attribute names
declared in the abstract attribute "_names". Subclasses must declare the
attribute "_names" as an iterable containing the names of all the
attributes relevant to equality/hash-value/ordering.
Initial values should be set by using ::
self._init(self, value1, value2, ..)
.. note::
It's the responsibility of the subclass to ensure that the values of
its attributes are themselves hashable.
"""
@abc.abstractproperty
def _names(self):
"""
Override this attribute to declare the names of all the attributes
relevant to the hash/comparison semantics.
"""
pass
def _init_from_tuple(self, values):
for name, value in zip(self._names, values):
object.__setattr__(self, name, value)
def __repr__(self):
class_name = type(self).__name__
attributes = ', '.join('%s=%r' % (name, value)
for (name, value)
in zip(self._names, self._as_tuple()))
return '%s(%s)' % (class_name, attributes)
def _as_tuple(self):
return tuple(getattr(self, name) for name in self._names)
# Prevent attribute updates
def __setattr__(self, name, value):
raise AttributeError('Instances of %s are immutable' %
type(self).__name__)
def __delattr__(self, name):
raise AttributeError('Instances of %s are immutable' %
type(self).__name__)
# Provide hash semantics
def _identity(self):
return self._as_tuple()
def __hash__(self):
return hash(self._identity())
def __eq__(self, other):
return (isinstance(other, type(self)) and
self._identity() == other._identity())
def __ne__(self, other):
# Since we've defined __eq__ we should also define __ne__.
return not self == other
# Provide default ordering semantics
def __lt__(self, other):
if isinstance(other, _OrderedHashable):
return self._identity() < other._identity()
else:
return NotImplemented
def create_temp_filename(suffix=''):
"""Return a temporary file name.
Args:
* suffix - Optional filename extension.
"""
temp_file = tempfile.mkstemp(suffix)
os.close(temp_file[0])
return temp_file[1]
def clip_string(the_str, clip_length=70, rider="..."):
"""
Returns a clipped version of the string based on the specified clip
length and whether or not any graceful clip points can be found.
If the string to be clipped is shorter than the specified clip
length, the original string is returned.
If the string is longer than the clip length, a graceful point (a
space character) after the clip length is searched for. If a
graceful point is found the string is clipped at this point and the
rider is added. If no graceful point can be found, then the string
is clipped exactly where the user requested and the rider is added.
Args:
* the_str
The string to be clipped
* clip_length
The length in characters that the input string should be clipped
to. Defaults to a preconfigured value if not specified.
* rider
A series of characters appended at the end of the returned
string to show it has been clipped. Defaults to a preconfigured
value if not specified.
Returns:
The string clipped to the required length with a rider appended.
If the clip length was greater than the length of the original
string, the original string is returned unaltered.
"""
if clip_length >= len(the_str) or clip_length <= 0:
return the_str
else:
if the_str[clip_length].isspace():
return the_str[:clip_length] + rider
else:
first_part = the_str[:clip_length]
remainder = the_str[clip_length:]
# Try to find a graceful point at which to trim i.e. a space
# If no graceful point can be found, then just trim where the user
# specified by adding an empty slice of the remainder ( [:0] )
termination_point = remainder.find(" ")
if termination_point == -1:
termination_point = 0
return first_part + remainder[:termination_point] + rider
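# Editor's note (not part of the original function): for example,
# clip_string("the quick brown fox", clip_length=10) trims at the next space
# after position 10 and returns "the quick brown...".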
def ensure_array(a):
""".. deprecated:: 1.7"""
warnings.warn('ensure_array() is deprecated and will be removed '
'in a future release.')
if not isinstance(a, (np.ndarray, ma.core.MaskedArray)):
a = np.array([a])
return a
class _Timers(object):
"""
A utility class for timing things.
.. deprecated:: 1.7
"""
# See help for timers, below.
def __init__(self):
self.timers = {}
def start(self, name, step_name):
warnings.warn('Timers was deprecated in v1.7.0 and will be removed '
'in future Iris releases.')
self.stop(name)
timer = self.timers.setdefault(name, {})
timer[step_name] = time.time()
timer["active_timer_step"] = step_name
def restart(self, name, step_name):
warnings.warn('Timers was deprecated in v1.7.0 and will be removed '
'in future Iris releases.')
self.stop(name)
timer = self.timers.setdefault(name, {})
timer[step_name] = time.time() - timer.get(step_name, 0)
timer["active_timer_step"] = step_name
def stop(self, name):
if name in self.timers and "active_timer_step" in self.timers[name]:
timer = self.timers[name]
active = timer["active_timer_step"]
start = timer[active]
timer[active] = time.time() - start
return self.get(name)
def get(self, name):
result = (name, [])
if name in self.timers:
result = (name, ", ".join(["'%s':%8.5f" % (k, v)
for k, v in self.timers[name].items()
if k != "active_timer_step"]))
return result
def reset(self, name):
self.timers[name] = {}
timers = _Timers()
"""
Provides multiple named timers, each composed of multiple named steps.
.. deprecated:: 1.7
Only one step is active at a time, so calling start(timer_name, step_name)
will stop the current step and start the new one.
Example Usage::
from iris.util import timers
def little_func(param):
timers.restart("little func", "init")
init()
timers.restart("little func", "main")
main(param)
timers.restart("little func", "cleanup")
cleanup()
timers.stop("little func")
def my_big_func():
timers.start("big func", "input")
input()
timers.start("big func", "processing")
little_func(123)
little_func(456)
timers.start("big func", "output")
output()
print(timers.stop("big func"))
print(timers.get("little func"))
"""
def format_array(arr):
"""
Returns the given array as a string, using the python builtin str
function on a piecewise basis.
Useful for xml representation of arrays.
For customisations, use the :mod:`numpy.core.arrayprint` directly.
"""
if arr.size > 85:
summary_insert = "..., "
else:
summary_insert = ""
ffunc = str
return np.core.arrayprint._formatArray(arr, ffunc, len(arr.shape),
max_line_len=50,
next_line_prefix='\t\t',
separator=', ', edge_items=3,
summary_insert=summary_insert)[:-1]
def new_axis(src_cube, scalar_coord=None):
"""
Create a new axis as the leading dimension of the cube, promoting a scalar
coordinate if specified.
Args:
* src_cube (:class:`iris.cube.Cube`)
Source cube on which to generate a new axis.
Kwargs:
    * scalar_coord (:class:`iris.coords.Coord` or 'string')
Scalar coordinate to promote to a dimension coordinate.
Returns:
A new :class:`iris.cube.Cube` instance with one extra leading dimension
(length 1).
For example::
>>> cube.shape
(360, 360)
>>> ncube = iris.util.new_axis(cube, 'time')
>>> ncube.shape
(1, 360, 360)
.. warning::
Calling this method will trigger any deferred loading, causing the
data array of the cube to be loaded into memory.
"""
if scalar_coord is not None:
scalar_coord = src_cube.coord(scalar_coord)
# Indexing numpy arrays requires loading deferred data here returning a
# copy of the data with a new leading dimension.
new_cube = iris.cube.Cube(src_cube.data[None])
new_cube.metadata = src_cube.metadata
for coord in src_cube.aux_coords:
if scalar_coord and scalar_coord == coord:
dim_coord = iris.coords.DimCoord.from_coord(coord)
new_cube.add_dim_coord(dim_coord, 0)
else:
dims = np.array(src_cube.coord_dims(coord)) + 1
new_cube.add_aux_coord(coord.copy(), dims)
for coord in src_cube.dim_coords:
coord_dims = np.array(src_cube.coord_dims(coord)) + 1
new_cube.add_dim_coord(coord.copy(), coord_dims)
for factory in src_cube.aux_factories:
new_cube.add_aux_factory(copy.deepcopy(factory))
return new_cube
def as_compatible_shape(src_cube, target_cube):
"""
Return a cube with added length one dimensions to match the dimensionality
and dimension ordering of `target_cube`.
This function can be used to add the dimensions that have been collapsed,
aggregated or sliced out, promoting scalar coordinates to length one
dimension coordinates where necessary. It operates by matching coordinate
metadata to infer the dimensions that need modifying, so the provided
cubes must have coordinates with the same metadata
(see :class:`iris.coords.CoordDefn`).
.. note:: This function will load and copy the data payload of `src_cube`.
Args:
* src_cube:
An instance of :class:`iris.cube.Cube` with missing dimensions.
* target_cube:
An instance of :class:`iris.cube.Cube` with the desired dimensionality.
Returns:
        An instance of :class:`iris.cube.Cube` with the same dimensionality as
`target_cube` but with the data and coordinates from `src_cube`
suitably reshaped to fit.
"""
dim_mapping = {}
for coord in target_cube.aux_coords + target_cube.dim_coords:
dims = target_cube.coord_dims(coord)
try:
collapsed_dims = src_cube.coord_dims(coord)
except iris.exceptions.CoordinateNotFoundError:
continue
if collapsed_dims:
if len(collapsed_dims) == len(dims):
for dim_from, dim_to in zip(dims, collapsed_dims):
dim_mapping[dim_from] = dim_to
elif dims:
for dim_from in dims:
dim_mapping[dim_from] = None
if len(dim_mapping) != target_cube.ndim:
raise ValueError('Insufficient or conflicting coordinate '
'metadata. Cannot infer dimension mapping '
'to restore cube dimensions.')
new_shape = [1] * target_cube.ndim
for dim_from, dim_to in six.iteritems(dim_mapping):
if dim_to is not None:
new_shape[dim_from] = src_cube.shape[dim_to]
new_data = src_cube.data.copy()
# Transpose the data (if necessary) to prevent assignment of
# new_shape doing anything except adding length one dims.
order = [v for k, v in sorted(dim_mapping.items()) if v is not None]
if order != sorted(order):
new_order = [order.index(i) for i in range(len(order))]
new_data = np.transpose(new_data, new_order).copy()
new_cube = iris.cube.Cube(new_data.reshape(new_shape))
new_cube.metadata = copy.deepcopy(src_cube.metadata)
# Record a mapping from old coordinate IDs to new coordinates,
# for subsequent use in creating updated aux_factories.
coord_mapping = {}
def add_coord(coord):
"""Closure used to add a suitably reshaped coord to new_cube."""
dims = target_cube.coord_dims(coord)
shape = [new_cube.shape[dim] for dim in dims]
if not shape:
shape = [1]
points = coord.points.reshape(shape)
bounds = None
if coord.has_bounds():
bounds = coord.bounds.reshape(shape + [coord.nbounds])
new_coord = coord.copy(points=points, bounds=bounds)
# If originally in dim_coords, add to dim_coords, otherwise add to
# aux_coords.
if target_cube.coords(coord, dim_coords=True):
try:
new_cube.add_dim_coord(new_coord, dims)
except ValueError:
# Catch cases where the coord is an AuxCoord and therefore
# cannot be added to dim_coords.
new_cube.add_aux_coord(new_coord, dims)
else:
new_cube.add_aux_coord(new_coord, dims)
coord_mapping[id(coord)] = new_coord
for coord in src_cube.aux_coords + src_cube.dim_coords:
add_coord(coord)
for factory in src_cube.aux_factories:
new_cube.add_aux_factory(factory.updated(coord_mapping))
return new_cube
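# Editor's sketch (not part of the original module): a typical use of
# as_compatible_shape is restoring a collapsed cube to its original
# dimensionality. The 'time' coordinate here is hypothetical.
def _example_as_compatible_shape(cube):
    import iris.analysis
    collapsed = cube.collapsed('time', iris.analysis.MEAN)
    return as_compatible_shape(collapsed, cube)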
def squeeze(cube):
"""
Removes any dimension of length one. If it has an associated DimCoord or
AuxCoord, this becomes a scalar coord.
Args:
* cube (:class:`iris.cube.Cube`)
Source cube to remove length 1 dimension(s) from.
Returns:
A new :class:`iris.cube.Cube` instance without any dimensions of
length 1.
For example::
>>> cube.shape
(1, 360, 360)
>>> ncube = iris.util.squeeze(cube)
>>> ncube.shape
(360, 360)
"""
slices = [0 if cube.shape[dim] == 1 else slice(None)
for dim in range(cube.ndim)]
squeezed = cube[tuple(slices)]
return squeezed
def file_is_newer_than(result_path, source_paths):
"""
Return whether the 'result' file has a later modification time than all of
the 'source' files.
If a stored result depends entirely on known 'sources', it need only be
re-built when one of them changes. This function can be used to test that
by comparing file timestamps.
Args:
* result_path (string):
The filepath of a file containing some derived result data.
* source_paths (string or iterable of strings):
The path(s) to the original datafiles used to make the result. May
include wildcards and '~' expansions (like Iris load paths), but not
URIs.
Returns:
True if all the sources are older than the result, else False.
If any of the file paths describes no existing files, an exception will
be raised.
.. note::
There are obvious caveats to using file timestamps for this, as correct
usage depends on how the sources might change. For example, a file
could be replaced by one of the same name, but an older timestamp.
If wildcards and '~' expansions are used, this introduces even more
uncertainty, as then you cannot even be sure that the resulting list of
file names is the same as the originals. For example, some files may
have been deleted or others added.
.. note::
The result file may often be a :mod:`pickle` file. In that case, it
also depends on the relevant module sources, so extra caution is
required. Ideally, an additional check on iris.__version__ is advised.
"""
# Accept a string as a single source path
if isinstance(source_paths, six.string_types):
source_paths = [source_paths]
# Fix our chosen timestamp function
file_date = os.path.getmtime
# Get the 'result file' time
result_timestamp = file_date(result_path)
# Get all source filepaths, with normal Iris.io load helper function
source_file_paths = iris.io.expand_filespecs(source_paths)
# Compare each filetime, for each spec, with the 'result time'
for path in source_file_paths:
source_timestamp = file_date(path)
if source_timestamp >= result_timestamp:
return False
return True
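# Editor's sketch (not part of the original module): gate an expensive
# rebuild on source timestamps; `rebuild` is a hypothetical callable that
# writes `result_path`.
def _example_refresh_result(result_path, source_paths, rebuild):
    if not os.path.exists(result_path) or \
            not file_is_newer_than(result_path, source_paths):
        rebuild(result_path)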
def is_regular(coord):
"""Determine if the given coord is regular."""
try:
regular_step(coord)
except iris.exceptions.CoordinateNotRegularError:
return False
except (TypeError, ValueError):
return False
return True
def regular_step(coord):
"""Return the regular step from a coord or fail."""
if coord.ndim != 1:
raise iris.exceptions.CoordinateMultiDimError("Expected 1D coord")
if coord.shape[0] < 2:
raise ValueError("Expected a non-scalar coord")
diffs = coord.points[1:] - coord.points[:-1]
avdiff = np.mean(diffs)
if not np.allclose(diffs, avdiff, rtol=0.001):
# TODO: This value is set for test_analysis to pass...
msg = "Coord %s is not regular" % coord.name()
raise iris.exceptions.CoordinateNotRegularError(msg)
return avdiff.astype(coord.points.dtype)
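# Editor's illustration (not part of the original module): a coordinate
# with evenly spaced points is regular, and its step is the spacing.
def _example_regular_step():
    coord = iris.coords.DimCoord(np.arange(0.0, 10.0, 2.0))
    assert is_regular(coord)
    assert regular_step(coord) == 2.0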
def unify_time_units(cubes):
"""
Performs an in-place conversion of the time units of all time coords in the
cubes in a given iterable. One common epoch is defined for each calendar
found in the cubes to prevent units being defined with inconsistencies
between epoch and calendar.
Each epoch is defined from the first suitable time coordinate found in the
input cubes.
Arg:
* cubes:
An iterable containing :class:`iris.cube.Cube` instances.
"""
epochs = {}
for cube in cubes:
for time_coord in cube.coords():
if time_coord.units.is_time_reference():
epoch = epochs.setdefault(time_coord.units.calendar,
time_coord.units.origin)
new_unit = iris.unit.Unit(epoch, time_coord.units.calendar)
time_coord.convert_units(new_unit)
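# Editor's sketch (not part of the original module): after unification,
# cubes sharing a calendar also share an epoch, so they can be merged.
def _example_merge_after_unify(cubes):
    unify_time_units(cubes)
    return iris.cube.CubeList(cubes).merge()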
def _is_circular(points, modulus, bounds=None):
"""
Determine whether the provided points or bounds are circular in nature
relative to the modulus value.
If the bounds are provided then these are checked for circularity rather
than the points.
Args:
* points:
:class:`numpy.ndarray` of point values.
* modulus:
Circularity modulus value.
Kwargs:
* bounds:
:class:`numpy.ndarray` of bound values.
Returns:
Boolean.
"""
circular = False
if bounds is not None:
# Set circular to True if the bounds ends are equivalent.
first_bound = last_bound = None
if bounds.ndim == 1 and bounds.shape[-1] == 2:
first_bound = bounds[0] % modulus
last_bound = bounds[1] % modulus
elif bounds.ndim == 2 and bounds.shape[-1] == 2:
first_bound = bounds[0, 0] % modulus
last_bound = bounds[-1, 1] % modulus
if first_bound is not None and last_bound is not None:
circular = np.allclose(first_bound, last_bound,
rtol=1.0e-5)
else:
# set circular if points are regular and last+1 ~= first
if len(points) > 1:
diffs = list(set(np.diff(points)))
diff = np.mean(diffs)
abs_tol = diff * 1.0e-4
diff_approx_equal = np.max(np.abs(diffs - diff)) < abs_tol
if diff_approx_equal:
circular_value = (points[-1] + diff) % modulus
try:
np.testing.assert_approx_equal(points[0],
circular_value,
significant=4)
circular = True
except AssertionError:
if points[0] == 0:
try:
np.testing.assert_approx_equal(modulus,
circular_value,
significant=4)
circular = True
except AssertionError:
pass
else:
# XXX - Inherited behaviour from NetCDF PyKE rules.
# We need to decide whether this is valid!
circular = points[0] >= modulus
return circular
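# Editor's illustration (not part of the original module): evenly spaced
# longitudes covering the full globe wrap around modulo 360 and are
# therefore circular.
def _example_is_circular():
    points = np.arange(0.0, 360.0, 30.0)
    assert _is_circular(points, 360.0)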
def promote_aux_coord_to_dim_coord(cube, name_or_coord):
"""
Promotes an AuxCoord on the cube to a DimCoord. This AuxCoord must be
associated with a single cube dimension. If the AuxCoord is associated
with a dimension that already has a DimCoord, that DimCoord gets
demoted to an AuxCoord.
Args:
* cube
An instance of :class:`iris.cube.Cube`
* name_or_coord:
Either
(a) An instance of :class:`iris.coords.AuxCoord`
or
        (b) the :attr:`standard_name`, :attr:`long_name`, or
            :attr:`var_name` of an instance of
            :class:`iris.coords.AuxCoord`.
For example::
>>> print cube
air_temperature / (K) (time: 12; latitude: 73; longitude: 96)
Dimension coordinates:
time x - -
latitude - x -
longitude - - x
Auxiliary coordinates:
year x - -
>>> promote_aux_coord_to_dim_coord(cube, 'year')
>>> print cube
air_temperature / (K) (year: 12; latitude: 73; longitude: 96)
Dimension coordinates:
year x - -
latitude - x -
longitude - - x
Auxiliary coordinates:
time x - -
"""
if isinstance(name_or_coord, six.string_types):
aux_coord = cube.coord(name_or_coord)
elif isinstance(name_or_coord, iris.coords.Coord):
aux_coord = name_or_coord
else:
# Don't know how to handle this type
msg = ("Don't know how to handle coordinate of type {}. "
"Ensure all coordinates are of type six.string_types or "
"iris.coords.Coord.")
msg = msg.format(type(name_or_coord))
raise TypeError(msg)
if aux_coord in cube.dim_coords:
# nothing to do
return
if aux_coord not in cube.aux_coords:
msg = ("Attempting to promote an AuxCoord ({}) "
"which does not exist in the cube.")
msg = msg.format(aux_coord.name())
raise ValueError(msg)
coord_dim = cube.coord_dims(aux_coord)
if len(coord_dim) != 1:
msg = ("Attempting to promote an AuxCoord ({}) "
"which is associated with {} dimensions.")
msg = msg.format(aux_coord.name(), len(coord_dim))
raise ValueError(msg)
try:
dim_coord = iris.coords.DimCoord.from_coord(aux_coord)
except ValueError as valerr:
msg = ("Attempt to promote an AuxCoord ({}) fails "
"when attempting to create a DimCoord from the "
"AuxCoord because: {}")
msg = msg.format(aux_coord.name(), str(valerr))
raise ValueError(msg)
old_dim_coord = cube.coords(dim_coords=True,
contains_dimension=coord_dim[0])
if len(old_dim_coord) == 1:
demote_dim_coord_to_aux_coord(cube, old_dim_coord[0])
# order matters here: don't want to remove
# the aux_coord before have tried to make
# dim_coord in case that fails
cube.remove_coord(aux_coord)
cube.add_dim_coord(dim_coord, coord_dim)
def demote_dim_coord_to_aux_coord(cube, name_or_coord):
"""
Demotes a DimCoord on the cube to an AuxCoord, leaving that
dimension anonymous.
Args:
* cube
An instance of :class:`iris.cube.Cube`
* name_or_coord:
Either
(a) An instance of :class:`iris.coords.DimCoord`
or
        (b) the :attr:`standard_name`, :attr:`long_name`, or
            :attr:`var_name` of an instance of
            :class:`iris.coords.DimCoord`.
For example::
>>> print cube
air_temperature / (K) (time: 12; latitude: 73; longitude: 96)
Dimension coordinates:
time x - -
latitude - x -
longitude - - x
Auxiliary coordinates:
year x - -
>>> demote_dim_coord_to_aux_coord(cube, 'time')
>>> print cube
air_temperature / (K) (-- : 12; latitude: 73; longitude: 96)
Dimension coordinates:
latitude - x -
longitude - - x
Auxiliary coordinates:
time x - -
year x - -
"""
if isinstance(name_or_coord, six.string_types):
dim_coord = cube.coord(name_or_coord)
elif isinstance(name_or_coord, iris.coords.Coord):
dim_coord = name_or_coord
else:
# Don't know how to handle this type
msg = ("Don't know how to handle coordinate of type {}. "
"Ensure all coordinates are of type six.string_types or "
"iris.coords.Coord.")
msg = msg.format(type(name_or_coord))
raise TypeError(msg)
if dim_coord not in cube.dim_coords:
# nothing to do
return
coord_dim = cube.coord_dims(dim_coord)
cube.remove_coord(dim_coord)
cube.add_aux_coord(dim_coord, coord_dim)
|
ghislainp/iris
|
lib/iris/util.py
|
Python
|
gpl-3.0
| 53,476
|
[
"NetCDF"
] |
ff5fbf9b0a338e0f7cd77ab71718e81c4b39a99d6fe075b19e9af9855f018ce9
|
#!/usr/bin/env python
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
import os
# Locate MOOSE directory
MOOSE_DIR = os.getenv('MOOSE_DIR', os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'moose')))
if not os.path.exists(MOOSE_DIR):
    MOOSE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'moose'))
if not os.path.exists(MOOSE_DIR):
raise Exception('Failed to locate MOOSE, specify the MOOSE_DIR environment variable.')
os.environ['MOOSE_DIR'] = MOOSE_DIR
# Append MOOSE python directory
MOOSE_PYTHON_DIR = os.path.join(MOOSE_DIR, 'python')
if MOOSE_PYTHON_DIR not in sys.path:
sys.path.append(MOOSE_PYTHON_DIR)
from MooseDocs import main
if __name__ == '__main__':
sys.exit(main.run())
|
harterj/moose
|
tutorials/tutorial02_multiapps/app/doc/moosedocs.py
|
Python
|
lgpl-2.1
| 1,016
|
[
"MOOSE"
] |
754e13f227d9428952646d7ccaef8ba95c6c7faed1d521a594c56b27805b7f1f
|
"""
This program takes a set of documents (all streamed from stdin at once) and formats them in a way suitable for
use with the NIST-distributed mteval script. The output is in UTF-8.
Usage: cat [MTC_DIR_FOR_SYSTEM] | python convert-mtc-systems.py [doctype-string {'source', 'target', 'reference'} (default='target')] > [NEW_XML_DOC_TO_STDOUT]
"""
import sys, os, re, codecs, xml.sax.saxutils, my_unicode
try:
    import chardet
except ImportError:
    chardet = None
from xml.etree.ElementTree import *
from collections import defaultdict
def tokenize(t):
"""very simple text tokenization:
<blah>n't => <blah> n't
<blah>'s => <blah> 's
<blah>' => <blah> '
where '<blah>' is not whitespace.
"""
t = t.replace("n't", " n't").replace("'s", " 's").replace("' ", " ' ")
if t[-1] == "'":
t = t[:-1] + " " + t[-1]
    return t.replace("  ", " ")  # collapse any double spaces introduced above
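# Editor's illustration (not in the original script): expected behaviour of
# tokenize() on a clitic-bearing string.
def _example_tokenize():
    assert tokenize("he didn't say Bill's name") == "he did n't say Bill 's name"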
def decode_line(ln, encoding):
    # Try the detected encoding first, then a fixed list of fallbacks;
    # if nothing decodes, return the raw line unchanged.
    for enc in (encoding, "iso-8859-2", "iso-8859-1", "utf-8",
                "GB2312", "Big5", "EUC-TW"):
        try:
            return ln.decode(enc)
        except:
            pass
    return ln
doc_pattern = re.compile(u"<[Dd][Oo][Cc] docid=\"(.*)\" sysid=\"(.*)\">(.*)")
doc_pattern_source = re.compile(u"<[Dd][Oo][Cc] docid=\"(.*)\">(.*)")
seg_pattern = re.compile(u"<seg id=\"?(.*)\"?>(.*)</seg>$")
doc_type = "target"
if len(sys.argv) >= 2:
doc_type = sys.argv[1].lower()
    if doc_type not in ["target", "source", "reference"]:
doc_type = "target"
mtc_in = sys.stdin.readlines()
mtc_all = (os.linesep).join(mtc_in)
if chardet is not None:
encoding = chardet.detect(mtc_all)['encoding']
else:
encoding = "ISO-8859-2"
# turn stdout into a UTF-8 converting writer.
streamWriter = codecs.lookup("UTF-8")[-1]
sys.stdout = streamWriter(sys.stdout)
output = sys.stdout
# map from auto-assigned ID to MTC ID.
autoid2mtcid = {}
mtc = defaultdict(lambda: [])
sys = None  # NOTE: rebinds the name sys; the sys module is not used past this point
for l in mtc_in:
l = decode_line(l, encoding).strip()
if l.startswith("<DOC"):
match = doc_pattern.findall(l)[0] if (doc_type == "target") else doc_pattern_source.findall(l)[0]
if doc_type == "target":
(docid, sysid) = (match[0], match[1])
else:
(docid, sysid) = (match[0], None)
docid = docid.replace("_",".")
curr_doc = docid
curr_sys = sysid
sys = curr_sys
elif l.startswith("<seg"):
match = seg_pattern.findall(l)[0]
(segid, text) = (match[0], match[1])
mtc[(curr_sys, curr_doc)].append((segid, text.strip()))
output.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + \
os.linesep + "<!DOCTYPE mteval SYSTEM \"ftp://jaguar.ncsl.nist.gov/mt/resources/mteval-xml-v1.3.dtd\">" + os.linesep +\
"<mteval>" + os.linesep)
if doc_type == "target":
output.write(" <tstset setid=\"multiple_translation_set\" srclang=\"Chinese\" trglang=\"English\" sysid=\"%s\">" % sys + os.linesep)
elif doc_type == "source":
output.write(" <srcset setid=\"multiple_translation_set\" srclang=\"Chinese\">" + os.linesep)
else:
output.write(" <refset setid=\"multiple_translation_set\" srclang=\"Chinese\" trglang=\"English\" refid=\"1\">" + os.linesep)
docs = mtc.keys()
docs.sort()
for (sy,doc) in docs:
output.write(" <doc docid=\"%s\" genre=\"nw\">" % doc + os.linesep)
segids_and_texts = mtc[(sy,doc)]
segids_and_texts.sort(lambda a,b: cmp(int(a[0]),int(b[0])))
for (segid,text) in segids_and_texts:
output.write(" <seg id=\"%s\"> %s </seg>" % (segid,xml.sax.saxutils.escape(my_unicode.removeInvalidChars(tokenize(text)))) + os.linesep)
output.write(" </doc>" + os.linesep)
if doc_type == "target":
output.write(" </tstset>" + os.linesep + "</mteval>")
elif doc_type == "source":
output.write(" </srcset>" + os.linesep + "</mteval>")
else:
output.write(" </refset>" + os.linesep + "</mteval>")
|
DanielCoutoVale/openccg
|
ccgbank/bin/convert-mtc-systems.py
|
Python
|
lgpl-2.1
| 4,455
|
[
"Jaguar"
] |
0d66b88e77f0ddf1250cfe189ad0ee66b03f76520aab17449ae1f507d1f99f75
|
from ev3.ev3dev import LED
import unittest
import time
from util import get_input
class TestLED(unittest.TestCase):
def test_led(self):
get_input('Test LED')
led = LED()
get_input('Test left red')
led.left.color=LED.COLOR.RED
get_input('Test left green')
led.left.color=LED.COLOR.GREEN
get_input('Test left amber')
led.left.color=LED.COLOR.AMBER
get_input('Test right blink')
led.right.blink(color=LED.COLOR.GREEN,delay_on=1000, delay_off=2000 )
time.sleep(10)
get_input('Test left and right on')
led.left.on()
led.right.on()
time.sleep(5)
get_input('Test left and right off')
led.left.off()
led.right.off()
if __name__ == '__main__':
unittest.main()
|
topikachu/python-ev3
|
test/test_ev3_led.py
|
Python
|
apache-2.0
| 824
|
[
"Amber"
] |
05bcd44273b69d43d46d1fa1260df2c3491c5ab57f41ec13a65bcc80869c3304
|
# -*- coding: utf-8 -*-
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import numpy as np
import matplotlib.pyplot as plt  # used by doPlot() and the standalone plots
import moose
import moose.fixXreacs as fixXreacs
def makeModel():
# create container for model
num = 1 # number of compartments
model = moose.Neutral( '/model' )
compartment = moose.CylMesh( '/model/compartment' )
compartment.x1 = 1.0e-6 # Set it to a 1 micron single-voxel cylinder
# create molecules and reactions
s = moose.Pool( '/model/compartment/s' )
#####################################################################
# Put in endo compartment. Add molecule s
endo = moose.EndoMesh( '/model/endo' )
endo.isMembraneBound = True
endo.surround = compartment
es = moose.Pool( '/model/endo/s' )
rXfer = moose.Reac( '/model/endo/rXfer' )
#####################################################################
moose.connect( rXfer, 'sub', s, 'reac' )
moose.connect( rXfer, 'prd', es, 'reac' )
volRatio = compartment.volume / endo.volume
rXfer.Kf = 0.04 # 0.04/sec
rXfer.Kb = 0.02 # 0.02/sec
#####################################################################
fixXreacs.fixXreacs( '/model' )
#fixXreacs.restoreXreacs( '/model' )
#fixXreacs.fixXreacs( '/model' )
#####################################################################
# Make solvers
ksolve = moose.Ksolve( '/model/compartment/ksolve' )
dsolve = moose.Dsolve( '/model/dsolve' )
eksolve = moose.Ksolve( '/model/endo/ksolve' )
edsolve = moose.Dsolve( '/model/endo/dsolve' )
stoich = moose.Stoich( '/model/compartment/stoich' )
stoich.compartment = compartment
stoich.ksolve = ksolve
stoich.dsolve = dsolve
stoich.path = "/model/compartment/##"
assert( dsolve.numPools == 1 )
s.vec.concInit = [1]*num
estoich = moose.Stoich( '/model/endo/stoich' )
estoich.compartment = endo
estoich.ksolve = eksolve
estoich.dsolve = edsolve
estoich.path = "/model/endo/##"
assert( edsolve.numPools == 2 )
edsolve.buildMeshJunctions( dsolve )
plot1 = moose.Table2( '/model/plot1' )
plot2 = moose.Table2( '/model/plot2' )
moose.connect( '/model/plot1', 'requestOut', s, 'getN' )
moose.connect( '/model/plot2', 'requestOut', es, 'getN' )
plot3 = moose.Table2( '/model/plot3' )
plot4 = moose.Table2( '/model/plot4' )
moose.connect( '/model/plot3', 'requestOut', s, 'getConc' )
moose.connect( '/model/plot4', 'requestOut', es, 'getConc' )
def doPlot( ax, plot1, plot2, label ):
plt.ylabel( label )
plt.xlabel( 'time(s)' )
v1 = moose.element(plot1).vector
v2 = moose.element(plot2).vector
ax.plot( v1, label='s' )
ax.plot( v2, label='es' )
ax.plot( np.array( v1 ) + np.array( v2 ), label='sum' )
plt.legend()
def almostEq( a, b ):
return abs(a-b)/(a+b) < 5e-5
def main( standalone = False ):
runtime = 200
displayInterval = 2
makeModel()
moose.reinit()
moose.start( runtime )
assert( almostEq( 2.0 * moose.element( 'model/compartment/s' ).conc,
moose.element( '/model/endo/s' ).conc ) )
if standalone:
fig = plt.figure( figsize=(12,10) )
ax1 = fig.add_subplot(211)
doPlot( ax1, '/model/plot1', '/model/plot2', '# of molecules' )
ax2 = fig.add_subplot(212)
doPlot( ax2, '/model/plot3', '/model/plot4', 'conc (mM)' )
plt.show()
moose.delete( '/model' )
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main( standalone = True )
|
upibhalla/moose-core
|
tests/python/testXreacs4a.py
|
Python
|
gpl-3.0
| 3,950
|
[
"MOOSE"
] |
9c62a2780276eb5c2533fe56f75019d064df8a954b0bad78f6abacc68cd78b4d
|
import param
import numpy as np
from ..core import Dimension, Dataset, NdOverlay
from ..core.operation import Operation
from ..core.util import basestring, cartesian_product, isfinite
from ..element import (Curve, Area, Image, Distribution, Bivariate,
Contours, Polygons)
from .element import contours
def _kde_support(bin_range, bw, gridsize, cut, clip):
"""Establish support for a kernel density estimate."""
kmin, kmax = bin_range[0] - bw * cut, bin_range[1] + bw * cut
if isfinite(clip[0]):
kmin = max(kmin, clip[0])
if isfinite(clip[1]):
kmax = min(kmax, clip[1])
return np.linspace(kmin, kmax, gridsize)
class univariate_kde(Operation):
"""
Computes a 1D kernel density estimate (KDE) along the supplied
dimension. Kernel density estimation is a non-parametric way to
estimate the probability density function of a random variable.
The KDE works by placing a Gaussian kernel at each sample with
the supplied bandwidth. These kernels are then summed to produce
the density estimate. By default a good bandwidth is determined
using the bw_method but it may be overridden by an explicit value.
"""
bw_method = param.ObjectSelector(default='scott', objects=['scott', 'silverman'], doc="""
Method of automatically determining KDE bandwidth""")
bandwidth = param.Number(default=None, doc="""
Allows supplying explicit bandwidth value rather than relying on scott or silverman method.""")
cut = param.Number(default=3, doc="""
Draw the estimate to cut * bw from the extreme data points.""")
bin_range = param.NumericTuple(default=None, length=2, doc="""
Specifies the range within which to compute the KDE.""")
dimension = param.String(default=None, doc="""
Along which dimension of the Element to compute the KDE.""")
filled = param.Boolean(default=True, doc="""
Controls whether to return filled or unfilled KDE.""")
n_samples = param.Integer(default=100, doc="""
Number of samples to compute the KDE over.""")
groupby = param.ClassSelector(default=None, class_=(basestring, Dimension), doc="""
Defines a dimension to group the Histogram returning an NdOverlay of Histograms.""")
def _process(self, element, key=None):
if self.p.groupby:
if not isinstance(element, Dataset):
raise ValueError('Cannot use histogram groupby on non-Dataset Element')
grouped = element.groupby(self.p.groupby, group_type=Dataset, container_type=NdOverlay)
self.p.groupby = None
return grouped.map(self._process, Dataset)
try:
from scipy import stats
from scipy.linalg import LinAlgError
except ImportError:
raise ImportError('%s operation requires SciPy to be installed.' % type(self).__name__)
params = {}
if isinstance(element, Distribution):
selected_dim = element.kdims[0]
if element.group != type(element).__name__:
params['group'] = element.group
params['label'] = element.label
vdim = element.vdims[0]
vdim_name = '{}_density'.format(selected_dim.name)
vdims = [vdim(vdim_name, label='Density') if vdim.name == 'Density' else vdim]
else:
if self.p.dimension:
selected_dim = element.get_dimension(self.p.dimension)
else:
dimensions = element.vdims+element.kdims
if not dimensions:
raise ValueError("%s element does not declare any dimensions "
"to compute the kernel density estimate on." %
type(element).__name__)
selected_dim = dimensions[0]
vdim_name = '{}_density'.format(selected_dim.name)
vdims = [Dimension(vdim_name, label='Density')]
data = element.dimension_values(selected_dim)
bin_range = self.p.bin_range or element.range(selected_dim)
if bin_range == (0, 0) or any(not isfinite(r) for r in bin_range):
bin_range = (0, 1)
elif bin_range[0] == bin_range[1]:
bin_range = (bin_range[0]-0.5, bin_range[1]+0.5)
element_type = Area if self.p.filled else Curve
data = data[isfinite(data)] if len(data) else []
if len(data) > 1:
try:
kde = stats.gaussian_kde(data)
except LinAlgError:
return element_type([], selected_dim, vdims, **params)
if self.p.bandwidth:
kde.set_bandwidth(self.p.bandwidth)
bw = kde.scotts_factor() * data.std(ddof=1)
if self.p.bin_range:
xs = np.linspace(bin_range[0], bin_range[1], self.p.n_samples)
else:
xs = _kde_support(bin_range, bw, self.p.n_samples, self.p.cut, selected_dim.range)
ys = kde.evaluate(xs)
else:
xs = np.linspace(bin_range[0], bin_range[1], self.p.n_samples)
ys = np.full_like(xs, 0)
return element_type((xs, ys), kdims=[selected_dim], vdims=vdims, **params)
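# Editor's sketch (not part of the original module; random data, SciPy
# required): operations are parameterized functions, so the class can be
# called directly on an element, as the contours call further below shows.
def _example_univariate_kde():
    dist = Distribution(np.random.randn(1000))
    return univariate_kde(dist, filled=True, n_samples=200)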
class bivariate_kde(Operation):
"""
Computes a 2D kernel density estimate (KDE) of the first two
dimensions in the input data. Kernel density estimation is a
non-parametric way to estimate the probability density function of
a random variable.
The KDE works by placing 2D Gaussian kernel at each sample with
the supplied bandwidth. These kernels are then summed to produce
the density estimate. By default a good bandwidth is determined
using the bw_method but it may be overridden by an explicit value.
"""
contours = param.Boolean(default=True, doc="""
Whether to compute contours from the KDE, determines whether to
return an Image or Contours/Polygons.""")
bw_method = param.ObjectSelector(default='scott', objects=['scott', 'silverman'], doc="""
Method of automatically determining KDE bandwidth""")
bandwidth = param.Number(default=None, doc="""
Allows supplying explicit bandwidth value rather than relying
on scott or silverman method.""")
cut = param.Number(default=3, doc="""
Draw the estimate to cut * bw from the extreme data points.""")
filled = param.Boolean(default=False, doc="""
Controls whether to return filled or unfilled contours.""")
levels = param.ClassSelector(default=10, class_=(list, int), doc="""
A list of scalar values used to specify the contour levels.""")
n_samples = param.Integer(default=100, doc="""
Number of samples to compute the KDE over.""")
x_range = param.NumericTuple(default=None, length=2, doc="""
The x_range as a tuple of min and max x-value. Auto-ranges
if set to None.""")
    y_range = param.NumericTuple(default=None, length=2, doc="""
        The y_range as a tuple of min and max y-value. Auto-ranges
        if set to None.""")
def _process(self, element, key=None):
try:
from scipy import stats
except ImportError:
raise ImportError('%s operation requires SciPy to be installed.' % type(self).__name__)
if len(element.dimensions()) < 2:
raise ValueError("bivariate_kde can only be computed on elements "
"declaring at least two dimensions.")
xdim, ydim = element.dimensions()[:2]
params = {}
if isinstance(element, Bivariate):
if element.group != type(element).__name__:
params['group'] = element.group
params['label'] = element.label
vdim = element.vdims[0]
else:
vdim = 'Density'
data = element.array([0, 1]).T
xmin, xmax = self.p.x_range or element.range(0)
ymin, ymax = self.p.y_range or element.range(1)
if any(not isfinite(v) for v in (xmin, xmax)):
xmin, xmax = -0.5, 0.5
elif xmin == xmax:
xmin, xmax = xmin-0.5, xmax+0.5
if any(not isfinite(v) for v in (ymin, ymax)):
ymin, ymax = -0.5, 0.5
elif ymin == ymax:
ymin, ymax = ymin-0.5, ymax+0.5
data = data[:, isfinite(data).min(axis=0)] if data.shape[1] > 1 else np.empty((2, 0))
if data.shape[1] > 1:
kde = stats.gaussian_kde(data)
if self.p.bandwidth:
kde.set_bandwidth(self.p.bandwidth)
bw = kde.scotts_factor() * data.std(ddof=1)
if self.p.x_range:
xs = np.linspace(xmin, xmax, self.p.n_samples)
else:
xs = _kde_support((xmin, xmax), bw, self.p.n_samples, self.p.cut, xdim.range)
if self.p.y_range:
ys = np.linspace(ymin, ymax, self.p.n_samples)
else:
ys = _kde_support((ymin, ymax), bw, self.p.n_samples, self.p.cut, ydim.range)
xx, yy = cartesian_product([xs, ys], False)
positions = np.vstack([xx.ravel(), yy.ravel()])
f = np.reshape(kde(positions).T, xx.shape)
elif self.p.contours:
eltype = Polygons if self.p.filled else Contours
return eltype([], kdims=[xdim, ydim], vdims=[vdim])
else:
xs = np.linspace(xmin, xmax, self.p.n_samples)
ys = np.linspace(ymin, ymax, self.p.n_samples)
f = np.zeros((self.p.n_samples, self.p.n_samples))
img = Image((xs, ys, f.T), kdims=element.dimensions()[:2], vdims=[vdim], **params)
if self.p.contours:
cntr = contours(img, filled=self.p.filled, levels=self.p.levels)
return cntr.clone(cntr.data[1:], **params)
return img
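# Editor's sketch (not part of the original module; random data, SciPy
# required): by default the 2D estimate is contoured; pass contours=False
# to get the underlying Image instead.
def _example_bivariate_kde():
    points = np.random.randn(500, 2)
    return bivariate_kde(Bivariate(points), n_samples=50)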
|
basnijholt/holoviews
|
holoviews/operation/stats.py
|
Python
|
bsd-3-clause
| 9,824
|
[
"Gaussian"
] |
14aa62c0dab2716e1fcef021dbe2a36453cd203c8a2e8ee58525383cf45d21c3
|
import os
import operator
import subprocess
import multiprocessing
from .. utils import *
from .. process import *
from .. task import *
from .. package import *
__all__ = ["Index", "Align"]
class Index(Process):
Command = "bwa"
Arguments = [
ProcessArgument(name="bwa_module", type=str, default="index", required=True, help="BWA module name"),
ProcessArgument(name="prefix", argument="-p", type=str, help="Prefix of database"),
ProcessArgument(name="algorithm", argument="-a", type=str, help="Algorithm for index construction"),
ProcessArgument(name="reference", required=True, type=str, help="Path to reference"),
]
IndexExtensionList = ["amb", "ann", "bwt", "pac", "sa"]
def get_index_dir(self):
return getattr(self, "_index_dir", None)
def set_index_dir(self, path):
self._index_dir = path
index_dir = property(get_index_dir, set_index_dir)
def get_prefix(self):
if hasattr(self, "_prefix"):
return self._prefix
if hasattr(self, "_prefix_dir") and hasattr(self, "_reference"):
ref_fn = os.path.split(self._reference)[-1]
return os.path.join(self._prefix_dir, ref_fn)
if hasattr(self, "_reference"):
return self._reference
return None
def set_prefix(self, prefix):
self._prefix = prefix
prefix = property(get_prefix, set_prefix)
def get_reference(self):
if hasattr(self, "_reference"):
return self._reference
if hasattr(self, "_prefix"):
return self._prefix
return None
    def set_reference(self, reference):
        self._reference = reference
    reference = property(get_reference, set_reference)
@property
def is_valid(self):
for ext in self.IndexExtensionList:
fn = "%s.%s" % (self.prefix, ext)
if not os.path.exists(fn):
return False
if self.reference and is_stale(self.reference, fn):
return False
return True
class Align(Process):
Command = "bwa"
ProcessOptions = {
"stdout": "pipe"
}
Arguments = [
ProcessArgument(name="bwa_module", type=str, default="mem", required=True, help="BWA module name"),
ProcessArgument(name="threads", argument="-t", type=int, default=1, help="Number of threads"),
ProcessArgument(name="min_seed_len", argument="-k", type=int, default=19, help="Minimum seed length"),
ProcessArgument(name="bandwidth", argument="-w", type=int, default=100, help="Gaps longer than bandwidth will not be found"),
ProcessArgument(name="zdropoff", argument="-d", type=int, default=100, help="Off-diagonal X-dropoff"),
ProcessArgument(name="seed_split_ratio", argument="-r", type=float, default=1.5, help="Re-seeding trigger ratio"),
ProcessArgument(name="max_occurrence", argument="-c", type=int, default=10000, help="Threshold for MEM occurence before discarding"),
ProcessArgument(name="lazy_rescue", argument="-P", type=bool, default=False, help="Ignore pairing information for rescued hits"),
ProcessArgument(name="matching_score", argument="-A", type=int, default=1, help="Matching score"),
ProcessArgument(name="mismatch_penalty", argument="-B", type=int, default=4, help="Mismatch penalty"),
ProcessArgument(name="gap_open_penalty", argument="-O", type=int, default=6, help="Gap open penalty"),
ProcessArgument(name="gap_extension_penalty", argument="-E", type=int, default=1, help="Gap extentsion penalty"),
ProcessArgument(name="clipping_penalty", argument="-L", type=int, default=5, help="Clipping penalty"),
ProcessArgument(name="unpaired_read_penalty", argument="-U", type=int, default=9, help="Unpaired read penalty"),
ProcessArgument(name="interleaved_pairs", argument="-p", type=bool, default=False, help="Pairs are interleaved"),
ProcessArgument(name="read_group", argument="-R", type=str, help="Complete read group header line"),
ProcessArgument(name="score_cutoff", argument="-T", type=int, default=30, help="Don't output reads with score less than cutoff"),
ProcessArgument(name="all_alignments", argument="-a", type=bool, default=False, help="Output all alignments including secondary alignments"),
ProcessArgument(name="comment", argument="-C", type=str, help="Append comment to the SAM file"),
ProcessArgument(name="hard_clipping", argument="-H", type=bool, default=False, help="Use hard clipping in SAM file"),
ProcessArgument(name="mark_short_splits", argument="-M", type=bool, default=False, help="Mark shorter split hits as secondary"),
ProcessArgument(name="verbose", argument="-v", type=int, default=3, help="Control the verbose level of the output."),
ProcessArgument(name="prefix", type=str, required=True, help="Base index of reference"),
ProcessArgument(name="reads", type=str, required=True, help="Path to reads file"),
ProcessArgument(name="mates", type=str, help="Path to mate reads file"),
]
def get_prefix(self):
thing = getattr(self, "_prefix", None)
if type(thing) in (str, unicode):
return thing
if isinstance(thing, Index):
return thing.prefix
return None
def set_prefix(self, prefix):
self._prefix = prefix
prefix = property(get_prefix, set_prefix)
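# Editor's sketch (not part of the original module; file names are
# hypothetical and bwa must be on PATH): index a reference, then align
# paired-end reads against it, mirroring the task classes below.
def _example_index_and_align():
    index = Index(reference="ref.fa")
    index.run(wait=True)
    align = Align(prefix=index, reads="reads_1.fq", mates="reads_2.fq")
    with open("alignment.sam", "w") as fh:
        align.run(stdout=fh, wait=True)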
class BWA_IndexTask(BoneTask):
TaskName = "bwa_index"
Directories = ["reads", "reference"]
def _init(self):
self.reference = symlink(self.reference, self.dir_reference)
def _run(self, *args, **kw):
cmdkw = {"reference": self.reference}
cmdkw.update(kw)
        index = Index(**cmdkw)
index.run(wait=True)
self.index_prefix = index.prefix
class BWA_AlignmentTask(BoneTask):
Directories = ["alignment"]
TaskName = "bwa_align"
def _init(self):
self.reads = [symlink(fn, self.dir_reads) for fn in self.reads]
self.fn_alignment = os.path.join(self.dir_alignment, "alignment.sam")
def _run(self, *args, **kw):
#if not is_stale(self.reference, self.fn_alignment):
# return
cmdkw = {
"prefix": self.reference,
"reads": self.reads[0],
}
if len(self.reads) > 1:
cmdkw["mates"] = self.reads[1]
cmdkw.update(kw)
        align = Align(**cmdkw)
with open(self.fn_alignment, 'w') as fh:
align.run(stdout=fh, wait=True)
class PackageBWA(Package):
PackageName = "bwa"
Depends = {
"dpkg": ["git", "build-essential", "zlib1g-dev"],
"pip": []
}
Version = "v0.7.13"
def script(self):
script = [
"git clone -b ${PKG_VERSION} https://github.com/lh3/bwa.git ${PKG_SRCDIR}/bwa",
"cd ${PKG_SRCDIR}/bwa",
"make",
"cp bwa ${PKG_BINDIR}",
]
return script
|
vishnubob/bones
|
bones/tools/bwa.py
|
Python
|
mit
| 6,980
|
[
"BWA"
] |
12ec4d25dde0eebf4bd960538c6d05792f6cd7076a2c676eacb0b05d897c5274
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import espressopp
import espressopp.esutil
import espressopp.unittest
import espressopp.storage
import espressopp.integrator
import espressopp.interaction
import espressopp.analysis
import espressopp.bc
import mpi4py.MPI as MPI
import math
import logging
from espressopp import Real3D
# Input values for system
N = 10
cutoff = 2.5
skin = 0.3
def calcNumberCells(size, nodes, cutoff):
ncells = 1
while size / (ncells * nodes) >= cutoff:
ncells = ncells + 1
return ncells - 1
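# Editor's illustration (not part of the original test): calcNumberCells
# returns the largest cell count that keeps each cell at least one cutoff
# wide; e.g. a box of size 10.0 on one node with cutoff 2.5 gives 4 cells.
def _exampleCalcNumberCells():
    assert calcNumberCells(10.0, 1, 2.5) == 4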
class TestVerletList(espressopp.unittest.TestCase) :
def test0Build(self) :
system = espressopp.System()
rng = espressopp.esutil.RNG()
SIZE = float(N)
box = Real3D(SIZE)
bc = espressopp.bc.OrthorhombicBC(None, box)
system.bc = bc
system.rng = rng
system.skin = skin
comm = espressopp.MPI.COMM_WORLD
nodeGrid = (1, 1, comm.size)
cellGrid = [1, 1, 1]
for i in range(3):
cellGrid[i] = calcNumberCells(SIZE, nodeGrid[i], cutoff)
print 'NodeGrid = %s'%(nodeGrid,)
print 'CellGrid = %s'%cellGrid
dd = espressopp.storage.DomainDecomposition(system, comm, nodeGrid, cellGrid)
system.storage = dd
id = 0
for i in range(N):
for j in range(N):
for k in range(N):
m = (i + 2*j + 3*k) % 11
r = 0.45 + m * 0.01
x = (i + r) / N * SIZE
y = (j + r) / N * SIZE
z = (k + r) / N * SIZE
dd.addParticle(id, Real3D(x, y, z))
# not yet: dd.setVelocity(id, (1.0, 0.0, 0.0))
id = id + 1
dd.decompose()
integrator = espressopp.integrator.VelocityVerlet(system)
        print 'integrator.dt = %g, will be set to 0.005'%integrator.dt
        integrator.dt = 0.005
        print 'integrator.dt is now %g'%integrator.dt
# now build Verlet List
# ATTENTION: you have to add the skin explicitly here
vl = espressopp.VerletList(system, cutoff = cutoff + system.skin)
potLJ = espressopp.interaction.LennardJones(1.0, 1.0, cutoff = cutoff)
# ATTENTION: auto shift was enabled
print "potLJ, shift = %g"%potLJ.shift
interLJ = espressopp.interaction.VerletListLennardJones(vl)
interLJ.setPotential(type1 = 0, type2 = 0, potential = potLJ)
# Todo
system.addInteraction(interLJ)
temp = espressopp.analysis.Temperature(system)
temperature = temp.compute()
kineticEnergy = 0.5 * temperature * (3 * N * N * N)
potentialEnergy = interLJ.computeEnergy()
        print 'Start: tot energy = %10.6f pot = %10.6f kin = %10.6f temp = %10.6f'%(kineticEnergy + potentialEnergy,
            potentialEnergy, kineticEnergy, temperature)
nsteps = 10
# logging.getLogger("MDIntegrator").setLevel(logging.DEBUG)
for i in range(20):
integrator.run(nsteps)
temperature = temp.compute()
kineticEnergy = 0.5 * temperature * (3 * N * N * N)
potentialEnergy = interLJ.computeEnergy()
print 'Step %6d: tot energy = %10.6f pot = %10.6f kin = %10.6f temp = %f'%(nsteps*(i+1),
kineticEnergy + potentialEnergy, potentialEnergy, kineticEnergy, temperature)
if __name__ == "__main__":
unittest.main()
|
capoe/espressopp.soap
|
src/integrator/unittest/PTestVelocityVerlet.py
|
Python
|
gpl-3.0
| 4,246
|
[
"ESPResSo"
] |
970644ec21ad13ae0c1eea0372717bbc5d632b2e4ebd8c24042e1c1bf88ac975
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2007 Async Open Source
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
# TODO:
# Focus could be improved, arrows shouldn't focus other widgets
# Column/List field
"""Widget containing a Grid of fields
"""
import pickle
import gobject
import glib
import pango
import gtk
from gtk import gdk
from gtk import keysyms
from kiwi.python import clamp
from kiwi.utils import gsignal
(FIELD_NONE,
FIELD_MOVE,
FIELD_RESIZE) = range(3)
# Bindings
(FIELD_MOVEMENT_HORIZONTAL,
FIELD_MOVEMENT_VERTICAL,
FIELD_DELETION) = range(3)
# _CURSOR_LEFT_SIDE = gdk.Cursor(gdk.LEFT_SIDE)
_CURSOR_RIGHT_SIDE = gdk.Cursor(gdk.RIGHT_SIDE)
# _CURSOR_TOP_SIDE = gdk.Cursor(gdk.TOP_SIDE)
_CURSOR_BOTTOM_SIDE = gdk.Cursor(gdk.BOTTOM_SIDE)
# _CURSOR_BOTTOM_LEFT = gdk.Cursor(gdk.BOTTOM_LEFT_CORNER)
_CURSOR_BOTTOM_RIGHT = gdk.Cursor(gdk.BOTTOM_RIGHT_CORNER)
# _CURSOR_TOP_LEFT = gdk.Cursor(gdk.TOP_LEFT_CORNER)
# _CURSOR_TOP_RIGHT = gdk.Cursor(gdk.TOP_RIGHT_CORNER)
class Range(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __contains__(self, x):
return self.start <= x <= self.end
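# Editor's illustration (not part of the original module): Range treats
# both endpoints as inclusive.
def _example_range():
    assert 5 in Range(1, 5)
    assert 0 not in Range(1, 5)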
class FieldInfo(object):
def __init__(self, grid, name, widget, x, y, width=-1, height=1, model=None):
if width == -1:
width = len(name)
self.grid = grid
self.name = name
self.widget = widget
self.x = x
self.y = y
self.width = width
self.height = height
self.model = model
def update_label(self, text):
fmt = '<span letter_spacing="3072">%s</span>'
self.widget.set_markup(fmt % (glib.markup_escape_text(text), ))
def allocate(self, width, height):
req_width, req_height = self.widget.size_request()
self.widget.size_allocate(((self.x * width) - 1,
(self.y * height) - 1,
(self.width * width) + 2,
(self.height * height) + 3))
def find_at(self, x, y):
wx, wy, ww, wh = self.widget.allocation
return (x in Range(wx, wx + ww) and
y in Range(wy, wy + wh))
def show(self):
self.widget.show()
def get_cursor(self, x, y):
a = self.widget.allocation
cx = a.x + 1
cy = a.y + 1
cw = a.width
ch = a.height
intop = y in Range(cy - 1, cy + 1)
inbottom = y in Range(cy + ch - 3, cy + ch)
if x in Range(cx - 1, cx + 1):
if intop:
return # _CURSOR_TOP_LEFT
elif inbottom:
return # _CURSOR_BOTTOM_LEFT
else:
return # _CURSOR_LEFT_SIDE
elif x in Range(cx + cw - 2, cx + cw + 1):
if intop:
return # _CURSOR_TOP_RIGHT
elif inbottom:
return _CURSOR_BOTTOM_RIGHT
else:
return _CURSOR_RIGHT_SIDE
elif intop:
return # _CURSOR_TOP_SIDE
elif inbottom:
return _CURSOR_BOTTOM_SIDE
class FieldGrid(gtk.Layout):
"""FieldGrid is a Grid like widget which you can add fields to
* **field-added** (object): Emitted when a field is added to the grid
* **field-removed** (object): Emitted when a field is removed
from the grid
    * **selection-changed** (object): Emitted when a field is selected or
deselected by the user.
"""
gsignal('selection-changed', object,
flags=gobject.SIGNAL_RUN_LAST | gobject.SIGNAL_ACTION)
gsignal('field-added', object)
gsignal('field-removed', object)
def __init__(self, font, width, height):
gtk.Layout.__init__(self)
self.set_can_focus(True)
self.drag_dest_set(
gtk.DEST_DEFAULT_ALL,
[('OBJECTLIST_ROW', 0, 10),
('text/uri-list', 0, 11),
('_NETSCAPE_URL', 0, 12)],
gdk.ACTION_LINK | gdk.ACTION_COPY | gdk.ACTION_MOVE)
self.font = pango.FontDescription(font)
self.width = width
self.height = height
self._fields = []
self._moving_field = None
self._moving_start_x_pointer = 0
self._moving_start_y_pointer = 0
self._moving_start_x_position = 0
self._moving_start_y_position = 0
self._action_type = FIELD_NONE
self._selected_field = None
self._draw_grid = True
TEXT = '1234567890ABCDEFTI'
self._layout = self.create_pango_layout(TEXT)
self._layout.set_font_description(self.font)
self._field_width = (self._layout.get_pixel_size()[0] / len(TEXT)) + 2
self._field_height = self._layout.get_pixel_size()[1] + 2
#
# Private API
#
def _pick_field(self, window_x, window_y):
for field in self._fields:
if field.find_at(window_x, window_y):
return field
return None
def _remove_selected_field(self):
field = self._selected_field
if field:
self._remove_field(field)
def _remove_field(self, field):
if field == self._selected_field:
self.select_field(None)
self._fields.remove(field)
self.remove(field.widget)
self.emit('field-removed', field)
def _add_field(self, name, description, x, y, width=-1, height=1, model=None):
label = gtk.Label()
label.set_alignment(0, 0)
label.set_padding(2, 4)
if not description:
description = name
label.modify_font(self.font)
field = FieldInfo(self, name, label, x, y, width, height, model)
field.update_label(description)
self._fields.append(field)
self.emit('field-added', field)
label.connect('size-allocate',
self._on_field__size_allocate, field)
self.put(label, -1, -1)
return field
def _set_field_position(self, field, x, y):
x = clamp(x, 0, self.width - field.width - 1)
y = clamp(y, 0, self.height - field.height - 1)
if field.x == x and field.y == y:
return
field.x, field.y = x, y
if field.widget.props.visible:
self.queue_resize()
self.emit('selection-changed', field)
def _resize_field(self, field, width, height):
width = clamp(width, 1, self.width - field.x - 1)
height = clamp(height, 1, self.height - field.y - 1)
if field.width == width and field.height == height:
return
field.width, field.height = width, height
if field.widget.props.visible:
self.queue_resize()
self.emit('selection-changed', field)
def _get_field_from_widget(self, widget):
for field in self._fields:
if field.widget == widget:
return field
else:
raise AssertionError
def _begin_move_field(self, field, x, y, time):
if self._moving_field is not None:
raise AssertionError("can't move two fields at once")
        mask = (gdk.BUTTON_PRESS_MASK | gdk.BUTTON_RELEASE_MASK |
                gdk.POINTER_MOTION_MASK)
grab = gdk.pointer_grab(self.window, False, mask, None, None,
long(time))
if grab != gdk.GRAB_SUCCESS:
raise AssertionError("grab failed")
self._moving_field = field
self._moving_start_x_pointer = x
self._moving_start_y_pointer = y
self._moving_start_x_position = field.x
self._moving_start_y_position = field.y
self._moving_start_width = field.width
self._moving_start_height = field.height
w, h = field.widget.get_size_request()
self._moving_start_w, self._moving_start_h = w, h
def _update_move_field(self, x, y):
field = self._moving_field
if not field:
return
if self._action_type == FIELD_MOVE:
dx, dy = self._get_coords(
x - self._moving_start_x_pointer,
y - self._moving_start_y_pointer)
self._set_field_position(field,
self._moving_start_x_position + dx,
self._moving_start_y_position + dy)
elif self._action_type == FIELD_RESIZE:
dx, dy = self._get_coords(
x - self._moving_start_x_pointer,
y - self._moving_start_y_pointer)
self._resize_field(field,
self._moving_start_width + dx,
self._moving_start_height + dy)
def _end_move_field(self, time):
if not self._moving_field:
return
gdk.pointer_ungrab(long(time))
self._moving_field = None
def _get_coords(self, x, y):
"""Returns the grid coordinates given absolute coordinates
:param x: absolute x
        :param y: absolute y
:returns: (gridx, gridy)
"""
return (int(float(x) / (self._field_width + 1)),
int(float(y) / (self._field_height + 1)))
def _move_field(self, movement_type, delta):
field = self._selected_field
if not field:
return True
x = field.x
y = field.y
if movement_type == FIELD_MOVEMENT_VERTICAL:
y += delta
elif movement_type == FIELD_MOVEMENT_HORIZONTAL:
x += delta
else:
raise AssertionError
self._set_field_position(field, x, y)
def _on_field__size_allocate(self, label, event, field):
field.allocate(self._field_width + 1, self._field_height + 1)
#
# GtkWidget
#
def do_realize(self):
gtk.Layout.do_realize(self)
# Use the same gdk.window (from gtk.Layout) to capture these events.
self.window.set_events(self.get_events() |
gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.KEY_PRESS_MASK |
gdk.KEY_RELEASE_MASK |
gdk.ENTER_NOTIFY_MASK |
gdk.LEAVE_NOTIFY_MASK |
gdk.POINTER_MOTION_MASK)
self.modify_bg(gtk.STATE_NORMAL, gdk.color_parse('white'))
gc = gdk.GC(self.window,
line_style=gdk.LINE_ON_OFF_DASH,
line_width=2)
gc.set_rgb_fg_color(gdk.color_parse('blue'))
self._selection_gc = gc
gc = gdk.GC(self.window)
gc.set_rgb_fg_color(gdk.color_parse('grey80'))
self._grid_gc = gc
gc = gdk.GC(self.window)
gc.set_rgb_fg_color(gdk.color_parse('black'))
self._border_gc = gc
gc = gdk.GC(self.window)
gc.set_rgb_fg_color(gdk.color_parse('grey40'))
self._field_border_gc = gc
def do_size_request(self, req):
border_width = 1
req.width = self.width * (self._field_width + border_width) + border_width
req.height = self.height * (self._field_height + border_width) + border_width
def do_expose_event(self, event):
window = event.window
if not self.get_realized():
return
for c in self._fields:
self.propagate_expose(c.widget, event)
fw = self._field_width + 1
fh = self._field_height + 1
width = (self.width * fw) - 1
height = (self.height * fh) - 1
window.draw_rectangle(self._border_gc, False, 0, 0,
width + 1, height + 1)
if self._draw_grid:
grid_gc = self._grid_gc
for x in range(self.width):
window.draw_line(grid_gc,
x * fw, 0,
x * fw, height)
for y in range(self.height):
window.draw_line(grid_gc,
0, y * fh,
width, y * fh)
fields = self._fields[:]
if self._selected_field:
gc = self._selection_gc
field = self._selected_field
cx, cy, cw, ch = field.widget.allocation
window.draw_rectangle(gc, False,
cx + 1, cy + 1, cw - 2, ch - 2)
fields.remove(field)
gc = self._field_border_gc
for field in fields:
cx, cy, cw, ch = field.widget.allocation
window.draw_rectangle(gc, False,
cx + 1, cy + 1, cw - 2, ch - 3)
def do_button_press_event(self, event):
x, y = int(event.x), int(event.y)
field = self._pick_field(x, y)
self.select_field(field)
self.grab_focus()
if not field:
return
if not self._moving_field:
if field.get_cursor(x, y):
self._action_type = FIELD_RESIZE
else:
self._action_type = FIELD_MOVE
self._begin_move_field(field, x, y, event.time)
return False
def do_button_release_event(self, event):
self._update_move_field(int(event.x), int(event.y))
self._end_move_field(event.time)
return False
def do_motion_notify_event(self, event):
if self._moving_field is not None:
self._update_move_field(int(event.x), int(event.y))
else:
field = self._pick_field(event.x, event.y)
cursor = None
if field:
cursor = field.get_cursor(event.x, event.y)
self.window.set_cursor(cursor)
def do_key_press_event(self, event):
if self._moving_field:
return
if event.keyval == keysyms.Up:
self._move_field(FIELD_MOVEMENT_VERTICAL, -1)
elif event.keyval == keysyms.Down:
self._move_field(FIELD_MOVEMENT_VERTICAL, 1)
elif event.keyval == keysyms.Left:
self._move_field(FIELD_MOVEMENT_HORIZONTAL, -1)
elif event.keyval == keysyms.Right:
self._move_field(FIELD_MOVEMENT_HORIZONTAL, 1)
elif event.keyval == keysyms.Delete:
self._remove_selected_field()
if gtk.Layout.do_key_press_event(self, event):
return True
return True
def do_drag_drop(self, context, x, y, time):
return True
# pylint: disable=E1120
def do_drag_data_received(self, context, x, y, data, info, time):
if data.type == 'OBJECTLIST_ROW':
row = pickle.loads(data.data)
x, y = self._get_coords(x, y)
if self.objectlist_dnd_handler(row, x, y):
context.finish(True, False, time)
elif data.type == '_NETSCAPE_URL':
d = data.data.split('\n')[1]
            d = d.replace('&amp;', '&')  # un-escape ampersands in the dropped URL
x, y = self._get_coords(x, y)
field = self.add_field(d, x, y)
field.show()
self.select_field(field)
context.finish(True, False, time)
context.finish(False, False, time)
# pylint: enable=E1120
def do_focus(self, direction):
self.set_can_focus(False)
res = gtk.Layout.do_focus(self, direction)
self.set_can_focus(True)
return res
#
# Public API
#
def add_field(self, text, description, x, y, width=-1, height=1, model=None):
"""Adds a new field to the grid
:param text: text of the field
:param description: description of the field
:param x: x position of the field
:param y: y position of the field
"""
return self._add_field(text, description, x, y, width, height, model)
def select_field(self, field):
"""Selects a field
:param field: the field to select, must be FieldInfo or None
"""
if field == self._selected_field:
return
self._selected_field = field
self.queue_resize()
self.grab_focus()
self.emit('selection-changed', field)
def get_selected_field(self):
""" Returns the currently selected field
:returns: the currently selected field
:rtype: FieldInfo
"""
return self._selected_field
def get_fields(self):
""" Returns a list of fields in the grid
:returns: a list of fields in the grid
"""
return self._fields
def objectlist_dnd_handler(self, item, x, y):
"""A subclass can implement this to support dnd from
an ObjectList.
:param item: the row dragged from the objectlist
:param x: the x position it was dragged to
:param y: the y position it was dragged to
"""
return False
def resize(self, width, height):
"""
Resize the grid.
:param width: the new width
:param height: the new height
"""
self.width = width
self.height = height
self.queue_resize()
gobject.type_register(FieldGrid)
|
andrebellafronte/stoq
|
stoqlib/gui/widgets/fieldgrid.py
|
Python
|
gpl-2.0
| 17,791
|
[
"VisIt"
] |
80cd08b8ea1aa5d543471f0f46eab93b31cd5387945cf00b57b6ff0f4df93ad9
|
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Bridge for using cclib data in biopython (http://biopython.org)."""
from Bio.PDB.Atom import Atom
from cclib.parser.utils import PeriodicTable
def makebiopython(atomcoords, atomnos):
"""Create a list of BioPython Atoms.
This creates a list of BioPython Atoms suitable for use
by Bio.PDB.Superimposer, for example.
>>> import numpy
>>> from Bio.PDB.Superimposer import Superimposer
>>> atomnos = numpy.array([1,8,1],"i")
>>> a = numpy.array([[-1,1,0],[0,0,0],[1,1,0]],"f")
>>> b = numpy.array([[1.1,2,0],[1,1,0],[2,1,0]],"f")
>>> si = Superimposer()
>>> si.set_atoms(makebiopython(a,atomnos),makebiopython(b,atomnos))
>>> print si.rms
0.29337859596
"""
pt = PeriodicTable()
bioatoms = []
for coords, atomno in zip(atomcoords, atomnos):
bioatoms.append(Atom(pt.element[atomno], coords, 0, 0, 0, 0, 0))
return bioatoms
if __name__ == "__main__":
import doctest
doctest.testmod()
|
Clyde-fare/cclib
|
src/cclib/bridge/cclib2biopython.py
|
Python
|
lgpl-2.1
| 1,441
|
[
"Biopython",
"cclib"
] |
7958eda2a39d359aa9e7562505aa9c40b1c91afe40b2db0994d0982dae733098
|
#
# Adrian Soto
# 22-12-2014
# Stony Brook University
#
################################################
# Plot band structure and DOS from the
# output of the bands.x program in the
# Quantum Espresso package.
#
# Features:
# 1) Allows for scissor correction (band shift)
# 2)
#
################################################
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.ticker import AutoMinorLocator
import matplotlib.gridspec as gridspec
import csv
plt.rcParams['font.family'] = 'serif'  # matplotlib family names are lowercase
plt.rcParams['font.serif'] = 'Times New Roman'
#rcParams['text.usetex'] = True
rcParams['font.size'] = 24
class band:
def __init__(self, numkpoints, bandenergies):
self.nks = numkpoints
if (len(bandenergies) != numkpoints):
print "ERROR: list of band energies has wrong length. Setting band to 0."
self.nrg = [0] * numkpoints
else:
self.nrg = bandenergies
def printband(self):
print self.nrg
def shift(self, delta):
        self.nrg = [x + delta for x in self.nrg]
return
################################################
# End of class band
################################################
class kpoints:
def __init__(self):
self.klist = []
class dos:
def __init__(self): #, numE, dosE, dosG, dosI):
self.numE = 0
self.dosE = []
self.dosG = []
self.dosI = []
def Load(self, dosfile):
#
# Load DOS from dos.x output
#
print " "
print "Loading DOS from ", dosfile
print " "
# Count lines in file
self.numE=sum(1 for line in open(dosfile))
# Read file line by line and process
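        # Assumed dos.x fixed-width column layout, matching the slices below:
        # energy E (eV), DOS(E), and the integrated DOS.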
f=open(dosfile, 'r')
# First line is header. Discard
data=f.readline()
# Iterate over file lines
for ilin in range(1,self.numE):
data=f.readline()
E=float(data[0:7])
self.dosE.append(E)
G=float(data[9:19])
self.dosG.append(G)
I=float(data[21:31])
self.dosI.append(I)
f.close()
return
################################################
# End of class dos
################################################
#
# Global functions
#
def w0gauss(x):
# As in flib/w0gauss.f90 in the QE package
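    # (This is the Marzari-Vanderbilt "cold smearing" approximation to the
    # Dirac delta -- the ngauss = -1 case of w0gauss in Quantum Espresso --
    # used below to smear discrete band energies into a smooth DOS.)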
pi = 3.141592653589793
w0 = 1.0/math.sqrt(pi)*math.exp(-(x-1.0/math.sqrt(2.0))**2)*(2.0-math.sqrt(2.0)*x)
return w0
def ReadBandStructure(bandsfile):
#
# This function reads the band structure as written
# to output of the bands.x program. It returns the bs
# as a flat list with all energies and another list with
# the k-point coordinates.
#
f = open(bandsfile, 'r')
# First line contains nbnd and nks. Read.
currentline = f.readline()
nks = int(currentline[22:26])
nbnd = int(currentline[12:16])
# Following lines contain the k-point coordinates
# and the band energies.
# Calculate number of lines containing band structure:
# nks k-point lines
# At each k-point there are (1+nbnd/10) energy values.
    nlpkp = (nbnd + 9)/10 # Number of Lines Per K-Point (bands.x writes 10 energies per line; ceiling division)
nlines = nks + nks * nlpkp
bsaux = []
xk = []
for ik in range (0, nks):
currentline = f.readline()
#kpoint = currentline[12:40]
kpoint = [float(x) for x in currentline.split()]
xk.append(kpoint)
auxenerg = []
for ibnd in range(0, nlpkp):
currentline = f.readline()
# append current line to auxiliary list
            auxenerg.append([float(x) for x in currentline.split()])
# flatten list of lists containing energies for a given kpoint
# (each sublist corresponds to one line in the bands.dat file)
energ = [item for sublist in auxenerg for item in sublist]
# Sort ascendingly band energies for current k-point (to
# prevent artificial level crossings if QE bands.x output
# does not sort them correctly) and append to band structure
bsaux.append(sorted(energ))
f.close()
# Flatten bs list
bsflat = [item for sublist in bsaux for item in sublist]
return nks, nbnd, xk, bsflat
def SortByBands(nks, nbnd, bsflat):
    # Rearrange bs from k-point-major to band-major order (still a flat list)
    bs = []
    for ibnd in range (0, nbnd):
        for ik in range (0, nks):
            bs.append(bsflat[ik*nbnd+ibnd])
    return bs
def FindHLGap(nks, hvb, lcb):
#
# Find HOMO and LUMO energies and energy gap
#
# hvb = highest valence band
# lcb = lowest conduction band
#
# Ehvb = highest valence energy or HOMO energy
# Elcb = lowest conduction energy or LUMO energy
#
gap = lcb[0] - hvb[0]
for ik1 in range (0, nks):
auxcond = lcb[ik1]
for ik2 in range (0, nks):
auxval = hvb[ik2]
currentgap = auxcond-auxval
if (currentgap < 0.0):
print "ERROR: negative gap"
elif (currentgap < gap):
gap = currentgap
Ehvb = max(hvb)
Elcb = min(lcb)
return Ehvb, Elcb, gap
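# Note on FindHLGap: the double loop above is equivalent to
# min(lcb) - max(hvb), i.e. it returns the fundamental (possibly indirect)
# gap; the explicit loops are kept for clarity rather than speed.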
def Scissor(nks, newgap, bands, shifttype):
#
# shifttype == 0 : shift valence bands by -0.5*delta and
# conduction bands by 0.5*delta
# shifttype == 1 : as in 0 but placing the highest valence
# energy at 0.0
# shifttype == 2 : as in 0 but placing the gap center at 0.0
#
EHOMO, ELUMO, oldgap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta=(newgap-oldgap)/2.0
# Apply scissor to band structure
for ibnd in range (0, nbnd):
if (ibnd < nval):
bands[ibnd].shift(-1.0*delta)
else:
bands[ibnd].shift(delta)
if (shifttype==0):
print "Scissor correction to band energies has been applied."
return
elif (shifttype==1):
EHOMO, ELUMO, gap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta = -1.0*EHOMO
#print "delta=", delta
for ibnd in range (0, nbnd):
bands[ibnd].shift(delta)
print "Scissor correction to band energies has been applied."
print "Highest valence energy has been set to 0.0 eV"
return
elif (shifttype==2):
EHOMO, ELUMO, gap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta = -0.5*(EHOMO+ELUMO)
for ibnd in range (0, nbnd):
bands[ibnd].shift(delta)
print "Scissor correction to band energies has been applied."
print "Gap center has been set to 0.0 eV"
return
else:
print "ERROR: shifttype has an non-valid value. Default value shifttype==0."
print "Scissor correction to band energies has been applied."
return
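# Worked example for Scissor (illustrative numbers): if the computed gap is
# 0.60 eV and newgap = 1.11 eV, then delta = (1.11 - 0.60)/2 = 0.255 eV, so
# every valence band shifts down by 0.255 eV and every conduction band up by
# 0.255 eV before any extra alignment implied by shifttype. Note that Scissor
# relies on the module-level nval and nbnd globals defined further below.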
def CreateDOS(nks, nbnd, bzv, Emin, Emax, deltaE, bnd, normalize):
    # ATTENTION: bnd must be an object of the class band
    # NOTE: the Emin and Emax arguments are overwritten below by the actual
    # band-energy range, so the values passed in are effectively ignored.
    Emin = min(bnd[0].nrg)
    Emax = max(bnd[nbnd-1].nrg)
    ndos = int((Emax - Emin)/deltaE + 0.50000001) # int() truncates, so add ~0.5 to round to nearest
dosE = []
dosG = []
intg=0.0
deltaEgauss=5.0*deltaE
d3k=(1.0/nks)*bzv
wk=2.0/nks
print "Creating DOS with uniform k-point weights"
# Create DOS
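    # Smeared DOS, as implemented by the loop below:
    #     g(E) = (1/sigma) * sum_{k,n} w_k * w0gauss((E - e_nk)/sigma)
    # with sigma = deltaEgauss = 5*deltaE and a uniform k-point weight
    # w_k = 2/nks (the factor 2 accounts for spin degeneracy).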
for idos in range (0, ndos):
E = Emin + idos * deltaE
dosg = 0.0
for ik in range(0, nks):
for ibnd in range (0, nbnd):
dosg = dosg + w0gauss ( (E - bnd[ibnd].nrg[ik] ) / deltaEgauss ) * wk
dosg = dosg/deltaEgauss
intg = intg + dosg*deltaE # integrated DOS
dosE.append(E)
dosG.append(dosg)
print "\n Integrated DOS=", intg,
# Normalize DOS
    if (normalize == 1):
        print "Normalizing DOS to 1.0 \n"
        dosGnorm = [g/intg for g in dosG]  # copy instead of aliasing dosG
        return dosE, dosGnorm
    elif (normalize == 0):
        return dosE, dosG
    else:
        print " ERROR!! in CreateDOS function: wrong DOS normalization choice."
        return
def PlotBandStructure(nbnd, nval, bnd, plotfile, Ef, sympoints, nks_btw_sympoints ):
#
# ATTENTION: bnd must be an object of the class band
#
# nval: number of valence bands
# Ef: Fermi Energy. If false then it won't print horizontal line
# sympoints: list containing labels of symmetry points
# nks_btw_sympoints: number of k-points between symmetry points
#
# NOTE: this function assumes that the number of points
# between symmetry points is constant
#
print "Plotting band structure to", plotfile
col = 'k'
for ibnd in range (0, nbnd):
#if (ibnd < nval):
# col='b'
#else:
# col='r'
plt.plot(bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o')
    y_min = min(bnd[0].nrg)
    y_max = max(bnd[nbnd-1].nrg)  # was min(): take the top of the highest band
plt.xlabel("Brillouin zone path")
plt.ylabel("band energies (eV)")
numsympoints = len(sympoints)
kpath=[]
xticks = range(0, numsympoints*nks_btw_sympoints + 1, nks_btw_sympoints)
for i in range(0, numsympoints):
kpath.append(sympoints[i])
if (i < numsympoints-1):
for j in range (0, nks_btw_sympoints-1):
kpath.append('')
# plt.axvline(x=xticks, ymin=0, ymax=1, hold=None, **kwargs)
# Ticks and vertical lines across BS plot
plt.xticks(xticks, sympoints)
    for i in range(0,numsympoints):
        plt.axvline(x=xticks[i], color='k', linewidth=0.25)
    if (Ef is not False):
        plt.axhline(Ef, color="black", linestyle="--")
plt.xlim( 0, len(bnd[0].nrg)-1 )
plt.savefig(plotfile)
return
def PlotDOS(dosE, dosG, plotname):
# ATTENTION: dosG and dosE must be lists of reals
plt.plot(dosG, dosE)
plt.xlabel("Density Of States")
plt.ylabel("band energies (eV)")
plt.gca().set_xlim(left=0)
plt.savefig(plotname)
return
def PlotBnD(nbnd, nval, bnd, Ef, sympoints, nks_btw_sympoints, dosE, dosG, plotname):
col = 'k'
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
for ibnd in range (0, nbnd):
ax1.plot(bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o')
ax1.set_title('Sharing Y axis')
ax2.plot(dosG, dosE)
ax2.set_xlim([0, 0.1])
plt.ylim([-15.0, 20.0])
#plt.subplots_adjust(left=0.0, right=0.8)
plt.subplots_adjust(wspace = 0.0)
plt.show()
return
def PlotBnDD(nbnd, nval, bnd, Ef, sympoints, nks_btw_sympoints, sym_pt_dists, dosE1, dosG1, dosE2, dosG2, plotname):
######################################
# Plot generation and formatting
######################################
# Two subplots, unpack the axes array immediately
gs = gridspec.GridSpec(1, 2,width_ratios=[4,1])
f = plt.figure()
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
# Formatting
col = 'k'
ax1.set_xlabel("Brillouin zone path")
ax1.xaxis.set_label_position("bottom")
ax1.set_ylabel("E [eV]")
ax1.text(0.07, -12.50, 'Si', fontsize=28)
###ax2.text(0.07, 18.00, 'Si', fontsize=18)
ax2.set_xlabel("DOS \n [eV$^{-1}$]")
ax2.xaxis.set_label_position("top")
# ax2.yaxis.set_label_position("right")
# ax2.set_ylabel("E [eV]", rotation=270)
y_min = -13.0
y_max = 20.0
x2_min = 0.0
x2_max = 5.0
ax1.set_ylim([y_min, y_max])
ax2.set_xlim([x2_min, x2_max])
ax2.set_ylim([y_min, y_max])
# Ticks
minor_locator = AutoMinorLocator(2)
ax2.xaxis.set_minor_locator(minor_locator)
# Number of symmetry points
numsympoints = len(sympoints)
# for i in range(0, numsympoints):
# kpath.append(sympoints[i])
# if (i < numsympoints-1):
# for j in range (0, nks_btw_sympoints-1):
# kpath.append('')
# Generate horizontal axis containing k-path accumulated length (for BS plot)
x=0.0
klen=[x]
dx=1.0/((numsympoints-1)*nks_btw_sympoints)
for isym in range(0, numsympoints-1):
dx=sym_pt_dists[isym]/nks_btw_sympoints
for ipt in range(1, nks_btw_sympoints+1):
x=x+dx
klen.append(x)
#xticks = range(0, numsympoints*nks_btw_sympoints + 1, nks_btw_sympoints)
xticks=[]
for isym in range(0, numsympoints):
j = isym * nks_btw_sympoints
xticks.append(klen[j])
x1_min=min(xticks)
x1_max=max(xticks)
ax1.set_xlim(x1_min, x1_max)
# Plot bands
col = '0.4'
for ibnd in range (0, nbnd):
ax1.plot(klen , bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o')
# plt.axvline(x=xticks, ymin=0, ymax=1, hold=None, **kwargs)
# Ticks and vertical lines across BS plot
ax1.set_xticks(xticks)
ax1.set_xticklabels(sympoints)
# Plot DOSs
ax2.plot(dosG1, dosE1, linestyle='-', linewidth=1.0, color='b')
ax2.plot(dosG2, dosE2, linestyle='-', color='r')
dosticks=[0, 5]
#dosticks=[0]
ax2.set_xticks(dosticks)
ax2.set_xticklabels(dosticks)
minorx2ticks=[1.0, 2.0, 3.0, 4.0]
ax2.set_xticks(minorx2ticks, minor = True)
# BS ticks
yticks=[-10, -5, 0, 5, 10, 15, 20]
minor_locator = AutoMinorLocator(5)
ax1.yaxis.set_minor_locator(minor_locator)
ax2.yaxis.set_minor_locator(minor_locator)
ax1.xaxis.tick_top()
ax1.set_yticks(yticks)
ax1.set_yticklabels(yticks)
#ax2.axes.get_yaxis().set_visible(False)
ax2.yaxis.tick_right()
ax2.set_yticks(yticks)
#ax2.set_yticklabels(yticks)
#plt.subplots_adjust(left=0.0, right=0.8)
plt.subplots_adjust(wspace = 0.0)
# Attempt to fill the area to the left of the DOS
# split values into positive and negative
alpha_fill=0.5
dosE1neg=[]
dosG1neg=[]
dosE1pos=[]
dosG1pos=[]
for i in range(0, len(dosE1)):
if(dosE1[i] < 0.0):
dosE1neg.append(dosE1[i])
dosG1neg.append(dosG1[i])
else:
dosE1pos.append(dosE1[i])
dosG1pos.append(dosG1[i])
# ax2.fill_between(dosG1pos, 0, dosE1pos, alpha=alpha_fill, linewidth=0.0) # Fill left of curve above 0.0 eV
# ax2.fill_between(dosG1neg, 0, dosE1neg, alpha=alpha_fill, linewidth=0.0) # Fill left of curve below 0.0 eV
dosE1new =[y_min]+dosE1
dosG1new =[0.0]+dosG1
ax2.fill_between(dosG1new, 0, dosE1new, alpha=alpha_fill, linewidth=0.0, edgecolor='w')
# Vertical lines across BS plot
    for i in range(0,numsympoints):
        ax1.axvline(x=xticks[i], color='k', linewidth=0.25)
    # Horizontal line at top of valence band (skipped when Ef is False)
    if (Ef is not False):
        ax1.axhline(Ef, color="black", linestyle="--")
        ax2.axhline(Ef, color="black", linestyle="--")
#plt.show()
plt.savefig(plotname, bbox_inches='tight')
return
def PlotMultipleDOS(dosE, dosG, plotname):
# ATTENTION: dosG and dosE must be lists of lists of reals
Ndos=len(dosE[:])
for i in range(0, Ndos):
plt.plot(dosG[i], dosE[i])
plt.xlabel("Density Of States")
plt.ylabel("band energies (eV)")
plt.savefig(plotname)
return
#def WriteBandStructure():
# print (" %10.6f%10.6f%10.6f" % (kpoint[0], kpoint[1], kpoint[2]) )
############################################################################################
############################################################################################
############################################################################################
############################################################################################
############################ PROGRAM STARTS HERE ###################################
############################################################################################
############################################################################################
############################################################################################
############################################################################################
bohr2ang=0.52918
############
# Band structure
############
filename="si.bands.dat"
nks = 0
nbnd=0
xk=[]
bsflt=[]
bs=[]
sympoints=['$L$','$\Gamma$', '$X$', '$W$', '$K$', '$\Gamma$']
sym_pt_dists=[0.5*math.sqrt(3), 1.0, 0.5, 0.25*math.sqrt(2), 0.75*math.sqrt(2)] ## distances between symmetry points (by hand)
nks_btw_sympoints=50
# Read from file and sort bs by bands
nks, nbnd, xk, bsflt = ReadBandStructure(filename)
if(nbnd==0):
print "%% ERROR READING BANDS. EXIT %%"
else:
bs = SortByBands(nks, nbnd, bsflt)
print "nks=", nks
print "nbnd=", nbnd
# Create band objects
bands=[]
for ibnd in range (0, nbnd):
ledge = ibnd*nks
redge = ledge+nks
currentband = bs[ledge:redge]
bands.append( band(nks, currentband) )
# Scissor correction
# Si
alat = 10.330495 # Bohr
nval = 4 # for Si
exptgap = 1.11 # eV # Si
# Ge
###alat = 10.8171069 # Bohr
###nval = 14 # for Ge with semicore
###exptgap = 0.67 # Ge
# Convert to ANG and calculate BZV
alat=alat*bohr2ang
V=(alat**3)/4.0 # Good for FCC
bzv = (2.0*math.pi)**3/V
ncond = nbnd - nval
Scissor(nks, exptgap, bands, 1) # shifttype=1 puts the highest valence energy at 0.0 eV, so pass Ef=0.0 to the plot functions
print "Scissor correction with gap set to", exptgap
#############
# DOS
#############
filename='si.bands_full.dat'
nks1, nbnd1, xk1, bsflt1 = ReadBandStructure(filename)
if(nbnd1==0):
print "%% ERROR READING BANDS. EXIT %%"
else:
bs1 = SortByBands(nks1, nbnd1, bsflt1)
print "nks=", nks1
print "nbnd=", nbnd1
# Create band objects
bands1=[]
for ibnd in range (0, nbnd1):
ledge1 = ibnd*nks1
redge1 = ledge1+nks1
currentband1 = bs1[ledge1:redge1]
bands1.append( band(nks1, currentband1) )
# Scissor correction
Scissor(nks1, exptgap, bands1, 1) # shifttype=1 puts the highest valence energy at 0.0 eV
print "Scissor correction with gap set to", exptgap
filename='si.bands_243.dat'
nks2, nbnd2, xk2, bsflt2 = ReadBandStructure(filename)
if(nbnd2==0):
print "%% ERROR READING BANDS. EXIT %%"
else:
bs2 = SortByBands(nks2, nbnd2, bsflt2)
print "nks=", nks2
print "nbnd=", nbnd2
# Create band objects
bands2=[]
for ibnd in range (0, nbnd2):
ledge2 = ibnd*nks2
redge2 = ledge2+nks2
currentband2 = bs2[ledge2:redge2]
bands2.append( band(nks2, currentband2) )
# Scissor correction
Scissor(nks2, exptgap, bands2, 1) # shifttype=1 puts the highest valence energy at 0.0 eV
print "Scissor correction with gap set to", exptgap
# Generate DOSs
deltaE = 0.03 #eV
dosE1, dosG1 = CreateDOS(nks1, nbnd1, bzv, -13.0, 25.0, deltaE, bands1, 0)
dosE2, dosG2 = CreateDOS(nks2, nbnd2, bzv, -13.0, 25.0, deltaE, bands2, 0)
# Plot
#PlotDOS(dosE, dosG, "DOS.pdf")
#PlotBandStructure(nbnd, nval, bands, "BS.pdf", 0.0, sympoints, nks_btw_sympoints)
PlotBnDD(nbnd, nval, bands, 0.0, sympoints, nks_btw_sympoints, sym_pt_dists, dosE1, dosG1, dosE2, dosG2, "BSnDOS.pdf")
# DOS
#mydos=dos()
#mydos.Load('dos_full.dat')
#mydos.Printout()
|
adrian-soto/QEdark_repo
|
tools/bandsndos/bandsndos_TlBr.py
|
Python
|
gpl-2.0
| 20,254
|
[
"Quantum ESPRESSO"
] |
b761697578032650a21ffe64d4d1a166b18eae82fc4d864d11b477fa6b826364
|
import pandas as pd
from . import cytoscapejs as cyjs
def from_dataframe(df, source_col='source', target_col='target', interaction_col='interaction',
name='From DataFrame', edge_attr_cols=[]):
"""
Utility to convert Pandas DataFrame object into Cytoscape.js JSON
:param df:
:param source_col:
:param target_col:
:param interaction_col:
:param name:
:param edge_attr_cols:
:return:
"""
network = cyjs.get_empty_network(name=name)
nodes = set()
for index, row in df.iterrows():
s = row[source_col]
t = row[target_col]
if s not in nodes:
nodes.add(s)
source = get_node(s)
network['elements']['nodes'].append(source)
if t not in nodes:
nodes.add(t)
target = get_node(t)
network['elements']['nodes'].append(target)
network['elements']['edges'].append(get_edge(s, t, interaction=row[interaction_col]))
return network
def to_dataframe(network, interaction='interaction', default_interaction='-'):
edges = network['elements']['edges']
network_array = []
for edge in edges:
edge_data = edge['data']
source = edge_data['source']
target = edge_data['target']
if interaction in edge_data:
itr = edge_data[interaction]
else:
itr = default_interaction
row = (source, itr, target)
network_array.append(row)
return pd.DataFrame(network_array, columns=['source', 'interaction',
'target'])
def get_node(id):
node = {
'data': {
'id': str(id),
'name': str(id)
}
}
return node
def get_edge(source, target, interaction):
if interaction is None:
itr = '-'
else:
itr = interaction
edge = {
'data': {
'source': source,
'target': target,
'interaction': itr
}
}
return edge
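
# Usage sketch (hedged: this module normally lives inside the py2cytoscape
# package, so the relative import at the top must resolve; the DataFrame
# below is purely illustrative):
#
#     df = pd.DataFrame([('a', 'binds', 'b'), ('b', 'binds', 'c')],
#                       columns=['source', 'interaction', 'target'])
#     network = from_dataframe(df)
#     round_tripped = to_dataframe(network)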
|
scholer/py2cytoscape
|
py2cytoscape/util/dataframe.py
|
Python
|
mit
| 2,033
|
[
"Cytoscape"
] |
02951c6f7687776a2bc796f7686b411501ce371cf28a7d6dc8fd27e3962661de
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-04-08 11:15
from __future__ import unicode_literals
import django_rdkit.models.fields
from django.db import migrations
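# This migration first enables the PostgreSQL "rdkit" extension (required by
# django_rdkit's MolField) and then adds a molecule column to the existing
# compound table.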
class Migration(migrations.Migration):
dependencies = [
('compound_db', '0002_auto_20160314_2022'),
]
operations = [
migrations.RunSQL('CREATE EXTENSION IF NOT EXISTS rdkit;'),
migrations.AddField(
model_name='compound',
name='mol',
field=django_rdkit.models.fields.MolField(default=None),
preserve_default=False,
),
]
|
martin-sicho/MI_ADM
|
MI_ADM_website/compound_db/migrations/0003_compound_mol.py
|
Python
|
gpl-3.0
| 599
|
[
"RDKit"
] |
ff233bbe75ccf2f2b809fdb8be611f92246aa9b00f1b4edf5b9831e9cb698db8
|
# Write the benchmarking functions here.
# See "Writing benchmarks" in the asv docs for more information.
import os
import py_entitymatching as em
from py_entitymatching.utils.generic_helper import get_install_path
import sys
if sys.version[0] == '2':
reload(sys)
sys.setdefaultencoding("utf-8")
PATH = get_install_path()
DATASET_PATH = os.sep.join([PATH, 'datasets', 'example_datasets'])
class TimeDownSampleRestaurants:
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'restaurants', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'restaurants', 'B.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 500
self.y_param = 2
except AssertionError:
print("Dataset \'restaurants\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleElectronics:
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'electronics', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'electronics', 'B.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 500
self.y_param = 5
except AssertionError:
print("Dataset \'electronics\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleAnime:
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'anime', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'anime', 'B.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 1000
self.y_param = 1
except AssertionError:
print("Dataset \'anime\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleBooks:
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'books', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'books', 'B.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 2000
self.y_param = 2
except AssertionError:
print("Dataset \'books\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleCitations:
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'citations', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'citations', 'B.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 3000
self.y_param = 2
except AssertionError:
print("Dataset \'citations\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleBikes:
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'bikes', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'bikes', 'B.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 2500
self.y_param = 2
except AssertionError:
print("Dataset \'bikes\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleCosmetics:
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'cosmetics', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'cosmetics', 'B.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 4000
self.y_param = 1
except AssertionError:
print("Dataset \'cosmetics\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleEbooks:
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'ebooks', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'ebooks', 'B.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 3000
self.y_param = 1
except AssertionError:
print("Dataset \'ebooks\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleMovies:
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'movies', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'movies', 'B.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 1000
self.y_param = 2
except AssertionError:
print("Dataset \'movies\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleMusic:
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'music', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'music', 'B.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 1500
self.y_param = 2
except AssertionError:
print("Dataset \'music\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleBeer:
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'beer', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'beer', 'B.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 500
self.y_param = 10
except AssertionError:
print("Dataset \'beer\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleASongs1:
timeout = 2000.0
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'songs', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'songs', 'A.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 2000
self.y_param = 2
except AssertionError:
print("Dataset \'songs\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleASongs2:
timeout = 2000.0
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'songs', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'songs', 'A.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 3000
self.y_param = 1
except AssertionError:
print("Dataset \'songs\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleASongs3:
timeout = 2000.0
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'songs', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'songs', 'A.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 4000
self.y_param = 1
except AssertionError:
print("Dataset \'songs\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleCitation1:
timeout = 2000.0
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'citation', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'citation', 'B.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 1000
self.y_param = 1
except AssertionError:
print("Dataset \'citation\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
class TimeDownSampleCitation2:
timeout = 2000.0
def setup(self):
path_for_a = os.sep.join([DATASET_PATH, 'citation', 'A.csv'])
path_for_b = os.sep.join([DATASET_PATH, 'citation', 'B.csv'])
try:
self.A = em.read_csv_metadata(path_for_a)
self.B = em.read_csv_metadata(path_for_b)
self.size = 2000
self.y_param = 1
except AssertionError:
print("Dataset \'citation\' not found. Please visit the project website to download the dataset.")
raise SystemExit
def time_down_sample_tables(self):
em.down_sample(self.A, self.B, self.size, self.y_param)
def teardown(self):
del self.A
del self.B
del self.size
del self.y_param
|
anhaidgroup/py_entitymatching
|
benchmarks/benchmark_down_sample_sampler.py
|
Python
|
bsd-3-clause
| 12,585
|
[
"VisIt"
] |
41a66fc2d65cedd6510fcb00caad256d0267555616f9790d4d2f87422b11c3ed
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides classes to interface with the Materials Project REST
API v2 to enable the creation of data structures and pymatgen objects using
Materials Project data.
To make use of the Materials API, you need to be a registered user of the
Materials Project, and obtain an API key by going to your dashboard at
https://www.materialsproject.org/dashboard.
"""
import itertools
import json
import logging
import platform
import re
import sys
import warnings
from collections import defaultdict
from enum import Enum, unique
from time import sleep
import requests
from monty.json import MontyDecoder, MontyEncoder
from monty.serialization import dumpfn
from pymatgen import SETTINGS, SETTINGS_FILE, yaml
from pymatgen import __version__ as pmg_version
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
from pymatgen.core.surface import get_symmetrically_equivalent_miller_indices
from pymatgen.entries.computed_entries import ComputedEntry, ComputedStructureEntry
from pymatgen.entries.exp_entries import ExpEntry
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.sequence import PBar, get_chunks
logger = logging.getLogger(__name__)
@unique
class TaskType(Enum):
"""task types available in MP"""
GGA_OPT = "GGA Structure Optimization"
GGAU_OPT = "GGA+U Structure Optimization"
SCAN_OPT = "SCAN Structure Optimization"
GGA_LINE = "GGA NSCF Line"
GGAU_LINE = "GGA+U NSCF Line"
GGA_UNIFORM = "GGA NSCF Uniform"
GGAU_UNIFORM = "GGA+U NSCF Uniform"
GGA_STATIC = "GGA Static"
GGAU_STATIC = "GGA+U Static"
GGA_STATIC_DIEL = "GGA Static Dielectric"
GGAU_STATIC_DIEL = "GGA+U Static Dielectric"
GGA_DEF = "GGA Deformation"
GGAU_DEF = "GGA+U Deformation"
LDA_STATIC_DIEL = "LDA Static Dielectric"
class MPRester:
"""
A class to conveniently interface with the Materials Project REST
interface. The recommended way to use MPRester is with the "with" context
manager to ensure that sessions are properly closed after usage::
with MPRester("API_KEY") as m:
do_something
MPRester uses the "requests" package, which provides for HTTP connection
pooling. All connections are made via https for security.
For more advanced uses of the Materials API, please consult the API
documentation at https://github.com/materialsproject/mapidoc.
"""
supported_properties = (
"energy",
"energy_per_atom",
"volume",
"formation_energy_per_atom",
"nsites",
"unit_cell_formula",
"pretty_formula",
"is_hubbard",
"elements",
"nelements",
"e_above_hull",
"hubbards",
"is_compatible",
"spacegroup",
"task_ids",
"band_gap",
"density",
"icsd_id",
"icsd_ids",
"cif",
"total_magnetization",
"material_id",
"oxide_type",
"tags",
"elasticity",
)
supported_task_properties = (
"energy",
"energy_per_atom",
"volume",
"formation_energy_per_atom",
"nsites",
"unit_cell_formula",
"pretty_formula",
"is_hubbard",
"elements",
"nelements",
"e_above_hull",
"hubbards",
"is_compatible",
"spacegroup",
"band_gap",
"density",
"icsd_id",
"cif",
)
def __init__(
self,
api_key=None,
endpoint=None,
notify_db_version=True,
include_user_agent=True,
):
"""
Args:
api_key (str): A String API key for accessing the MaterialsProject
REST interface. Please obtain your API key at
https://www.materialsproject.org/dashboard. If this is None,
the code will check if there is a "PMG_MAPI_KEY" setting.
                If so, it will use that environment variable. This makes it
                easier for heavy users to simply add this environment variable to
                their setups so that MPRester can then be called without any arguments.
endpoint (str): Url of endpoint to access the MaterialsProject REST
interface. Defaults to the standard Materials Project REST
address at "https://materialsproject.org/rest/v2", but
can be changed to other urls implementing a similar interface.
notify_db_version (bool): If True, the current MP database version will
be retrieved and logged locally in the ~/.pmgrc.yaml. If the database
version changes, you will be notified. The current database version is
also printed on instantiation. These local logs are not sent to
materialsproject.org and are not associated with your API key, so be
aware that a notification may not be presented if you run MPRester
from multiple computing environments.
include_user_agent (bool): If True, will include a user agent with the
HTTP request including information on pymatgen and system version
making the API request. This helps MP support pymatgen users, and
is similar to what most web browsers send with each page request.
Set to False to disable the user agent.
"""
if api_key is not None:
self.api_key = api_key
else:
self.api_key = SETTINGS.get("PMG_MAPI_KEY", "")
if endpoint is not None:
self.preamble = endpoint
else:
self.preamble = SETTINGS.get("PMG_MAPI_ENDPOINT", "https://materialsproject.org/rest/v2")
if self.preamble != "https://materialsproject.org/rest/v2":
warnings.warn("Non-default endpoint used: {}".format(self.preamble))
self.session = requests.Session()
self.session.headers = {"x-api-key": self.api_key}
if include_user_agent:
pymatgen_info = "pymatgen/" + pmg_version
python_info = "Python/{}.{}.{}".format(
sys.version_info.major, sys.version_info.minor, sys.version_info.micro
)
platform_info = "{}/{}".format(platform.system(), platform.release())
self.session.headers["user-agent"] = "{} ({} {})".format(pymatgen_info, python_info, platform_info)
if notify_db_version:
db_version = self.get_database_version()
logger.info(f"Connection established to Materials Project database, version {db_version}.")
try:
with open(SETTINGS_FILE, "rt") as f:
d = yaml.safe_load(f)
except IOError:
d = {}
if "MAPI_DB_VERSION" not in d:
d["MAPI_DB_VERSION"] = {"LOG": {}, "LAST_ACCESSED": None}
# store a log of what database versions are being connected to
if db_version not in d["MAPI_DB_VERSION"]["LOG"]:
d["MAPI_DB_VERSION"]["LOG"][db_version] = 1
else:
d["MAPI_DB_VERSION"]["LOG"][db_version] += 1
# alert user if db version changed
last_accessed = d["MAPI_DB_VERSION"]["LAST_ACCESSED"]
if last_accessed and last_accessed != db_version:
print(
f"This database version has changed from the database last accessed ({last_accessed}).\n"
f"Please see release notes on materialsproject.org for information about what has changed."
)
d["MAPI_DB_VERSION"]["LAST_ACCESSED"] = db_version
# write out new database log
dumpfn(d, SETTINGS_FILE)
def __enter__(self):
"""
Support for "with" context.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Support for "with" context.
"""
self.session.close()
def _make_request(self, sub_url, payload=None, method="GET", mp_decode=True):
response = None
url = self.preamble + sub_url
try:
if method == "POST":
response = self.session.post(url, data=payload, verify=True)
else:
response = self.session.get(url, params=payload, verify=True)
if response.status_code in [200, 400]:
if mp_decode:
data = json.loads(response.text, cls=MontyDecoder)
else:
data = json.loads(response.text)
if data["valid_response"]:
if data.get("warning"):
warnings.warn(data["warning"])
return data["response"]
raise MPRestError(data["error"])
raise MPRestError("REST query returned with error status code {}".format(response.status_code))
except Exception as ex:
msg = "{}. Content: {}".format(str(ex), response.content) if hasattr(response, "content") else str(ex)
raise MPRestError(msg)
def get_database_version(self):
"""
The Materials Project database is periodically updated and has a
database version associated with it. When the database is updated,
consolidated data (information about "a material") may and does
change, while calculation data about a specific calculation task
remains unchanged and available for querying via its task_id.
The database version is set as a date in the format YYYY-MM-DD,
where "-DD" may be optional. An additional numerical suffix
might be added if multiple releases happen on the same day.
Returns: database version as a string
"""
d = self._make_request("/api_check")
return d["version"]["db"]
def get_materials_id_from_task_id(self, task_id):
"""
Returns a new MP materials id from a task id (which can be
equivalent to an old materials id)
Args:
task_id (str): A task id.
Returns:
materials_id (str)
"""
return self._make_request("/materials/mid_from_tid/%s" % task_id)
def get_materials_id_references(self, material_id):
"""
Returns all references for a materials id.
Args:
material_id (str): A material id.
Returns:
BibTeX (str)
"""
return self._make_request("/materials/%s/refs" % material_id)
def get_data(self, chemsys_formula_id, data_type="vasp", prop=""):
"""
Flexible method to get any data using the Materials Project REST
interface. Generally used by other methods for more specific queries.
        Format of REST return is *always* a list of dict (regardless of the
        number of pieces of data returned). The general format is as follows:
[{"material_id": material_id, "property_name" : value}, ...]
This is generally a call to
https://www.materialsproject.org/rest/v2/materials/vasp/<prop>.
See https://github.com/materialsproject/mapidoc for details.
Args:
chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
data_type (str): Type of data to return. Currently can either be
"vasp" or "exp".
prop (str): Property to be obtained. Should be one of the
MPRester.supported_task_properties. Leave as empty string for a
general list of useful properties.
"""
sub_url = "/materials/%s/%s" % (chemsys_formula_id, data_type)
if prop:
sub_url += "/" + prop
return self._make_request(sub_url)
def get_materials_ids(self, chemsys_formula):
"""
Get all materials ids for a formula or chemsys.
Args:
chemsys_formula (str): A chemical system (e.g., Li-Fe-O),
or formula (e.g., Fe2O3).
Returns:
([str]) List of all materials ids.
"""
return self._make_request("/materials/%s/mids" % chemsys_formula, mp_decode=False)
def get_doc(self, materials_id):
"""
Get the entire data document for one materials id. Use this judiciously.
REST Endpoint: https://www.materialsproject.org/materials/<mp-id>/doc.
Args:
materials_id (str): E.g., mp-1143 for Al2O3
Returns:
Dict of json document of all data that is displayed on a materials
details page.
"""
return self._make_request("/materials/%s/doc" % materials_id, mp_decode=False)
def get_xas_data(self, material_id, absorbing_element):
"""
Get X-ray absorption spectroscopy data for absorbing element in the
structure corresponding to a material_id. Only X-ray Absorption Near Edge
Structure (XANES) for K-edge is supported.
REST Endpoint:
https://www.materialsproject.org/materials/<mp-id>/xas/<absorbing_element>.
Args:
material_id (str): E.g., mp-1143 for Al2O3
absorbing_element (str): The absorbing element in the corresponding
structure. E.g., Al in Al2O3
"""
element_list = self.get_data(material_id, prop="elements")[0]["elements"]
if absorbing_element not in element_list:
raise ValueError(
"{} element not contained in corresponding structure with "
"mp_id: {}".format(absorbing_element, material_id)
)
data = self._make_request(
"/materials/{}/xas/{}".format(material_id, absorbing_element),
mp_decode=False,
)
return data[0]
def get_task_data(self, chemsys_formula_id, prop=""):
"""
Flexible method to get any data using the Materials Project REST
interface. Generally used by other methods for more specific queries.
        Unlike :func:`get_data`, this method queries the task collection
for specific run information.
        Format of REST return is *always* a list of dict (regardless of the
        number of pieces of data returned). The general format is as follows:
[{"material_id": material_id, "property_name" : value}, ...]
Args:
chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
prop (str): Property to be obtained. Should be one of the
MPRester.supported_properties. Leave as empty string for a
general list of useful properties.
"""
sub_url = "/tasks/%s" % chemsys_formula_id
if prop:
sub_url += "/" + prop
return self._make_request(sub_url)
def get_structures(self, chemsys_formula_id, final=True):
"""
Get a list of Structures corresponding to a chemical system, formula,
or materials_id.
Args:
chemsys_formula_id (str): A chemical system (e.g., Li-Fe-O),
or formula (e.g., Fe2O3) or materials_id (e.g., mp-1234).
final (bool): Whether to get the final structure, or the initial
(pre-relaxation) structure. Defaults to True.
Returns:
List of Structure objects.
"""
prop = "final_structure" if final else "initial_structure"
data = self.get_data(chemsys_formula_id, prop=prop)
return [d[prop] for d in data]
def find_structure(self, filename_or_structure):
"""
Finds matching structures on the Materials Project site.
Args:
filename_or_structure: filename or Structure object
Returns:
A list of matching materials project ids for structure.
Raises:
MPRestError
"""
try:
if isinstance(filename_or_structure, str):
s = Structure.from_file(filename_or_structure)
elif isinstance(filename_or_structure, Structure):
s = filename_or_structure
else:
raise MPRestError("Provide filename or Structure object.")
payload = {"structure": json.dumps(s.as_dict(), cls=MontyEncoder)}
response = self.session.post("{}/find_structure".format(self.preamble), data=payload)
if response.status_code in [200, 400]:
resp = json.loads(response.text, cls=MontyDecoder)
if resp["valid_response"]:
return resp["response"]
raise MPRestError(resp["error"])
raise MPRestError("REST error with status code {} and error {}".format(response.status_code, response.text))
except Exception as ex:
raise MPRestError(str(ex))
def get_entries(
self,
chemsys_formula_id_criteria,
compatible_only=True,
inc_structure=None,
property_data=None,
conventional_unit_cell=False,
sort_by_e_above_hull=False,
):
"""
Get a list of ComputedEntries or ComputedStructureEntries corresponding
to a chemical system, formula, or materials_id or full criteria.
Args:
chemsys_formula_id_criteria (str/dict): A chemical system
(e.g., Li-Fe-O), or formula (e.g., Fe2O3) or materials_id
(e.g., mp-1234) or full Mongo-style dict criteria.
compatible_only (bool): Whether to return only "compatible"
entries. Compatible entries are entries that have been
processed using the MaterialsProjectCompatibility class,
which performs adjustments to allow mixing of GGA and GGA+U
calculations for more accurate phase diagrams and reaction
energies.
inc_structure (str): If None, entries returned are
ComputedEntries. If inc_structure="initial",
ComputedStructureEntries with initial structures are returned.
Otherwise, ComputedStructureEntries with final structures
are returned.
property_data (list): Specify additional properties to include in
entry.data. If None, no data. Should be a subset of
supported_properties.
conventional_unit_cell (bool): Whether to get the standard
conventional unit cell
sort_by_e_above_hull (bool): Whether to sort the list of entries by
e_above_hull (will query e_above_hull as a property_data if True).
Returns:
List of ComputedEntry or ComputedStructureEntry objects.
"""
# TODO: This is a very hackish way of doing this. It should be fixed
# on the REST end.
params = [
"run_type",
"is_hubbard",
"pseudo_potential",
"hubbards",
"potcar_symbols",
"oxide_type",
]
props = ["energy", "unit_cell_formula", "task_id"] + params
if sort_by_e_above_hull:
if property_data and "e_above_hull" not in property_data:
property_data.append("e_above_hull")
elif not property_data:
property_data = ["e_above_hull"]
if property_data:
props += property_data
if inc_structure:
if inc_structure == "initial":
props.append("initial_structure")
else:
props.append("structure")
if not isinstance(chemsys_formula_id_criteria, dict):
criteria = MPRester.parse_criteria(chemsys_formula_id_criteria)
else:
criteria = chemsys_formula_id_criteria
data = self.query(criteria, props)
entries = []
for d in data:
d["potcar_symbols"] = [
"%s %s" % (d["pseudo_potential"]["functional"], l) for l in d["pseudo_potential"]["labels"]
]
data = {"oxide_type": d["oxide_type"]}
if property_data:
data.update({k: d[k] for k in property_data})
if not inc_structure:
e = ComputedEntry(
d["unit_cell_formula"],
d["energy"],
parameters={k: d[k] for k in params},
data=data,
entry_id=d["task_id"],
)
else:
prim = d["initial_structure"] if inc_structure == "initial" else d["structure"]
if conventional_unit_cell:
s = SpacegroupAnalyzer(prim).get_conventional_standard_structure()
energy = d["energy"] * (len(s) / len(prim))
else:
s = prim.copy()
energy = d["energy"]
e = ComputedStructureEntry(
s,
energy,
parameters={k: d[k] for k in params},
data=data,
entry_id=d["task_id"],
)
entries.append(e)
if compatible_only:
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
entries = MaterialsProjectCompatibility().process_entries(entries)
if sort_by_e_above_hull:
entries = sorted(entries, key=lambda entry: entry.data["e_above_hull"])
return entries
def get_pourbaix_entries(self, chemsys, solid_compat="MaterialsProjectCompatibility"):
"""
A helper function to get all entries necessary to generate
a pourbaix diagram from the rest interface.
Args:
chemsys (str or [str]): Chemical system string comprising element
symbols separated by dashes, e.g., "Li-Fe-O" or List of element
symbols, e.g., ["Li", "Fe", "O"].
            solid_compat: Compatibility scheme used to pre-process solid DFT energies prior to applying aqueous
energy adjustments. May be passed as a class (e.g. MaterialsProjectCompatibility) or an instance
(e.g., MaterialsProjectCompatibility()). If None, solid DFT energies are used as-is.
Default: MaterialsProjectCompatibility
"""
# imports are not top-level due to expense
from pymatgen.analysis.phase_diagram import PhaseDiagram
from pymatgen.analysis.pourbaix_diagram import IonEntry, PourbaixEntry
from pymatgen.core.ion import Ion
from pymatgen.entries.compatibility import (
MaterialsProjectAqueousCompatibility,
MaterialsProjectCompatibility,
)
if solid_compat == "MaterialsProjectCompatibility":
solid_compat = MaterialsProjectCompatibility
pbx_entries = []
if isinstance(chemsys, str):
chemsys = chemsys.split("-")
# Get ion entries first, because certain ions have reference
# solids that aren't necessarily in the chemsys (Na2SO4)
url = "/pourbaix_diagram/reference_data/" + "-".join(chemsys)
ion_data = self._make_request(url)
ion_ref_comps = [Composition(d["Reference Solid"]) for d in ion_data]
ion_ref_elts = list(itertools.chain.from_iterable(i.elements for i in ion_ref_comps))
ion_ref_entries = self.get_entries_in_chemsys(
list(set([str(e) for e in ion_ref_elts] + ["O", "H"])),
property_data=["e_above_hull"],
compatible_only=False,
)
# suppress the warning about supplying the required energies; they will be calculated from the
# entries we get from MPRester
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="You did not provide the required O2 and H2O energies.",
)
compat = MaterialsProjectAqueousCompatibility(solid_compat=solid_compat)
ion_ref_entries = compat.process_entries(ion_ref_entries)
ion_ref_pd = PhaseDiagram(ion_ref_entries)
# position the ion energies relative to most stable reference state
for n, i_d in enumerate(ion_data):
ion = Ion.from_formula(i_d["Name"])
refs = [e for e in ion_ref_entries if e.composition.reduced_formula == i_d["Reference Solid"]]
if not refs:
raise ValueError("Reference solid not contained in entry list")
stable_ref = sorted(refs, key=lambda x: x.data["e_above_hull"])[0]
rf = stable_ref.composition.get_reduced_composition_and_factor()[1]
solid_diff = ion_ref_pd.get_form_energy(stable_ref) - i_d["Reference solid energy"] * rf
elt = i_d["Major_Elements"][0]
correction_factor = ion.composition[elt] / stable_ref.composition[elt]
energy = i_d["Energy"] + solid_diff * correction_factor
ion_entry = IonEntry(ion, energy)
pbx_entries.append(PourbaixEntry(ion_entry, "ion-{}".format(n)))
# Construct the solid pourbaix entries from filtered ion_ref entries
extra_elts = set(ion_ref_elts) - {Element(s) for s in chemsys} - {Element("H"), Element("O")}
for entry in ion_ref_entries:
entry_elts = set(entry.composition.elements)
# Ensure no OH chemsys or extraneous elements from ion references
if not (entry_elts <= {Element("H"), Element("O")} or extra_elts.intersection(entry_elts)):
# Create new computed entry
form_e = ion_ref_pd.get_form_energy(entry)
new_entry = ComputedEntry(entry.composition, form_e, entry_id=entry.entry_id)
pbx_entry = PourbaixEntry(new_entry)
pbx_entries.append(pbx_entry)
return pbx_entries
def get_structure_by_material_id(self, material_id, final=True, conventional_unit_cell=False):
"""
Get a Structure corresponding to a material_id.
Args:
material_id (str): Materials Project material_id (a string,
e.g., mp-1234).
final (bool): Whether to get the final structure, or the initial
(pre-relaxation) structure. Defaults to True.
conventional_unit_cell (bool): Whether to get the standard
conventional unit cell
Returns:
Structure object.
"""
prop = "final_structure" if final else "initial_structure"
data = self.get_data(material_id, prop=prop)
if not data:
try:
new_material_id = self.get_materials_id_from_task_id(material_id)
if new_material_id:
warnings.warn(
"The calculation task {} is mapped to canonical mp-id {}, "
"so structure for {} returned. "
"This is not an error, see documentation. "
"If original task data for {} is required, "
"use get_task_data(). To find the canonical mp-id from a task id "
"use get_materials_id_from_task_id().".format(
material_id, new_material_id, new_material_id, material_id
)
)
return self.get_structure_by_material_id(new_material_id)
except MPRestError:
raise MPRestError(
"material_id {} unknown, if this seems like "
"an error please let us know at "
"matsci.org/materials-project".format(material_id)
)
if conventional_unit_cell:
data[0][prop] = SpacegroupAnalyzer(data[0][prop]).get_conventional_standard_structure()
return data[0][prop]
def get_entry_by_material_id(
self,
material_id,
compatible_only=True,
inc_structure=None,
property_data=None,
conventional_unit_cell=False,
):
"""
Get a ComputedEntry corresponding to a material_id.
Args:
material_id (str): Materials Project material_id (a string,
e.g., mp-1234).
compatible_only (bool): Whether to return only "compatible"
entries. Compatible entries are entries that have been
processed using the MaterialsProjectCompatibility class,
which performs adjustments to allow mixing of GGA and GGA+U
calculations for more accurate phase diagrams and reaction
energies.
inc_structure (str): If None, entries returned are
ComputedEntries. If inc_structure="final",
ComputedStructureEntries with final structures are returned.
Otherwise, ComputedStructureEntries with initial structures
are returned.
property_data (list): Specify additional properties to include in
entry.data. If None, no data. Should be a subset of
supported_properties.
conventional_unit_cell (bool): Whether to get the standard
conventional unit cell
Returns:
ComputedEntry or ComputedStructureEntry object.
"""
data = self.get_entries(
material_id,
compatible_only=compatible_only,
inc_structure=inc_structure,
property_data=property_data,
conventional_unit_cell=conventional_unit_cell,
)
return data[0]
def get_dos_by_material_id(self, material_id):
"""
Get a Dos corresponding to a material_id.
REST Endpoint: https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/dos
Args:
material_id (str): Materials Project material_id (a string,
e.g., mp-1234).
Returns:
A Dos object.
"""
data = self.get_data(material_id, prop="dos")
return data[0]["dos"]
def get_bandstructure_by_material_id(self, material_id, line_mode=True):
"""
Get a BandStructure corresponding to a material_id.
REST Endpoint: https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/bandstructure or
https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/bandstructure_uniform
Args:
material_id (str): Materials Project material_id.
line_mode (bool): If True, fetch a BandStructureSymmLine object
(default). If False, return the uniform band structure.
Returns:
A BandStructure object.
"""
prop = "bandstructure" if line_mode else "bandstructure_uniform"
data = self.get_data(material_id, prop=prop)
return data[0][prop]
def get_phonon_dos_by_material_id(self, material_id):
"""
Get phonon density of states data corresponding to a material_id.
Args:
material_id (str): Materials Project material_id.
Returns:
CompletePhononDos: A phonon DOS object.
"""
return self._make_request("/materials/{}/phonondos".format(material_id))
def get_phonon_bandstructure_by_material_id(self, material_id):
"""
Get phonon dispersion data corresponding to a material_id.
Args:
material_id (str): Materials Project material_id.
Returns:
PhononBandStructureSymmLine: A phonon band structure.
"""
return self._make_request("/materials/{}/phononbs".format(material_id))
def get_phonon_ddb_by_material_id(self, material_id):
"""
Get ABINIT Derivative Data Base (DDB) output for phonon calculations.
Args:
material_id (str): Materials Project material_id.
Returns:
str: ABINIT DDB file as a string.
"""
return self._make_request("/materials/{}/abinit_ddb".format(material_id))
def get_entries_in_chemsys(
self,
elements,
compatible_only=True,
inc_structure=None,
property_data=None,
conventional_unit_cell=False,
):
"""
Helper method to get a list of ComputedEntries in a chemical system.
For example, elements = ["Li", "Fe", "O"] will return a list of all
entries in the Li-Fe-O chemical system, i.e., all LixOy,
FexOy, LixFey, LixFeyOz, Li, Fe and O phases. Extremely useful for
creating phase diagrams of entire chemical systems.
Args:
elements (str or [str]): Chemical system string comprising element
symbols separated by dashes, e.g., "Li-Fe-O" or List of element
symbols, e.g., ["Li", "Fe", "O"].
compatible_only (bool): Whether to return only "compatible"
entries. Compatible entries are entries that have been
processed using the MaterialsProjectCompatibility class,
which performs adjustments to allow mixing of GGA and GGA+U
calculations for more accurate phase diagrams and reaction
energies.
inc_structure (str): If None, entries returned are
ComputedEntries. If inc_structure="final",
ComputedStructureEntries with final structures are returned.
Otherwise, ComputedStructureEntries with initial structures
are returned.
property_data (list): Specify additional properties to include in
entry.data. If None, no data. Should be a subset of
supported_properties.
conventional_unit_cell (bool): Whether to get the standard
conventional unit cell
Returns:
List of ComputedEntries.
"""
if isinstance(elements, str):
elements = elements.split("-")
all_chemsyses = []
for i in range(len(elements)):
for els in itertools.combinations(elements, i + 1):
all_chemsyses.append("-".join(sorted(els)))
entries = self.get_entries(
{"chemsys": {"$in": all_chemsyses}},
compatible_only=compatible_only,
inc_structure=inc_structure,
property_data=property_data,
conventional_unit_cell=conventional_unit_cell,
)
return entries
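    # Hedged sketch of the phase-diagram use case mentioned above (assumes
    # pymatgen's PhaseDiagram class and a valid API key):
    #
    #     from pymatgen.analysis.phase_diagram import PhaseDiagram
    #     with MPRester("YOUR_API_KEY") as mpr:
    #         entries = mpr.get_entries_in_chemsys(["Li", "Fe", "O"])
    #     pd = PhaseDiagram(entries)  # hull energies, stable phases, etc.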
def get_exp_thermo_data(self, formula):
"""
Get a list of ThermoData objects associated with a formula using the
Materials Project REST interface.
Args:
formula (str): A formula to search for.
Returns:
List of ThermoData objects.
"""
return self.get_data(formula, data_type="exp")
def get_exp_entry(self, formula):
"""
Returns an ExpEntry object, which is the experimental equivalent of a
ComputedEntry and can be used for analyses using experimental data.
Args:
formula (str): A formula to search for.
Returns:
An ExpEntry object.
"""
return ExpEntry(Composition(formula), self.get_exp_thermo_data(formula))
def query(
self,
criteria,
properties,
chunk_size=500,
max_tries_per_chunk=5,
mp_decode=True,
):
r"""
Performs an advanced query using MongoDB-like syntax for directly
querying the Materials Project database. This allows one to perform
queries which are otherwise too cumbersome to perform using the standard
convenience methods.
Please consult the Materials API documentation at
https://github.com/materialsproject/mapidoc, which provides a
comprehensive explanation of the document schema used in the Materials
Project (supported criteria and properties) and guidance on how best to
query for the relevant information you need.
For queries that request data on more than CHUNK_SIZE materials at once,
this method will chunk a query by first retrieving a list of material
IDs that satisfy CRITERIA, and then merging the criteria with a
restriction to one chunk of materials at a time of size CHUNK_SIZE. You
can opt out of this behavior by setting CHUNK_SIZE=0. To guard against
intermittent server errors in the case of many chunks per query,
        possibly transient server errors will result in retrying a given chunk
up to MAX_TRIES_PER_CHUNK times.
Args:
criteria (str/dict): Criteria of the query as a string or
mongo-style dict.
                If a string, it supports a powerful but simple criteria syntax.
E.g., "Fe2O3" means search for materials with reduced_formula
Fe2O3. Wild cards are also supported. E.g., "\\*2O" means get
all materials whose formula can be formed as \\*2O, e.g.,
Li2O, K2O, etc.
Other syntax examples:
mp-1234: Interpreted as a Materials ID.
Fe2O3 or *2O3: Interpreted as reduced formulas.
Li-Fe-O or *-Fe-O: Interpreted as chemical systems.
You can mix and match with spaces, which are interpreted as
"OR". E.g. "mp-1234 FeO" means query for all compounds with
reduced formula FeO or with materials_id mp-1234.
Using a full dict syntax, even more powerful queries can be
constructed. For example, {"elements":{"$in":["Li",
"Na", "K"], "$all": ["O"]}, "nelements":2} selects all Li, Na
and K oxides. {"band_gap": {"$gt": 1}} selects all materials
with band gaps greater than 1 eV.
properties (list): Properties to request for as a list. For
example, ["formula", "formation_energy_per_atom"] returns
the formula and formation energy per atom.
chunk_size (int): Number of materials for which to fetch data at a
time. More data-intensive properties may require smaller chunk
sizes. Use chunk_size=0 to force no chunking -- this is useful
when fetching only properties such as 'material_id'.
max_tries_per_chunk (int): How many times to re-try fetching a given
chunk when the server gives a 5xx error (e.g. a timeout error).
mp_decode (bool): Whether to do a decoding to a Pymatgen object
where possible. In some cases, it might be useful to just get
the raw python dict, i.e., set to False.
Returns:
List of results. E.g.,
[{u'formula': {u'O': 1, u'Li': 2.0}},
{u'formula': {u'Na': 2.0, u'O': 2.0}},
{u'formula': {u'K': 1, u'O': 3.0}},
...]
"""
if not isinstance(criteria, dict):
criteria = self.parse_criteria(criteria)
payload = {
"criteria": json.dumps(criteria),
"properties": json.dumps(properties),
}
if chunk_size == 0:
return self._make_request("/query", payload=payload, method="POST", mp_decode=mp_decode)
count_payload = payload.copy()
count_payload["options"] = json.dumps({"count_only": True})
num_results = self._make_request("/query", payload=count_payload, method="POST")
if num_results <= chunk_size:
return self._make_request("/query", payload=payload, method="POST", mp_decode=mp_decode)
data = []
mids = [d["material_id"] for d in self.query(criteria, ["material_id"], chunk_size=0)]
chunks = get_chunks(mids, size=chunk_size)
progress_bar = PBar(total=len(mids))
for chunk in chunks:
chunk_criteria = criteria.copy()
chunk_criteria.update({"material_id": {"$in": chunk}})
num_tries = 0
while num_tries < max_tries_per_chunk:
try:
data.extend(
self.query(
chunk_criteria,
properties,
chunk_size=0,
mp_decode=mp_decode,
)
)
break
except MPRestError as e:
                    # Exception objects have no .message attribute in Python 3;
                    # inspect the string representation instead.
                    match = re.search(r"error status code (\d+)", str(e))
if match:
if not match.group(1).startswith("5"):
raise e
num_tries += 1
print(
"Unknown server error. Trying again in five "
"seconds (will try at most {} times)...".format(max_tries_per_chunk)
)
sleep(5)
progress_bar.update(len(chunk))
return data
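    # Hedged sketch of a typical query (criteria and properties follow the
    # docstring above; the field names are standard mapidoc keys):
    #
    #     with MPRester("YOUR_API_KEY") as mpr:
    #         docs = mpr.query(
    #             {"elements": {"$all": ["Li", "O"]}, "nelements": 2},
    #             ["material_id", "pretty_formula", "band_gap"],
    #         )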
def submit_structures(
self,
structures,
authors,
projects=None,
references="",
remarks=None,
data=None,
histories=None,
created_at=None,
):
"""
Submits a list of structures to the Materials Project as SNL files.
The argument list mirrors the arguments for the StructureNL object,
except that a list of structures with the same metadata is used as an
input.
.. note::
As of now, this MP REST feature is open only to a select group of
users. Opening up submissions to all users is being planned for
the future.
Args:
structures: A list of Structure objects
authors (list): List of {"name":'', "email":''} dicts,
*list* of Strings as 'John Doe <johndoe@gmail.com>',
or a single String with commas separating authors
projects ([str]): List of Strings ['Project A', 'Project B'].
This applies to all structures.
references (str): A String in BibTeX format. Again, this applies to
all structures.
remarks ([str]): List of Strings ['Remark A', 'Remark B']
data ([dict]): A list of free form dict. Namespaced at the root
level with an underscore, e.g. {"_materialsproject":<custom
data>}. The length of data should be the same as the list of
structures if not None.
histories: List of list of dicts - [[{'name':'', 'url':'',
'description':{}}], ...] The length of histories should be the
same as the list of structures if not None.
created_at (datetime): A datetime object
Returns:
A list of inserted submission ids.
"""
from pymatgen.util.provenance import StructureNL
snl_list = StructureNL.from_structures(
structures,
authors,
projects,
references,
remarks,
data,
histories,
created_at,
)
        return self.submit_snl(snl_list)
def submit_snl(self, snl):
"""
Submits a list of StructureNL to the Materials Project site.
.. note::
As of now, this MP REST feature is open only to a select group of
users. Opening up submissions to all users is being planned for
the future.
Args:
snl (StructureNL/[StructureNL]): A single StructureNL, or a list
of StructureNL objects
Returns:
A list of inserted submission ids.
Raises:
MPRestError
"""
try:
snl = snl if isinstance(snl, list) else [snl]
jsondata = [s.as_dict() for s in snl]
payload = {"snl": json.dumps(jsondata, cls=MontyEncoder)}
response = self.session.post("{}/snl/submit".format(self.preamble), data=payload)
if response.status_code in [200, 400]:
resp = json.loads(response.text, cls=MontyDecoder)
if resp["valid_response"]:
if resp.get("warning"):
warnings.warn(resp["warning"])
return resp["inserted_ids"]
raise MPRestError(resp["error"])
raise MPRestError("REST error with status code {} and error {}".format(response.status_code, response.text))
except Exception as ex:
raise MPRestError(str(ex))
def delete_snl(self, snl_ids):
"""
Delete earlier submitted SNLs.
.. note::
As of now, this MP REST feature is open only to a select group of
users. Opening up submissions to all users is being planned for
the future.
Args:
snl_ids: List of SNL ids.
Raises:
MPRestError
"""
try:
payload = {"ids": json.dumps(snl_ids)}
response = self.session.post("{}/snl/delete".format(self.preamble), data=payload)
if response.status_code in [200, 400]:
resp = json.loads(response.text, cls=MontyDecoder)
if resp["valid_response"]:
if resp.get("warning"):
warnings.warn(resp["warning"])
return resp
raise MPRestError(resp["error"])
raise MPRestError("REST error with status code {} and error {}".format(response.status_code, response.text))
except Exception as ex:
raise MPRestError(str(ex))
def query_snl(self, criteria):
"""
Query for submitted SNLs.
.. note::
As of now, this MP REST feature is open only to a select group of
users. Opening up submissions to all users is being planned for
the future.
Args:
criteria (dict): Query criteria.
Returns:
A dict, with a list of submitted SNLs in the "response" key.
Raises:
MPRestError
"""
try:
payload = {"criteria": json.dumps(criteria)}
response = self.session.post("{}/snl/query".format(self.preamble), data=payload)
if response.status_code in [200, 400]:
resp = json.loads(response.text)
if resp["valid_response"]:
if resp.get("warning"):
warnings.warn(resp["warning"])
return resp["response"]
raise MPRestError(resp["error"])
raise MPRestError("REST error with status code {} and error {}".format(response.status_code, response.text))
except Exception as ex:
raise MPRestError(str(ex))
def submit_vasp_directory(
self,
rootdir,
authors,
projects=None,
references="",
remarks=None,
master_data=None,
master_history=None,
created_at=None,
ncpus=None,
):
"""
Assimilates all vasp run directories beneath a particular
        directory using BorgQueen to obtain structures, and then submits them
to the Materials Project as SNL files. VASP related meta data like
initial structure and final energies are automatically incorporated.
.. note::
As of now, this MP REST feature is open only to a select group of
users. Opening up submissions to all users is being planned for
the future.
Args:
rootdir (str): Rootdir to start assimilating VASP runs from.
authors: *List* of {"name":'', "email":''} dicts,
*list* of Strings as 'John Doe <johndoe@gmail.com>',
or a single String with commas separating authors. The same
list of authors should apply to all runs.
projects ([str]): List of Strings ['Project A', 'Project B'].
This applies to all structures.
references (str): A String in BibTeX format. Again, this applies to
all structures.
remarks ([str]): List of Strings ['Remark A', 'Remark B']
master_data (dict): A free form dict. Namespaced at the root
level with an underscore, e.g. {"_materialsproject":<custom
data>}. This data is added to all structures detected in the
directory, in addition to other vasp data on a per structure
basis.
master_history: A master history to be added to all entries.
created_at (datetime): A datetime object
ncpus (int): Number of cpus to use in using BorgQueen to
assimilate. Defaults to None, which means serial.
"""
from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
from pymatgen.apps.borg.queen import BorgQueen
drone = VaspToComputedEntryDrone(inc_structure=True, data=["filename", "initial_structure"])
queen = BorgQueen(drone, number_of_drones=ncpus)
queen.parallel_assimilate(rootdir)
structures = []
metadata = []
histories = []
for e in queen.get_data():
structures.append(e.structure)
m = {
"_vasp": {
"parameters": e.parameters,
"final_energy": e.energy,
"final_energy_per_atom": e.energy_per_atom,
"initial_structure": e.data["initial_structure"].as_dict(),
}
}
if "history" in e.parameters:
histories.append(e.parameters["history"])
if master_data is not None:
m.update(master_data)
metadata.append(m)
if master_history is not None:
histories = master_history * len(structures)
return self.submit_structures(
structures,
authors,
projects=projects,
references=references,
remarks=remarks,
data=metadata,
histories=histories,
created_at=created_at,
)
def get_stability(self, entries):
"""
Returns the stability of all entries.
"""
try:
payload = {"entries": json.dumps(entries, cls=MontyEncoder)}
response = self.session.post(
"{}/phase_diagram/calculate_stability".format(self.preamble),
data=payload,
)
if response.status_code in [200, 400]:
resp = json.loads(response.text, cls=MontyDecoder)
if resp["valid_response"]:
if resp.get("warning"):
warnings.warn(resp["warning"])
return resp["response"]
raise MPRestError(resp["error"])
raise MPRestError("REST error with status code {} and error {}".format(response.status_code, response.text))
except Exception as ex:
raise MPRestError(str(ex))
def get_cohesive_energy(self, material_id, per_atom=False):
"""
        Gets the cohesive energy for a material (eV per formula unit). The
        cohesive energy is defined as the difference between the bulk energy
        and the sum of the total DFT energies of the isolated atoms of the
        elements in the bulk.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
per_atom (bool): Whether or not to return cohesive energy per atom
Returns:
Cohesive energy (eV).
"""
entry = self.get_entry_by_material_id(material_id)
ebulk = entry.energy / entry.composition.get_integer_formula_and_factor()[1]
comp_dict = entry.composition.reduced_composition.as_dict()
isolated_atom_e_sum, n = 0, 0
for el in comp_dict.keys():
e = self._make_request("/element/%s/tasks/isolated_atom" % (el), mp_decode=False)[0]
isolated_atom_e_sum += e["output"]["final_energy_per_atom"] * comp_dict[el]
n += comp_dict[el]
ecoh_per_formula = isolated_atom_e_sum - ebulk
return ecoh_per_formula / n if per_atom else ecoh_per_formula
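    # Descriptive note (added): for a reduced composition with amounts n_i the
    # code above computes E_coh = sum_i n_i * E_isolated(i) - E_bulk per
    # formula unit, and divides by n = sum_i n_i when per_atom=True.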
def get_reaction(self, reactants, products):
"""
Gets a reaction from the Materials Project.
Args:
reactants ([str]): List of formulas
products ([str]): List of formulas
Returns:
rxn
"""
return self._make_request(
"/reaction",
payload={"reactants[]": reactants, "products[]": products},
mp_decode=False,
)
def get_substrates(self, material_id, number=50, orient=None):
"""
Get a substrate list for a material id. The list is in order of
        increasing elastic energy if an elastic tensor is available for
the material_id. Otherwise the list is in order of increasing
matching area.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
orient (list) : substrate orientation to look for
number (int) : number of substrates to return
n=0 returns all available matches
Returns:
list of dicts with substrate matches
"""
req = "/materials/{}/substrates?n={}".format(material_id, number)
if orient:
req += "&orient={}".format(" ".join(map(str, orient)))
return self._make_request(req)
def get_all_substrates(self):
"""
Gets the list of all possible substrates considered in the
Materials Project substrate database
Returns:
list of material_ids corresponding to possible substrates
"""
return self._make_request("/materials/all_substrate_ids")
def get_surface_data(self, material_id, miller_index=None, inc_structures=False):
"""
Gets surface data for a material. Useful for Wulff shapes.
Reference for surface data:
Tran, R., Xu, Z., Radhakrishnan, B., Winston, D., Sun, W., Persson, K.
        A., & Ong, S. P. (2016). Data Descriptor: Surface energies of elemental
crystals. Scientific Data, 3(160080), 1–13.
http://dx.doi.org/10.1038/sdata.2016.80
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
miller_index (list of integer): The miller index of the surface.
e.g., [3, 2, 1]. If miller_index is provided, only one dictionary
of this specific plane will be returned.
inc_structures (bool): Include final surface slab structures.
These are unnecessary for Wulff shape construction.
Returns:
Surface data for material. Energies are given in SI units (J/m^2).
"""
req = "/materials/{}/surfaces".format(material_id)
if inc_structures:
req += "?include_structures=true"
if miller_index:
surf_data_dict = self._make_request(req)
surf_list = surf_data_dict["surfaces"]
ucell = self.get_structure_by_material_id(material_id, conventional_unit_cell=True)
eq_indices = get_symmetrically_equivalent_miller_indices(ucell, miller_index)
for one_surf in surf_list:
if tuple(one_surf["miller_index"]) in eq_indices:
return one_surf
raise ValueError("Bad miller index.")
return self._make_request(req)
def get_wulff_shape(self, material_id):
"""
Constructs a Wulff shape for a material.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
Returns:
pymatgen.analysis.wulff.WulffShape
"""
from pymatgen.analysis.wulff import WulffShape
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
structure = self.get_structure_by_material_id(material_id)
surfaces = self.get_surface_data(material_id)["surfaces"]
lattice = SpacegroupAnalyzer(structure).get_conventional_standard_structure().lattice
miller_energy_map = {}
for surf in surfaces:
miller = tuple(surf["miller_index"])
# Prefer reconstructed surfaces, which have lower surface energies.
if (miller not in miller_energy_map) or surf["is_reconstructed"]:
miller_energy_map[miller] = surf["surface_energy"]
millers, energies = zip(*miller_energy_map.items())
return WulffShape(lattice, millers, energies)
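    # Hedged usage sketch ("mp-13" is an illustrative material id):
    #
    #     with MPRester("YOUR_API_KEY") as mpr:
    #         wulff = mpr.get_wulff_shape("mp-13")
    #     plot = wulff.get_plot()  # matplotlib-based; return type may vary by version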
def get_gb_data(
self,
material_id=None,
pretty_formula=None,
chemsys=None,
sigma=None,
gb_plane=None,
rotation_axis=None,
include_work_of_separation=False,
):
"""
Gets grain boundary data for a material.
Args:
material_id (str): Materials Project material_id, e.g., 'mp-129'.
pretty_formula (str): The formula of metals. e.g., 'Fe'
sigma(int): The sigma value of a certain type of grain boundary
gb_plane(list of integer): The Miller index of grain
boundary plane. e.g., [1, 1, 1]
rotation_axis(list of integer): The Miller index of rotation
axis. e.g., [1, 0, 0], [1, 1, 0], and [1, 1, 1]
Sigma value is determined by the combination of rotation axis and
rotation angle. The five degrees of freedom (DOF) of one grain boundary
include: rotation axis (2 DOFs), rotation angle (1 DOF), and grain
boundary plane (2 DOFs).
include_work_of_separation (bool): whether to include the work of separation
(in unit of (J/m^2)). If you want to query the work of separation, please
specify the material_id.
Returns:
A list of grain boundaries that satisfy the query conditions (sigma, gb_plane).
Energies are given in SI units (J/m^2).
"""
if gb_plane:
gb_plane = ",".join([str(i) for i in gb_plane])
if rotation_axis:
rotation_axis = ",".join([str(i) for i in rotation_axis])
payload = {
"material_id": material_id,
"pretty_formula": pretty_formula,
"chemsys": chemsys,
"sigma": sigma,
"gb_plane": gb_plane,
"rotation_axis": rotation_axis,
}
if include_work_of_separation and material_id:
list_of_gbs = self._make_request("/grain_boundaries", payload=payload)
for i, gb_dict in enumerate(list_of_gbs):
gb_energy = gb_dict["gb_energy"]
gb_plane_int = gb_dict["gb_plane"]
surface_energy = self.get_surface_data(material_id=material_id, miller_index=gb_plane_int)[
"surface_energy"
]
wsep = 2 * surface_energy - gb_energy # calculate the work of separation
gb_dict["work_of_separation"] = wsep
return list_of_gbs
return self._make_request("/grain_boundaries", payload=payload)
def get_interface_reactions(
self,
reactant1,
reactant2,
open_el=None,
relative_mu=None,
use_hull_energy=False,
):
"""
Gets critical reactions between two reactants.
Get critical reactions ("kinks" in the mixing ratio where
reaction products change) between two reactants. See the
`pymatgen.analysis.interface_reactions` module for more info.
Args:
reactant1 (str): Chemical formula for reactant
reactant2 (str): Chemical formula for reactant
open_el (str): Element in reservoir available to system
relative_mu (float): Relative chemical potential of element in
reservoir with respect to pure substance. Must be non-positive.
use_hull_energy (bool): Whether to use the convex hull energy for a
given composition for the reaction energy calculation. If false,
the energy of the ground state structure will be preferred; if a
ground state can not be found for a composition, the convex hull
energy will be used with a warning message.
Returns:
list: list of dicts of form {ratio,energy,rxn} where `ratio` is the
reactant mixing ratio, `energy` is the reaction energy
in eV/atom, and `rxn` is a
`pymatgen.analysis.reaction_calculator.Reaction`.
"""
payload = {
"reactants": " ".join([reactant1, reactant2]),
"open_el": open_el,
"relative_mu": relative_mu,
"use_hull_energy": use_hull_energy,
}
return self._make_request("/interface_reactions", payload=payload, method="POST")
def get_download_info(self, material_ids, task_types=None, file_patterns=None):
"""
        Get a list of URLs to retrieve raw VASP output files from the NoMaD repository.
Args:
material_ids (list): list of material identifiers (mp-id's)
task_types (list): list of task types to include in download (see TaskType Enum class)
file_patterns (list): list of wildcard file names to include for each task
Returns:
a tuple of 1) a dictionary mapping material_ids to task_ids and
task_types, and 2) a list of URLs to download zip archives from
NoMaD repository. Each zip archive will contain a manifest.json with
metadata info, e.g. the task/external_ids that belong to a directory
"""
# task_id's correspond to NoMaD external_id's
task_types = [t.value for t in task_types if isinstance(t, TaskType)] if task_types else []
meta = defaultdict(list)
for doc in self.query({"material_id": {"$in": material_ids}}, ["material_id", "blessed_tasks"]):
for task_type, task_id in doc["blessed_tasks"].items():
if task_types and task_type not in task_types:
continue
meta[doc["material_id"]].append({"task_id": task_id, "task_type": task_type})
if not meta:
raise ValueError("No tasks found.")
# return a list of URLs for NoMaD Downloads containing the list of files
# for every external_id in `task_ids`
prefix = "http://labdev-nomad.esc.rzg.mpg.de/fairdi/nomad/mp/api/raw/query?"
if file_patterns is not None:
for file_pattern in file_patterns:
prefix += f"file_pattern={file_pattern}&"
prefix += "external_id="
# NOTE: IE has 2kb URL char limit
nmax = int((2000 - len(prefix)) / 11) # mp-<7-digit> + , = 11
task_ids = [t["task_id"] for tl in meta.values() for t in tl]
chunks = get_chunks(task_ids, size=nmax)
urls = [prefix + ",".join(tids) for tids in chunks]
return meta, urls
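    # Hedged usage sketch (mpr is an MPRester instance; ids and patterns are
    # illustrative):
    #
    #     meta, urls = mpr.get_download_info(
    #         ["mp-149", "mp-13"], file_patterns=["vasprun*"]
    #     )
    #     # meta maps material_id -> [{"task_id": ..., "task_type": ...}, ...]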
@staticmethod
def parse_criteria(criteria_string):
"""
        Parses a powerful but simple string criteria syntax and generates a
        proper mongo-style criteria dict.
Args:
criteria_string (str): A string representing a search criteria.
Also supports wild cards. E.g.,
something like "*2O" gets converted to
{'pretty_formula': {'$in': [u'B2O', u'Xe2O', u"Li2O", ...]}}
Other syntax examples:
mp-1234: Interpreted as a Materials ID.
Fe2O3 or *2O3: Interpreted as reduced formulas.
Li-Fe-O or *-Fe-O: Interpreted as chemical systems.
You can mix and match with spaces, which are interpreted as
"OR". E.g., "mp-1234 FeO" means query for all compounds with
reduced formula FeO or with materials_id mp-1234.
Returns:
A mongo query dict.
"""
toks = criteria_string.split()
def parse_sym(sym):
if sym == "*":
return [el.symbol for el in Element]
m = re.match(r"\{(.*)\}", sym)
if m:
return [s.strip() for s in m.group(1).split(",")]
return [sym]
def parse_tok(t):
if re.match(r"\w+-\d+", t):
return {"task_id": t}
if "-" in t:
elements = [parse_sym(sym) for sym in t.split("-")]
chemsyss = []
for cs in itertools.product(*elements):
if len(set(cs)) == len(cs):
# Check for valid symbols
cs = [Element(s).symbol for s in cs]
chemsyss.append("-".join(sorted(cs)))
return {"chemsys": {"$in": chemsyss}}
all_formulas = set()
explicit_els = []
wild_card_els = []
for sym in re.findall(r"(\*[\.\d]*|\{.*\}[\.\d]*|[A-Z][a-z]*)[\.\d]*", t):
if ("*" in sym) or ("{" in sym):
wild_card_els.append(sym)
else:
m = re.match(r"([A-Z][a-z]*)[\.\d]*", sym)
explicit_els.append(m.group(1))
nelements = len(wild_card_els) + len(set(explicit_els))
parts = re.split(r"(\*|\{.*\})", t)
parts = [parse_sym(s) for s in parts if s != ""]
for f in itertools.product(*parts):
c = Composition("".join(f))
if len(c) == nelements:
# Check for valid Elements in keys.
for e in c.keys():
Element(e.symbol)
all_formulas.add(c.reduced_formula)
return {"pretty_formula": {"$in": list(all_formulas)}}
if len(toks) == 1:
return parse_tok(toks[0])
return {"$or": list(map(parse_tok, toks))}
class MPRestError(Exception):
"""
Exception class for MPRestAdaptor.
Raised when the query has problems, e.g., bad query format.
"""
pass
|
davidwaroquiers/pymatgen
|
pymatgen/ext/matproj.py
|
Python
|
mit
| 66,579
|
[
"ABINIT",
"VASP",
"pymatgen"
] |
5717ad124890fa137a52d1e08664ddac35c664fc314337b264bc9cca1e7bee25
|
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os
import glob
from nipype.interfaces.base import (CommandLineInputSpec, CommandLine, traits,
TraitedSpec, File, StdOutCommandLine,
OutputMultiPath, StdOutCommandLineInputSpec,
isdefined)
from nipype.utils.filemanip import split_filename
class Image2VoxelInputSpec(StdOutCommandLineInputSpec):
in_file = File(exists=True, argstr='-4dimage %s',
mandatory=True, position=1,
desc='4d image file')
#TODO convert list of files on the fly
# imagelist = File(exists=True, argstr='-imagelist %s',
# mandatory=True, position=1,
# desc='Name of a file containing a list of 3D images')
#
# imageprefix = traits.Str(argstr='-imageprefix %s', position=3,
# desc='Path to prepend onto filenames in the imagelist.')
out_type = traits.Enum("float", "char", "short", "int", "long", "double", argstr='-outputdatatype %s', position=2,
desc='"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"', usedefault=True)
class Image2VoxelOutputSpec(TraitedSpec):
voxel_order = File(exists=True, desc='path/name of 4D volume in voxel order')
class Image2Voxel(StdOutCommandLine):
"""
Converts Analyze / NIFTI / MHA files to voxel order.
Converts scanner-order data in a supported image format to voxel-order data.
Either takes a 4D file (all measurements in single image)
or a list of 3D images.
Examples
--------
>>> import nipype.interfaces.camino as cmon
>>> img2vox = cmon.Image2Voxel()
>>> img2vox.inputs.in_file = '4d_dwi.nii'
>>> img2vox.run() # doctest: +SKIP
"""
_cmd = 'image2voxel'
input_spec = Image2VoxelInputSpec
output_spec = Image2VoxelOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['voxel_order'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_outfilename(self):
_, name , _ = split_filename(self.inputs.in_file)
return name + '.B'+ self.inputs.out_type
class FSL2SchemeInputSpec(StdOutCommandLineInputSpec):
bvec_file = File(exists=True, argstr='-bvecfile %s',
mandatory=True, position=1,
desc='b vector file')
bval_file = File(exists=True, argstr='-bvalfile %s',
mandatory=True, position=2,
desc='b value file')
numscans = traits.Int(argstr='-numscans %d', units='NA',
desc="Output all measurements numerous (n) times, used when combining multiple scans from the same imaging session.")
interleave = traits.Bool(argstr='-interleave', desc="Interleave repeated scans. Only used with -numscans.")
    bscale = traits.Float(argstr='-bscale %f', units='NA',
                          desc="Scaling factor to convert the b-values into different units. Default is 10^6.")
diffusiontime = traits.Float(argstr = '-diffusiontime %f', units = 'NA',
desc="Diffusion time")
flipx = traits.Bool(argstr='-flipx', desc="Negate the x component of all the vectors.")
flipy = traits.Bool(argstr='-flipy', desc="Negate the y component of all the vectors.")
flipz = traits.Bool(argstr='-flipz', desc="Negate the z component of all the vectors.")
usegradmod = traits.Bool(argstr='-usegradmod', desc="Use the gradient magnitude to scale b. This option has no effect if your gradient directions have unit magnitude.")
class FSL2SchemeOutputSpec(TraitedSpec):
scheme = File(exists=True, desc='Scheme file')
class FSL2Scheme(StdOutCommandLine):
"""
Converts b-vectors and b-values from FSL format to a Camino scheme file.
Examples
--------
>>> import nipype.interfaces.camino as cmon
>>> makescheme = cmon.FSL2Scheme()
>>> makescheme.inputs.bvec_file = 'bvecs'
    >>> makescheme.inputs.bval_file = 'bvals'
>>> makescheme.run() # doctest: +SKIP
"""
_cmd = 'fsl2scheme'
input_spec=FSL2SchemeInputSpec
output_spec=FSL2SchemeOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['scheme'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_outfilename(self):
_, name , _ = split_filename(self.inputs.bvec_file)
return name + '.scheme'
class VtkStreamlinesInputSpec(StdOutCommandLineInputSpec):
inputmodel = traits.Enum('raw', 'voxels', argstr='-inputmodel %s', desc='input model type (raw or voxels)', usedefault=True)
in_file = File(exists=True, argstr=' < %s',
mandatory=True, position=-2,
desc='data file')
voxeldims = traits.List(traits.Int, desc = 'voxel dimensions in mm',
argstr='-voxeldims %s', minlen=3, maxlen=3, position=4,
units='mm')
seed_file = File(exists=False, argstr='-seedfile %s', position=1,
desc='image containing seed points')
target_file = File(exists=False, argstr='-targetfile %s', position=2,
desc='image containing integer-valued target regions')
scalar_file = File(exists=False, argstr='-scalarfile %s', position=3,
desc='image that is in the same physical space as the tracts')
colourorient = traits.Bool(argstr='-colourorient', desc="Each point on the streamline is coloured by the local orientation.")
interpolatescalars = traits.Bool(argstr='-interpolatescalars', desc="the scalar value at each point on the streamline is calculated by trilinear interpolation")
interpolate = traits.Bool(argstr='-interpolate', desc="the scalar value at each point on the streamline is calculated by trilinear interpolation")
class VtkStreamlinesOutputSpec(TraitedSpec):
vtk = File(exists=True, desc='Streamlines in VTK format')
class VtkStreamlines(StdOutCommandLine):
"""
Use vtkstreamlines to convert raw or voxel format streamlines to VTK polydata
Examples
--------
>>> import nipype.interfaces.camino as cmon
>>> vtk = cmon.VtkStreamlines()
>>> vtk.inputs.in_file = 'tract_data.Bfloat'
>>> vtk.inputs.voxeldims = [1,1,1]
>>> vtk.run() # doctest: +SKIP
"""
_cmd = 'vtkstreamlines'
input_spec=VtkStreamlinesInputSpec
output_spec=VtkStreamlinesOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['vtk'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_outfilename(self):
_, name , _ = split_filename(self.inputs.in_file)
return name + '.vtk'
class ProcStreamlinesInputSpec(StdOutCommandLineInputSpec):
inputmodel = traits.Enum('raw', 'voxels', argstr='-inputmodel %s', desc='input model type (raw or voxels)', usedefault=True)
in_file = File(exists=True, argstr='-inputfile %s',
mandatory=True, position=1,
desc='data file')
maxtractpoints= traits.Int(argstr='-maxtractpoints %d', units='NA',
desc="maximum number of tract points")
mintractpoints= traits.Int(argstr='-mintractpoints %d', units='NA',
desc="minimum number of tract points")
maxtractlength= traits.Int(argstr='-maxtractlength %d', units='mm',
desc="maximum length of tracts")
mintractlength= traits.Int(argstr='-mintractlength %d', units='mm',
desc="minimum length of tracts")
datadims = traits.List(traits.Int, desc = 'data dimensions in voxels',
argstr='-datadims %s', minlen=3, maxlen=3,
units='voxels')
voxeldims = traits.List(traits.Int, desc = 'voxel dimensions in mm',
argstr='-voxeldims %s', minlen=3, maxlen=3,
units='mm')
seedpointmm = traits.List(traits.Int, desc = 'The coordinates of a single seed point for tractography in mm',
argstr='-seedpointmm %s', minlen=3, maxlen=3,
units='mm')
seedpointvox = traits.List(traits.Int, desc = 'The coordinates of a single seed point for tractography in voxels',
argstr='-seedpointvox %s', minlen=3, maxlen=3,
units='voxels')
seedfile = File(exists=False, argstr='-seedfile %s',
desc='Image Containing Seed Points')
regionindex = traits.Int(argstr='-regionindex %d', units='mm',
desc="index of specific region to process")
iterations = traits.Float(argstr='-iterations %d', units='NA',
desc="Number of streamlines generated for each seed. Not required when outputting streamlines, but needed to create PICo images. The default is 1 if the output is streamlines, and 5000 if the output is connection probability images.")
targetfile = File(exists=False, argstr='-targetfile %s',
desc='Image containing target volumes.')
allowmultitargets = traits.Bool(argstr='-allowmultitargets', desc="Allows streamlines to connect to multiple target volumes.")
directional = traits.List(traits.Int, desc = 'Splits the streamlines at the seed point and computes separate connection probabilities for each segment. Streamline segments are grouped according to their dot product with the vector (X, Y, Z). The ideal vector will be tangential to the streamline trajectory at the seed, such that the streamline projects from the seed along (X, Y, Z) and -(X, Y, Z). However, it is only necessary for the streamline trajectory to not be orthogonal to (X, Y, Z).',
argstr='-directional %s', minlen=3, maxlen=3,
units='NA')
waypointfile = File(exists=False, argstr='-waypointfile %s',
desc='Image containing waypoints. Waypoints are defined as regions of the image with the same intensity, where 0 is background and any value > 0 is a waypoint.')
truncateloops = traits.Bool(argstr='-truncateloops', desc="This option allows streamlines to enter a waypoint exactly once. After the streamline leaves the waypoint, it is truncated upon a second entry to the waypoint.")
discardloops = traits.Bool(argstr='-discardloops', desc="This option allows streamlines to enter a waypoint exactly once. After the streamline leaves the waypoint, the entire streamline is discarded upon a second entry to the waypoint.")
exclusionfile = File(exists=False, argstr='-exclusionfile %s',
desc='Image containing exclusion ROIs. This should be an Analyze 7.5 header / image file.hdr and file.img.')
truncateinexclusion = traits.Bool(argstr='-truncateinexclusion', desc="Retain segments of a streamline before entry to an exclusion ROI.")
endpointfile = File(exists=False, argstr='-endpointfile %s',
desc='Image containing endpoint ROIs. This should be an Analyze 7.5 header / image file.hdr and file.img.')
    resamplestepsize = traits.Float(argstr='-resamplestepsize %f', units='NA',
desc="Each point on a streamline is tested for entry into target, exclusion or waypoint volumes. If the length between points on a tract is not much smaller than the voxel length, then streamlines may pass through part of a voxel without being counted. To avoid this, the program resamples streamlines such that the step size is one tenth of the smallest voxel dimension in the image. This increases the size of raw or oogl streamline output and incurs some performance penalty. The resample resolution can be controlled with this option or disabled altogether by passing a negative step size or by passing the -noresample option.")
noresample = traits.Bool(argstr='-noresample', desc="Disables resampling of input streamlines. Resampling is automatically disabled if the input model is voxels.")
outputtracts = traits.Bool(argstr='-outputtracts', desc="Output streamlines in raw binary format.")
outputroot = File(exists=False, argstr='-outputroot %s',
desc='Prepended onto all output file names.')
gzip = traits.Bool(argstr='-gzip', desc="save the output image in gzip format")
outputcp = traits.Bool(argstr='-outputcp', desc="output the connection probability map (Analyze image, float)",
requires=['outputroot','seedfile'])
outputsc = traits.Bool(argstr='-outputsc', desc="output the connection probability map (raw streamlines, int)",
requires=['outputroot','seedfile'])
outputacm = traits.Bool(argstr='-outputacm', desc="output all tracts in a single connection probability map (Analyze image)",
requires=['outputroot','seedfile'])
outputcbs = traits.Bool(argstr='-outputcbs', desc="outputs connectivity-based segmentation maps; requires target outputfile",
requires=['outputroot','targetfile','seedfile'])
class ProcStreamlinesOutputSpec(TraitedSpec):
proc = File(exists=True, desc='Processed Streamlines')
outputroot_files = OutputMultiPath(File(exists=True))
class ProcStreamlines(StdOutCommandLine):
"""
Process streamline data
This program does post-processing of streamline output from track. It can either output streamlines or connection probability maps.
* http://web4.cs.ucl.ac.uk/research/medic/camino/pmwiki/pmwiki.php?n=Man.procstreamlines
Examples
--------
>>> import nipype.interfaces.camino as cmon
>>> proc = cmon.ProcStreamlines()
>>> proc.inputs.in_file = 'tract_data.Bfloat'
>>> proc.run() # doctest: +SKIP
"""
_cmd = 'procstreamlines'
input_spec=ProcStreamlinesInputSpec
output_spec=ProcStreamlinesOutputSpec
def _format_arg(self, name, spec, value):
if name == 'outputroot':
return spec.argstr % self._get_actual_outputroot(value)
return super(ProcStreamlines, self)._format_arg(name, spec, value)
def _run_interface(self, runtime):
outputroot = self.inputs.outputroot
if isdefined(outputroot):
actual_outputroot = self._get_actual_outputroot(outputroot)
base, filename, ext = split_filename(actual_outputroot)
if not os.path.exists(base):
os.makedirs(base)
new_runtime = super(ProcStreamlines, self)._run_interface(runtime)
self.outputroot_files = glob.glob(os.path.join(os.getcwd(),actual_outputroot+'*'))
return new_runtime
else:
new_runtime = super(ProcStreamlines, self)._run_interface(runtime)
return new_runtime
def _get_actual_outputroot(self, outputroot):
actual_outputroot = os.path.join('procstream_outfiles', outputroot)
return actual_outputroot
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['proc'] = os.path.abspath(self._gen_outfilename())
outputs['outputroot_files'] = self.outputroot_files
return outputs
def _gen_outfilename(self):
_, name , _ = split_filename(self.inputs.in_file)
return name + '_proc'
class TractShredderInputSpec(StdOutCommandLineInputSpec):
in_file = File(exists=True, argstr='< %s', mandatory=True, position=-2, desc='tract file')
offset = traits.Int(argstr='%d', units='NA',
desc='initial offset of offset tracts', position=1)
bunchsize = traits.Int(argstr='%d', units='NA',
desc='reads and outputs a group of bunchsize tracts', position=2)
space = traits.Int(argstr='%d', units='NA',
desc='skips space tracts', position=3)
class TractShredderOutputSpec(TraitedSpec):
shredded = File(exists=True, desc='Shredded tract file')
class TractShredder(StdOutCommandLine):
"""
Extracts bunches of streamlines.
tractshredder works in a similar way to shredder, but processes streamlines instead of scalar data.
The input is raw streamlines, in the format produced by track or procstreamlines.
The program first makes an initial offset of offset tracts. It then reads and outputs a group of
bunchsize tracts, skips space tracts, and repeats until there is no more input.
Examples
--------
>>> import nipype.interfaces.camino as cmon
>>> shred = cmon.TractShredder()
>>> shred.inputs.in_file = 'tract_data.Bfloat'
>>> shred.inputs.offset = 0
>>> shred.inputs.bunchsize = 1
>>> shred.inputs.space = 2
>>> shred.run() # doctest: +SKIP
"""
_cmd = 'tractshredder'
input_spec=TractShredderInputSpec
output_spec=TractShredderOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['shredded'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_outfilename(self):
_, name , _ = split_filename(self.inputs.in_file)
return name + "_shredded"
class DT2NIfTIInputSpec(CommandLineInputSpec):
in_file = File(exists=True, argstr='-inputfile %s', mandatory=True, position=1,
desc='tract file')
output_root = File(argstr='-outputroot %s', position=2, genfile=True,
desc='filename root prepended onto the names of three output files.')
header_file = File(exists=True, argstr='-header %s', mandatory=True, position=3,
desc=' A Nifti .nii or .hdr file containing the header information')
class DT2NIfTIOutputSpec(TraitedSpec):
dt = File(exists=True, desc='diffusion tensors in NIfTI format')
exitcode = File(exists=True, desc='exit codes from Camino reconstruction in NIfTI format')
lns0 = File(exists=True, desc='estimated lns0 from Camino reconstruction in NIfTI format')
class DT2NIfTI(CommandLine):
"""
Converts camino tensor data to NIfTI format
Reads Camino diffusion tensors, and converts them to NIFTI format as three .nii files.
"""
_cmd = 'dt2nii'
input_spec=DT2NIfTIInputSpec
output_spec=DT2NIfTIOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
output_root = self._gen_outputroot()
outputs["dt"] = os.path.abspath(output_root + "dt.nii")
outputs["exitcode"] = os.path.abspath(output_root + "exitcode.nii")
outputs["lns0"] = os.path.abspath(output_root + "lns0.nii")
return outputs
def _gen_outfilename(self):
return self._gen_outputroot()
def _gen_outputroot(self):
output_root = self.inputs.output_root
if not isdefined(output_root):
output_root = self._gen_filename('output_root')
return output_root
def _gen_filename(self, name):
if name == 'output_root':
_, filename , _ = split_filename(self.inputs.in_file)
filename = filename + "_"
return filename
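# Hedged usage sketch for DT2NIfTI, mirroring the doctest style used by the
# other interfaces in this module (file names are placeholders):
#
#     >>> import nipype.interfaces.camino as cmon
#     >>> dt2nii = cmon.DT2NIfTI()
#     >>> dt2nii.inputs.in_file = 'tensor_fitted_data.Bdouble'
#     >>> dt2nii.inputs.header_file = 'header.nii'
#     >>> dt2nii.run()  # doctest: +SKIP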
class NIfTIDT2CaminoInputSpec(StdOutCommandLineInputSpec):
in_file = File(exists=True, argstr='-inputfile %s', mandatory=True, position=1,
desc='A NIFTI-1 dataset containing diffusion tensors. The tensors are assumed to be '
'in lower-triangular order as specified by the NIFTI standard for the storage of '
'symmetric matrices. This file should be either a .nii or a .hdr file.')
s0_file = File(argstr='-s0 %s', exists=True,
desc='File containing the unweighted signal for each voxel, may be a raw binary '
'file (specify type with -inputdatatype) or a supported image file.')
lns0_file = File(argstr='-lns0 %s', exists=True,
desc='File containing the log of the unweighted signal for each voxel, may be a '
'raw binary file (specify type with -inputdatatype) or a supported image file.')
bgmask = File(argstr='-bgmask %s', exists=True,
desc='Binary valued brain / background segmentation, may be a raw binary file '
'(specify type with -maskdatatype) or a supported image file.')
scaleslope = traits.Float(argstr='-scaleslope %s',
desc='A value v in the diffusion tensor is scaled to v * s + i. This is '
'applied after any scaling specified by the input image. Default is 1.0.')
scaleinter = traits.Float(argstr='-scaleinter %s',
desc='A value v in the diffusion tensor is scaled to v * s + i. This is '
'applied after any scaling specified by the input image. Default is 0.0.')
uppertriangular = traits.Bool(argstr='-uppertriangular %s',
desc = 'Specifies input in upper-triangular (VTK style) order.')
class NIfTIDT2CaminoOutputSpec(TraitedSpec):
out_file = File(desc='diffusion tensors data in Camino format')
class NIfTIDT2Camino(CommandLine):
"""
Converts NIFTI-1 diffusion tensors to Camino format. The program reads the
NIFTI header but does not apply any spatial transformations to the data. The
NIFTI intensity scaling parameters are applied.
The output is the tensors in Camino voxel ordering: [exit, ln(S0), dxx, dxy,
dxz, dyy, dyz, dzz].
The exit code is set to 0 unless a background mask is supplied, in which case
the code is 0 in brain voxels and -1 in background voxels.
The value of ln(S0) in the output is taken from a file if one is supplied,
otherwise it is set to 0.
NOTE FOR FSL USERS - FSL's dtifit can output NIFTI tensors, but they are not
stored in the usual way (which is using NIFTI_INTENT_SYMMATRIX). FSL's
tensors follow the ITK / VTK "upper-triangular" convention, so you will need
to use the -uppertriangular option to convert these correctly.
"""
_cmd = 'niftidt2camino'
input_spec=NIfTIDT2CaminoInputSpec
output_spec=NIfTIDT2CaminoOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs["out_file"] = self._gen_filename('out_file')
return outputs
def _gen_filename(self, name):
if name == 'out_file':
_, filename , _ = split_filename(self.inputs.in_file)
return filename
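# Hedged usage sketch for NIfTIDT2Camino (file name is a placeholder; set
# uppertriangular=True for FSL dtifit tensors, as the docstring notes):
#
#     >>> import nipype.interfaces.camino as cmon
#     >>> nii2dt = cmon.NIfTIDT2Camino()
#     >>> nii2dt.inputs.in_file = 'dti_tensor.nii'
#     >>> nii2dt.inputs.uppertriangular = True
#     >>> nii2dt.run()  # doctest: +SKIP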
class AnalyzeHeaderInputSpec(StdOutCommandLineInputSpec):
in_file = File(exists=True, argstr='< %s', mandatory=True, position=1,
desc='Tensor-fitted data filename')
scheme_file = File(exists=True, argstr='%s', position=2,
desc=('Camino scheme file (b values / vectors, '
'see camino.fsl2scheme)'))
readheader = File(exists=True, argstr='-readheader %s', position=3,
desc=('Reads header information from file and prints to '
'stdout. If this option is not specified, then the '
'program writes a header based on the other '
'arguments.'))
printimagedims = File(exists=True, argstr='-printimagedims %s', position=3,
desc=('Prints image data and voxel dimensions as '
'Camino arguments and exits.'))
# How do we implement both file and enum (for the program) in one argument?
# Is this option useful anyway?
#-printprogargs <file> <prog>
#Prints data dimension (and type, if relevant) arguments for a specific
# Camino program, where prog is one of shredder, scanner2voxel,
# vcthreshselect, pdview, track.
printprogargs = File(exists=True, argstr='-printprogargs %s', position=3,
desc=('Prints data dimension (and type, if relevant) '
'arguments for a specific Camino program, where '
'prog is one of shredder, scanner2voxel, '
'vcthreshselect, pdview, track.'))
printintelbyteorder = File(exists=True, argstr='-printintelbyteorder %s',
position=3,
desc=('Prints 1 if the header is little-endian, '
'0 otherwise.'))
printbigendian = File(exists=True, argstr='-printbigendian %s', position=3,
desc=('Prints 1 if the header is big-endian, 0 '
'otherwise.'))
initfromheader = File(exists=True, argstr='-initfromheader %s', position=3,
desc=('Reads header information from file and '
'intializes a new header with the values read '
'from the file. You may replace any '
'combination of fields in the new header by '
'specifying subsequent options.'))
data_dims = traits.List(traits.Int, desc = 'data dimensions in voxels',
argstr='-datadims %s', minlen=3, maxlen=3,
units='voxels')
voxel_dims = traits.List(traits.Float, desc = 'voxel dimensions in mm',
argstr='-voxeldims %s', minlen=3, maxlen=3,
units='mm')
centre = traits.List(traits.Int, argstr='-centre %s', minlen=3, maxlen=3,
units='mm',
desc=('Voxel specifying origin of Talairach '
'coordinate system for SPM, default [0 0 0].'))
picoseed = traits.List(traits.Int, argstr='-picoseed %s', minlen=3,
maxlen=3,
desc=('Voxel specifying the seed (for PICo maps), '
'default [0 0 0].'), units='mm')
nimages = traits.Int(argstr='-nimages %d', units='NA',
desc="Number of images in the img file. Default 1.")
datatype = traits.Enum('byte', 'char', '[u]short', '[u]int', 'float',
'complex', 'double', argstr='-datatype %s',
desc=('The char datatype is 8 bit (not the 16 bit '
'char of Java), as specified by the Analyze '
'7.5 standard. The byte, ushort and uint '
'types are not part of the Analyze '
'specification but are supported by SPM.'),
mandatory=True)
offset = traits.Int(argstr='-offset %d', units='NA',
desc=('According to the Analyze 7.5 standard, this is '
'the byte offset in the .img file at which '
'voxels start. This value can be negative to '
'specify that the absolute value is applied for '
'every image in the file.'))
greylevels = traits.List(traits.Int, argstr='-gl %s', minlen=2, maxlen=2,
desc=('Minimum and maximum greylevels. Stored as '
'shorts in the header.'), units='NA')
    scaleslope = traits.Float(argstr='-scaleslope %f', units='NA',
desc=('Intensities in the image are scaled by '
'this factor by SPM and MRICro. Default is '
'1.0.'))
    scaleinter = traits.Float(argstr='-scaleinter %f', units='NA',
desc=('Constant to add to the image intensities. '
'Used by SPM and MRIcro.'))
description = traits.String(argstr='-description %s',
desc=('Short description - No spaces, max '
'length 79 bytes. Will be null '
'terminated automatically.'))
intelbyteorder = traits.Bool(argstr='-intelbyteorder',
desc=("Write header in intel byte order "
"(little-endian)."))
networkbyteorder = traits.Bool(argstr='-networkbyteorder',
desc=("Write header in network byte order "
"(big-endian). This is the default "
"for new headers."))
class AnalyzeHeaderOutputSpec(TraitedSpec):
header = File(exists=True, desc='Analyze header')
class AnalyzeHeader(StdOutCommandLine):
"""
Create or read an Analyze 7.5 header file.
    Analyze image header; provides support for the most common header fields.
Some fields, such as patient_id, are not currently supported. The program allows
three nonstandard options: the field image_dimension.funused1 is the image scale.
The intensity of each pixel in the associated .img file is (image value from file) * scale.
    Also, the origin of the Talairach coordinates (midline of the anterior commissure) is encoded
    in the field data_history.originator. These changes are included for compatibility with SPM.
All headers written with this program are big endian by default.
Example
-------
>>> import nipype.interfaces.camino as cmon
>>> hdr = cmon.AnalyzeHeader()
>>> hdr.inputs.in_file = 'tensor_fitted_data.Bdouble'
>>> hdr.inputs.scheme_file = 'A.scheme'
>>> hdr.inputs.data_dims = [256,256,256]
>>> hdr.inputs.voxel_dims = [1,1,1]
>>> hdr.run() # doctest: +SKIP
"""
_cmd = 'analyzeheader'
input_spec=AnalyzeHeaderInputSpec
output_spec=AnalyzeHeaderOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['header'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_outfilename(self):
_, name , _ = split_filename(self.inputs.in_file)
return name + ".hdr"
class ShredderInputSpec(StdOutCommandLineInputSpec):
in_file = File(exists=True, argstr='< %s', mandatory=True, position=-2, desc='raw binary data file')
offset = traits.Int(argstr='%d', units='NA',
desc='initial offset of offset bytes', position=1)
chunksize = traits.Int(argstr='%d', units='NA',
desc='reads and outputs a chunk of chunksize bytes', position=2)
space = traits.Int(argstr='%d', units='NA',
desc='skips space bytes', position=3)
class ShredderOutputSpec(TraitedSpec):
shredded = File(exists=True, desc='Shredded binary data file')
class Shredder(StdOutCommandLine):
"""
Extracts periodic chunks from a data stream.
Shredder makes an initial offset of offset bytes. It then reads and outputs
chunksize bytes, skips space bytes, and repeats until there is no more input.
If the chunksize is negative, chunks of size |chunksize| are read and the
byte ordering of each chunk is reversed. The whole chunk will be reversed, so
the chunk must be the same size as the data type, otherwise the order of the
values in the chunk, as well as their endianness, will be reversed.
Examples
--------
>>> import nipype.interfaces.camino as cam
>>> shred = cam.Shredder()
>>> shred.inputs.in_file = 'SubjectA.Bfloat'
>>> shred.inputs.offset = 0
>>> shred.inputs.chunksize = 1
>>> shred.inputs.space = 2
>>> shred.run() # doctest: +SKIP
"""
_cmd = 'shredder'
input_spec=ShredderInputSpec
output_spec=ShredderOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
        # the key must match the 'shredded' field declared in ShredderOutputSpec
        outputs['shredded'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_outfilename(self):
_, name , _ = split_filename(self.inputs.in_file)
return name + "_shredded"
|
dmordom/nipype
|
nipype/interfaces/camino/convert.py
|
Python
|
bsd-3-clause
| 31,722
|
[
"VTK"
] |
2789997682af30eaf341108d68dead7a9723877fed10fd21c6881f059e00faa0
|
"""
CGI script handling GitHub webhooks.
This script mainly takes care of input and output
and hands off most of the actual work to the build module.
"""
import sys
import json
import logging
import os
import io
from . import github, build, config
import traceback
from lesson_builder import misc
from .misc import Maybe
__author__ = 'Justus Adam'
__version__ = '0.1'
# has to be the directory of this program's git repo root
_app_repo_root = APP_DIRECTORY = config.BASE_DIRECTORY
THIS_REPO_NAME = 'fsr/lesson-builder'
LOGGER = logging.getLogger(__name__)
_default_data_directory = '.data'
def relative(*args, to=APP_DIRECTORY):
"""
path relative to the APP_DIRECTORY or any other
convenience wrapper around os.path.join
:param args:
:param to:
:return:
"""
return os.path.abspath(os.path.join(to, *args))
def force_cache(func):
cached = Maybe()
def wrapper(*args, **kwargs):
if cached.none():
cached.set(func(*args, **kwargs))
return cached.get()
return wrapper
REPOS_DIRECTORY = relative('repos')
WATCH_CONF_NAME = 'watch_conf.json'
SKIP_STRINGS = {'[skip build]', '[build skip]'}
config.DEBUG = False
def apply(function, iterable):
"""
Apply function to all elements of iterable
:param function:
:param iterable:
:return:
"""
for i in iterable:
function(i)
@force_cache
def get_watch_conf():
"""
Open, read and parse the watch config
:return: python dict
"""
conf_path = relative(WATCH_CONF_NAME)
with open(conf_path) as f:
return json.load(f)
def write_watch_conf(data):
"""
Write python dicts/lists to the watch config file
:param data: data to write
:return: None
"""
conf_path = relative(WATCH_CONF_NAME)
with open(conf_path, mode='w') as f:
json.dump(data, f, indent=4)
def is_known(name, watch_conf=None):
"""
Check whether a repository name is in the watch conf.
you can provide the watch conf yourself if you have
read it already to avoid the IO of loading it in this function
:param name:
:param watch_conf:
:return:
"""
if watch_conf is None:
watch_conf = get_watch_conf()
known = watch_conf.get('watched', ())
mapped = {
a['name'] for a in known
}
return name in mapped
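# Hedged sketch of the watch conf shape this module expects, inferred from the
# lookups in is_known() and handle_push(); any extra keys are assumptions:
#
#     {
#         "data_directory": ".data",
#         "watched": [
#             {"name": "owner/repo", "directory": "repo_dir"}
#         ]
#     }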
def handle_push(event, raw_data):
"""
Handle the payload received and return a somewhat useful response
:param event: github.Event instance
:param raw_data: raw bytes of the message
:return:
"""
payload = event.payload
repo = event.repo
if repo.name in special_actions:
return special_actions[repo.name](repo)
known = get_watch_conf().get('watched', ())
mapped = {
a['name']: a for a in known
}
if repo.name not in mapped:
LOGGER.error(
'Repository {} not on watchlist'.format(repo.name)
)
return "Repository not on watchlist"
for skip_string in SKIP_STRINGS:
if skip_string in payload['head_commit']['message']:
LOGGER.info(
'Skipping build {} [commit_message]'.format(
repo.name
)
)
return "Commit message demands skip"
if not github.verify(
mapped[repo.name],
raw_data,
get_header_soft(SIGNATURE),
os.environ['HTTP_USER_AGENT']
):
return "Unknown requester"
LOGGER.info(
'Started build for {}'.format(repo.name)
)
repo_path = relative(mapped[repo.name]['directory'], to=REPOS_DIRECTORY)
if not os.path.exists(repo_path):
os.makedirs(repo_path)
code = try_clone(repo, repo_path)
else:
code = try_pull(repo, repo_path)
if code != 0:
LOGGER.error(
'Clone for repository {} in directory {} failed with {}'.format(
repo.name, repo_path, code
)
)
return "Git operations failed"
else:
LOGGER.info(build.build_and_report(repo_path))
return "Build finished"
def update(r):
"""
Update this software using git
:param r:
:return:
"""
p = r.apull(_app_repo_root)
code = p.wait()
if code != 0:
LOGGER.critical(
'Update failed with code {}'.format(code)
)
LOGGER.debug(
misc.error_capture_format(
('stderr', p.stderr.read().decode())
)
)
return 'Update failed'
else:
LOGGER.info(
'Update successful'
)
return "Update successful"
def try_clone(repo, path):
"""
Clone a repository and wait for it to finish
:param repo: repository
:param path:
:return: returncode
"""
return repo.aclone(path).wait()
def try_pull(repo, path):
"""
Pull the repository and clone it if it fails
:param repo:
:param path:
:return: returncode
"""
code = repo.apull(path).wait()
if code != 0:
code = try_clone(repo, path)
return code
def handle_ping(event):
hook_id = event.payload['hook_id']
repo_name = event.repo.name
watch_conf = get_watch_conf()
directory = watch_conf.get('data_directory', _default_data_directory)
file_path = relative(directory, 'hook_{}.conf.json'.format(hook_id))
    # mode 'w' creates the file if it does not exist and truncates it otherwise,
    # so no existence check is needed
    with open(file_path, mode='w') as file:
json.dump(event.payload['hook'], fp=file, indent=4)
LOGGER.info(
'Received ping event:'
)
LOGGER.debug(
'repository: {}\n'
'hook_id: {}\n'
'data saved in {}\n'
'watched: {}'.format(
repo_name, hook_id, file_path,
is_known(repo_name, watch_conf) or repo_name in special_actions
)
)
return 'Ping Received'
def do(payload):
"""
Do what needs to be done
parse and handle the payload, print the results
:param payload:
:return: None
"""
    data = json.loads(payload)
    event = github.Event(
        type=get_header(EVENT_TYPE),
        repo=github.GitRepository.from_json(data['repository']),
        payload=data
    )
    if event.type == github.PUSH:
        # signature verification needs the raw request body, not the parsed dict
        return handle_push(event, payload)
elif event.type == github.PING:
return handle_ping(event)
else:
LOGGER.error(
'Unknown event {}\n'.format(event.type)
)
LOGGER.debug(
misc.error_capture_format(
('payload', event.payload)
)
)
ok_html_headers = 'Content-Type: text/html; charset=utf-8'
ok_handled_header = 'Content-Type: text/plain; charset=utf-8'
ok_format_string = """
<html>
<head>
<style type="text/css">
body {{
font-family: monaco, sans-serif;
}}
#box {{
margin: 20px auto;
background-color: rgb(0, 15, 169);
color: white;
width: 600px;
height: auto;
padding: 20px 40px;
}}
#box a {{
color:orange;
}}
</style>
{head}
</head>
<body>
{body}
</body>
</html>
"""
hello = """
<div id="box">
<h1>I am the webhook receiver</h1>
<p>This is a hello message for attempts at reaching me with a get request.</p>
<p>My purpose is to be an endpoint to some github webhooks
for an automated TeX builder.</p>
<p>If you'd like to learn more about the project visit me on
<a href="https://github.com/{}">Github</a>.</p>
</div>
""".format(THIS_REPO_NAME)
def ok(head='', body=''):
return ok_format_string.format(
head=head,
body=body
)
CONTENT_TYPE = 'ct'
EVENT_TYPE = 'event'
SIGNATURE = 'signature'
aliases = {
EVENT_TYPE: ('X-GitHub-Event', 'X_GITHUB_EVENT', 'HTTP_X_GITHUB_EVENT'),
CONTENT_TYPE: ('Content-Type', 'content-type', 'CONTENT_TYPE'),
SIGNATURE: ('X-Hub-Signature', 'HTTP_X_HUB_SIGNATURE')
}
special_actions = {
THIS_REPO_NAME: update
}
def get_header(name):
header_aliases = aliases[name]
for alias in header_aliases:
if alias in os.environ:
return os.environ[alias]
else:
raise KeyError(
'For key {}\n{}'.format(
name,
misc.error_capture_format(
('environ', os.environ)
)
)
)
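# Note: under CGI, HTTP request headers are exposed as environment variables
# with an HTTP_ prefix, dashes replaced by underscores and letters upper-cased
# (e.g. X-GitHub-Event -> HTTP_X_GITHUB_EVENT), which is why the alias lists
# above carry both spellings.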
def get_header_soft(name, default=None):
header_aliases = aliases[name]
for alias in header_aliases:
if alias in os.environ:
return os.environ[alias]
else:
return default
def handle_request():
"""Main function"""
# we return ok in any case
# we don't necessarily want github to know of our problems
is_hook = False
try:
# _, ce = cgi.parse_header(get_header(CONTENT_TYPE))
payload = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8').read()
if not payload:
message = ok(body=hello)
is_hook = False
else:
message = do(payload)
is_hook = True
except Exception as e:
        # catch any exception and log it here, so its details don't accidentally end up in the response
LOGGER.critical(
'Caught build exception {}'.format(
repr(e)
)
)
LOGGER.debug(
misc.error_capture_format(
('traceback', '\n'.join(traceback.format_tb(sys.exc_info()[2])))
)
)
message = 'Exception occurred, build failed'
print(ok_handled_header if is_hook else ok_html_headers)
print()
print(message)
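if __name__ == '__main__':
    # Entry-point sketch, assuming the script is executed directly as a CGI
    # program; handle_request() reads the request body from stdin and prints
    # a CGI response (the original deployment may invoke it differently).
    handle_request()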
|
fsr/lesson-builder
|
lesson_builder/webhook_receiver_proxy.py
|
Python
|
mit
| 9,568
|
[
"VisIt"
] |
b160411407d4bb3cfebfd8422426b59efd0aadd064b21c5c9386e937a5dfabad
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Checks that RamanFiber propagates the spectral information properly. In this way, the RamanSolver and the NliSolver
are also tested.
"""
from pathlib import Path
from pandas import read_csv
from numpy.testing import assert_allclose
import pytest
from gnpy.core.info import create_input_spectral_information
from gnpy.core.elements import RamanFiber
from gnpy.core.parameters import SimParams
from gnpy.tools.json_io import load_json
TEST_DIR = Path(__file__).parent
@pytest.mark.usefixtures('set_sim_params')
def test_raman_fiber():
""" Test the accuracy of propagating the RamanFiber."""
# spectral information generation
power = 1e-3
eqpt_params = load_json(TEST_DIR / 'data' / 'eqpt_config.json')
spectral_info_params = eqpt_params['SI'][0]
spectral_info_params.pop('power_dbm')
spectral_info_params.pop('power_range_db')
spectral_info_params.pop('tx_osnr')
spectral_info_params.pop('sys_margins')
spectral_info_input = create_input_spectral_information(power=power, **spectral_info_params)
SimParams.set_params(load_json(TEST_DIR / 'data' / 'sim_params.json'))
fiber = RamanFiber(**load_json(TEST_DIR / 'data' / 'raman_fiber_config.json'))
# propagation
spectral_info_out = fiber(spectral_info_input)
p_signal = [carrier.power.signal for carrier in spectral_info_out.carriers]
p_ase = [carrier.power.ase for carrier in spectral_info_out.carriers]
p_nli = [carrier.power.nli for carrier in spectral_info_out.carriers]
expected_results = read_csv(TEST_DIR / 'data' / 'test_science_utils_expected_results.csv')
assert_allclose(p_signal, expected_results['signal'], rtol=1e-3)
assert_allclose(p_ase, expected_results['ase'], rtol=1e-3)
assert_allclose(p_nli, expected_results['nli'], rtol=1e-3)
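# To run just this check locally, something like the following should work,
# assuming a development install of gnpy with its bundled test data:
#
#   pytest tests/test_science_utils.py::test_raman_fiber -q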
|
Telecominfraproject/gnpy
|
tests/test_science_utils.py
|
Python
|
bsd-3-clause
| 1,839
|
[
"ASE"
] |
5e3c42979714d19fcdb02854049ae839454ab10252200fc2ca20bef44b7cfac6
|
##
# Copyright 2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Bowtie, implemented as an easyblock
@author: Cedric Laczny (Uni.Lu)
@author: Fotis Georgatos (Uni.Lu)
@author: Kenneth Hoste (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
class EB_Bowtie(ConfigureMake):
"""
    Support for building bowtie (fast and sensitive read alignment)
"""
def configure_step(self):
"""
Set compilers in buildopts, there is no configure script.
"""
self.cfg.update('buildopts', 'CC="%s" CPP="%s"' % (os.getenv('CC'), os.getenv('CXX')))
def install_step(self):
"""
Install by copying files to install dir
"""
srcdir = self.cfg['start_dir']
destdir = os.path.join(self.installdir, 'bin')
srcfile = None
try:
os.makedirs(destdir)
for filename in ['bowtie-build', 'bowtie', 'bowtie-inspect']:
srcfile = os.path.join(srcdir, filename)
shutil.copy2(srcfile, destdir)
except (IOError, OSError), err:
self.log.error("Copying %s to installation dir %s failed: %s" % (srcfile, destdir, err))
def sanity_check_step(self):
"""Custom sanity check for Bowtie."""
custom_paths = {
'files': ['bin/bowtie', 'bin/bowtie-build', 'bin/bowtie-inspect'],
'dirs': []
}
super(EB_Bowtie, self).sanity_check_step(custom_paths=custom_paths)
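# Note: EasyBuild resolves easyblocks by class name, expecting "EB_" followed
# by the encoded software name -- here EB_Bowtie for "Bowtie" -- so the class
# name above must match the software name used in the easyconfig.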
|
geimer/easybuild-easyblocks
|
easybuild/easyblocks/b/bowtie.py
|
Python
|
gpl-2.0
| 2,601
|
[
"Bowtie"
] |
fb2ef3198d6c210721db3eb76793e6e2d9a108b94741a664a7bfdc3821827c24
|
########################################################################
#
# B a r a K u d a
#
# Ploting functions and utilities
#
## Authors:
# --------
# 2010-2015: Laurent Brodeau (original primitive code)
# 2016: Saeed Falahat (update to fancy grown-up coding!) :D
#
#######################################################################
import os
import sys
import numpy as nmp
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
# Some defaults:
#WDTH_DEF = 10.
#HGHT_DEF = 4.
WDTH_DEF = 9.
HGHT_DEF = 3.6
FIG_SIZE_DEF = ( WDTH_DEF , HGHT_DEF )
RAT_XY = WDTH_DEF/10.
DPI_DEF = 120
AXES_DEF = [0.09, 0.082, 0.89, 0.84]
# Colors for line: (https://www.daftlogic.com/projects-hex-colour-tester.htm)
b_blu = '#2C558A'
b_red = '#AD0000'
b_gre = '#3A783E' ; # b_gre = '#42BD82'
b_prp = '#8C008C'
b_org = '#ED7C4C'
v_dflt_colors = [b_blu, b_red, b_gre, b_org, b_prp, 'pink', '0.5', 'b', 'g', 'brown', 'orange',
'0.25','0.75','k' ]
nmax_colors = len(v_dflt_colors)
# Some projections to use with BaseMap:
#
# zone PROJ llcrnrlon llcrnrlat urcrnrlon urcrnrlat lat1 lon0 mer/par continent-res
# (lcc = Lambert conformal conic)
projection_def = [
['nseas', 'lcc', -55., 40., 55., 75., 60., -20., 10., 'l' ], # Nordic seas
['natarct', 'lcc', -60., 40., 80., 72., 55., -32., 10., 'l' ], # NATL + Arctic
['labir', 'lcc', -62., 48., -10., 75., 50., -30., 5., 'l' ],
['labsp', 'lcc', -60., 48., 50., 75.5, 50., -30., 10., 'l' ],
['npol', 'stere', -75., 45., 100., 60., 80., -30., 10., 'l' ],
['npol2', 'stere', -55., 40., 145., 40., 80., -5., 10., 'l' ], # North Pole
['spstere', 'stere', 0., 0., 0., 0., -48., 90., 10., 'l' ], # South Pole Default matplotlib!
['matl' , 'cyl', -82.,-21., 12., 79., 30., -30., 15., 'l' ], # Nordic seas
['atmed', 'lcc', -18., 33., -2., 42., 30., -10., 5., 'h' ],
['kav7' , 'kav', 0., 0., 0., 0., 0., 0., 0., 'l' ] ] # global map-monde
class plot :
    ''' This class encapsulates all the plot routines.
    In order to use it, type:
        plot(<function name without the __ prefix>)(<all the arguments of the function>)
    For example, for __vert_section:
        plot("vert_section")(VX, VZ, XF, XMSK, rmin, rmax, dc, lkcont=True, cpal='jet',
                             xmin=-80., xmax=85., dx=5, cfignm='fig', cbunit='', cxunit=' ',
                             zmin = 0., zmax = 5000., l_zlog=False, cfig_type='png',
                             czunit=' ', ctitle=' ', lforce_lim=False, i_cb_subsamp=1, l_z_increase=False )
    All the functions are prefixed with a double underscore __ so that they become private
    members of the plot class and cannot be accessed directly from outside of it. That is,
    you need to call them through the call wrapper, as seen below in __call__.
    '''
def __init__(self,splot) :
self.splot = splot
def __call__(self,*args, **kw) :
if "_"+self.__class__.__name__+ "__" + self.splot in self.__class__.__dict__.keys() :
self.__class__.__dict__["_"+self.__class__.__name__+ "__" + self.splot](self,*args, **kw)
else :
print "function " + "__" + self.splot + " does not exist"
sys.exit()
# Functions:
def __vert_section(self, VX, VZ, XF, XMSK, rmin, rmax, dc, lkcont=False, cpal='jet', lzonal=True,
xmin=-80., xmax=85., dx=5, cfignm='fig', cbunit='', cxunit=' ',
zmin = 0., zmax = 5000., l_zlog=False, cfig_type='png',
czunit=' ', ctitle=' ', lforce_lim=False, i_cb_subsamp=1, l_z_increase=False ):
import barakuda_colmap as bcm
font_ttl, font_xylb, font_clb = __font_unity__(fig_dpi=DPI_DEF)
zVZ = __prepare_z_log_axis__(l_zlog, VZ)
if lforce_lim: __force_min_and_max__(rmin, rmax, XF)
i_x_sbsmp = 1
dxgap = nmp.amax(VX) - nmp.amin(VX)
if dxgap > 140.: dx = 5.; i_x_sbsmp = 2
if dxgap < 50.: dx = 2.
if dxgap < 25.: dx = 1.
cbgcol='w'
if not lkcont:
cbgcol='k'
XF = nmp.ma.masked_where(XMSK == 0, XF) ; # Masking where mask is zero!
fig = plt.figure(num = 1, figsize=(WDTH_DEF , RAT_XY*5.), dpi=None, facecolor='w', edgecolor='k')
ax = plt.axes([0.07, 0.06, 0.98, 0.88], axisbg=cbgcol)
vc = __vcontour__(rmin, rmax, dc)
# Colormap:
colmap = bcm.chose_colmap(cpal)
pal_norm = colors.Normalize(vmin = rmin, vmax = rmax, clip = False)
if lkcont:
from barakuda_tool import drown
Xtmp = nmp.zeros(nmp.shape(XF))
Xtmp[:,:] = XF[:,:]
drown(Xtmp, XMSK, k_ew=2, nb_max_inc=5, nb_smooth=5)
cf = plt.contourf(VX, zVZ, Xtmp, vc, cmap=colmap, norm=pal_norm, zorder=0.1)
plt.contour( VX, zVZ, Xtmp, vc, colors='k', linewidths=0.2, zorder=0.5)
del Xtmp
else:
cf = plt.pcolormesh(VX, zVZ, XF, cmap=colmap, norm=pal_norm)
# Colorbar:
__nice_colorbar__(cf, plt, vc, i_sbsmp=i_cb_subsamp, lkc=lkcont, cunit=cbunit, cfont=font_clb, fontsize=10)
# Masking "rock":
if lkcont:
pal_lsm = bcm.chose_colmap('mask')
norm_lsm = colors.Normalize(vmin = 0., vmax = 1., clip = False)
prock = nmp.ma.masked_where(XMSK > 0.5, XMSK)
cm = plt.pcolormesh(VX, zVZ, prock, cmap=pal_lsm, norm=norm_lsm, zorder=1)
# X-axis:
if lzonal:
__nice_longitude_axis__(ax, plt, xmin, xmax, dx*i_x_sbsmp, axt='x')
else:
__nice_latitude_axis__(ax, plt, xmin, xmax, dx*i_x_sbsmp, axt='x')
# Depth-axis:
__nice_depth_axis__(ax, plt, zmin, zmax, l_log=l_zlog, l_z_inc=l_z_increase, cunit=czunit, cfont=font_xylb)
plt.title(ctitle, **font_ttl)
plt.savefig(cfignm+'.'+cfig_type, dpi=DPI_DEF, orientation='portrait', transparent=False) ; #vert_section
print ' => '+cfignm+'.'+cfig_type+' created!'
plt.close(1)
return
def __2d(self,VX, VY, XF, XMSK, rmin, rmax, dc, corca='ORCA1', lkcont=True, cpal='jet',
cfignm='fig', cbunit='', ctitle=' ', lforce_lim=False, i_cb_subsamp=1, cb_orient='vertical',
cfig_type='pdf', lat_min=-75., lat_max=75., lpix=False, vcont_spec = []):
#
        # Plot a field given on ORCA coordinates nicely on a 2D world map, without using any projection
#
# if VX = [0] and VY = [0] => ignoring lon and lat...
import barakuda_tool as bt
import barakuda_orca as bo
import barakuda_colmap as bcm
font_ttl, font_xylb, font_clb = __font_unity__(fig_dpi=DPI_DEF)
i_lat_lon = 1
if len(VX) == 1 or len(VY) == 1: i_lat_lon = 0 ; # no long. and lat. provided !
if corca[:5] == 'eORCA':
# don't know how to lat-lon 2d plot eORCA
# so just plot without projections
i_lat_lon = 0
# First drowning the field:
if not lpix:
# Don't want to modify XF array, working with XFtmp:
[ny, nx] = nmp.shape(XF)
XFtmp = nmp.zeros((ny,nx))
XFtmp[:,:] = XF[:,:]
bt.drown(XFtmp, XMSK, k_ew=2, nb_max_inc=20, nb_smooth=10)
else:
XFtmp = XF
ilon_ext = 32
if lforce_lim: __force_min_and_max__(rmin, rmax, XFtmp)
XMSK0 = bo.lon_reorg_orca(XMSK, VX, ilon_ext=ilon_ext)
XF0 = bo.lon_reorg_orca(XFtmp, VX, ilon_ext=ilon_ext)
if i_lat_lon == 1:
[ny, nx] = nmp.shape(XF0)
dlong = abs(VX[11] - VX[10])
VX0 = nmp.arange(0.,nx)
VX0 = VX0*dlong + dlong/2.
if i_lat_lon == 1:
vert_rat = (lat_max - lat_min)/(75. + 75.)
fig_size = (WDTH_DEF , RAT_XY*4.76*vert_rat) ; #lolo 4.76 => 1080x520 when on 77S->75N
else:
fig_size = (WDTH_DEF , RAT_XY*float(nx)/float(ny)*5.)
# FIGURE
# ~~~~~~
fig = plt.figure(num = 1, figsize=fig_size, dpi=None, facecolor='w', edgecolor='k')
ax = plt.axes([0.05, 0.06, 1., 0.86], axisbg = '0.5')
vc = __vcontour__(rmin, rmax, dc)
# Colmap:
colmap = bcm.chose_colmap(cpal)
pal_norm = colors.Normalize(vmin = rmin, vmax = rmax, clip = False)
if lpix:
# Pixelized plot:
XF0 = nmp.ma.masked_where(XMSK0 == 0, XF0)
if i_lat_lon == 1:
cf = plt.pcolormesh(VX0, VY, XF0, cmap = colmap, norm = pal_norm)
else:
cf = plt.imshow( XF0, cmap = colmap, norm = pal_norm)
else:
# Contour fill plot:
if i_lat_lon == 1:
cf = plt.contourf(VX0, VY, XF0, vc, cmap = colmap, norm = pal_norm)
else:
cf = plt.contourf( XF0, vc, cmap = colmap, norm = pal_norm)
for c in cf.collections: c.set_zorder(0.15)
if lkcont:
if i_lat_lon == 1:
cfk = plt.contour(VX0, VY, XF0, vc, colors='k', linewidths = 0.2)
else:
cfk = plt.contour( XF0, vc, colors='k', linewidths = 0.2)
for c in cfk.collections: c.set_zorder(0.25)
        # contour for specific values on the plotted field:
if len(vcont_spec) >= 1:
if i_lat_lon == 1:
cfs = plt.contour(VX0, VY, XF0, vcont_spec, colors='black', linewidths = 1.5)
else:
cfs = plt.contour( XF0, vcont_spec, colors='black', linewidths = 1.5)
plt.clabel(cfs, inline=1, fmt='%4.1f', fontsize=10)
for c in cfs.collections: c.set_zorder(0.35)
if not lpix:
            # Putting the land-sea mask on top of the current plot: cleaner than initial masking,
            # because it won't influence the contours, which are already drawn
            # (the field needs to be DROWNED prior to this though!!!)
idx_land = nmp.where(XMSK0[:,:] < 0.5)
XF0 = nmp.ma.masked_where(XMSK0[:,:] > 0.5, XF0)
XF0[idx_land] = 1000.
if i_lat_lon == 1:
cf0 = plt.pcolormesh(VX0, VY, XF0, cmap=bcm.chose_colmap("mask"))
else:
cf0 = plt.imshow( XF0, cmap=bcm.chose_colmap("mask"))
# Colorbar:
ifsize = 14.*100./float(DPI_DEF)
if i_lat_lon == 1: ifsize = int(ifsize*vert_rat); ifsize=max(ifsize,6)
__nice_colorbar__(cf, plt, vc, i_sbsmp=i_cb_subsamp, lkc=lkcont,
cb_or=cb_orient, cunit=cbunit, cfont=font_clb, fontsize=ifsize)
# X and Y nice ticks:
if i_lat_lon == 1:
[vvx, vvy, clon, clat] = __name_coor_ticks__(lon_ext=ilon_ext);
plt.yticks(vvy,clat) ; plt.xticks(vvx,clon)
plt.axis([ 0., 360.+ilon_ext-2., lat_min, lat_max])
else:
#ax.set_xlim(0., 360.+ilon_ext-2.)
plt.axis([ 0., float(nx)+ilon_ext-2., 0, float(ny)])
plt.title(ctitle, **font_ttl)
plt.savefig(cfignm+'.'+cfig_type, dpi=DPI_DEF, orientation='portrait', transparent=False) ; #2d
print ' => '+cfignm+'.'+cfig_type+' created!'
plt.close(1)
del XFtmp, XF0
return
def __2d_reg(self,VX, VY, XF, XMSK, rmin, rmax, dc, lkcont=False, cpal='jet',
cfignm='fig', cfig_type='pdf', cbunit=' ', ctitle='',
cb_orient='vertical', lat_min=-77., lat_max=77., i_cb_subsamp=1,
lpix=False, l_continent_pixel=True, colorbar_fs=14,
col_min='k', col_max='k', vcont_spec = []):
import barakuda_tool as bt
import barakuda_colmap as bcm
font_ttl, font_xylb, font_clb = __font_unity__()
# Don't want to modify XF array, working with XFtmp:
[ny, nx] = nmp.shape(XF)
XFtmp = nmp.zeros(ny*nx) ; XFtmp.shape = [ny, nx]
XFtmp[:,:] = XF[:,:]
# First drowning the field:
bt.drown(XFtmp, XMSK, k_ew=0, nb_max_inc=20, nb_smooth=10)
iskp = 28 ; iext = 32
# Extending / longitude:
VXe = bt.extend_domain(VX, iext, skp_west_deg=iskp) ; nxe = len(VXe)
XFe = bt.extend_domain(XFtmp, iext, skp_west_deg=iskp)
XMSKe = bt.extend_domain(XMSK, iext, skp_west_deg=iskp)
# FIGURE
rat_vert = 1. / ( ( 77. + 77. ) / ( lat_max - lat_min ) )
if cb_orient == 'horizontal':
# Horizontal colorbar!
if ctitle == '':
fig = plt.figure(num = 1, figsize=(12.4,7.*rat_vert), dpi=None, facecolor='w', edgecolor='k')
ax = plt.axes([0.05, -0.01, 0.93, 1.], axisbg = 'white')
else:
fig = plt.figure(num = 1, figsize=(12.4,7.4*rat_vert), dpi=None, facecolor='w', edgecolor='k')
ax = plt.axes([0.05, -0.01, 0.93, 0.96], axisbg = 'white')
else:
# Vertical colorbar!
fig = plt.figure(num = 1, figsize=(12.4,6.*rat_vert), dpi=None, facecolor='w', edgecolor='k')
ax = plt.axes([0.046, 0.06, 1.02, 0.88], axisbg = 'white')
vc = __vcontour__(rmin, rmax, dc)
# Colmap:
pal_norm = colors.Normalize(vmin = rmin, vmax = rmax, clip = False)
mpl.rcParams['contour.negative_linestyle'] = 'solid'
plt.contour.negative_linestyle='solid'
if lpix:
cf = plt.pcolormesh(VXe, VY, XFe, cmap = cpal, norm = pal_norm)
else:
cf = plt.contourf(VXe, VY, XFe, vc, cmap = cpal, norm = pal_norm, extend="both")
for c in cf.collections: c.set_zorder(0.15)
cf.cmap.set_under(col_min)
cf.cmap.set_over(col_max)
        # contour for specific values on the plotted field:
if len(vcont_spec) >= 1:
cfs = plt.contour(VXe, VY, XFe, vcont_spec, colors='w', linewidths = 1.)
#plt.clabel(cfs, inline=1, fmt='%4.1f', fontsize=12)
if lkcont:
cfk = plt.contour(VXe, VY, XFe, vc, colors='k', linewidths = 0.2)
for c in cfk.collections: c.set_zorder(0.25)
        # Putting the land-sea mask on top of the current plot: cleaner than initial masking,
        # because it won't influence the contours, which are already drawn
        # (the field needs to be DROWNED prior to this though!!!)
if l_continent_pixel:
idx_land = nmp.where(XMSKe[:,:] < 0.5)
XFe = nmp.ma.masked_where(XMSKe[:,:] > 0.5, XFe)
XFe[idx_land] = 1000.
cf0 = plt.pcolor(VXe, VY, XFe, cmap = bcm.chose_colmap("mask"))
else:
# Masking with contour rather than pixel:
cf0 = plt.contourf(VXe, VY, XMSKe, [ 0., 0.1 ], cmap = bcm.chose_colmap("mask"))
for c in cf0.collections: c.set_zorder(5)
plt.contour(VXe, VY, XMSKe, [ 0.25 ], colors='k', linewidths = 1.)
# COLOR BAR:
__nice_colorbar__(cf, plt, vc, i_sbsmp=i_cb_subsamp, lkc=lkcont, cb_or=cb_orient, cunit=cbunit, cfont=font_clb, fontsize=colorbar_fs)
# X and Y nice ticks:
print "VXe[0], VXe[nxe-1] =>", VXe[0], VXe[nxe-1]
rlon_min = round(VXe[0],0) ; rlon_max = round(VXe[nxe-1],0)
print "rlon_min, rlon_max =>", rlon_min, rlon_max
[vvx, vvy, clon, clat] = __name_coor_ticks__(lon_min=rlon_min, lon_max=rlon_max, dlon=30., lon_ext=iext-iskp)
plt.yticks(vvy,clat) ; plt.xticks(vvx,clon)
plt.axis([ rlon_min, rlon_max, lat_min, lat_max])
if ctitle != ' ': plt.title(ctitle, **font_ttl)
plt.savefig(cfignm+'.'+cfig_type, dpi=110, orientation='portrait', transparent=False)
print ' => '+cfignm+'.'+cfig_type+' created!'
plt.close(1)
return
def __2d_box(self,XF, XMSK, rmin, rmax, dc, lkcont=True,
cpal='jet', cfignm='fig', cbunit='', ctitle=' ', lforce_lim=False,
i_cb_subsamp=1, cfig_type='pdf', lcontours=True,
x_offset=0., y_offset=0., vcont_spec = [], lcont_mask=False):
import barakuda_colmap as bcm
if lforce_lim: __force_min_and_max__(rmin, rmax, XF)
[ ny , nx ] = XF.shape
vert_rat = float(ny)/float(nx)
print "Vert. ratio, nx, ny =", vert_rat, nx, ny
# Masking field:
if lcontours:
            idxm = nmp.where(XMSK[:,:] == 0); XF[idxm] = -9999.9 # NaN is what breaks things!!!
else:
XF = nmp.ma.masked_where(XMSK == 0, XF)
font_ttl, font_xylb, font_clb = __font_unity__()
# FIGURE
# ~~~~~~
fig = plt.figure(num = 1, figsize=(7.,6.*vert_rat), dpi=None, facecolor='w', edgecolor='k')
ax = plt.axes([0.07, 0.05, 0.9, 0.9], axisbg = 'gray')
vc = __vcontour__(rmin, rmax, dc); #print vc, '\n'
# Colmap:
colmap = bcm.chose_colmap(cpal)
pal_norm = colors.Normalize(vmin = rmin, vmax = rmax, clip = False)
if lcontours:
cf = plt.contourf(XF, vc, cmap = colmap, norm = pal_norm)
for c in cf.collections: c.set_zorder(0.5)
else:
cf = plt.pcolor(XF, cmap = colmap, norm = pal_norm)
# contour for specific values on the ploted field:
if len(vcont_spec) >= 1:
cfs = plt.contour(XF, vcont_spec, colors='white', linewidths = 1.)
plt.clabel(cfs, inline=1, fmt='%4.1f', fontsize=10)
if lkcont:
cfk = plt.contour(XF, vc, colors='k', linewidths = 0.1)
for c in cfk.collections: c.set_zorder(0.75)
# contour for continents:
if lcontours and lcont_mask:
cfm = plt.contour(XMSK, [ 0.7 ], colors='k', linewidths = 1.)
for c in cfm.collections: c.set_zorder(1.)
# Colorbar:
__nice_colorbar__(cf, plt, vc, i_sbsmp=i_cb_subsamp, lkc=lkcont, cunit=cbunit, cfont=font_clb)
if x_offset > 0 or y_offset > 0 : __add_xy_offset__(plt, x_offset, y_offset)
plt.axis([ 0., nx-1, 0, ny-1])
plt.title(ctitle, **font_ttl)
        # Prevent scientific notation in axes tick numbering:
ax.get_xaxis().get_major_formatter().set_useOffset(False)
plt.savefig(cfignm+'.'+cfig_type, dpi=100, orientation='portrait', transparent=True)
print ' => '+cfignm+'.'+cfig_type+' created!'
plt.close(1)
return
def __zonal(self,VYn, VZn, VY1=[0.],VZ1=[0.], VY2=[0.],VZ2=[0.], VY3=[0.],VZ3=[0.],
cfignm='fig_zonal', zmin=-100., zmax=100., dz=25., i_z_jump=1,
xmin=-90., xmax=90., dx=15., cfig_type='png', cxunit=r'Latitude ($^{\circ}$N)',
czunit='', ctitle='', lab='', lab1='', lab2='', lab3='', box_legend=(0.6, 0.75),
loc_legend='lower center', fig_size=FIG_SIZE_DEF):
font_ttl, font_xylb, font_clb = __font_unity__(fig_dpi=DPI_DEF)
ny = len(VYn)
if len(VZn) != ny: print 'ERROR: plot_zonal.barakuda_plot => VYn and VZn do not agree in size'; sys.exit(0)
lp1=False ; lp2=False ; lp3=False
if len(VZ1) > 1 and len(VZ1)==len(VY1): lp1=True
if len(VZ2) > 1 and len(VZ2)==len(VY2): lp2=True
if len(VZ3) > 1 and len(VZ3)==len(VY3): lp3=True
if fig_size==FIG_SIZE_DEF: fig_size = (fig_size[0], 1.5*fig_size[1]) # extend height if == to default
# Do we put the legend outside of the plot?
l_legend_out = False ; y_leg = 0.
if loc_legend == 'out':
l_legend_out = True
y_leg = 0.1 ; # Figure needs to be vertically extended in that case
fig_size = (fig_size[0],(1.+y_leg)*fig_size[1])
fig = plt.figure(num = 1, figsize=fig_size, facecolor='w', edgecolor='k')
ax = plt.axes([0.08, 0.075, 0.9, 0.85])
plt.plot(VYn, VZn*0.0, 'k', linewidth=1)
plt.plot(VYn, VZn, 'k', linewidth=3., label=lab)
if lp1: plt.plot(VY1, VZ1, color=b_red, linewidth=2., label=lab1)
if lp2: plt.plot(VY2, VZ2, color=b_blu, linewidth=2., label=lab2)
if lp3: plt.plot(VY3, VZ3, color=b_gre, linewidth=2., label=lab3)
# X-axis
__nice_latitude_axis__(ax, plt, xmin, xmax, dx, axt='x')
# Y-axis:
__nice_y_axis__(ax, plt, zmin, zmax, dz, i_sbsmp=i_z_jump, cunit=czunit, cfont=font_xylb, dy_minor=0)
# Legend:
__fancy_legend__(ax, plt, loc_leg=loc_legend, ylg=y_leg, leg_out=l_legend_out, ncol=1)
plt.title(ctitle, **font_ttl)
plt.savefig(cfignm+'.'+cfig_type, dpi=DPI_DEF, orientation='portrait', transparent=False)
plt.close(1)
return
def __nproj(self,czone, rmin, rmax, dc, xlon, xlat, XF,
cfignm='fig', lkcont=False, cpal='jet', cbunit=' ',
cfig_type='pdf', ctitle=' ', lforce_lim=False,
cb_orient='vertical', i_cb_subsamp=1, dpi_fig=DPI_DEF, lpcont=True):
# Plot projection with basemap...
#===================================================================================
# INPUT:
# xlon and xlat can be 1D or 2D !!!
#
# lpcont=True => do contourf
# lpcont=False => do pcolor
#
#===================================================================================
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import shiftgrid
import barakuda_colmap as bcm
font_ttl, font_xylb, font_clb = __font_unity__(fig_dpi=dpi_fig)
# For projections :
vp = __give_proj__(czone) ; # projection information
        # must work with XFtmp rather than XF, because sometimes XF gets overwritten...
[ny, nx] = nmp.shape(XF)
XFtmp = nmp.zeros(ny*nx) ; XFtmp.shape = [ny, nx]
XFtmp[:,:] = XF[:,:]
if len(nmp.shape(xlat)) == 1 and len(nmp.shape(xlon)) == 1:
if czone == 'kav7' and xlon[0] >= 0.:
# Shifting data and longitude to be consistent with map projection
XFtmp, xlon = shiftgrid(180.+xlon[0], XFtmp, xlon, start=False, cyclic=360.0)
LON_2D, LAT_2D = nmp.meshgrid(xlon,xlat)
else:
LAT_2D = nmp.zeros(ny*nx) ; LAT_2D.shape = [ny, nx] ; LAT_2D[:,:] = xlat[:,:]
LON_2D = nmp.zeros(ny*nx) ; LON_2D.shape = [ny, nx] ; LON_2D[:,:] = xlon[:,:]
if lforce_lim: __force_min_and_max__(rmin, rmax, XFtmp)
vc = __vcontour__(rmin, rmax, dc)
# Colorbar position/size if horizontal
vcbar = [0.1, 0.08, 0.86, 0.03]
# Figure/canvas size:
if cb_orient == 'horizontal':
if czone == 'natarct':
vfig_size = [ 5.8, 5.6 ]; vsporg = [0.08, 0.1, 0.9, 0.92]
vcbar = [0.05, 0.08, 0.9, 0.03]
if czone == 'npol2':
vfig_size = [ 4.4, 5.6 ]; vsporg = [0.01, 0.15, 1., 0.8]
vcbar = [0.05, 0.065, 0.92, 0.03]
if czone == 'kav7':
vfig_size = [ 8.1, 5.6 ]; vsporg = [0.001, 0.15, 1., 0.8]
vcbar = [0.04, 0.08, 0.92, 0.03]
else:
# Vertical color bar on the rhs
rw = 5.
vfig_size = [ rw, rw ]; vsporg = [0.1, 0.1, 0.85, 0.85]
if czone == 'nseas': vfig_size = [ rw , 0.7*rw ]; vsporg = [0.08, 0.07, 0.85, 0.85]
if czone == 'natarct': vfig_size = [ rw , rw ]; vsporg = [0.065, 0.04, 0.95, 0.92]
if czone == 'spstere': vfig_size = [ rw , 0.76*rw ]; vsporg = [0.11, 0.05, 0.82, 0.89]
if czone == 'npol2': vfig_size = [ rw , 0.96*rw ] ; vsporg = [0.1, 0.05, 0.86, 0.9]
fig = plt.figure(num = 1, figsize=(vfig_size), dpi=None, facecolor='w', edgecolor='k')
ax = plt.axes(vsporg, axisbg = 'w')
## Colmap:
colmap = bcm.chose_colmap(cpal)
pal_norm = colors.Normalize(vmin = rmin, vmax = rmax, clip = False)
mpl.rcParams['contour.negative_linestyle'] = 'solid'; plt.contour.negative_linestyle='solid'
if vp[1] == 'lcc' or vp[1] == 'cyl' :
carte = Basemap(llcrnrlon=vp[2],llcrnrlat=vp[3],urcrnrlon=vp[4],urcrnrlat=vp[5],\
resolution=vp[9],area_thresh=1000.,projection=vp[1],\
lat_1=vp[6],lon_0=vp[7])
elif vp[1] == 'stere' :
if vp[0] == 'spstere' or vp[0] == 'npstere':
carte = Basemap(projection=vp[0], boundinglat=vp[6], lon_0=vp[7], resolution=vp[9])
else:
carte = Basemap(llcrnrlon=vp[2],llcrnrlat=vp[3],urcrnrlon=vp[4],urcrnrlat=vp[5],\
resolution=vp[9],area_thresh=1000.,projection='stere',\
lat_0=vp[6],lon_0=vp[7])
elif vp[1] == 'kav' :
print ' *** plot_nproj.barakuda_plot => Projection '+vp[0]+' / '+str(vp[7])+' / '+vp[9]
carte = Basemap(projection=vp[0],lon_0=vp[7],resolution=vp[9])
else:
print 'ERROR: barakuda_plot.py => proj type '+vp[1]+' unknown!!!'; sys.exit(0)
x0,y0 = carte(LON_2D,LAT_2D)
if lpcont:
cf = carte.contourf(x0, y0, XFtmp, vc, cmap = colmap, norm = pal_norm)
# Black contours if needed :
if lkcont:
ckf = carte.contour(x0, y0, XFtmp, vc, colors='k', linewidths=0.5)
if cpal != 'ice':
for c in cf.collections: c.set_zorder(0.5) # Changing zorder so black cont. on top
for c in ckf.collections: c.set_zorder(1.) # of filled cont. and under continents (zorder 1)
else:
cf = carte.pcolor(x0, y0, XFtmp, cmap = colmap, norm = pal_norm)
carte.drawcoastlines() ; carte.fillcontinents(color='grey') ; carte.drawmapboundary()
if vp[1] == 'lcc' or vp[1] == 'cyl':
carte.drawmeridians(nmp.arange(-360,360,vp[8]), labels=[0,0,0,1])
carte.drawparallels(nmp.arange( -90, 90,vp[8]), labels=[1,0,0,0])
if vp[1] == 'stere':
carte.drawmeridians(nmp.arange(-180,180,20), labels=[0,0,0,1])
carte.drawparallels(nmp.arange( -90, 90,10), labels=[1,0,0,0])
plt.title(ctitle, **font_ttl)
# Colorbar:
if cb_orient == 'horizontal':
clbax = fig.add_axes(vcbar) # new axes for colorbar!
__nice_colorbar__(cf, plt, vc, cax_other=clbax, i_sbsmp=i_cb_subsamp, lkc=(lkcont and lpcont), cb_or='horizontal', cunit=cbunit, cfont=font_clb, fontsize=10)
else:
__nice_colorbar__(cf, plt, vc, i_sbsmp=i_cb_subsamp, lkc=(lkcont and lpcont), cunit=cbunit, cfont=font_clb, fontsize=12)
plt.savefig(cfignm+'.'+cfig_type, dpi=dpi_fig, orientation='portrait', transparent=False) ; #, transparent=True, acecolor='w', edgecolor='w',trans
plt.close(1)
print ' *** created figure '+cfignm+'.'+cfig_type+'\n'
del LON_2D, LAT_2D, XFtmp
return
def __2d_box_2f(self,XF1, XF2, XMSK, rmin, rmax, dc, vcont_spec2, corca='ORCA1', lkcont=True,
cpal='jet', cfignm='fig', cbunit='', ctitle=' ', lforce_lim=False,
i_cb_subsamp=1, cfig_type='pdf', lcontours=True,
x_offset=0., y_offset=0., vcont_spec1 = []):
# Take 2 fields as imput and shows contours of second field (vcont_spec2) on top of field 1
import matplotlib.colors as colors # colmap and co.
import barakuda_colmap as bcm
if nmp.shape(XF1) != nmp.shape(XF2):
            print 'ERROR barakuda_plot.plot_2d_box_2f: fields F1 and F2 do not have the same shape!'
sys.exit(0)
font_ttl, font_xylb, font_clb = __font_unity__()
if lforce_lim: __force_min_and_max__(rmin, rmax, XF1)
[ ny , nx ] = XF1.shape
vert_rat = float(ny)/float(nx)
print "Vert. ratio, nx, ny =", vert_rat, nx, ny
# Masking field:
if lcontours:
            idxm = nmp.where(XMSK[:,:] == 0); XF1[idxm] = -9999.9 # NaN is what breaks things!!!
else:
XF1 = nmp.ma.masked_where(XMSK == 0, XF1)
# FIGURE
# ~~~~~~
fig = plt.figure(num = 1, figsize=(7.,6.*vert_rat), dpi=None, facecolor='w', edgecolor='k')
ax = plt.axes([0.07, 0.05, 0.9, 0.9], axisbg = 'gray')
vc = __vcontour__(rmin, rmax, dc); #print vc, '\n'
# Colmap:
colmap = bcm.chose_colmap(cpal)
pal_norm = colors.Normalize(vmin = rmin, vmax = rmax, clip = False)
if lcontours:
cf = plt.contourf(XF1, vc, cmap = colmap, norm = pal_norm)
for c in cf.collections: c.set_zorder(0.5)
else:
cf = plt.pcolor(XF1, cmap = colmap, norm = pal_norm)
        # contour for specific values on the plotted field:
if len(vcont_spec1) >= 1:
cfs1 = plt.contour(XF1, vcont_spec1, colors='white', linewidths = 1.)
plt.clabel(cfs1, inline=1, fmt='%4.1f', fontsize=10)
# Contours of field F2:
cfs2 = plt.contour(XF2, vcont_spec2, colors=b_red, linewidths = 1.3)
#plt.clabel(cfs1, inline=1, fmt='%4.1f', fontsize=10)
if lkcont:
cfk = plt.contour(XF1, vc, colors='k', linewidths = 0.1)
for c in cfk.collections: c.set_zorder(0.75)
# contour for continents:
if lcontours:
cfm = plt.contour(XMSK, [ 0.7 ], colors='k', linewidths = 0.4)
for c in cfm.collections: c.set_zorder(1.)
# Colorbar:
__nice_colorbar__(cf, plt, vc, i_sbsmp=i_cb_subsamp, lkc=lkcont, cunit=cbunit, cfont=font_clb)
if x_offset > 0 or y_offset > 0 : __add_xy_offset__(plt, x_offset, y_offset)
plt.axis([ 0., nx-1, 0, ny-1])
plt.title(ctitle, **font_ttl)
plt.savefig(cfignm+'.'+cfig_type, dpi=100, orientation='portrait', transparent=True)
print ' => '+cfignm+'.'+cfig_type+' created!'
plt.close(1)
return
def __trsp_sig_class(self,VT, vsigma_bounds, XF, rmin, rmax, dc,
lkcont=True, cpal='sigtr', dt=5., cfignm='fig',
cfig_type='pdf', ctitle='', vcont_spec1 = [],
i_cb_subsamp=2):
# Plot transport by sigma class...
if nmp.sum(XF) == 0.:
            print '\n WARNING: plot_trsp_sig_class => doing nothing, array contains only zeros!\n'
else:
import matplotlib.colors as colors # colmap and co.
import barakuda_colmap as bcm
font_ttl, font_xylb, font_clb = __font_unity__(fig_dpi=DPI_DEF)
fig = plt.figure(num = 1, figsize=(WDTH_DEF , RAT_XY*6.), dpi=None, facecolor='w', edgecolor='k') ; #trsp_sig_class
ax = plt.axes([0.075, -0.025, 0.9, 0.98], axisbg = 'w')
vc = __vcontour__(rmin, rmax, dc)
nbins = len(vsigma_bounds) - 1
# Colmap:
colmap = bcm.chose_colmap(cpal)
pal_norm = colors.Normalize(vmin = rmin, vmax = rmax, clip = False)
mpl.rcParams['contour.negative_linestyle'] = 'solid'
plt.contour.negative_linestyle='solid'
#cf = plt.contourf(VT, vsigma_bounds[:-1], XF, vc, cmap = colmap, norm = pal_norm)
cf = plt.pcolormesh(VT, vsigma_bounds[:-1], XF, cmap = colmap, norm = pal_norm)
if lkcont:
cfc = plt.contour(VT, vsigma_bounds[:-1], XF, nmp.arange(-3.,3.,0.5), colors='k', linewidths=0.4)
            # contour for specific values on the plotted field:
if len(vcont_spec1) >= 1:
cfs1 = plt.contour(VT, vsigma_bounds[:-1], XF, vcont_spec1, colors='white', linewidths = 1.)
plt.clabel(cfs1, inline=1, fmt='%4.1f', fontsize=11, manual=[(2080,2.)] )
__nice_colorbar__(cf, plt, vc, i_sbsmp=i_cb_subsamp, cb_or='horizontal', cunit='Sv', cfont=font_clb, fontsize=10)
# AXES:
x1 = int(min(VT)) ; x2 = int(max(VT))+1
plt.axis([x1, x2, vsigma_bounds[nbins], vsigma_bounds[0]])
__nice_x_axis__(ax, plt, x1, x2, dt, cfont=font_xylb, dx_minor=__time_axis_minor_ticks__(dt))
plt.yticks( nmp.flipud(vsigma_bounds) )
label_big = { 'fontname':'Trebuchet MS', 'fontweight':'normal', 'fontsize':18 }
plt.ylabel(r'$\sigma_0$', **label_big)
plt.title(ctitle, **font_ttl)
plt.savefig(cfignm+'.'+cfig_type, dpi=DPI_DEF, orientation='portrait', transparent=False) ; #trsp_sig_class
print ' => '+cfignm+'.'+cfig_type+' created!'
plt.close(1)
return
    def __vert_section_extra(self,VX, VZ, XF, XMSK, Vcurve, rmin, rmax, dc, lkcont=True, cpal='jet', xmin=-80., xmax=85.,
                             dx=5., cfignm='fig', cbunit='', cxunit=' ', zmin = 0., zmax = 5000., l_zlog=False,
                             cfig_type='pdf', czunit=' ', ctitle=' ', lforce_lim=False, i_cb_subsamp=1, fig_size=(8.,8.) ):
import matplotlib.colors as colors # colmap and co.
import barakuda_colmap as bcm
zVZ = __prepare_z_log_axis__(l_zlog, VZ)
XF = nmp.ma.masked_where(XMSK == 0, XF)
if lforce_lim: __force_min_and_max__(rmin, rmax, XF)
#
font_ttl, font_xylb, font_clb = __font_unity__()
fig = plt.figure(num = 1, figsize=fig_size, dpi=None, facecolor='w', edgecolor='k')
ax = plt.axes([0.1, 0.065, 0.92, 0.89], axisbg = 'gray')
vc = __vcontour__(rmin, rmax, dc)
# Colmap:
colmap = bcm.chose_colmap(cpal)
pal_norm = colors.Normalize(vmin = rmin, vmax = rmax, clip = False)
cf = plt.contourf(VX, zVZ, XF, vc, cmap = colmap, norm = pal_norm)
if lkcont: plt.contour(VX, zVZ, XF, vc, colors='k', linewidths=0.2)
# Colorbar:
__nice_colorbar__(cf, plt, vc, i_sbsmp=i_cb_subsamp, cunit=cbunit, cfont=font_clb, fontsize=10)
# X-axis:
__nice_x_axis__(ax, plt, xmin, xmax, dx, cunit=cxunit, cfont=font_xylb)
plt.plot(VX,Vcurve, 'w', linewidth=2)
for zz in zVZ[:]: plt.plot(VX,VX*0.+zz, 'k', linewidth=0.3)
# Depth axis:
__nice_depth_axis__(ax, plt, zmin, zmax, l_log=l_zlog, l_z_inc=False, cunit=czunit, cfont=font_xylb)
plt.title(ctitle, **font_ttl)
plt.savefig(cfignm+'.'+cfig_type, dpi=100, orientation='portrait', transparent=True)
print ' => '+cfignm+'.'+cfig_type+' created!'
plt.close(1)
#
return
def __hovmoeller(self, VT, VY, XF, XMSK, rmin, rmax, dc, c_y_is='depth',
lkcont=False, cpal='jet', tmin=0., tmax=50., dt=5,
ymin=0., ymax=5000., dy=100., l_ylog=False,
cfignm='fig', cbunit='', ctunit=' ', cfig_type='png',
cyunit=' ', ctitle=' ', i_cb_subsamp=1,
l_y_increase=False ):
#
# c_y_is : 'depth', 'latitude'
# lkcont : use contours rather than "pcolormesh"
#
import barakuda_colmap as bcm
font_ttl, font_xylb, font_clb = __font_unity__(fig_dpi=DPI_DEF)
if c_y_is == 'depth':
zVY = __prepare_z_log_axis__(l_ylog, VY)
vax = [0.095, 0.06, 0.92, 0.88]
else:
zVY = VY
vax = [0.05, 0.06, 0.98, 0.88]
# Masking where mask is zero!
XF = nmp.ma.masked_where(XMSK == 0, XF)
fig = plt.figure(num = 1, figsize=(WDTH_DEF , RAT_XY*5.), dpi=None, facecolor='w', edgecolor='k')
ax = plt.axes(vax, axisbg='gray')
vc = __vcontour__(rmin, rmax, dc)
# Colormap:
colmap = bcm.chose_colmap(cpal)
pal_norm = colors.Normalize(vmin = rmin, vmax = rmax, clip = False)
if lkcont:
cf = plt.contourf(VT, zVY, XF, vc, cmap = colmap, norm = pal_norm)
#plt.contour( VT, zVY, XF, vc, colors='k', linewidths=0.2)
else:
cf = plt.pcolormesh(VT, zVY, XF, cmap = colmap, norm = pal_norm)
__nice_colorbar__(cf, plt, vc, i_sbsmp=i_cb_subsamp, cunit=cbunit, cfont=font_clb, fontsize=10)
# Time-axis:
__nice_x_axis__(ax, plt, tmin, tmax, dt, cunit=ctunit, cfont=font_xylb, dx_minor=__time_axis_minor_ticks__(dt))
# Y-axis:
if c_y_is == 'depth':
__nice_depth_axis__(ax, plt, ymin, ymax, l_log=l_ylog, l_z_inc=l_y_increase, cunit=cyunit, cfont=font_xylb)
elif c_y_is == 'latitude':
__nice_latitude_axis__(ax, plt, ymin, ymax, dy, axt='y')
else:
            print 'ERROR: plot_hovmoeller.barakuda_plot => axis "'+c_y_is+'" not supported!'; sys.exit(0)
plt.title(ctitle, **font_ttl)
plt.savefig(cfignm+'.'+cfig_type, dpi=DPI_DEF, orientation='portrait', transparent=False) ; #vert_section
print ' => '+cfignm+'.'+cfig_type+' created!'
plt.close(1)
return
def __oscillation_index(self, VT, VF, ymax=2.5, dy=0.5, yplusminus=0.,
tmin=0., tmax=0., dt=5,
cfignm='fig', cfig_type='png', cyunit='', ctitle=''):
#--------------------------------------------------------------------------------------
# Plot a ENSO / AMO / PDO -like graph from a time series VF that
# has already been smoothed and detrended
#--------------------------------------------------------------------------------------
font_ttl, font_xylb, font_clb = __font_unity__(fig_dpi=DPI_DEF)
Nt = len(VT)
if len(VF) != Nt: print 'ERROR: oscillation_index.barakuda_plot => VT and VF do not agree in size'; sys.exit(0)
vf_plus = nmp.zeros(Nt) ; vf_mins = nmp.zeros(Nt)
vf_plus[:] = VF[:] ; vf_mins[:] = VF[:]
vf_plus[nmp.where(VF[:] < 0. )] = 0.
vf_mins[nmp.where(VF[:] > 0. )] = 0.
vf_plus[0] = 0. ; vf_mins[0] = 0.
vf_plus[-1] = 0. ; vf_mins[-1] = 0.
t1 = tmin ; t2 = tmax
if tmin == 0. and tmax == 0.:
t1 = float(int(min(VT)))
t2 = float(int(round(max(VT),0)))
fig = plt.figure(num = 2, figsize=FIG_SIZE_DEF, facecolor='w', edgecolor='k')
ax = plt.axes(AXES_DEF)
if yplusminus > 0.:
plt.plot(VT, 0.*VT+yplusminus, 'r--', linewidth=1.5)
plt.plot(VT, 0.*VT-yplusminus, 'b--', linewidth=1.5)
plt.fill(VT, vf_plus, b_red, VT, vf_mins, b_blu, linewidth=0)
plt.plot(VT, VF[:], 'k', linewidth=0.7)
plt.plot(VT, 0.*VT, 'k', linewidth=0.7)
__nice_x_axis__(ax, plt, t1, t2, dt, cfont=font_xylb)
__nice_y_axis__(ax, plt, -ymax, ymax, dy, cunit=cyunit, cfont=font_xylb)
plt.title(ctitle, **font_ttl)
plt.savefig(cfignm+'.'+cfig_type, dpi=DPI_DEF, orientation='portrait', transparent=True)
plt.close(2)
return
def __1d_mon_ann(self,VTm, VTy, VDm, VDy, cfignm='fig', dt=5, cyunit='', ctitle='',
ymin=0, ymax=0, dy=0, i_y_jump=1, mnth_col='b', plt_m03=False, plt_m09=False,
cfig_type='png', l_tranparent_bg=True, fig_size=FIG_SIZE_DEF, y_cst_to_add=-9999.):
# if you specify ymin and ymax you can also specify y increment (for y grid) as dy
#
# plt_m03 => plot march values on top in green
# plt_m09 => plot september values on top in green
font_ttl, font_xylb, font_clb = __font_unity__(fig_dpi=DPI_DEF)
Nt1 = len(VTm) ; Nt2 = len(VTy)
if len(VTm) != len(VDm): print 'ERROR: plot_1d_mon_ann.barakuda_plot => VTm and VDm do not agree in size'; sys.exit(0)
if len(VTy) != len(VDy): print 'ERROR: plot_1d_mon_ann.barakuda_plot => VTy and VDy do not agree in size'; sys.exit(0)
l_add_monthly = True
if Nt1 == Nt2: l_add_monthly = False
y_leg = 0.
if plt_m03 or plt_m09:
# We put the legend outside of the plot...
y_leg = 0.075*2. ; # Figure needs to be vertically extended in that case
fig_size = (fig_size[0],(1.+y_leg)*fig_size[1])
fig = plt.figure(num = 1, figsize=fig_size, facecolor='w', edgecolor='k')
ax = plt.axes(AXES_DEF) ; #1d_mon_ann
if mnth_col == 'g': mnth_col = b_gre
if mnth_col == 'b': mnth_col = b_blu
if y_cst_to_add > -9000.:
plt.plot(VTm, VTm*0.+y_cst_to_add, 'k', label=None, linewidth=1.8)
if l_add_monthly:
plt.plot(VTm, VDm, mnth_col, label=r'monthly', linewidth=1)
plt.plot(VTy, VDy, b_red, label=r'annual', linewidth=2)
        ax.get_yaxis().get_major_formatter().set_useOffset(False); # Prevent scientific notation in axes tick numbering
if l_add_monthly:
if plt_m03: plt.plot(VTm[2:Nt1:12], VDm[2:Nt1:12], 'orange', label=r'March', linewidth=2)
if plt_m09: plt.plot(VTm[8:Nt1:12], VDm[8:Nt1:12], 'orange', label=r'September', linewidth=2)
if plt_m03 or plt_m09:
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height*y_leg, box.width, box.height*(1.-y_leg)])
plt.legend(bbox_to_anchor=(0.95, -0.075), ncol=2, shadow=True, fancybox=True)
# Time bounds for t-axis:
dcorr = 0.
x1 = float(int(min(VTy)))
#x2 = float(int(max(VTy)))
x2 = round(max(VTy),0)
#if min(VTy)-x1==0.5:
#x1 = int(min(VTy)-dcorr)
#x2 = int(max(VTy)+dcorr)
mean_val = nmp.mean(VDy)
df = max( abs(min(VDm)-mean_val), abs(max(VDm)-mean_val) )
if ymin==0 and ymax==0:
y1, y2, dy = __suitable_axis_dx__(min(VDm)-0.2*df, max(VDm)+0.2*df, nb_val=10.)
elif dy == 0:
y1, y2, dy = __suitable_axis_dx__(ymin, ymax, nb_val=10.)
else:
y1=ymin ; y2=ymax
__nice_y_axis__(ax, plt, y1, y2, dy, i_sbsmp=i_y_jump, cunit=cyunit, cfont=font_xylb, dy_minor=0)
__nice_x_axis__(ax, plt, x1, x2, dt, cfont=font_xylb, dx_minor=__time_axis_minor_ticks__(dt))
plt.title(ctitle, **font_ttl)
cf_fig = cfignm+'.'+cfig_type
plt.savefig(cf_fig, dpi=DPI_DEF, orientation='portrait', transparent=l_tranparent_bg)
print ' => '+cfignm+'.'+cfig_type+' created!'
plt.close(1)
def __1d_multi(self,vt, XD, vlabels, cfignm='fig', dt=5, i_t_jump=1, cyunit=None, ctitle='',
cfig_type='png', ymin=0, ymax=0, lzonal=False, xmin=0, xmax=0,
loc_legend='lower center', line_styles=[], fig_size=FIG_SIZE_DEF,
l_tranparent_bg=True, cxunit=None, lmask=True, cinfo='', y_cst_to_add=-9999.):
# lzonal => zonally averaged curves...
if lzonal:
font_ttl, font_big_fixed, font_xylb, font_clb = __font_unity__(fig_dpi=DPI_DEF, size='big')
else:
font_ttl, font_xylb, font_clb = __font_unity__(fig_dpi=DPI_DEF)
# Number of lines to plot:
[ nb_plt, nbt ] = XD.shape
if len(vt) != nbt: print 'ERROR: plot_1d_multi.barakuda_plot.py => vt and XD do not agree in shape! =>', len(vt), nbt,'\n'; sys.exit(0)
if len(vlabels) != nb_plt: print 'ERROR: plot_1d_multi.barakuda_plot.py => wrong number of labels...'; sys.exit(0)
n0 = len(line_styles)
if n0 > 0 and n0 != nb_plt: print 'ERROR: plot_1d_multi.barakuda_plot.py => wrong number line styles!!!'; sys.exit(0)
nb_col, nb_row = __nb_col_row_legend__(nb_plt) ; # nb of columns and rows for legend
# Do we put the legend outside of the plot?
l_legend_out = False ; y_leg = 0.
if loc_legend == 'out':
l_legend_out = True
y_leg = 0.075*nb_row ; # Figure needs to be vertically extended in that case
fig_size = (fig_size[0],(1.+y_leg)*fig_size[1])
# Masking the time-series shorter than others (masked with -999.)
if lmask: XD = nmp.ma.masked_where(XD < -900., XD)
if lzonal:
fig = plt.figure(num = 1, figsize=fig_size, facecolor='w', edgecolor='k')
ax = plt.axes([0.08, 0.11, 0.88, 0.83])
else:
fig = plt.figure(num = 1, figsize=fig_size, facecolor='w', edgecolor='k')
ax = plt.axes(AXES_DEF) ; #1d_multi
if y_cst_to_add > -9000.:
plt.plot(vt, vt*0.+y_cst_to_add, 'k', label=None, linewidth=1.8)
if lzonal: plt.plot(vt, XD[0,:]*0., 'k', linewidth=1)
if n0 <= 0 and nb_plt > nmax_colors:
print 'ERROR: plot_1d_multi.barakuda_plot => not enough colors defined in "v_dflt_colors", extend it!!!'
sys.exit(0)
for jp in range(nb_plt):
if n0 > 0:
plt.plot(vt, XD[jp,:], line_styles[jp], label=vlabels[jp], linewidth=2)
else:
plt.plot(vt, XD[jp,:], v_dflt_colors[jp], label=vlabels[jp], linewidth=2)
        #ax.get_yaxis().get_major_formatter().set_useOffset(False) ; # Prevent scientific notation in axes tick numbering
if lzonal:
dt = 15. ; # x-axis increment (latitude!)
if xmin == 0 and xmax == 0:
x1 = -90. ; x2 = 90.
else:
x1 = xmin ; x2 = xmax
else:
if xmin == 0 and xmax == 0:
x1 = int(vt[0])
x2 = int(round(vt[len(vt)-1]+0.4))
else:
x1 = xmin ; x2 = xmax
if ymin==0 and ymax==0:
ymin = nmp.min(XD[:,:])
ymax = nmp.max(XD[:,:])
ymin, ymax, dy = __suitable_axis_dx__(ymin, ymax, nb_val=10.)
if lzonal:
__nice_x_axis__(ax, plt, x1, x2, 10., cunit=r'Latitude ($^{\circ}$N)', cfont=font_xylb, dx_minor=5.)
else:
__nice_x_axis__(ax, plt, x1, x2, dt, i_sbsmp=i_t_jump, cunit=cxunit, cfont=font_xylb, dx_minor=__time_axis_minor_ticks__(dt))
__nice_y_axis__(ax, plt, ymin, ymax, dy, i_sbsmp=1, cunit=cyunit, cfont=font_xylb, dy_minor=0)
plt.title(ctitle, **font_ttl)
if cinfo != '':
            # Adding info:
yp = 0.95
if loc_legend != '0' and l_legend_out: yp = -0.1
props = dict(boxstyle='round', facecolor='w') ;#, alpha=0.5)
ax.text(0.05, yp, cinfo, transform=ax.transAxes,
verticalalignment='top', bbox=props, fontsize=10)
__fancy_legend__(ax, plt, loc_leg=loc_legend, ylg=y_leg, leg_out=l_legend_out, ncol=nb_col)
cf_fig = cfignm+'.'+cfig_type
plt.savefig(cf_fig, dpi=DPI_DEF, orientation='portrait', transparent=l_tranparent_bg) ; #1d_multi
plt.close(1)
print ' => Multi figure "'+cf_fig+'" created!'
def __1d(self,vt, VF, cfignm='fig', dt=5, i_t_jump=1, cyunit='', ctitle='',
cfig_type='png', ymin=0, ymax=0, xmin=0, xmax=0,
loc_legend='lower center', line_styles='-', fig_size=FIG_SIZE_DEF,
l_tranparent_bg=False, cxunit='', lmask=True):
font_ttl, font_xylb, font_clb = __font_unity__(fig_dpi=DPI_DEF)
# Number of lines to plot:
nbt = len(VF)
if len(vt) != nbt: print 'ERROR: plot_1d.barakuda_plot.py => vt and VF do not agree in shape!'; sys.exit(0)
# Masking the time-series shorter than others (masked with -999.)
if lmask: VF = nmp.ma.masked_where(VF < -900., VF)
fig = plt.figure(num = 1, figsize=fig_size, facecolor='w', edgecolor='k')
ax = plt.axes(AXES_DEF) ; #1d
plt.plot(vt, VF[:], line_styles, linewidth=2)
        ax.get_yaxis().get_major_formatter().set_useOffset(False) ; # Prevent scientific notation in axes tick numbering
if xmin == 0 and xmax == 0:
x1 = int(vt[0])
x2 = int(round(vt[len(vt)-1]+0.4))
else:
x1 = xmin ; x2 = xmax
if ymin==0 and ymax==0:
mean_val = nmp.mean(VF[:])
dA = max( abs(nmp.min(VF[:])-mean_val), abs(nmp.max(VF[:])-mean_val) )
plt.axis( [x1, x2, nmp.min(VF[:])-0.2*dA, nmp.max(VF[:])+0.2*dA] )
else:
plt.axis([x1, x2, ymin, ymax])
print nmp.arange(x1, x2+dt, dt)
__nice_x_axis__(ax, plt, x1, x2, dt, i_sbsmp=i_t_jump, cfont=font_xylb)
if cyunit != '': plt.ylabel('('+cyunit+')', **font_xylb)
if cxunit != '': plt.xlabel('('+cxunit+')', **font_xylb)
plt.title(ctitle, **font_ttl)
cf_fig = cfignm+'.'+cfig_type
plt.savefig(cf_fig, dpi=DPI_DEF, orientation='portrait', transparent=l_tranparent_bg) ; #1d
plt.close(1)
        print ' => Figure "'+cf_fig+'" created!'
def __spectrum(self,vfrq, Vspec, cfignm='fig', cyunit='', log_x=True, log_y=False,
year_min=3., year_max = 50., rmax_amp = 10., rmin_amp = 0.,
cfig_type='png', vnoise=[ 0 ], vrci95=[ 0 ], lab95_xpos=0.5, lplot_1onF=False,
cnoise='White noise', lplot_freq_ax=True):
l_do_ci95 = False ; l_do_ci95m = False
nnoise = len(vnoise); nl95 = len(vrci95)
if nnoise != 1 and nl95 != 1:
if nl95 != len(Vspec) or nl95 != nnoise:
print "ERROR: plot_spectrum.barakuda_plot.py => length of 95 CI array and/or noise array doesnt match spectrum length!"
sys.exit(0)
l_do_ci95 = True
l_do_ci95m = True
font_ttl, font_xylb, font_clb = __font_unity__()
print "avant:", rmin_amp, rmax_amp
if log_y:
if rmin_amp <= 0.: rmin_amp = 0.01
rmin_amp = 20.*nmp.log10(rmin_amp); rmax_amp = 20.*nmp.log10(rmax_amp)
print "apres:", rmin_amp, rmax_amp
# Spectral axis:
x_min = 1./year_max ; x_max = 1./year_min ; # min and max in frequency!
clbnd = str(int(round(year_min)))
if log_x:
cvee = [ '50','45','40','35','30','25','22','20','17','15','13','12','11','10','9','8','7','6','5','4','3' ]
if year_max == 40.: cvee = cvee[2:]
if year_max == 35.: cvee = cvee[3:]
if year_min == 5.: cvee = cvee[:-2]
else:
cvee = [ '50','30','20','15','12','10','9','8','7','6','5','4', '3' ]
lvee = []
civee = []
for ce in cvee:
lvee.append(float(ce))
civee.append(str(round(1./float(ce),3)))
vee = nmp.asarray(lvee)
print civee[:]
rnoise = nmp.mean(vnoise[5:20])
rrci95 = nmp.mean(vrci95[5:20])
fig = plt.figure(num = 1, figsize=(8.,4.), facecolor='w', edgecolor='k')
if lplot_freq_ax:
ax = plt.axes([0.069, 0.13, 0.9, 0.8])
else:
ax = plt.axes([0.08, 0.13, 0.9, 0.83])
if log_x:
ifr1 = 1
vfl = nmp.log10(vfrq[ifr1:])
if not log_y:
if l_do_ci95:
plt.plot(vfl, vnoise[ifr1:], '--k' , linewidth=1.8, label=cnoise)
plt.plot(vfl, vnoise[ifr1:]+vrci95[ifr1:], '0.4', linewidth=1.8, label='95% CI')
plt.plot(vfl, vnoise[ifr1:]-vrci95[ifr1:], '0.4', linewidth=1.8)
plt.plot(vfl, Vspec[ifr1:], '*-k', linewidth=2.)
#if lplot_1onF: plt.plot(vfl, 1./vfl, b_red, linewidth=2)
else:
if l_do_ci95:
plt.plot(vfl, 20.*nmp.log10(vnoise[ifr1:]), '--k' , linewidth=1.8, label=cnoise)
plt.plot(vfl, 20.*nmp.log10(vnoise[ifr1:]+vrci95[ifr1:]), '0.4', linewidth=1.8, label='95% CI')
plt.plot(vfl, 20.*nmp.log10(vnoise[ifr1:]-vrci95[ifr1:]), '0.4', linewidth=1.8)
plt.plot(vfl, 20.*nmp.log10(Vspec[ifr1:]), '*-k', linewidth=2.)
else:
if not log_y:
if l_do_ci95:
plt.plot(vfrq, vnoise, '--k' , linewidth=1.8)
plt.plot(vfrq, vnoise+vrci95, '0.4', linewidth=1.8)
plt.plot(vfrq, vnoise-vrci95, '0.4', linewidth=1.8)
plt.plot(vfrq, Vspec, '*-k', linewidth=2)
if lplot_1onF: plt.plot(vfrq[1:], 0.03*1./vfrq[1:], b_red, linewidth=2)
else:
if l_do_ci95:
plt.plot(vfrq, 20.*nmp.log10(vnoise), '--k' , linewidth=1.8)
plt.plot(vfrq, 20.*nmp.log10(vnoise+vrci95), '0.4', linewidth=1.8)
plt.plot(vfrq, 20.*nmp.log10(vnoise-vrci95), '0.4', linewidth=1.8)
plt.plot(vfrq, 20.*nmp.log10(Vspec), '*-k', linewidth=2)
plt.ylabel('Amplitude Spectrum ('+cyunit+')', color='k', **font_xylb)
plt.xlabel('Period (years)', color='k', **font_xylb)
if log_x:
x1=nmp.log10(x_max) ; x2=nmp.log10(x_min)
plt.axis([x1, x2, rmin_amp, rmax_amp])
if lplot_freq_ax:
plt.xticks(nmp.log10(1./vee[:]),cvee[:])
else:
print ''
vee_n = nmp.arange(vee[0], vee[len(vee)-1]-1, -1.)
print vee_n[:]
cvee_n = []
for rr in vee_n:
cr = str(int(rr))
if cr in cvee:
cvee_n.append(cr)
else:
cvee_n.append('')
print 'cvee =>', cvee[:]
print 'cvee_n =>', cvee_n[:]
plt.xticks(nmp.log10(1./vee_n[:]),cvee_n[:])
else:
x1=x_max; x2=x_min
plt.axis([x1, x2, rmin_amp, rmax_amp])
plt.xticks(1./vee[:],cvee[:], color='k')
ax.grid(color='0.4', linestyle='-', linewidth=0.3)
plt.legend(loc='upper left', shadow=False, fancybox=True)
if lplot_freq_ax:
ax2 = ax.twiny()
ax2.set_xlabel('Frequency (cpy)', color='k', **font_xylb)
if log_x:
plt.axis([x1, x2, rmin_amp, rmax_amp])
for jp in range(1,18,2): civee[jp] = ''
plt.xticks(nmp.log10(1./vee[:]),civee)
#ax2.xaxis.set_ticks(nmp.log10(1./vee[:]))
else:
plt.axis([x1, x2, rmin_amp, rmax_amp])
plt.xticks(1./vee[:],civee)
for t in ax2.get_xticklabels(): t.set_fontsize(14)
ax2.xaxis.labelpad = 12 ; # move label upwards a bit...
plt.savefig(cfignm+'.'+cfig_type, dpi=100, facecolor='w', edgecolor='w', orientation='portrait', transparent=False)
plt.close(1)
    def __del__(self) :
        # nothing to clean up: instances hold no external resources
        pass
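    # A minimal usage sketch of the wrapper above (hypothetical data; the
    # arrays would normally come from a model experiment):
    #
    #   vt = nmp.arange(1990., 2011.)
    #   vf = nmp.sin(vt/3.)
    #   plot("1d")(vt, vf, cfignm='example_ts', dt=5, cyunit='deg.C', ctitle='Demo')
    #
    # plot("1d") builds an instance whose __call__ dispatches to __1d.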
# LOCAL functions
# ===============
def __get_mat__(cf):
f1 = open(cf, 'r')
lines1=f1.readlines()
f1.close()
zm = [] ; jy = 0
for l in lines1:
if l[0] != '#':
jy = jy + 1
ls = l.split()
zm.append([])
for c in ls:
zm[jy-1].append(float(c))
    zxm = nmp.array(zm)
print 'Shape zxm = ',nmp.shape(zxm), '\n'
return zxm
def __vcontour__(zmin, zmax, zdc):
if (zmin,zmax) == (0.,0.) or (zmin,zmax) == (0,0):
vcont = [0.]
else:
lngt = zmax - zmin
ncont = lngt/zdc
vcont = nmp.arange(zmin, zmax + zdc, zdc)
return vcont
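# Example: __vcontour__(0., 1., 0.25) returns array([0., 0.25, 0.5, 0.75, 1.])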
def __name_longitude_ticks__(lon_min=0., lon_max=360., dlon=30., lon_ext=0):
#
# Builds nice ticks for X (lon) axis!
#
# Arrange longitude axis !
VX = nmp.arange(lon_min, lon_max+lon_ext+dlon, dlon); VX0 = nmp.arange(lon_min, lon_max+lon_ext+dlon, dlon);
ivf = nmp.where(VX>180); VX0[ivf] = VX[ivf] - 360
cn_lon = []
for rlon in VX0:
jlon = int(rlon)
if jlon < 0:
cn_lon.append(str(-jlon)+r'$^{\circ}$W')
else:
if jlon == 0:
cn_lon.append(str(jlon)+r'$^{\circ}$')
else:
cn_lon.append(str(jlon)+r'$^{\circ}$E')
return VX, cn_lon
def __name_latitude_ticks__(lat_min=-90., lat_max=90., dlat=15.):
#
# Builds nice ticks for Y (lat) axis!
#
# Arrange latitude axis !
VY = nmp.arange(lat_min, lat_max+dlat, dlat)
cn_lat = []
for rlat in VY:
jlat = int(rlat)
if jlat < 0:
cn_lat.append(str(-jlat)+r'$^{\circ}$S')
else:
if jlat == 0:
cn_lat.append(str(jlat)+r'$^{\circ}$')
else:
cn_lat.append(str(jlat)+r'$^{\circ}$N')
return VY, cn_lat
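# Example: __name_latitude_ticks__(-30., 30., 15.) returns tick positions
# [-30., -15., 0., 15., 30.] and labels ['30$^{\circ}$S', '15$^{\circ}$S',
# '0$^{\circ}$', '15$^{\circ}$N', '30$^{\circ}$N'].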
def __name_coor_ticks__(lon_min=0., lon_max=360., dlon=30., lat_min=-90., lat_max=90., dlat=15., lon_ext=0):
# Builds nice ticks for X and Y (lon, lat) axes!
VX, cn_lon = __name_longitude_ticks__(lon_min=lon_min, lon_max=lon_max, dlon=dlon, lon_ext=lon_ext)
VY, cn_lat = __name_latitude_ticks__(lat_min=lat_min, lat_max=lat_max, dlat=dlat)
return VX, VY, cn_lon, cn_lat
def __give_proj__(cname):
nb =nmp.shape(projection_def)[0]
vproj = [ 'NC', 'NC', 0., 0., 0., 0., 0., 0., 'NC' ]
jb = 0
while jb < nb :
if projection_def[jb][0] == cname:
break
else :
jb = jb + 1
if jb == nb :
print 'Zone "'+cname+'" does not exist!\n'
        print 'So far the choices are:'
for jb in range(nb): print projection_def[jb][0]
sys.exit(0)
vproj = projection_def[jb][:]
return vproj
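# Example: __give_proj__('natarct') returns the 'natarct' row of projection_def,
# a Lambert conformal conic ('lcc') covering the North Atlantic and the Arctic,
# which __nproj uses to instantiate Basemap.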
def __font_unity__(fig_dpi=100., size='normal'):
rat = 100./float(fig_dpi)
if size == 'big': rat = 1.25*rat
params = { 'font.family':'Trebuchet MS',
'font.size': int(13.*rat),
'legend.fontsize': int(13.*rat),
'xtick.labelsize': int(13.*rat),
'ytick.labelsize': int(13.*rat),
'axes.labelsize': int(13.*rat),
'legend.facecolor': 'white',
'figure.facecolor': 'white' }
mpl.rcParams.update(params)
title_fonts = { 'fontname':'Trebuchet MS', 'fontweight':'normal', 'fontsize':int(15.*rat) }
label_fonts = { 'fontname':'Arial' , 'fontweight':'normal', 'fontsize':int(14.*rat) }
colorbar_fonts = { 'fontname':'Arial' , 'fontweight':'normal', 'fontsize':int(13.*rat) }
return title_fonts, label_fonts, colorbar_fonts
def __force_min_and_max__(rm, rp, Xin):
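    # Clamp Xin in place so all finite values lie strictly inside (rm, rp);
    # non-finite entries are temporarily zeroed for the comparisons and
    # restored as NaN afterwards.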
idx_bad = nmp.where(nmp.logical_not(nmp.isfinite(Xin)))
Xin[idx_bad] = 0.
idx1 = nmp.where(Xin <= rm); Xin[idx1] = rm + abs(rp-rm)*1.E-4
idx2 = nmp.where(Xin >= rp); Xin[idx2] = rp - abs(rp-rm)*1.E-4
Xin[idx_bad] = nmp.nan
def __subsample_colorbar__(i_sbsmp, vcc, clb_hndl, cb_or='vertical'):
cb_labs = []
# First checking if vcc countains integers or not...
lcint = False
vc = vcc.astype(nmp.int64) ; # integer version of vcc
if nmp.max(nmp.abs(vcc))>5 and nmp.sum(vcc-vc) == 0. : lcint=True
cpt = 0
nn = int(round(abs(vcc[-1]-vcc[0])/abs(vcc[0]-vcc[1]),0))
if nn % 2 != 0: cpt = 1
for rr in vcc:
if cpt % i_sbsmp == 0:
if lcint:
cr = str(int(rr))
else:
cr = str(round(float(rr),6))
cb_labs.append(cr)
else:
cb_labs.append(' ')
cpt = cpt + 1
if cb_or == 'horizontal':
clb_hndl.ax.set_xticklabels(cb_labs)
else:
clb_hndl.ax.set_yticklabels(cb_labs)
del cb_labs, vc
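# Example: with vcc = [0., 0.5, 1., 1.5, 2.] and i_sbsmp=2 the colorbar keeps
# every other tick label: ['0.0', ' ', '1.0', ' ', '2.0'].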
def __nice_colorbar__(fig_hndl, plt_hndl, vcc,
cax_other=None, i_sbsmp=1, lkc=False, cb_or='vertical', cunit=None, cfont=None, fontsize=0):
if cb_or not in {'horizontal','vertical'}:
print "ERROR: only 'vertical' or 'horizontal' can be specified for the colorbar orientation!"
cb_or = 'vertical'
if cb_or == 'horizontal':
if cax_other is not None:
clb = plt_hndl.colorbar(fig_hndl, cax=cax_other, ticks=vcc, drawedges=lkc, orientation='horizontal',
pad=0.07, shrink=1., aspect=40, extend='both')
else:
clb = plt_hndl.colorbar(fig_hndl, ticks=vcc, drawedges=lkc, orientation='horizontal',
pad=0.07, shrink=1., aspect=40, extend='both')
else:
if cax_other is not None:
clb = plt_hndl.colorbar(fig_hndl, cax=cax_other, ticks=vcc, drawedges=lkc,
pad=0.03, extend='both')
else:
clb = plt_hndl.colorbar(fig_hndl, ticks=vcc, drawedges=lkc,
pad=0.03, extend='both')
if i_sbsmp > 1: __subsample_colorbar__(i_sbsmp, vcc, clb, cb_or=cb_or)
if not cunit is None:
if cfont is None:
clb.set_label(cunit)
else:
clb.set_label(cunit, **cfont)
if fontsize > 0:
if cb_or == 'horizontal':
for t in clb.ax.get_xticklabels(): t.set_fontsize(fontsize) # Font size for colorbar ticks!
else:
for t in clb.ax.get_yticklabels(): t.set_fontsize(fontsize) # Font size for colorbar ticks!
def __add_xy_offset__(plt_hndl, ixo, iyo):
if ( ixo != 0. ):
locs, labels = plt_hndl.xticks() ; jl=0
vlabs = []
for ll in locs:
clab = str(int(locs[jl])+int(ixo))
vlabs.append(clab); jl=jl+1
plt_hndl.xticks(locs,vlabs)
    if ( iyo != 0. ):
locs, labels = plt_hndl.yticks() ; jl=0; vlabs = []
for ll in locs:
clab = str(int(locs[jl])+int(iyo))
vlabs.append(clab); jl=jl+1
plt_hndl.yticks(locs,vlabs)
del vlabs
def __subsample_axis__(plt_hndl, cax, i_sbsmp, icpt=1):
ax_lab = []
if cax == 'x':
locs, labels = plt_hndl.xticks()
elif cax == 'y':
locs, labels = plt_hndl.yticks()
else:
print ' Error: __subsample_axis__.barakuda_plot => only "x" or "y" please'; sys.exit(0)
    cpt = icpt # with icpt = 1: tick printing will start at y1+dt on the x axis rather than y1
for rr in locs:
if cpt % i_sbsmp == 0:
if rr%1.0 == 0.:
cr = str(int(rr)) # it's something like 22.0, we want 22 !!!
else:
cr = str(rr)
ax_lab.append(cr)
else:
ax_lab.append(' ')
cpt = cpt + 1
if cax == 'x': plt_hndl.xticks(locs,ax_lab)
if cax == 'y': plt_hndl.yticks(locs,ax_lab)
del ax_lab
def __nice_x_axis__(ax_hndl, plt_hndl, x_0, x_H, dx, i_sbsmp=1, cunit=None, cfont=None, dx_minor=0):
x_l = x_0
if x_0%dx != 0.: x_l = float(int(x_0/dx))*dx
if x_H%dx != 0.: x_H = float(int(x_H/dx)+1)*dx
plt_hndl.xticks( nmp.arange(x_l, x_H+dx, dx) )
locs, labels = plt_hndl.xticks()
ax_hndl.get_xaxis().get_major_formatter().set_useOffset(False) ; # Prevents scientific notation in the axis tick labels...
if i_sbsmp > 1: __subsample_axis__(plt_hndl, 'x', i_sbsmp)
if cunit is not None:
if cfont is None:
plt_hndl.xlabel(cunit)
else:
plt_hndl.xlabel(cunit, **cfont)
# Add minor x-ticks and corresponding grid:
if dx_minor > 0:
locs, labels = plt_hndl.xticks()
ax_hndl.set_xticks( nmp.arange(locs[0], locs[len(locs)-1] , dx_minor) , minor=True)
ax_hndl.grid(which='both')
ax_hndl.grid(which='minor', color='k', linestyle='-', linewidth=0.1)
ax_hndl.grid(which='major', color='k', linestyle='-', linewidth=0.2)
ax_hndl.set_xlim(x_l,x_H+dx/1000.)
def __nice_y_axis__(ax_hndl, plt_hndl, y_0, y_H, dy, i_sbsmp=1, cunit=None, cfont=None, dy_minor=0):
y_l = y_0
if y_0%dy != 0.: y_l = float(int(y_0/dy))*dy
plt_hndl.yticks( nmp.arange(y_l, y_H+dy, dy) )
locs, labels = plt_hndl.yticks()
ax_hndl.get_yaxis().get_major_formatter().set_useOffset(False) ; # Prevents scientific notation in the axis tick labels...
if i_sbsmp > 1: __subsample_axis__(plt_hndl, 'y', i_sbsmp)
if cunit is not None:
if cfont is None:
plt_hndl.ylabel(cunit)
else:
plt_hndl.ylabel(cunit, **cfont)
# Add minor y-ticks and corresponding grid:
if dy_minor > 0:
locs, labels = plt_hndl.yticks()
ax_hndl.set_yticks( nmp.arange(locs[0], locs[len(locs)-1] , dy_minor) , minor=True)
ax_hndl.grid(which='both')
ax_hndl.grid(which='minor', color='k', linestyle='-', linewidth=0.1)
ax_hndl.grid(which='major', color='k', linestyle='-', linewidth=0.2)
ax_hndl.set_ylim(y_0,y_H+dy/1000.)
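# Example usage (an illustrative sketch; ax, vtime and vts are hypothetical):
# build a time-series panel with major x-ticks every 5 years, a minor grid
# every year, and y-ticks every 0.5 units.
#
# fig = plt.figure() ; ax = plt.axes([0.1, 0.1, 0.85, 0.85])
# plt.plot(vtime, vts)
# __nice_x_axis__(ax, plt, vtime[0], vtime[-1], 5., cunit='Year', dx_minor=1)
# __nice_y_axis__(ax, plt, min(vts), max(vts), 0.5, cunit='SST [deg.C]')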
def __nice_depth_axis__(ax_hndl, plt_hndl, z0, zK, l_log=False, l_z_inc=True, cunit=None, cfont=None):
ax_hndl.get_yaxis().get_major_formatter().set_useOffset(False)
if l_log:
y_log_ofs = 10.
vyview_list = [ 3. , 10. , 25., 50. , 100. , 250. , 500. , 1000. , 2500., 5000. ]
nd = len(vyview_list)
vyview = nmp.zeros(nd)
for jn in range(nd): vyview[jn] = vyview_list[jn]
vyview_log = nmp.log10(vyview + y_log_ofs)
ylab = []
for rr in vyview_list: ylab.append(str(int(rr)))
z0 = nmp.log10(z0+y_log_ofs)
zK = nmp.log10(zK+y_log_ofs)
ax_hndl.set_yticks(vyview_log)
ax_hndl.get_yaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
ax_hndl.set_yticklabels(ylab)
if l_z_inc:
ax_hndl.set_ylim(z0,zK)
else:
ax_hndl.set_ylim(zK+(zK-z0)/50. , z0)
ax_hndl.grid(color='k', linestyle='-', linewidth=0.5)
if cunit is not None:
if cfont is None:
plt_hndl.ylabel(cunit)
else:
plt_hndl.ylabel(cunit, **cfont)
def __nice_latitude_axis__(ax_hndl, plt_hndl, lmin, lmax, dl, axt='x'):
if axt == 'x':
ax_hndl.get_xaxis().get_major_formatter().set_useOffset(False)
elif axt == 'y':
ax_hndl.get_yaxis().get_major_formatter().set_useOffset(False)
else:
print 'ERROR barakuda_plot.__nice_latitude_axis__: only accept "x" or "y" for axt!'
sys.exit(0)
[vvl, ctck] = __name_latitude_ticks__(lat_min=lmin, lat_max=lmax, dlat=dl)
if axt == 'x':
plt_hndl.xticks(vvl,ctck)
ax_hndl.set_xlim(lmin,lmax)
else:
plt_hndl.yticks(vvl,ctck)
ax_hndl.set_ylim(lmin,lmax)
def __nice_longitude_axis__(ax_hndl, plt_hndl, lmin, lmax, dl, axt='x'):
if axt == 'x':
ax_hndl.get_xaxis().get_major_formatter().set_useOffset(False)
elif axt == 'y':
ax_hndl.get_yaxis().get_major_formatter().set_useOffset(False)
else:
print 'ERROR barakuda_plot.__nice_longitude_axis__: only accept "x" or "y" for axt!'
sys.exit(0)
[vvl, ctck] = __name_longitude_ticks__(lon_min=lmin, lon_max=lmax, dlon=dl)
if axt == 'x':
plt_hndl.xticks(vvl,ctck)
ax_hndl.set_xlim(lmin,lmax)
else:
plt_hndl.yticks(vvl,ctck)
ax_hndl.set_ylim(lmin,lmax)
def __prepare_z_log_axis__(l_log, vz):
import math
nk = len(vz)
zvz = nmp.zeros(nk)
if l_log:
for jk in range(nk):
zvz[jk] = math.log10(vz[jk])
else:
zvz= vz
return zvz
def __nb_col_row_legend__(nn):
if nn <= 3:
nbc = 1 ; nbr = nn
elif nn == 4:
nbc = 2 ; nbr = 2
elif nn > 4 and nn <= 6:
nbc = 2 ; nbrfull = 2; nfull = nbc*nbrfull; nbr = nbrfull + nn/nfull
elif nn > 6 and nn <= 9:
nbc = 3 ; nbrfull = 2; nfull = nbc*nbrfull; nbr = nbrfull + nn/nfull
elif nn > 9 and nn <= 16:
nbc = 4 ; nbrfull = 3; nfull = nbc*nbrfull; nbr = nbrfull + nn/nfull
else:
nbc = 4 ; nbr = nn/nbc + 1
return nbc, nbr
def __time_axis_minor_ticks__(dt):
dt_mnr=0
if ((dt>=2) and (dt<10) and (dt%2 == 0)) or (dt==5) : dt_mnr=1
if (dt>=10) and (dt<50) and (dt%5 == 0) : dt_mnr=5
if (dt>=50) and (dt%50 == 0) : dt_mnr=10
return dt_mnr
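# A few hand-checked mappings of the rules above (major interval dt to the
# returned minor interval): dt = 2, 4, 5, 6, 8 -> 1 ; dt = 10, 15, ..., 45
# (multiples of 5) -> 5 ; dt = 50, 100, 150, ... (multiples of 50) -> 10 ;
# anything else (e.g. dt = 3 or 7) -> 0, i.e. no minor ticks.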
def __suitable_axis_dx__(hmin, hmax, nb_val=20, lsym0=False):
if (hmin,hmax) == (0.,0.) or (hmin,hmax) == (0,0):
dh = 0.
else:
dh = abs(hmax - hmin)/float(nb_val)
lfound = False
iexp = 20
while not lfound :
if dh%(10.**(iexp-1)) != dh: lfound = True
iexp = iexp - 1
if iexp < 1:
dh = round(dh, -iexp)
else:
dh = round(dh,0)
dh = round(dh/(10.**iexp),0)*10.**iexp
dhi = dh*10.**(-iexp)
if dhi == 3.: dh = 2.5*10.**(iexp)
if dhi in [4.,6.,7.]: dh = 5.*10.**(iexp)
if dhi in [8.,9.]: dh = 10.*10.**(iexp) ; iexp=iexp+1
hmin = float(int(hmin*10.**(-iexp)))*10.**iexp
hmax = float(int((hmax+dh)*10.**(-iexp)))*10.**iexp
if lsym0:
# Force symmetry about 0!
hmax = max(abs(hmax),abs(hmin))
if hmax%dh != 0.: hmax = float(int(hmax/dh))*dh
hmin = -hmax
return hmin, hmax, dh
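# Worked example (hand-checked against the code above): for hmin=0.13,
# hmax=9.7 and nb_val=20, the raw spacing 9.57/20 = 0.4785 rounds to
# dh = 0.5 (iexp ends at -1), and the bounds snap outward to multiples of
# 10**iexp, so the call below returns (0.1, 10.2, 0.5):
#
# hmin, hmax, dh = __suitable_axis_dx__(0.13, 9.7, nb_val=20)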
def __fancy_legend__(ax_hndl, plt_hndl, loc_leg='0', ylg=0, leg_out=False, ncol=1):
if loc_leg != '0':
if leg_out:
# Shrink Y axis's height by % on the bottom
box = ax_hndl.get_position()
ax_hndl.set_position([box.x0, box.y0 + box.height*ylg, box.width, box.height*(1.-ylg)])
plt_hndl.legend(bbox_to_anchor=(0.95, -0.075), ncol=ncol, shadow=True, fancybox=True)
else:
plt_hndl.legend(loc=loc_leg, ncol=ncol, shadow=True, fancybox=True)
|
plesager/barakuda
|
python/modules/barakuda_plot.py
|
Python
|
gpl-2.0
| 69,785
|
[
"ORCA"
] |
15a9d42db950ea2e47fce80c5b8969cc525b8d390ba86773e6dc6e270570a3dd
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import os
from collections import defaultdict
from lxml import etree
class XmlSchemaValidator(object):
NS_XML_SCHEMA_INSTANCE = "http://www.w3.org/2001/XMLSchema-instance"
NS_XML_SCHEMA = "http://www.w3.org/2001/XMLSchema"
def __init__(self, schema_dir=None):
self.__imports = self._build_imports(schema_dir)
def _get_target_ns(self, fp):
'''Returns the target namespace for a schema file
Keyword Arguments
fp - the path to the schema file
'''
parser = etree.ETCompatXMLParser(huge_tree=True)
tree = etree.parse(fp, parser=parser)
root = tree.getroot()
return root.attrib['targetNamespace'] # throw an error if it
# doesn't exist...we can't
# validate
def _get_include_base_schema(self, list_schemas):
'''Returns the root schema which defines a namespace.
Certain schemas, such as OASIS CIQ, use xs:include statements in their
schemas, where two schemas define a namespace (e.g., XAL.xsd and
XAL-types.xsd). This makes validation difficult, since we must refer to
one schema for a given namespace.
To fix this, we attempt to find the root schema which includes the
others. We do this by seeing if a schema has an xs:include element,
and if it does we assume that it is the parent. This is totally wrong
and needs to be fixed. Ideally this would build a tree of includes and
return the root node.
Keyword Arguments:
list_schemas - a list of schema file paths that all belong to the same
namespace
'''
parent_schema = None
tag_include = "{%s}include" % (self.NS_XML_SCHEMA)
for fn in list_schemas:
tree = etree.parse(fn)
root = tree.getroot()
includes = root.findall(tag_include)
if len(includes) > 0: # this is a hack that assumes if the schema
# includes others, it is the base schema for
# the namespace
return fn
return parent_schema
def _build_imports(self, schema_dir):
'''Given a directory of schemas, this builds a dictionary of schemas
that need to be imported under a wrapper schema in order to enable
validation. This returns a dictionary of the form
{namespace : path to schema}.
Keyword Arguments
schema_dir - a directory of schema files
'''
if not schema_dir:
return None
imports = defaultdict(list)
for top, dirs, files in os.walk(schema_dir):
for f in files:
if f.endswith('.xsd'):
fp = os.path.join(top, f)
target_ns = self._get_target_ns(fp)
imports[target_ns].append(fp)
for k, v in imports.iteritems():
if len(v) > 1:
base_schema = self._get_include_base_schema(v)
imports[k] = base_schema
else:
imports[k] = v[0]
return imports
def _build_wrapper_schema(self, import_dict):
'''Creates a wrapper schema that imports all namespaces defined by the
input dictionary. This enables validation of instance documents that
refer to multiple namespaces and schemas
Keyword Arguments
import_dict - a dictionary of the form {namespace : path to schema} that
will be used to build the list of xs:import statements
'''
schema_txt = '''<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://stix.mitre.org/tools/validator"
elementFormDefault="qualified"
attributeFormDefault="qualified"/>'''
root = etree.fromstring(schema_txt)
tag_import = "{%s}import" % (self.NS_XML_SCHEMA)
for ns, list_schemaloc in import_dict.iteritems():
schemaloc = list_schemaloc
schemaloc = schemaloc.replace("\\", "/")
attrib = {'namespace': ns, 'schemaLocation': schemaloc}
el_import = etree.Element(tag_import, attrib=attrib)
root.append(el_import)
return root
def _extract_schema_locations(self, root):
schemaloc_dict = {}
tag_schemaloc = "{%s}schemaLocation" % (self.NS_XML_SCHEMA_INSTANCE)
schemaloc = root.attrib[tag_schemaloc].split()
schemaloc_pairs = zip(schemaloc[::2], schemaloc[1::2])
for ns, loc in schemaloc_pairs:
schemaloc_dict[ns] = loc
return schemaloc_dict
def _build_result_dict(self, result, errors=None):
d = {}
d['result'] = result
if errors:
if not hasattr(errors, "__iter__"):
errors = [errors]
d['errors'] = errors
return d
def validate(self, doc, schemaloc=False):
'''Validates an instance document.
Returns a dictionary with a boolean 'result' entry and, when validation
fails, an 'errors' list of validation error strings.
Keyword Arguments
doc - a filename, file-like object, etree._Element, or
etree._ElementTree to be validated
'''
if not(schemaloc or self.__imports):
return self._build_result_dict(False,
"No schemas to validate "
"against! Try instantiating "
"XmlValidator with "
"use_schemaloc=True or setting the "
"schema_dir param in __init__")
if isinstance(doc, etree._Element):
root = doc
elif isinstance(doc, etree._ElementTree):
root = doc.getroot()
else:
try:
parser = etree.ETCompatXMLParser(huge_tree=True)
tree = etree.parse(doc, parser=parser)
root = tree.getroot()
except etree.XMLSyntaxError as e:
return self._build_result_dict(False, str(e))
if schemaloc:
try:
required_imports = self._extract_schema_locations(root)
except KeyError as e:
return self._build_result_dict(False,
"No schemaLocation attribute "
"set on instance document. "
"Unable to validate")
else:
required_imports = {}
# visit all nodes and gather schemas
for elem in root.iter():
for prefix, ns in elem.nsmap.iteritems():
schema_location = self.__imports.get(ns)
if schema_location:
required_imports[ns] = schema_location
if not required_imports:
return self._build_result_dict(False, "Unable to determine schemas "
"to validate against")
wrapper_schema_doc = self._build_wrapper_schema(import_dict=required_imports)
xmlschema = etree.XMLSchema(wrapper_schema_doc)
isvalid = xmlschema.validate(root)
if isvalid:
return self._build_result_dict(True)
else:
return self._build_result_dict(False,
[str(x) for x in xmlschema.error_log])
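# Example usage (a minimal sketch; the schema directory and instance file
# below are hypothetical paths):
#
# validator = XmlSchemaValidator(schema_dir="schemas/")
# result = validator.validate("instance.xml")
# if not result['result']:
# for error in result.get('errors', []):
# print error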
|
bschmoker/stix-validator
|
validators/xml_schema.py
|
Python
|
bsd-3-clause
| 7,809
|
[
"VisIt"
] |
4553c1d25bbcd9f002a6882029cf8abb37fc1f05fc885706882723d3f1d66ba2
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
import datetime
import os.path
from decimal import Decimal, DivisionByZero, InvalidOperation
import django
from django import template
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.http import QueryDict
from django.utils.html import escape
from django.utils.safestring import mark_safe
from karaage.common.models import COMMENT, LogEntry
from karaage.plugins import BasePlugin
register = template.Library()
class UrlWithParamNode(template.Node):
def __init__(self, copy, nopage, changes):
self.copy = copy
self.nopage = nopage
self.changes = []
for key, newvalue in changes:
newvalue = template.Variable(newvalue)
self.changes.append((key, newvalue,))
def render(self, context):
if 'request' not in context:
return ""
request = context['request']
result = {}
if self.copy:
result = request.GET.copy()
else:
result = QueryDict("", mutable=True)
if self.nopage:
result.pop("page", None)
for key, newvalue in self.changes:
newvalue = newvalue.resolve(context)
result[key] = newvalue
return "?" + result.urlencode()
@register.tag
def url_with_param(parser, token):
bits = token.split_contents()
qschanges = []
bits.pop(0)
copy = False
if bits[0] == "copy":
copy = True
bits.pop(0)
nopage = False
if bits[0] == "nopage":
nopage = True
bits.pop(0)
for i in bits:
try:
key, newvalue = i.split('=', 1)
qschanges.append((key, newvalue,))
except ValueError:
raise template.TemplateSyntaxError(
"Argument syntax wrong: should be key=value")
return UrlWithParamNode(copy, nopage, qschanges)
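# Template usage (an illustrative sketch; "sort" is a hypothetical key):
# {% url_with_param copy sort="name" %}
# copies the current request's query string and overrides the "sort" key,
# rendering e.g. "?page=2&sort=name". Omitting "copy" builds the query
# string from scratch; adding "nopage" drops any existing "page" key first.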
@register.inclusion_tag('karaage/common/comments.html', takes_context=True)
def comments(context, obj):
""" Render comments for obj. """
content_type = ContentType.objects.get_for_model(obj.__class__)
comment_list = LogEntry.objects.filter(
content_type=content_type,
object_id=obj.pk,
action_flag=COMMENT
)
return {
'obj': obj,
'comment_list': comment_list,
'is_admin': context['is_admin'],
}
@register.simple_tag
def comment_count(obj):
content_type = ContentType.objects.get_for_model(obj.__class__)
comment_list = LogEntry.objects.filter(
content_type=content_type,
object_id=obj.pk,
action_flag=COMMENT
)
return int(comment_list.count())
@register.simple_tag
def active(request, pattern):
import re
spec = '^%s/%s' % (request.META['SCRIPT_NAME'], pattern)
if re.search(spec, request.path):
return 'active'
return ''
@register.simple_tag
def date_filter(start, end):
result = QueryDict("", mutable=True)
today = datetime.date.today()
last_7 = (today - datetime.timedelta(days=7)).strftime('%Y-%m-%d')
last_90 = (today - datetime.timedelta(days=90)).strftime('%Y-%m-%d')
last_365 = (today - datetime.timedelta(days=365)).strftime('%Y-%m-%d')
view_7, view_90, view_365 = False, False, False
if end == today:
if start == today - datetime.timedelta(days=7):
view_7 = True
if start == today - datetime.timedelta(days=90):
view_90 = True
if start == today - datetime.timedelta(days=365):
view_365 = True
s = []
if view_7:
s.append('Last 7 Days')
else:
result["start"] = last_7
url = ".?" + result.urlencode()
s.append("""<a href="%s">Last 7 Days</a>""" % escape(url))
if view_90:
s.append("Last 90 Days")
else:
result["start"] = last_90
url = ".?" + result.urlencode()
s.append("""<a href="%s">Last 90 Days</a>""" % escape(url))
if view_365:
s.append("Last 365 Days")
else:
result["start"] = last_365
url = ".?" + result.urlencode()
s.append("""<a href="%s">Last 365 Days</a>""" % escape(url))
return mark_safe(" | ".join(s))
@register.simple_tag
def yes_no(boolean, true_msg='Yes', false_msg='No', reversed=''):
if reversed == 'reversed':
boolean = not boolean
if boolean:
return mark_safe("<span class='yes'>%s</span>" % escape(true_msg))
else:
return mark_safe("<span class='no'>%s</span>" % escape(false_msg))
class SearchFormNode(template.Node):
def __init__(self, post_url):
self.post_url = post_url
def render(self, context):
template_obj = template.loader.get_template(
'karaage/common/search_form.html')
context.push()
context['post_url'] = self.post_url
output = template_obj.render(context)
context.pop()
return output
@register.simple_tag
def divide(a, b):
two_places = Decimal(10) ** -2
try:
return (Decimal(a) / Decimal(b) * 100).quantize(two_places)
except (DivisionByZero, InvalidOperation):
return ''
def get_app_labels():
if django.VERSION < (1, 7):
for app in settings.INSTALLED_APPS:
_, _, label = app.rpartition(".")
if label is not None:
yield label
else:
from django.apps import apps
for config in apps.get_app_configs():
if isinstance(config, BasePlugin):
yield config.label
class ForEachAppIncludeNode(template.Node):
def __init__(self, template_name):
self.template_name = template.Variable(template_name)
def render(self, context):
template_name = self.template_name.resolve(context)
result = []
for label in get_app_labels():
template_path = os.path.join(label, template_name)
try:
template_obj = template.loader.get_template(template_path)
except template.TemplateDoesNotExist:
pass
else:
context.push()
output = template_obj.render(context.flatten())
result.append(output)
context.pop()
return "".join(result)
@register.tag
def for_each_app_include(parser, token):
try:
tag_name, template_name = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError(
"%r tag requires one arguments" % token.contents.split()[0])
return ForEachAppIncludeNode(template_name)
@register.simple_tag()
def is_for_each_app_include_empty(template_name):
for label in get_app_labels():
template_path = os.path.join(label, template_name)
try:
template.loader.get_template(template_path)
return False
except template.TemplateDoesNotExist:
pass
return True
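# Template usage (an illustrative sketch; the fragment name is hypothetical):
# {% for_each_app_include "usage_list.html" %}
# looks for "<app_label>/usage_list.html" in every plugin app and renders
# each template found, concatenating the output; templates that do not
# exist are silently skipped, mirroring is_for_each_app_include_empty above.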
|
brianmay/karaage
|
karaage/templatetags/karaage_tags.py
|
Python
|
gpl-3.0
| 7,686
|
[
"Brian"
] |
dd88956d8e36ea1e4c5dde1e5a5af680ab98ee6a7b0c4b2b56646f301346d79f
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 20 18:23:08 2015
@author: Eric Dodds
"""
import numpy as np
# try/except block gets around an issue on the cluster
try:
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
except ImportError:
print("Plotting unavailable.")
class StimSet(object):
def __init__(self, data, stimshape, batch_size=None):
"""Notice that stimshape and the length of a datum may be different, since the
data may be represented in a reduced form."""
self.data = data
self.stimshape = stimshape
self.stimsize = np.prod(stimshape)
self.nstims = data.shape[0]
self.datasize = data.shape[1]
self.batch_size = batch_size
def rand_stim(self, batch_size=None):
"""Select random inputs. Return an array of batch_size columns,
each of which is an input represented as a (column) vector. """
batch_size = batch_size or self.batch_size
veclength = np.prod(self.datasize)
X = np.zeros((veclength, batch_size))
for i in range(batch_size):
which = np.random.randint(self.nstims)
vec = self.data[which, ...]
if len(vec.shape) > 1:
vec = vec.reshape(self.stimsize)
X[:, i] = vec
return X
@staticmethod
def _stimarray(stims, stimshape, layout='sqrt'):
"""Returns an array of the stimuli reshaped to 2d and tiled."""
length, height = stimshape
assert length*height == stims.shape[1]
buf = 1 # buffer pixels between stimuli
nstim = stims.shape[0]
# n and m are respectively the numbers of rows and columns of stimuli in the array
n, m = StimSet._get_layout(nstim, length, height, layout)
array = 0.5*np.ones((buf+n*(length+buf), buf+m*(height+buf)))
k = 0
for i in range(m):
for j in range(n):
if k < nstim:
normfactor = np.max(np.abs(stims[k, :]))
hstart = buf+i*(height+buf)
lstart = buf+j*(length+buf)
thestim = stims[k, :].reshape(length, height)/normfactor
array[lstart:lstart+length, hstart:hstart+height] = thestim
k = k+1
return array.T
@staticmethod
def _get_layout(nstim, length=None, height=None, layout='square'):
"""Get a number of rows n and columns m corresponding to a given
layout type. Returns the argument if it's already a pair."""
if layout == 'square':
if np.floor(np.sqrt(nstim))**2 != nstim:
n = int(np.ceil(np.sqrt(nstim/2.)))
m = int(np.ceil(nstim/n))
else:
# nstim is a perfect square
m = int(np.sqrt(nstim))
n = m
elif layout == 'sqrt':
# if length != height, partly account for this so stimuli aren't so distorted.
# could remove the extra square root to fully accommodate
n = int(np.sqrt(nstim*np.sqrt(height/length)))
m = int(np.ceil(nstim/n))
else:
n, m = layout
return n, m
def stimarray(self, stims, stimshape=None, layout='sqrt'):
stimshape = stimshape or self.stimshape
return StimSet._stimarray(stims, stimshape, layout)
def modspec(self, elem):
"""Compute the modulation power spectrum."""
image = elem.reshape(self.stimshape)
fourier = np.fft.rfft2(image)
mid = int(fourier.shape[0]/2)
power = np.abs(fourier)**2
avgmag = np.array([(power[ii] + power[-ii])/2 for ii in range(mid)])
return avgmag
def stim_for_display(self, stim):
return stim.reshape(self.stimshape)
def tiledplot(self, stims, cmap='RdBu_r', layout='square',
aspect='auto', savestr=None):
nstim = stims.shape[0]
length, height = self.stimshape
n, m = StimSet._get_layout(nstim, length, height, layout)
gs = gridspec.GridSpec(m, n, wspace=0.0, hspace=0.0)
for ii in range(m):
for jj in range(n):
ax = plt.subplot(gs[ii, jj])
index = ii*n + jj
if index < nstim:
image = self.stim_for_display(stims[index])
image /= np.max(np.abs(image))
ax.imshow(image.T,
cmap=cmap, origin='lower', aspect=aspect,
interpolation='nearest',
clim=(-1.0, 1.0))
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
if savestr is not None:
plt.savefig(savestr, bbox_inches='tight')
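# Example usage (a minimal sketch with synthetic data; all shapes here are
# assumptions for illustration):
# data = np.random.randn(1000, 256) # 1000 flattened 16x16 stimuli
# stimset = StimSet(data, stimshape=(16, 16), batch_size=50)
# X = stimset.rand_stim() # (256, 50) batch of column vectors
# stimset.tiledplot(data[:25]) # tile the first 25 stimuli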
class ImageSet(StimSet):
"""Container for image data. The 'stimuli' are patches drawn randomly from
the set of images."""
def __init__(self, data, stimshape=(16, 16), batch_size=None,
buffer=20, patchwisenorm=False):
self.buffer = buffer
self.patchwisenorm = patchwisenorm
StimSet.__init__(self, data, stimshape, batch_size)
self.datasize = np.prod(stimshape) # size of a patch
def rand_stim(self, stimshape=None, batch_size=None):
"""
Select random patches from the image data. Returns data array of
batch_size columns, each of which is an unrolled image patch of size
prod(stimshape).
"""
batch_size = batch_size or self.batch_size or 100
length, height = stimshape or self.stimshape
# extract subimages at random from images array to make data array X
X = np.zeros((length*height, batch_size))
for i in range(batch_size):
which = np.random.randint(self.data.shape[-1])
nrows, ncols = self.data[:, :, which].shape
row = self.buffer + int(np.ceil((nrows-length-2*self.buffer)*np.random.rand()))
col = self.buffer + int(np.ceil((ncols-height-2*self.buffer)*np.random.rand()))
animage = self.data[row:row+length,
col:col+height,
which]
animage = animage.reshape(self.stimsize)
if self.patchwisenorm:
# normalize image
animage -= animage.mean()
animage /= animage.std()
X[:, i] = animage
return X
class PCvecSet(StimSet):
"""Principal component vector representations of arbitrary data."""
def __init__(self, data, stimshape, pca, batch_size=None):
self.pca = pca
self.datasize = data.shape[1]
StimSet.__init__(self, data, stimshape, batch_size)
def stimarray(self, stims, layout='sqrt'):
reconst = self.pca.inverse_transform(stims)
return StimSet.stimarray(self, reconst, self.stimshape, layout)
def modspec(self, elem):
return StimSet.modspec(self, self.pca.inverse_transform(elem))
def stim_for_display(self, stim):
return StimSet.stim_for_display(self, self.pca.inverse_transform(stim))
class SpectroPCSet(PCvecSet):
"""A PCvecSet with some extra functionality specifically for spectrograms."""
def __init__(self, data, stimshape, pca, batch_size=None,
freqs=None, tbin_width=None):
"""
Parameters:
freqs : array_like of the frequencies sampled, in Hz
tbin_width : time in ms separating centers of adjacent time bins
"""
# spectrogram parameters default to those in Carlson, Ming, & DeWeese 2012
self.tbin_width = tbin_width or 8
self.freqs = freqs or np.logspace(2, np.log10(16000/4), 256)
PCvecSet.__init__(self, data, stimshape, pca, batch_size)
def show_stim(self, stim, cmap='RdBu_r', savestr=None, cbar=False):
reshaped = self.stim_for_display(stim)
tlength, nfreqs = self.stimshape
vmax = np.max(reshaped)
plt.imshow(reshaped.T, interpolation='nearest',
cmap=cmap, aspect='auto', origin='lower',
vmin=-vmax, vmax=vmax)
plt.ylabel('Frequency')
plt.xlabel('Time (ms, bin = '+str(self.tbin_width)+' ms)')
middlef = str(int(self.freqs[int(nfreqs/2)]))
middlet = str(int(self.tbin_width*(tlength+1)/2))
endtime = (tlength + 2)*self.tbin_width
plt.xticks([0, int(tlength/2)+1, tlength-1], ['0', middlet, endtime])
plt.yticks([0, int(nfreqs/2), nfreqs-1],
[str(self.freqs[0])+' Hz', middlef+' Hz', str(int(self.freqs[-1]/1000)) + ' kHz'])
if cbar:
plt.colorbar()
if savestr is not None:
plt.savefig(savestr, bbox_inches='tight')
plt.show()
def show_set(self, stims, cmap='RdBu_r', layout=(4, 5), savestr=None):
"""
Parameters:
stims : (number of stim, flattened stim length) stimuli to plot
layout : (number of rows, number of columns) per figure
"""
tlength, nfreqs = self.stimshape
per_figure = np.prod(layout)
nstim = stims.shape[0]
plt.figure()
for ii in range(nstim):
if ii % per_figure == 0 and ii > 0:
if savestr is not None:
plt.tight_layout()
plt.subplots_adjust(wspace=.05, hspace=.05)
plt.savefig(savestr+str(int(ii/per_figure)), bbox_inches='tight')
plt.figure()
plt.subplot(layout[0], layout[1], (ii % per_figure)+1)
plt.imshow(self.stim_for_display(stims[ii]).T, interpolation='nearest',
cmap=cmap, aspect='auto', origin='lower')
if ii % per_figure == per_figure - layout[1]:
# label axes for bottom left example
plt.ylabel('Frequency')
plt.xlabel('Time (ms, bin = '+str(self.tbin_width/2)+' ms)')
middlef = str(int(self.freqs[len(self.freqs)//2]))
middlet = str(int(self.tbin_width/2*(tlength+1)/2))
endtime = str(int((tlength + 2)*self.tbin_width/2))
plt.xticks([0, int(tlength/2)+1, tlength-1], ['0', middlet, endtime])
plt.yticks([0, int(nfreqs/2), nfreqs-1],
[str(int(self.freqs[0]))+' Hz', middlef+' Hz', str(int(self.freqs[-1]/1000))+ ' kHz'])
else:
plt.gca().get_yaxis().set_visible(False)
plt.gca().get_xaxis().set_visible(False)
plt.tight_layout()
plt.subplots_adjust(wspace=.05, hspace=.05)
if savestr is not None:
plt.savefig(savestr, bbox_inches='tight')
plt.show()
class WaveformSet(StimSet):
"""1D signals, especially audio, of uniform length."""
def tiledplot(self, stims):
"""Tiled plots of the given stumili. Zeroth index is over stimuli.
Kind of slow, expect about 10s for 100 plots."""
nstim = stims.shape[0]
plotrows = int(np.sqrt(nstim))
plotcols = int(np.ceil(nstim/plotrows))
f, axes = plt.subplots(plotrows, plotcols, sharex=True, sharey=True)
for ii in range(nstim):
axes.flatten()[ii].plot(stims[ii])
f.subplots_adjust(hspace=0, wspace=0)
plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)
plt.setp([a.get_yticklabels() for a in f.axes[:-1]], visible=False)
class WaveformPCSet(PCvecSet, WaveformSet):
"""Specifically for PCA reps of waveforms, i.e., 1D time series."""
def tiledplot(self, stims):
WaveformSet.tiledplot(self, self.pca.inverse_transform(stims))
class ToySparseSet(StimSet):
"""Gaussian sources linearly mixed by laplacian coefficients,
optiohally with isotropic gaussian noise added."""
def __init__(self, dim=200, nsource=None, scale=1,
nonneg=False, nstims=300000, rng=None,
batch_size=100, white=False, noise=0,
mean_center=True, ksparse=None):
"""
Parameters:
----------
dim: (int) dimensionality of data
nsource: (int) number of sources
scale: (float) scale of laplacians,
only relevant relative to noise scale
nonneg: if True, draw from exponential instead of laplace
nstims: (int) number of data to generate
rng: (np.random.RandomState or None) random source, defaults to RandomState(912017)
batch_size: (int) default batch size for rand_stim
white: if True, PCA-whiten after generation
noise: (float) scale of gaussian noise
mean_center: if True, center data after generation
ksparse: (int or None) number of sources generating each datum
"""
self.stimshape = [dim]
self.stimsize = dim
self.nstims = nstims
self.batch_size = batch_size
self.datasize = dim
if rng is None:
rng = np.random.RandomState(912017)
self.rng = rng
# generate data
if nsource is None:
nsource = dim
self.sources = rng.normal(size=(nsource, dim))
self.sources /= np.linalg.norm(self.sources, axis=1, keepdims=True)
self.scale = scale
coefficients = rng.laplace(scale=self.scale, size=(self.nstims, nsource))
self.nonneg = nonneg
if self.nonneg:
coefficients = np.abs(coefficients)
if ksparse is not None:
ktrues = np.array([True]*ksparse + [False]*(nsource - ksparse))
mask = [np.random.permutation(ktrues) for _ in range(nstims)]
mask = np.vstack(mask)
coefficients[~mask] = 0
self.data = coefficients.dot(self.sources)
if noise > 0:
self.data += rng.normal(scale=noise, size=self.data.shape)
if mean_center:
self.data -= self.data.mean(0, keepdims=True)
self.white = white
if white:
self.whiten()
stddev = self.data.std(0, keepdims=True)
self.data /= stddev
self.sparsity_parameter = noise**2 / stddev.mean()
def test_fit(self, model):
"""Given a model for the sources, calculate a distance metric from
true sources. The metric is the median of the normalized dot products
between each true source and the closest model source."""
fit = model/np.linalg.norm(model, axis=1, keepdims=True)
if self.white:
sources = self.sources.dot(self.zca_matrix)
sources /= np.linalg.norm(sources, axis=1, keepdims=True)
else:
sources = self.sources
allthedots = sources.dot(fit.T)
# how close is the closest model source to each true source?
bestfits = np.max(np.abs(allthedots), axis=1)
return np.median(bestfits)
def whiten(self, blocks=20000, eps=0.0001):
"""Assumes self.data already created, mean 0."""
cov = np.zeros([self.datasize, self.datasize])
nblocks = int(np.ceil(self.nstims / blocks))
for ind in range(nblocks):
X = self.data[blocks*ind:blocks*(ind+1)]
cov += X.T.dot(X)
eigvals, eigvecs = np.linalg.eigh(cov)
if np.any(np.isnan(eigvals)):
print('Warning: some nan eigenvalues found, replacing with small numbers.')
eigvals[np.isnan(eigvals)] = 0.9 * eps**2
if np.any(eigvals < 0):
print('Warning: some negative eigenvalues of covariance matrix found. Replacing with small numbers.')
eigvals[eigvals < 0] = 0.9 * eps**2
idx = np.argsort(eigvals)
svals = np.sqrt(eigvals[idx][::-1])
eigvecs = eigvecs[idx][::-1]
# do ZCA whitening
wm = np.diag(1./np.maximum(svals, eps))
self.zca_matrix = eigvecs.T.dot(wm).dot(eigvecs)
self.data = self.data.dot(self.zca_matrix)
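# Example usage (an illustrative sketch): build a small sparse-coding toy
# problem and score a random candidate dictionary against the true sources.
# The sizes below are arbitrary choices, not defaults from this module.
# toy = ToySparseSet(dim=50, nsource=60, nstims=10000, noise=0.05)
# batch = toy.rand_stim() # (50, 100) column-vector batch
# candidate = np.random.randn(60, 50) # hypothetical learned dictionary
# print(toy.test_fit(candidate)) # median best |dot| vs. true sources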
|
emdodds/DictLearner
|
StimSet.py
|
Python
|
mit
| 16,335
|
[
"Gaussian"
] |
54c43b7a0ccae919d07d77d2400e593358823611792e91eca97686f85bf7bf05
|
'''
NAME
NetCDF with Python
PURPOSE
To demonstrate how to read and write data with NetCDF files using
a NetCDF file from the NCEP/NCAR Reanalysis.
Plotting using Matplotlib and Basemap is also shown.
PROGRAMMER(S)
Chris Slocum
REVISION HISTORY
20140320 -- Initial version created and posted online
20140722 -- Added basic error handling to ncdump
Thanks to K.-Michael Aye for highlighting the issue
REFERENCES
netcdf4-python -- http://code.google.com/p/netcdf4-python/
NCEP/NCAR Reanalysis -- Kalnay et al. 1996
http://dx.doi.org/10.1175/1520-0477(1996)077<0437:TNYRP>2.0.CO;2
'''
import datetime as dt # Python standard library datetime module
import numpy as np
from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
def ncdump(nc_fid, verb=True):
'''
ncdump outputs dimensions, variables and their attribute information.
The information is similar to that of NCAR's ncdump utility.
ncdump requires a valid instance of Dataset.
Parameters
----------
nc_fid : netCDF4.Dataset
A netCDF4 dateset object
verb : Boolean
whether or not nc_attrs, nc_dims, and nc_vars are printed
Returns
-------
nc_attrs : list
A Python list of the NetCDF file global attributes
nc_dims : list
A Python list of the NetCDF file dimensions
nc_vars : list
A Python list of the NetCDF file variables
'''
def print_ncattr(key):
"""
Prints the NetCDF file attributes for a given key
Parameters
----------
key : unicode
a valid netCDF4.Dataset.variables key
"""
try:
print "\t\ttype:", repr(nc_fid.variables[key].dtype)
for ncattr in nc_fid.variables[key].ncattrs():
print '\t\t%s:' % ncattr,\
repr(nc_fid.variables[key].getncattr(ncattr))
except KeyError:
print "\t\tWARNING: %s does not contain variable attributes" % key
# NetCDF global attributes
nc_attrs = nc_fid.ncattrs()
if verb:
print "NetCDF Global Attributes:"
for nc_attr in nc_attrs:
print '\t%s:' % nc_attr, repr(nc_fid.getncattr(nc_attr))
nc_dims = [dim for dim in nc_fid.dimensions] # list of nc dimensions
# Dimension shape information.
if verb:
print "NetCDF dimension information:"
for dim in nc_dims:
print "\tName:", dim
print "\t\tsize:", len(nc_fid.dimensions[dim])
print_ncattr(dim)
# Variable information.
nc_vars = [var for var in nc_fid.variables] # list of nc variables
if verb:
print "NetCDF variable information:"
for var in nc_vars:
if var not in nc_dims:
print '\tName:', var
print "\t\tdimensions:", nc_fid.variables[var].dimensions
print "\t\tsize:", nc_fid.variables[var].size
print_ncattr(var)
return nc_attrs, nc_dims, nc_vars
nc_f = './CLM45_Micro_UW_SRF.2005120100.for.test.nc' # Your filename
nc_fid = Dataset(nc_f, 'r') # Dataset is the class behavior to open the file
# and create an instance of the ncCDF4 class
nc_attrs, nc_dims, nc_vars = ncdump(nc_fid)
# Extract data from NetCDF file
lats = nc_fid.variables['xlat'][:] # extract/copy the data
lons = nc_fid.variables['xlon'][:]
time = nc_fid.variables['time'][:]
rsds = nc_fid.variables['rsds'][:] # shape is time, lat, lon as shown above
time_idx = 237 # some random day in 2012
# Python and the reanalysis are slightly off in time so this fixes that problem
offset = dt.timedelta(hours=48)
# List of all times in the file as datetime objects
dt_time = [dt.date(1, 1, 1) + dt.timedelta(hours=t/20) - offset\
for t in time]
cur_time = dt_time[time_idx]
# Plot of global temperature on our random day
fig = plt.figure()
fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9)
# Setup the map. See http://matplotlib.org/basemap/users/mapsetup.html
# for other projections.
m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\
llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0)
m.drawcoastlines()
m.drawmapboundary()
# Make the plot continuous
test=rsds[0,:,:]
print test.shape
print rsds.shape
print lons.shape
rsds_cyclic, lons_cyclic = addcyclic(rsds[time_idx,:,:], lons)
# Shift the grid so lons go from -180 to 180 instead of 0 to 360.
rsds_cyclic, lons_cyclic = shiftgrid(180., rsds_cyclic, lons_cyclic, start=False)
# Create 2D lat/lon arrays for Basemap
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
# Transforms lat/lon into plotting coordinates for projection
x, y = m(lon2d, lat2d)
# Plot of rsds temperature with 11 contour intervals
cs = m.contourf(x, y, rsds_cyclic, 11, cmap=plt.cm.Spectral_r)
cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5)
cbar.set_label("%s (%s)" % (nc_fid.variables['rsds'].var_desc,\
nc_fid.variables['rsds'].units))
plt.title("%s on %s" % (nc_fid.variables['rsds'].var_desc, cur_time))
# Writing NetCDF files
# For this example, we will create two NetCDF4 files. One with the global rsds
# temperature departure from its value at Darwin, Australia. The other with
# the temperature profile for the entire year at Darwin.
darwin = {'name': 'Darwin, Australia', 'lat': -12.45, 'lon': 130.83}
# Find the nearest latitude and longitude for Darwin
lat_idx = np.abs(lats - darwin['lat']).argmin()
lon_idx = np.abs(lons - darwin['lon']).argmin()
# Simple example: temperature profile for the entire year at Darwin.
# Open a new NetCDF file to write the data to. For format, you can choose from
# 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'
w_nc_fid = Dataset('darwin_2012.nc', 'w', format='NETCDF4')
w_nc_fid.description = "NCEP/NCAR Reanalysis %s from its value at %s. %s" %\
(nc_fid.variables['rsds'].var_desc.lower(),\
darwin['name'], nc_fid.description)
# Using our previous dimension info, we can create the new time dimension
# Even though we know the size, we are going to set the size to unknown
w_nc_fid.createDimension('time', None)
w_nc_dim = w_nc_fid.createVariable('time', nc_fid.variables['time'].dtype,\
('time',))
# You can do this step yourself but someone else did the work for us.
for ncattr in nc_fid.variables['time'].ncattrs():
w_nc_dim.setncattr(ncattr, nc_fid.variables['time'].getncattr(ncattr))
# Assign the dimension data to the new NetCDF file.
w_nc_fid.variables['time'][:] = time
w_nc_var = w_nc_fid.createVariable('rsds', 'f8', ('time'))
w_nc_var.setncatts({'long_name': u"mean Daily Air temperature",\
'units': u"degK", 'level_desc': u'Surface',\
'var_desc': u"Air temperature",\
'statistic': u'Mean\nM'})
w_nc_fid.variables['rsds'][:] = rsds[time_idx, lat_idx, lon_idx]
w_nc_fid.close() # close the new file
# A plot of the temperature profile for Darwin in 2012
fig = plt.figure()
plt.plot(dt_time, rsds[:, lat_idx, lon_idx], c='r')
plt.plot(dt_time[time_idx], rsds[time_idx, lat_idx, lon_idx], c='b', marker='o')
plt.text(dt_time[time_idx], rsds[time_idx, lat_idx, lon_idx], cur_time,\
ha='right')
fig.autofmt_xdate()
plt.ylabel("%s (%s)" % (nc_fid.variables['rsds'].var_desc,\
nc_fid.variables['rsds'].units))
plt.xlabel("Time")
plt.title("%s from\n%s for %s" % (nc_fid.variables['rsds'].var_desc,\
darwin['name'], cur_time.year))
# Complex example: global temperature departure from its value at Darwin
departure = rsds[:, :, :] - rsds[:, lat_idx, lon_idx].reshape((time.shape[0],\
1, 1))
# Open a new NetCDF file to write the data to. For format, you can choose from
# 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'
w_nc_fid = Dataset('rsds.departure.sig995.2012.nc', 'w', format='NETCDF4')
w_nc_fid.description = "The departure of the NCEP/NCAR Reanalysis " +\
"%s from its value at %s. %s" %\
(nc_fid.variables['rsds'].var_desc.lower(),\
darwin['name'], nc_fid.description)
# Using our previous dimension information, we can create the new dimensions
data = {}
for dim in nc_dims:
w_nc_fid.createDimension(dim, nc_fid.variables[dim].size)
data[dim] = w_nc_fid.createVariable(dim, nc_fid.variables[dim].dtype,\
(dim,))
# You can do this step yourself but someone else did the work for us.
for ncattr in nc_fid.variables[dim].ncattrs():
data[dim].setncattr(ncattr, nc_fid.variables[dim].getncattr(ncattr))
# Assign the dimension data to the new NetCDF file.
w_nc_fid.variables['time'][:] = time
w_nc_fid.variables['lat'][:] = lats
w_nc_fid.variables['lon'][:] = lons
# Ok, time to create our departure variable
w_nc_var = w_nc_fid.createVariable('rsds_dep', 'f8', ('time', 'lat', 'lon'))
w_nc_var.setncatts({'long_name': u"mean Daily Air temperature departure",\
'units': u"degK", 'level_desc': u'Surface',\
'var_desc': u"Air temperature departure",\
'statistic': u'Mean\nM'})
w_nc_fid.variables['rsds_dep'][:] = departure
w_nc_fid.close() # close the new file
# Rounded maximum absolute value of the departure used for contouring
max_dep = np.round(np.abs(departure[time_idx, :, :]).max()+5., decimals=-1)
# Generate a figure of the departure for a single day
fig = plt.figure()
fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9)
m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\
llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0)
m.drawcoastlines()
m.drawmapboundary()
dep_cyclic, lons_cyclic = addcyclic(departure[time_idx, :, :], lons)
dep_cyclic, lons_cyclic = shiftgrid(180., dep_cyclic, lons_cyclic, start=False)
lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
x, y = m(lon2d, lat2d)
levels = np.linspace(-max_dep, max_dep, 11)
cs = m.contourf(x, y, dep_cyclic, levels=levels, cmap=plt.cm.bwr)
x, y = m(darwin['lon'], darwin['lat'])
plt.plot(x, y, c='c', marker='o')
plt.text(x, y, 'Darwin,\nAustralia', color='r', weight='semibold')
cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5)
cbar.set_label("%s departure (%s)" % (nc_fid.variables['rsds'].var_desc,\
nc_fid.variables['rsds'].units))
plt.title("Departure of Global %s from\n%s for %s" %\
(nc_fid.variables['rsds'].var_desc, darwin['name'], cur_time))
plt.show()
# Close original NetCDF file.
nc_fid.close()
|
CopyChat/Plotting
|
Python/PythonNetCDF.py
|
Python
|
gpl-3.0
| 10,821
|
[
"NetCDF"
] |
207fb05507aa130abd18ac8b821ba0b9a94deb48dd5b2ffe18bee9508ee304d7
|
#!/usr/bin/env python
# mirto_code_main.py
import numpy as np
from scipy.linalg import lu, solve
import cProfile, pstats
import mirto_code_configuration
import mirto_code_compute_F
import sys
import ctypes
# Empty class to store result
class mirto_state:
pass
class mirto_residuals:
pass
class mirto_history:
def __init__(self):
self.measurement_space_only = []
class mirto_norm:
def __init__(self):
self.measurement_space_only = np.NAN
class mirto:
def __init__(self,datapath,oss):
# Load configuration parameters and Input data for retrieval
self.cx = mirto_code_configuration.mirto_config(datapath,oss)
self.obsErr = mirto_code_configuration.mirto_obsErr(self.cx)
self.apriori = mirto_code_configuration.mirto_apriori(self.cx)
self.state = mirto_state()
self.norm = mirto_norm()
self.hist = mirto_history()
self.residuals = mirto_residuals()
self.converge = 0.03 * len(self.cx.state_var_indx)
def compute_chi_square(self):
self.norm.measurement_space_only = (
np.dot(np.dot(self.residuals.yobs_minus_yhat.T,self.obsErr.SeInv),
self.residuals.yobs_minus_yhat/len(self.residuals.yobs_minus_yhat)) )
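# Written out, the statistic computed above is
# chi^2 = (y_obs - y_hat)^T Se^{-1} (y_obs - y_hat) / N,
# the observation-space residual norm weighted by the inverse observation-
# error covariance Se^{-1} and normalised by the number of channels N.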
def update_solution(self,fm):
use_mkl = True
psize = np.shape(fm.K.T)
if ( use_mkl ):
mkl = ctypes.cdll.LoadLibrary('./libmkl_rt.so')
cblas_dgemm = mkl.cblas_dgemm
CblasRowMajor = ctypes.c_int(101)
CblasNoTrans = ctypes.c_int(111)
c_double_p = ctypes.POINTER(ctypes.c_double)
KtSeInv = np.zeros(shape=(psize[0],psize[1]))
cblas_dgemm(CblasRowMajor,CblasNoTrans,CblasNoTrans,
ctypes.c_int(psize[0]),ctypes.c_int(psize[1]),
ctypes.c_int(psize[1]),ctypes.c_double(1.0),
fm.K.T.ctypes.data_as(c_double_p),
ctypes.c_int(psize[1]),
self.obsErr.SeInv.ctypes.data_as(c_double_p),
ctypes.c_int(psize[1]), ctypes.c_double(0.0),
KtSeInv.ctypes.data_as(c_double_p),
ctypes.c_int(psize[1]))
else:
KtSeInv = np.dot(fm.K.T,self.obsErr.SeInv)
KtSeInvK = np.dot(KtSeInv,fm.K)
A = (KtSeInvK+(1.0+self.cx.gamma)*self.state.SaInv_ret)
dx = (self.state.xhat-self.state.xa)
d = (np.dot(KtSeInv,self.residuals.yobs_minus_yhat) -
np.dot(self.state.SaInv_ret,dx))
# Use iterative LU decomposition to determine the solution
# First iteration
L,U = lu(A,permute_l=True)
y = solve(L,d)
x = solve(U,y)
# Second iteration
r = d - np.dot(A,x)
dz = solve(L,r)
ddx = solve(U,dz)
# Solution
totx = x+ddx
self.state.xhat_new = self.state.xhat+totx
self.state.d2 = np.dot(totx.T,d)
def invert(self,profiling=None):
if ( profiling is not None ):
if ( profiling == True ):
pr = cProfile.Profile()
pr.enable()
# Assign values for the state vector
xhat = self.apriori.x0
# Iteration of the Gauss-Newton method to find the zero of the first
# derivative of the Gaussian PDF
Iteration = 0
# Initialize state vector per previous iteration to apriori.xa
# (same as apriori.X0)
xhat_pre = self.apriori.xa
fm = mirto_code_compute_F.radiance(self.cx)
jj = self.cx.state_var_indx
self.state.SaInv_ret = self.apriori.SaInv[jj,:]
self.state.SaInv_ret = self.state.SaInv_ret[:,jj]
self.state.xa = self.apriori.xa[jj]
while (Iteration < self.cx.Iteration_limit):
#
# Compute F (forward model)
#
# print('... Compute_F')
fm.compute_forward(xhat)
fm.estimate_K(xhat)
fm.compute_residuals(self.residuals)
# Subselect from forward model output the channels used for the
# inversion
ii = self.cx.indx
fm.K = fm.K[ii,:]
fm.F = fm.F[ii]
fm.wnF = fm.wnF[ii]
fm.wnK = fm.wnK[ii]
# Subselect from forward model output (Jacobians) and from
# whole apriori the variables actually retrieved.
fm.K = fm.K[:,jj]
self.state.xhat = xhat[jj]
self.state.xhat_pre = xhat_pre[jj]
self.compute_chi_square()
self.update_solution(fm)
if ( Iteration > 0 ):
ref_norm = min(self.hist.measurement_space_only)
print ('Actual value of Norm : ',self.norm.measurement_space_only)
print ('Last low value of Norm : ',ref_norm)
if (self.norm.measurement_space_only <= ref_norm):
self.cx.gamma = self.cx.gamma/2.0
xxdel = (100.0 * (ref_norm - self.norm.measurement_space_only) /
self.norm.measurement_space_only)
print('UWPHYSRET residuals decreased by ',xxdel,'%')
else:
self.cx.gamma = self.cx.gamma*5.0
xxdel = (100.0 * (self.norm.measurement_space_only - ref_norm) /
self.norm.measurement_space_only)
print('UWPHYSRET residuals increased by ',xxdel,'%')
xhat[jj] = self.state.xhat
if (abs(self.state.d2) < self.converge):
print('**** CONVERGED!!! ****')
break
else:
if (Iteration < self.cx.Iteration_limit):
# assign new value to the solution and continue the iterative solution
# Note: Don't update xhat if this is the final iteration or it
# will overwrite the solution
xhat_pre[jj] = self.state.xhat
xhat[jj] = self.state.xhat_new
self.hist.measurement_space_only.append(self.norm.measurement_space_only)
Iteration = Iteration + 1
print ('Iteration = ', Iteration)
print ('New Gamma = ', self.cx.gamma)
print ('Distance = ', abs(self.state.d2))
print ('Wanted = ', self.converge)
if ( profiling is not None ):
if ( profiling == True ):
pr.disable()
s = ()
try:
# Default to Python 3
import io
s = io.StringIO()
ps = pstats.Stats(pr, stream=s)
ps.strip_dirs().sort_stats('cumulative').print_stats()
print(s.getvalue())
except:
# Maybe this is Python 2?
import StringIO
s = StringIO.StringIO()
ps = pstats.Stats(pr, stream=s)
ps.strip_dirs().sort_stats('cumulative').print_stats()
print(s.getvalue())
return(self.state)
if ( __name__ == '__main__' ):
sys.path.append('/home/ggiuliani/pythoncode/oss')
from oss4SHIS import oss4SHIS
from os import path
import time
#
# OSS init input
#
solar = 'solar_irradiances.nc'
precomputed = 'leo.cris.0.05.nc'
datapath = '/home/ggiuliani/pythoncode/data'
oss = oss4SHIS(path.join(datapath,solar),path.join(datapath,precomputed))
# This part of code must be repeated for each input profile in data
# directory. Must find a way to have names here. Probably the errors
# also can be preloaded.
start = time.clock()
profiling = True
check_output = False
inverter = mirto(datapath,oss)
solution = inverter.invert(profiling)
print('Elapsed Time in the Inversion: ',time.clock() - start,' s')
if ( check_output ):
print('Profiles')
print('Pressure Temperature Water Vapor O3')
for i in range(0,61):
print(inverter.cx.pressure_grid[i],solution.xhat[i],
solution.xhat[i+61],solution.xhat[i+122])
print('Skin temperature : ',solution.xhat[183])
print('Surface Emissivity : ')
for i in range(184,189):
print(solution.xhat[i])
print('Value of distance : ',solution.d2)
try:
import pylab as p
except:
print('Cannot use pylab. Is it installed?')
sys.exit()
x = solution.xhat[0:61]
y = np.log(inverter.cx.pressure_grid[0:61])
p.ylabel("Log Pressure")
p.xlabel("Temperature")
p.plot(x,y)
p.plt.gca().invert_yaxis()
p.show()
p.ylabel("Pressure")
p.xlabel("Mixing Ratio")
x = np.exp(solution.xhat[61:122])
y = inverter.cx.pressure_grid[0:61]
p.plot(x,y)
p.plt.gca().invert_yaxis()
p.show()
|
graziano-giuliani/pythoncode
|
mirto/mirto_code_main.py
|
Python
|
mit
| 8,009
|
[
"Gaussian"
] |
9bfcafce94f9def02a7614b1823f77afa57085bb9dc750c233e421d0614f798c
|
# Imports needed by the code below but missing from the original header:
from abc import ABCMeta, abstractmethod
import numpy as np
import nltk
import sklearn.cross_validation as cv
import sklearn.dummy as dummy
from sklearn.mixture import GMM
from sklearn.hmm import GMMHMM
from sklearn import linear_model, naive_bayes
import collections
import itertools
import pandas as pd
from testResults import TestResults
from counters import *
import utils as utils
class Model():
__metaclass__ = ABCMeta
params = {}
isSklearn = True
def __init__(self, params, verbose=False):
self.params = params
self.verbose = verbose
def printv(self, arg, title=None):
if self.verbose:
if title is not None:
print title
print arg
@property
def name(self):
return self._name
@abstractmethod
def _train(self, data):
"""Returns a trained model."""
pass
def _test(self, model, testData, resultObj):
"""Compares predictions made by specified model against test data.
Returns a TestResults object.
"""
# restrict test data to principal component features
features = self.params['features']
test = np.array(testData[features])
# predict a dialog sequence using test data
# sklearn counts from 0 so add 1...
if self.isSklearn:
pred = [int(r) + 1 for r in list(model.predict(test))]
else:
pred = [int(r) for r in list(model.predict(test))]
# extract true ratings from test data
true = [int(rating) for rating in testData['rating'].values.tolist()]
resultObj.compare(true, pred)
return resultObj
def loocv(self, data):
"""Leave-one-out cross validation using given data.
Returns a TestResults object, where results are averages from the
cross validation steps.
"""
mask = cv.LeaveOneLabelOut(data['label'].values)
results = TestResults(self.name, verbose=self.verbose)
for trainMask, testMask in mask:
# training
trainingData = data.loc[trainMask]
self.printv(trainingData, "training data:")
model = self._train(trainingData)
# testing
testData = data.loc[testMask]
self.printv(testData, "test data:")
# leave p labels out
for label, testGroup in testData.groupby("label"):
results = self._test(model, testGroup, results)
return results
def kfoldscv(self, data, folds):
"""K-folds cross validation using given data and number of folds.
Returns a TestResults object, where results are averages from the
cross validation steps.
"""
results = TestResults(self.name, verbose=self.verbose)
labels = list(np.unique(data['label'].values))
for tr, te in cv.KFold(len(labels), n_folds=folds):
trainD = data[data['label'].isin([labels[i] for i in tr])]
testD = data[data['label'].isin([labels[i] for i in te])]
self.printv(trainD, "training data:")
self.printv(testD, "test data:")
model = self._train(trainD)
for label, testGroup in testD.groupby("label"):
results = self._test(model, testGroup, results)
return results
def setFeatures(self, features):
self.params['features'] = features
class Dummy(Model):
_name = "dummy"
def _train(self, data):
if 'constant' in self.params.keys():
model = dummy.DummyClassifier(strategy=self.params['strategy'],
constant=self.params['constant'])
else:
model = dummy.DummyClassifier(strategy=self.params['strategy'])
d = np.array(zip(*[data[f].values for f in self.params['features']]))
y = np.array(data['rating'].values)
model.fit(d, y)
return model
class Gmm(Model):
"""A Gaussian mixture model.
Parameters are number of mixture components (num_mixc) and
covariance type (cov_type). Example:
model = Gmm(params = {num_mixc: 3,
cov_type:'diag'})
"""
_name = "GMM"
def _train(self, data):
"""Trains a Gaussian mixture model, using the sklearn implementation."""
# parameters
features = self.params['features']
num_mixc = self.params['num_mixc']
cov_type = self.params['cov_type']
# prepare data shape
d = np.array(zip(*[data[f].values for f in features]))
# choose high number of EM-iterations to get constant results
gmm = GMM(num_mixc, cov_type, n_iter=300)
gmm.fit(d)
return gmm
class Gmmhmm(Model):
"""A hidden Markov model with Gaussian mixture emissions.
Parameters are number of mixture components (num_mixc), covariance type
(cov_type) and states (states). One Gaussian mixture model is created for
each state. Example:
model = Gmmhmm(params = {'num_mixc': 3,
'cov_type': 'diag',
'states': [1,2,3,4,5]})
"""
_name = "GMM-HMM"
def _train(self, data):
"""Trains a GMMHMM model, using the sklearn implementation and maximum-
likelihood estimates as HMM parameters (Hmm.mle(...)).
"""
# parameters
features = self.params['features']
num_mixc = self.params['num_mixc']
cov_type = self.params['cov_type']
states = self.params['states']
# train one GMM for each state
mixes = list()
for state in states:
# select data with current state label
d = data[data.rating == state]
# prepare data shape
d = np.array(zip(*[d[f].values for f in features]))
# init GMM
gmm = GMM(num_mixc, cov_type)
# train
gmm.fit(d)
mixes.append(gmm)
# train HMM with init, trans, GMMs=mixes
mle = Hmm.mle(MatrixCounterNoEmissions, data, states)
model = GMMHMM(n_components=len(states), init_params='', gmms=mixes)
model.transmat_ = mle.transition
model.startprob_ = mle.initial
return model
class Ols(Model):
""" Ordinary least squares regression """
_name = "OLS"
isSklearn = True
def _train(self, data):
features = self.params['features']
X = np.array(zip(*[data[f].values for f in features]))
y = np.array(data['rating'])
model = linear_model.LinearRegression()
model.fit(X, y)
return model
class LogisticRegression(Model):
""" Logistic Regression """
_name = "Logit"
isSklearn = True
def _train(self, data):
features = self.params['features']
X = np.array(zip(*[data[f].values for f in features]))
y = np.array(data['rating'])
model = linear_model.LogisticRegression(class_weight=self.params['class_weight'])
model.fit(X, y)
return model
class GaussianNaiveBayes(Model):
""" Gaussian Naive Bayes... """
_name = "G-NB"
isSklearn = True
def _train(self, data):
features = self.params['features']
X = np.array(zip(*[data[f].values for f in features]))
y = np.array(data['rating'])
model = naive_bayes.GaussianNB()
model.fit(X, y)
return model
class MultinomialNaiveBayes(Model):
""" Multinomial Naive Bayes... """
_name = "M-NB"
isSklearn = True
def _train(self, data):
features = self.params['features']
X = np.array(zip(*[data[f].values for f in features]))
y = np.array(data['rating'])
model = naive_bayes.MultinomialNB(alpha=self.params['alpha'],
fit_prior=self.params['fit_prior'])
model.fit(X, y)
return model
class Hmm(Model):
"""A hidden Markov model, using the Nltk implementation and maximum-
likelihood parameter estimates.
"""
_name = "HMM"
isSklearn = False
Parameters = collections.namedtuple(
'Parameters', 'initial transition emission emissionAlph')
class NltkWrapper():
def __init__(self, states, mle):
self.model = nltk.HiddenMarkovModelTagger(mle.emissionAlph,
states,
mle.transition,
mle.emission,
mle.initial)
def predict(self, obs):
tagged = self.model.tag([tuple(o) for o in obs])
return [val[1] for val in tagged]
def _train(self, data):
features = self.params['features']
states = self.params['states']
# calculate maximum-likelihood parameter estimates
mle = Hmm.mle_multipleFeatures(NltkCounter, data, states, features, self.verbose)
# create nltk HMM
model = Hmm.NltkWrapper(states, mle)
return model
@staticmethod
def mle(counterClass, data, stateAlphabet, feature=False):
""" Calculate maximum likelihood estimates for the HMM parameters
transition probabilities, emission probabilities, and initial state
probabilities.
"""
f = feature is not False
states = utils.dfToSequences(data, ['rating'])
if f:
emissionAlphabet = pd.unique(data[feature].values.ravel())
emissions = utils.dfToSequences(data, [feature])
else:
emissionAlphabet = None
counter = counterClass(stateAlphabet, emissionAlphabet, states)
# count for each state sequence
for k, seq in enumerate(states):
if f: emi = emissions[k]
# for each state transition
for i, current in enumerate(seq):
# count(current, next, first, emission)
if f:
emission = emi[i]
else:
emission = False
next = seq[i + 1] if i < len(seq) - 1 else False
counter.count(i, current, next, emission)
return Hmm.Parameters(
initial=counter.getInitialProb(),
transition=counter.getTransitionProb(),
emission=counter.getEmissionProb(),
emissionAlph=emissionAlphabet
)
@staticmethod
def mle_multipleFeatures(counterClass, data, stateAlphabet, features, verbose=False):
""" Calculate maximum likelihood estimates of HMM parameters.
Parameters are transition probabilities, emission probabilities and
initial state probabilities.
This method allows specifying multiple features and combines multiple
emission features assuming conditional independence:
P(feat1=a & feat2=b|state) = P(feat1=a|state) * P(feat2=b|state)
"""
p = lambda feat: Hmm.mle(DictCounter, data, stateAlphabet, feat)
counter = counterClass(stateAlphabet, [], False)
# calculate conditional probabilities for each feature & corresponding
# emission alphabet entry..
# P(feat_i=emm_ij|state_k) forall: I features, J_i emissions, K states
# ps = {feature:emmission distribution}
emission_probs = [p(f).emission for f in features]
# calculate initial state probabilities, transition probabilities using
# first/any feature
mle_single = Hmm.mle(counterClass, data, stateAlphabet, features[0])
initial_probs = mle_single.initial
transition_probs = mle_single.transition
# combine the emission alphabets of all given features
emissionAlphabet = list()
for f in features:
emissionAlphabet.append(pd.unique(data[f].values.ravel()))
# calculate all emission combinations
# and according probabilities per state
for comb in list(itertools.product(*emissionAlphabet)):
counter.addEmissionCombination(tuple(comb))
for state in stateAlphabet:
# for each individual prob of each feature
for featNum, emission in enumerate(comb):
prob = emission_probs[featNum][state][emission]
counter.addCombinedEmissionProb(state, tuple(comb), prob)
if verbose:
print("Initial Probabilities")
printDictProbDist(initial_probs)
print("Transition Probabilities")
printCondDictProbDist(transition_probs)
print("Emission Probabilities")
printCondDictProbDist(counter.getCombinedEmissionProb())
return Hmm.Parameters(
initial=initial_probs,
transition=transition_probs,
emission=counter.getCombinedEmissionProb(),
emissionAlph=counter.getEmissionCombinations()
)
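# Worked illustration of the conditional-independence combination above
# (numbers are made up): with two features and a state s where
# P(feat1='x' | s) = 0.4 and P(feat2='y' | s) = 0.5, the combined
# emission tuple ('x', 'y') is assigned
#     P(feat1='x', feat2='y' | s) = 0.4 * 0.5 = 0.2
# and this product is what addCombinedEmissionProb accumulates per state.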
|
phihes/sds-models
|
sdsModels/models.py
|
Python
|
mit
| 12,892
|
[
"Gaussian"
] |
814db040e8acba233fe1cb7ef74b8f4772dd28a9069c609cbdd1ed3e80567afc
|
# Copyright: (c) 2013, James Cammarata <jcammarata@ansible.com>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import textwrap
import time
import yaml
from jinja2 import BaseLoader, Environment, FileSystemLoader
from yaml.error import YAMLError
import ansible.constants as C
from ansible import context
from ansible.cli import CLI
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy, get_collections_galaxy_meta_info
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.collection import build_collection, install_collections, publish_collection, \
validate_collection_name
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken, NoTokenSentinel
from ansible.module_utils.ansible_release import __version__ as ansible_version
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils import six
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
display = Display()
urlparse = six.moves.urllib.parse.urlparse
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
def __init__(self, args):
# Inject role into sys.argv[1] as a backwards compatibility step
if len(args) > 1 and args[1] not in ['-h', '--help', '--version'] and 'role' not in args and 'collection' not in args:
# TODO: Should we add a warning here and eventually deprecate the implicit role subcommand choice
# Remove this in Ansible 2.13 when we also remove -v as an option on the root parser for ansible-galaxy.
idx = 2 if args[1].startswith('-v') else 1
args.insert(idx, 'role')
self.api_servers = []
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def init_parser(self):
''' create an options parser for bin/ansible '''
super(GalaxyCLI, self).init_parser(
desc="Perform various Role and Collection related operations.",
)
# Common arguments that apply to more than 1 action
common = opt_help.argparse.ArgumentParser(add_help=False)
common.add_argument('-s', '--server', dest='api_server', help='The Galaxy API server URL')
common.add_argument('--api-key', dest='api_key',
help='The Ansible Galaxy API key which can be found at '
'https://galaxy.ansible.com/me/preferences. You can also use ansible-galaxy login to '
'retrieve this key or set the token for the GALAXY_SERVER_LIST entry.')
common.add_argument('-c', '--ignore-certs', action='store_true', dest='ignore_certs',
default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
opt_help.add_verbosity_options(common)
force = opt_help.argparse.ArgumentParser(add_help=False)
force.add_argument('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role or collection')
github = opt_help.argparse.ArgumentParser(add_help=False)
github.add_argument('github_user', help='GitHub username')
github.add_argument('github_repo', help='GitHub repository')
offline = opt_help.argparse.ArgumentParser(add_help=False)
offline.add_argument('--offline', dest='offline', default=False, action='store_true',
help="Don't query the galaxy API when creating roles")
default_roles_path = C.config.get_configuration_definition('DEFAULT_ROLES_PATH').get('default', '')
roles_path = opt_help.argparse.ArgumentParser(add_help=False)
roles_path.add_argument('-p', '--roles-path', dest='roles_path', type=opt_help.unfrack_path(pathsep=True),
default=C.DEFAULT_ROLES_PATH, action=opt_help.PrependListAction,
help='The path to the directory containing your roles. The default is the first '
'writable one configured via DEFAULT_ROLES_PATH: %s ' % default_roles_path)
# Add sub parser for the Galaxy role type (role or collection)
type_parser = self.parser.add_subparsers(metavar='TYPE', dest='type')
type_parser.required = True
# Add sub parser for the Galaxy collection actions
collection = type_parser.add_parser('collection', help='Manage an Ansible Galaxy collection.')
collection_parser = collection.add_subparsers(metavar='COLLECTION_ACTION', dest='action')
collection_parser.required = True
self.add_init_options(collection_parser, parents=[common, force])
self.add_build_options(collection_parser, parents=[common, force])
self.add_publish_options(collection_parser, parents=[common])
self.add_install_options(collection_parser, parents=[common, force])
# Add sub parser for the Galaxy role actions
role = type_parser.add_parser('role', help='Manage an Ansible Galaxy role.')
role_parser = role.add_subparsers(metavar='ROLE_ACTION', dest='action')
role_parser.required = True
self.add_init_options(role_parser, parents=[common, force, offline])
self.add_remove_options(role_parser, parents=[common, roles_path])
self.add_delete_options(role_parser, parents=[common, github])
self.add_list_options(role_parser, parents=[common, roles_path])
self.add_search_options(role_parser, parents=[common])
self.add_import_options(role_parser, parents=[common, github])
self.add_setup_options(role_parser, parents=[common, roles_path])
self.add_login_options(role_parser, parents=[common])
self.add_info_options(role_parser, parents=[common, roles_path, offline])
self.add_install_options(role_parser, parents=[common, force, roles_path])
def add_init_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
init_parser = parser.add_parser('init', parents=parents,
help='Initialize new {0} with the base structure of a '
'{0}.'.format(galaxy_type))
init_parser.set_defaults(func=self.execute_init)
init_parser.add_argument('--init-path', dest='init_path', default='./',
help='The path in which the skeleton {0} will be created. The default is the '
'current working directory.'.format(galaxy_type))
init_parser.add_argument('--{0}-skeleton'.format(galaxy_type), dest='{0}_skeleton'.format(galaxy_type),
default=C.GALAXY_ROLE_SKELETON,
help='The path to a {0} skeleton that the new {0} should be based '
'upon.'.format(galaxy_type))
obj_name_kwargs = {}
if galaxy_type == 'collection':
obj_name_kwargs['type'] = validate_collection_name
init_parser.add_argument('{0}_name'.format(galaxy_type), help='{0} name'.format(galaxy_type.capitalize()),
**obj_name_kwargs)
if galaxy_type == 'role':
init_parser.add_argument('--type', dest='role_type', action='store', default='default',
help="Initialize using an alternate role type. Valid types include: 'container', "
"'apb' and 'network'.")
def add_remove_options(self, parser, parents=None):
remove_parser = parser.add_parser('remove', parents=parents, help='Delete roles from roles_path.')
remove_parser.set_defaults(func=self.execute_remove)
remove_parser.add_argument('args', help='Role(s)', metavar='role', nargs='+')
def add_delete_options(self, parser, parents=None):
delete_parser = parser.add_parser('delete', parents=parents,
help='Removes the role from Galaxy. It does not remove or alter the actual '
'GitHub repository.')
delete_parser.set_defaults(func=self.execute_delete)
def add_list_options(self, parser, parents=None):
list_parser = parser.add_parser('list', parents=parents,
help='Show the name and version of each role installed in the roles_path.')
list_parser.set_defaults(func=self.execute_list)
list_parser.add_argument('role', help='Role', nargs='?', metavar='role')
def add_search_options(self, parser, parents=None):
search_parser = parser.add_parser('search', parents=parents,
help='Search the Galaxy database by tags, platforms, author and multiple '
'keywords.')
search_parser.set_defaults(func=self.execute_search)
search_parser.add_argument('--platforms', dest='platforms', help='list of OS platforms to filter by')
search_parser.add_argument('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
search_parser.add_argument('--author', dest='author', help='GitHub username')
search_parser.add_argument('args', help='Search terms', metavar='searchterm', nargs='*')
def add_import_options(self, parser, parents=None):
import_parser = parser.add_parser('import', parents=parents, help='Import a role')
import_parser.set_defaults(func=self.execute_import)
import_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import results.")
import_parser.add_argument('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch '
'(usually master)')
import_parser.add_argument('--role-name', dest='role_name',
help='The name the role should have, if different than the repo name')
import_parser.add_argument('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_'
'user/github_repo.')
def add_setup_options(self, parser, parents=None):
setup_parser = parser.add_parser('setup', parents=parents,
help='Manage the integration between Galaxy and the given source.')
setup_parser.set_defaults(func=self.execute_setup)
setup_parser.add_argument('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see '
'ID values.')
setup_parser.add_argument('--list', dest="setup_list", action='store_true', default=False,
help='List all of your integrations.')
setup_parser.add_argument('source', help='Source')
setup_parser.add_argument('github_user', help='GitHub username')
setup_parser.add_argument('github_repo', help='GitHub repository')
setup_parser.add_argument('secret', help='Secret')
def add_login_options(self, parser, parents=None):
login_parser = parser.add_parser('login', parents=parents,
help="Login to api.github.com server in order to use ansible-galaxy role sub "
"command such as 'import', 'delete', 'publish', and 'setup'")
login_parser.set_defaults(func=self.execute_login)
login_parser.add_argument('--github-token', dest='token', default=None,
help='Identify with github token rather than username and password.')
def add_info_options(self, parser, parents=None):
info_parser = parser.add_parser('info', parents=parents, help='View more details about a specific role.')
info_parser.set_defaults(func=self.execute_info)
info_parser.add_argument('args', nargs='+', help='role', metavar='role_name[,version]')
def add_install_options(self, parser, parents=None):
galaxy_type = 'collection' if parser.metavar == 'COLLECTION_ACTION' else 'role'
args_kwargs = {}
if galaxy_type == 'collection':
args_kwargs['help'] = 'The collection(s) name or path/url to a tar.gz collection artifact. This is ' \
'mutually exclusive with --requirements-file.'
ignore_errors_help = 'Ignore errors during installation and continue with the next specified ' \
'collection. This will not ignore dependency conflict errors.'
else:
args_kwargs['help'] = 'Role name, URL or tar file'
ignore_errors_help = 'Ignore errors and continue with the next specified role.'
install_parser = parser.add_parser('install', parents=parents,
help='Install {0}(s) from file(s), URL(s) or Ansible '
'Galaxy'.format(galaxy_type))
install_parser.set_defaults(func=self.execute_install)
install_parser.add_argument('args', metavar='{0}_name'.format(galaxy_type), nargs='*', **args_kwargs)
install_parser.add_argument('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help=ignore_errors_help)
install_exclusive = install_parser.add_mutually_exclusive_group()
install_exclusive.add_argument('-n', '--no-deps', dest='no_deps', action='store_true', default=False,
help="Don't download {0}s listed as dependencies.".format(galaxy_type))
install_exclusive.add_argument('--force-with-deps', dest='force_with_deps', action='store_true', default=False,
help="Force overwriting an existing {0} and its "
"dependencies.".format(galaxy_type))
if galaxy_type == 'collection':
install_parser.add_argument('-p', '--collections-path', dest='collections_path',
default=C.COLLECTIONS_PATHS[0],
help='The path to the directory containing your collections.')
install_parser.add_argument('-r', '--requirements-file', dest='requirements',
help='A file containing a list of collections to be installed.')
else:
install_parser.add_argument('-r', '--role-file', dest='role_file',
help='A file containing a list of roles to be imported.')
install_parser.add_argument('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
default=False,
help='Use tar instead of the scm archive option when packaging the role.')
def add_build_options(self, parser, parents=None):
build_parser = parser.add_parser('build', parents=parents,
help='Build an Ansible collection artifact that can be published to Ansible '
'Galaxy.')
build_parser.set_defaults(func=self.execute_build)
build_parser.add_argument('args', metavar='collection', nargs='*', default=('.',),
help='Path to the collection(s) directory to build. This should be the directory '
'that contains the galaxy.yml file. The default is the current working '
'directory.')
build_parser.add_argument('--output-path', dest='output_path', default='./',
help='The path in which the collection artifact is built. The default is the current '
'working directory.')
def add_publish_options(self, parser, parents=None):
publish_parser = parser.add_parser('publish', parents=parents,
help='Publish a collection artifact to Ansible Galaxy.')
publish_parser.set_defaults(func=self.execute_publish)
publish_parser.add_argument('args', metavar='collection_path',
help='The path to the collection tarball to publish.')
publish_parser.add_argument('--no-wait', dest='wait', action='store_false', default=True,
help="Don't wait for import validation results.")
publish_parser.add_argument('--import-timeout', dest='import_timeout', type=int, default=0,
help="The time to wait for the collection import process to finish.")
def post_process_args(self, options):
options = super(GalaxyCLI, self).post_process_args(options)
display.verbosity = options.verbosity
return options
def run(self):
super(GalaxyCLI, self).run()
self.galaxy = Galaxy()
def server_config_def(section, key, required):
return {
'description': 'The %s of the %s Galaxy server' % (key, section),
'ini': [
{
'section': 'galaxy_server.%s' % section,
'key': key,
}
],
'env': [
{'name': 'ANSIBLE_GALAXY_SERVER_%s_%s' % (section.upper(), key.upper())},
],
'required': required,
}
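# Hedged illustration (hypothetical section name): for a GALAXY_SERVER_LIST
# entry named 'release', server_config_def('release', 'url', True) returns
# roughly:
#
#     {'description': 'The url of the release Galaxy server',
#      'ini': [{'section': 'galaxy_server.release', 'key': 'url'}],
#      'env': [{'name': 'ANSIBLE_GALAXY_SERVER_RELEASE_URL'}],
#      'required': True}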
server_def = [('url', True), ('username', False), ('password', False), ('token', False),
('auth_url', False)]
config_servers = []
# Need to filter out empty strings or non truthy values as an empty server list env var is equal to [''].
server_list = [s for s in C.GALAXY_SERVER_LIST or [] if s]
for server_key in server_list:
# Config definitions are looked up dynamically based on the C.GALAXY_SERVER_LIST entry. We look up the
# section [galaxy_server.<server>] for the values url, username, password, and token.
config_dict = dict((k, server_config_def(server_key, k, req)) for k, req in server_def)
defs = AnsibleLoader(yaml.safe_dump(config_dict)).get_single_data()
C.config.initialize_plugin_configuration_definitions('galaxy_server', server_key, defs)
server_options = C.config.get_plugin_options('galaxy_server', server_key)
# auth_url is used to create the token, but not directly by GalaxyAPI, so
# it doesn't need to be passed as kwarg to GalaxyApi
auth_url = server_options.pop('auth_url', None)
token_val = server_options['token'] or NoTokenSentinel
username = server_options['username']
# default case if no auth info is provided.
server_options['token'] = None
if username:
server_options['token'] = BasicAuthToken(username,
server_options['password'])
else:
if token_val:
if auth_url:
server_options['token'] = KeycloakToken(access_token=token_val,
auth_url=auth_url,
validate_certs=not context.CLIARGS['ignore_certs'])
else:
# The galaxy v1 / github / django / 'Token'
server_options['token'] = GalaxyToken(token=token_val)
config_servers.append(GalaxyAPI(self.galaxy, server_key, **server_options))
cmd_server = context.CLIARGS['api_server']
cmd_token = GalaxyToken(token=context.CLIARGS['api_key'])
if cmd_server:
# Cmd args take precedence over the config entry but first check if the arg was a name and use that config
# entry, otherwise create a new API entry for the server specified.
config_server = next((s for s in config_servers if s.name == cmd_server), None)
if config_server:
self.api_servers.append(config_server)
else:
self.api_servers.append(GalaxyAPI(self.galaxy, 'cmd_arg', cmd_server, token=cmd_token))
else:
self.api_servers = config_servers
# Default to C.GALAXY_SERVER if no servers were defined
if len(self.api_servers) == 0:
self.api_servers.append(GalaxyAPI(self.galaxy, 'default', C.GALAXY_SERVER, token=cmd_token))
context.CLIARGS['func']()
@property
def api(self):
return self.api_servers[0]
def _parse_requirements_file(self, requirements_file, allow_old_format=True):
"""
Parses an Ansible requirements.yml file and returns all the roles and/or collections defined in it. There are two
requirements file formats:
# v1 (roles only)
- src: The source of the role, required if include is not set. Can be Galaxy role name, URL to a SCM repo or tarball.
name: Downloads the role to the specified name, defaults to Galaxy name from Galaxy or name of repo if src is a URL.
scm: If src is a URL, specify the SCM. Only git and hg are supported, and it defaults to git.
version: The version of the role to download. Can also be tag, commit, or branch name and defaults to master.
include: Path to additional requirements.yml files.
# v2 (roles and collections)
---
roles:
# Same as v1 format just under the roles key
collections:
- namespace.collection
- name: namespace.collection
version: version identifier, multiple identifiers are separated by ','
source: the URL or a predefined source name that relates to C.GALAXY_SERVER_LIST
:param requirements_file: The path to the requirements file.
:param allow_old_format: Will fail if a v1 requirements file is found and this is set to False.
:return: a dict containing the roles and collections found in the requirements file.
"""
requirements = {
'roles': [],
'collections': [],
}
b_requirements_file = to_bytes(requirements_file, errors='surrogate_or_strict')
if not os.path.exists(b_requirements_file):
raise AnsibleError("The requirements file '%s' does not exist." % to_native(requirements_file))
display.vvv("Reading requirement file at '%s'" % requirements_file)
with open(b_requirements_file, 'rb') as req_obj:
try:
file_requirements = yaml.safe_load(req_obj)
except YAMLError as err:
raise AnsibleError(
"Failed to parse the requirements yml at '%s' with the following error:\n%s"
% (to_native(requirements_file), to_native(err)))
if file_requirements is None:
raise AnsibleError("No requirements found in file '%s'" % to_native(requirements_file))
def parse_role_req(requirement):
if "include" not in requirement:
role = RoleRequirement.role_yaml_parse(requirement)
display.vvv("found role %s in yaml file" % to_text(role))
if "name" not in role and "src" not in role:
raise AnsibleError("Must specify name or src for role")
return [GalaxyRole(self.galaxy, self.api, **role)]
else:
b_include_path = to_bytes(requirement["include"], errors="surrogate_or_strict")
if not os.path.isfile(b_include_path):
raise AnsibleError("Failed to find include requirements file '%s' in '%s'"
% (to_native(b_include_path), to_native(requirements_file)))
with open(b_include_path, 'rb') as f_include:
try:
return [GalaxyRole(self.galaxy, self.api, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))]
except Exception as e:
raise AnsibleError("Unable to load data from include requirements file: %s %s"
% (to_native(requirements_file), to_native(e)))
if isinstance(file_requirements, list):
# Older format that contains only roles
if not allow_old_format:
raise AnsibleError("Expecting requirements file to be a dict with the key 'collections' that contains "
"a list of collections to install")
for role_req in file_requirements:
requirements['roles'] += parse_role_req(role_req)
else:
# Newer format with a collections and/or roles key
extra_keys = set(file_requirements.keys()).difference(set(['roles', 'collections']))
if extra_keys:
raise AnsibleError("Expecting only 'roles' and/or 'collections' as base keys in the requirements "
"file. Found: %s" % (to_native(", ".join(extra_keys))))
for role_req in file_requirements.get('roles', []):
requirements['roles'] += parse_role_req(role_req)
for collection_req in file_requirements.get('collections', []):
if isinstance(collection_req, dict):
req_name = collection_req.get('name', None)
if req_name is None:
raise AnsibleError("Collections requirement entry should contain the key name.")
req_version = collection_req.get('version', '*')
req_source = collection_req.get('source', None)
if req_source:
# Try and match up the requirement source with our list of Galaxy API servers defined in the
# config, otherwise create a server with that URL without any auth.
req_source = next(iter([a for a in self.api_servers if req_source in [a.name, a.api_server]]),
GalaxyAPI(self.galaxy, "explicit_requirement_%s" % req_name, req_source))
requirements['collections'].append((req_name, req_version, req_source))
else:
requirements['collections'].append((collection_req, '*', None))
return requirements
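# Hedged example of a v2 requirements file accepted by the parser above
# (all names and URLs are hypothetical):
#
#     roles:
#       - name: my_namespace.my_role
#         version: "1.2.0"
#     collections:
#       - my_namespace.my_collection
#       - name: my_namespace.other_collection
#         version: ">=1.0.0,<2.0.0"
#         source: https://galaxy.example.com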
@staticmethod
def exit_without_ignore(rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not context.CLIARGS['ignore_errors']:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
@staticmethod
def _display_role_info(role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in GalaxyCLI.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in GalaxyCLI.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
@staticmethod
def _resolve_path(path):
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
@staticmethod
def _get_skeleton_galaxy_yml(template_path, inject_data):
with open(to_bytes(template_path, errors='surrogate_or_strict'), 'rb') as template_obj:
meta_template = to_text(template_obj.read(), errors='surrogate_or_strict')
galaxy_meta = get_collections_galaxy_meta_info()
required_config = []
optional_config = []
for meta_entry in galaxy_meta:
config_list = required_config if meta_entry.get('required', False) else optional_config
value = inject_data.get(meta_entry['key'], None)
if not value:
meta_type = meta_entry.get('type', 'str')
if meta_type == 'str':
value = ''
elif meta_type == 'list':
value = []
elif meta_type == 'dict':
value = {}
meta_entry['value'] = value
config_list.append(meta_entry)
link_pattern = re.compile(r"L\(([^)]+),\s+([^)]+)\)")
const_pattern = re.compile(r"C\(([^)]+)\)")
def comment_ify(v):
if isinstance(v, list):
v = ". ".join([l.rstrip('.') for l in v])
v = link_pattern.sub(r"\1 <\2>", v)
v = const_pattern.sub(r"'\1'", v)
return textwrap.fill(v, width=117, initial_indent="# ", subsequent_indent="# ", break_on_hyphens=False)
def to_yaml(v):
return yaml.safe_dump(v, default_flow_style=False).rstrip()
env = Environment(loader=BaseLoader)
env.filters['comment_ify'] = comment_ify
env.filters['to_yaml'] = to_yaml
template = env.from_string(meta_template)
meta_value = template.render({'required_config': required_config, 'optional_config': optional_config})
return meta_value
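# Illustrative sketch of the comment_ify filter defined above (input is
# assumed, not from the source): given
#     v = ["See L(the docs, https://docs.example.com).", "Uses C(galaxy.yml)."]
# the filter joins the sentences, rewrites the L()/C() markup and wraps the
# result as a YAML comment, roughly:
#     # See the docs <https://docs.example.com>. Uses 'galaxy.yml'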
############################
# execute actions
############################
def execute_role(self):
"""
Perform the action on an Ansible Galaxy role. Must be combined with a further action like delete/install/init
as listed below.
"""
# To satisfy doc build
pass
def execute_collection(self):
"""
Perform the action on an Ansible Galaxy collection. Must be combined with a further action like init/install as
listed below.
"""
# To satisfy doc build
pass
def execute_build(self):
"""
Build an Ansible Galaxy collection artifact that can be stored in a central repository like Ansible Galaxy.
By default, this command builds from the current working directory. You can optionally pass in the
collection input path (where the ``galaxy.yml`` file is).
"""
force = context.CLIARGS['force']
output_path = GalaxyCLI._resolve_path(context.CLIARGS['output_path'])
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
elif os.path.isfile(b_output_path):
raise AnsibleError("- the output collection directory %s is a file - aborting" % to_native(output_path))
for collection_path in context.CLIARGS['args']:
collection_path = GalaxyCLI._resolve_path(collection_path)
build_collection(collection_path, output_path, force)
def execute_init(self):
"""
Creates the skeleton framework of a role or collection that complies with the Galaxy metadata format.
Requires a role or collection name. The collection name must be in the format ``<namespace>.<collection>``.
"""
galaxy_type = context.CLIARGS['type']
init_path = context.CLIARGS['init_path']
force = context.CLIARGS['force']
obj_skeleton = context.CLIARGS['{0}_skeleton'.format(galaxy_type)]
obj_name = context.CLIARGS['{0}_name'.format(galaxy_type)]
inject_data = dict(
description='your {0} description'.format(galaxy_type),
ansible_plugin_list_dir=get_versioned_doclink('plugins/plugins.html'),
)
if galaxy_type == 'role':
inject_data.update(dict(
author='your name',
company='your company (optional)',
license='license (GPL-2.0-or-later, MIT, etc)',
role_name=obj_name,
role_type=context.CLIARGS['role_type'],
issue_tracker_url='http://example.com/issue/tracker',
repository_url='http://example.com/repository',
documentation_url='http://docs.example.com',
homepage_url='http://example.com',
min_ansible_version=ansible_version[:3], # x.y
))
obj_path = os.path.join(init_path, obj_name)
elif galaxy_type == 'collection':
namespace, collection_name = obj_name.split('.', 1)
inject_data.update(dict(
namespace=namespace,
collection_name=collection_name,
version='1.0.0',
readme='README.md',
authors=['your name <example@domain.com>'],
license=['GPL-2.0-or-later'],
repository='http://example.com/repository',
documentation='http://docs.example.com',
homepage='http://example.com',
issues='http://example.com/issue/tracker',
build_ignore=[],
))
obj_path = os.path.join(init_path, namespace, collection_name)
b_obj_path = to_bytes(obj_path, errors='surrogate_or_strict')
if os.path.exists(b_obj_path):
if os.path.isfile(obj_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % to_native(obj_path))
elif not force:
raise AnsibleError("- the directory %s already exists. "
"You can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % to_native(obj_path))
if obj_skeleton is not None:
own_skeleton = False
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
else:
own_skeleton = True
obj_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
obj_skeleton = os.path.expanduser(obj_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
if not os.path.exists(obj_skeleton):
raise AnsibleError("- the skeleton path '{0}' does not exist, cannot init {1}".format(
to_native(obj_skeleton), galaxy_type)
)
template_env = Environment(loader=FileSystemLoader(obj_skeleton))
# create role directory
if not os.path.exists(b_obj_path):
os.makedirs(b_obj_path)
for root, dirs, files in os.walk(obj_skeleton, topdown=True):
rel_root = os.path.relpath(root, obj_skeleton)
rel_dirs = rel_root.split(os.sep)
rel_root_dir = rel_dirs[0]
if galaxy_type == 'collection':
# A collection can contain templates in playbooks/*/templates and roles/*/templates
in_templates_dir = rel_root_dir in ['playbooks', 'roles'] and 'templates' in rel_dirs
else:
in_templates_dir = rel_root_dir == 'templates'
dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
elif galaxy_type == 'collection' and own_skeleton and rel_root == '.' and f == 'galaxy.yml.j2':
# Special use case for galaxy.yml.j2 in our own default collection skeleton. We build the options
# dynamically which requires special options to be set.
# The templated data's keys must match the key name but the inject data contains collection_name
# instead of name. We just make a copy and change the key back to name for this file.
template_data = inject_data.copy()
template_data['name'] = template_data.pop('collection_name')
meta_value = GalaxyCLI._get_skeleton_galaxy_yml(os.path.join(root, rel_root, f), template_data)
b_dest_file = to_bytes(os.path.join(obj_path, rel_root, filename), errors='surrogate_or_strict')
with open(b_dest_file, 'wb') as galaxy_obj:
galaxy_obj.write(to_bytes(meta_value, errors='surrogate_or_strict'))
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(rel_root, f)
dest_file = os.path.join(obj_path, rel_root, filename)
template_env.get_template(src_template).stream(inject_data).dump(dest_file, encoding='utf-8')
else:
f_rel_path = os.path.relpath(os.path.join(root, f), obj_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(obj_path, f_rel_path))
for d in dirs:
b_dir_path = to_bytes(os.path.join(obj_path, rel_root, d), errors='surrogate_or_strict')
if not os.path.exists(b_dir_path):
os.makedirs(b_dir_path)
display.display("- %s %s was created successfully" % (galaxy_type.title(), obj_name))
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
roles_path = context.CLIARGS['roles_path']
data = ''
for role in context.CLIARGS['args']:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, self.api, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not context.CLIARGS['offline']:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
# FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
Install one or more roles (``ansible-galaxy role install``), or one or more collections (``ansible-galaxy collection install``).
You can pass in a list (roles or collections) or use the file
option listed below (these are mutually exclusive). If you pass in a list, it
can be a name (which will be downloaded via the galaxy API and github), or it can be a local tar archive file.
"""
if context.CLIARGS['type'] == 'collection':
collections = context.CLIARGS['args']
force = context.CLIARGS['force']
output_path = context.CLIARGS['collections_path']
ignore_certs = context.CLIARGS['ignore_certs']
ignore_errors = context.CLIARGS['ignore_errors']
requirements_file = context.CLIARGS['requirements']
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
if collections and requirements_file:
raise AnsibleError("The positional collection_name arg and --requirements-file are mutually exclusive.")
elif not collections and not requirements_file:
raise AnsibleError("You must specify a collection name or a requirements file.")
if requirements_file:
requirements_file = GalaxyCLI._resolve_path(requirements_file)
requirements = self._parse_requirements_file(requirements_file, allow_old_format=False)['collections']
else:
requirements = []
for collection_input in collections:
requirement = None
if os.path.isfile(to_bytes(collection_input, errors='surrogate_or_strict')) or \
urlparse(collection_input).scheme.lower() in ['http', 'https']:
# Arg is a file path or URL to a collection
name = collection_input
else:
name, dummy, requirement = collection_input.partition(':')
requirements.append((name, requirement or '*', None))
output_path = GalaxyCLI._resolve_path(output_path)
collections_path = C.COLLECTIONS_PATHS
if len([p for p in collections_path if p.startswith(output_path)]) == 0:
display.warning("The specified collections path '%s' is not part of the configured Ansible "
"collections paths '%s'. The installed collection won't be picked up in an Ansible "
"run." % (to_text(output_path), to_text(":".join(collections_path))))
if os.path.split(output_path)[1] != 'ansible_collections':
output_path = os.path.join(output_path, 'ansible_collections')
b_output_path = to_bytes(output_path, errors='surrogate_or_strict')
if not os.path.exists(b_output_path):
os.makedirs(b_output_path)
install_collections(requirements, output_path, self.api_servers, (not ignore_certs), ignore_errors,
no_deps, force, force_deps)
return 0
role_file = context.CLIARGS['role_file']
if not context.CLIARGS['args'] and role_file is None:
# the user needs to specify one of either --role-file or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
no_deps = context.CLIARGS['no_deps']
force_deps = context.CLIARGS['force_with_deps']
force = context.CLIARGS['force'] or force_deps
roles_left = []
if role_file:
if not (role_file.endswith('.yaml') or role_file.endswith('.yml')):
raise AnsibleError("Invalid role requirements file, it must end with a .yml or .yaml extension")
roles_left = self._parse_requirements_file(role_file)['roles']
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in context.CLIARGS['args']:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, self.api, **role))
for role in roles_left:
# only process roles in roles files when names matches if given
if role_file and context.CLIARGS['args'] and role.name not in context.CLIARGS['args']:
display.vvv('Skipping role %s' % role.name)
continue
display.vvv('Processing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version or force:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning(u"- %s was NOT installed successfully: %s " % (role.name, to_text(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
if not role.metadata:
display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
else:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, self.api, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % to_text(dep_role))
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
if force_deps:
display.display('- changing dependent role %s from %s to %s' %
(dep_role.name, dep_role.install_info['version'], dep_role.version or "unspecified"))
dep_role.remove()
roles_left.append(dep_role)
else:
display.warning('- dependency %s (%s) from role %s differs from already installed version (%s), skipping' %
(to_text(dep_role), dep_role.version, role.name, dep_role.install_info['version']))
else:
if force_deps:
roles_left.append(dep_role)
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if not context.CLIARGS['args']:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in context.CLIARGS['args']:
role = GalaxyRole(self.galaxy, self.api, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, to_native(e)))
return 0
def execute_list(self):
"""
lists the roles installed on the local system or matches a single role passed as an argument.
"""
def _display_role(gr):
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (gr.name, version))
if context.CLIARGS['role']:
# show the requested role, if it exists
name = context.CLIARGS['role']
gr = GalaxyRole(self.galaxy, self.api, name)
if gr.metadata:
display.display('# %s' % os.path.dirname(gr.path))
_display_role(gr)
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = context.CLIARGS['roles_path']
path_found = False
warnings = []
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
warnings.append("- the configured path %s does not exist." % role_path)
continue
elif not os.path.isdir(role_path):
warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
continue
display.display('# %s' % role_path)
path_files = os.listdir(role_path)
path_found = True
for path_file in path_files:
gr = GalaxyRole(self.galaxy, self.api, path_file, path=path)
if gr.metadata:
_display_role(gr)
for w in warnings:
display.warning(w)
if not path_found:
raise AnsibleOptionsError("- None of the provided paths was usable. Please specify a valid path with --roles-path")
return 0
def execute_publish(self):
"""
Publish a collection into Ansible Galaxy. Requires the path to the collection tarball to publish.
"""
collection_path = GalaxyCLI._resolve_path(context.CLIARGS['args'])
wait = context.CLIARGS['wait']
timeout = context.CLIARGS['import_timeout']
publish_collection(collection_path, self.api, wait, timeout)
def execute_search(self):
''' searches for roles on the Ansible Galaxy server'''
page_size = 1000
search = None
if context.CLIARGS['args']:
search = '+'.join(context.CLIARGS['args'])
if not search and not context.CLIARGS['platforms'] and not context.CLIARGS['galaxy_tags'] and not context.CLIARGS['author']:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=context.CLIARGS['platforms'],
tags=context.CLIARGS['galaxy_tags'], author=context.CLIARGS['author'], page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_login(self):
"""
verify the user's identity via GitHub and retrieve an auth token from Ansible Galaxy.
"""
# Authenticate with github and retrieve a token
if context.CLIARGS['token'] is None:
if C.GALAXY_TOKEN:
github_token = C.GALAXY_TOKEN
else:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = context.CLIARGS['token']
galaxy_response = self.api.authenticate(github_token)
if context.CLIARGS['token'] is None and C.GALAXY_TOKEN is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
github_user = to_text(context.CLIARGS['github_user'], errors='surrogate_or_strict')
github_repo = to_text(context.CLIARGS['github_repo'], errors='surrogate_or_strict')
if context.CLIARGS['check_status']:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo,
reference=context.CLIARGS['reference'],
role_name=context.CLIARGS['role_name'])
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not context.CLIARGS['wait']:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if context.CLIARGS['check_status'] or context.CLIARGS['wait']:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if context.CLIARGS['setup_list']:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if context.CLIARGS['remove_id']:
# Remove a secret
self.api.remove_secret(context.CLIARGS['remove_id'])
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
source = context.CLIARGS['source']
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
secret = context.CLIARGS['secret']
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
github_user = context.CLIARGS['github_user']
github_repo = context.CLIARGS['github_repo']
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
|
mikewiebe-ansible/ansible
|
lib/ansible/cli/galaxy.py
|
Python
|
gpl-3.0
| 59,804
|
[
"Galaxy"
] |
ad8a1327e37019a57247342d3906938e70f974a941ff1460e1cc57801bb5eb54
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import gzip
from warnings import warn
import numpy as np
from scipy import ndimage
from copy import copy
from nibabel.tmpdirs import InTemporaryDirectory
from nibabel.py3k import asbytes
try:
import Tkinter as tkinter
has_tkinter = True
except ImportError:
try:
import tkinter
has_tkinter = True
except ImportError:
has_tkinter = False
try:
import tkFileDialog as filedialog
except ImportError:
try:
from tkinter import filedialog
except ImportError:
has_tkinter = False
# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package
from dipy import __version__ as dipy_version
from dipy.utils.six import string_types
from dipy.viz.interactor import CustomInteractorStyle
# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
colors, have_vtk_colors, _ = optional_package('vtk.util.colors')
numpy_support, have_ns, _ = optional_package('vtk.util.numpy_support')
_, have_imread, _ = optional_package('Image')
if not have_imread:
_, have_imread, _ = optional_package('PIL')
if have_vtk:
version = vtk.vtkVersion.GetVTKSourceVersion().split(' ')[-1]
major_version = vtk.vtkVersion.GetVTKMajorVersion()
from vtk.util.numpy_support import vtk_to_numpy
vtkRenderer = vtk.vtkRenderer
else:
vtkRenderer = object
if have_imread:
from scipy.misc import imread
class Renderer(vtkRenderer):
""" Your scene class
This is an important object that is responsible for preparing objects
e.g. actors and volumes for rendering. This is a more pythonic version
of ``vtkRenderer``, providing simple methods for adding and removing actors,
while also giving access to all the functionality
available in ``vtkRenderer`` if necessary.
"""
def background(self, color):
""" Set a background color
"""
self.SetBackground(color)
def add(self, *actors):
""" Add an actor to the renderer
"""
for actor in actors:
if isinstance(actor, vtk.vtkVolume):
self.AddVolume(actor)
elif isinstance(actor, vtk.vtkActor2D):
self.AddActor2D(actor)
elif hasattr(actor, 'add_to_renderer'):
actor.add_to_renderer(self)
else:
self.AddActor(actor)
def rm(self, actor):
""" Remove a specific actor
"""
self.RemoveActor(actor)
def clear(self):
""" Remove all actors from the renderer
"""
self.RemoveAllViewProps()
def rm_all(self):
""" Remove all actors from the renderer
"""
self.RemoveAllViewProps()
def projection(self, proj_type='perspective'):
""" Deside between parallel or perspective projection
Parameters
----------
proj_type : str
Can be 'parallel' or 'perspective' (default).
"""
if proj_type == 'parallel':
self.GetActiveCamera().ParallelProjectionOn()
else:
self.GetActiveCamera().ParallelProjectionOff()
def reset_camera(self):
""" Reset the camera to an automatic position given by the engine.
"""
self.ResetCamera()
def reset_clipping_range(self):
self.ResetCameraClippingRange()
def camera(self):
return self.GetActiveCamera()
def get_camera(self):
cam = self.GetActiveCamera()
return cam.GetPosition(), cam.GetFocalPoint(), cam.GetViewUp()
def camera_info(self):
cam = self.camera()
print('# Active Camera')
print(' Position (%.2f, %.2f, %.2f)' % cam.GetPosition())
print(' Focal Point (%.2f, %.2f, %.2f)' % cam.GetFocalPoint())
print(' View Up (%.2f, %.2f, %.2f)' % cam.GetViewUp())
def set_camera(self, position=None, focal_point=None, view_up=None):
if position is not None:
self.GetActiveCamera().SetPosition(*position)
if focal_point is not None:
self.GetActiveCamera().SetFocalPoint(*focal_point)
if view_up is not None:
self.GetActiveCamera().SetViewUp(*view_up)
self.ResetCameraClippingRange()
def size(self):
""" Renderer size"""
return self.GetSize()
def zoom(self, value):
""" In perspective mode, decrease the view angle by the specified
factor. In parallel mode, decrease the parallel scale by the specified
factor. A value greater than 1 is a zoom-in, a value less than 1 is a
zoom-out.
"""
self.GetActiveCamera().Zoom(value)
def azimuth(self, angle):
""" Rotate the camera about the view up vector centered at the focal
point. Note that the view up vector is whatever was set via SetViewUp,
and is not necessarily perpendicular to the direction of projection.
The result is a horizontal rotation of the camera.
"""
self.GetActiveCamera().Azimuth(angle)
def yaw(self, angle):
""" Rotate the focal point about the view up vector, using the camera's
position as the center of rotation. Note that the view up vector is
whatever was set via SetViewUp, and is not necessarily perpendicular
to the direction of projection. The result is a horizontal rotation of
the scene.
"""
self.GetActiveCamera().Yaw(angle)
def elevation(self, angle):
""" Rotate the camera about the cross product of the negative of the
direction of projection and the view up vector, using the focal point
as the center of rotation. The result is a vertical rotation of the
scene.
"""
self.GetActiveCamera().Elevation(angle)
def pitch(self, angle):
""" Rotate the focal point about the cross product of the view up
vector and the direction of projection, using the camera's position as
the center of rotation. The result is a vertical rotation of the
camera.
"""
self.GetActiveCamera().Pitch(angle)
def roll(self, angle):
""" Rotate the camera about the direction of projection. This will
spin the camera about its axis.
"""
self.GetActiveCamera().Roll(angle)
def dolly(self, value):
""" Divide the camera's distance from the focal point by the given
dolly value. Use a value greater than one to dolly-in toward the focal
point, and use a value less than one to dolly-out away from the focal
point.
"""
self.GetActiveCamera().Dolly(value)
def camera_direction(self):
""" Get the vector in the direction from the camera position to the
focal point. This is usually the opposite of the ViewPlaneNormal, the
vector perpendicular to the screen, unless the view is oblique.
"""
return self.GetActiveCamera().GetDirectionOfProjection()
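# Hedged usage sketch for the camera helpers above (requires vtk; values
# are illustrative):
#
#     ren = Renderer()
#     ren.set_camera(position=(0, 0, 10), focal_point=(0, 0, 0),
#                    view_up=(0, 1, 0))
#     ren.azimuth(30)   # horizontal rotation about the view-up vector
#     ren.zoom(1.5)     # > 1 zooms in, < 1 zooms out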
def renderer(background=None):
""" Create a renderer.
Parameters
----------
background : tuple
Initial background color of renderer
Returns
-------
v : Renderer
Examples
--------
>>> from dipy.viz import window, actor
>>> import numpy as np
>>> r = window.Renderer()
>>> lines=[np.random.rand(10,3)]
>>> c=actor.line(lines, window.colors.red)
>>> r.add(c)
>>> #window.show(r)
"""
deprecation_msg = ("Method 'dipy.viz.window.renderer' is deprecated, instead"
" use class 'dipy.viz.window.Renderer'.")
warn(DeprecationWarning(deprecation_msg))
ren = Renderer()
if background is not None:
ren.SetBackground(background)
return ren
if have_vtk:
ren = renderer
def add(ren, a):
""" Add a specific actor
"""
ren.add(a)
def rm(ren, a):
""" Remove a specific actor
"""
ren.rm(a)
def clear(ren):
""" Remove all actors from the renderer
"""
ren.clear()
def rm_all(ren):
""" Remove all actors from the renderer
"""
ren.rm_all()
def open_file_dialog(file_types=[("All files", "*")]):
""" Simple Tk file dialog for opening files
Parameters
----------
file_types : tuples of tuples
Accepted file types.
Returns
-------
file_paths : sequence of str
Returns the full paths of all selected files
"""
root = tkinter.Tk()
root.withdraw()
file_paths = filedialog.askopenfilenames(filetypes=file_types)
return file_paths
def save_file_dialog(initial_file='dipy.png', default_ext='.png',
file_types=(("PNG file", "*.png"), ("All Files", "*.*"))):
""" Simple Tk file dialog for saving a file
Parameters
----------
initial_file : str
For example ``dipy.png``.
default_ext : str
Default extension to appear in the save dialog.
file_types : tuples of tuples
Accepted file types.
Returns
-------
filepath : str
Complete filename of saved file
"""
root = tkinter.Tk()
root.withdraw()
file_path = filedialog.asksaveasfilename(initialfile=initial_file,
defaultextension=default_ext,
filetypes=file_types)
return file_path
class ShowManager(object):
""" This class is the interface between the renderer, the window and the
interactor.
"""
def __init__(self, ren=None, title='DIPY', size=(300, 300),
png_magnify=1, reset_camera=True, order_transparent=False,
interactor_style='custom'):
""" Manages the visualization pipeline
Parameters
----------
ren : Renderer() or vtkRenderer()
The scene that holds all the actors.
title : string
A string for the window title bar.
size : (int, int)
``(width, height)`` of the window. Default is (300, 300).
png_magnify : int
Number of times to magnify the screenshot. This can be used to save
high resolution screenshots when pressing 's' inside the window.
reset_camera : bool
Default is True. You can change this option to False if you want to
keep the camera as set before calling this function.
order_transparent : bool
True is useful when you want to order transparent
actors according to their relative position to the camera. The
default option which is False will order the actors according to
the order of their addition to the Renderer().
interactor_style : str or vtkInteractorStyle
If str then if 'trackball' then vtkInteractorStyleTrackballCamera()
is used, if 'image' then vtkInteractorStyleImage() is used (no
rotation) or if 'custom' then CustomInteractorStyle is used.
Otherwise you can input your own interactor style.
Attributes
----------
ren : vtkRenderer()
iren : vtkRenderWindowInteractor()
style : vtkInteractorStyle()
window : vtkRenderWindow()
Methods
-------
initialize()
render()
start()
add_window_callback()
Notes
-----
Default interaction keys for
* 3d navigation are with left, middle and right mouse dragging
* resetting the camera press 'r'
* saving a screenshot press 's'
* for quitting press 'q'
Examples
--------
>>> from dipy.viz import actor, window
>>> renderer = window.Renderer()
>>> renderer.add(actor.axes())
>>> showm = window.ShowManager(renderer)
>>> # showm.initialize()
>>> # showm.render()
>>> # showm.start()
"""
if ren is None:
ren = Renderer()
self.ren = ren
self.title = title
self.size = size
self.png_magnify = png_magnify
self.reset_camera = reset_camera
self.order_transparent = order_transparent
self.interactor_style = interactor_style
if self.reset_camera:
self.ren.ResetCamera()
self.window = vtk.vtkRenderWindow()
self.window.AddRenderer(ren)
if self.title == 'DIPY':
self.window.SetWindowName(title + ' ' + dipy_version)
else:
self.window.SetWindowName(title)
self.window.SetSize(size[0], size[1])
if self.order_transparent:
# Use a render window with alpha bits
            # (the default is 0, i.e. False)
self.window.SetAlphaBitPlanes(True)
# Force to not pick a framebuffer with a multisample buffer
# (default is 8)
self.window.SetMultiSamples(0)
# Choose to use depth peeling (if supported)
# (default is 0 (false)):
self.ren.UseDepthPeelingOn()
# Set depth peeling parameters
# Set the maximum number of rendering passes (default is 4)
ren.SetMaximumNumberOfPeels(4)
# Set the occlusion ratio (initial value is 0.0, exact image):
ren.SetOcclusionRatio(0.0)
if self.interactor_style == 'image':
self.style = vtk.vtkInteractorStyleImage()
elif self.interactor_style == 'trackball':
self.style = vtk.vtkInteractorStyleTrackballCamera()
elif self.interactor_style == 'custom':
self.style = CustomInteractorStyle()
else:
self.style = interactor_style
self.iren = vtk.vtkRenderWindowInteractor()
self.style.SetCurrentRenderer(self.ren)
# Hack: below, we explicitly call the Python version of SetInteractor.
self.style.SetInteractor(self.iren)
self.iren.SetInteractorStyle(self.style)
self.iren.SetRenderWindow(self.window)
def initialize(self):
""" Initialize interaction
"""
self.iren.Initialize()
def render(self):
""" Renders only once
"""
self.window.Render()
def start(self):
""" Starts interaction
"""
try:
self.iren.Start()
except AttributeError:
self.__init__(self.ren, self.title, size=self.size,
png_magnify=self.png_magnify,
reset_camera=self.reset_camera,
order_transparent=self.order_transparent,
interactor_style=self.interactor_style)
self.initialize()
self.render()
self.iren.Start()
self.window.RemoveRenderer(self.ren)
self.ren.SetRenderWindow(None)
del self.iren
del self.window
def record_events(self):
""" Records events during the interaction.
The recording is represented as a list of VTK events that happened
during the interaction. The recorded events are then returned.
Returns
-------
events : str
Recorded events (one per line).
Notes
-----
Since VTK only allows recording events to a file, we use a
temporary file from which we then read the events.
"""
with InTemporaryDirectory():
filename = "recorded_events.log"
recorder = vtk.vtkInteractorEventRecorder()
recorder.SetInteractor(self.iren)
recorder.SetFileName(filename)
def _stop_recording_and_close(obj, evt):
if recorder:
recorder.Stop()
self.iren.TerminateApp()
self.iren.AddObserver("ExitEvent", _stop_recording_and_close)
recorder.EnabledOn()
recorder.Record()
self.initialize()
self.render()
self.iren.Start()
# Deleting this object is the unique way
# to close the file.
recorder = None
# Retrieved recorded events.
with open(filename, 'r') as f:
events = f.read()
return events
def record_events_to_file(self, filename="record.log"):
""" Records events during the interaction.
The recording is represented as a list of VTK events
that happened during the interaction. The recording is
going to be saved into `filename`.
Parameters
----------
filename : str
Name of the file that will contain the recording (.log|.log.gz).
"""
events = self.record_events()
# Compress file if needed
if filename.endswith(".gz"):
with gzip.open(filename, 'wb') as fgz:
fgz.write(asbytes(events))
else:
with open(filename, 'w') as f:
f.write(events)
def play_events(self, events):
""" Plays recorded events of a past interaction.
The VTK events that happened during the recorded interaction will be
played back.
Parameters
----------
events : str
Recorded events (one per line).
"""
recorder = vtk.vtkInteractorEventRecorder()
recorder.SetInteractor(self.iren)
recorder.SetInputString(events)
recorder.ReadFromInputStringOn()
self.initialize()
self.render()
recorder.Play()
def play_events_from_file(self, filename):
""" Plays recorded events of a past interaction.
The VTK events that happened during the recorded interaction will be
played back from `filename`.
Parameters
----------
filename : str
Name of the file containing the recorded events (.log|.log.gz).
"""
# Uncompress file if needed.
if filename.endswith(".gz"):
with gzip.open(filename, 'r') as f:
events = f.read()
else:
with open(filename) as f:
events = f.read()
self.play_events(events)
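    # Round-trip sketch (hypothetical filename; needs an interactive VTK
    # session -- interact with the window, press 'q', then replay):
    #
    #   showm = ShowManager(Renderer())
    #   showm.record_events_to_file("session.log.gz")
    #   showm.play_events_from_file("session.log.gz")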
def add_window_callback(self, win_callback):
""" Add window callbacks
"""
self.window.AddObserver(vtk.vtkCommand.ModifiedEvent, win_callback)
self.window.Render()
def show(ren, title='DIPY', size=(300, 300),
png_magnify=1, reset_camera=True, order_transparent=False):
""" Show window with current renderer
Parameters
------------
ren : Renderer() or vtkRenderer()
The scene that holds all the actors.
title : string
A string for the window title bar. Default is DIPY and current version.
size : (int, int)
``(width, height)`` of the window. Default is (300, 300).
png_magnify : int
Number of times to magnify the screenshot. Default is 1. This can be
used to save high resolution screenshots when pressing 's' inside the
window.
reset_camera : bool
Default is True. You can change this option to False if you want to
keep the camera as set before calling this function.
    order_transparent : bool
        If True, transparent actors are ordered according to their relative
        position to the camera. The default (False) orders the actors
        according to the order of their addition to the Renderer().
Notes
-----
    Default interaction keys:

    * 3d navigation: left, middle and right mouse dragging
    * 'r': reset the camera
    * 's': save a screenshot
    * 'q': quit
Examples
----------
>>> import numpy as np
>>> from dipy.viz import window, actor
>>> r = window.Renderer()
>>> lines=[np.random.rand(10,3),np.random.rand(20,3)]
>>> colors=np.array([[0.2,0.2,0.2],[0.8,0.8,0.8]])
>>> c=actor.line(lines,colors)
>>> r.add(c)
>>> l=actor.label(text="Hello")
>>> r.add(l)
>>> #window.show(r)
See also
---------
dipy.viz.window.record
dipy.viz.window.snapshot
"""
show_manager = ShowManager(ren, title, size,
png_magnify, reset_camera, order_transparent)
show_manager.initialize()
show_manager.render()
show_manager.start()
def record(ren=None, cam_pos=None, cam_focal=None, cam_view=None,
out_path=None, path_numbering=False, n_frames=1, az_ang=10,
magnification=1, size=(300, 300), reset_camera=True, verbose=False):
""" This will record a video of your scene
Records a video as a series of ``.png`` files of your scene by rotating the
azimuth angle az_angle in every frame.
Parameters
-----------
    ren : vtkRenderer() object
        as returned from function renderer()
cam_pos : None or sequence (3,), optional
Camera's position. If None then default camera's position is used.
cam_focal : None or sequence (3,), optional
Camera's focal point. If None then default camera's focal point is
used.
cam_view : None or sequence (3,), optional
Camera's view up direction. If None then default camera's view up
vector is used.
out_path : str, optional
Output path for the frames. If None a default dipy.png is created.
    path_numbering : bool
        If True, the (zero-padded) frame number is appended to ``out_path``
        for every saved frame.
n_frames : int, optional
Number of frames to save, default 1
az_ang : float, optional
Azimuthal angle of camera rotation.
magnification : int, optional
How much to magnify the saved frame. Default is 1.
size : (int, int)
``(width, height)`` of the window. Default is (300, 300).
reset_camera : bool
        If True, call ``ren.reset_camera()``. Otherwise you need to set the
camera before calling this function.
verbose : bool
print information about the camera. Default is False.
Examples
---------
>>> from dipy.viz import window, actor
>>> ren = window.Renderer()
>>> a = actor.axes()
>>> ren.add(a)
>>> # uncomment below to record
>>> # window.record(ren)
>>> #check for new images in current directory
"""
if ren is None:
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(size[0], size[1])
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# ren.GetActiveCamera().Azimuth(180)
if reset_camera:
ren.ResetCamera()
renderLarge = vtk.vtkRenderLargeImage()
    # vtkRenderLargeImage.SetInput is the correct call for both VTK <= 5
    # and newer versions here
    renderLarge.SetInput(ren)
renderLarge.SetMagnification(magnification)
renderLarge.Update()
writer = vtk.vtkPNGWriter()
ang = 0
if cam_pos is not None:
cx, cy, cz = cam_pos
ren.GetActiveCamera().SetPosition(cx, cy, cz)
if cam_focal is not None:
fx, fy, fz = cam_focal
ren.GetActiveCamera().SetFocalPoint(fx, fy, fz)
if cam_view is not None:
ux, uy, uz = cam_view
ren.GetActiveCamera().SetViewUp(ux, uy, uz)
cam = ren.GetActiveCamera()
if verbose:
print('Camera Position (%.2f, %.2f, %.2f)' % cam.GetPosition())
print('Camera Focal Point (%.2f, %.2f, %.2f)' % cam.GetFocalPoint())
print('Camera View Up (%.2f, %.2f, %.2f)' % cam.GetViewUp())
for i in range(n_frames):
ren.GetActiveCamera().Azimuth(ang)
renderLarge = vtk.vtkRenderLargeImage()
renderLarge.SetInput(ren)
renderLarge.SetMagnification(magnification)
renderLarge.Update()
writer.SetInputConnection(renderLarge.GetOutputPort())
if path_numbering:
if out_path is None:
filename = str(i).zfill(6) + '.png'
else:
filename = out_path + str(i).zfill(6) + '.png'
else:
if out_path is None:
filename = 'dipy.png'
else:
filename = out_path
writer.SetFileName(filename)
writer.Write()
        ang = az_ang  # after the first frame, rotate by az_ang per frame
def snapshot(ren, fname=None, size=(300, 300), offscreen=True,
order_transparent=False):
""" Saves a snapshot of the renderer in a file or in memory
Parameters
-----------
ren : vtkRenderer
as returned from function renderer()
fname : str or None
Save PNG file. If None return only an array without saving PNG.
size : (int, int)
``(width, height)`` of the window. Default is (300, 300).
    offscreen : bool
        Default is True. Render off-screen so that no window appears.
order_transparent : bool
Default False. Use depth peeling to sort transparent objects.
Returns
-------
arr : ndarray
Color array of size (width, height, 3) where the last dimension
holds the RGB values.
"""
width, height = size
if offscreen:
graphics_factory = vtk.vtkGraphicsFactory()
graphics_factory.SetOffScreenOnlyMode(1)
# TODO check if the line below helps in something
# graphics_factory.SetUseMesaClasses(1)
render_window = vtk.vtkRenderWindow()
if offscreen:
render_window.SetOffScreenRendering(1)
render_window.AddRenderer(ren)
render_window.SetSize(width, height)
if order_transparent:
# Use a render window with alpha bits
        # (the default is 0, i.e. False)
render_window.SetAlphaBitPlanes(True)
# Force to not pick a framebuffer with a multisample buffer
# (default is 8)
render_window.SetMultiSamples(0)
# Choose to use depth peeling (if supported)
# (default is 0 (false)):
ren.UseDepthPeelingOn()
# Set depth peeling parameters
# Set the maximum number of rendering passes (default is 4)
ren.SetMaximumNumberOfPeels(4)
# Set the occlusion ratio (initial value is 0.0, exact image):
ren.SetOcclusionRatio(0.0)
render_window.Render()
window_to_image_filter = vtk.vtkWindowToImageFilter()
window_to_image_filter.SetInput(render_window)
window_to_image_filter.Update()
vtk_image = window_to_image_filter.GetOutput()
h, w, _ = vtk_image.GetDimensions()
vtk_array = vtk_image.GetPointData().GetScalars()
components = vtk_array.GetNumberOfComponents()
arr = vtk_to_numpy(vtk_array).reshape(h, w, components)
if fname is None:
return arr
writer = vtk.vtkPNGWriter()
writer.SetFileName(fname)
writer.SetInputConnection(window_to_image_filter.GetOutputPort())
writer.Write()
return arr
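# Snapshot sketch (illustrative): render off-screen and inspect the pixels
# without opening a window.
#
#   ren = Renderer()
#   ren.add(actor.axes())
#   arr = snapshot(ren, size=(200, 200))   # (200, 200, 3) ndarray
#   snapshot(ren, fname='axes.png')        # additionally writes a PNG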
def analyze_renderer(ren):
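    """ Report the background color, the number of actors and their class
    names for the given renderer.
    """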
class ReportRenderer(object):
bg_color = None
report = ReportRenderer()
report.bg_color = ren.GetBackground()
report.collection = ren.GetActors()
report.actors = report.collection.GetNumberOfItems()
report.collection.InitTraversal()
report.actors_classnames = []
for i in range(report.actors):
class_name = report.collection.GetNextActor().GetClassName()
report.actors_classnames.append(class_name)
return report
def analyze_snapshot(im, bg_color=(0, 0, 0), colors=None,
find_objects=True,
strel=None):
""" Analyze snapshot from memory or file
Parameters
----------
im: str or array
If string then the image is read from a file otherwise the image is
        read from a numpy array. The array is expected to be of shape (X, Y, 3)
        where the last dimension holds the RGB values.
colors: tuple (3,) or list of tuples (3,)
List of colors to search in the image
find_objects: bool
If True it will calculate the number of objects that are different
from the background and return their position in a new image.
strel: 2d array
Structure element to use for finding the objects.
Returns
-------
report : ReportSnapshot
        This is an object with attributes like ``colors_found`` that give
information about what was found in the current snapshot array ``im``.
"""
if isinstance(im, string_types):
im = imread(im)
class ReportSnapshot(object):
objects = None
labels = None
colors_found = False
report = ReportSnapshot()
if colors is not None:
if isinstance(colors, tuple):
colors = [colors]
flags = [False] * len(colors)
for (i, col) in enumerate(colors):
            # check whether the current color exists in the array
flags[i] = np.any(np.all(im == col, axis=-1))
report.colors_found = flags
if find_objects is True:
        weights = [0.299, 0.587, 0.114]  # ITU-R BT.601 luma weights
gray = np.dot(im[..., :3], weights)
bg_color = im[0, 0]
background = np.dot(bg_color, weights)
if strel is None:
strel = np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
labels, objects = ndimage.label(gray != background, strel)
report.labels = labels
report.objects = objects
return report
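# Analysis sketch (colors are illustrative): combine ``snapshot`` with
# ``analyze_snapshot`` to verify what was actually drawn.
#
#   arr = snapshot(ren)
#   report = analyze_snapshot(arr, colors=(255, 0, 0), find_objects=True)
#   report.colors_found   # [True] if pure red pixels exist in the frame
#   report.objects        # number of connected non-background objects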
|
nilgoyyou/dipy
|
dipy/viz/window.py
|
Python
|
bsd-3-clause
| 29,276
|
[
"VTK"
] |
f905daccb5a426fd8996c509fa54a1092a1c056a434ed68d96856b18866bdd2a
|
#
# Copyright (C) 2000 Greg Landrum
#
""" unit testing code for trees and decision trees (not learning/xvalidation) """
from __future__ import print_function
import copy
import os
import unittest
from rdkit import RDConfig
from rdkit.ML.DecTree import Tree
from rdkit.TestRunner import redirect_stdout
from rdkit.six import StringIO
from rdkit.six.moves import cPickle
class TreeTestCase(unittest.TestCase):
def setUp(self):
self.baseTree = Tree.TreeNode(None, 'root')
self.pickleFileName = RDConfig.RDCodeDir + '/ML/DecTree/test_data/treeunit.pkl'
def test_Tree(self):
tree = Tree.TreeNode(None, 'root', label=0)
self.assertEqual(tree.GetLevel(), 0)
self.assertEqual(tree.GetName(), 'root')
self.assertEqual(tree.GetData(), None)
self.assertEqual(tree.GetTerminal(), False)
self.assertEqual(tree.GetLabel(), 0)
self.assertEqual(tree.GetParent(), None)
self.assertEqual(tree.GetChildren(), [])
for i in range(3):
child = tree.AddChild('child {0}'.format(i), i + 1, data={'key': 'value'})
self.assertEqual(child.GetLevel(), 1)
self.assertEqual(child.GetName(), 'child {0}'.format(i))
self.assertEqual(child.GetData(), {'key': 'value'})
self.assertEqual(child.GetLabel(), i + 1)
self.assertEqual(child.GetParent(), tree)
self.assertEqual(child.GetChildren(), [])
children = tree.GetChildren()
self.assertEqual(len(children), 3)
children[0].AddChild('terminal', 4, isTerminal=True)
s = str(tree)
self.assertIn('root', s)
self.assertIn(' terminal', s)
self.assertIn(' child 2', s)
tree.NameTree(['a', 'b', 'c', 'd', 'e'])
self.assertEqual(str(tree), 'a\n b\n terminal\n c\n d\n')
tree.PruneChild(children[1])
self.assertEqual(str(tree), 'a\n b\n terminal\n d\n')
f = StringIO()
with redirect_stdout(f):
tree.Print(showData=True)
s = f.getvalue()
self.assertIn('value', s)
self.assertIn('None', s)
f = StringIO()
with redirect_stdout(f):
tree.Print()
s = f.getvalue()
self.assertNotIn('value', s)
self.assertNotIn('None', s)
tree.Destroy()
self.assertEqual(str(tree), 'a\n')
def _readyTree(self):
tree = self.baseTree
tree.AddChild('child0')
tree.AddChild('child1')
def test5Equals(self):
# " testing tree equals "
nTree = Tree.TreeNode(None, 'root')
self._readyTree()
tTree = self.baseTree
self.baseTree = nTree
self._readyTree()
assert tTree == self.baseTree, 'Equality test 1 failed. (bad Tree.__cmp__)'
assert self.baseTree == tTree, 'Equality test 2 failed. (bad Tree.__cmp__)'
tTree.AddChild('child2')
assert tTree != self.baseTree, 'Inequality test 1 failed. (bad Tree.__cmp__)'
assert self.baseTree != tTree, 'Inequality test 2 failed. (bad Tree.__cmp__)'
self.assertTrue(tTree > self.baseTree, msg='Larger tree is greater')
self.assertEqual(tTree.__cmp__(self.baseTree), 1)
def test6PickleEquals(self):
# " testing pickled tree equals "
self._readyTree()
pkl = cPickle.dumps(self.baseTree)
oTree = cPickle.loads(pkl)
assert oTree == self.baseTree, 'Pickle inequality test failed'
self.assertEqual(oTree.__cmp__(self.baseTree), 0)
self.baseTree.PruneChild(self.baseTree.GetChildren()[0])
assert oTree != self.baseTree, 'Pickle inequality test failed (bad Tree.__cmp__)'
self.assertEqual(abs(oTree.__cmp__(self.baseTree)), 1)
def test7Copy(self):
# " testing deepcopy on trees "
self._readyTree()
nTree = copy.deepcopy(self.baseTree)
assert nTree == self.baseTree, 'deepcopy failed'
def test8In(self):
# " testing list membership "
self._readyTree()
nTree = copy.deepcopy(self.baseTree)
nTree2 = copy.deepcopy(self.baseTree)
nTree2.PruneChild(self.baseTree.GetChildren()[0])
tList = [nTree2, nTree2, nTree]
assert self.baseTree in tList, 'list membership (tree in list) failed'
tList = [nTree2, nTree2]
assert self.baseTree not in tList, 'list membership (tree not in list) failed'
def test_exampleCode(self):
try:
f = StringIO()
with redirect_stdout(f):
Tree._exampleCode()
self.assertTrue(os.path.isfile('save.pkl'))
      self.assertIn('tree==tree2 False', f.getvalue(), "Example didn't run to end")
finally:
if os.path.isfile('save.pkl'):
os.remove('save.pkl')
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
rvianello/rdkit
|
rdkit/ML/DecTree/UnitTestTree.py
|
Python
|
bsd-3-clause
| 4,480
|
[
"RDKit"
] |
21b0c9143d33564fc3764b2e05c92e8b96f8037040c6db8ae69a1ffd67c172d9
|
import os
import random
import pytest
from datetime import datetime
from unittest import mock
from unittest.mock import MagicMock, Mock
from urllib.error import HTTPError
from pytube import request, Stream
@mock.patch("pytube.streams.request")
def test_stream_to_buffer(mock_request, cipher_signature):
# Given
stream_bytes = iter(
[
bytes(os.urandom(8 * 1024)),
bytes(os.urandom(8 * 1024)),
bytes(os.urandom(8 * 1024)),
]
)
mock_request.stream.return_value = stream_bytes
buffer = MagicMock()
# When
cipher_signature.streams[0].stream_to_buffer(buffer)
# Then
assert buffer.write.call_count == 3
def test_filesize(cipher_signature):
assert cipher_signature.streams[0].filesize == 3399554
def test_filesize_approx(cipher_signature):
stream = cipher_signature.streams[0]
assert stream.filesize_approx == 3403320
stream.bitrate = None
assert stream.filesize_approx == 3399554
def test_default_filename(cipher_signature):
expected = "YouTube Rewind 2019 For the Record YouTubeRewind.3gpp"
stream = cipher_signature.streams[0]
assert stream.default_filename == expected
def test_title(cipher_signature):
expected = "YouTube Rewind 2019: For the Record | #YouTubeRewind"
assert cipher_signature.title == expected
def test_expiration(cipher_signature):
assert cipher_signature.streams[0].expiration >= datetime(2020, 10, 30, 5, 39, 41)
def test_caption_tracks(presigned_video):
assert len(presigned_video.caption_tracks) == 13
def test_captions(presigned_video):
assert len(presigned_video.captions) == 13
def test_description(cipher_signature):
expected = (
"In 2018, we made something you didn’t like. "
"For Rewind 2019, let’s see what you DID like.\n\n"
"Celebrating the creators, music and moments "
"that mattered most to you in 2019. \n\n"
"To learn how the top lists in Rewind were generated: "
"https://rewind.youtube/about\n\n"
"Top lists featured the following channels:\n\n"
"@1MILLION Dance Studio \n@A4 \n@Anaysa \n"
"@Andymation \n@Ariana Grande \n@Awez Darbar \n"
"@AzzyLand \n@Billie Eilish \n@Black Gryph0n \n"
"@BLACKPINK \n@ChapkisDanceUSA \n@Daddy Yankee \n"
"@David Dobrik \n@Dude Perfect \n@Felipe Neto \n"
"@Fischer's-フィッシャーズ- \n@Galen Hooks \n@ibighit \n"
"@James Charles \n@jeffreestar \n@Jelly \n@Kylie Jenner \n"
"@LazarBeam \n@Lil Dicky \n@Lil Nas X \n@LOUD \n@LOUD Babi \n"
"@LOUD Coringa \n@Magnet World \n@MrBeast \n"
"@Nilson Izaias Papinho Oficial \n@Noah Schnapp\n"
"@백종원의 요리비책 Paik's Cuisine \n@Pencilmation \n@PewDiePie \n"
"@SethEverman \n@shane \n@Shawn Mendes \n@Team Naach \n"
"@whinderssonnunes \n@워크맨-Workman \n@하루한끼 one meal a day \n\n"
"To see the full list of featured channels in Rewind 2019, "
"visit: https://rewind.youtube/about"
)
assert cipher_signature.description == expected
def test_rating(cipher_signature):
"""Test the rating value of a YouTube object.
This changes each time we rebuild the json files, so we want to use
    an estimate of where it will be. The two values observed when making this
    estimate were 2.073431 and 2.0860765. This represents a range of
~0.007 below and ~0.006 above 2.08. Allowing for up to 0.02 in either
direction should provide a steady indicator of correctness.
"""
assert abs(cipher_signature.rating - 2.08) < 0.02
def test_length(cipher_signature):
assert cipher_signature.length == 337
def test_views(cipher_signature):
assert cipher_signature.views >= 108531745
@mock.patch(
"pytube.request.head", MagicMock(return_value={"content-length": "6796391"})
)
@mock.patch(
"pytube.request.stream",
MagicMock(return_value=iter([str(random.getrandbits(8 * 1024))])),
)
def test_download(cipher_signature):
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
stream = cipher_signature.streams[0]
stream.download()
@mock.patch(
"pytube.request.head", MagicMock(return_value={"content-length": "16384"})
)
@mock.patch(
"pytube.request.stream",
MagicMock(return_value=iter([str(random.getrandbits(8 * 1024))])),
)
@mock.patch("pytube.streams.target_directory", MagicMock(return_value="/target"))
def test_download_with_prefix(cipher_signature):
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
stream = cipher_signature.streams[0]
file_path = stream.download(filename_prefix="prefix")
assert file_path == os.path.join(
"/target",
"prefixYouTube Rewind 2019 For the Record YouTubeRewind.3gpp"
)
@mock.patch(
"pytube.request.head", MagicMock(return_value={"content-length": "16384"})
)
@mock.patch(
"pytube.request.stream",
MagicMock(return_value=iter([str(random.getrandbits(8 * 1024))])),
)
@mock.patch("pytube.streams.target_directory", MagicMock(return_value="/target"))
def test_download_with_filename(cipher_signature):
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
stream = cipher_signature.streams[0]
file_path = stream.download(filename="cool name bro")
assert file_path == os.path.join(
"/target",
"cool name bro"
)
@mock.patch(
"pytube.request.head", MagicMock(return_value={"content-length": "16384"})
)
@mock.patch(
"pytube.request.stream",
MagicMock(return_value=iter([str(random.getrandbits(8 * 1024))])),
)
@mock.patch("pytube.streams.target_directory", MagicMock(return_value="/target"))
@mock.patch("os.path.isfile", MagicMock(return_value=True))
def test_download_with_existing(cipher_signature):
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
stream = cipher_signature.streams[0]
os.path.getsize = Mock(return_value=stream.filesize)
file_path = stream.download()
assert file_path == os.path.join(
"/target",
"YouTube Rewind 2019 For the Record YouTubeRewind.3gpp"
)
assert not request.stream.called
@mock.patch(
"pytube.request.head", MagicMock(return_value={"content-length": "16384"})
)
@mock.patch(
"pytube.request.stream",
MagicMock(return_value=iter([str(random.getrandbits(8 * 1024))])),
)
@mock.patch("pytube.streams.target_directory", MagicMock(return_value="/target"))
@mock.patch("os.path.isfile", MagicMock(return_value=True))
def test_download_with_existing_no_skip(cipher_signature):
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
stream = cipher_signature.streams[0]
os.path.getsize = Mock(return_value=stream.filesize)
file_path = stream.download(skip_existing=False)
assert file_path == os.path.join(
"/target",
"YouTube Rewind 2019 For the Record YouTubeRewind.3gpp"
)
assert request.stream.called
def test_progressive_streams_return_includes_audio_track(cipher_signature):
stream = cipher_signature.streams.filter(progressive=True)[0]
assert stream.includes_audio_track
def test_progressive_streams_return_includes_video_track(cipher_signature):
stream = cipher_signature.streams.filter(progressive=True)[0]
assert stream.includes_video_track
@mock.patch(
"pytube.request.head", MagicMock(return_value={"content-length": "16384"})
)
@mock.patch(
"pytube.request.stream",
MagicMock(return_value=iter([str(random.getrandbits(8 * 1024))])),
)
def test_on_progress_hook(cipher_signature):
callback_fn = mock.MagicMock()
cipher_signature.register_on_progress_callback(callback_fn)
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
stream = cipher_signature.streams[0]
stream.download()
assert callback_fn.called
args, _ = callback_fn.call_args
assert len(args) == 3
stream, _, _ = args
assert isinstance(stream, Stream)
@mock.patch(
"pytube.request.head", MagicMock(return_value={"content-length": "16384"})
)
@mock.patch(
"pytube.request.stream",
MagicMock(return_value=iter([str(random.getrandbits(8 * 1024))])),
)
def test_on_complete_hook(cipher_signature):
callback_fn = mock.MagicMock()
cipher_signature.register_on_complete_callback(callback_fn)
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
stream = cipher_signature.streams[0]
stream.download()
assert callback_fn.called
def test_author(cipher_signature):
assert cipher_signature.author == 'YouTube'
def test_thumbnail_when_in_details(cipher_signature):
expected = f"https://i.ytimg.com/vi/{cipher_signature.video_id}/sddefault.jpg"
cipher_signature._player_response = {
"videoDetails": {"thumbnail": {"thumbnails": [{"url": expected}]}}
}
assert cipher_signature.thumbnail_url == expected
def test_repr_for_audio_streams(cipher_signature):
stream = str(cipher_signature.streams.filter(only_audio=True)[1])
expected = (
'<Stream: itag="140" mime_type="audio/mp4" abr="128kbps" '
'acodec="mp4a.40.2" progressive="False" type="audio">'
)
assert stream == expected
def test_repr_for_video_streams(cipher_signature):
stream = str(cipher_signature.streams.filter(only_video=True)[0])
expected = (
'<Stream: itag="137" mime_type="video/mp4" res="1080p" fps="24fps" '
'vcodec="avc1.640028" progressive="False" type="video">'
)
assert stream == expected
def test_repr_for_progressive_streams(cipher_signature):
stream_reprs = [
str(s)
for s in cipher_signature.streams.filter(progressive=True)
]
expected = (
'<Stream: itag="18" mime_type="video/mp4" res="360p" fps="24fps" '
'vcodec="avc1.42001E" acodec="mp4a.40.2" progressive="True" '
'type="video">'
)
assert expected in stream_reprs
def test_repr_for_adaptive_streams(cipher_signature):
stream = str(cipher_signature.streams.filter(adaptive=True)[0])
expected = (
'<Stream: itag="137" mime_type="video/mp4" res="1080p" fps="24fps" '
'vcodec="avc1.640028" progressive="False" type="video">'
)
assert stream == expected
def test_segmented_stream_on_404(cipher_signature):
stream = cipher_signature.streams.filter(adaptive=True)[0]
with mock.patch('pytube.request.head') as mock_head:
with mock.patch('pytube.request.urlopen') as mock_url_open:
# Mock the responses to YouTube
mock_url_open_object = mock.Mock()
# These are our 4 "segments" of a dash stream
# The first explains how many pieces there are, and
# the rest are those pieces
responses = [
b'Raw_data\r\nSegment-Count: 3',
b'a',
b'b',
b'c',
]
joined_responses = b''.join(responses)
# We create response headers to match the segments
response_headers = [
{
'content-length': len(r),
'Content-Range': '0-%s/%s' % (str(len(r)), str(len(r)))
}
for r in responses
]
# Request order for stream:
# 1. get(url&sn=0)
# 2. head(url&sn=[1,2,3])
# 3. info(url) -> 404
# 4. get(url&sn=0)
# 5. get(url&sn=[1,2,3])
# Handle filesize requests
mock_head.side_effect = [
HTTPError('', 404, 'Not Found', '', ''),
*response_headers[1:],
]
# Each response must be followed by None, to break iteration
# in the stream() function
mock_url_open_object.read.side_effect = [
responses[0], None,
responses[1], None,
responses[2], None,
responses[3], None,
]
# This handles the HEAD requests to get content-length
mock_url_open_object.info.side_effect = [
HTTPError('', 404, 'Not Found', '', ''),
*response_headers
]
mock_url_open.return_value = mock_url_open_object
with mock.patch('builtins.open', new_callable=mock.mock_open) as mock_open:
file_handle = mock_open.return_value.__enter__.return_value
fp = stream.download()
full_content = b''
for call in file_handle.write.call_args_list:
args, kwargs = call
full_content += b''.join(args)
assert full_content == joined_responses
mock_open.assert_called_once_with(fp, 'wb')
def test_segmented_only_catches_404(cipher_signature):
stream = cipher_signature.streams.filter(adaptive=True)[0]
with mock.patch('pytube.request.stream') as mock_stream:
mock_stream.side_effect = HTTPError('', 403, 'Forbidden', '', '')
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
with pytest.raises(HTTPError):
stream.download()
|
pytube/pytube
|
tests/test_streams.py
|
Python
|
unlicense
| 13,366
|
[
"VisIt"
] |
61d1006612e83569cd74305bd7c0dc42ca8d9a4212cc06cd3bccc6830333b1cc
|
# Inside of a python script.
# Call this function with "" or "__main__"
# in Crystal Clear.
print ("Hello, World!")
|
MatthewKaes/CrystalClear
|
Test/python/script.py
|
Python
|
lgpl-3.0
| 115
|
[
"CRYSTAL"
] |
7ba5a5b9821a38775d0d9271a6a67baef494c9a2e9f4d7da22d24b5894b17e14
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import division
from six.moves import range
import numpy as np
from numpy.testing import (
assert_, dec, raises, assert_raises, assert_equal, assert_array_equal
)
import MDAnalysis as mda
from MDAnalysis.analysis import base
from MDAnalysisTests.datafiles import PSF, DCD
from MDAnalysisTests import parser_not_found
class FrameAnalysis(base.AnalysisBase):
"""Just grabs frame numbers of frames it goes over"""
def __init__(self, reader, **kwargs):
super(FrameAnalysis, self).__init__(reader, **kwargs)
self.traj = reader
self.frames = []
def _single_frame(self):
self.frames.append(self._ts.frame)
class IncompleteAnalysis(base.AnalysisBase):
def __init__(self, reader, **kwargs):
super(IncompleteAnalysis, self).__init__(reader, **kwargs)
class OldAPIAnalysis(base.AnalysisBase):
"""for version 0.15.0"""
def __init__(self, reader, **kwargs):
self._setup_frames(reader, **kwargs)
def _single_frame(self):
pass
class TestAnalysisBase(object):
@dec.skipif(parser_not_found('DCD'),
'DCD parser not available. Are you using python 3?')
def setUp(self):
# has 98 frames
self.u = mda.Universe(PSF, DCD)
def tearDown(self):
del self.u
def test_default(self):
an = FrameAnalysis(self.u.trajectory).run()
assert_equal(an.n_frames, len(self.u.trajectory))
assert_equal(an.frames, list(range(len(self.u.trajectory))))
def test_start(self):
an = FrameAnalysis(self.u.trajectory, start=20).run()
assert_equal(an.n_frames, len(self.u.trajectory) - 20)
assert_equal(an.frames, list(range(20, len(self.u.trajectory))))
def test_stop(self):
an = FrameAnalysis(self.u.trajectory, stop=20).run()
assert_equal(an.n_frames, 20)
assert_equal(an.frames, list(range(20)))
def test_step(self):
an = FrameAnalysis(self.u.trajectory, step=20).run()
assert_equal(an.n_frames, 5)
assert_equal(an.frames, list(range(98))[::20])
def test_verbose(self):
a = FrameAnalysis(self.u.trajectory, verbose=True)
assert_(a._verbose)
assert_(not a._quiet)
@raises(NotImplementedError)
def test_incomplete_defined_analysis(self):
IncompleteAnalysis(self.u.trajectory).run()
def test_old_api(self):
OldAPIAnalysis(self.u.trajectory).run()
def test_filter_baseanalysis_kwargs():
def bad_f(mobile, step=2):
pass
def good_f(mobile, ref):
pass
kwargs = {'step': 3, 'foo': None}
assert_raises(ValueError, base._filter_baseanalysis_kwargs, bad_f, kwargs)
base_kwargs, kwargs = base._filter_baseanalysis_kwargs(good_f, kwargs)
assert_equal(1, len(kwargs))
assert_equal(kwargs['foo'], None)
assert_equal(5, len(base_kwargs))
assert_equal(base_kwargs['start'], None)
assert_equal(base_kwargs['step'], 3)
assert_equal(base_kwargs['stop'], None)
assert_equal(base_kwargs['quiet'], None)
assert_equal(base_kwargs['verbose'], None)
def simple_function(mobile):
return mobile.center_of_geometry()
@dec.skipif(parser_not_found('DCD'),
'DCD parser not available. Are you using python 3?')
def test_AnalysisFromFunction():
u = mda.Universe(PSF, DCD)
step = 2
ana1 = base.AnalysisFromFunction(simple_function, mobile=u.atoms,
step=step).run()
ana2 = base.AnalysisFromFunction(simple_function, u.atoms,
step=step).run()
ana3 = base.AnalysisFromFunction(simple_function, u.trajectory, u.atoms,
step=step).run()
results = []
for ts in u.trajectory[::step]:
results.append(simple_function(u.atoms))
results = np.asarray(results)
for ana in (ana1, ana2, ana3):
assert_array_equal(results, ana.results)
@dec.skipif(parser_not_found('DCD'),
'DCD parser not available. Are you using python 3?')
def test_analysis_class():
ana_class = base.analysis_class(simple_function)
assert_(issubclass(ana_class, base.AnalysisBase))
assert_(issubclass(ana_class, base.AnalysisFromFunction))
u = mda.Universe(PSF, DCD)
step = 2
ana = ana_class(u.atoms, step=step).run()
results = []
for ts in u.trajectory[::step]:
results.append(simple_function(u.atoms))
results = np.asarray(results)
assert_array_equal(results, ana.results)
assert_raises(ValueError, ana_class, 2)
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/analysis/test_base.py
|
Python
|
gpl-2.0
| 5,601
|
[
"MDAnalysis"
] |
083a5502be9cd2fb3d1a262d34aa78768d578b122ddc9b04d3a63a2bdea05d98
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Optimize the logP of a molecule."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
from absl import app
from absl import flags
from rdkit import Chem
from mol_dqn.chemgraph.dqn import deep_q_networks
from mol_dqn.chemgraph.dqn import molecules as molecules_mdp
from mol_dqn.chemgraph.dqn import run_dqn
from mol_dqn.chemgraph.dqn.py import molecules
from mol_dqn.chemgraph.dqn.tensorflow_core import core
flags.DEFINE_float('gamma', 0.999, 'discount')
FLAGS = flags.FLAGS
class Molecule(molecules_mdp.Molecule):
def _reward(self):
molecule = Chem.MolFromSmiles(self._state)
if molecule is None:
return 0.0
return molecules.penalized_logp(molecule)
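# Reward sketch (illustrative, not part of the training flow): an invalid
# SMILES state scores 0.0, a valid one scores its penalized logP.
#
#   env._state = 'CCO'
#   env._reward()  # penalized logP of ethanol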
def main(argv):
del argv
if FLAGS.hparams is not None:
with open(FLAGS.hparams, 'r') as f:
hparams = deep_q_networks.get_hparams(**json.load(f))
else:
hparams = deep_q_networks.get_hparams()
environment = Molecule(
atom_types=set(hparams.atom_types),
init_mol=FLAGS.start_molecule,
allow_removal=hparams.allow_removal,
allow_no_modification=hparams.allow_no_modification,
allow_bonds_between_rings=hparams.allow_bonds_between_rings,
allowed_ring_sizes=set(hparams.allowed_ring_sizes),
max_steps=hparams.max_steps_per_episode)
dqn = deep_q_networks.DeepQNetwork(
input_shape=(hparams.batch_size, hparams.fingerprint_length + 1),
q_fn=functools.partial(
deep_q_networks.multi_layer_model, hparams=hparams),
optimizer=hparams.optimizer,
grad_clipping=hparams.grad_clipping,
num_bootstrap_heads=hparams.num_bootstrap_heads,
gamma=hparams.gamma,
epsilon=1.0)
run_dqn.run_training(
hparams=hparams,
environment=environment,
dqn=dqn,
)
core.write_hparams(hparams, os.path.join(FLAGS.model_dir, 'config.json'))
if __name__ == '__main__':
app.run(main)
|
google-research/google-research
|
mol_dqn/chemgraph/optimize_logp.py
|
Python
|
apache-2.0
| 2,611
|
[
"RDKit"
] |
ae3f6dc49ded8efcc76aa3d758b6eb8c9de6ff14d04e86a5103b9e62e7ba92ec
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
bootstrap procedures --- :mod:`MDAnalysis.analysis.ensemble.bootstrap`
======================================================================
The module contains functions for bootstrapping either ensembles (Universe
objects) or distance matrices, by resampling with replacement.
:Author: Matteo Tiberti, Wouter Boomsma, Tone Bengtsen
.. versionadded:: 0.16.0
"""
import numpy as np
import logging
import MDAnalysis as mda
from .utils import TriangularMatrix, ParallelCalculation
def bootstrapped_matrix(matrix, ensemble_assignment):
"""
    Bootstrap an input square matrix. The resulting matrix will have the same
    shape as the original one, but its elements will be resampled with
    replacement. Each ensemble is bootstrapped separately.
Parameters
----------
matrix : encore.utils.TriangularMatrix
similarity/dissimilarity matrix
ensemble_assignment: numpy.array
array of ensemble assignments. This array must be matrix.size long.
Returns
-------
this_m : encore.utils.TriangularMatrix
bootstrapped similarity/dissimilarity matrix
"""
ensemble_identifiers = np.unique(ensemble_assignment)
this_m = TriangularMatrix(size=matrix.size)
indexes = []
for ens in ensemble_identifiers:
old_indexes = np.where(ensemble_assignment == ens)[0]
indexes.append(np.random.randint(low=np.min(old_indexes),
high=np.max(old_indexes) + 1,
size=old_indexes.shape[0]))
indexes = np.hstack(indexes)
for j in range(this_m.size):
for k in range(j):
this_m[j, k] = matrix[indexes[j], indexes[k]]
logging.info("Matrix bootstrapped.")
return this_m
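# Minimal sketch (sizes are illustrative): bootstrap a 4-frame matrix whose
# frames all belong to a single ensemble.
#
#   m = TriangularMatrix(size=4)
#   assignment = np.array([1, 1, 1, 1])
#   bs = bootstrapped_matrix(m, assignment)  # same shape, resampled entries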
def get_distance_matrix_bootstrap_samples(distance_matrix,
ensemble_assignment,
samples=100,
ncores=1):
"""
Calculates distance matrices corresponding to bootstrapped ensembles, by
resampling with replacement.
Parameters
----------
distance_matrix : encore.utils.TriangularMatrix
Conformational distance matrix
    ensemble_assignment : numpy.ndarray
        Mapping from frames to the ensemble they belong to (necessary because
        ensembles are bootstrapped independently)
samples : int, optional
How many bootstrap samples to create.
ncores : int, optional
Maximum number of cores to be used (default is 1)
Returns
-------
confdistmatrix : list of encore.utils.TriangularMatrix
"""
bs_args = \
[([distance_matrix, ensemble_assignment]) for i in range(samples)]
pc = ParallelCalculation(ncores, bootstrapped_matrix, bs_args)
pc_results = pc.run()
    bootstrap_matrices = list(zip(*pc_results))[1]  # list() for Python 3
return bootstrap_matrices
def get_ensemble_bootstrap_samples(ensemble,
samples=100):
"""
Generates a bootstrapped ensemble by resampling with replacement.
Parameters
----------
    ensemble : MDAnalysis.Universe
        The ensemble to be bootstrapped
samples : int, optional
How many bootstrap samples to create.
Returns
-------
list of MDAnalysis.Universe objects
"""
ensemble.transfer_to_memory()
ensembles = []
for i in range(samples):
indices = np.random.randint(
low=0,
high=ensemble.trajectory.timeseries().shape[1],
size=ensemble.trajectory.timeseries().shape[1])
ensembles.append(
mda.Universe(ensemble.filename,
ensemble.trajectory.timeseries(format='fac')[indices,:,:],
format=mda.coordinates.memory.MemoryReader))
return ensembles
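# Usage sketch (file names are hypothetical):
#
#   u = mda.Universe('topology.pdb', 'trajectory.xtc')
#   boots = get_ensemble_bootstrap_samples(u, samples=10)
#   # -> list of 10 in-memory Universe objects with resampled frames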
|
alejob/mdanalysis
|
package/MDAnalysis/analysis/encore/bootstrap.py
|
Python
|
gpl-2.0
| 4,872
|
[
"MDAnalysis"
] |
bb2ef36127490f4c518f953367bf5abd8e89c7666b781981b53e827e74a54372
|
# -*- coding: utf-8 -*-
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from .context import cdk
class BasicTestSuite(unittest.TestCase):
"""Unit tests go here!"""
def test_inserting_custom_css(self):
"""Custom css means opening the produced .html and sneaking
a style tag before the closing </body> tag."""
source_fp = StringIO('<p>a bunch of</p> html</body>\n</html>\n')
cdk.add_css_to_stream(source_fp, 'My rules')
source_fp.seek(0)
out = '<p>a bunch of</p> html<style type="text/css">\nMy rules\n</style>\n</body>\n</html>\n'
self.assertEqual(out, source_fp.read())
def test_toc_gets_numbered(self):
cmd = cdk.create_command("basic", toc=True)
cmd = " ".join(cmd)
assert "-a toc -a numbered" in cmd
if __name__ == '__main__':
unittest.main()
|
twitter/cdk
|
tests/test_basic.py
|
Python
|
apache-2.0
| 930
|
[
"CDK"
] |
aa2a30b04f27e6eb2ed513fadf809508f82228beea42c73488c186d3d0ad6f00
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
************************************
**espresso.interaction.VSpherePair**
************************************
"""
from espresso import pmi, infinity
from espresso.esutil import *
from espresso.interaction.Potential import *
from espresso.interaction.Interaction import *
from _espresso import interaction_VSpherePair, interaction_VerletListVSpherePair
class VSpherePairLocal(PotentialLocal, interaction_VSpherePair):
    'The (local) VSpherePair potential.'
    def __init__(self, epsilon=1.0, cutoff=infinity, shift="auto"):
        """Initialize the local VSpherePair object."""
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            if shift == "auto":
                cxxinit(self, interaction_VSpherePair,
                        epsilon, cutoff)
            else:
                cxxinit(self, interaction_VSpherePair,
                        epsilon, cutoff, shift)
class VerletListVSpherePairLocal(InteractionLocal, interaction_VerletListVSpherePair):
    'The (local) VSpherePair interaction using Verlet lists.'
def __init__(self, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListVSpherePair, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
def getVerletList(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getVerletList(self)
if pmi.isController:
class VSpherePair(Potential):
        'The VSpherePair potential.'
pmiproxydefs = dict(
cls = 'espresso.interaction.VSpherePairLocal',
pmiproperty = ['epsilon']
)
class VerletListVSpherePair(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.VerletListVSpherePairLocal',
pmicall = ['setPotential', 'getPotential', 'getVerletList']
)
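# Usage sketch (the system and Verlet list ``vl`` are assumed to be set up
# in the usual espresso++ way; values are illustrative):
#
#   pot = VSpherePair(epsilon=1.0, cutoff=2.5)
#   interaction = VerletListVSpherePair(vl)
#   interaction.setPotential(type1=0, type2=0, potential=pot)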
|
BackupTheBerlios/espressopp
|
src/interaction/VSpherePair.py
|
Python
|
gpl-3.0
| 3,343
|
[
"ESPResSo"
] |
9f36f8032e630fbd816bac3d27055c5e48f8bda351081342b8cf8aa883a8cdd2
|
########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
import datetime
import os
import tarfile
import tempfile
import yaml
from distutils.version import LooseVersion
from shutil import rmtree
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.galaxy.api import GalaxyAPI
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyRole(object):
SUPPORTED_SCMS = set(['git', 'hg'])
META_MAIN = os.path.join('meta', 'main.yml')
META_INSTALL = os.path.join('meta', '.galaxy_install_info')
ROLE_DIRS = ('defaults', 'files', 'handlers', 'meta', 'tasks', 'templates', 'vars', 'tests')
def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):
self._metadata = None
self._install_info = None
self._validate_certs = not galaxy.options.ignore_certs
display.debug('Validate TLS certificates: %s' % self._validate_certs)
self.options = galaxy.options
self.galaxy = galaxy
self.name = name
self.version = version
self.src = src or name
self.scm = scm
if path is not None:
if self.name not in path:
path = os.path.join(path, self.name)
self.path = path
else:
for role_path_dir in galaxy.roles_paths:
role_path = os.path.join(role_path_dir, self.name)
if os.path.exists(role_path):
self.path = role_path
break
else:
# use the first path by default
self.path = os.path.join(galaxy.roles_paths[0], self.name)
# create list of possible paths
self.paths = [x for x in galaxy.roles_paths]
self.paths = [os.path.join(x, self.name) for x in self.paths]
def __repr__(self):
"""
Returns "rolename (version)" if version is not null
Returns "rolename" otherwise
"""
if self.version:
return "%s (%s)" % (self.name, self.version)
else:
return self.name
def __eq__(self, other):
return self.name == other.name
@property
def metadata(self):
"""
Returns role metadata
"""
if self._metadata is None:
meta_path = os.path.join(self.path, self.META_MAIN)
if os.path.isfile(meta_path):
try:
f = open(meta_path, 'r')
self._metadata = yaml.safe_load(f)
except:
display.vvvvv("Unable to load metadata for %s" % self.name)
return False
finally:
f.close()
return self._metadata
@property
def install_info(self):
"""
Returns role install info
"""
if self._install_info is None:
info_path = os.path.join(self.path, self.META_INSTALL)
if os.path.isfile(info_path):
try:
f = open(info_path, 'r')
self._install_info = yaml.safe_load(f)
except:
display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
return False
finally:
f.close()
return self._install_info
def _write_galaxy_install_info(self):
"""
Writes a YAML-formatted file to the role's meta/ directory
(named .galaxy_install_info) which contains some information
we can use later for commands like 'list' and 'info'.
"""
info = dict(
version=self.version,
install_date=datetime.datetime.utcnow().strftime("%c"),
)
if not os.path.exists(os.path.join(self.path, 'meta')):
os.makedirs(os.path.join(self.path, 'meta'))
info_path = os.path.join(self.path, self.META_INSTALL)
with open(info_path, 'w+') as f:
try:
self._install_info = yaml.safe_dump(info, f)
except:
return False
return True
def remove(self):
"""
Removes the specified role from the roles path.
There is a sanity check to make sure there's a meta/main.yml file at this
path so the user doesn't blow away random directories.
"""
if self.metadata:
try:
rmtree(self.path)
return True
except:
pass
return False
def fetch(self, role_data):
"""
        Downloads the archived role from GitHub to a temp location
"""
if role_data:
# first grab the file and save it to a temp location
if "github_user" in role_data and "github_repo" in role_data:
archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
else:
archive_url = self.src
display.display("- downloading role from %s" % archive_url)
try:
url_file = open_url(archive_url, validate_certs=self._validate_certs)
temp_file = tempfile.NamedTemporaryFile(delete=False)
data = url_file.read()
while data:
temp_file.write(data)
data = url_file.read()
temp_file.close()
return temp_file.name
except Exception as e:
display.error("failed to download the file: %s" % str(e))
return False
def install(self):
# the file is a tar, so open it that way and extract it
# to the specified (or default) roles directory
local_file = False
if self.scm:
# create tar file from scm url
tmp_file = RoleRequirement.scm_archive_role(**self.spec)
elif self.src:
if os.path.isfile(self.src):
# installing a local tar.gz
local_file = True
tmp_file = self.src
elif '://' in self.src:
role_data = self.src
tmp_file = self.fetch(role_data)
else:
api = GalaxyAPI(self.galaxy)
role_data = api.lookup_role_by_name(self.src)
if not role_data:
raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server))
if role_data.get('role_type') == 'APP':
# Container Role
display.warning("%s is a Container App role, and should only be installed using Ansible "
"Container" % self.name)
role_versions = api.fetch_role_related('versions', role_data['id'])
if not self.version:
# convert the version names to LooseVersion objects
# and sort them to get the latest version. If there
# are no versions in the list, we'll grab the head
# of the master branch
if len(role_versions) > 0:
loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
loose_versions.sort()
self.version = str(loose_versions[-1])
elif role_data.get('github_branch', None):
self.version = role_data['github_branch']
else:
self.version = 'master'
elif self.version != 'master':
if role_versions and str(self.version) not in [a.get('name', None) for a in role_versions]:
raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version,
self.name,
role_versions))
tmp_file = self.fetch(role_data)
else:
raise AnsibleError("No valid role data found")
if tmp_file:
display.debug("installing from %s" % tmp_file)
if not tarfile.is_tarfile(tmp_file):
raise AnsibleError("the file downloaded was not a tar.gz")
else:
if tmp_file.endswith('.gz'):
role_tar_file = tarfile.open(tmp_file, "r:gz")
else:
role_tar_file = tarfile.open(tmp_file, "r")
# verify the role's meta file
meta_file = None
members = role_tar_file.getmembers()
# next find the metadata file
for member in members:
if self.META_MAIN in member.name:
# Look for parent of meta/main.yml
# Due to possibility of sub roles each containing meta/main.yml
# look for shortest length parent
meta_parent_dir = os.path.dirname(os.path.dirname(member.name))
if not meta_file:
archive_parent_dir = meta_parent_dir
meta_file = member
else:
if len(meta_parent_dir) < len(archive_parent_dir):
archive_parent_dir = meta_parent_dir
meta_file = member
if not meta_file:
raise AnsibleError("this role does not appear to have a meta/main.yml file.")
else:
try:
self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
except:
raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")
# we strip off any higher-level directories for all of the files contained within
            # the tar file here. The default is 'github_repo-target'. Gerrit instances, on the
            # other hand, do not have a parent directory at all.
installed = False
while not installed:
display.display("- extracting %s to %s" % (self.name, self.path))
try:
if os.path.exists(self.path):
if not os.path.isdir(self.path):
raise AnsibleError("the specified roles path exists and is not a directory.")
elif not getattr(self.options, "force", False):
raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
else:
# using --force, remove the old path
if not self.remove():
raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really "
"want to put the role here." % self.path)
else:
os.makedirs(self.path)
# now we do the actual extraction to the path
for member in members:
# we only extract files, and remove any relative path
# bits that might be in the file for security purposes
# and drop any containing directory, as mentioned above
if member.isreg() or member.issym():
parts = member.name.replace(archive_parent_dir, "", 1).split(os.sep)
final_parts = []
for part in parts:
if part != '..' and '~' not in part and '$' not in part:
final_parts.append(part)
member.name = os.path.join(*final_parts)
role_tar_file.extract(member, self.path)
# write out the install info file for later use
self._write_galaxy_install_info()
installed = True
except OSError as e:
error = True
if e.errno == errno.EACCES and len(self.paths) > 1:
current = self.paths.index(self.path)
if len(self.paths) > current:
self.path = self.paths[current + 1]
error = False
if error:
raise AnsibleError("Could not update files in %s: %s" % (self.path, str(e)))
# return the parsed yaml metadata
display.display("- %s was installed successfully" % str(self))
if not local_file:
try:
os.unlink(tmp_file)
except (OSError, IOError) as e:
display.warning("Unable to remove tmp file (%s): %s" % (tmp_file, str(e)))
return True
return False
@property
def spec(self):
"""
Returns role spec info
{
'scm': 'git',
'src': 'http://git.example.com/repos/repo.git',
'version': 'v1.0',
'name': 'repo'
}
"""
return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
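# Spec sketch (values are illustrative): the property mirrors the arguments
# the role was constructed with.
#
#   role = GalaxyRole(galaxy, 'repo',
#                     src='http://git.example.com/repos/repo.git',
#                     scm='git', version='v1.0')
#   role.spec  # matches the dict shown in the docstring above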
|
Azulinho/ansible
|
lib/ansible/galaxy/role.py
|
Python
|
gpl-3.0
| 14,965
|
[
"Brian",
"Galaxy"
] |
c52ce13f53acce947b385b8b334c811491262ee0abdf3870bba9f48bf4f98831
|
from unittest import TestCase
from nose.tools import assert_equals
from rdkit import Chem
import deepchem as dc
class TestOneHotFeaturizer(TestCase):
def test_featurize(self):
smiles = ["Cn1c(=O)c2c(ncn2C)n(C)c1=O", "CC(=O)N1CN(C(C)=O)C(O)C1O"]
mols = [Chem.MolFromSmiles(smile) for smile in smiles]
featurizer = dc.feat.one_hot.OneHotFeaturizer(dc.feat.one_hot.zinc_charset)
one_hots = featurizer.featurize(mols)
untransformed = featurizer.untransform(one_hots)
assert_equals(len(smiles), len(untransformed))
for i in range(len(smiles)):
assert_equals(smiles[i], untransformed[i][0])
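# Note (added for clarity; an assumption about the featurizer, not asserted by
# the original test): the round-trip equality above holds only when every
# character of each SMILES string appears in zinc_charset, since untransform()
# maps one-hot positions back to charset characters.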
|
Agent007/deepchem
|
deepchem/feat/test_one_hot.py
|
Python
|
mit
| 626
|
[
"RDKit"
] |
8aec73a44ede9304a5b2577c33e9e6767a2a3c91867fa7f446c271517ade41c4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (print_function)
import os
import sys
import warnings
import argparse
import numpy as np
path = os.path.normpath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
sys.path.insert(0, path)
from uvmod import stats
from uvmod import models
from uvmod import plotting
import matplotlib.pyplot as plt
try:
from pylab import (errorbar, plot, savefig, show)
is_pylab = True
except ImportError:
is_pylab = False
try:
from scipy.stats import uniform
is_scipy = True
except ImportError:
warnings.warn('Install ``scipy`` python package to use least squares'
' estimates.')
is_scipy = False
try:
import emcee
is_emcee = True
except ImportError:
warnings.warn('Install ``emcee`` python package to use MCMC.')
is_emcee = False
try:
import triangle
is_triangle = True
except ImportError:
warnings.warn('Install ``triangle.py`` python package to draw beautiful'
' corner plots of posterior PDF.')
is_triangle = False
if __name__ == '__main__':
parser =\
argparse.ArgumentParser(description='Fit simple models in uv-plane',
epilog="Help me to develop it here:"
" https://github.com/ipashchenko/uvmod")
parser.add_argument('-leastsq', action='store_true', dest='use_leastsq',
default=False,
help='- use scipy.optimize.leastsq for analysis of'
' detections')
parser.add_argument('-p0', action='store', dest='p0', nargs='+',
default=None, type=float, help='- starting estimates'
' for the minimization'
' or center of initial'
' ball for MCMC')
parser.add_argument('-jitter', action='store_true', dest='jitter',
default=False, help='- model jitter?')
parser.add_argument('-outliers', action='store_true', dest='outliers',
default=False, help='- model outliers?')
parser.add_argument('-std0', action='store', dest='std0', nargs='+',
default=None, type=float, help='- stds of initial ball'
' for MCMC')
parser.add_argument('-2d', action='store_true', dest='use_2d',
default=False, help='- use 2D-fitting?')
parser.add_argument('path_to_detections', type=str, metavar='detections',
help='- path to file with detections data')
parser.add_argument('path_to_ulimits', nargs='?', metavar='upper limits',
default=None, type=str, help=' - path to file with'
' upper limits data')
parser.add_argument('-max_p', action='store', nargs='+', default=None,
type=float, help='- maximum values of uniform prior'
' distribution for parameters')
parser.add_argument('-savefig', action='store', nargs='?',
default=None, metavar='path to file',
type=str, help='- file to save plots of posterior'
' PDF (if ``triangle.py`` is installed)'
' or histograms (coming soon)')
parser.add_argument('-savemodfig', action='store', nargs='?',
default=None, metavar='path to file', type=str,
help='- file to save plots of model vs data')
parser.add_argument('-savefile', action='store', nargs='?', default=None,
metavar='path to file', type=str, help='- file to save'
' parameters')
args = parser.parse_args()
if args.use_leastsq and (not args.p0):
sys.exit("Use -p0 flag to specify the list of starting values for"
" minimization!")
if args.use_leastsq and args.std0:
print("Specified flag -std0 won't be used in routine!")
# FIXME: In fact we can. Use MLE. Put -LnLike to minimization routine!
if args.use_leastsq and args.jitter:
print("Can't model jitter in LSQ!")
# FIXME: In fact we can. Use MLE. Put -LnLike to minimization routine!
if args.use_leastsq and args.outliers:
print("Can't model outliers in LSQ!")
if args.path_to_ulimits and args.outliers:
sys.exit("Outliers with upper limits coming soon!")
if (not args.use_leastsq) and (not args.max_p):
sys.exit("Use -max_p flag to specify the list of maximum values of"
" parameters in uniform prior distributions")
if (not args.use_leastsq) and args.p0 and (not args.std0):
print("Initializing MCMC using default radius around p0!")
if (not args.use_leastsq) and args.std0 and (not args.p0):
print("Initializing MCMC in (0, 1) interval!")
if (not args.use_leastsq) and (not args.p0) and (not args.std0):
print("Initializing MCMC in (0, 1) interval!")
print(parser.parse_args())
jitter_n = {True: 1, False: 0}
outliers_n = {True: 3, False: 0}
# Loading data
# Pre-initialize in case of no uncertainties supplied
sy, xl, yl, syl = [None] * 4
# TODO: Refactor to function func(fname, tuple_of_dim, optional_tuple)
if not args.use_2d:
model = models.Model_1d
n_gauss_pars = 2
try:
x, y, sy = np.loadtxt(args.path_to_detections, unpack=True)
except ValueError:
x, y = np.loadtxt(args.path_to_detections, unpack=True)
if args.path_to_ulimits:
try:
xl, yl, syl = np.loadtxt(args.path_to_ulimits, unpack=True)
except ValueError:
xl, yl = np.loadtxt(args.path_to_ulimits, unpack=True)
else:
# Pre-initialize in case of no uncertainties supplied. ``None`` values
# will be used later in plotting functions
xl1, xl2 = [None] * 2
# Choose Model class to use
print("We get " + str(len(args.p0)) + " parameters")
n_gauss_pars = len(args.p0) - outliers_n[args.outliers] - jitter_n[args.jitter]
print("Gauss function has number of parameters :")
print(n_gauss_pars)
if n_gauss_pars == 2:
model = models.Model_2d_isotropic
print("Will use isotropic gaussian model")
elif n_gauss_pars == 4:
model = models.Model_2d_anisotropic
print("Will use anisotropic gaussian model")
else:
raise Exception("Only 2 or 4 parameters for 2D case!")
try:
x1, x2, y, sy = np.loadtxt(args.path_to_detections, unpack=True)
except ValueError:
x1, x2, y = np.loadtxt(args.path_to_detections, unpack=True)
x = np.column_stack((x1, x2,))
if args.path_to_ulimits:
try:
xl1, xl2, yl, syl = np.loadtxt(args.path_to_ulimits,
unpack=True)
except ValueError:
xl1, xl2, yl = np.loadtxt(args.path_to_ulimits, unpack=True)
xl = np.column_stack((xl1, xl2,))
# Print data
if args.use_2d:
print("=========== 2D ==============")
else:
print("=========== 1D ==============")
print("Predictors of detections: ")
print(x)
print("Detection values: ")
print(y)
if sy is not None:
print("Detection uncertainties: ")
print(sy)
if args.path_to_ulimits is not None:
print("Predictors of upper limits: ")
print(xl)
print("Upper limits values: ")
print(yl)
if syl is not None:
print("Upper limits uncertainties: ")
print(syl)
print("============================")
# Find max argument for plotting
try:
xmax = max((np.vstack((x, xl))).flatten())
# If no data on limits
except ValueError:
xmax = max(x.flatten())
# If we are told to use LSQ
if args.use_leastsq:
lsq = stats.LS_estimates(x, y, model, sy=sy)
p, pcov = lsq.fit(args.p0)
# Saving best fit params and covariance matrix
if args.savefile:
print ("Saving data to " + args.savefile)
np.savetxt(args.savefile, p)
f_handle = open(args.savefile, 'a')
if not np.isnan(pcov).any():
np.savetxt(f_handle, pcov)
f_handle.close()
# Plotting model vs. data
if args.savemodfig:
if args.use_2d:
plotting.plot_all(p, x1=x1, x2=x2, y=y, sy=sy, ux1=xl1, ux2=xl2,
uy=yl, outfile=args.savemodfig)
else:
errorbar(x, y, sy, fmt='.k')
errorbar(xl, yl, syl, fmt='.r', lolims=True)
model_plot = model(np.arange(1000.) * xmax / 1000.)
plot(np.arange(1000.) * xmax / 1000., model_plot(p))
print ("Saving figure to " + args.savemodfig)
savefig(args.savemodfig)
# If not told to use LSQ => use MCMC to sample the posterior
else:
lnpr_list = list()
for max_p in args.max_p:
lnpr_list.append((uniform.logpdf, [0, max_p], dict(),))
lnprs = tuple(lnpr_list)
lnpr = stats.LnPrior(lnprs)
lnpost = stats.LnPost(x, y, model, sy=sy, x_limits=xl, y_limits=yl,
sy_limits=syl, lnpr=lnpr, jitter=args.jitter,
outliers=args.outliers)
# Using affine-invariant MCMC
nwalkers = 250
ndim = len(lnprs)
if args.p0 is None:
p0 = np.random.uniform(low=0., high=1., size=(nwalkers, ndim))
else:
if args.std0 is not None:
std0 = args.std0
else:
# default: a small 0.01 ball radius per parameter
std0 = [0.01] * len(args.p0)
p0 = emcee.utils.sample_ball(args.p0, std0, size=nwalkers)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost)
print("Burning-in...")
pos, prob, state = sampler.run_mcmc(p0, 150)
sampler.reset()
print("Sampling posterior...")
sampler.run_mcmc(pos, 200)
# Calculate the mean and 95% HDI of each parameter's posterior
par_list = list()
for i in range(ndim):
sample_vec = sampler.flatchain[::10, i]
p_hdi_min, p_hdi_max = stats.hdi_of_mcmc(sample_vec)
p_mean = np.mean(sample_vec)
par_list.append([p_hdi_min, p_mean, p_hdi_max])
par_array = np.asarray(par_list)
# Save the mean and 95% HDI of each parameter's posterior
if args.savefile:
print ("Saving mean & 95%-HDI to " + args.savefile)
np.savetxt(args.savefile, np.asarray(par_list))
# Visualize with triangle_plot.py
if args.savefig:
# If the ``triangle`` package is installed, use it for a corner
# plot of the parameters' posterior
if is_triangle:
figure = triangle.corner(sampler.flatchain[::10, :])
print ("Saving parameter's posterior plot to " + args.savefig)
figure.savefig(args.savefig)
# Plot histograms without ``triangle``
else:
print("Can't plot posterior without the ``triangle`` package!")
# Plot model vs. data
if args.savemodfig:
# 2D case
if args.use_2d:
print ("Saving model vs. data plot to " + args.savemodfig)
plotting.plot_all(par_array[:n_gauss_pars, 1], x1=x1, x2=x2, y=y,
sy=sy, ux1=xl1, ux2=xl2, uy=yl,
outfile=args.savemodfig)
# 1D case
else:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.errorbar(x, y, sy, fmt='.k')
ax.errorbar(xl, yl, syl, fmt='.r', lolims=True)
model_plot = model(np.arange(1000.) * xmax / 1000.)
ax.plot(np.arange(1000.) * xmax / 1000.,
model_plot(par_array[:n_gauss_pars, 1]))
fig.show()
print ("Saving model vs. data plot to " + args.savemodfig)
fig.savefig(args.savemodfig)
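# Sketch (added for illustration; assumes emcee 2.x, where
# emcee.utils.sample_ball is available): the walker initialization above
# draws an (nwalkers, ndim) Gaussian ball around p0 with per-parameter
# scatter std0, e.g.
#   ball = emcee.utils.sample_ball([1.0, 0.5], [0.01, 0.01], size=250)
#   ball.shape  # -> (250, 2)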
|
ipashchenko/uvmod
|
bin/fit_amp.py
|
Python
|
mit
| 12,539
|
[
"Gaussian"
] |
b08b0e0612e9316d00987efa15a0d5f16b7e038071424aa425ac78e0cc004dac
|
from sage.matrix.constructor import *
def Riordan_matrix_latex_code(
array, order=10, handlers_tuple=None, handle_triangular_region_only=True):
"""
Produces a chunk of infinite lower matrix denoted by functions *d* and *h*
Enhancement:
1. Add optional arguments that request to return a sympy matrix
(expansion of function *h* is interesting only if the returned object
is of class Eq, otherwise the client could expand *h* by itself)
Fixes:
1. change name of functions *g* and *f* to *d* and *h*, also change
default var name to *t*.
"""
# adjust handlers in order to talk with "null objects" if no actions are desired.
if handlers_tuple is None:
on_computed_coefficient=None
on_computed_row_coefficients=None
else:
on_computed_coefficient, on_computed_row_coefficients = handlers_tuple
if on_computed_coefficient is None:
on_computed_coefficient=lambda row, col, coeff: coeff
if on_computed_row_coefficients is None:
on_computed_row_coefficients=lambda row_index, coefficients: coefficients
# initialize coefficient matrix, aka the Riordan array expansion
QQ_matrix = matrix(QQ, order, order)
def handler(row_index, col_index):
# indexing `array' is the only requirement for it to be a Riordan array
QQ_matrix[row_index, col_index] = coefficient = array[row_index, col_index]
try:
result_from_supplied_block = on_computed_coefficient(
row_index, col_index, coefficient)
except Exception as e:
print(e)
result_from_supplied_block = None
return result_from_supplied_block
result_list_from_supplied_block = []
result_list_from_supplied_block_per_row = {}
for row_index in range(order):
col_limit = row_index + 1 if handle_triangular_region_only else order
result_list_from_supplied_block_per_row[row_index] = []
for col_index in range(col_limit):
element = handler(row_index, col_index)
result_list_from_supplied_block.append(element)
result_list_from_supplied_block_per_row[row_index].append(element)
result_list_from_supplied_block_per_row[row_index] = \
on_computed_row_coefficients(
row_index,
result_list_from_supplied_block_per_row[row_index])
return (QQ_matrix,
result_list_from_supplied_block,
result_list_from_supplied_block_per_row)
def strip_array_as_generator(array, order, filter_block=lambda row, col: col <= row):
""" Visit the given array, triangularly by default, top->bottom, left->right. """
return (array[row, col] for row in range(order)
for col in range(order) if filter_block(row, col))
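# Usage sketch (added for illustration; assumes a Sage session, where
# ``matrix``, ``QQ`` and ``binomial`` are available, and any object indexable
# as array[row, col], e.g. the Pascal triangle seen as a Riordan array):
#   pascal = matrix(QQ, 10, 10, lambda n, k: binomial(n, k))
#   M, flat, per_row = Riordan_matrix_latex_code(pascal, order=5)
#   list(strip_array_as_generator(pascal, 3))  # -> [1, 1, 1, 1, 2, 1]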
|
massimo-nocentini/master-thesis
|
sympy/riordan_visiting.py
|
Python
|
mit
| 2,884
|
[
"VisIt"
] |
52dfff2903a63d739108a53ac245fc549ba7bf6e2ca23403c3c66b382aedc1ec
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('update_dataset')
@click.argument("history_id", type=str)
@click.argument("dataset_id", type=str)
@click.option(
"--annotation",
help="Replace history dataset annotation with given string",
type=str
)
@click.option(
"--datatype",
help="Replace the datatype of the history dataset with the given string. The string must be a valid Galaxy datatype, both the current and the target datatypes must allow datatype changes, and the dataset must not be in use as input or output of a running job (including uploads), otherwise an error will be raised.",
type=str
)
@click.option(
"--deleted",
help="Mark or unmark history dataset as deleted",
is_flag=True
)
@click.option(
"--genome_build",
help="Replace history dataset genome build (dbkey)",
type=str
)
@click.option(
"--name",
help="Replace history dataset name with the given string",
type=str
)
@click.option(
"--visible",
help="Mark or unmark history dataset as visible",
is_flag=True
)
@pass_context
@custom_exception
@json_output
def cli(ctx, history_id, dataset_id, annotation=None, datatype=None, deleted=None, genome_build=None, name=None, visible=None):
"""Update history dataset metadata. Some of the attributes that can be modified are documented below.
Output:
details of the updated dataset
.. versionchanged:: 0.8.0
Changed the return value from the status code (type int) to a dict.
"""
# forward only the options that were actually provided
kwargs = {key: value for key, value in (
('annotation', annotation), ('datatype', datatype),
('genome_build', genome_build), ('name', name))
if value is not None}
if deleted:
kwargs['deleted'] = deleted
if visible:
kwargs['visible'] = visible
return ctx.gi.histories.update_dataset(history_id, dataset_id, **kwargs)
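# CLI usage sketch (added for illustration; hypothetical invocation, assumes a
# configured parsec profile):
#   parsec histories update_dataset <history_id> <dataset_id> --name renamed --visible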
|
galaxy-iuc/parsec
|
parsec/commands/histories/update_dataset.py
|
Python
|
apache-2.0
| 1,687
|
[
"Galaxy"
] |
bef8cf0067bccd82d9e9e82031569171324ba6b612f0faa11ce86856b148f784
|
In [57]: from sklearn.grid_search import RandomizedSearchCV
In [58]: from h2o.cross_validation import H2OKFold
In [59]: from h2o.model.regression import h2o_r2_score
In [60]: from sklearn.metrics.scorer import make_scorer
# Parameters to test
In [62]: params = {"standardize__center": [True, False],
....: "standardize__scale": [True, False],
....: "pca__k": [2,3],
....: "gbm__ntrees": [10,20],
....: "gbm__max_depth": [1,2,3],
....: "gbm__learn_rate": [0.1,0.2]}
In [63]: custom_cv = H2OKFold(iris_df, n_folds=5, seed=42)
In [64]: pipeline = Pipeline([("standardize", H2OScaler()),
....: ("pca", H2OPCA(k=2)),
....: ("gbm", H2OGradientBoostingEstimator(distribution="gaussian"))])
In [65]: random_search = RandomizedSearchCV(pipeline, params,
....: n_iter=5,
....: scoring=make_scorer(h2o_r2_score),
....: cv=custom_cv,
....: random_state=42,
....: n_jobs=1)
In [66]: random_search.fit(iris_df[1:], iris_df[0])
Out[66]:
RandomizedSearchCV(cv=<h2o.cross_validation.H2OKFold instance at 0x10ba413d0>,
error_score='raise',
estimator=Pipeline(steps=[('standardize', <h2o.transforms.preprocessing.H2OScaler object at 0x10c0f18d0>), ('pca', ), ('gbm', )]),
fit_params={}, iid=True, n_iter=5, n_jobs=1,
param_distributions={'pca__k': [2, 3], 'gbm__ntrees': [10, 20], 'standardize__scale': [True, False], 'gbm__max_depth': [1, 2, 3], 'standardize__center': [True, False], 'gbm__learn_rate': [0.1, 0.2]},
pre_dispatch='2*n_jobs', random_state=42, refit=True,
scoring=make_scorer(h2o_r2_score), verbose=0)
In [67]: print random_search.best_estimator_
Model Details
=============
H2OPCA : Principal Component Analysis
Model Key: PCA_model_python_1446220160417_136
Importance of components:
pc1 pc2 pc3
---------------------- -------- ---------- ----------
Standard deviation 9.6974 0.091905 0.031356
Proportion of Variance 0.9999 8.98098e-05 1.04541e-05
Cumulative Proportion 0.9999 0.99999 1
ModelMetricsPCA: pca
** Reported on train data. **
MSE: NaN
RMSE: NaN
Model Details
=============
H2OGradientBoostingEstimator : Gradient Boosting Machine
Model Key: GBM_model_python_1446220160417_138
Model Summary:
number_of_trees number_of_internal_trees model_size_in_bytes min_depth max_depth mean_depth min_leaves max_leaves mean_leaves
-- ----------------- ------------------------- --------------------- ----------- ----------- ------------ ------------ ------------ -------------
20 20 2958 3 3 3 5 8 6.85
ModelMetricsRegression: gbm
** Reported on train data. **
RMSE: 0.193906262445
MAE: 0.155086582663
RMSLE: NaN
Mean Residual Deviance: 0.0375996386155
Scoring History:
timestamp duration number_of_trees training_rmse training_mse training_deviance
-- ------------------- ---------- ----------------- -------------- -------------- -------------------
2016-08-25 13:58:15 0.000 sec 0.0 0.683404046309 0.569341466973 0.467041090512
2016-08-25 13:58:15 0.002 sec 1.0 0.571086656306 0.469106400643 0.326139969011
2016-08-25 13:58:15 0.003 sec 2.0 0.483508601652 0.395952082872 0.233780567872
2016-08-25 13:58:15 0.004 sec 3.0 0.414549015095 0.339981133963 0.171850885916
2016-08-25 13:58:15 0.005 sec 4.0 0.362852508373 0.298212416346 0.131661942833
--- --- --- --- --- --- ---
2016-08-25 13:58:15 0.017 sec 16.0 0.204549491682 0.164292158112 0.0418404945473
2016-08-25 13:58:15 0.018 sec 17.0 0.201762323368 0.162030458841 0.0407080351307
2016-08-25 13:58:15 0.019 sec 18.0 0.199709571992 0.160735480674 0.0398839131454
2016-08-25 13:58:15 0.019 sec 19.0 0.196739590066 0.158067452484 0.0387064662994
2016-08-25 13:58:15 0.020 sec 20.0 0.193906262445 0.155086582663 0.0375996386155
Variable Importances:
variable relative_importance scaled_importance percentage
---------- --------------------- ------------------- ------------
PC1 160.092 1 0.894701
PC3 14.8175 0.0925562 0.08281
PC2 4.0241 0.0251361 0.0224893
Pipeline(steps=[('standardize', <h2o.transforms.preprocessing.H2OScaler object at 0x10c1679d0>), ('pca', ), ('gbm', )])
|
jangorecki/h2o-3
|
h2o-docs/src/booklets/v2_2015/source/python/python_randomized_grid_search.py
|
Python
|
apache-2.0
| 5,102
|
[
"Gaussian"
] |
8701bb33f50bbe2e99a57a59ce50cc3c48d2b5bef439c79a11447967f2debd05
|
# encoding: utf8
import re
from random import randint
from collections import Counter
from pprint import pprint
# http://blog.echen.me/2011/08/22/introduction-to-latent-dirichlet-allocation/
# -> https://tedunderwood.com/2012/04/07/topic-modeling-made-just-simple-enough/
documents = [
# space
"Watching a bubble float effortlessly through the @Space_Station may be mesmerizing and beautiful, but that same bubble is also teaching @ISS_Research about how fluids behave differently in microgravity",
"LIVE: On 7 February 2008, the European Columbus laboratory set sail for humanity’s new world of space - join us live as we celebrate 10 years of achievements and look ahead to the future of human exploration ",
"Flexing our robotic muscles💪. A new robotic arm – that could be used to assemble spacecraft & future outposts for humans in space – was successfully tested to demonstrate that it is fully operational",
"Perfect timing -- down to the nanosecond -- is vital to navigation in deep space. More accurate timing = More control. For 20 years, we’ve been “perfecting time” for future deep space exploration. The result? The Deep Space Atomic Clock",
"Europe prepares to celebrate 10 years of its Columbus lab module at the space station as the crew studies the impacts of living in space",
# drones
"Ever fly in a cave? Check out these pilots putting in laps underground!",
"I think this pilot needs more batteries...How many do you bring to the field?",
"This drone is built for tricks! What do you think about seeing Freestyle in MultiGP?",
"Hey everyone hope you have an awesome time at the 2018 Sebring Drone Race presented by Soaring Sky. Here's what we have planned for today! ",
"An army of Racing Drones, getting ready to FLY!!!",
"Jordan Temkin (@jet.fpv) showing his drone racing skills here in Sebring 2018 day 1!!! ",
# games
"We've opened up a public Upcoming Beta branch on Steam so our most eager of Trailmakers can check out upcoming patches! Check out the notes and a tease of more news soon here! ",
"Hey guys! We’re hearing reports that Humble have revoked some of the pre-order keys, we’re not sure why but if you log into your Humble account, go to Purchases and activate the key from there again you’ll get Trailmakers back on your Steam account! Sorry about this!",
"We've deployed a small update with some critical fixes to Trailmakers! Read the notes",
"Build any vehicle you can dream up, and race it around an alien planet. This is for you grease monkeys. Trailmakers is coming out on Wednesday! ",
"Did you pre-order Trailmakers? We just sent you an e-mail asking what name/nickname you want us to put on - THE MONUMENT!",
"We're seeing a lot of feedback coming in from people playing the 10 month old Alpha demo. We love your feedback, but we'd love it even more if you were trying the new Alpha 2. Trailmakers should look like this in your Steam List if you're on the right build!"
]
def get_tf(text):
terms = re.findall('(?u)\w+',text.lower())
tf = Counter(terms)
return dict(tf)
docs = list(map(get_tf,documents))
K = 3
topic_by_d_w = {}
cnt_by_t_w = {t:{} for t in range(1,K+1)}
# INIT
for d,doc in enumerate(docs):
topic_by_d_w[d] = {}
for w in doc:
t = randint(1,K)
topic_by_d_w[d][w] = t
if w not in cnt_by_t_w[t]: cnt_by_t_w[t][w]=0
tf = doc[w]
cnt_by_t_w[t][w] += tf
# ITER
for d,doc in enumerate(docs):
p_by_w_t = {}
for t in range(1,K+1):
total_t = sum(cnt_by_t_w[t].values())
cnt_in_t = sum([doc[w] for w in doc if topic_by_d_w[d][w]==t])
for w in sorted(doc):
p = 1. * cnt_by_t_w[t].get(w,0) / total_t * cnt_in_t
if w not in p_by_w_t: p_by_w_t[w] = {t:0 for t in range(1,K+1)}
p_by_w_t[w][t] = p
for w in p_by_w_t:
print(d,w,p_by_w_t[w])
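# The loop above scores each word under every topic but stops short of
# resampling. A minimal sketch of the missing Gibbs step (added for
# illustration; ``choices`` needs Python 3.6+ and
# ``from random import choices``):
#   for w in p_by_w_t:
#       old = topic_by_d_w[d][w]
#       cnt_by_t_w[old][w] -= docs[d][w]
#       new = choices(range(1, K+1), weights=[p_by_w_t[w][t] for t in range(1, K+1)])[0]
#       topic_by_d_w[d][w] = new
#       cnt_by_t_w[new][w] = cnt_by_t_w[new].get(w, 0) + docs[d][w]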
|
mobarski/sandbox
|
topic/lda/lda3.py
|
Python
|
mit
| 3,747
|
[
"COLUMBUS"
] |
275763bc4472d6061acec0eff83fade39b90a0c4545777e53b64e7f20c9e2dea
|
from BrowserManager import Browser
from DataAggregator import DataAggregator, LevelDBAggregator
from SocketInterface import clientsocket
from Errors import CommandExecutionError
from utilities.platform_utils import get_version, get_configuration_string
import CommandSequence
import MPLogger
from multiprocess import Process, Queue
from Queue import Empty as EmptyQueue
from tblib import pickling_support
pickling_support.install()
from six import reraise
import cPickle
import threading
import copy
import os
import sqlite3
import time
import json
import psutil
SLEEP_CONS = 0.1 # command sleep constant (in seconds)
BROWSER_MEMORY_LIMIT = 1500 # in MB
def load_default_params(num_browsers=1):
"""
Loads num_browsers copies of the default browser_params dictionary.
Also loads a single copy of the default TaskManager params dictionary.
"""
fp = open(os.path.join(os.path.dirname(__file__), 'default_browser_params.json'))
preferences = json.load(fp)
fp.close()
browser_params = [copy.deepcopy(preferences) for i in xrange(0, num_browsers)]
fp = open(os.path.join(os.path.dirname(__file__), 'default_manager_params.json'))
manager_params = json.load(fp)
fp.close()
manager_params['num_browsers'] = num_browsers
return manager_params, browser_params
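# Usage sketch (added for illustration; assumes the default JSON files ship
# alongside this module, hypothetical directory):
#   manager_params, browser_params = load_default_params(num_browsers=2)
#   manager_params['data_directory'] = '~/openwpm_demo'
#   manager = TaskManager(manager_params, browser_params)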
class TaskManager:
"""
User-facing Class for interfacing with OpenWPM
The TaskManager spawns several child processes to run the automation tasks.
- DataAggregator to aggregate data in a SQLite database
- MPLogger to aggregate logs across processes
- BrowserManager processes to isolate Browsers in a separate process
<manager_params> dict of TaskManager configuration parameters
<browser_params> is a list of (or a single) dictionaries that specify preferences for browsers to instantiate
<process_watchdog> will monitor firefox and Xvfb processes, killing any not indexed in TaskManager's browser list.
NOTE: Only run this in isolated environments. It kills processes by name, indiscriminately.
"""
def __init__(self, manager_params, browser_params, process_watchdog=False):
# Make paths absolute in manager_params
for path in ['data_directory','log_directory']:
if manager_params[path] is not None:
manager_params[path] = os.path.expanduser(manager_params[path])
manager_params['database_name'] = os.path.join(manager_params['data_directory'],manager_params['database_name'])
manager_params['log_file'] = os.path.join(manager_params['log_directory'],manager_params['log_file'])
manager_params['screenshot_path'] = os.path.join(manager_params['data_directory'], 'screenshots')
manager_params['source_dump_path'] = os.path.join(manager_params['data_directory'], 'sources')
self.manager_params = manager_params
# Create data directories if they do not exist
if not os.path.exists(manager_params['screenshot_path']):
os.makedirs(manager_params['screenshot_path'])
if not os.path.exists(manager_params['source_dump_path']):
os.makedirs(manager_params['source_dump_path'])
# check size of parameter dictionary
self.num_browsers = manager_params['num_browsers']
if len(browser_params) != self.num_browsers:
raise Exception("Number of <browser_params> dicts is not the same as manager_params['num_browsers']")
# Flow control
self.closing = False
self.failure_status = None
self.threadlock = threading.Lock()
self.failurecount = 0
if manager_params['failure_limit'] is not None:
self.failure_limit = manager_params['failure_limit']
else:
self.failure_limit = self.num_browsers * 2 + 10
self.process_watchdog = process_watchdog
# sets up the crawl data database
db_path = manager_params['database_name']
if not os.path.exists(manager_params['data_directory']):
os.mkdir(manager_params['data_directory'])
self.db = sqlite3.connect(db_path)
with open(os.path.join(os.path.dirname(__file__), 'schema.sql'), 'r') as f:
self.db.executescript(f.read())
self.db.commit()
# sets up logging server + connect a client
self.logging_status_queue = None
self.loggingserver = self._launch_loggingserver()
# socket location: (address, port)
self.manager_params['logger_address'] = self.logging_status_queue.get()
self.logger = MPLogger.loggingclient(*self.manager_params['logger_address'])
# Mark if LDBAggregator is needed (if js is enabled on any browser)
self.ldb_enabled = False
for params in browser_params:
if params['save_javascript'] or params['save_javascript_proxy']:
self.ldb_enabled = True
break
# Initialize the data aggregators
self._launch_aggregators()
# open client socket
self.sock = clientsocket(serialization='dill')
self.sock.connect(*self.manager_params['aggregator_address'])
self._save_configuration(browser_params)
# read the last used site visit id
cur = self.db.cursor()
cur.execute("SELECT MAX(visit_id) from site_visits")
last_visit_id = cur.fetchone()[0]
if last_visit_id is None:
last_visit_id = 0
self.next_visit_id = last_visit_id + 1
# sets up the BrowserManager(s) + associated queues
self.browsers = self._initialize_browsers(browser_params) # List of the Browser(s)
self._launch_browsers()
# start the manager watchdog
thread = threading.Thread(target=self._manager_watchdog, args=())
thread.daemon = True
thread.start()
def _save_configuration(self, browser_params):
""" Saves crawl configuration details to db and logfile"""
cur = self.db.cursor()
# Get git version and commit information
openwpm_v, browser_v = get_version()
# Record task details
cur.execute(("INSERT INTO task "
"(manager_params, openwpm_version, browser_version) "
"VALUES (?,?,?)"),
(json.dumps(self.manager_params), openwpm_v, browser_v))
self.db.commit()
self.task_id = cur.lastrowid
# Record browser details for each browser
for i in xrange(self.num_browsers):
cur.execute("INSERT INTO crawl (task_id, browser_params) VALUES (?,?)",
(self.task_id, json.dumps(browser_params[i])))
self.db.commit()
browser_params[i]['crawl_id'] = cur.lastrowid
# Print the configuration details
self.logger.info(get_configuration_string(self.manager_params,
browser_params,
(openwpm_v, browser_v)))
def _initialize_browsers(self, browser_params):
""" initialize the browser classes, each its unique set of parameters """
browsers = list()
for i in xrange(self.num_browsers):
browsers.append(Browser(self.manager_params, browser_params[i]))
return browsers
def _launch_browsers(self):
""" launch each browser manager process / browser """
for browser in self.browsers:
try:
success = browser.launch_browser_manager()
except:
self._cleanup_before_fail(during_init=True)
raise
if not success:
self.logger.critical("Browser spawn failure during TaskManager initialization, exiting...")
self.close()
break
# Update our DB with the random browser settings
# These are found within the scope of each instance of Browser in the browsers list
screen_res = str(browser.browser_settings['screen_res'])
ua_string = str(browser.browser_settings['ua_string'])
self.sock.send(("UPDATE crawl SET screen_res = ?, ua_string = ? \
WHERE crawl_id = ?", (screen_res, ua_string, browser.crawl_id)))
def _manager_watchdog(self):
"""
Periodically checks the following:
- memory consumption of all browsers every 10 seconds
- presence of processes that are no longer in use
"""
while not self.closing:
time.sleep(10)
# Check browser memory usage
for browser in self.browsers:
try:
process = psutil.Process(browser.browser_pid)
mem = process.memory_info()[0] / float(2 ** 20)
if mem > BROWSER_MEMORY_LIMIT:
self.logger.info("BROWSER %i: Memory usage: %iMB, exceeding limit of %iMB"
% (browser.crawl_id, int(mem), BROWSER_MEMORY_LIMIT))
browser.restart_required = True
except psutil.NoSuchProcess:
pass
# Check for browsers or displays that were not closed correctly
# Provide a 300 second buffer to avoid killing freshly launched browsers
# TODO This buffer should correspond to the maximum browser spawn timeout
if self.process_watchdog:
browser_pids = set()
display_pids = set()
check_time = time.time()
for browser in self.browsers:
if browser.browser_pid is not None:
browser_pids.add(browser.browser_pid)
if browser.display_pid is not None:
display_pids.add(browser.display_pid)
for process in psutil.process_iter():
if (process.create_time() + 300 < check_time and
((process.name() == 'firefox' and process.pid not in browser_pids) or
(process.name() == 'Xvfb' and process.pid not in display_pids))):
self.logger.debug("Process: %s (pid: %i) with start time %s found running but not in browser process list. Killing."
% (process.name(), process.pid, process.create_time()))
process.kill()
def _launch_aggregators(self):
"""
Launches the various data aggregators, which serialize data from all processes.
* DataAggregator - sqlite database for crawl data
* LevelDBAggregator - leveldb database for javascript files
"""
# DataAggregator
self.aggregator_status_queue = Queue()
self.data_aggregator = Process(target=DataAggregator.DataAggregator,
args=(self.manager_params, self.aggregator_status_queue))
self.data_aggregator.daemon = True
self.data_aggregator.start()
self.manager_params['aggregator_address'] = self.aggregator_status_queue.get() # socket location: (address, port)
# LevelDB Aggregator
if self.ldb_enabled:
self.ldb_status_queue = Queue()
self.ldb_aggregator = Process(target=LevelDBAggregator.LevelDBAggregator,
args=(self.manager_params, self.ldb_status_queue))
self.ldb_aggregator.daemon = True
self.ldb_aggregator.start()
self.manager_params['ldb_address'] = self.ldb_status_queue.get() # socket location: (address, port)
def _kill_aggregators(self):
""" Terminates the aggregators gracefully """
# DataAggregator
self.logger.debug("Telling the DataAggregator to shut down...")
self.aggregator_status_queue.put("DIE")
start_time = time.time()
self.data_aggregator.join(300)
self.logger.debug("DataAggregator took " + str(time.time() - start_time) + " seconds to close")
# LevelDB Aggregator
if self.ldb_enabled:
self.logger.debug("Telling the LevelDBAggregator to shut down...")
self.ldb_status_queue.put("DIE")
start_time = time.time()
self.ldb_aggregator.join(300)
self.logger.debug("LevelDBAggregator took " + str(time.time() - start_time) + " seconds to close")
def _launch_loggingserver(self):
""" sets up logging server """
self.logging_status_queue = Queue()
loggingserver = Process(target=MPLogger.loggingserver,
args=(self.manager_params['log_file'], self.logging_status_queue, ))
loggingserver.daemon = True
loggingserver.start()
return loggingserver
def _kill_loggingserver(self):
""" terminates logging server gracefully """
self.logging_status_queue.put("DIE")
self.loggingserver.join(300)
def _shutdown_manager(self, failure=False, during_init=False):
"""
Wait for current commands to finish, close all child processes and
threads
<failure> flag to indicate manager failure (True) or end of crawl (False)
<during_init> flag to indicator if this shutdown is occuring during the TaskManager initialization
"""
self.closing = True
for browser in self.browsers:
browser.shutdown_browser(during_init)
if failure:
self.sock.send(("UPDATE crawl SET finished = -1 WHERE crawl_id = ?",
(browser.crawl_id,)))
else:
self.sock.send(("UPDATE crawl SET finished = 1 WHERE crawl_id = ?",
(browser.crawl_id,)))
self.db.close() # close db connection
self.sock.close() # close socket to data aggregator
self._kill_aggregators()
self._kill_loggingserver()
def _cleanup_before_fail(self, during_init=False):
"""
Execute shutdown commands before throwing an exception
This should keep us from having a bunch of hanging processes
and incomplete data.
<during_init> flag to indicator if this shutdown is occuring during
the TaskManager initialization
"""
self._shutdown_manager(failure=True, during_init=during_init)
def _check_failure_status(self):
""" Check the status of command failures. Raise exceptions as necessary
The failure status property is used by the various asynchronous
command execution threads which interface with the
remote browser manager processes. If a failure status is found, the
appropriate steps are taken to gracefully close the infrastructure
"""
self.logger.debug("Checking command failure status indicator...")
if self.failure_status:
self.logger.debug("TaskManager failure status set, halting command execution.")
self._cleanup_before_fail()
if self.failure_status['ErrorType'] == 'ExceedCommandFailureLimit':
raise CommandExecutionError(
"TaskManager exceeded maximum consecutive command "
"execution failures.", self.failure_status['CommandSequence']
)
elif self.failure_status['ErrorType'] == 'ExceedLaunchFailureLimit':
raise CommandExecutionError(
"TaskManager failed to launch browser within allowable "
"failure limit.", self.failure_status['CommandSequence']
)
elif self.failure_status['ErrorType'] == 'CriticalChildException':
reraise(*cPickle.loads(self.failure_status['Exception']))
# CRAWLER COMMAND CODE
def _distribute_command(self, command_sequence, index=None):
"""
parses command type and issues command(s) to the proper browser
<index> specifies the type of command this is:
= None -> first come, first serve
= # -> index of browser to send command to
= * -> sends command to all browsers
= ** -> sends command to all browsers (synchronized)
"""
if index is None:
#send to first browser available
command_executed = False
while True:
for browser in self.browsers:
if browser.ready():
browser.current_timeout = command_sequence.total_timeout
thread = self._start_thread(browser, command_sequence)
command_executed = True
break
if command_executed:
break
time.sleep(SLEEP_CONS)
elif 0 <= index < len(self.browsers):
#send the command to this specific browser
while True:
if self.browsers[index].ready():
self.browsers[index].current_timeout = command_sequence.total_timeout
thread = self._start_thread(self.browsers[index], command_sequence)
break
time.sleep(SLEEP_CONS)
elif index == '*':
#send the command to all browsers
command_executed = [False] * len(self.browsers)
while False in command_executed:
for i in xrange(len(self.browsers)):
if self.browsers[i].ready() and not command_executed[i]:
self.browsers[i].current_timeout = command_sequence.total_timeout
thread = self._start_thread(self.browsers[i], command_sequence)
command_executed[i] = True
time.sleep(SLEEP_CONS)
elif index == '**':
#send the command to all browsers and sync it
condition = threading.Condition() # Used to block threads until ready
command_executed = [False] * len(self.browsers)
while False in command_executed:
for i in xrange(len(self.browsers)):
if self.browsers[i].ready() and not command_executed[i]:
self.browsers[i].current_timeout = command_sequence.total_timeout
thread = self._start_thread(self.browsers[i], command_sequence, condition)
command_executed[i] = True
time.sleep(SLEEP_CONS)
with condition:
condition.notifyAll() # All browsers loaded, tell them to start
else:
self.logger.info("Command index type is not supported or out of range")
return
if command_sequence.blocking:
thread.join()
self._check_failure_status()
def _start_thread(self, browser, command_sequence, condition=None):
""" starts the command execution thread """
# Check status flags before starting thread
if self.closing:
self.logger.error("Attempted to execute command on a closed TaskManager")
return
self._check_failure_status()
browser.set_visit_id(self.next_visit_id)
self.sock.send(("INSERT INTO site_visits (visit_id, crawl_id, site_url) VALUES (?,?,?)",
(self.next_visit_id, browser.crawl_id, command_sequence.url)))
self.next_visit_id += 1
# Start command execution thread
args = (browser, command_sequence, condition)
thread = threading.Thread(target=self._issue_command, args=args)
browser.command_thread = thread
thread.daemon = True
thread.start()
return thread
def _issue_command(self, browser, command_sequence, condition=None):
"""
sends command tuple to the BrowserManager
"""
browser.is_fresh = False # since we are issuing a command, the BrowserManager is no longer a fresh instance
# if this is a synced call, block on condition
if condition is not None:
with condition:
condition.wait()
reset = command_sequence.reset
start_time = None # tracks when a site visit started, so that flash/profile
# cookies can be properly tracked.
for command_and_timeout in command_sequence.commands_with_timeout:
command, timeout = command_and_timeout
if command[0] in ['GET', 'BROWSE']:
start_time = time.time()
command += (browser.curr_visit_id,)
elif command[0] in ['DUMP_FLASH_COOKIES', 'DUMP_PROFILE_COOKIES']:
command += (start_time, browser.curr_visit_id,)
browser.current_timeout = timeout
# passes off command and waits for a success (or failure signal)
browser.command_queue.put(command)
command_succeeded = 0 #1 success, 0 failure from error, -1 timeout
command_arguments = command[1] if len(command) > 1 else None
# received reply from BrowserManager, either success signal or failure notice
try:
status = browser.status_queue.get(True, browser.current_timeout)
if status == "OK":
command_succeeded = 1
elif status[0] == "CRITICAL":
self.logger.critical("BROWSER %i: Received critical error "
"from browser process while executing "
"command %s. Setting failure status." % (
browser.crawl_id, str(command)))
self.failure_status = {
'ErrorType': 'CriticalChildException',
'CommandSequence': command_sequence,
'Exception': status[1]
}
return
else:
command_succeeded = 0
self.logger.info("BROWSER %i: Received failure status while"
" executing command: %s" % (browser.crawl_id, command[0]))
except EmptyQueue:
command_succeeded = -1
self.logger.info("BROWSER %i: Timeout while executing command, "
"%s, killing browser manager" % (browser.crawl_id, command[0]))
self.sock.send(("INSERT INTO CrawlHistory (crawl_id, command, arguments, bool_success)"
" VALUES (?,?,?,?)",
(browser.crawl_id, command[0], command_arguments, command_succeeded)))
if command_succeeded != 1:
with self.threadlock:
self.failurecount += 1
if self.failurecount > self.failure_limit:
self.logger.critical("BROWSER %i: Command execution failure"
" pushes failure count above the allowable limit."
" Setting failure_status." % browser.crawl_id)
self.failure_status = {
'ErrorType': 'ExceedCommandFailureLimit',
'CommandSequence': command_sequence
}
return
browser.restart_required = True
else:
with self.threadlock:
self.failurecount = 0
if browser.restart_required:
break
if self.closing:
return
if browser.restart_required or reset:
success = browser.restart_browser_manager(clear_profile=reset)
if not success:
self.logger.critical("BROWSER %i: Exceeded the maximum allowable "
"consecutive browser launch failures. "
"Setting failure_status." % browser.crawl_id)
self.failure_status = {
'ErrorType': 'ExceedLaunchFailureLimit',
'CommandSequence': command_sequence
}
return
browser.restart_required = False
def execute_command_sequence(self, command_sequence, index=None):
self._distribute_command(command_sequence, index)
# DEFINITIONS OF HIGH LEVEL COMMANDS
# NOTE: These wrappers are provided for convenience. To issue sequential
# commands to the same browser in a single 'visit', use the CommandSequence
# class directly.
def get(self, url, index=None, timeout=60, sleep=0, reset=False):
""" goes to a url """
command_sequence = CommandSequence.CommandSequence(url)
command_sequence.get(timeout=timeout, sleep=sleep)
command_sequence.reset = reset
self.execute_command_sequence(command_sequence, index=index)
def browse(self, url, num_links=2, sleep=0, index=None, timeout=60, reset=False):
""" browse a website and visit <num_links> links on the page """
command_sequence = CommandSequence.CommandSequence(url)
command_sequence.browse(num_links=num_links, sleep=sleep, timeout=timeout)
command_sequence.reset = reset
self.execute_command_sequence(command_sequence, index=index)
def close(self):
"""
Execute shutdown procedure for TaskManager
"""
if self.closing:
self.logger.error("TaskManager already closed")
return
self._shutdown_manager()
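# Crawl sketch (added for illustration; hypothetical URL, see the index
# semantics documented in _distribute_command above):
#   manager.get('http://example.com')                             # first free browser
#   manager.browse('http://example.com', num_links=3, index='*')  # all browsers
#   manager.close()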
|
tommybananas/OpenWPM
|
automation/TaskManager.py
|
Python
|
gpl-3.0
| 25,684
|
[
"VisIt"
] |
60b5942b08b0132c91283417bdcd71a41c6661ceac3ef42dd289e6fa6ce6f6d0
|
#!/usr/bin/env python
# fig 3b preprocess files for allele sensitive ATAC analysis
# description: code to "unfix" bams (ie in the appropriate situations,
# remove the 1bp position shift to realign base pairs and position)
# this is necessary to correctly pull the base pair from the correct position
# data: /mnt/lab_data/kundaje/projects/skin/data/bds/processed.atac.2016-10-06.fixed_filtered_align
import os
import sys
import glob
import pysam
import numpy as np
_READ_LEN = 76
def unfix_bam(in_bam_file, out_bam_file):
"""read in bam file and unfix
"""
bam_file = pysam.AlignmentFile(in_bam_file, 'rb')
to_stdout = pysam.AlignmentFile(out_bam_file, "wb", template=bam_file)
seen_read_1 = False
seen_read_2 = False
num_fixed = 0
for read in bam_file:
# process when read 1 AND read 2 are seen
# only important at the very beginning of file
if read.is_read1:
read1 = read
seen_read_1 = True
if read.is_read2:
read2 = read
seen_read_2 = True
if not seen_read_1 or not seen_read_2:
continue
# check to see if read pair fits the problem (in reverse)
if read1.query_name == read2.query_name:
# Check 4 - only fix reads that came from the specific chip in the flowcell
check_4 = ('H3MCTBBXX' in read1.query_name)
# Always change the read 1 start point for appropriate reads UNLESS unmapped or will become unmapped (ie 0 or 1 coordinate)
if (read1.reference_start > 1) and check_4:
read1.reference_start += 1
num_fixed += 1
to_stdout.write(read1)
to_stdout.write(read2)
print "adjusted read total:", num_fixed
return None
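# Usage sketch (added for illustration; hypothetical paths). unfix_bam assumes
# a name-sorted BAM so that read1/read2 of a pair arrive together, which the
# "samtools sort -n" step in main() below guarantees:
#   unfix_bam("sample.nsorted.bam", "sample.unfix.bam")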
def main():
# server specific
WORK_DIR = "/mnt/lab_data/kundaje/projects/skin/data/bds/processed.atac.2016-10-06.fixed_filtered_align"
#OUT_DIR = '/mnt/lab_data/kundaje/projects/skin/data/bds/processed.atac.2019-06-04.bams_bp-position-matched'
OUT_DIR = "/srv/scratch/dskim89/ggr/fig_3-b"
os.system("mkdir -p {}".format(OUT_DIR))
# first unfix BAM files
bam_files = sorted(glob.glob("{}/*/align/rep1/*bam".format(WORK_DIR)))
print len(bam_files)
for bam_file in bam_files:
# read name sort
tmp_sort_bam_file = "{}/{}.sort.bam".format(
OUT_DIR, os.path.basename(bam_file).split(".bam")[0])
sort_cmd = "samtools sort -o {} -n -@ 16 {}".format(
tmp_sort_bam_file, bam_file)
if not os.path.isfile(tmp_sort_bam_file):
os.system(sort_cmd)
# and then unfix
unsorted_fixed_bam_file = "{}/{}.unfix.tmp.bam".format(OUT_DIR, os.path.basename(bam_file).split(".bam")[0])
if not os.path.isfile(unsorted_fixed_bam_file):
unfix_bam(tmp_sort_bam_file, unsorted_fixed_bam_file)
# and finally sort that file
out_bam_file = "{}/{}.unfix.bam".format(OUT_DIR, os.path.basename(bam_file).split(".bam")[0])
sort_cmd = "samtools sort -o {} -@ 16 {}".format(
out_bam_file, unsorted_fixed_bam_file)
if not os.path.isfile(out_bam_file):
os.system(sort_cmd)
# and clean up
os.system("rm {}/*sort.bam".format(OUT_DIR))
os.system("rm {}/*tmp.bam".format(OUT_DIR))
# merge by bio rep
donors = ['D1', 'D2']
timepoints = np.arange(0, 6.5, 0.5)
for timepoint in timepoints:
for donor in donors:
main_day = str(timepoint).split('.')[0]
half_day = str(timepoint).split('.')[1] == '5'
donor_num = donor.split('D')[1]
if not half_day:
which_files = ('find {0}/ -regex ".*Day{1}.*nodup.unfix.bam" | '
'grep {2} | '
'grep -v ".*\({1}-5\|{1}5\).*" | '
'grep -v "5and"').format(OUT_DIR, main_day, donor)
pool_files = ('{0} | '
'xargs samtools merge {3}/primary_keratinocyte-d{1}0.GGR.Stanford_Greenleaf.ATAC-seq.b{2}.fixedtrim.PE2SE.nodup.unfix.bam').format(
which_files, main_day, donor_num, OUT_DIR)
else:
which_files = ('find {0}/ -regex ".*Day{1}.*nodup.unfix.bam" | '
'grep {2} | '
'grep ".*\({1}-5\|{1}5\).*" | '
'grep -v "5and"').format(OUT_DIR, main_day, donor)
pool_files = ('{0} | '
'xargs samtools merge {3}/primary_keratinocyte-d{1}5.GGR.Stanford_Greenleaf.ATAC-seq.b{2}.fixedtrim.PE2SE.nodup.unfix.bam').format(
which_files, main_day, donor_num, OUT_DIR)
print which_files
os.system(which_files)
print pool_files
os.system(pool_files)
print "DONE"
# and do again as pooled (NOTE: do not use pooled, confounds genotyping)
for timepoint in timepoints:
main_day = str(timepoint).split('.')[0]
half_day = str(timepoint).split('.')[1] == '5'
if not half_day:
which_files = ('find {0}/ -regex ".*Day{1}.*nodup.unfix.bam" | '
#'grep {2} | '
'grep -v ".*\({1}-5\|{1}5\).*" | '
'grep -v "5and"').format(OUT_DIR, main_day)
pool_files = ('{0} | '
'xargs samtools merge {2}/primary_keratinocyte-d{1}0.GGR.Stanford_Greenleaf.ATAC-seq.pooled.fixedtrim.PE2SE.nodup.unfix.bam').format(
which_files, main_day, OUT_DIR)
else:
which_files = ('find {0}/ -regex ".*Day{1}.*nodup.unfix.bam" | '
#'grep {2} | '
'grep ".*\({1}-5\|{1}5\).*" | '
'grep -v "5and"').format(OUT_DIR, main_day)
pool_files = ('{0} | '
'xargs samtools merge {2}/primary_keratinocyte-d{1}5.GGR.Stanford_Greenleaf.ATAC-seq.pooled.fixedtrim.PE2SE.nodup.unfix.bam').format(
which_files, main_day, OUT_DIR)
if half_day:
print which_files
os.system(which_files)
print pool_files
os.system(pool_files)
print "DONE"
return None
main()
|
vervacity/ggr-project
|
figs/fig_2.modelling/fig_3-b.0.preprocess.py
|
Python
|
mit
| 6,429
|
[
"pysam"
] |
ff5150a9b850bd837bb9d35c6ad79386e13c95391be03993a444f3707ab41dc1
|
from __future__ import print_function, division
import copy
from collections import defaultdict
from sympy.core.containers import Dict
from sympy.core.expr import Expr
from sympy.core.compatibility import is_sequence, as_int, range
from sympy.core.logic import fuzzy_and
from sympy.core.singleton import S
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.utilities.iterables import uniq
from .matrices import MatrixBase, ShapeError, a2idx
from .dense import Matrix
import collections
class SparseMatrix(MatrixBase):
"""
A sparse matrix (a matrix with a large number of zero elements).
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> SparseMatrix(2, 2, range(4))
Matrix([
[0, 1],
[2, 3]])
>>> SparseMatrix(2, 2, {(1, 1): 2})
Matrix([
[0, 0],
[0, 2]])
See Also
========
sympy.matrices.dense.Matrix
"""
def __init__(self, *args):
if len(args) == 1 and isinstance(args[0], SparseMatrix):
self.rows = args[0].rows
self.cols = args[0].cols
self._smat = dict(args[0]._smat)
return
self._smat = {}
if len(args) == 3:
self.rows = as_int(args[0])
self.cols = as_int(args[1])
if isinstance(args[2], collections.Callable):
op = args[2]
for i in range(self.rows):
for j in range(self.cols):
value = self._sympify(
op(self._sympify(i), self._sympify(j)))
if value:
self._smat[(i, j)] = value
elif isinstance(args[2], (dict, Dict)):
# manual copy, copy.deepcopy() doesn't work
for key in args[2].keys():
v = args[2][key]
if v:
self._smat[key] = self._sympify(v)
elif is_sequence(args[2]):
if len(args[2]) != self.rows*self.cols:
raise ValueError(
'List length (%s) != rows*columns (%s)' %
(len(args[2]), self.rows*self.cols))
flat_list = args[2]
for i in range(self.rows):
for j in range(self.cols):
value = self._sympify(flat_list[i*self.cols + j])
if value:
self._smat[(i, j)] = value
else:
# handle full matrix forms with _handle_creation_inputs
r, c, _list = Matrix._handle_creation_inputs(*args)
self.rows = r
self.cols = c
for i in range(self.rows):
for j in range(self.cols):
value = _list[self.cols*i + j]
if value:
self._smat[(i, j)] = value
def __add__(self, other):
"""Add other to self, efficiently if possible.
When adding a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> A = SparseMatrix(eye(3)) + SparseMatrix(eye(3))
>>> B = SparseMatrix(eye(3)) + eye(3)
>>> A
Matrix([
[2, 0, 0],
[0, 2, 0],
[0, 0, 2]])
>>> A == B
True
>>> isinstance(A, SparseMatrix) and isinstance(B, SparseMatrix)
False
"""
if isinstance(other, SparseMatrix):
return self.add(other)
elif isinstance(other, MatrixBase):
return other._new(other + self)
else:
raise NotImplementedError(
"Cannot add %s to %s" %
tuple([c.__class__.__name__ for c in (other, self)]))
def __eq__(self, other):
try:
if self.shape != other.shape:
return False
if isinstance(other, SparseMatrix):
return self._smat == other._smat
elif isinstance(other, MatrixBase):
return self._smat == MutableSparseMatrix(other)._smat
except AttributeError:
return False
def __getitem__(self, key):
if isinstance(key, tuple):
i, j = key
try:
i, j = self.key2ij(key)
return self._smat.get((i, j), S.Zero)
except (TypeError, IndexError):
if isinstance(i, slice):
# XXX remove list() when PY2 support is dropped
i = list(range(self.rows))[i]
elif is_sequence(i):
pass
elif isinstance(i, Expr) and not i.is_number:
from sympy.matrices.expressions.matexpr import MatrixElement
return MatrixElement(self, i, j)
else:
if i >= self.rows:
raise IndexError('Row index out of bounds')
i = [i]
if isinstance(j, slice):
# XXX remove list() when PY2 support is dropped
j = list(range(self.cols))[j]
elif is_sequence(j):
pass
elif isinstance(j, Expr) and not j.is_number:
from sympy.matrices.expressions.matexpr import MatrixElement
return MatrixElement(self, i, j)
else:
if j >= self.cols:
raise IndexError('Col index out of bounds')
j = [j]
return self.extract(i, j)
# check for single arg, like M[:] or M[3]
if isinstance(key, slice):
lo, hi = key.indices(len(self))[:2]
L = []
for i in range(lo, hi):
m, n = divmod(i, self.cols)
L.append(self._smat.get((m, n), S.Zero))
return L
i, j = divmod(a2idx(key, len(self)), self.cols)
return self._smat.get((i, j), S.Zero)
def __mul__(self, other):
"""Multiply self and other, watching for non-matrix entities.
When multiplying by a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye, zeros
>>> I = SparseMatrix(eye(3))
>>> I*I == I
True
>>> Z = zeros(3)
>>> I*Z
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> I*2 == 2*I
True
"""
if isinstance(other, SparseMatrix):
return self.multiply(other)
if isinstance(other, MatrixBase):
return other._new(self*self._new(other))
return self.scalar_multiply(other)
def __ne__(self, other):
return not self == other
def __neg__(self):
"""Negate all elements of self.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> -SparseMatrix(eye(3))
Matrix([
[-1, 0, 0],
[ 0, -1, 0],
[ 0, 0, -1]])
"""
rv = self.copy()
for k, v in rv._smat.items():
rv._smat[k] = -v
return rv
def __rmul__(self, other):
"""Return product the same type as other (if a Matrix).
When multiplying be a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> from sympy.matrices import Matrix, SparseMatrix
>>> A = Matrix(2, 2, range(1, 5))
>>> S = SparseMatrix(2, 2, range(2, 6))
>>> A*S == S*A
False
>>> (isinstance(A*S, SparseMatrix) ==
... isinstance(S*A, SparseMatrix) == False)
True
"""
if isinstance(other, MatrixBase):
return other*other._new(self)
return self.scalar_multiply(other)
def __setitem__(self, key, value):
raise NotImplementedError()
def _cholesky_solve(self, rhs):
# For speed, the symmetry check below stays commented out; if you
# are having difficulties, uncomment it to make sure that the
# input matrix is symmetric
#assert self.is_symmetric()
L = self._cholesky_sparse()
Y = L._lower_triangular_solve(rhs)
rv = L.T._upper_triangular_solve(Y)
return rv
def _cholesky_sparse(self):
"""Algorithm for numeric Cholesky factorization of a sparse matrix."""
Crowstruc = self.row_structure_symbolic_cholesky()
C = self.zeros(self.rows)
for i in range(len(Crowstruc)):
for j in Crowstruc[i]:
if i != j:
C[i, j] = self[i, j]
summ = 0
for p1 in Crowstruc[i]:
if p1 < j:
for p2 in Crowstruc[j]:
if p2 < j:
if p1 == p2:
summ += C[i, p1]*C[j, p1]
else:
break
else:
break
C[i, j] -= summ
C[i, j] /= C[j, j]
else:
C[j, j] = self[j, j]
summ = 0
for k in Crowstruc[j]:
if k < j:
summ += C[j, k]**2
else:
break
C[j, j] -= summ
C[j, j] = sqrt(C[j, j])
return C
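# Note (added for clarity): Crowstruc above is the symbolic nonzero row
# structure from row_structure_symbolic_cholesky(), so only structurally
# nonzero entries of the factor are visited; see cholesky() below for a
# worked example.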
def _diagonal_solve(self, rhs):
"Diagonal solve."
return self._new(self.rows, 1, lambda i, j: rhs[i, 0] / self[i, i])
def _eval_conjugate(self):
"""Return the by-element conjugation.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> from sympy import I
>>> a = SparseMatrix(((1, 2 + I), (3, 4), (I, -I)))
>>> a
Matrix([
[1, 2 + I],
[3, 4],
[I, -I]])
>>> a.C
Matrix([
[ 1, 2 - I],
[ 3, 4],
[-I, I]])
See Also
========
transpose: Matrix transposition
H: Hermite conjugation
D: Dirac conjugation
"""
conj = self.copy()
for key, value in self._smat.items():
conj._smat[key] = value.conjugate()
return conj
def _eval_inverse(self, **kwargs):
"""Return the matrix inverse using Cholesky or LDL (default)
decomposition as selected with the ``method`` keyword: 'CH' or 'LDL',
respectively.
Examples
========
>>> from sympy import SparseMatrix, Matrix
>>> A = SparseMatrix([
... [ 2, -1, 0],
... [-1, 2, -1],
... [ 0, 0, 2]])
>>> A.inv('CH')
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A.inv(method='LDL') # use of 'method=' is optional
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A * _
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
"""
sym = self.is_symmetric()
M = self.as_mutable()
I = M.eye(M.rows)
if not sym:
t = M.T
r1 = M[0, :]
M = t*M
I = t*I
method = kwargs.get('method', 'LDL')
if method in "LDL":
solve = M._LDL_solve
elif method == "CH":
solve = M._cholesky_solve
else:
raise NotImplementedError(
'Method may be "CH" or "LDL", not %s.' % method)
rv = M.hstack(*[solve(I[:, i]) for i in range(I.cols)])
if not sym:
scale = (r1*rv[:, 0])[0, 0]
rv /= scale
return self._new(rv)
def _eval_trace(self):
"""Calculate the trace of a square matrix.
Examples
========
>>> from sympy.matrices import eye
>>> eye(3).trace()
3
"""
trace = S.Zero
for i in range(self.cols):
trace += self._smat.get((i, i), 0)
return trace
def _eval_transpose(self):
"""Returns the transposed SparseMatrix of this SparseMatrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.T
Matrix([
[1, 3],
[2, 4]])
"""
tran = self.zeros(self.cols, self.rows)
for key, value in self._smat.items():
key = key[1], key[0] # reverse
tran._smat[key] = value
return tran
def _LDL_solve(self, rhs):
# For speed, the symmetry check below stays commented out; if you
# are having difficulties, uncomment it to make sure that the
# input matrix is symmetric
#assert self.is_symmetric()
L, D = self._LDL_sparse()
Z = L._lower_triangular_solve(rhs)
Y = D._diagonal_solve(Z)
return L.T._upper_triangular_solve(Y)
def _LDL_sparse(self):
"""Algorithm for numeric LDL factization, exploiting sparse structure.
"""
Lrowstruc = self.row_structure_symbolic_cholesky()
L = self.eye(self.rows)
D = self.zeros(self.rows, self.cols)
for i in range(len(Lrowstruc)):
for j in Lrowstruc[i]:
if i != j:
L[i, j] = self[i, j]
summ = 0
for p1 in Lrowstruc[i]:
if p1 < j:
for p2 in Lrowstruc[j]:
if p2 < j:
if p1 == p2:
summ += L[i, p1]*L[j, p1]*D[p1, p1]
else:
break
else:
break
L[i, j] -= summ
L[i, j] /= D[j, j]
elif i == j:
D[i, i] = self[i, i]
summ = 0
for k in Lrowstruc[i]:
if k < i:
summ += L[i, k]**2*D[k, k]
else:
break
D[i, i] -= summ
return L, D
def _lower_triangular_solve(self, rhs):
"""Fast algorithm for solving a lower-triangular system,
exploiting the sparsity of the given matrix.
"""
rows = [[] for i in range(self.rows)]
for i, j, v in self.row_list():
if i > j:
rows[i].append((j, v))
X = rhs.copy()
for i in range(self.rows):
for j, v in rows[i]:
X[i, 0] -= v*X[j, 0]
X[i, 0] /= self[i, i]
return self._new(X)
def _upper_triangular_solve(self, rhs):
"""Fast algorithm for solving an upper-triangular system,
exploiting the sparsity of the given matrix.
"""
rows = [[] for i in range(self.rows)]
for i, j, v in self.row_list():
if i < j:
rows[i].append((j, v))
X = rhs.copy()
for i in range(self.rows - 1, -1, -1):
rows[i].reverse()
for j, v in rows[i]:
X[i, 0] -= v*X[j, 0]
X[i, 0] /= self[i, i]
return self._new(X)
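# The two triangular solves above are the workhorses of _LDL_solve: with
# L*D*L.T == A, a system A*x = rhs is solved in three cheap sparse passes.
# A quick sketch (assuming a standard SymPy environment):
#
# >>> from sympy import SparseMatrix, Matrix
# >>> A = SparseMatrix([[4, 2], [2, 3]])
# >>> x = A._LDL_solve(Matrix([2, 5]))
# >>> A * x == Matrix([2, 5])
# True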
def add(self, other):
"""Add two sparse matrices with dictionary representation.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye, ones
>>> SparseMatrix(eye(3)).add(SparseMatrix(ones(3)))
Matrix([
[2, 1, 1],
[1, 2, 1],
[1, 1, 2]])
>>> SparseMatrix(eye(3)).add(-SparseMatrix(eye(3)))
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
Only the non-zero elements are stored, so the resulting dictionary
that is used to represent the sparse matrix is empty:
>>> _._smat
{}
See Also
========
multiply
"""
if not isinstance(other, SparseMatrix):
raise ValueError('only use add with %s, not %s' %
tuple([c.__class__.__name__ for c in (self, other)]))
if self.shape != other.shape:
raise ShapeError()
M = self.copy()
for i, v in other._smat.items():
v = M[i] + v
if v:
M._smat[i] = v
else:
M._smat.pop(i, None)
return M
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> m = SparseMatrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
out = self.copy()
for k, v in self._smat.items():
fv = f(v)
if fv:
out._smat[k] = fv
else:
out._smat.pop(k, None)
return out
def as_immutable(self):
"""Returns an Immutable version of this Matrix."""
from .immutable import ImmutableSparseMatrix
return ImmutableSparseMatrix(self)
def as_mutable(self):
"""Returns a mutable version of this matrix.
Examples
========
>>> from sympy import ImmutableMatrix
>>> X = ImmutableMatrix([[1, 2], [3, 4]])
>>> Y = X.as_mutable()
>>> Y[1, 1] = 5 # Can set values in Y
>>> Y
Matrix([
[1, 2],
[3, 5]])
"""
return MutableSparseMatrix(self)
def cholesky(self):
"""
Returns the Cholesky decomposition L of a matrix A
such that L * L.T = A
A must be a square, symmetric, positive-definite
and non-singular matrix
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25,15,-5),(15,18,0),(-5,0,11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T == A
True
"""
from sympy.core.numbers import nan, oo
if not self.is_symmetric():
raise ValueError('Cholesky decomposition applies only to '
'symmetric matrices.')
M = self.as_mutable()._cholesky_sparse()
if M.has(nan) or M.has(oo):
raise ValueError('Cholesky decomposition applies only to '
'positive-definite matrices')
return self._new(M)
def col_list(self):
"""Returns a column-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a=SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.CL
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
See Also
========
col_op
row_list
"""
return [tuple(k + (self[k],)) for k in
        sorted(list(self._smat.keys()), key=lambda k: list(reversed(k)))]
def col(self, j):
"""Returns column j from self as a column vector.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.col(0)
Matrix([
[1],
[3]])
See Also
========
row
col_list
"""
return self[:, j]
def copy(self):
return self._new(self.rows, self.cols, self._smat)
def extract(self, rowsList, colsList):
urow = list(uniq(rowsList))
ucol = list(uniq(colsList))
smat = {}
if len(urow)*len(ucol) < len(self._smat):
# there are fewer elements requested than there are elements in the matrix
for i, r in enumerate(urow):
for j, c in enumerate(ucol):
smat[i, j] = self._smat.get((r, c), 0)
else:
# most of the request will be zeros so check all of self's entries,
# keeping only the ones that are desired
for rk, ck in self._smat:
if rk in urow and ck in ucol:
smat[(urow.index(rk), ucol.index(ck))] = self._smat[(rk, ck)]
rv = self._new(len(urow), len(ucol), smat)
# rv is nominally correct but there might be rows/cols
# which require duplication
if len(rowsList) != len(urow):
for i, r in enumerate(rowsList):
i_previous = rowsList.index(r)
if i_previous != i:
rv = rv.row_insert(i, rv.row(i_previous))
if len(colsList) != len(ucol):
for i, c in enumerate(colsList):
i_previous = colsList.index(c)
if i_previous != i:
rv = rv.col_insert(i, rv.col(i_previous))
return rv
@classmethod
def eye(cls, n):
"""Return an n x n identity matrix."""
n = as_int(n)
return cls(n, n, {(i, i): S.One for i in range(n)})
def has(self, *patterns):
"""Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import SparseMatrix, Float
>>> from sympy.abc import x, y
>>> A = SparseMatrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
"""
return any(self[key].has(*patterns) for key in self._smat)
@property
def is_hermitian(self):
"""Checks if the matrix is Hermitian.
In a Hermitian matrix element i,j is the complex conjugate of
element j,i.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> from sympy import I
>>> from sympy.abc import x
>>> a = SparseMatrix([[1, I], [-I, 1]])
>>> a
Matrix([
[ 1, I],
[-I, 1]])
>>> a.is_hermitian
True
>>> a[0, 0] = 2*I
>>> a.is_hermitian
False
>>> a[0, 0] = x
>>> a.is_hermitian
>>> a[0, 1] = a[1, 0]*I
>>> a.is_hermitian
False
"""
def cond():
d = self._smat
yield self.is_square
if len(d) <= self.rows:
yield fuzzy_and(
d[i, i].is_real for i, j in d if i == j)
else:
yield fuzzy_and(
d[i, i].is_real for i in range(self.rows) if (i, i) in d)
yield fuzzy_and(
((self[i, j] - self[j, i].conjugate()).is_zero
if (j, i) in d else False) for (i, j) in d)
return fuzzy_and(i for i in cond())
@property
def is_Identity(self):
if not self.is_square:
return False
if not all(self[i, i] == 1 for i in range(self.rows)):
return False
return len(self._smat) == self.rows
def is_symmetric(self, simplify=True):
"""Return True if self is symmetric.
Examples
========
>>> from sympy.matrices import SparseMatrix, eye
>>> M = SparseMatrix(eye(3))
>>> M.is_symmetric()
True
>>> M[0, 2] = 1
>>> M.is_symmetric()
False
"""
if simplify:
return all((k[1], k[0]) in self._smat and
not (self[k] - self[(k[1], k[0])]).simplify()
for k in self._smat)
else:
return all((k[1], k[0]) in self._smat and
self[k] == self[(k[1], k[0])] for k in self._smat)
def LDLdecomposition(self):
"""
Returns the LDL Decomposition (matrices ``L`` and ``D``) of matrix
``A``, such that ``L * D * L.T == A``. ``A`` must be a square,
symmetric, positive-definite and non-singular.
This method eliminates the use of square root and ensures that all
the diagonal entries of L are 1.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0, 0],
[ 3/5, 1, 0],
[-1/5, 1/3, 1]])
>>> D
Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
>>> L * D * L.T == A
True
"""
from sympy.core.numbers import nan, oo
if not self.is_symmetric():
raise ValueError('LDL decomposition applies only to '
'symmetric matrices.')
L, D = self.as_mutable()._LDL_sparse()
if L.has(nan) or L.has(oo) or D.has(nan) or D.has(oo):
raise ValueError('LDL decomposition applies only to '
'positive-definite matrices')
return self._new(L), self._new(D)
def liupc(self):
"""Liu's algorithm, for pre-determination of the Elimination Tree of
the given matrix, used in row-based symbolic Cholesky factorization.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix([
... [1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.liupc()
([[0], [], [0], [1, 2]], [4, 3, 4, 4])
References
==========
Symbolic Sparse Cholesky Factorization using Elimination Trees,
Jeroen Van Grondelle (1999)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
"""
# Algorithm 2.4, p 17 of reference
# get the indices of the elements that are non-zero on or below diag
R = [[] for r in range(self.rows)]
for r, c, _ in self.row_list():
if c <= r:
R[r].append(c)
inf = len(R) # nothing will be this large
parent = [inf]*self.rows
virtual = [inf]*self.rows
for r in range(self.rows):
for c in R[r][:-1]:
while virtual[c] < r:
t = virtual[c]
virtual[c] = r
c = t
if virtual[c] == inf:
parent[c] = virtual[c] = r
return R, parent
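# Reading the example above: R[r] lists the column indices of the nonzero
# entries on or below the diagonal in row r, and parent encodes the
# elimination tree, with len(R) (here 4) marking a tree root; e.g.
# parent[1] == 3 says node 1's parent in the elimination tree is node 3.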
def multiply(self, other):
"""Fast multiplication exploiting the sparsity of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> A, B = SparseMatrix(ones(4, 3)), SparseMatrix(ones(3, 4))
>>> A.multiply(B) == 3*ones(4)
True
See Also
========
add
"""
A = self
B = other
# sort B's row_list into list of rows
Blist = [[] for i in range(B.rows)]
for i, j, v in B.row_list():
Blist[i].append((j, v))
Cdict = defaultdict(int)
for k, j, Akj in A.row_list():
for n, Bjn in Blist[j]:
temp = Akj*Bjn
Cdict[k, n] += temp
rv = self.zeros(A.rows, B.cols)
rv._smat = {k: v for k, v in Cdict.items() if v}
return rv
def nnz(self):
"""Returns the number of non-zero elements in Matrix."""
return len(self._smat)
def reshape(self, rows, cols):
"""Reshape matrix while retaining original size.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix(4, 2, range(8))
>>> S.reshape(2, 4)
Matrix([
[0, 1, 2, 3],
[4, 5, 6, 7]])
"""
if len(self) != rows*cols:
raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
smat = {}
for k, v in self._smat.items():
i, j = k
n = i*self.cols + j
ii, jj = divmod(n, cols)
smat[(ii, jj)] = self._smat[(i, j)]
return self._new(rows, cols, smat)
def row_list(self):
"""Returns a row-sorted list of non-zero elements of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.RL
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
See Also
========
row_op
col_list
"""
return [tuple(k + (self[k],)) for k in
sorted(list(self._smat.keys()), key=lambda k: list(k))]
def row_structure_symbolic_cholesky(self):
"""Symbolic cholesky factorization, for pre-determination of the
non-zero structure of the Cholesky factororization.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix([
... [1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.row_structure_symbolic_cholesky()
[[0], [], [0], [1, 2]]
References
==========
Symbolic Sparse Cholesky Factorization using Elimination Trees,
Jeroen Van Grondelle (1999)
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.39.7582
"""
R, parent = self.liupc()
inf = len(R) # this acts as infinity
Lrow = copy.deepcopy(R)
for k in range(self.rows):
for j in R[k]:
while j != inf and j != k:
Lrow[k].append(j)
j = parent[j]
Lrow[k] = list(sorted(set(Lrow[k])))
return Lrow
def row(self, i):
"""Returns column i from self as a row vector.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.row(0)
Matrix([[1, 2]])
See Also
========
col
row_list
"""
return self[i,:]
def scalar_multiply(self, scalar):
"Scalar element-wise multiplication"
M = self.zeros(*self.shape)
if scalar:
for i in self._smat:
v = scalar*self._smat[i]
if v:
M._smat[i] = v
else:
M._smat.pop(i, None)
return M
def solve_least_squares(self, rhs, method='LDL'):
"""Return the least-square fit to the data.
By default the cholesky_solve routine is used (method='CH'); other
methods of matrix inversion can be used. To find out which are
available, see the docstring of the .inv() method.
Examples
========
>>> from sympy.matrices import SparseMatrix, Matrix, ones
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = SparseMatrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each row of S represents the coefficients of Ax + By
and [x, y] is [2, 3], then S*xy is:
>>> r = S*Matrix([2, 3]); r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().n(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().n(2)
1.5
"""
t = self.T
return (t*self).inv(method=method)*t*rhs
def solve(self, rhs, method='LDL'):
"""Return solution to self*soln = rhs using given inversion method.
For a list of possible inversion methods, see the .inv() docstring.
"""
if not self.is_square:
if self.rows < self.cols:
raise ValueError('Under-determined system.')
elif self.rows > self.cols:
raise ValueError('For over-determined system, M, having '
'more rows than columns, try M.solve_least_squares(rhs).')
else:
return self.inv(method=method)*rhs
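# Illustrative use of solve (a sketch, assuming a standard SymPy
# environment):
#
# >>> from sympy import SparseMatrix, Matrix
# >>> A = SparseMatrix([[2, 0], [0, 4]])
# >>> A.solve(Matrix([2, 4]))
# Matrix([
# [1],
# [1]])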
def tolist(self):
"""Convert this sparse matrix into a list of nested Python lists.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.tolist()
[[1, 2], [3, 4]]
When there are no rows then it will not be possible to tell how
many columns were in the original matrix:
>>> SparseMatrix(ones(0, 3)).tolist()
[]
"""
if not self.rows:
return []
if not self.cols:
return [[] for i in range(self.rows)]
I, J = self.shape
return [[self[i, j] for j in range(J)] for i in range(I)]
RL = property(row_list, None, None, "Alternate faster representation")
CL = property(col_list, None, None, "Alternate faster representation")
__matmul__ = __mul__
__rmatmul__ = __rmul__
extract.__doc__ = MatrixBase.extract.__doc__
@classmethod
def zeros(cls, r, c=None):
"""Return an r x c matrix of zeros, square if c is omitted."""
c = r if c is None else c
r = as_int(r)
c = as_int(c)
return cls(r, c, {})
class MutableSparseMatrix(SparseMatrix, MatrixBase):
@classmethod
def _new(cls, *args, **kwargs):
return cls(*args)
def __setitem__(self, key, value):
"""Assign value to position designated by key.
Examples
========
>>> from sympy.matrices import SparseMatrix, ones
>>> M = SparseMatrix(2, 2, {})
>>> M[1] = 1; M
Matrix([
[0, 1],
[0, 0]])
>>> M[1, 1] = 2; M
Matrix([
[0, 1],
[0, 2]])
>>> M = SparseMatrix(2, 2, {})
>>> M[:, 1] = [1, 1]; M
Matrix([
[0, 1],
[0, 1]])
>>> M = SparseMatrix(2, 2, {})
>>> M[1, :] = [[1, 1]]; M
Matrix([
[0, 0],
[1, 1]])
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = SparseMatrix(4, 4, {})
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
rv = self._setitem(key, value)
if rv is not None:
i, j, value = rv
if value:
self._smat[(i, j)] = value
elif (i, j) in self._smat:
del self._smat[(i, j)]
def as_mutable(self):
return self.copy()
__hash__ = None
def col_del(self, k):
"""Delete the given column of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix([[0, 0], [0, 1]])
>>> M
Matrix([
[0, 0],
[0, 1]])
>>> M.col_del(0)
>>> M
Matrix([
[0],
[1]])
See Also
========
row_del
"""
newD = {}
k = a2idx(k, self.cols)
for (i, j) in self._smat:
if j == k:
pass
elif j > k:
newD[i, j - 1] = self._smat[i, j]
else:
newD[i, j] = self._smat[i, j]
self._smat = newD
self.cols -= 1
def col_join(self, other):
"""Returns B augmented beneath A (row-wise joining)::
[A]
[B]
Examples
========
>>> from sympy import SparseMatrix, Matrix, ones
>>> A = SparseMatrix(ones(3))
>>> A
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
>>> B = SparseMatrix.eye(3)
>>> B
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C = A.col_join(B); C
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C == A.col_join(Matrix(B))
True
Joining along columns is the same as appending rows at the end
of the matrix:
>>> C == A.row_insert(A.rows, Matrix(B))
True
"""
if not self:
return type(self)(other)
A, B = self, other
if not A.cols == B.cols:
raise ShapeError()
A = A.copy()
if not isinstance(B, SparseMatrix):
k = 0
b = B._mat
for i in range(B.rows):
for j in range(B.cols):
v = b[k]
if v:
A._smat[(i + A.rows, j)] = v
k += 1
else:
for (i, j), v in B._smat.items():
A._smat[i + A.rows, j] = v
A.rows += B.rows
return A
def col_op(self, j, f):
"""In-place operation on col j using two-arg functor whose args are
interpreted as (self[i, j], i) for i in range(self.rows).
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.eye(3)*2
>>> M[1, 0] = -1
>>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M
Matrix([
[ 2, 4, 0],
[-1, 0, 0],
[ 0, 0, 2]])
"""
for i in range(self.rows):
v = self._smat.get((i, j), S.Zero)
fv = f(v, i)
if fv:
self._smat[(i, j)] = fv
elif v:
self._smat.pop((i, j))
def col_swap(self, i, j):
"""Swap, in place, columns i and j.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix.eye(3); S[2, 1] = 2
>>> S.col_swap(1, 0); S
Matrix([
[0, 1, 0],
[1, 0, 0],
[2, 0, 1]])
"""
if i > j:
i, j = j, i
rows = self.col_list()
temp = []
for ii, jj, v in rows:
if jj == i:
self._smat.pop((ii, jj))
temp.append((ii, v))
elif jj == j:
self._smat.pop((ii, jj))
self._smat[ii, i] = v
elif jj > j:
break
for k, v in temp:
self._smat[k, j] = v
def copyin_list(self, key, value):
if not is_sequence(value):
raise TypeError("`value` must be of type list or tuple.")
self.copyin_matrix(key, Matrix(value))
def copyin_matrix(self, key, value):
# include this here because it's not part of BaseMatrix
rlo, rhi, clo, chi = self.key2bounds(key)
shape = value.shape
dr, dc = rhi - rlo, chi - clo
if shape != (dr, dc):
raise ShapeError(
"The Matrix `value` doesn't have the same dimensions "
"as the in sub-Matrix given by `key`.")
if not isinstance(value, SparseMatrix):
for i in range(value.rows):
for j in range(value.cols):
self[i + rlo, j + clo] = value[i, j]
else:
if (rhi - rlo)*(chi - clo) < len(self):
for i in range(rlo, rhi):
for j in range(clo, chi):
self._smat.pop((i, j), None)
else:
for i, j, v in self.row_list():
if rlo <= i < rhi and clo <= j < chi:
self._smat.pop((i, j), None)
for k, v in value._smat.items():
i, j = k
self[i + rlo, j + clo] = value[i, j]
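# Illustrative use (a sketch, assuming a standard SymPy environment):
# slice assignment on a mutable sparse matrix reaches copyin_matrix
# through __setitem__/_setitem:
#
# >>> from sympy import SparseMatrix, Matrix
# >>> M = SparseMatrix.zeros(3)
# >>> M[:2, :2] = Matrix([[1, 2], [3, 4]])
# >>> M.nnz()
# 4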
def fill(self, value):
"""Fill self with the given value.
Notes
=====
Unless many values are going to be deleted (i.e. set to zero)
this will create a matrix that is slower than a dense matrix in
operations.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.zeros(3); M
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> M.fill(1); M
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
"""
if not value:
self._smat = {}
else:
v = self._sympify(value)
self._smat = dict([((i, j), v)
for i in range(self.rows) for j in range(self.cols)])
def row_del(self, k):
"""Delete the given row of the matrix.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix([[0, 0], [0, 1]])
>>> M
Matrix([
[0, 0],
[0, 1]])
>>> M.row_del(0)
>>> M
Matrix([[0, 1]])
See Also
========
col_del
"""
newD = {}
k = a2idx(k, self.rows)
for (i, j) in self._smat:
if i == k:
pass
elif i > k:
newD[i - 1, j] = self._smat[i, j]
else:
newD[i, j] = self._smat[i, j]
self._smat = newD
self.rows -= 1
def row_join(self, other):
"""Returns B appended after A (column-wise augmenting)::
[A B]
Examples
========
>>> from sympy import SparseMatrix, Matrix
>>> A = SparseMatrix(((1, 0, 1), (0, 1, 0), (1, 1, 0)))
>>> A
Matrix([
[1, 0, 1],
[0, 1, 0],
[1, 1, 0]])
>>> B = SparseMatrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))
>>> B
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C = A.row_join(B); C
Matrix([
[1, 0, 1, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 1]])
>>> C == A.row_join(Matrix(B))
True
Joining at row ends is the same as appending columns at the end
of the matrix:
>>> C == A.col_insert(A.cols, B)
True
"""
if not self:
return type(self)(other)
A, B = self, other
if not A.rows == B.rows:
raise ShapeError()
A = A.copy()
if not isinstance(B, SparseMatrix):
k = 0
b = B._mat
for i in range(B.rows):
for j in range(B.cols):
v = b[k]
if v:
A._smat[(i, j + A.cols)] = v
k += 1
else:
for (i, j), v in B._smat.items():
A._smat[(i, j + A.cols)] = v
A.cols += B.cols
return A
def row_op(self, i, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], j)``.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.eye(3)*2
>>> M[0, 1] = -1
>>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M
Matrix([
[2, -1, 0],
[4, 0, 0],
[0, 0, 2]])
See Also
========
row
zip_row_op
col_op
"""
for j in range(self.cols):
v = self._smat.get((i, j), S.Zero)
fv = f(v, j)
if fv:
self._smat[(i, j)] = fv
elif v:
self._smat.pop((i, j))
def row_swap(self, i, j):
"""Swap, in place, columns i and j.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> S = SparseMatrix.eye(3); S[2, 1] = 2
>>> S.row_swap(1, 0); S
Matrix([
[0, 1, 0],
[1, 0, 0],
[0, 2, 1]])
"""
if i > j:
i, j = j, i
rows = self.row_list()
temp = []
for ii, jj, v in rows:
if ii == i:
self._smat.pop((ii, jj))
temp.append((jj, v))
elif ii == j:
self._smat.pop((ii, jj))
self._smat[i, jj] = v
elif ii > j:
break
for k, v in temp:
self._smat[j, k] = v
def zip_row_op(self, i, k, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], self[k, j])``.
Examples
========
>>> from sympy.matrices import SparseMatrix
>>> M = SparseMatrix.eye(3)*2
>>> M[0, 1] = -1
>>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M
Matrix([
[2, -1, 0],
[4, 0, 0],
[0, 0, 2]])
See Also
========
row
row_op
col_op
"""
self.row_op(i, lambda v, j: f(v, self[k, j]))
|
antepsis/anteplahmacun
|
sympy/matrices/sparse.py
|
Python
|
bsd-3-clause
| 45,694
|
[
"DIRAC"
] |
bb946c554f14d655a61eaeb6b0f607b7998796c259e817bc644f4fe99f99325c
|
import pkg_resources
# can't find it in the frozen library.zip...
# pkg_resources.require("pysam>=0.8.1")
import pysam
NEEDED_VERSION = "0.15.4"
assert pkg_resources.parse_version(pysam.version.__version__) >= pkg_resources.parse_version(NEEDED_VERSION), "PySam version at least %s required"%(NEEDED_VERSION,)
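# For reference: parse_version gives PEP 440-aware ordering, which a plain
# string comparison would get wrong, e.g.
# >>> pkg_resources.parse_version("0.15.4") > pkg_resources.parse_version("0.9")
# True
# >>> "0.15.4" > "0.9"  # lexicographic comparison misorders versions
# False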
|
HorvathLab/NGS
|
common/src/pysamimport.py
|
Python
|
mit
| 310
|
[
"pysam"
] |
1dd22a2c1ca56bb9a1ddfeb8cbc8007079770d9140d693e512293305d1e71005
|
from sdssgaussfitter import gaussfit
import numpy as np
from util import utils
import matplotlib.pyplot as plt
import sys
### This is used to view a single frame from a display-stack or psffit npz file. On the command line, type "python sdssview_frameBlue.py 3" to view image number 3.
def aperture(startpx,startpy,radius=3):
r = radius
length = 2*r
height = length
allx = xrange(startpx-int(np.ceil(length/2.0)),startpx+int(np.floor(length/2.0))+1)
ally = xrange(startpy-int(np.ceil(height/2.0)),startpy+int(np.floor(height/2.0))+1)
pixx = []
pixy = []
mask=np.ones((46,44))
for x in allx:
for y in ally:
if (np.abs(x-startpx))**2+(np.abs(y-startpy))**2 <= (r)**2 and 0 <= y and y < 46 and 0 <= x and x < 44:
mask[y,x]=0.
return mask
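# Illustrative note: aperture(22, 23) returns a 46x44 array of ones with
# zeros at the pixels within 3 px of (x=22, y=23), so mask[23, 22] == 0.0
# while a far-away pixel such as mask[0, 0] stays 1.0.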
def gaussian(height, center_x, center_y, width_x, width_y,offset):
"""Returns a gaussian function with the given parameters"""
width_x = float(width_x)
width_y = float(width_y)
return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)+offset
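# Quick sanity check: at the center the returned function evaluates to
# height + offset, e.g.
# >>> g = gaussian(2.0, 0, 0, 1, 1, 0.5)
# >>> g(0, 0)
# 2.5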
stackDict = np.load('/Scratch/dataProcessing/SDSS_J0926/Dec8ImageStackAllInt3Aperture510.npz')
stack = stackDict['stack']
if len(sys.argv) == 1:
print 'Usage: ',sys.argv[0],' iFrame'
print """
set0 Frames 0-179
"""
exit(1)
iFrame = int(sys.argv[1])
frame = stack[:,:,iFrame]
# plt.hist(np.ravel(frame),bins=100,range=(0,5000))
# plt.show()
nanMask = np.isnan(frame)
frame[nanMask] = 0
frame = np.ma.masked_array(frame,mask=nanMask)
utils.plotArray(frame,cbar=True)
|
bmazin/ARCONS-pipeline
|
examples/Pal2012-sdss/sdssview_frameBlue.py
|
Python
|
gpl-2.0
| 1,595
|
[
"Gaussian"
] |
bd571c08d0a8705dafc07605f6afb5084147f3d179bd4c29ca4534117477708d
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon, John D. Chodera
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
"""Load an md.Topology from tripos mol2 files.
"""
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import numpy as np
import itertools
from mdtraj.utils import import_
from mdtraj.utils.six.moves import cStringIO as StringIO
from mdtraj.formats.registry import _FormatRegistry
__all__ = ['load_mol2', "mol2_to_dataframes"]
@_FormatRegistry.register_loader('.mol2')
def load_mol2(filename):
"""Load a TRIPOS mol2 file from disk.
Parameters
----------
filename : str
Path to the mol2 file on disk.
Returns
-------
traj : md.Trajectory
The resulting trajectory (a single frame); its topology is an md.Topology object.
Notes
-----
This function should work on GAFF and sybyl style MOL2 files, but has
been primarily tested on GAFF mol2 files.
This function does NOT accept multi-structure MOL2 files!!!
The elements are guessed using GAFF atom types or via the atype string.
Examples
--------
>>> traj = md.load_mol2('mysystem.mol2')
"""
from mdtraj.core.trajectory import Trajectory
from mdtraj.core.topology import Topology
atoms, bonds = mol2_to_dataframes(filename)
atoms_mdtraj = atoms[["name", "resName"]].copy()
atoms_mdtraj["serial"] = atoms.index
# Figure out 1-letter element names
# If this is a GAFF mol2, this line should work without issues
atoms_mdtraj["element"] = atoms.atype.map(gaff_elements)
# If this is a sybyl mol2, there should be NAN (null) values
if atoms_mdtraj.element.isnull().any():
# If this is a sybyl mol2, I think this works generally.
atoms_mdtraj["element"] = atoms.atype.apply(lambda x: x.strip(".")[0])
atoms_mdtraj["resSeq"] = np.ones(len(atoms), 'int')
atoms_mdtraj["chainID"] = np.ones(len(atoms), 'int')
bonds_mdtraj = bonds[["id0", "id1"]].values
offset = bonds_mdtraj.min() # Should this just be 1???
bonds_mdtraj -= offset
top = Topology.from_dataframe(atoms_mdtraj, bonds_mdtraj)
xyzlist = np.array([atoms[["x", "y", "z"]].values])
xyzlist /= 10.0 # Convert from angstrom to nanometer
traj = Trajectory(xyzlist, top)
return traj
def mol2_to_dataframes(filename):
"""Convert a GAFF (or sybyl) mol2 file to a pair of pandas dataframes.
Parameters
----------
filename : str
Name of mol2 filename
Returns
-------
atoms_frame : pd.DataFrame
DataFrame containing atom information
bonds_frame : pd.DataFrame
DataFrame containing bond information
Notes
-----
These dataframes may contain force field information as well as the
information necessary for constructing the coordinates and molecular
topology. This function has been tested for GAFF and sybyl-style
mol2 files but has been primarily tested on GAFF mol2 files.
This function does NOT accept multi-structure MOL2 files!!!
See Also
--------
If you just need the coordinates and bonds, use load_mol2(filename)
to get a Trajectory object.
"""
pd = import_('pandas')
with open(filename) as f:
data = dict((key, list(grp)) for key, grp in itertools.groupby(f, _parse_mol2_sections))
csv = StringIO()
csv.writelines(data["@<TRIPOS>BOND\n"][1:])
csv.seek(0)
bonds_frame = pd.read_table(csv, names=["bond_id", "id0", "id1", "bond_type"],
index_col=0, header=None, sep="\s*", engine='python')
csv = StringIO()
csv.writelines(data["@<TRIPOS>ATOM\n"][1:])
csv.seek(0)
atoms_frame = pd.read_csv(csv, sep="\s*", engine='python', header=None,
names=["serial", "name", "x", "y", "z",
"atype", "code", "resName", "charge"])
return atoms_frame, bonds_frame
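# Illustrative usage (hypothetical file name):
# >>> atoms, bonds = mol2_to_dataframes('ligand.mol2')
# >>> atoms[['name', 'x', 'y', 'z']].head()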
def _parse_mol2_sections(x):
"""Helper function for parsing a section in a MOL2 file."""
if x.startswith('@<TRIPOS>'):
_parse_mol2_sections.key = x
return _parse_mol2_sections.key
gaff_elements = {
'br': 'Br',
'c': 'C',
'c1': 'C',
'c2': 'C',
'c3': 'C',
'ca': 'C',
'cc': 'C',
'cd': 'C',
'ce': 'C',
'cf': 'C',
'cg': 'C',
'ch': 'C',
'cl': 'Cl',
'cp': 'C',
'cq': 'C',
'cu': 'C',
'cv': 'C',
'cx': 'C',
'cy': 'C',
'cz': 'C',
'f': 'F',
'h1': 'H',
'h2': 'H',
'h3': 'H',
'h4': 'H',
'h5': 'H',
'ha': 'H',
'hc': 'H',
'hn': 'H',
'ho': 'H',
'hp': 'H',
'hs': 'H',
'hw': 'H',
'hx': 'H',
'i': 'I',
'n': 'N',
'n1': 'N',
'n2': 'N',
'n3': 'N',
'n4': 'N',
'na': 'N',
'nb': 'N',
'nc': 'N',
'nd': 'N',
'ne': 'N',
'nf': 'N',
'nh': 'N',
'no': 'N',
'o': 'O',
'oh': 'O',
'os': 'O',
'ow': 'O',
'p2': 'P',
'p3': 'P',
'p4': 'P',
'p5': 'P',
'pb': 'P',
'px': 'P',
'py': 'P',
's': 'S',
's2': 'S',
's4': 'S',
's6': 'S',
'sh': 'S',
'ss': 'S',
'sx': 'S',
'sy': 'S'}
|
kyleabeauchamp/mdtraj
|
mdtraj/formats/mol2.py
|
Python
|
lgpl-2.1
| 7,419
|
[
"MDTraj",
"OpenMM"
] |
934d7a3238b464d443855d4a072f54b395fb80928aeee5e607f5bd581468cad7
|
import logging
import re
import os
import signal
from avocado.utils import path
from avocado.utils import process
from avocado.utils import linux_modules
from .compat_52lts import results_stdout_52lts
from .versionable_class import VersionableClass, Manager, factory
from . import utils_misc
# Register to class manager.
man = Manager(__name__)
class ServiceManagerInterface(object):
def __new__(cls, *args, **kargs):
ServiceManagerInterface.master_class = ServiceManagerInterface
return super(ServiceManagerInterface, cls).__new__(cls, *args, **kargs)
@classmethod
def get_version(cls):
"""
Get version of ServiceManager.
:return: Version of ServiceManager.
"""
return open("/proc/1/comm", "r").read().strip()
def stop(self, service_name):
raise NotImplementedError("Method 'stop' must be"
" implemented in child class")
def start(self, service_name):
raise NotImplementedError("Method 'start' must be"
" implemented in child class")
def restart(self, service_name):
raise NotImplementedError("Method 'restart' must be"
" implemented in child class")
def status(self, service_name):
raise NotImplementedError("Method 'status' must be"
" implemented in child class")
class ServiceManagerSysvinit(ServiceManagerInterface):
@classmethod
def _is_right_ver(cls):
version = cls.get_version()
if version == "init":
return True
return False
def stop(self, service_name):
process.run("/etc/init.d/%s stop" % (service_name))
def start(self, service_name):
process.run("/etc/init.d/%s start" % (service_name))
def restart(self, service_name):
process.run("/etc/init.d/%s restart" % (service_name))
class ServiceManagerSystemD(ServiceManagerSysvinit):
@classmethod
def _is_right_ver(cls):
version = cls.get_version()
if version == "systemd":
return True
return False
def stop(self, service_name):
process.run("systemctl stop %s.service" % (service_name))
def start(self, service_name):
process.run("systemctl start %s.service" % (service_name))
def restart(self, service_name):
process.run("systemctl restart %s.service" % (service_name))
def status(self, service_name):
process.run("systemctl show %s.service" % (service_name))
class ServiceManager(VersionableClass):
__master__ = ServiceManagerSystemD
class OpenVSwitchControl(object):
"""
Class that selects the best-matching control class for the installed
version of OpenVSwitch.
OpenVSwitch parameters are described in man ovs-vswitchd.conf.db
"""
def __new__(cls, db_path=None, db_socket=None, db_pidfile=None,
ovs_pidfile=None, dbschema=None, install_prefix=None):
"""
Initialize OpenVSwitch.
:param tmpdir: Temp directory for saving openvswitch test files.
:param db_path: Path of the OVS database.
:param db_socket: Path of OVS db socket.
:param db_pidfile: Path of OVS db ovsdb-server pid.
:param ovs_pidfile: Path of OVS ovs-vswitchd pid.
:param install_prefix: Path where is openvswitch installed.
"""
# if path is None set default path.
if not install_prefix:
install_prefix = "/"
if not db_path:
db_path = os.path.join(install_prefix,
"/etc/openvswitch/conf.db")
if not db_socket:
db_socket = os.path.join(install_prefix,
"/var/run/openvswitch/db.sock")
if not db_pidfile:
db_pidfile = os.path.join(install_prefix,
"/var/run/openvswitch/ovsdb-server.pid")
if not ovs_pidfile:
ovs_pidfile = os.path.join(install_prefix,
"/var/run/openvswitch/ovs-vswitchd.pid")
if not dbschema:
dbschema = os.path.join(install_prefix,
"/usr/share/openvswitch/vswitch.ovsschema")
OpenVSwitchControl.install_prefix = install_prefix
OpenVSwitchControl.db_path = db_path
OpenVSwitchControl.db_socket = db_socket
OpenVSwitchControl.db_pidfile = db_pidfile
OpenVSwitchControl.ovs_pidfile = ovs_pidfile
OpenVSwitchControl.dbschema = install_prefix, dbschema
os.environ["PATH"] = (os.path.join(install_prefix, "usr/bin:") +
os.environ["PATH"])
os.environ["PATH"] = (os.path.join(install_prefix, "usr/sbin:") +
os.environ["PATH"])
return super(OpenVSwitchControl, cls).__new__(cls)
@staticmethod
def convert_version_to_int(version):
"""
:param version: version string, e.g. "1.4.0" (an int is returned as-is)
:return: integer form of the version, e.g. 140
"""
if isinstance(version, int):
return version
try:
a = re.findall(r'^(\d+)\.?(\d+)\.?(\d+)\-?', version)[0]
int_ver = int(''.join(a))
except Exception:
raise ValueError("Wrong version format '%s'" % version)
return int_ver
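# e.g. (a sketch):
# >>> OpenVSwitchControl.convert_version_to_int("1.4.0")
# 140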
@classmethod
def get_version(cls):
"""
Get version of the installed OpenVSwitch.
:return: Version of OpenVSwitch, or None if it is not available.
"""
version = None
try:
result = process.run("%s --version" %
path.find_command("ovs-vswitchd"))
pattern = "ovs-vswitchd \(Open vSwitch\) (\d+\.\d+\.\d+).*"
version = re.search(pattern,
results_stdout_52lts(result)).group(1)
except process.CmdError:
logging.debug("OpenVSwitch is not available in system.")
return version
def status(self):
raise NotImplementedError()
def add_br(self, br_name):
raise NotImplementedError()
def del_br(self, br_name):
raise NotImplementedError()
def br_exist(self, br_name):
raise NotImplementedError()
def list_br(self):
raise NotImplementedError()
def add_port(self, br_name, port_name):
raise NotImplementedError()
def del_port(self, br_name, port_name):
raise NotImplementedError()
def add_port_tag(self, port_name, tag):
raise NotImplementedError()
def add_port_trunk(self, port_name, trunk):
raise NotImplementedError()
def set_vlanmode(self, port_name, vlan_mode):
raise NotImplementedError()
def check_port_in_br(self, br_name, port_name):
raise NotImplementedError()
class OpenVSwitchControlDB_140(OpenVSwitchControl):
"""
Don't use this class directly. This class is automatically selected by
OpenVSwitchControl.
"""
@classmethod
def _is_right_ver(cls):
"""
Check the condition for selecting this control class, based on the
installed OpenVSwitch version.
"""
version = cls.get_version()
if version is not None:
int_ver = cls.convert_version_to_int(version)
if int_ver >= 140:
return True
return False
# TODO: implement database manipulation methods.
class OpenVSwitchControlDB_CNT(VersionableClass):
__master__ = OpenVSwitchControlDB_140
class OpenVSwitchControlCli_140(OpenVSwitchControl):
"""
Don't use this class directly. This class is automatically selected by
OpenVSwitchControl.
"""
@classmethod
def _is_right_ver(cls):
"""
Check the condition for selecting this control class, based on the
installed OpenVSwitch version.
"""
version = cls.get_version()
if version is not None:
int_ver = cls.convert_version_to_int(version)
if int_ver >= 140:
return True
return False
def ovs_vsctl(self, params, ignore_status=False):
return process.run('%s --db=unix:%s %s' %
(path.find_command("ovs-vsctl"),
self.db_socket, " ".join(params)), timeout=10,
ignore_status=ignore_status, verbose=False)
def status(self):
return results_stdout_52lts(self.ovs_vsctl(["show"]))
def add_br(self, br_name):
self.ovs_vsctl(["add-br", br_name])
def add_fake_br(self, br_name, parent, vlan):
self.ovs_vsctl(["add-br", br_name, parent, vlan])
def del_br(self, br_name):
try:
self.ovs_vsctl(["del-br", br_name])
except process.CmdError as e:
logging.debug(e.result)
raise
def br_exist(self, br_name):
try:
self.ovs_vsctl(["br-exists", br_name])
except process.CmdError as e:
if e.result.exit_status == 2:
return False
else:
raise
return True
def list_br(self):
return results_stdout_52lts(self.ovs_vsctl(["list-br"])).splitlines()
def add_port(self, br_name, port_name):
self.ovs_vsctl(["add-port", br_name, port_name])
def del_port(self, br_name, port_name):
self.ovs_vsctl(["del-port", br_name, port_name])
def add_port_tag(self, port_name, tag):
self.ovs_vsctl(["set", "Port", port_name, "tag=%s" % tag])
def add_port_trunk(self, port_name, trunk):
"""
:param trunk: list of vlan ids.
"""
trunk = list(map(lambda x: str(x), trunk))
trunk = "[" + ",".join(trunk) + "]"
self.ovs_vsctl(["set", "Port", port_name, "trunk=%s" % trunk])
def set_vlanmode(self, port_name, vlan_mode):
self.ovs_vsctl(["set", "Port", port_name, "vlan-mode=%s" % vlan_mode])
def list_ports(self, br_name):
result = self.ovs_vsctl(["list-ports", br_name])
return results_stdout_52lts(result).splitlines()
def port_to_br(self, port_name):
"""
Return the bridge which contains the port.
:param port_name: Name of the port.
:return: Bridge name, or None if no bridge contains the port.
"""
bridge = None
try:
result = self.ovs_vsctl(["port-to-br", port_name])
bridge = results_stdout_52lts(result).strip()
except process.CmdError as e:
if e.result.exit_status == 1:
pass
return bridge
class OpenVSwitchControlCli_CNT(VersionableClass):
__master__ = OpenVSwitchControlCli_140
class OpenVSwitchSystem(OpenVSwitchControlCli_CNT, OpenVSwitchControlDB_CNT):
"""
OpenVSwitch class.
"""
def __init__(self, db_path=None, db_socket=None, db_pidfile=None,
ovs_pidfile=None, dbschema=None, install_prefix=None):
"""
Initialize OpenVSwitch.
:param db_path: Path of OVS database.
:param db_socket: Path of OVS db socket.
:param db_pidfile: Path of OVS db ovsdb-server pid.
:param ovs_pidfile: Path of OVS ovs-vswitchd pid.
:param install_prefix: Path where is openvswitch installed.
"""
sup = super(man[self.__class__, OpenVSwitchSystem], self)
sup.__init__(self, db_path, db_socket, db_pidfile, ovs_pidfile,
dbschema, install_prefix)
self.cleanup = False
self.pid_files_path = None
def is_installed(self):
"""
Check if OpenVSwitch is already installed on the system in the
default places.
:return: True if OpenVSwitch is installed, False otherwise.
"""
if self.get_version():
return True
else:
return False
def check_db_daemon(self):
"""
Check if the OVS database daemon (ovsdb-server) is running.
"""
working = utils_misc.program_is_alive(
"ovsdb-server", self.pid_files_path)
if not working:
logging.error("OpenVSwitch database daemon with PID in file %s"
" not working.", self.db_pidfile)
return working
def check_switch_daemon(self):
"""
Check if the OVS switch daemon (ovs-vswitchd) is running.
"""
working = utils_misc.program_is_alive(
"ovs-vswitchd", self.pid_files_path)
if not working:
logging.error("OpenVSwitch switch daemon with PID in file %s"
" not working.", self.ovs_pidfile)
return working
def check_db_file(self):
"""
Check if db_file exists.
"""
exists = os.path.exists(self.db_path)
if not exists:
logging.error("OpenVSwitch database file %s not exists.",
self.db_path)
return exists
def check_db_socket(self):
"""
Check if db socket exists.
"""
exists = os.path.exists(self.db_socket)
if not exists:
logging.error("OpenVSwitch database socket file %s not exists.",
self.db_socket)
return exists
def check(self):
return (self.check_db_daemon() and self.check_switch_daemon() and
self.check_db_file() and self.check_db_socket())
def init_system(self):
"""
Initialize the system-wide OpenVSwitch: load the kernel module and restart the service.
"""
sm = factory(ServiceManager)()
try:
if linux_modules.load_module("openvswitch"):
sm.restart("openvswitch")
except process.CmdError:
logging.error("Service OpenVSwitch is probably not"
" installed in system.")
raise
self.pid_files_path = "/var/run/openvswitch/"
def clean(self):
"""
Empty cleanup function
"""
pass
class OpenVSwitch(OpenVSwitchSystem):
"""
OpenVSwitch class.
"""
def __init__(self, tmpdir, db_path=None, db_socket=None, db_pidfile=None,
ovs_pidfile=None, dbschema=None, install_prefix=None):
"""
Initialize OpenVSwitch.
:param tmpdir: Temp directory for saving openvswitch test files.
:param db_path: Path of OVS database.
:param db_socket: Path of OVS db socket.
:param db_pidfile: Path of OVS db ovsdb-server pid.
:param ovs_pidfile: Path of OVS ovs-vswitchd pid.
:param install_prefix: Path where is openvswitch installed.
"""
super(man[self, OpenVSwitch], self).__init__(db_path, db_socket,
db_pidfile, ovs_pidfile,
dbschema, install_prefix)
self.tmpdir = "/%s/openvswitch" % (tmpdir)
try:
os.mkdir(self.tmpdir)
except OSError as e:
if e.errno != 17:
raise
def init_db(self):
process.run('%s %s %s %s' %
(path.find_command("ovsdb-tool"), "create",
self.db_path, self.dbschema))
process.run('%s %s %s %s %s' %
(path.find_command("ovsdb-server"),
"--remote=punix:%s" % (self.db_socket),
"--remote=db:Open_vSwitch,manager_options",
"--pidfile=%s" % (self.db_pidfile),
"--detach"))
self.ovs_vsctl(["--no-wait", "init"])
def start_ovs_vswitchd(self):
process.run('%s %s %s %s' %
(path.find_command("ovs-vswitchd"),
"--detach",
"--pidfile=%s" % self.ovs_pidfile,
"unix:%s" % self.db_socket))
def init_new(self):
"""
Create new dbfile without any configuration.
"""
self.db_path = os.path.join(self.tmpdir, "conf.db")
self.db_socket = os.path.join(self.tmpdir, "db.sock")
self.db_pidfile = utils_misc.get_pid_path("ovsdb-server")
self.ovs_pidfile = utils_misc.get_pid_path("ovs-vswitchd")
self.dbschema = "/usr/share/openvswitch/vswitch.ovsschema"
self.cleanup = True
sm = ServiceManager()
# Stop system openvswitch
try:
sm.stop("openvswitch")
except process.CmdError:
pass
linux_modules.load_module("openvswitch")
self.clean()
if os.path.exists(self.db_path):
os.remove(self.db_path)
self.init_db()
self.start_ovs_vswitchd()
def clean(self):
logging.debug("Killall ovsdb-server")
utils_misc.signal_program("ovsdb-server")
if utils_misc.program_is_alive("ovsdb-server"):
utils_misc.signal_program("ovsdb-server", signal.SIGKILL)
logging.debug("Killall ovs-vswitchd")
utils_misc.signal_program("ovs-vswitchd")
if utils_misc.program_is_alive("ovs-vswitchd"):
utils_misc.signal_program("ovs-vswitchd", signal.SIGKILL)
|
lmr/avocado-vt
|
virttest/openvswitch.py
|
Python
|
gpl-2.0
| 17,049
|
[
"ASE"
] |
18fca03a35e4af6bfabd154da2b244ea3f3769a159491bb4ea2abe46cde89b71
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
UCASSCF (CASSCF without spin-degeneracy between alpha and beta orbitals)
1-step optimization algorithm
'''
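# Illustrative entry point (a sketch; `mf` is an assumed, already-converged
# UHF object):
#
# >>> mc = UCASSCF(mf, ncas=4, nelecas=(2, 2))
# >>> mc.kernel()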
import time
import copy
from functools import reduce
import numpy
import scipy.linalg
import pyscf.gto
import pyscf.scf
from pyscf.lib import logger
from pyscf.mcscf import ucasci
from pyscf.mcscf.mc1step import expmat, rotate_orb_cc
from pyscf.mcscf import umc_ao2mo
from pyscf.mcscf import chkfile
from pyscf import __config__
#FIXME: when the numbers of alpha and beta core orbitals differ,
# the convergence is very unstable and slow
# gradients, hessian operator and hessian diagonal
def gen_g_hop(casscf, mo, u, casdm1s, casdm2s, eris):
ncas = casscf.ncas
ncore = casscf.ncore
nocc = (ncas + ncore[0], ncas + ncore[1])
nmo = casscf.mo_coeff[0].shape[1]
dm1 = numpy.zeros((2,nmo,nmo))
idx = numpy.arange(ncore[0])
dm1[0,idx,idx] = 1
idx = numpy.arange(ncore[1])
dm1[1,idx,idx] = 1
dm1[0,ncore[0]:nocc[0],ncore[0]:nocc[0]] = casdm1s[0]
dm1[1,ncore[1]:nocc[1],ncore[1]:nocc[1]] = casdm1s[1]
# part2, part3
vhf_c = eris.vhf_c
vhf_ca = (vhf_c[0] + numpy.einsum('uvpq,uv->pq', eris.aapp, casdm1s[0]) \
- numpy.einsum('upqv,uv->pq', eris.appa, casdm1s[0]) \
+ numpy.einsum('uvpq,uv->pq', eris.AApp, casdm1s[1]),
vhf_c[1] + numpy.einsum('uvpq,uv->pq', eris.aaPP, casdm1s[0]) \
+ numpy.einsum('uvpq,uv->pq', eris.AAPP, casdm1s[1]) \
- numpy.einsum('upqv,uv->pq', eris.APPA, casdm1s[1]),)
################# gradient #################
hdm2 = [ numpy.einsum('tuvw,vwpq->tupq', casdm2s[0], eris.aapp) \
+ numpy.einsum('tuvw,vwpq->tupq', casdm2s[1], eris.AApp),
numpy.einsum('vwtu,vwpq->tupq', casdm2s[1], eris.aaPP) \
+ numpy.einsum('tuvw,vwpq->tupq', casdm2s[2], eris.AAPP)]
hcore = casscf.get_hcore()
h1e_mo = (reduce(numpy.dot, (mo[0].T, hcore[0], mo[0])),
reduce(numpy.dot, (mo[1].T, hcore[1], mo[1])))
g = [numpy.dot(h1e_mo[0], dm1[0]),
numpy.dot(h1e_mo[1], dm1[1])]
def gpart(m):
g[m][:,:ncore[m]] += vhf_ca[m][:,:ncore[m]]
g[m][:,ncore[m]:nocc[m]] += \
numpy.einsum('vuuq->qv', hdm2[m][:,:,ncore[m]:nocc[m]]) \
+ numpy.dot(vhf_c[m][:,ncore[m]:nocc[m]], casdm1s[m])
gpart(0)
gpart(1)
def gorb_update(u, fcivec):
r0 = casscf.pack_uniq_var(u)
return g_orb + h_op(r0)
############## hessian, diagonal ###########
# part1
tmp = casdm2s[0].transpose(1,2,0,3) + casdm2s[0].transpose(0,2,1,3)
hdm2apap = numpy.einsum('uvtw,tpqw->upvq', tmp, eris.appa)
hdm2apap += hdm2[0].transpose(0,2,1,3)
hdm2[0] = hdm2apap
tmp = casdm2s[1].transpose(1,2,0,3) + casdm2s[1].transpose(0,2,1,3)
# (jp|RK) *[e(jq,SK) + e(jq,LS)] => qSpR
hdm2apAP = numpy.einsum('uvtw,tpqw->upvq', tmp, eris.apPA)
# (JP|rk) *[e(sk,JQ) + e(ls,JQ)] => QsPr
#hdm2APap = hdm2apAP.transpose(2,3,0,1)
tmp = casdm2s[2].transpose(1,2,0,3) + casdm2s[2].transpose(0,2,1,3)
hdm2APAP = numpy.einsum('uvtw,tpqw->upvq', tmp, eris.APPA)
hdm2APAP += hdm2[1].transpose(0,2,1,3)
hdm2[1] = hdm2APAP
# part7
# h_diag[0] ~ alpha-alpha
h_diag = [numpy.einsum('ii,jj->ij', h1e_mo[0], dm1[0]) - h1e_mo[0] * dm1[0],
numpy.einsum('ii,jj->ij', h1e_mo[1], dm1[1]) - h1e_mo[1] * dm1[1]]
h_diag[0] = h_diag[0] + h_diag[0].T
h_diag[1] = h_diag[1] + h_diag[1].T
# part8
idx = numpy.arange(nmo)
g_diag = g[0].diagonal()
h_diag[0] -= g_diag + g_diag.reshape(-1,1)
h_diag[0][idx,idx] += g_diag * 2
g_diag = g[1].diagonal()
h_diag[1] -= g_diag + g_diag.reshape(-1,1)
h_diag[1][idx,idx] += g_diag * 2
# part2, part3
def fpart2(m):
v_diag = vhf_ca[m].diagonal() # (pr|kl) * e(sq,lk)
h_diag[m][:,:ncore[m]] += v_diag.reshape(-1,1)
h_diag[m][:ncore[m]] += v_diag
idx = numpy.arange(ncore[m])
# (V_{qr} delta_{ps} + V_{ps} delta_{qr}) delta_{pr} delta_{sq}
h_diag[m][idx,idx] -= v_diag[:ncore[m]] * 2
fpart2(0)
fpart2(1)
def fpart3(m):
# V_{pr} e_{sq}
tmp = numpy.einsum('ii,jj->ij', vhf_c[m], casdm1s[m])
h_diag[m][:,ncore[m]:nocc[m]] += tmp
h_diag[m][ncore[m]:nocc[m],:] += tmp.T
tmp = -vhf_c[m][ncore[m]:nocc[m],ncore[m]:nocc[m]] * casdm1s[m]
h_diag[m][ncore[m]:nocc[m],ncore[m]:nocc[m]] += tmp + tmp.T
fpart3(0)
fpart3(1)
# part4
def fpart4(jkcpp, m):
# (qp|rs)-(pr|sq) rp in core
tmp = -numpy.einsum('cpp->cp', jkcpp)
# (qp|sr) - (qr|sp) rp in core => 0
h_diag[m][:ncore[m],:] += tmp
h_diag[m][:,:ncore[m]] += tmp.T
h_diag[m][:ncore[m],:ncore[m]] -= tmp[:,:ncore[m]] * 2
fpart4(eris.jkcpp, 0)
fpart4(eris.jkcPP, 1)
# part5 and part6 diag
#+(qr|kp) e_s^k p in core, sk in active
#+(qr|sl) e_l^p s in core, pl in active
#-(qj|sr) e_j^p s in core, jp in active
#-(qp|kr) e_s^k p in core, sk in active
#+(qj|rs) e_j^p s in core, jp in active
#+(qp|rl) e_l^s p in core, ls in active
#-(qs|rl) e_l^p s in core, lp in active
#-(qj|rp) e_j^s p in core, js in active
def fpart5(jkcpp, m):
jkcaa = jkcpp[:,ncore[m]:nocc[m],ncore[m]:nocc[m]]
tmp = -2 * numpy.einsum('jik,ik->ji', jkcaa, casdm1s[m])
h_diag[m][:ncore[m],ncore[m]:nocc[m]] -= tmp
h_diag[m][ncore[m]:nocc[m],:ncore[m]] -= tmp.T
fpart5(eris.jkcpp, 0)
fpart5(eris.jkcPP, 1)
def fpart1(m):
v_diag = numpy.einsum('ijij->ij', hdm2[m])
h_diag[m][ncore[m]:nocc[m],:] += v_diag
h_diag[m][:,ncore[m]:nocc[m]] += v_diag.T
fpart1(0)
fpart1(1)
g_orb = casscf.pack_uniq_var((g[0]-g[0].T, g[1]-g[1].T))
h_diag = casscf.pack_uniq_var(h_diag)
def h_op(x):
x1a, x1b = casscf.unpack_uniq_var(x)
xa_cu = x1a[:ncore[0],ncore[0]:]
xa_av = x1a[ncore[0]:nocc[0],nocc[0]:]
xa_ac = x1a[ncore[0]:nocc[0],:ncore[0]]
xb_cu = x1b[:ncore[1],ncore[1]:]
xb_av = x1b[ncore[1]:nocc[1],nocc[1]:]
xb_ac = x1b[ncore[1]:nocc[1],:ncore[1]]
# part7
x2a = reduce(numpy.dot, (h1e_mo[0], x1a, dm1[0]))
x2b = reduce(numpy.dot, (h1e_mo[1], x1b, dm1[1]))
# part8, the hessian gives
#x2a -= numpy.dot(g[0], x1a)
#x2b -= numpy.dot(g[1], x1b)
# it may ruin the hermiticity of the hessian unless g == g.T, so symmetrize it
# x_{pq} -= g_{pr} \delta_{qs} x_{rs} * .5
# x_{rs} -= g_{rp} \delta_{sq} x_{pq} * .5
x2a -= numpy.dot(g[0].T, x1a)
x2b -= numpy.dot(g[1].T, x1b)
# part2
x2a[:ncore[0]] += numpy.dot(xa_cu, vhf_ca[0][ncore[0]:])
x2b[:ncore[1]] += numpy.dot(xb_cu, vhf_ca[1][ncore[1]:])
# part3
def fpart3(m, x2, x_av, x_ac):
x2[ncore[m]:nocc[m]] += reduce(numpy.dot, (casdm1s[m], x_av, vhf_c[m][nocc[m]:])) \
+ reduce(numpy.dot, (casdm1s[m], x_ac, vhf_c[m][:ncore[m]]))
fpart3(0, x2a, xa_av, xa_ac)
fpart3(1, x2b, xb_av, xb_ac)
# part1
x2a[ncore[0]:nocc[0]] += numpy.einsum('upvr,vr->up', hdm2apap, x1a[ncore[0]:nocc[0]])
x2a[ncore[0]:nocc[0]] += numpy.einsum('upvr,vr->up', hdm2apAP, x1b[ncore[1]:nocc[1]])
x2b[ncore[1]:nocc[1]] += numpy.einsum('vrup,vr->up', hdm2apAP, x1a[ncore[0]:nocc[0]])
x2b[ncore[1]:nocc[1]] += numpy.einsum('upvr,vr->up', hdm2APAP, x1b[ncore[1]:nocc[1]])
# part4, part5, part6
if ncore[0] > 0 or ncore[1] > 0:
va, vc = casscf.update_jk_in_ah(mo, (x1a,x1b), casdm1s, eris)
x2a[ncore[0]:nocc[0]] += va[0]
x2b[ncore[1]:nocc[1]] += va[1]
x2a[:ncore[0],ncore[0]:] += vc[0]
x2b[:ncore[1],ncore[1]:] += vc[1]
x2a = x2a - x2a.T
x2b = x2b - x2b.T
return casscf.pack_uniq_var((x2a,x2b))
return g_orb, gorb_update, h_op, h_diag
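# Note: the tuple returned above (packed orbital gradient, gradient
# updater for a trial rotation u, Hessian-vector product, and Hessian
# diagonal) is consumed by rotate_orb_cc during the orbital
# micro-iterations of kernel() below.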
def kernel(casscf, mo_coeff, tol=1e-7, conv_tol_grad=None,
ci0=None, callback=None, verbose=None, dump_chk=True):
if verbose is None:
verbose = casscf.verbose
log = logger.Logger(casscf.stdout, verbose)
cput0 = (time.clock(), time.time())
log.debug('Start 1-step CASSCF')
mo = mo_coeff
nmo = mo[0].shape[1]
#TODO: lazy evaluate eris, to leave enough memory for FCI solver
eris = casscf.ao2mo(mo)
e_tot, e_cas, fcivec = casscf.casci(mo, ci0, eris, log, locals())
if casscf.ncas == nmo and not casscf.internal_rotation:
return True, e_tot, e_cas, fcivec, mo
if conv_tol_grad is None:
conv_tol_grad = numpy.sqrt(tol)
logger.info(casscf, 'Set conv_tol_grad to %g', conv_tol_grad)
conv_tol_ddm = conv_tol_grad * 3
conv = False
totmicro = totinner = 0
norm_gorb = norm_gci = 0
de, elast = e_tot, e_tot
r0 = None
t1m = log.timer('Initializing 1-step CASSCF', *cput0)
casdm1, casdm2 = casscf.fcisolver.make_rdm12s(fcivec, casscf.ncas, casscf.nelecas)
norm_ddm = 1e2
casdm1_last = casdm1
t3m = t2m = log.timer('CAS DM', *t1m)
imacro = 0
while not conv and imacro < casscf.max_cycle_macro:
imacro += 1
max_cycle_micro = casscf.micro_cycle_scheduler(locals())
max_stepsize = casscf.max_stepsize_scheduler(locals())
imicro = 0
rota = casscf.rotate_orb_cc(mo, lambda:fcivec, lambda:casdm1, lambda:casdm2,
eris, r0, conv_tol_grad*.3, max_stepsize, log)
for u, g_orb, njk, r0 in rota:
imicro += 1
norm_gorb = numpy.linalg.norm(g_orb)
if imicro == 1:
norm_gorb0 = norm_gorb
norm_t = numpy.linalg.norm(u-numpy.eye(nmo))
if imicro >= max_cycle_micro:
log.debug('micro %d |u-1|=%5.3g |g[o]|=%5.3g ',
imicro, norm_t, norm_gorb)
break
casdm1, casdm2, gci, fcivec = casscf.update_casdm(mo, u, fcivec, e_cas, eris)
norm_ddm =(numpy.linalg.norm(casdm1[0] - casdm1_last[0])
+ numpy.linalg.norm(casdm1[1] - casdm1_last[1]))
t3m = log.timer('update CAS DM', *t3m)
if isinstance(gci, numpy.ndarray):
norm_gci = numpy.linalg.norm(gci)
log.debug('micro %d |u-1|=%5.3g |g[o]|=%5.3g |g[c]|=%5.3g |ddm|=%5.3g',
imicro, norm_t, norm_gorb, norm_gci, norm_ddm)
else:
norm_gci = None
log.debug('micro %d |u-1|=%5.3g |g[o]|=%5.3g |g[c]|=%s |ddm|=%5.3g',
imicro, norm_t, norm_gorb, norm_gci, norm_ddm)
if callable(callback):
callback(locals())
t3m = log.timer('micro iter %d'%imicro, *t3m)
if (norm_t < 1e-4 or
(norm_gorb < conv_tol_grad*.5 and norm_ddm < conv_tol_ddm*.4)):
break
rota.close()
rota = None
totmicro += imicro
totinner += njk
eris = None
u = copy.copy(u)
g_orb = copy.copy(g_orb)
mo = casscf.rotate_mo(mo, u, log)
eris = casscf.ao2mo(mo)
t2m = log.timer('update eri', *t3m)
e_tot, e_cas, fcivec = casscf.casci(mo, fcivec, eris, log, locals())
casdm1, casdm2 = casscf.fcisolver.make_rdm12s(fcivec, casscf.ncas, casscf.nelecas)
norm_ddm =(numpy.linalg.norm(casdm1[0] - casdm1_last[0])
+ numpy.linalg.norm(casdm1[1] - casdm1_last[1]))
casdm1_last = casdm1
log.timer('CASCI solver', *t2m)
t2m = t1m = log.timer('macro iter %d'%imacro, *t1m)
de, elast = e_tot - elast, e_tot
if (abs(de) < tol
and (norm_gorb0 < conv_tol_grad and norm_ddm < conv_tol_ddm)):
conv = True
if dump_chk:
casscf.dump_chk(locals())
if callable(callback):
callback(locals())
if conv:
log.info('1-step CASSCF converged in %d macro (%d JK %d micro) steps',
imacro+1, totinner, totmicro)
else:
log.info('1-step CASSCF not converged, %d macro (%d JK %d micro) steps',
imacro+1, totinner, totmicro)
log.timer('1-step CASSCF', *cput0)
return conv, e_tot, e_cas, fcivec, mo
class UCASSCF(ucasci.UCASCI):
max_stepsize = getattr(__config__, 'mcscf_umc1step_UCASSCF_max_stepsize', .02)
max_cycle_macro = getattr(__config__, 'mcscf_umc1step_UCASSCF_max_cycle_macro', 50)
max_cycle_micro = getattr(__config__, 'mcscf_umc1step_UCASSCF_max_cycle_micro', 4)
conv_tol = getattr(__config__, 'mcscf_umc1step_UCASSCF_conv_tol', 1e-7)
conv_tol_grad = getattr(__config__, 'mcscf_umc1step_UCASSCF_conv_tol_grad', None)
# for augmented hessian
ah_level_shift = getattr(__config__, 'mcscf_umc1step_UCASSCF_ah_level_shift', 1e-8)
ah_conv_tol = getattr(__config__, 'mcscf_umc1step_UCASSCF_ah_conv_tol', 1e-12)
ah_max_cycle = getattr(__config__, 'mcscf_umc1step_UCASSCF_ah_max_cycle', 30)
ah_lindep = getattr(__config__, 'mcscf_umc1step_UCASSCF_ah_lindep', 1e-14)
ah_start_tol = getattr(__config__, 'mcscf_umc1step_UCASSCF_ah_start_tol', 2.5)
ah_start_cycle = getattr(__config__, 'mcscf_umc1step_UCASSCF_ah_start_cycle', 3)
ah_grad_trust_region = getattr(__config__, 'mcscf_umc1step_UCASSCF_ah_grad_trust_region', 3.0)
internal_rotation = getattr(__config__, 'mcscf_umc1step_UCASSCF_internal_rotation', False)
ci_response_space = getattr(__config__, 'mcscf_umc1step_UCASSCF_ci_response_space', 4)
with_dep4 = getattr(__config__, 'mcscf_umc1step_UCASSCF_with_dep4', False)
chk_ci = getattr(__config__, 'mcscf_umc1step_UCASSCF_chk_ci', False)
kf_interval = getattr(__config__, 'mcscf_umc1step_UCASSCF_kf_interval', 4)
kf_trust_region = getattr(__config__, 'mcscf_umc1step_UCASSCF_kf_trust_region', 3.0)
natorb = getattr(__config__, 'mcscf_umc1step_UCASSCF_natorb', False)
#canonicalization = getattr(__config__, 'mcscf_umc1step_UCASSCF_canonicalization', True)
#sorting_mo_energy = getattr(__config__, 'mcscf_umc1step_UCASSCF_sorting_mo_energy', False)
def __init__(self, mf_or_mol, ncas, nelecas, ncore=None, frozen=None):
ucasci.UCASCI.__init__(self, mf_or_mol, ncas, nelecas, ncore)
self.frozen = frozen
self.callback = None
self.chkfile = self._scf.chkfile
self.fcisolver.max_cycle = getattr(__config__,
'mcscf_umc1step_UCASSCF_fcisolver_max_cycle', 50)
self.fcisolver.conv_tol = getattr(__config__,
'mcscf_umc1step_UCASSCF_fcisolver_conv_tol', 1e-8)
##################################################
# don't modify the following attributes, they are not input options
self.e_tot = None
self.e_cas = None
self.ci = None
self.mo_coeff = self._scf.mo_coeff
self.converged = False
self._max_stepsize = None
keys = set(('max_stepsize', 'max_cycle_macro', 'max_cycle_micro',
'conv_tol', 'conv_tol_grad', 'ah_level_shift',
'ah_conv_tol', 'ah_max_cycle', 'ah_lindep',
'ah_start_tol', 'ah_start_cycle', 'ah_grad_trust_region',
'internal_rotation', 'ci_response_space',
'with_dep4', 'chk_ci',
'kf_interval', 'kf_trust_region', 'fcisolver_max_cycle',
'fcisolver_conv_tol', 'natorb', 'canonicalization',
'sorting_mo_energy'))
self._keys = set(self.__dict__.keys()).union(keys)
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('')
log.info('******** UHF-CASSCF flags ********')
nmo = self.mo_coeff[0].shape[1]
ncore = self.ncore
ncas = self.ncas
nvir_alpha = nmo - ncore[0] - ncas
nvir_beta = nmo - ncore[1] - ncas
log.info('CAS (%de+%de, %do), ncore = [%d+%d], nvir = [%d+%d]',
self.nelecas[0], self.nelecas[1], ncas,
ncore[0], ncore[1], nvir_alpha, nvir_beta)
if ncore[0] != ncore[1]:
log.warn('convergence might be slow since num alpha core %d != num beta core %d',
ncore[0], ncore[1])
if self.frozen is not None:
log.info('frozen orbitals %s', str(self.frozen))
log.info('max. macro cycles = %d', self.max_cycle_macro)
log.info('max. micro cycles = %d', self.max_cycle_micro)
log.info('conv_tol = %g', self.conv_tol)
log.info('conv_tol_grad = %s', self.conv_tol_grad)
log.info('max. orb step = %g', self.max_stepsize)
log.info('augmented hessian max_cycle = %d', self.ah_max_cycle)
log.info('augmented hessian conv_tol = %g', self.ah_conv_tol)
log.info('augmented hessian linear dependence = %g', self.ah_lindep)
log.info('augmented hessian level shift = %g', self.ah_level_shift)
log.info('augmented hessian start_tol = %g', self.ah_start_tol)
log.info('augmented hessian start_cycle = %d', self.ah_start_cycle)
log.info('augmented hessian grad_trust_region = %g', self.ah_grad_trust_region)
log.info('kf_trust_region = %g', self.kf_trust_region)
log.info('kf_interval = %d', self.kf_interval)
log.info('ci_response_space = %d', self.ci_response_space)
#log.info('diis = %s', self.diis)
log.info('chkfile = %s', self.chkfile)
#log.info('natorb = %s', self.natorb)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, pyscf.lib.current_memory()[0])
log.info('internal_rotation = %s', self.internal_rotation)
try:
self.fcisolver.dump_flags(self.verbose)
except AttributeError:
pass
def kernel(self, mo_coeff=None, ci0=None, callback=None, _kern=kernel):
if mo_coeff is None:
mo_coeff = self.mo_coeff
else:
self.mo_coeff = mo_coeff
if callback is None: callback = self.callback
if self.verbose >= logger.WARN:
self.check_sanity()
self.dump_flags()
self.converged, self.e_tot, self.e_cas, self.ci, self.mo_coeff = \
_kern(self, mo_coeff,
tol=self.conv_tol, conv_tol_grad=self.conv_tol_grad,
ci0=ci0, callback=callback, verbose=self.verbose)
logger.note(self, 'UCASSCF energy = %.15g', self.e_tot)
#if self.verbose >= logger.INFO:
# self.analyze(mo_coeff, self.ci, verbose=self.verbose)
self._finalize()
return self.e_tot, self.e_cas, self.ci, self.mo_coeff
def mc1step(self, mo_coeff=None, ci0=None, callback=None):
return self.kernel(mo_coeff, ci0, callback)
def mc2step(self, mo_coeff=None, ci0=None, callback=None):
from pyscf.mcscf import umc2step
return self.kernel(mo_coeff, ci0, callback, umc2step.kernel)
def get_h2eff(self, mo_coeff=None):
'''Compute the active-space two-particle Hamiltonian.
'''
return self.get_h2cas(mo_coeff)
def get_h2cas(self, mo_coeff=None):
return ucasci.UCASCI.ao2mo(self, mo_coeff)
def casci(self, mo_coeff, ci0=None, eris=None, verbose=None, envs=None):
if eris is None:
fcasci = copy.copy(self)
fcasci.ao2mo = self.get_h2cas
else:
fcasci = _fake_h_for_fast_casci(self, mo_coeff, eris)
log = logger.new_logger(self, verbose)
e_tot, e_cas, fcivec = ucasci.kernel(fcasci, mo_coeff, ci0, log)
if envs is not None and log.verbose >= logger.INFO:
log.debug('CAS space CI energy = %.15g', e_cas)
if 'imicro' in envs: # Within CASSCF iteration
log.info('macro iter %d (%d JK %d micro), '
'UCASSCF E = %.15g dE = %.8g',
envs['imacro'], envs['njk'], envs['imicro'],
e_tot, e_tot-envs['elast'])
if 'norm_gci' in envs:
log.info(' |grad[o]|=%5.3g '
'|grad[c]|= %s |ddm|=%5.3g',
envs['norm_gorb0'],
envs['norm_gci'], envs['norm_ddm'])
else:
log.info(' |grad[o]|=%5.3g |ddm|=%5.3g',
envs['norm_gorb0'], envs['norm_ddm'])
else: # Initialization step
log.info('UCASCI E = %.15g', e_tot)
return e_tot, e_cas, fcivec
def uniq_var_indices(self, nmo, ncore, ncas, frozen):
nocc = ncore + ncas
mask = numpy.zeros((nmo,nmo),dtype=bool)
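# mark the independent rotation parameters: the active-core and virtual-occupied blocks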
mask[ncore:nocc,:ncore] = True
mask[nocc:,:nocc] = True
if self.internal_rotation:
raise NotImplementedError('internal_rotation')
if frozen is not None:
if isinstance(frozen, (int, numpy.integer)):
mask[:frozen] = mask[:,:frozen] = False
else:
mask[frozen] = mask[:,frozen] = False
return mask
def pack_uniq_var(self, mat):
nmo = self.mo_coeff[0].shape[1]
ncore = self.ncore
ncas = self.ncas
idxa = self.uniq_var_indices(nmo, ncore[0], ncas, self.frozen)
idxb = self.uniq_var_indices(nmo, ncore[1], ncas, self.frozen)
return numpy.hstack((mat[0][idxa], mat[1][idxb]))
# unpack the variables to an anti-symmetric matrix
def unpack_uniq_var(self, v):
nmo = self.mo_coeff[0].shape[1]
ncore = self.ncore
ncas = self.ncas
idx = numpy.empty((2,nmo,nmo), dtype=bool)
idx[0] = self.uniq_var_indices(nmo, ncore[0], ncas, self.frozen)
idx[1] = self.uniq_var_indices(nmo, ncore[1], ncas, self.frozen)
mat = numpy.zeros((2,nmo,nmo))
mat[idx] = v
mat[0] = mat[0] - mat[0].T
mat[1] = mat[1] - mat[1].T
return mat
def update_rotate_matrix(self, dx, u0=1):
if isinstance(u0, int) and u0 == 1:
u0 = (1,1)
dr = self.unpack_uniq_var(dx)
ua = numpy.dot(u0[0], expmat(dr[0]))
ub = numpy.dot(u0[1], expmat(dr[1]))
return (ua, ub)
def gen_g_hop(self, *args):
return gen_g_hop(self, *args)
rotate_orb_cc = rotate_orb_cc
def ao2mo(self, mo_coeff=None):
if mo_coeff is None: mo_coeff = self.mo_coeff
# nmo = mo[0].shape[1]
# ncore = self.ncore
# ncas = self.ncas
# nocc = (ncas + ncore[0], ncas + ncore[1])
# eriaa = pyscf.ao2mo.incore.full(self._scf._eri, mo[0])
# eriab = pyscf.ao2mo.incore.general(self._scf._eri, (mo[0],mo[0],mo[1],mo[1]))
# eribb = pyscf.ao2mo.incore.full(self._scf._eri, mo[1])
# eriaa = pyscf.ao2mo.restore(1, eriaa, nmo)
# eriab = pyscf.ao2mo.restore(1, eriab, nmo)
# eribb = pyscf.ao2mo.restore(1, eribb, nmo)
# eris = lambda:None
# eris.jkcpp = numpy.einsum('iipq->ipq', eriaa[:ncore[0],:ncore[0],:,:]) \
# - numpy.einsum('ipqi->ipq', eriaa[:ncore[0],:,:,:ncore[0]])
# eris.jkcPP = numpy.einsum('iipq->ipq', eribb[:ncore[1],:ncore[1],:,:]) \
# - numpy.einsum('ipqi->ipq', eribb[:ncore[1],:,:,:ncore[1]])
# eris.jC_pp = numpy.einsum('pqii->pq', eriab[:,:,:ncore[1],:ncore[1]])
# eris.jc_PP = numpy.einsum('iipq->pq', eriab[:ncore[0],:ncore[0],:,:])
# eris.aapp = numpy.copy(eriaa[ncore[0]:nocc[0],ncore[0]:nocc[0],:,:])
# eris.aaPP = numpy.copy(eriab[ncore[0]:nocc[0],ncore[0]:nocc[0],:,:])
# eris.AApp = numpy.copy(eriab[:,:,ncore[1]:nocc[1],ncore[1]:nocc[1]].transpose(2,3,0,1))
# eris.AAPP = numpy.copy(eribb[ncore[1]:nocc[1],ncore[1]:nocc[1],:,:])
# eris.appa = numpy.copy(eriaa[ncore[0]:nocc[0],:,:,ncore[0]:nocc[0]])
# eris.apPA = numpy.copy(eriab[ncore[0]:nocc[0],:,:,ncore[1]:nocc[1]])
# eris.APPA = numpy.copy(eribb[ncore[1]:nocc[1],:,:,ncore[1]:nocc[1]])
#
# eris.cvCV = numpy.copy(eriab[:ncore[0],ncore[0]:,:ncore[1],ncore[1]:])
# eris.Icvcv = eriaa[:ncore[0],ncore[0]:,:ncore[0],ncore[0]:] * 2\
# - eriaa[:ncore[0],:ncore[0],ncore[0]:,ncore[0]:].transpose(0,3,1,2) \
# - eriaa[:ncore[0],ncore[0]:,:ncore[0],ncore[0]:].transpose(0,3,2,1)
# eris.ICVCV = eribb[:ncore[1],ncore[1]:,:ncore[1],ncore[1]:] * 2\
# - eribb[:ncore[1],:ncore[1],ncore[1]:,ncore[1]:].transpose(0,3,1,2) \
# - eribb[:ncore[1],ncore[1]:,:ncore[1],ncore[1]:].transpose(0,3,2,1)
#
# eris.Iapcv = eriaa[ncore[0]:nocc[0],:,:ncore[0],ncore[0]:] * 2 \
# - eriaa[:,ncore[0]:,:ncore[0],ncore[0]:nocc[0]].transpose(3,0,2,1) \
# - eriaa[:,:ncore[0],ncore[0]:,ncore[0]:nocc[0]].transpose(3,0,1,2)
# eris.IAPCV = eribb[ncore[1]:nocc[1],:,:ncore[1],ncore[1]:] * 2 \
# - eribb[:,ncore[1]:,:ncore[1],ncore[1]:nocc[1]].transpose(3,0,2,1) \
# - eribb[:,:ncore[1],ncore[1]:,ncore[1]:nocc[1]].transpose(3,0,1,2)
# eris.apCV = numpy.copy(eriab[ncore[0]:nocc[0],:,:ncore[1],ncore[1]:])
# eris.APcv = numpy.copy(eriab[:ncore[0],ncore[0]:,ncore[1]:nocc[1],:].transpose(2,3,0,1))
# return eris
return umc_ao2mo._ERIS(self, mo_coeff)
def update_jk_in_ah(self, mo, r, casdm1s, eris):
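# JK-like response of the core/active potentials to an orbital rotation r
# (alpha/beta resolved), used when building the augmented-Hessian sigma vector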
ncas = self.ncas
ncore = self.ncore
nocc = (ncas + ncore[0], ncas + ncore[1])
ra, rb = r
vhf3ca = numpy.einsum('srqp,sr->qp', eris.Icvcv, ra[:ncore[0],ncore[0]:])
vhf3ca += numpy.einsum('qpsr,sr->qp', eris.cvCV, rb[:ncore[1],ncore[1]:]) * 2
vhf3cb = numpy.einsum('srqp,sr->qp', eris.ICVCV, rb[:ncore[1],ncore[1]:])
vhf3cb += numpy.einsum('srqp,sr->qp', eris.cvCV, ra[:ncore[0],ncore[0]:]) * 2
vhf3aa = numpy.einsum('kpsr,sr->kp', eris.Iapcv, ra[:ncore[0],ncore[0]:])
vhf3aa += numpy.einsum('kpsr,sr->kp', eris.apCV, rb[:ncore[1],ncore[1]:]) * 2
vhf3ab = numpy.einsum('kpsr,sr->kp', eris.IAPCV, rb[:ncore[1],ncore[1]:])
vhf3ab += numpy.einsum('kpsr,sr->kp', eris.APcv, ra[:ncore[0],ncore[0]:]) * 2
dm4 = (numpy.dot(casdm1s[0], ra[ncore[0]:nocc[0]]),
numpy.dot(casdm1s[1], rb[ncore[1]:nocc[1]]))
vhf4a = numpy.einsum('krqp,kr->qp', eris.Iapcv, dm4[0])
vhf4a += numpy.einsum('krqp,kr->qp', eris.APcv, dm4[1]) * 2
vhf4b = numpy.einsum('krqp,kr->qp', eris.IAPCV, dm4[1])
vhf4b += numpy.einsum('krqp,kr->qp', eris.apCV, dm4[0]) * 2
va = (numpy.dot(casdm1s[0], vhf3aa), numpy.dot(casdm1s[1], vhf3ab))
vc = (vhf3ca + vhf4a, vhf3cb + vhf4b)
return va, vc
def update_casdm(self, mo, u, fcivec, e_cas, eris):
ecore, h1cas, h2cas = self.approx_cas_integral(mo, u, eris)
ci1, g = self.solve_approx_ci(h1cas, h2cas, fcivec, ecore, e_cas)
casdm1, casdm2 = self.fcisolver.make_rdm12s(ci1, self.ncas, self.nelecas)
return casdm1, casdm2, g, ci1
def approx_cas_integral(self, mo, u, eris):
ncas = self.ncas
nelecas = self.nelecas
ncore = self.ncore
nocc = (ncas + ncore[0], ncas + ncore[1])
nmo = mo[0].shape[1]
rmat = u - numpy.eye(nmo)
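# rmat is the first-order orbital change; the CAS integrals below are expanded to first order in this rotation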
mocas = (mo[0][:,ncore[0]:nocc[0]], mo[1][:,ncore[1]:nocc[1]])
hcore = self.get_hcore()
h1effa = reduce(numpy.dot, (rmat[0][:,:nocc[0]].T, mo[0].T,
hcore[0], mo[0][:,:nocc[0]]))
h1effb = reduce(numpy.dot, (rmat[1][:,:nocc[1]].T, mo[1].T,
hcore[1], mo[1][:,:nocc[1]]))
h1effa = h1effa + h1effa.T
h1effb = h1effb + h1effb.T
aapc = eris.aapp[:,:,:,:ncore[0]]
aaPC = eris.aaPP[:,:,:,:ncore[1]]
AApc = eris.AApp[:,:,:,:ncore[0]]
AAPC = eris.AAPP[:,:,:,:ncore[1]]
apca = eris.appa[:,:,:ncore[0],:]
APCA = eris.APPA[:,:,:ncore[1],:]
jka = numpy.einsum('iup->up', eris.jkcpp[:,:nocc[0]]) + eris.jC_pp[:nocc[0]]
v1a =(numpy.einsum('up,pv->uv', jka[ncore[0]:], rmat[0][:,ncore[0]:nocc[0]])
+ numpy.einsum('uvpi,pi->uv', aapc-apca.transpose(0,3,1,2), rmat[0][:,:ncore[0]])
+ numpy.einsum('uvpi,pi->uv', aaPC, rmat[1][:,:ncore[1]]))
jkb = numpy.einsum('iup->up', eris.jkcPP[:,:nocc[1]]) + eris.jc_PP[:nocc[1]]
v1b =(numpy.einsum('up,pv->uv', jkb[ncore[1]:], rmat[1][:,ncore[1]:nocc[1]])
+ numpy.einsum('uvpi,pi->uv', AApc, rmat[0][:,:ncore[0]])
+ numpy.einsum('uvpi,pi->uv', AAPC-APCA.transpose(0,3,1,2), rmat[1][:,:ncore[1]]))
h1casa =(h1effa[ncore[0]:,ncore[0]:] + (v1a + v1a.T)
+ reduce(numpy.dot, (mocas[0].T, hcore[0], mocas[0]))
+ eris.vhf_c[0][ncore[0]:nocc[0],ncore[0]:nocc[0]])
h1casb =(h1effb[ncore[1]:,ncore[1]:] + (v1b + v1b.T)
+ reduce(numpy.dot, (mocas[1].T, hcore[1], mocas[1]))
+ eris.vhf_c[1][ncore[1]:nocc[1],ncore[1]:nocc[1]])
h1cas = (h1casa, h1casb)
aaap = eris.aapp[:,:,ncore[0]:nocc[0],:]
aaAP = eris.aaPP[:,:,ncore[1]:nocc[1],:]
AAap = eris.AApp[:,:,ncore[1]:nocc[1],:]
AAAP = eris.AAPP[:,:,ncore[1]:nocc[1],:]
aaaa = numpy.einsum('tuvp,pw->tuvw', aaap, rmat[0][:,ncore[0]:nocc[0]])
aaaa = aaaa + aaaa.transpose(0,1,3,2)
aaaa = aaaa + aaaa.transpose(2,3,0,1)
aaaa += aaap[:,:,:,ncore[0]:nocc[0]]
AAAA = numpy.einsum('tuvp,pw->tuvw', AAAP, rmat[1][:,ncore[1]:nocc[1]])
AAAA = AAAA + AAAA.transpose(0,1,3,2)
AAAA = AAAA + AAAA.transpose(2,3,0,1)
AAAA += AAAP[:,:,:,ncore[1]:nocc[1]]
tmp = (numpy.einsum('vwtp,pu->tuvw', AAap, rmat[0][:,ncore[0]:nocc[0]]),
numpy.einsum('tuvp,pw->tuvw', aaAP, rmat[1][:,ncore[1]:nocc[1]]))
aaAA =(tmp[0] + tmp[0].transpose(1,0,2,3)
+ tmp[1] + tmp[1].transpose(0,1,3,2))
aaAA += aaAP[:,:,:,ncore[1]:nocc[1]]
# pure core response
ecore =(h1effa[:ncore[0]].trace() + h1effb[:ncore[1]].trace()
+ numpy.einsum('jp,pj->', jka[:ncore[0]], rmat[0][:,:ncore[0]])*2
+ numpy.einsum('jp,pj->', jkb[:ncore[1]], rmat[1][:,:ncore[1]])*2)
return ecore, h1cas, (aaaa, aaAA, AAAA)
def solve_approx_ci(self, h1, h2, ci0, ecore, e_cas):
''' Solve CI eigenvalue/response problem approximately
'''
ncas = self.ncas
nelecas = self.nelecas
ncore = self.ncore
nocc = (ncas + ncore[0], ncas + ncore[1])
if getattr(self.fcisolver, 'approx_kernel', None):
ci1 = self.fcisolver.approx_kernel(h1, h2, ncas, nelecas, ci0=ci0)[1]
return ci1, None
h2eff = self.fcisolver.absorb_h1e(h1, h2, ncas, nelecas, .5)
hc = self.fcisolver.contract_2e(h2eff, ci0, ncas, nelecas).ravel()
g = hc - (e_cas-ecore) * ci0.ravel()
if self.ci_response_space > 6:
logger.debug(self, 'CI step by full response')
# full response
e, ci1 = self.fcisolver.kernel(h1, h2, ncas, nelecas, ci0=ci0,
max_memory=self.max_memory)
else:
nd = min(max(self.ci_response_space, 2), ci0.size)
logger.debug(self, 'CI step by %dD subspace response', nd)
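# build a small Krylov-style subspace from repeated Hamiltonian applications
# and solve the projected generalized eigenproblem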
xs = [ci0.ravel()]
ax = [hc]
heff = numpy.empty((nd,nd))
seff = numpy.empty((nd,nd))
heff[0,0] = numpy.dot(xs[0], ax[0])
seff[0,0] = 1
for i in range(1, nd):
xs.append(ax[i-1] - xs[i-1] * e_cas)
ax.append(self.fcisolver.contract_2e(h2eff, xs[i], ncas,
nelecas).ravel())
for j in range(i+1):
heff[i,j] = heff[j,i] = numpy.dot(xs[i], ax[j])
seff[i,j] = seff[j,i] = numpy.dot(xs[i], xs[j])
e, v = pyscf.lib.safe_eigh(heff, seff)[:2]
ci1 = 0
for i in range(nd):
ci1 += xs[i] * v[i,0]
return ci1, g
def dump_chk(self, envs):
if not self.chkfile:
return self
if self.chk_ci:
civec = envs['fcivec']
else:
civec = None
ncore = self.ncore
ncas = self.ncas
nocca = ncore[0] + ncas
noccb = ncore[1] + ncas
if 'mo' in envs:
mo_coeff = envs['mo']
else:
mo_coeff = envs['mo_coeff']
mo_occ = numpy.zeros((2,envs['mo'][0].shape[1]))
mo_occ[0,:ncore[0]] = 1
mo_occ[1,:ncore[1]] = 1
if self.natorb:
occa, ucas = self._eig(-envs['casdm1'][0], ncore[0], nocca)
occb, ucas = self._eig(-envs['casdm1'][1], ncore[1], noccb)
mo_occ[0,ncore[0]:nocca] = -occa
mo_occ[1,ncore[1]:noccb] = -occb
else:
mo_occ[0,ncore[0]:nocca] = envs['casdm1'][0].diagonal()
mo_occ[1,ncore[1]:noccb] = envs['casdm1'][1].diagonal()
mo_energy = 'None'
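# orbital energies are not available at this point; a placeholder string is stored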
chkfile.dump_mcscf(self, self.chkfile, 'mcscf', envs['e_tot'],
mo_coeff, ncore, ncas, mo_occ,
mo_energy, envs['e_cas'], civec, envs['casdm1'],
overwrite_mol=False)
return self
def rotate_mo(self, mo, u, log=None):
'''Rotate orbitals with the given unitary matrix'''
mo_a = numpy.dot(mo[0], u[0])
mo_b = numpy.dot(mo[1], u[1])
if log is not None and log.verbose >= logger.DEBUG:
ncore = self.ncore[0]
ncas = self.ncas
nocc = ncore + ncas
s = reduce(numpy.dot, (mo_a[:,ncore:nocc].T, self._scf.get_ovlp(),
self.mo_coeff[0][:,ncore:nocc]))
log.debug('Alpha active space overlap to initial guess, SVD = %s',
numpy.linalg.svd(s)[1])
log.debug('Alpha active space overlap to last step, SVD = %s',
numpy.linalg.svd(u[0][ncore:nocc,ncore:nocc])[1])
return mo_a, mo_b
def micro_cycle_scheduler(self, envs):
#log_norm_ddm = numpy.log(envs['norm_ddm'])
#return max(self.max_cycle_micro, int(self.max_cycle_micro-1-log_norm_ddm))
return self.max_cycle_micro
def max_stepsize_scheduler(self, envs):
if self._max_stepsize is None:
self._max_stepsize = self.max_stepsize
if envs['de'] > self.conv_tol: # Avoid total energy increasing
self._max_stepsize *= .5
logger.debug(self, 'set max_stepsize to %g', self._max_stepsize)
else:
self._max_stepsize = numpy.sqrt(self.max_stepsize*self.max_stepsize)
return self._max_stepsize
@property
def max_orb_stepsize(self): # pragma: no cover
return self.max_stepsize
@max_orb_stepsize.setter
def max_orb_stepsize(self, x): # pragma: no cover
sys.stderr.write('WARN: Attribute "max_orb_stepsize" was replaced by "max_stepsize"\n')
self.max_stepsize = x
CASSCF = UCASSCF
# to avoid calculating AO integrals
def _fake_h_for_fast_casci(casscf, mo, eris):
mc = copy.copy(casscf)
mc.mo_coeff = mo
# vhf for core density matrix
s = mc._scf.get_ovlp()
mo_inv = (numpy.dot(mo[0].T, s), numpy.dot(mo[1].T, s))
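# with C^{-1} = C.T * S, the MO-basis core JK potential below is transformed back to the AO basis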
vjk =(numpy.einsum('ipq->pq', eris.jkcpp) + eris.jC_pp,
numpy.einsum('ipq->pq', eris.jkcPP) + eris.jc_PP)
vhf =(reduce(numpy.dot, (mo_inv[0].T, vjk[0], mo_inv[0])),
reduce(numpy.dot, (mo_inv[1].T, vjk[1], mo_inv[1])))
mc.get_veff = lambda *args: vhf
ncas = casscf.ncas
ncore = casscf.ncore
nocc = (ncas + ncore[0], ncas + ncore[1])
eri_cas = (eris.aapp[:,:,ncore[0]:nocc[0],ncore[0]:nocc[0]].copy(), \
eris.aaPP[:,:,ncore[1]:nocc[1],ncore[1]:nocc[1]].copy(),
eris.AAPP[:,:,ncore[1]:nocc[1],ncore[1]:nocc[1]].copy())
mc.get_h2eff = lambda *args: eri_cas
return mc
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf.mcscf import addons
mol = gto.Mole()
mol.verbose = 0
mol.output = None#"out_h2o"
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 ,-1. )],
['H', ( 0.,-0.5 ,-1. )],
['H', ( 0.,-0.5 ,-0. )],
['H', ( 0.,-0. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0., 1. , 1. )],
]
mol.basis = {'H': 'sto-3g'}
mol.charge = 1
mol.spin = 1
mol.build()
m = scf.UHF(mol)
ehf = m.scf()
mc = UCASSCF(m, 4, (2,1))
#mo = m.mo_coeff
mo = addons.sort_mo(mc, m.mo_coeff, [(3,4,5,6),(3,4,6,7)], 1)
emc = kernel(mc, mo, verbose=4)[1]
print(ehf, emc, emc-ehf)
print(emc - -2.9782774463926618)
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = {'H': 'cc-pvdz',
'O': 'cc-pvdz',}
mol.symmetry = 1
mol.charge = 1
mol.spin = 1
mol.build()
m = scf.UHF(mol)
ehf = m.scf()
mc = UCASSCF(m, 4, (2,1))
mc.verbose = 4
emc = mc.mc1step()[0]
print(ehf, emc, emc-ehf)
print(emc - -75.5644202701263, emc - -75.573930418500652,
emc - -75.574137883405612, emc - -75.648547447838951)
mc = UCASSCF(m, 4, (2,1))
mc.verbose = 4
mo = mc.sort_mo((3,4,6,7))
emc = mc.mc1step(mo)[0]
print(ehf, emc, emc-ehf)
print(emc - -75.5644202701263, emc - -75.573930418500652,
emc - -75.574137883405612, emc - -75.648547447838951)
| gkc1000/pyscf | pyscf/mcscf/umc1step.py | Python | apache-2.0 | 38,605 | ["PySCF"] | 55fd2be609245c8cf472f091ca9b20f276328d811f82ddc056f178d4769ba0f6 |
# vim:set et sts=4 sw=4:
#
# ibus-xkb - IBus XKB
#
# Copyright(c) 2012 Takao Fujiwara <takao.fujiwara1@gmail.com>
# Copyright(c) 2007-2010 Peng Huang <shawn.p.huang@gmail.com>
# Copyright(c) 2007-2012 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or(at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
import gtk
import gtk.gdk as gdk
import gobject
class Handle(gtk.EventBox):
__gtype_name__ = "IBusHandle"
__gsignals__ = {
"move-begin" : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()),
"move-end" : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()),
}
def __init__ (self):
super(Handle, self).__init__()
self.set_visible_window(False)
self.set_size_request(10, -1)
self.set_events(
gdk.EXPOSURE_MASK | \
gdk.BUTTON_PRESS_MASK | \
gdk.BUTTON_RELEASE_MASK | \
gdk.BUTTON1_MOTION_MASK)
self.__move_begined = False
root = gdk.get_default_root_window()
def do_button_press_event(self, event):
if event.button == 1:
root = gdk.get_default_root_window()
try:
desktop = root.property_get("_NET_CURRENT_DESKTOP")[2][0]
self.__workarea = root.property_get("_NET_WORKAREA")[2][desktop * 4: (desktop + 1) * 4]
except Exception:
self.__workarea = None
self.__move_begined = True
toplevel = self.get_toplevel()
x, y = toplevel.get_position()
self.__press_pos = event.x_root - x, event.y_root - y
self.window.set_cursor(gdk.Cursor(gdk.FLEUR))
self.emit("move-begin")
return True
return False
def do_button_release_event(self, event):
if event.button == 1:
self.__move_begined = False
del self.__press_pos
del self.__workarea
self.window.set_cursor(gdk.Cursor(gdk.LEFT_PTR))
self.emit("move-end")
return True
return False
def do_motion_notify_event(self, event):
if not self.__move_begined:
return
toplevel = self.get_toplevel()
x, y = toplevel.get_position()
x = int(event.x_root - self.__press_pos[0])
y = int(event.y_root - self.__press_pos[1])
if self.__workarea is None:
toplevel.move(x, y)
return
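# snap the window edge to the workarea boundary when dragged within 16 pixels of it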
if x < self.__workarea[0] and x > self.__workarea[0] - 16:
x = self.__workarea[0]
if y < self.__workarea[1] and y > self.__workarea[1] - 16:
y = self.__workarea[1]
w, h = toplevel.get_size()
if x + w > self.__workarea[0] + self.__workarea[2] and \
x + w < self.__workarea[0] + self.__workarea[2] + 16:
x = self.__workarea[0] + self.__workarea[2] - w
if y + h > self.__workarea[1] + self.__workarea[3] and \
y + h < self.__workarea[1] + self.__workarea[3] + 16:
y = self.__workarea[1] + self.__workarea[3] - h
toplevel.move(x, y)
def do_expose_event(self, event):
self.style.paint_handle(
self.window,
gtk.STATE_NORMAL,
gtk.SHADOW_OUT,
event.area,
self,
"",
self.allocation.x, self.allocation.y,
10, self.allocation.height,
gtk.ORIENTATION_VERTICAL)
return True
| fujiwarat/ibus-xkb | ui/gtk2/handle.py | Python | lgpl-2.1 | 4,191 | ["FLEUR"] | 416d5f8a7bc4f812d9f64cc15f1ce9e6ac2b2b6ec82d458cd2ef3f4ec7cb7c8e |
"""
This file is part of pyS5p
https://github.com/rmvanhees/pys5p.git
The class LV2io provides read access to S5p Tropomi S5P_OFFL_L2 products
Copyright (c) 2018-2021 SRON - Netherlands Institute for Space Research
All Rights Reserved
License: BSD-3-Clause
"""
from datetime import datetime, timedelta
from pathlib import Path
import h5py
from netCDF4 import Dataset
import numpy as np
from .s5p_xarray import data_to_xr, h5_to_xr
# - global parameters ------------------------------
# - local functions --------------------------------
# - class definition -------------------------------
class LV2io():
"""
This class should offer all the necessary functionality to read Tropomi
S5P_OFFL_L2 products
Attributes
----------
fid : h5py.File
filename : string
science_product : bool
ground_pixel : int
scanline : int
Methods
-------
close()
Close resources.
get_attr(attr_name, ds_name=None)
Obtain value of an HDF5 file attribute or dataset attribute.
get_orbit()
Returns reference orbit number
get_algorithm_version()
Returns version of the level-2 algorithm.
get_processor_version()
Returns version of the L12 processor used to generate this product.
get_product_version()
Returns version of the level-2 product
get_coverage_time()
Returns start and end of the measurement coverage time.
get_creation_time()
Returns creation date/time of the level-2 product.
get_ref_time()
Returns reference start time of measurements.
get_delta_time()
Returns offset from the reference start time of measurement.
get_geo_data(geo_dsets=None)
Returns data of selected datasets from the GEOLOCATIONS group.
get_geo_bounds(extent=None, data_sel=None)
Returns bounds of latitude/longitude as a mesh for plotting.
get_dataset(name, data_sel=None, fill_as_nan=True)
Read level-2 dataset from PRODUCT group.
get_data_as_s5pmsm(name, data_sel=None, fill_as_nan=True, mol_m2=False)
Read dataset from group PRODUCT/target_product group.
Notes
-----
The Python h5py module can read the operational netCDF4 products without
any problems; however, the SRON science products contain incompatible
attributes. This should be fixed when more up-to-date netCDF software is
used to generate the products. Currently, the Python netCDF4 module is
used to read the science products.
Examples
--------
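A minimal sketch of typical use (the product file name below is
hypothetical):
>>> with LV2io('S5P_OFFL_L2__NO2____20180701.nc') as lv2:
...     orbit = lv2.orbit
...     geo = lv2.get_geo_data()
...     no2 = lv2.get_dataset('nitrogendioxide_tropospheric_column')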
"""
def __init__(self, lv2_product: str):
"""
Initialize access to an S5P_L2 product
Parameters
----------
lv2_product : string
full path to S5P Tropomi level 2 product
"""
if not Path(lv2_product).is_file():
raise FileNotFoundError(f'{lv2_product} does not exist')
# initialize class-attributes
self.filename = lv2_product
# open LV2 product as HDF5 file
if self.science_product:
self.fid = Dataset(lv2_product, "r", format="NETCDF4")
self.ground_pixel = self.fid['/instrument/ground_pixel'][:].max()
self.ground_pixel += 1
self.scanline = self.fid['/instrument/scanline'][:].max()
self.scanline += 1
# alternatively, a 'sparse' flag could be set here instead of raising
if self.fid['/instrument/scanline'].size % self.ground_pixel != 0:
raise ValueError('not all scanlines are complete')
else:
self.fid = h5py.File(lv2_product, "r")
self.ground_pixel = self.fid['/PRODUCT/ground_pixel'].size
self.scanline = self.fid['/PRODUCT/scanline'].size
def __repr__(self):
class_name = type(self).__name__
return f'{class_name}({self.filename!r})'
def __iter__(self):
for attr in sorted(self.__dict__):
if not attr.startswith("__"):
yield attr
def __enter__(self):
"""
method called to initiate the context manager
"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""
method called when exiting the context manager
"""
self.close()
return False # any exception is raised by the with statement.
def close(self):
"""
Close the product.
"""
if self.fid is not None:
self.fid.close()
# ----- Class properties --------------------
@property
def science_product(self) -> bool:
"""
Returns True if the product is a science product
"""
science_inst = b'SRON Netherlands Institute for Space Research'
res = False
with h5py.File(self.filename, 'r') as fid:
if 'institution' in fid.attrs \
and fid.attrs['institution'] == science_inst:
res = True
return res
@property
def orbit(self) -> int:
"""
Returns reference orbit number
"""
if self.science_product:
return int(self.__nc_attr('orbit', 'l1b_file'))
return self.__h5_attr('orbit', None)[0]
@property
def algorithm_version(self) -> str:
"""
Returns version of the level 2 algorithm
"""
res = self.get_attr('algorithm_version')
return res if res is not None else self.get_attr('version')
@property
def processor_version(self) -> str:
"""
Returns version of the level 2 processor
"""
res = self.get_attr('processor_version')
return res if res is not None else self.get_attr('version')
@property
def product_version(self) -> str:
"""
Returns version of the level 2 product
"""
res = self.get_attr('product_version')
return res if res is not None else self.get_attr('version')
@property
def coverage_time(self) -> tuple:
"""
Returns start and end of the measurement coverage time
"""
return (self.get_attr('time_coverage_start'),
self.get_attr('time_coverage_end'))
@property
def creation_time(self) -> str:
"""
Returns creation date/time of the level 2 product
"""
return self.get_attr('date_created')
# ----- Attributes --------------------
def __h5_attr(self, attr_name, ds_name):
"""
read attributes from operational products using hdf5
"""
if ds_name is not None:
dset = self.fid[f'/PRODUCT/{ds_name}']
if attr_name not in dset.attrs.keys():
return None
attr = dset.attrs[attr_name]
else:
if attr_name not in self.fid.attrs:
return None
attr = self.fid.attrs[attr_name]
if isinstance(attr, bytes):
return attr.decode('ascii')
return attr
def __nc_attr(self, attr_name, ds_name):
"""
read attributes from science products using netCDF4
"""
if ds_name is not None:
for grp_name in ['target_product', 'side_product', 'instrument']:
if grp_name not in self.fid.groups:
continue
if ds_name not in self.fid[grp_name].variables:
continue
dset = self.fid[f'/{grp_name}/{ds_name}']
if attr_name in dset.ncattrs():
return dset.getncattr(attr_name)
return None
if attr_name not in self.fid.ncattrs():
return None
return self.fid.getncattr(attr_name)
def get_attr(self, attr_name, ds_name=None):
"""
Obtain value of an HDF5 file attribute or dataset attribute
Parameters
----------
attr_name : string
name of the attribute
ds_name : string (optional)
name of dataset, default is to read the product attributes
"""
if self.science_product:
return self.__nc_attr(attr_name, ds_name)
return self.__h5_attr(attr_name, ds_name)
# ----- Time information ---------------
@property
def ref_time(self) -> datetime:
"""
Returns reference start time of measurements
"""
if self.science_product:
return None
return (datetime(2010, 1, 1, 0, 0, 0)
+ timedelta(seconds=int(self.fid['/PRODUCT/time'][0])))
def get_time(self):
"""
Returns start time of measurement per scan-line
"""
if self.science_product:
buff = self.get_dataset('time')[::self.ground_pixel, :]
return np.array([datetime(*x) for x in buff])
buff = self.fid['/PRODUCT/delta_time'][0, :]
return np.array([self.ref_time + timedelta(seconds=x / 1e3)
for x in buff])
# ----- Geolocation --------------------
def __h5_geo_data(self, geo_dsets):
"""
read geolocation datasets from operational products using HDF5
"""
res = {}
if geo_dsets is None:
geo_dsets = 'latitude,longitude'
for key in geo_dsets.split(','):
for grp_name in ['/PRODUCT', '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS']:
if key in self.fid[grp_name]:
res[key] = np.squeeze(
self.fid[f'{grp_name}/{key}'])
continue
return res
def __nc_geo_data(self, geo_dsets):
"""
read geolocation datasets from science products using netCDF4
"""
res = {}
if geo_dsets is None:
geo_dsets = 'latitude_center,longitude_center'
for key in geo_dsets.split(','):
if key in self.fid['/instrument'].variables.keys():
ds_name = f'/instrument/{key}'
res[key] = self.fid[ds_name][:].reshape(
self.scanline, self.ground_pixel)
return res
def get_geo_data(self, geo_dsets=None):
"""
Returns data of selected datasets from the GEOLOCATIONS group
Parameters
----------
geo_dset : string
Name(s) of datasets, comma separated
Default:
* operational: 'latitude,longitude'
* science: 'latitude_center,longitude_center'
Returns
-------
out : dictionary with arrays
arrays of selected datasets
"""
if self.science_product:
return self.__nc_geo_data(geo_dsets)
return self.__h5_geo_data(geo_dsets)
# ----- Footprints --------------------
def __h5_geo_bounds(self, extent, data_sel):
"""
read bounds of latitude/longitude from operational products using HDF5
"""
indx = None
if extent is not None:
if len(extent) != 4:
raise ValueError('parameter extent must have 4 elements')
lats = self.fid['/PRODUCT/latitude'][0, ...]
lons = self.fid['/PRODUCT/longitude'][0, ...]
indx = ((lons >= extent[0]) & (lons <= extent[1])
& (lats >= extent[2]) & (lats <= extent[3])).nonzero()
data_sel = np.s_[indx[0].min():indx[0].max(),
indx[1].min():indx[1].max()]
gid = self.fid['/PRODUCT/SUPPORT_DATA/GEOLOCATIONS']
if data_sel is None:
lat_bounds = gid['latitude_bounds'][0, ...]
lon_bounds = gid['longitude_bounds'][0, ...]
else:
data_sel0 = (0,) + data_sel + (slice(None),)
lat_bounds = gid['latitude_bounds'][data_sel0]
lon_bounds = gid['longitude_bounds'][data_sel0]
return (data_sel, lon_bounds, lat_bounds)
def __nc_geo_bounds(self, extent, data_sel):
"""
read bounds of latitude/longitude from science products using netCDF4
"""
indx = None
if extent is not None:
if len(extent) != 4:
raise ValueError('parameter extent must have 4 elements')
lats = self.fid['/instrument/latitude_center'][:].reshape(
self.scanline, self.ground_pixel)
lons = self.fid['/instrument/longitude_center'][:].reshape(
self.scanline, self.ground_pixel)
indx = ((lons >= extent[0]) & (lons <= extent[1])
& (lats >= extent[2]) & (lats <= extent[3])).nonzero()
data_sel = np.s_[indx[0].min():indx[0].max(),
indx[1].min():indx[1].max()]
gid = self.fid['/instrument']
lat_bounds = gid['latitude_corners'][:].data.reshape(
self.scanline, self.ground_pixel, 4)
lon_bounds = gid['longitude_corners'][:].data.reshape(
self.scanline, self.ground_pixel, 4)
if data_sel is not None:
lat_bounds = lat_bounds[data_sel + (slice(None),)]
lon_bounds = lon_bounds[data_sel + (slice(None),)]
return (data_sel, lon_bounds, lat_bounds)
def get_geo_bounds(self, extent=None, data_sel=None):
"""
Returns bounds of latitude/longitude as a mesh for plotting
Parameters
----------
extent : list
select data to cover a region with geolocation defined by:
lon_min, lon_max, lat_min, lat_max and return numpy slice
data_sel : numpy slice
a 3-dimensional numpy slice: time, scan_line, ground_pixel
Note 'data_sel' will be overwritten when 'extent' is defined
Returns
-------
data_sel : numpy slice
slice of data which covers geolocation defined by extent. Only
provided if extent is not None.
out : dictionary
with numpy arrays for latitude and longitude
"""
if self.science_product:
res = self.__nc_geo_bounds(extent, data_sel)
else:
res = self.__h5_geo_bounds(extent, data_sel)
data_sel, lon_bounds, lat_bounds = res
res = {}
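# build an (n+1, m+1) corner mesh: corner 0 fills the grid body,
# corners 1 and 2 supply the last row/column edges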
_sz = lon_bounds.shape
res['longitude'] = np.empty((_sz[0]+1, _sz[1]+1), dtype=float)
res['longitude'][:-1, :-1] = lon_bounds[:, :, 0]
res['longitude'][-1, :-1] = lon_bounds[-1, :, 1]
res['longitude'][:-1, -1] = lon_bounds[:, -1, 1]
res['longitude'][-1, -1] = lon_bounds[-1, -1, 2]
res['latitude'] = np.empty((_sz[0]+1, _sz[1]+1), dtype=float)
res['latitude'][:-1, :-1] = lat_bounds[:, :, 0]
res['latitude'][-1, :-1] = lat_bounds[-1, :, 1]
res['latitude'][:-1, -1] = lat_bounds[:, -1, 1]
res['latitude'][-1, -1] = lat_bounds[-1, -1, 2]
if extent is None:
return res
return data_sel, res
# ----- Datasets (numpy) --------------------
def __h5_dataset(self, name, data_sel, fill_as_nan):
"""
read dataset from operational products using HDF5
"""
fillvalue = float.fromhex('0x1.ep+122')
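# netCDF default float fill value (9.96921e+36)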
if name not in self.fid['/PRODUCT']:
raise ValueError(f'dataset {name} not found')
dset = self.fid[f'/PRODUCT/{name}']
if data_sel is None:
if dset.dtype == np.float32:
res = dset.astype(float)[0, ...]
else:
res = dset[0, ...]
else:
if dset.dtype == np.float32:
res = dset.astype(float)[(0,) + data_sel]
else:
res = dset[(0,) + data_sel]
if fill_as_nan and dset.attrs['_FillValue'] == fillvalue:
res[(res == fillvalue)] = np.nan
return res
def __nc_dataset(self, name, data_sel, fill_as_nan):
"""
read dataset from science products using netCDF4
"""
if name in self.fid['/target_product'].variables.keys():
group = '/target_product'
elif name in self.fid['/instrument'].variables.keys():
group = '/instrument'
else:
raise ValueError(f'dataset {name} not found')
dset = self.fid[f'{group}/{name}']
if dset.size == self.scanline * self.ground_pixel:
res = dset[:].reshape(self.scanline, self.ground_pixel)
else:
res = dset[:]
if data_sel is not None:
res = res[data_sel]
if fill_as_nan:
return res.filled(np.nan)
return res.data
def get_dataset(self, name, data_sel=None, fill_as_nan=True):
"""
Read level 2 dataset from PRODUCT group
Parameters
----------
name : string
name of dataset with level 2 data
data_sel : numpy slice
a 3-dimensional numpy slice: time, scan_line, ground_pixel
fill_as_nan : boolean
Replace (float) fill values with NaN, when True
Returns
-------
out : array
"""
if self.science_product:
return self.__nc_dataset(name, data_sel, fill_as_nan)
return self.__h5_dataset(name, data_sel, fill_as_nan)
# ----- Dataset (xarray) --------------------
def __h5_data_as_xds(self, name, data_sel, mol_m2):
"""
Read dataset from group target_product using HDF5
Input: operational product
Return: xarray.Dataset
"""
if name not in self.fid['/PRODUCT']:
raise ValueError(f'dataset {name} not found')
dset = self.fid[f'/PRODUCT/{name}']
# ToDo handle parameter mol_m2
return h5_to_xr(dset, (0,) + data_sel).squeeze()
def __nc_data_as_xds(self, name, data_sel):
"""
Read dataset from group PRODUCT using netCDF4
Input: science product
Return: xarray.DataArray
"""
if name in self.fid['/target_product'].variables.keys():
group = '/target_product'
elif name in self.fid['/instrument'].variables.keys():
group = '/instrument'
else:
raise ValueError(f'dataset {name} not found')
return data_to_xr(self.get_dataset(name, data_sel),
dims=['scanline', 'ground_pixel'], name=name,
long_name=self.get_attr('long_name', name),
units=self.get_attr('units', name))
def get_data_as_xds(self, name, data_sel=None, mol_m2=False):
"""
Read dataset from group PRODUCT/target_product group
Parameters
----------
name : str
name of dataset with level 2 data
data_sel : numpy slice
a 3-dimensional numpy slice: time, scan_line, ground_pixel
mol_m2 : bool
Leave units as mol per m^2 or convert units to molecules per cm^2
Returns
-------
out : xarray.DataArray
"""
if self.science_product:
return self.__nc_data_as_xds(name, data_sel)
return self.__h5_data_as_xds(name, data_sel, mol_m2)
| rmvanhees/pys5p | src/pys5p/lv2_io.py | Python | bsd-3-clause | 19,043 | ["NetCDF"] | 1ca07f930d576e2455fd06a2ab07ff156d566afe548e197768363b6651887167 |
import collections
import collections.abc
import copy
from ..core import Dict, Expr, Integer
from ..core.compatibility import as_int, is_sequence
from ..core.logic import fuzzy_and
from ..functions import sqrt
from ..logic import true
from ..utilities.iterables import uniq
from .matrices import MatrixBase, ShapeError, a2idx
class SparseMatrixBase(MatrixBase):
"""A sparse matrix base class."""
def __init__(self, *args):
from . import Matrix
if len(args) == 1 and isinstance(args[0], SparseMatrixBase):
self.rows = args[0].rows
self.cols = args[0].cols
self._smat = dict(args[0]._smat)
return
self._smat = {}
if len(args) == 3:
self.rows = as_int(args[0])
self.cols = as_int(args[1])
if isinstance(args[2], collections.abc.Callable):
op = args[2]
for i in range(self.rows):
for j in range(self.cols):
value = self._sympify(
op(self._sympify(i), self._sympify(j)))
if value:
self._smat[(i, j)] = value
elif isinstance(args[2], (dict, Dict)):
# manual copy, copy.deepcopy() doesn't work
for key in args[2]:
v = args[2][key]
if v:
self._smat[key] = self._sympify(v)
elif is_sequence(args[2]):
if len(args[2]) != self.rows*self.cols:
raise ValueError(
f'List length ({len(args[2])}) != rows*columns ({self.rows*self.cols})')
flat_list = args[2]
for i in range(self.rows):
for j in range(self.cols):
value = self._sympify(flat_list[i*self.cols + j])
if value:
self._smat[(i, j)] = value
else:
raise ValueError('Third argument must be a callable,'
' dictionary or sequence.')
else:
# handle full matrix forms with _handle_creation_inputs
r, c, _list = Matrix._handle_creation_inputs(*args)
self.rows = r
self.cols = c
for i in range(self.rows):
for j in range(self.cols):
value = _list[self.cols*i + j]
if value:
self._smat[(i, j)] = value
def __getitem__(self, key):
if isinstance(key, tuple):
i, j = key
try:
i, j = self.key2ij(key)
return self._smat.get((i, j), Integer(0))
except (TypeError, IndexError):
if any(isinstance(_, Expr) and not _.is_number for _ in (i, j)):
if ((j < 0) == true) or ((j >= self.shape[1]) == true) or \
((i < 0) == true) or ((i >= self.shape[0]) == true):
raise ValueError('index out of bounds')
from .expressions.matexpr import MatrixElement
return MatrixElement(self, i, j)
if isinstance(i, slice):
i = range(self.rows)[i]
elif is_sequence(i):
pass
else:
if i >= self.rows:
raise IndexError('Row index out of bounds')
i = [i]
if isinstance(j, slice):
j = range(self.cols)[j]
elif is_sequence(j):
pass
else:
if j >= self.cols:
raise IndexError('Col index out of bounds')
j = [j]
return self.extract(i, j)
# check for single arg, like M[:] or M[3]
if isinstance(key, slice):
lo, hi = key.indices(len(self))[:2]
L = []
for i in range(lo, hi):
m, n = divmod(i, self.cols)
L.append(self._smat.get((m, n), Integer(0)))
return L
i, j = divmod(a2idx(key, len(self)), self.cols)
return self._smat.get((i, j), Integer(0))
def __setitem__(self, key, value):
raise NotImplementedError
def copy(self):
return self._new(self.rows, self.cols, self._smat)
@property
def is_Identity(self):
if not self.is_square:
return False
if not all(self[i, i] == 1 for i in range(self.rows)):
return False
return len(self._smat) == self.rows
def tolist(self):
"""Convert this sparse matrix into a list of nested Python lists.
Examples
========
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a.tolist()
[[1, 2], [3, 4]]
When there are no rows then it will not be possible to tell how
many columns were in the original matrix:
>>> SparseMatrix(ones(0, 3)).tolist()
[]
"""
if not self.rows:
return []
I, J = self.shape
return [[self[i, j] for j in range(J)] for i in range(I)]
def row_list(self):
"""Returns a row-sorted list of non-zero elements of the matrix.
Examples
========
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.RL
[(0, 0, 1), (0, 1, 2), (1, 0, 3), (1, 1, 4)]
See Also
========
diofant.matrices.sparse.MutableSparseMatrix.row_op
col_list
"""
return [tuple(k + (self[k],)) for k in
sorted(self._smat, key=lambda k: list(k))]
RL = property(row_list, None, None, 'Alternate faster representation')
def col_list(self):
"""Returns a column-sorted list of non-zero elements of the matrix.
Examples
========
>>> SparseMatrix(((1, 2), (3, 4)))
Matrix([
[1, 2],
[3, 4]])
>>> _.CL
[(0, 0, 1), (1, 0, 3), (0, 1, 2), (1, 1, 4)]
See Also
========
diofant.matrices.sparse.MutableSparseMatrix.col_op
row_list
"""
return [tuple(k + (self[k],)) for k in sorted(self._smat, key=lambda k: list(reversed(k)))]
CL = property(col_list, None, None, 'Alternate faster representation')
def _eval_trace(self):
"""Calculate the trace of a square matrix.
Examples
========
>>> eye(3).trace()
3
"""
trace = Integer(0)
for i in range(self.cols):
trace += self._smat.get((i, i), 0)
return trace
def _eval_transpose(self):
"""Returns the transposed SparseMatrix of this SparseMatrix.
Examples
========
>>> a = SparseMatrix(((1, 2), (3, 4)))
>>> a
Matrix([
[1, 2],
[3, 4]])
>>> a.T
Matrix([
[1, 3],
[2, 4]])
"""
tran = self.zeros(self.cols, self.rows)
for key, value in self._smat.items():
key = key[1], key[0] # reverse
tran._smat[key] = value
return tran
def _eval_conjugate(self):
"""Return the by-element conjugation.
Examples
========
>>> a = SparseMatrix(((1, 2 + I), (3, 4), (I, -I)))
>>> a
Matrix([
[1, 2 + I],
[3, 4],
[I, -I]])
>>> a.C
Matrix([
[ 1, 2 - I],
[ 3, 4],
[-I, I]])
See Also
========
transpose: Matrix transposition
H: Hermite conjugation
D: Dirac conjugation
"""
conj = self.copy()
for key, value in self._smat.items():
conj._smat[key] = value.conjugate()
return conj
def multiply(self, other):
"""Fast multiplication exploiting the sparsity of the matrix.
Examples
========
>>> A, B = SparseMatrix(ones(4, 3)), SparseMatrix(ones(3, 4))
>>> A.multiply(B) == 3*ones(4)
True
See Also
========
add
"""
A = self
B = other
# sort B's row_list into list of rows
Blist = [[] for i in range(B.rows)]
for i, j, v in B.row_list():
Blist[i].append((j, v))
Cdict = collections.defaultdict(int)
for k, j, Akj in A.row_list():
for n, Bjn in Blist[j]:
temp = Akj*Bjn
Cdict[k, n] += temp
rv = self.zeros(A.rows, B.cols)
rv._smat = {k: v for k, v in Cdict.items() if v}
return rv
def scalar_multiply(self, scalar):
"""Scalar element-wise multiplication."""
M = self.zeros(*self.shape)
if scalar:
for i in self._smat:
M._smat[i] = scalar*self._smat[i]
return M
def __mul__(self, other):
"""Multiply self and other, watching for non-matrix entities.
When multiplying by a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> I = SparseMatrix(eye(3))
>>> I*I == I
True
>>> Z = zeros(3)
>>> I*Z
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> I*2 == 2*I
True
"""
if isinstance(other, SparseMatrixBase):
return self.multiply(other)
if isinstance(other, MatrixBase):
return other._new(self*self._new(other))
return self.scalar_multiply(other)
def __rmul__(self, other):
"""Return product the same type as other (if a Matrix).
When multiplying be a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> A = Matrix(2, 2, range(1, 5))
>>> S = SparseMatrix(2, 2, range(2, 6))
>>> A*S == S*A
False
>>> (isinstance(A*S, SparseMatrix) is
... isinstance(S*A, SparseMatrix) is False)
True
"""
return self.scalar_multiply(other)
def __add__(self, other):
"""Add other to self, efficiently if possible.
When adding a non-sparse matrix, the result is no longer
sparse.
Examples
========
>>> A = SparseMatrix(eye(3)) + SparseMatrix(eye(3))
>>> B = SparseMatrix(eye(3)) + eye(3)
>>> A
Matrix([
[2, 0, 0],
[0, 2, 0],
[0, 0, 2]])
>>> A == B
True
>>> isinstance(A, SparseMatrix) and isinstance(B, SparseMatrix)
False
"""
if isinstance(other, SparseMatrixBase):
return self.add(other)
elif isinstance(other, MatrixBase):
return other._new(other + self)
else:
return NotImplemented
def __neg__(self):
"""Negate all elements of self.
Examples
========
>>> -SparseMatrix(eye(3))
Matrix([
[-1, 0, 0],
[ 0, -1, 0],
[ 0, 0, -1]])
"""
rv = self.copy()
for k, v in rv._smat.items():
rv._smat[k] = -v
return rv
def add(self, other):
"""Add two sparse matrices with dictionary representation.
Examples
========
>>> SparseMatrix(eye(3)).add(SparseMatrix(ones(3)))
Matrix([
[2, 1, 1],
[1, 2, 1],
[1, 1, 2]])
>>> SparseMatrix(eye(3)).add(-SparseMatrix(eye(3)))
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
Only the non-zero elements are stored, so the resulting dictionary
that is used to represent the sparse matrix is empty:
>>> _._smat
{}
See Also
========
multiply
"""
if not isinstance(other, SparseMatrixBase):
raise ValueError(f'only use add with {self.__class__.__name__}, not {other.__class__.__name__}')
if self.shape != other.shape:
raise ShapeError()
M = self.copy()
for i, v in other._smat.items():
v = M[i] + v
if v:
M._smat[i] = v
else:
M._smat.pop(i, None)
return M
def extract(self, rowsList, colsList):
urow = list(uniq(rowsList))
ucol = list(uniq(colsList))
smat = {}
if len(urow)*len(ucol) < len(self._smat):
# there are fewer elements requested than there are elements in the matrix
for i, r in enumerate(urow):
for j, c in enumerate(ucol):
smat[i, j] = self._smat.get((r, c), 0)
else:
# most of the request will be zeros so check all of self's entries,
# keeping only the ones that are desired
for rk, ck in self._smat:
if rk in urow and ck in ucol:
smat[(urow.index(rk), ucol.index(ck))] = self._smat[(rk, ck)]
rv = self._new(len(urow), len(ucol), smat)
# rv is nominally correct but there might be rows/cols
# which require duplication
if len(rowsList) != len(urow):
for i, r in enumerate(rowsList):
i_previous = rowsList.index(r)
if i_previous != i:
rv = rv.row_insert(i, rv[i_previous, :])
if len(colsList) != len(ucol):
for i, c in enumerate(colsList):
i_previous = colsList.index(c)
if i_previous != i:
rv = rv.col_insert(i, rv[:, i_previous])
return rv
extract.__doc__ = MatrixBase.extract.__doc__
@property
def is_hermitian(self):
"""Checks if the matrix is Hermitian.
In a Hermitian matrix element i,j is the complex conjugate of
element j,i.
Examples
========
>>> a = SparseMatrix([[1, I], [-I, 1]])
>>> a
Matrix([
[ 1, I],
[-I, 1]])
>>> a.is_hermitian
True
>>> a[0, 0] = 2*I
>>> a.is_hermitian
False
>>> a[0, 0] = x
>>> a.is_hermitian
>>> a[0, 1] = a[1, 0]*I
>>> a.is_hermitian
False
"""
def cond():
d = self._smat
yield self.is_square
if len(d) <= self.rows:
yield fuzzy_and(
d[i, i].is_extended_real for i, j in d if i == j)
else:
yield fuzzy_and(
d[i, i].is_extended_real for i in range(self.rows) if (i, i) in d)
yield fuzzy_and(
((self[i, j] - self[j, i].conjugate()).is_zero
if (j, i) in d else False) for (i, j) in d)
return fuzzy_and(i for i in cond())
def is_symmetric(self, simplify=True):
"""Return True if self is symmetric.
Examples
========
>>> M = SparseMatrix(eye(3))
>>> M.is_symmetric()
True
>>> M[0, 2] = 1
>>> M.is_symmetric()
False
"""
if simplify:
return all((k[1], k[0]) in self._smat and
not (self[k] - self[(k[1], k[0])]).simplify()
for k in self._smat)
else:
return all((k[1], k[0]) in self._smat and
self[k] == self[(k[1], k[0])] for k in self._smat)
def has(self, *patterns):
"""Test whether any subexpression matches any of the patterns.
Examples
========
>>> A = SparseMatrix(((1, x), (0.2, 3)))
>>> A.has(x)
True
>>> A.has(y)
False
>>> A.has(Float)
True
"""
return any(self[key].has(*patterns) for key in self._smat)
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> m = SparseMatrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError('`f` must be callable.')
out = self.copy()
for k, v in self._smat.items():
fv = f(v)
if fv:
out._smat[k] = fv
else:
out._smat.pop(k, None)
return out
def reshape(self, rows, cols):
"""Reshape matrix while retaining original size.
Examples
========
>>> S = SparseMatrix(4, 2, range(8))
>>> S.reshape(2, 4)
Matrix([
[0, 1, 2, 3],
[4, 5, 6, 7]])
"""
if len(self) != rows*cols:
raise ValueError(f'Invalid reshape parameters {rows:d} {cols:d}')
smat = {}
for k, v in self._smat.items():
i, j = k
n = i*self.cols + j
ii, jj = divmod(n, cols)
smat[(ii, jj)] = self._smat[(i, j)]
return self._new(rows, cols, smat)
def liupc(self):
"""Liu's algorithm, for pre-determination of the Elimination Tree of
the given matrix, used in row-based symbolic Cholesky factorization.
Examples
========
>>> S = SparseMatrix([[1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.liupc()
([[0], [], [0], [1, 2]], [4, 3, 4, 4])
References
==========
Symbolic Sparse Cholesky Factorization using Elimination Trees,
Jeroen Van Grondelle (1999)
"""
# Algorithm 2.4, p 17 of reference
# get the indices of the elements that are non-zero on or below diag
R = [[] for r in range(self.rows)]
for r, c, _ in self.row_list():
if c <= r:
R[r].append(c)
inf = len(R) # nothing will be this large
parent = [inf]*self.rows
virtual = [inf]*self.rows
for r in range(self.rows):
for c in R[r][:-1]:
while virtual[c] < r:
t = virtual[c]
virtual[c] = r
c = t
if virtual[c] == inf:
parent[c] = virtual[c] = r
return R, parent
def row_structure_symbolic_cholesky(self):
"""Symbolic cholesky factorization, for pre-determination of the
non-zero structure of the Cholesky factorization.
Examples
========
>>> S = SparseMatrix([[1, 0, 3, 2],
... [0, 0, 1, 0],
... [4, 0, 0, 5],
... [0, 6, 7, 0]])
>>> S.row_structure_symbolic_cholesky()
[[0], [], [0], [1, 2]]
References
==========
Symbolic Sparse Cholesky Factorization using Elimination Trees,
Jeroen Van Grondelle (1999)
"""
R, parent = self.liupc()
inf = len(R) # this acts as infinity
Lrow = copy.deepcopy(R)
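# for every non-zero below the diagonal, walk up the elimination tree to collect the fill-in positions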
for k in range(self.rows):
for j in R[k]:
while j != inf and j != k:
Lrow[k].append(j)
j = parent[j]
Lrow[k] = sorted(set(Lrow[k]))
return Lrow
def _cholesky_sparse(self):
"""Algorithm for numeric Cholesky factorization of a sparse matrix."""
Crowstruc = self.row_structure_symbolic_cholesky()
C = self.zeros(self.rows)
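# fill C row by row, touching only the symbolically non-zero positions found above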
for i in range(len(Crowstruc)):
for j in Crowstruc[i]:
if i != j:
C[i, j] = self[i, j]
summ = 0
for p1 in Crowstruc[i]: # pragma: no branch
if p1 < j:
for p2 in Crowstruc[j]: # pragma: no branch
if p2 < j:
if p1 == p2:
summ += C[i, p1]*C[j, p1]
else:
break
else:
break
C[i, j] -= summ
C[i, j] /= C[j, j]
else:
C[j, j] = self[j, j]
summ = 0
for k in Crowstruc[j]: # pragma: no branch
if k < j:
summ += C[j, k]**2
else:
break
C[j, j] -= summ
C[j, j] = sqrt(C[j, j])
return C
def _LDL_sparse(self):
"""Algorithm for numeric LDL factization, exploiting sparse structure.
"""
Lrowstruc = self.row_structure_symbolic_cholesky()
L = self.eye(self.rows)
D = self.zeros(self.rows, self.cols)
for i in range(len(Lrowstruc)):
for j in Lrowstruc[i]:
if i != j:
L[i, j] = self[i, j]
summ = 0
for p1 in Lrowstruc[i]: # pragma: no branch
if p1 < j:
for p2 in Lrowstruc[j]: # pragma: no branch
if p2 < j:
if p1 == p2:
summ += L[i, p1]*L[j, p1]*D[p1, p1]
else:
break
else:
break
L[i, j] -= summ
L[i, j] /= D[j, j]
else:
D[i, i] = self[i, i]
summ = 0
for k in Lrowstruc[i]: # pragma: no branch
if k < i:
summ += L[i, k]**2*D[k, k]
else:
break
D[i, i] -= summ
return L, D
def _lower_triangular_solve(self, rhs):
"""Fast algorithm for solving a lower-triangular system,
exploiting the sparsity of the given matrix.
"""
rows = [[] for i in range(self.rows)]
for i, j, v in self.row_list():
if i > j:
rows[i].append((j, v))
X = rhs.copy()
for i in range(self.rows):
for j, v in rows[i]:
X[i, 0] -= v*X[j, 0]
X[i, 0] /= self[i, i]
return self._new(X)
def _upper_triangular_solve(self, rhs):
"""Fast algorithm for solving an upper-triangular system,
exploiting the sparsity of the given matrix.
"""
rows = [[] for i in range(self.rows)]
for i, j, v in self.row_list():
if i < j:
rows[i].append((j, v))
X = rhs.copy()
for i in range(self.rows - 1, -1, -1):
rows[i].reverse()
for j, v in rows[i]:
X[i, 0] -= v*X[j, 0]
X[i, 0] /= self[i, i]
return self._new(X)
def _diagonal_solve(self, rhs):
"""Diagonal solve."""
return self._new(self.rows, 1, lambda i, j: rhs[i, 0] / self[i, i])
def _cholesky_solve(self, rhs):
# for speed reasons, this is not uncommented, but if you are
# having difficulties, try uncommenting to make sure that the
# input matrix is symmetric
# assert self.is_symmetric()
L = self._cholesky_sparse()
Y = L._lower_triangular_solve(rhs)
rv = L.T._upper_triangular_solve(Y)
return rv
def _LDL_solve(self, rhs):
# for speed reasons, this is not uncommented, but if you are
# having difficulties, try uncommenting to make sure that the
# input matrix is symmetric
# assert self.is_symmetric()
L, D = self._LDL_sparse()
Z = L._lower_triangular_solve(rhs)
Y = D._diagonal_solve(Z)
return L.T._upper_triangular_solve(Y)
def cholesky(self):
"""
Returns the Cholesky decomposition L of a matrix A
such that L * L.T = A
A must be a square, symmetric, positive-definite
and non-singular matrix
Examples
========
>>> A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> A.cholesky()
Matrix([
[ 5, 0, 0],
[ 3, 3, 0],
[-1, 1, 3]])
>>> A.cholesky() * A.cholesky().T == A
True
"""
from ..core import nan, oo
if not self.is_symmetric():
raise ValueError('Cholesky decomposition applies only to '
'symmetric matrices.')
M = self.as_mutable()._cholesky_sparse()
assert not M.has(nan, oo)
return self._new(M)
def LDLdecomposition(self):
"""
Returns the LDL Decomposition (matrices ``L`` and ``D``) of matrix
``A``, such that ``L * D * L.T == A``. ``A`` must be a square,
symmetric, positive-definite and non-singular.
This method eliminates the use of square root and ensures that all
the diagonal entries of L are 1.
Examples
========
>>> A = SparseMatrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))
>>> L, D = A.LDLdecomposition()
>>> L
Matrix([
[ 1, 0, 0],
[ 3/5, 1, 0],
[-1/5, 1/3, 1]])
>>> D
Matrix([
[25, 0, 0],
[ 0, 9, 0],
[ 0, 0, 9]])
>>> L * D * L.T == A
True
"""
from ..core import nan, oo
if not self.is_symmetric():
raise ValueError('LDL decomposition applies only to '
'symmetric matrices.')
L, D = self.as_mutable()._LDL_sparse()
assert not L.has(nan, oo) and not D.has(nan, oo)
return self._new(L), self._new(D)
def solve_least_squares(self, rhs, method='LDL'):
"""Return the least-square fit to the data.
By default the LDL decomposition is used (method='LDL'); other
methods of matrix inversion can be used.
Examples
========
>>> A = Matrix([1, 2, 3])
>>> B = Matrix([2, 3, 4])
>>> S = SparseMatrix(A.row_join(B))
>>> S
Matrix([
[1, 2],
[2, 3],
[3, 4]])
If each row of S represents coefficients of Ax + By
and x and y are [2, 3] then S*xy is:
>>> r = S*Matrix([2, 3])
>>> r
Matrix([
[ 8],
[13],
[18]])
But let's add 1 to the middle value and then solve for the
least-squares value of xy:
>>> xy = S.solve_least_squares(Matrix([8, 14, 18]))
>>> xy
Matrix([
[ 5/3],
[10/3]])
The error is given by S*xy - r:
>>> S*xy - r
Matrix([
[1/3],
[1/3],
[1/3]])
>>> _.norm().evalf(2)
0.58
If a different xy is used, the norm will be higher:
>>> xy += ones(2, 1)/10
>>> (S*xy - r).norm().evalf(2)
1.5
See Also
========
diofant.matrices.matrices.MatrixBase.inv
"""
t = self.T
return (t*self).inv(method=method)*t*rhs
def solve(self, rhs, method='LDL'):
"""Return solution to self*soln = rhs using given inversion method.
See Also
========
diofant.matrices.matrices.MatrixBase.inv
"""
if not self.is_square:
if self.rows < self.cols:
raise ValueError('Under-determined system.')
else:
raise ValueError('For over-determined system, M, having '
'more rows than columns, try M.solve_least_squares(rhs).')
else:
return self.inv(method=method)*rhs
def _eval_inverse(self, **kwargs):
"""Return the matrix inverse using Cholesky or LDL (default)
decomposition as selected with the ``method`` keyword: 'CH' or 'LDL',
respectively.
Examples
========
>>> A = SparseMatrix([[+2, -1, +0],
... [-1, +2, -1],
... [+0, +0, +2]])
>>> A.inv('CH')
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A.inv(method='LDL') # use of 'method=' is optional
Matrix([
[2/3, 1/3, 1/6],
[1/3, 2/3, 1/3],
[ 0, 0, 1/2]])
>>> A * _
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
"""
sym = self.is_symmetric()
M = self.as_mutable()
I = M.eye(M.rows)
if not sym:
t = M.T
r1 = M[0, :]
M = t*M
I = t*I
method = kwargs.get('method', 'LDL')
if method == 'LDL':
solve = M._LDL_solve
elif method == 'CH':
solve = M._cholesky_solve
else:
raise NotImplementedError(f'Method may be "CH" or "LDL", not {method}.')
rv = M.hstack(*[solve(I[:, i]) for i in range(I.cols)])
if not sym:
scale = (r1*rv[:, 0])[0, 0]
rv /= scale
return self._new(rv)
def __eq__(self, other):
try:
if self.shape != other.shape:
return False
if isinstance(other, SparseMatrixBase):
return self._smat == other._smat
else:
return self._smat == MutableSparseMatrix(other)._smat
except AttributeError:
return False
def as_mutable(self):
"""Returns a mutable version of this matrix.
Examples
========
>>> X = ImmutableMatrix([[1, 2], [3, 4]])
>>> Y = X.as_mutable()
>>> Y[1, 1] = 5 # Can set values in Y
>>> Y
Matrix([
[1, 2],
[3, 5]])
"""
return MutableSparseMatrix(self)
def as_immutable(self):
"""Returns an Immutable version of this Matrix."""
from .immutable import ImmutableSparseMatrix
return ImmutableSparseMatrix(self)
def nnz(self):
"""Returns the number of non-zero elements in Matrix."""
return len(self._smat)
@classmethod
def zeros(cls, r, c=None):
"""Return an r x c matrix of zeros, square if c is omitted."""
c = r if c is None else c
r = as_int(r)
c = as_int(c)
return cls(r, c, {})
@classmethod
def eye(cls, n):
"""Return an n x n identity matrix."""
n = as_int(n)
return cls(n, n, {(i, i): Integer(1) for i in range(n)})
class MutableSparseMatrix(SparseMatrixBase, MatrixBase):
"""
A sparse matrix (a matrix with a large number of zero elements).
Examples
========
>>> SparseMatrix(2, 2, range(4))
Matrix([
[0, 1],
[2, 3]])
>>> SparseMatrix(2, 2, {(1, 1): 2})
Matrix([
[0, 0],
[0, 2]])
See Also
========
diofant.matrices.dense.DenseMatrix
"""
@classmethod
def _new(cls, *args, **kwargs):
return cls(*args)
def as_mutable(self):
return self.copy()
def __setitem__(self, key, value):
"""Assign value to position designated by key.
Examples
========
>>> M = SparseMatrix(2, 2, {})
>>> M[1] = 1
>>> M
Matrix([
[0, 1],
[0, 0]])
>>> M[1, 1] = 2
>>> M
Matrix([
[0, 1],
[0, 2]])
>>> M = SparseMatrix(2, 2, {})
>>> M[:, 1] = [1, 1]
>>> M
Matrix([
[0, 1],
[0, 1]])
>>> M = SparseMatrix(2, 2, {})
>>> M[1, :] = [[1, 1]]
>>> M
Matrix([
[0, 0],
[1, 1]])
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = SparseMatrix(4, 4, {})
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2
>>> M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4
>>> M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
rv = self._setitem(key, value)
if rv is not None:
i, j, value = rv
if value:
self._smat[(i, j)] = value
elif (i, j) in self._smat:
del self._smat[(i, j)]
__hash__ = None
def __delitem__(self, key):
"""Delete portion of self defined by key.
Examples
========
>>> M = SparseMatrix([[0, 0], [0, 1]])
>>> M
Matrix([
[0, 0],
[0, 1]])
>>> del M[0, :]
>>> M
Matrix([[0, 1]])
>>> del M[:, 1]
>>> M
Matrix([[0]])
"""
i, j = self.key2ij(key)
newD = {}
if isinstance(i, int) and j == slice(None):
k = a2idx(i, self.rows)
for (i, j) in self._smat:
if i == k:
pass
elif i > k:
newD[i - 1, j] = self._smat[i, j]
else:
newD[i, j] = self._smat[i, j]
self._smat = newD
self.rows -= 1
elif i == slice(None) and isinstance(j, int):
k = a2idx(j, self.cols)
for (i, j) in self._smat:
if j == k:
pass
elif j > k:
newD[i, j - 1] = self._smat[i, j]
else:
newD[i, j] = self._smat[i, j]
self._smat = newD
self.cols -= 1
else:
raise NotImplementedError
def row_swap(self, i, j):
"""Swap, in place, columns i and j.
Examples
========
>>> S = SparseMatrix.eye(3)
>>> S[2, 1] = 2
>>> S.row_swap(1, 0)
>>> S
Matrix([
[0, 1, 0],
[1, 0, 0],
[0, 2, 1]])
"""
if i > j:
i, j = j, i
rows = self.row_list()
temp = []
for ii, jj, v in rows:
if ii == i:
self._smat.pop((ii, jj))
temp.append((jj, v))
elif ii == j:
self._smat.pop((ii, jj))
self._smat[i, jj] = v
elif ii > j:
break
for k, v in temp:
self._smat[j, k] = v
def col_swap(self, i, j):
"""Swap, in place, columns i and j.
Examples
========
>>> S = SparseMatrix.eye(3)
>>> S[2, 1] = 2
>>> S.col_swap(1, 0)
>>> S
Matrix([
[0, 1, 0],
[1, 0, 0],
[2, 0, 1]])
"""
if i > j:
i, j = j, i
rows = self.col_list()
temp = []
for ii, jj, v in rows:
if jj == i:
self._smat.pop((ii, jj))
temp.append((ii, v))
elif jj == j:
self._smat.pop((ii, jj))
self._smat[ii, i] = v
elif jj > j:
break
for k, v in temp:
self._smat[k, j] = v
def row_join(self, other):
"""Returns B appended after A (column-wise augmenting)::
[A B]
Examples
========
>>> A = SparseMatrix(((1, 0, 1), (0, 1, 0), (1, 1, 0)))
>>> A
Matrix([
[1, 0, 1],
[0, 1, 0],
[1, 1, 0]])
>>> B = SparseMatrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))
>>> B
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C = A.row_join(B)
>>> C
Matrix([
[1, 0, 1, 1, 0, 0],
[0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 1]])
>>> C == A.row_join(Matrix(B))
True
Joining at row ends is the same as appending columns at the end
of the matrix:
>>> C == A.col_insert(A.cols, B)
True
"""
A, B = self, other
if not self:
return type(self)(other)
if not A.rows == B.rows:
raise ShapeError()
A = A.copy()
if not isinstance(B, SparseMatrixBase):
k = 0
b = B._mat
for i in range(B.rows):
for j in range(B.cols):
v = b[k]
if v:
A._smat[(i, j + A.cols)] = v
k += 1
else:
for (i, j), v in B._smat.items():
A._smat[(i, j + A.cols)] = v
A.cols += B.cols
return A
def col_join(self, other):
"""Returns B augmented beneath A (row-wise joining)::
[A]
[B]
Examples
========
>>> A = SparseMatrix(ones(3))
>>> A
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
>>> B = SparseMatrix.eye(3)
>>> B
Matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C = A.col_join(B)
>>> C
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
>>> C == A.col_join(Matrix(B))
True
Joining along columns is the same as appending rows at the end
of the matrix:
>>> C == A.row_insert(A.rows, Matrix(B))
True
"""
A, B = self, other
if not self:
return type(self)(other)
if not A.cols == B.cols:
raise ShapeError()
A = A.copy()
if not isinstance(B, SparseMatrixBase):
k = 0
b = B._mat
for i in range(B.rows):
for j in range(B.cols):
v = b[k]
if v:
A._smat[(i + A.rows, j)] = v
k += 1
else:
for (i, j), v in B._smat.items():
A._smat[i + A.rows, j] = v
A.rows += B.rows
return A
def copyin_list(self, key, value):
from . import Matrix
if not is_sequence(value):
raise TypeError('`value` must be of type list or tuple.')
self.copyin_matrix(key, Matrix(value))
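# Illustrative note: slice assignment such as M[0:2, 0:2] = eye(2)
# is assumed to be routed through copyin_matrix below, with the
# slice pair as `key` and the Matrix as `value`.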
def copyin_matrix(self, key, value):
# include this here because it's not part of BaseMatrix
rlo, rhi, clo, chi = self.key2bounds(key)
shape = value.shape
dr, dc = rhi - rlo, chi - clo
if shape != (dr, dc):
raise ShapeError(
"The Matrix `value` doesn't have the same dimensions "
'as the sub-Matrix given by `key`.')
if not isinstance(value, SparseMatrixBase):
for i in range(value.rows):
for j in range(value.cols):
self[i + rlo, j + clo] = value[i, j]
else:
if (rhi - rlo)*(chi - clo) < len(self):
for i in range(rlo, rhi):
for j in range(clo, chi):
self._smat.pop((i, j), None)
else:
for i, j, v in self.row_list():
if rlo <= i < rhi and clo <= j < chi:
self._smat.pop((i, j), None)
for k, v in value._smat.items():
i, j = k
self[i + rlo, j + clo] = value[i, j]
def zip_row_op(self, i, k, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], self[k, j])``.
Examples
========
>>> M = SparseMatrix.eye(3)*2
>>> M[0, 1] = -1
>>> M.zip_row_op(1, 0, lambda v, u: v + 2*u)
>>> M
Matrix([
[2, -1, 0],
[4, 0, 0],
[0, 0, 2]])
See Also
========
row_op
col_op
"""
self.row_op(i, lambda v, j: f(v, self[k, j]))
def row_op(self, i, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], j)``.
Examples
========
>>> M = SparseMatrix.eye(3)*2
>>> M[0, 1] = -1
>>> M.row_op(1, lambda v, j: v + 2*M[0, j])
>>> M
Matrix([
[2, -1, 0],
[4, 0, 0],
[0, 0, 2]])
See Also
========
zip_row_op
col_op
"""
for j in range(self.cols):
v = self._smat.get((i, j), Integer(0))
fv = f(v, j)
if fv:
self._smat[(i, j)] = fv
elif v:
self._smat.pop((i, j))
def col_op(self, j, f):
"""In-place operation on col j using two-arg functor whose args are
interpreted as (self[i, j], i) for i in range(self.rows).
Examples
========
>>> M = SparseMatrix.eye(3)*2
>>> M[1, 0] = -1
>>> M.col_op(1, lambda v, i: v + 2*M[i, 0])
>>> M
Matrix([
[ 2, 4, 0],
[-1, 0, 0],
[ 0, 0, 2]])
"""
for i in range(self.rows):
v = self._smat.get((i, j), Integer(0))
fv = f(v, i)
if fv:
self._smat[(i, j)] = fv
elif v:
self._smat.pop((i, j))
def fill(self, value):
"""Fill self with the given value.
Notes
=====
Unless many values are going to be deleted (i.e. set to zero)
this will create a matrix that is slower than a dense matrix in
operations.
Examples
========
>>> M = SparseMatrix.zeros(3)
>>> M
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> M.fill(1)
>>> M
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
"""
if not value:
self._smat = {}
else:
v = self._sympify(value)
self._smat = {(i, j): v
for i in range(self.rows) for j in range(self.cols)}
SparseMatrix = MutableSparseMatrix
|
skirpichev/omg
|
diofant/matrices/sparse.py
|
Python
|
bsd-3-clause
| 42,378
|
[
"DIRAC"
] |
08cc63327c66c680f726dbe38d19c6056fbb3e097f7798b316845ad8449ed0eb
|
#!/usr/bin/env python
"""
galaxy-fuse.py will mount Galaxy datasets for direct read access using FUSE.
To do this you will need your Galaxy API key, found by logging into Galaxy and
selecting the menu option User -> API Keys. You can mount your Galaxy datasets
using a command like
python galaxy-fuse.py <api-key> &
This puts the galaxy-fuse process into the background. Galaxy Datasets will then
appear as read-only files, organised by History, under the directory galaxy_files.
galaxy-fuse was written by Dr David Powell and began life at
https://github.com/drpowell/galaxy-fuse .
Modified December 2016 by Madison Flannery.
"""
from errno import ENOENT
from stat import S_IFDIR, S_IFREG, S_IFLNK
from sys import argv, exit
import re
import time
import os
import argparse
from fuse import FUSE, FuseOSError, Operations, LoggingMixIn, fuse_get_context
from bioblend import galaxy
# number of seconds to cache history/dataset lookups
CACHE_TIME = 30
# Split a path into its type and a dict of components
def path_type(path):
parts = filter(lambda x: len(x)>0, path.split('/'))
if path=='/':
return ('root',dict())
elif path=='/histories':
return ('histories',dict())
elif len(parts)==2 and parts[0]=='histories':
return ('datasets',dict(h_name=unesc_filename(parts[1])))
elif len(parts)==3 and parts[0]=='histories':
# Path: histories/<history_name>/<data_name>
# OR histories/<history_name>/<collection_name>
return ('historydataorcoll',dict(h_name=unesc_filename(parts[1]), ds_name=unesc_filename(parts[2])))
elif len(parts)==4 and parts[0]=='histories':
# Path: histories/<history_name>/<coll_name>/<dataset_name>
return ('collectiondataset',dict(h_name=unesc_filename(parts[1]), c_name=unesc_filename(parts[2]),
ds_name=unesc_filename(parts[3])))
print "Unknown : %s"%path
return ('',0)
# Escape slashes (and percent signs) in filenames
def esc_filename(fname):
def esc(m):
c=m.group(0)
if c=='%':
return '%%'
elif c=='/':
return '%-'
return re.sub(r'%|/', esc, fname)
# Unescape slashes (and percent signs) in filenames
def unesc_filename(fname):
def unesc(m):
s = m.group(0)
if s == '%%':
return '%'
elif s == '%-':
return '/'
return re.sub(r'%(.)', unesc, fname)
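# Round-trip example: esc_filename('a/b%c') gives 'a%-b%%c', and
# unesc_filename('a%-b%%c') recovers 'a/b%c'.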
def parse_name_with_id(fname):
m = re.match(r"^(?P<name>.*)-(?P<id>[0-9a-f]{16})", fname)
if m is not None:
return (m.group('name'), m.group('id'))
else:
return (fname,'')
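# e.g. parse_name_with_id('My History-0123456789abcdef') returns
# ('My History', '0123456789abcdef'); names without a trailing
# 16-hex-digit id come back as (fname, '').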
class Context(LoggingMixIn, Operations):
'Prototype FUSE to galaxy histories'
def __init__(self, api_key):
self.gi = galaxy.GalaxyInstance(url='http://127.0.0.1:80/galaxy/', key=api_key)
self.filtered_datasets_cache = {}
self.full_datasets_cache = {}
self.histories_cache = {'time':None, 'contents':None}
def getattr(self, path, fh=None):
(typ,kw) = path_type(path)
now = time.time()
if typ=='root' or typ=='histories':
# Simple directory
st = dict(st_mode=(S_IFDIR | 0555), st_nlink=2)
st['st_ctime'] = st['st_mtime'] = st['st_atime'] = now
elif typ=='datasets':
# Simple directory
st = dict(st_mode=(S_IFDIR | 0555), st_nlink=2)
st['st_ctime'] = st['st_mtime'] = st['st_atime'] = now
elif typ=='historydataorcoll':
# Dataset or collection
d = self._dataset(kw)
if d['history_content_type'] == 'dataset_collection':
# A collection, will be a simple directory.
st = dict(st_mode=(S_IFDIR | 0555), st_nlink=2)
st['st_ctime'] = st['st_mtime'] = st['st_atime'] = now
else:
# A file, will be a symlink to a galaxy dataset.
t = time.mktime(time.strptime(d['update_time'],'%Y-%m-%dT%H:%M:%S.%f'))
fname = esc_filename(d.get('file_path', d['file_name']))
st = dict(st_mode=(S_IFLNK | 0444), st_nlink=1,
st_size=len(fname), st_ctime=t, st_mtime=t,
st_atime=t)
elif typ=='collectiondataset':
# A file within a collection, will be a symlink to a galaxy dataset.
d = self._dataset(kw, display=False)
t = time.mktime(time.strptime(d['update_time'],'%Y-%m-%dT%H:%M:%S.%f'))
fname = esc_filename(d.get('file_path', d['file_name']))
st = dict(st_mode=(S_IFLNK | 0444), st_nlink=1,
st_size=len(fname), st_ctime=t, st_mtime=t,
st_atime=t)
else:
raise FuseOSError(ENOENT)
return st
# Return a symlink for the given dataset
def readlink(self, path):
(typ,kw) = path_type(path)
if typ=='historydataorcoll':
# Dataset inside history.
d = self._dataset(kw)
# We have already checked that one of these keys is present
return d.get('file_path', d['file_name'])
elif typ=='collectiondataset':
# Dataset inside collection.
d = self._dataset(kw, display=False)
# We have already checked that one of these keys is present
return d.get('file_path', d['file_name'])
raise FuseOSError(ENOENT)
def read(self, path, size, offset, fh):
raise RuntimeError('unexpected path: %r' % path)
# Lookup all histories in galaxy; cache
def _histories(self):
cache = self.histories_cache
now = time.time()
if cache['contents'] is None or now - cache['time'] > CACHE_TIME:
cache['time'] = now
cache['contents'] = self.gi.histories.get_histories()
return cache['contents']
# Find a specific history by name
def _history(self,h_name):
(fixed_name, hist_id) = parse_name_with_id(h_name)
h = filter(lambda x: x['name']==fixed_name, self._histories())
if len(h)==0:
raise FuseOSError(ENOENT)
if len(h)>1:
h = filter(lambda x: x['id']==hist_id, self._histories())
if len(h)==0:
raise FuseOSError(ENOENT)
if len(h)>1:
print "Too many histories with identical names and IDs"
return h[0]
return h[0]
# Lookup visible datasets in the specified history; cache
# This will not return deleted or hidden datasets.
def _filtered_datasets(self, h):
id = h['id']
cache = self.filtered_datasets_cache
now = time.time()
if id not in cache or now - cache[id]['time'] > CACHE_TIME:
cache[id] = {'time':now,
'contents':self.gi.histories.show_history(id,contents=True,details='all', deleted=False, visible=True)}
return cache[id]['contents']
# Lookup all datasets in the specified history; cache
# This will return hidden datasets. Will not return deleted datasets.
def _all_datasets(self, h):
id = h['id']
cache = self.full_datasets_cache
now = time.time()
if id not in cache or now - cache[id]['time'] > CACHE_TIME:
cache[id] = {'time':now,
'contents':self.gi.histories.show_history(id,contents=True,details='all', deleted=False)}
return cache[id]['contents']
# Find a specific dataset - the 'kw' parameter is from path_type() above
# Will also handle dataset collections.
def _dataset(self, kw, display=True):
h = self._history(kw['h_name'])
if display:
ds = self._filtered_datasets(h)
else:
ds = self._all_datasets(h)
(d_name, d_id) = parse_name_with_id(kw['ds_name'])
d = filter(lambda x: x['name']==d_name, ds)
if len(d)==0:
raise FuseOSError(ENOENT)
if len(d)>1:
d = filter(lambda x: x['name']==d_name and x['id'] == d_id, ds)
if len(d)==0:
raise FuseOSError(ENOENT)
if len(d)>1:
print "Too many datasets with that name and ID"
return d[0]
# This is a collection. Deal with it upstream.
if d[0]['history_content_type'] == 'dataset_collection':
return d[0]
# Some versions of the Galaxy API use file_path and some file_name
if 'file_path' not in d[0] and 'file_name' not in d[0]:
print "Unable to find file of dataset. Have you set : expose_dataset_path = True"
raise FuseOSError(ENOENT)
return d[0]
# read directory contents
def readdir(self, path, fh):
(typ,kw) = path_type(path)
if typ=='root':
return ['.', '..', 'histories']
elif typ=='histories':
hl = self._histories()
# Count duplicates
hist_count = {}
for h in hl:
try:
hist_count[h['name']] += 1
except KeyError:
hist_count[h['name']] = 1
# Build up results manually
results = ['.', '..']
for h in hl:
if h['name'] in hist_count and hist_count[h['name']] > 1:
results.append(esc_filename(h['name'] + '-' + h['id']))
else:
results.append(esc_filename(h['name']))
return results
elif typ=='datasets':
h = self._history(kw['h_name'])
ds = self._filtered_datasets(h)
# Count duplicates
d_count = {}
for d in ds:
try:
d_count[d['name']] += 1
except KeyError:
d_count[d['name']] = 1
results = ['.', '..']
for d in ds:
if d['name'] in d_count and d_count[d['name']] > 1:
results.append(esc_filename(d['name'] + '-' + d['id']))
else:
results.append(esc_filename(d['name']))
return results
elif typ=='historydataorcoll':
# This is a dataset collection
# Get the datasets in the collection
ds = [x['object'] for x in self._dataset(kw)['elements']]
# Get all datasets - we need this for checking and handling duplicates
# Handles the situation in which duplicates in history and
# one (or more) of the duplicates are in collection.
h = self._history(kw['h_name'])
all_ds = self._all_datasets(h)
# Count duplicates
d_count = {}
for d in all_ds:
try:
d_count[d['name']] += 1
except KeyError:
d_count[d['name']] = 1
results = ['.', '..']
for d in ds:
if d['name'] in d_count and d_count[d['name']] > 1:
results.append(esc_filename(d['name'] + '-' + d['id']))
else:
results.append(esc_filename(d['name']))
return results
# Disable unused operations:
access = None
flush = None
getxattr = None
listxattr = None
open = None
opendir = None
release = None
releasedir = None
statfs = None
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Mount Galaxy Datasets for direct read access using FUSE.")
parser.add_argument("apikey",
help="Galaxy API key for the account to read")
parser.add_argument("-m", "--mountpoint", default="galaxy_files",
help="Directory under which to mount the Galaxy Datasets.")
args = parser.parse_args()
# Create the directory if it does not exist
if not os.path.exists(args.mountpoint):
os.makedirs(args.mountpoint)
fuse = FUSE(Context(args.apikey),
args.mountpoint,
foreground=True,
ro=True)
|
gvlproject/gvl_commandline_utilities
|
roles/gvl.commandline-utilities/files/galaxy-fuse.py
|
Python
|
mit
| 12,065
|
[
"Galaxy"
] |
5dd8ed2cb5b15b96fb089d2b3794d4b276e5924f5b8eca51df496dca6922757d
|
"""Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos (alexandre.tp@gmail.com)
# Bertrand Thirion <bertrand.thirion@inria.fr>
#
# Based on mixture.py by:
# Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp, pinvh, squared_norm
from ..utils.validation import check_is_fitted
from .. import cluster
from .gmm import GMM
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
def log_normalize(v, axis=0):
"""Normalized probabilities from unnormalized log-probabilites"""
v = np.rollaxis(v, axis)
v = v.copy()
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
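# For example, log_normalize(np.log([[1., 3.], [1., 1.]]), axis=0)
# returns approximately [[0.5, 0.75], [0.5, 0.25]]: each column of the
# exponentiated input is rescaled to sum to one.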
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
wishart-distributed random variable with the specified parameters."""
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
def _bound_wishart(a, B, detB):
"""Returns a function of the dof, scale matrix and its determinant
used as an upper bound in variational approximation of the evidence"""
n_features = B.shape[0]
logprior = wishart_logz(a, B, detB, n_features)
logprior -= wishart_logz(n_features,
np.identity(n_features),
1, n_features)
logprior += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
logprior += 0.5 * a * np.trace(B)
return logprior
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
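# For example, with covariance_type='diag', column k of the result is
# initial_bound - 0.5 * sum_d precs[k, d] * (X[:, d] - means[k, d])**2.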
class DPGMM(GMM):
"""Variational Inference for the Infinite Gaussian Mixture Model.
DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
is an infinite mixture model with the Dirichlet Process as a prior
distribution on the number of clusters. In practice the
approximate inference algorithm uses a truncated distribution with
a fixed maximum number of components, but almost always the number
of components actually used depends on the data.
Stick-breaking Representation of a Gaussian mixture model
probability distribution. This class allows for easy and efficient
inference of an approximate posterior distribution over the
parameters of a Gaussian mixture model with a variable number of
components (smaller than the truncation parameter n_components).
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <dpgmm>`.
Parameters
----------
n_components: int, default 1
Number of mixture components.
covariance_type: string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha: float, default 1
Real number representing the concentration parameter of
the dirichlet process. Intuitively, the Dirichlet Process
is as likely to start a new cluster for a point as it is
to add that point to a cluster with alpha elements. A
higher alpha means more clusters, as the expected number
of clusters is ``alpha*log(N)``.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_components : int
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
self.alpha = alpha
super(DPGMM, self).__init__(n_components, covariance_type,
random_state=random_state,
tol=tol, min_covar=min_covar,
n_iter=n_iter, params=params,
init_params=init_params, verbose=verbose)
def _get_precisions(self):
"""Return precisions as a full matrix."""
if self.covariance_type == 'full':
return self.precs_
elif self.covariance_type in ['diag', 'spherical']:
return [np.diag(cov) for cov in self.precs_]
elif self.covariance_type == 'tied':
return [self.precs_] * self.n_components
def _get_covars(self):
return [pinvh(c) for c in self._get_precisions()]
def _set_covars(self, covars):
raise NotImplementedError('The variational algorithm does not '
'support setting the covariance parameters.')
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
self.gamma_[0, 2])
for j in range(1, self.n_components):
dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
dgamma2[j] -= sd[j - 1]
dgamma = dgamma1 + dgamma2
# Free memory (and developers' cognitive load):
del dgamma1, dgamma2, sd
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dgamma
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
"""Update the concentration parameters for each cluster"""
sz = np.sum(z, axis=0)
self.gamma_.T[1] = 1. + sz
self.gamma_.T[2].fill(0)
for i in range(self.n_components - 2, -1, -1):
self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
self.gamma_.T[2] += self.alpha
def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
def _update_precisions(self, X, z):
"""Update the variational distributions for the precisions"""
n_features = X.shape[1]
if self.covariance_type == 'spherical':
self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
for k in range(self.n_components):
# could be more memory efficient ?
sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
self.scale_[k] = 1.
self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
self.bound_prec_[k] = (
0.5 * n_features * (
digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
elif self.covariance_type == 'diag':
for k in range(self.n_components):
self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
sq_diff = (X - self.means_[k]) ** 2 # see comment above
self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
z.T[k], (sq_diff + 1))
self.precs_[k] = self.dof_[k] / self.scale_[k]
self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
- np.log(self.scale_[k]))
self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
elif self.covariance_type == 'tied':
self.dof_ = 2 + X.shape[0] + n_features
self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
for k in range(self.n_components):
diff = X - self.means_[k]
self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_ = pinvh(self.scale_)
self.precs_ = self.dof_ * self.scale_
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = 2 + sum_resp + n_features
self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
diff = X - self.means_[k]
self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = self.dof_[k] * self.scale_[k]
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = 0.5 * wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
self.scale_[k])
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_.T[1])
print("covariance_type:", self.covariance_type)
def _do_mstep(self, X, z, params):
"""Maximize the variational lower bound
Update each of the parameters to maximize the lower bound."""
self._monitor(X, z, "z")
self._update_concentration(z)
self._monitor(X, z, "gamma")
if 'm' in params:
self._update_means(X, z)
self._monitor(X, z, "mu")
if 'c' in params:
self._update_precisions(X, z)
self._monitor(X, z, "a and b", end=True)
def _initialize_gamma(self):
"Initializes the concentration parameters"
self.gamma_ = self.alpha * np.ones((self.n_components, 3))
def _bound_concentration(self):
"""The variational lower bound for the concentration parameter."""
logprior = gammaln(self.alpha) * self.n_components
logprior += np.sum((self.alpha - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
logprior += np.sum(gammaln(self.gamma_.T[1]) +
gammaln(self.gamma_.T[2]))
logprior -= np.sum((self.gamma_.T[1] - 1) * (
digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior -= np.sum((self.gamma_.T[2] - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
return logprior
def _bound_means(self):
"The variational lower bound for the mean parameters"
logprior = 0.
logprior -= 0.5 * squared_norm(self.means_)
logprior -= 0.5 * self.means_.shape[1] * self.n_components
return logprior
def _bound_precisions(self):
"""Returns the bound term related to precisions"""
logprior = 0.
if self.covariance_type == 'spherical':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_
- self.precs_[:, 0])
elif self.covariance_type == 'diag':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
elif self.covariance_type == 'tied':
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k],
self.scale_[k],
self.det_scale_[k])
return logprior
def _bound_proportions(self, z):
"""Returns the bound term related to proportions"""
dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dg1 = digamma(self.gamma_.T[1]) - dg12
dg2 = digamma(self.gamma_.T[2]) - dg12
cz = np.cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
del cz # Save memory
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _logprior(self, z):
logprior = self._bound_concentration()
logprior += self._bound_means()
logprior += self._bound_precisions()
logprior += self._bound_proportions(z)
return logprior
def lower_bound(self, X, z):
"""returns a lower bound on model evidence based on X and membership"""
check_is_fitted(self, 'means_')
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
self.bound_prec_, self.precs_,
self.means_, self.covariance_type))
return c + self._logprior(z)
def _set_weights(self):
for i in xrange(self.n_components):
self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
+ self.gamma_[i, 2])
self.weights_ /= np.sum(self.weights_)
def _fit(self, X, y=None):
"""Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.random_state_ = check_random_state(self.random_state)
# initialization step
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
z = np.ones((n_samples, self.n_components))
z /= self.n_components
self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
self._initial_bound -= np.log(2 * np.pi * np.e)
if (self.init_params != '') or not hasattr(self, 'gamma_'):
self._initialize_gamma()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state_).fit(X).cluster_centers_[::-1]
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'precs_'):
if self.covariance_type == 'spherical':
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * n_features * (
digamma(self.dof_) - np.log(self.scale_))
elif self.covariance_type == 'diag':
self.dof_ = 1 + 0.5 * n_features
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
np.log(self.scale_), 1))
self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
elif self.covariance_type == 'tied':
self.dof_ = 1.
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
self.dof_ = (1 + self.n_components + n_samples)
self.dof_ *= np.ones(self.n_components)
self.scale_ = [2 * np.identity(n_features)
for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features)
for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= (self.dof_[k] *
np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
# Expectation step
curr_logprob, z = self.score_samples(X)
current_log_likelihood = (
curr_logprob.mean() + self._logprior(z) / n_samples)
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < self.tol:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, z, self.params)
if self.n_iter == 0:
# Need to make sure that there is a z value to output
# Output zeros because it was just a quick initialization
z = np.zeros((X.shape[0], self.n_components))
self._set_weights()
return z
class VBGMM(DPGMM):
"""Variational Inference for the Gaussian Mixture Model
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <vbgmm>`.
Parameters
----------
n_components: int, default 1
Number of mixture components.
covariance_type: string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha: float, default 1
Real number representing the concentration parameter of
the dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
DPGMM : Infinite Gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
super(VBGMM, self).__init__(
n_components, covariance_type, random_state=random_state,
tol=tol, verbose=verbose, min_covar=min_covar,
n_iter=n_iter, params=params, init_params=init_params)
self.alpha = float(alpha) / n_components
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dg
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
* self.alpha)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha)
* (digamma(self.gamma_) - sg))
return logprior
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_)
print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
|
DSLituiev/scikit-learn
|
sklearn/mixture/dpgmm.py
|
Python
|
bsd-3-clause
| 32,070
|
[
"Gaussian"
] |
48753ca6dabfda6881f2a2c161eb31d5f419d7676c09d1c55efca0f2a5fba825
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkRendererSource(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkRendererSource(), 'Processing.',
(), ('vtkImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
chrisidefix/devide
|
modules/vtk_basic/vtkRendererSource.py
|
Python
|
bsd-3-clause
| 476
|
[
"VTK"
] |
33a35a70e80bdff79256d88095758b0cb8d78f1e5a7989f8e42dbbf329cad0c5
|
# coding: utf-8
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import os.path
import pathlib
import yaml
from jinja2 import Environment, FileSystemLoader
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes
# Pylint doesn't understand Python3 namespace modules.
from ..change_detection import update_file_if_different # pylint: disable=relative-beyond-top-level
from ..commands import Command # pylint: disable=relative-beyond-top-level
from ..jinja2.filters import documented_type, rst_ify # pylint: disable=relative-beyond-top-level
DEFAULT_TEMPLATE_FILE = 'collections_galaxy_meta.rst.j2'
DEFAULT_TEMPLATE_DIR = pathlib.Path(__file__).parents[4] / 'docs/templates'
def normalize_options(options):
"""Normalize the options to make for easy templating"""
for opt in options:
if isinstance(opt['description'], string_types):
opt['description'] = [opt['description']]
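# e.g. an option {'description': 'One line'} becomes
# {'description': ['One line']}, so templates can always iterate a list.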
class DocumentCollectionMeta(Command):
name = 'collection-meta'
@classmethod
def init_parser(cls, add_parser):
parser = add_parser(cls.name, description='Generate collection galaxy.yml documentation from shared metadata')
parser.add_argument("-t", "--template-file", action="store", dest="template_file",
default=DEFAULT_TEMPLATE_FILE,
help="Jinja2 template to use for the config")
parser.add_argument("-T", "--template-dir", action="store", dest="template_dir",
default=str(DEFAULT_TEMPLATE_DIR),
help="directory containing Jinja2 templates")
parser.add_argument("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/',
help="Output directory for rst files")
parser.add_argument("collection_defs", metavar="COLLECTION-OPTION-DEFINITIONS.yml", type=str,
help="Source for collection metadata option docs")
@staticmethod
def main(args):
output_dir = os.path.abspath(args.output_dir)
template_file_full_path = os.path.abspath(os.path.join(args.template_dir, args.template_file))
template_file = os.path.basename(template_file_full_path)
template_dir = os.path.dirname(template_file_full_path)
with open(args.collection_defs) as f:
options = yaml.safe_load(f)
normalize_options(options)
env = Environment(loader=FileSystemLoader(template_dir),
variable_start_string="@{",
variable_end_string="}@",
trim_blocks=True)
env.filters['documented_type'] = documented_type
env.filters['rst_ify'] = rst_ify
template = env.get_template(template_file)
output_name = os.path.join(output_dir, template_file.replace('.j2', ''))
temp_vars = {'options': options}
data = to_bytes(template.render(temp_vars))
update_file_if_different(output_name, data)
return 0
|
azaghal/ansible
|
hacking/build_library/build_ansible/command_plugins/collection_meta.py
|
Python
|
gpl-3.0
| 3,270
|
[
"Galaxy"
] |
86c6bb67d451097490beaeecc9af80d7dcb10b090132e33f71dd42f2a7888905
|
#
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Plinth module to configure ownCloud
"""
from django.utils.translation import ugettext_lazy as _
from functools import partial
import os
from plinth import actions
from plinth import action_utils
from plinth import cfg
from plinth import service as service_module
version = 1
depends = ['apps']
managed_packages = ['postgresql', 'php5-pgsql', 'owncloud', 'php-dropbox',
'php-google-api-php-client']
title = _('File Hosting (ownCloud)')
description = [
_('ownCloud gives you universal access to your files through a web '
'interface or WebDAV. It also provides a platform to easily view '
'& sync your contacts, calendars and bookmarks across all your '
'devices and enables basic editing right on the web. Installation '
'has minimal server requirements, doesn\'t need special '
'permissions and is quick. ownCloud is extendable via a simple '
'but powerful API for applications and plugins.'),
_('When enabled, the ownCloud installation will be available '
'from <a href="/owncloud">/owncloud</a> path on the web server. '
'Visit this URL to set up the initial administration account for '
'ownCloud.')
]
service = None
def init():
"""Initialize the ownCloud module"""
# XXX: ownCloud has been removed from Debian
if not os.path.isfile('/etc/owncloud/config.php') and \
not os.path.isfile('/etc/owncloud/autoconfig.php'):
return
menu = cfg.main_menu.get('apps:index')
menu.add_urlname(title, 'glyphicon-picture', 'owncloud:index')
global service
setup_helper = globals()['setup_helper']
if setup_helper.get_state() != 'needs-setup':
service = service_module.Service(
'owncloud', title, ports=['http', 'https'], is_external=True,
is_enabled=is_enabled, enable=_enable, disable=_disable)
def setup(helper, old_version=None):
"""Install and configure the module."""
helper.install(managed_packages)
helper.call('post', actions.superuser_run, 'owncloud-setup', ['enable'])
global service
if service is None:
service = service_module.Service(
'owncloud', title, ports=['http', 'https'], is_external=True,
is_enabled=is_enabled, enable=_enable, disable=_disable)
helper.call('post', service.notify_enabled, None, True)
def get_status():
"""Return the current status"""
return {'enabled': is_enabled()}
def is_enabled():
"""Return whether the module is enabled."""
output = actions.run('owncloud-setup', ['status'])
return 'enable' in output.split()
def enable(should_enable):
"""Enable/disable the module."""
option = 'enable' if should_enable else 'noenable'
actions.superuser_run('owncloud-setup', [option])
# Send a signal to other modules that the service is
# enabled/disabled
service.notify_enabled(None, should_enable)
def diagnose():
"""Run diagnostics and return the results."""
results = []
results.extend(action_utils.diagnose_url_on_all(
'https://{host}/owncloud', check_certificate=False))
return results
_enable = partial(enable, True)
_disable = partial(enable, False)
|
freedomboxtwh/Plinth
|
plinth/modules/owncloud/__init__.py
|
Python
|
agpl-3.0
| 3,888
|
[
"VisIt"
] |
bcd65f8ce33ec9e67274069bdbf1f1276ddf47bbc5763ade30e2765fbd1859c2
|
#pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
from MooseCollapsible import MooseCollapsible
from MarkdownTable import MarkdownTable
from MooseLinkDatabase import MooseLinkDatabase
from MooseClassDatabase import MooseClassDatabase
from Builder import Builder
from moose_docs_file_tree import moose_docs_file_tree
from moose_docs_import import moose_docs_import
from moose_docs_app_syntax import moose_docs_app_syntax
from slugify import slugify
EXTENSIONS = ('.md', '.png', '.bmp', '.jpeg', '.svg', '.gif', '.webm', '.ogg', '.mp4', '.js',
'.css', '.bib')
|
liuwenf/moose
|
python/MooseDocs/common/__init__.py
|
Python
|
lgpl-2.1
| 1,905
|
[
"MOOSE"
] |
01b84146cbeb7601d47401e98550b70b9f4f8b5e6898bdc9f8f1e936a2cf4461
|
'''
Created on Jun 2, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import operator
from chimerascan import pysam
from seq import DNA_reverse_complement
#
# constants used for CIGAR alignments
#
CIGAR_M = 0 #match Alignment match (can be a sequence match or mismatch)
CIGAR_I = 1 #insertion Insertion to the reference
CIGAR_D = 2 #deletion Deletion from the reference
CIGAR_N = 3 #skip Skipped region from the reference
CIGAR_S = 4 #softclip Soft clip on the read (clipped sequence present in <seq>)
CIGAR_H = 5 #hardclip Hard clip on the read (clipped sequence NOT present in <seq>)
CIGAR_P = 6 #padding Padding (silent deletion from the padded reference sequence)
def parse_reads_by_qname(samfh):
"""
Generator that yields lists of consecutive
reads sharing the same qname.
"""
reads = []
for read in samfh:
if len(reads) > 0 and read.qname != reads[-1].qname:
yield reads
reads = []
reads.append(read)
if len(reads) > 0:
yield reads
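# Typical use, assuming `samfh` is a qname-sorted pysam file handle
# (process() below is a placeholder):
#     for reads in parse_reads_by_qname(samfh):
#         process(reads)  # all alignments sharing one query name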
def parse_pe_reads(bamfh):
pe_reads = ([], [])
# reads must be sorted by qname
num_reads = 0
prev_qname = None
for read in bamfh:
# get read attributes
qname = read.qname
readnum = 1 if read.is_read2 else 0
# if query name changes we have completely finished
# the fragment and can reset the read data
if num_reads > 0 and qname != prev_qname:
yield pe_reads
# reset state variables
pe_reads = ([], [])
num_reads = 0
pe_reads[readnum].append(read)
prev_qname = qname
num_reads += 1
if num_reads > 0:
yield pe_reads
def parse_unpaired_pe_reads(bamfh):
"""
Parses alignments that were aligned in single-read mode; all
hits are labeled as 'read1' and lack mate information. Instead,
the read1/read2 information is attached to the 'qname' field.
"""
pe_reads = ([], [])
num_reads = 0
prev_qname = None
for read in bamfh:
# extract read1/2 from qname
readnum = int(read.qname[-1])
if readnum == 1:
read.is_read1 = True
mate = 0
elif readnum == 2:
mate = 1
read.is_read2 = True
# reconstitute correct qname
qname = read.qname[:-2]
read.qname = qname
# if query name changes we have completely finished
# the fragment and can reset the read data
if num_reads > 0 and qname != prev_qname:
yield pe_reads
# reset state variables
pe_reads = ([], [])
num_reads = 0
pe_reads[mate].append(read)
prev_qname = qname
num_reads += 1
if num_reads > 0:
yield pe_reads
def select_best_mismatch_strata(reads, mismatch_tolerance=0):
if len(reads) == 0:
return []
# sort reads by number of mismatches
mapped_reads = []
unmapped_reads = []
for r in reads:
if r.is_unmapped:
unmapped_reads.append(r)
else:
mapped_reads.append((r.opt('NM'), r))
if len(mapped_reads) == 0:
return unmapped_reads
sorted_reads = sorted(mapped_reads, key=operator.itemgetter(0))
best_nm = sorted_reads[0][0]
worst_nm = sorted_reads[-1][0]
sorted_reads.extend((worst_nm+1, r) for r in unmapped_reads)
# choose reads within a certain mismatch tolerance
best_reads = []
for mismatches, r in sorted_reads:
if mismatches > (best_nm + mismatch_tolerance):
break
best_reads.append(r)
return best_reads
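# Hedged sketch (added for illustration): filtering to the best mismatch
# stratum with lightweight stand-ins for pysam reads; only the attributes
# used by select_best_mismatch_strata() are modeled.
def _example_best_strata():
    class _Read(object):
        def __init__(self, nm, unmapped=False):
            self.is_unmapped = unmapped
            self._nm = nm
        def opt(self, tag):
            return self._nm  # stands in for the 'NM' tag lookup
    reads = [_Read(0), _Read(1), _Read(3), _Read(0)]
    best = select_best_mismatch_strata(reads, mismatch_tolerance=1)
    return [r._nm for r in best]  # -> [0, 0, 1]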
def copy_read(r):
a = pysam.AlignedRead()
a.qname = r.qname
a.seq = r.seq
a.flag = r.flag
a.rname = r.rname
a.pos = r.pos
a.mapq = r.mapq
a.cigar = r.cigar
a.mrnm = r.mrnm
a.mpos = r.mpos
a.isize = r.isize
a.qual = r.qual
a.tags = r.tags
return a
def soft_pad_read(fq, r):
"""
'fq' is the fastq record
    'r' is the AlignedRead SAM record
"""
# make sequence soft clipped
ext_length = len(fq.seq) - len(r.seq)
cigar_softclip = [(CIGAR_S, ext_length)]
cigar = r.cigar
# reconstitute full length sequence in read
if r.is_reverse:
seq = DNA_reverse_complement(fq.seq)
qual = fq.qual[::-1]
if (cigar is not None) and (ext_length > 0):
cigar = cigar_softclip + cigar
else:
seq = fq.seq
qual = fq.qual
if (cigar is not None) and (ext_length > 0):
cigar = cigar + cigar_softclip
# replace read field
r.seq = seq
r.qual = qual
r.cigar = cigar
def pair_reads(r1, r2, tags=None):
'''
fill in paired-end fields in SAM record
'''
if tags is None:
tags = []
# convert read1 to paired-end
r1.is_paired = True
r1.is_proper_pair = True
r1.is_read1 = True
r1.mate_is_reverse = r2.is_reverse
r1.mate_is_unmapped = r2.is_unmapped
r1.mpos = r2.pos
r1.mrnm = r2.rname
r1.tags = r1.tags + tags
# convert read2 to paired-end
r2.is_paired = True
r2.is_proper_pair = True
r2.is_read2 = True
r2.mate_is_reverse = r1.is_reverse
r2.mate_is_unmapped = r1.is_unmapped
r2.mpos = r1.pos
r2.mrnm = r1.rname
r2.tags = r2.tags + tags
# compute insert size
if r1.rname != r2.rname:
r1.isize = 0
r2.isize = 0
elif r1.pos > r2.pos:
isize = r1.aend - r2.pos
r1.isize = -isize
r2.isize = isize
else:
isize = r2.aend - r1.pos
r1.isize = isize
r2.isize = -isize
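# Note (added): following the SAM convention, the leftmost mate is given a
# positive template length (isize) and the rightmost a negative one; mates
# mapped to different references get isize = 0.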
def get_clipped_interval(r):
cigar = r.cigar
padstart, padend = r.pos, r.aend
if len(cigar) > 1:
if (cigar[0][0] == CIGAR_S or
cigar[0][0] == CIGAR_H):
padstart -= cigar[0][1]
elif (cigar[-1][0] == CIGAR_S or
cigar[-1][0] == CIGAR_H):
padend += cigar[-1][1]
return padstart, padend
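# Hedged sketch (added for illustration): how get_clipped_interval() widens a
# read's interval by its leading soft clip; _Read is a minimal stand-in.
def _example_clipped_interval():
    class _Read(object):
        pos, aend = 100, 150
        cigar = [(CIGAR_S, 5), (CIGAR_M, 50)]
    return get_clipped_interval(_Read())  # -> (95, 150)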
|
genome-vendor/chimerascan
|
chimerascan/lib/sam.py
|
Python
|
gpl-3.0
| 6,694
|
[
"pysam"
] |
3b80738b8ba3a505b9447bd6a1e52e50861ce7328ffc33e2b24717a7b6705898
|
#!/usr/bin/env python
# B a r a K u d a
#
# Generate global plots related to wind
# wind stress, its curl, wind speed, etc
# Use climatology fields built with 'build_clim.sh'
#
# L. Brodeau, 2017
import sys
from os import path
import numpy as nmp
from netCDF4 import Dataset
import barakuda_tool as bt
import barakuda_plot as bp
cv_curl = 'socurl' ; # as generated by CDFTOOLS...
#CPAL_CURL='BrBG_r'
CPAL_CURL='ncview_jaisnb'
#CPAL_TAUM='ncview_nrl'
CPAL_TAUM='cubehelix_r'
CPAL_WNDM='CMRmap_r'
# Max values and increment for figures:
Rmax = 0.3 ; dR = 0.05 ; # Wind stress curl (10^-6 s)
Tmax = 0.3 ; dT = 0.025 ; # Wind stress module (N/m^2)
Wmax = 15. ; Wmin = 2. ; dW = 1. ; # Surface wind speed module (m/s)
venv_needed = {'ORCA','EXP','DIAG_D','MM_FILE','FIG_FORM','FILE_FLX_SUFFIX','NN_TAUM','NN_WNDM'}
vdic = bt.check_env_var(sys.argv[0], venv_needed)
CONFEXP = vdic['ORCA']+'-'+vdic['EXP']
cd_clim = vdic['DIAG_D']+'/clim'
path_fig='./'
fig_type=vdic['FIG_FORM']
narg = len(sys.argv)
if narg < 3: print 'Usage: '+sys.argv[0]+' <year1> <year2>'; sys.exit(0)
cy1 = sys.argv[1] ; cy2=sys.argv[2]; jy1=int(cy1); jy2=int(cy2)
jy1_clim = jy1 ; jy2_clim = jy2
print ' => mean on the clim : ', jy1_clim, jy2_clim, '\n'
ctag = CONFEXP+'_'+cy1+'-'+cy2
# Getting coordinates:
bt.chck4f(vdic['MM_FILE'])
id_mm = Dataset(vdic['MM_FILE'])
xlon = id_mm.variables['glamt'][0,:,:] ; xlat = id_mm.variables['gphit'][0,:,:]
Xmask = id_mm.variables['tmask'][0,0,:,:]
id_mm.close()
l_tau_is_annual = False
cf_nemo_curl = cd_clim+'/mclim_'+ctag+'_TCURL.nc4'
if not path.exists(cf_nemo_curl):
cf_nemo_curl = cd_clim+'/aclim_'+ctag+'_TCURL.nc4'
if path.exists(cf_nemo_curl):
l_tau_is_annual = True
print '\n *** wind.py : wind stress is annual...'
else:
        print '\n *** WARNING: wind.py : giving up, neither annual nor monthly curl clim found!'
sys.exit(0)
# Getting NEMO mean monthly climatology of wind stress module:
cextra_tau = ''
cv_taum = vdic['NN_TAUM']
if cv_taum != 'X':
cf_nemo_taum = cd_clim+'/mclim_'+ctag+'_'+vdic['FILE_FLX_SUFFIX']+'.nc4'
if l_tau_is_annual: cf_nemo_taum = cd_clim+'/aclim_'+ctag+'_'+vdic['FILE_FLX_SUFFIX']+'.nc4'
else:
#Falling back on what cdfcurl.x has computed from monthly averaged Taux and Tauy:
cf_nemo_taum = cf_nemo_curl
cv_taum = 'sotaum'
cextra_tau = ' (from monthly-averaged Tau_x & Tau_y !)'
if l_tau_is_annual: cextra_tau = ' (from annually-averaged Tau_x & Tau_y !)'
bt.chck4f(cf_nemo_taum)
id_nemo = Dataset(cf_nemo_taum)
Xtaum = id_nemo.variables[cv_taum][:,:,:]
id_nemo.close()
[ Nt, nj, ni ] = Xtaum.shape ; print ' Shape of TAUM :', Nt, nj, ni, '\n'
if Nt not in [1,12]:
print '\n *** ERROR: wind.py : only accepting monthly or annual climatologies!', Nt
sys.exit(0)
# Getting NEMO mean monthly climatology of CURL:
bt.chck4f(cf_nemo_curl)
id_nemo = Dataset(cf_nemo_curl)
Xcurl = id_nemo.variables[cv_curl][:,:,:]
id_nemo.close()
cextra_crl = ' (from monthly-averaged Tau_x & Tau_y !)'
if l_tau_is_annual: cextra_crl = ' (from annually-averaged Tau_x & Tau_y !)'
# Getting surface wind speed as seen in NEMO if we find it!!!
l_do_wndm = False
cv_wndm = vdic['NN_WNDM']
if cv_wndm != 'X':
cf_nemo_wndm = cd_clim+'/mclim_'+ctag+'_'+vdic['FILE_FLX_SUFFIX']+'.nc4'
if path.exists(cf_nemo_wndm):
id_nemo = Dataset(cf_nemo_wndm)
list_var = id_nemo.variables.keys()
if cv_wndm in list_var:
Xwndm = id_nemo.variables[cv_wndm][:,:,:]
l_do_wndm = True
            print '\n *** wind.py : we found wind speed in '+cf_nemo_wndm
id_nemo.close()
nper = 3
if l_tau_is_annual: nper = 1
Xtaum_plot = nmp.zeros((nper,nj,ni))
Xcurl_plot = nmp.zeros((3,nj,ni))
Xtaum_plot[0,:,:] = nmp.mean(Xtaum[:,:,:] ,axis=0) ; # Annual
Xcurl_plot[0,:,:] = nmp.mean(Xcurl[:,:,:] ,axis=0) ; # Annual
if not l_tau_is_annual:
Xtaum_plot[1,:,:] = nmp.mean(Xtaum[:3,:,:] ,axis=0) ; # Winter
Xtaum_plot[2,:,:] = nmp.mean(Xtaum[6:9,:,:],axis=0) ; # Summer
Xcurl_plot[1,:,:] = nmp.mean(Xcurl[:3,:,:] ,axis=0) ; # Winter
Xcurl_plot[2,:,:] = nmp.mean(Xcurl[6:9,:,:],axis=0) ; # Summer
if l_do_wndm:
    Xwndm_plot = nmp.zeros((nper,nj,ni))
    Xwndm_plot[0,:,:] = nmp.mean(Xwndm[:,:,:] ,axis=0) ; # Annual
    if not l_tau_is_annual:
        Xwndm_plot[1,:,:] = nmp.mean(Xwndm[:3,:,:] ,axis=0) ; # Winter
        Xwndm_plot[2,:,:] = nmp.mean(Xwndm[6:9,:,:],axis=0) ; # Summer
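# Note (added): with a monthly climatology, [:3] averages January-March (JFM,
# boreal winter) and [6:9] averages July-September (JAS, boreal summer).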
# the Jean-Marc Molines method:
ji_lat0 = nmp.argmax(xlat[nj-1,:])
# Annual Curl:
bp.plot("2d")(xlon[0,:], xlat[:,ji_lat0], Xcurl_plot[0,:,:], Xmask, -Rmax, Rmax, dR,
corca=vdic['ORCA'], lkcont=False, cpal=CPAL_CURL,
cfignm=path_fig+'tau_curl_annual_'+CONFEXP, cbunit=r'$(10^{-6}s^{-1})$',
ctitle='Wind stress curl, '+CONFEXP+' ('+cy1+'-'+cy2+')'+cextra_crl,
lforce_lim=False, i_cb_subsamp=1,
cfig_type=fig_type, lat_min=-77., lat_max=75., lpix=True)
if not l_tau_is_annual:
# JFM Curl:
bp.plot("2d")(xlon[0,:], xlat[:,ji_lat0], Xcurl_plot[1,:,:], Xmask, -Rmax, Rmax, dR,
corca=vdic['ORCA'], lkcont=False, cpal=CPAL_CURL,
cfignm=path_fig+'tau_curl_JFM_'+CONFEXP, cbunit=r'$(10^{-6}s^{-1})$',
ctitle='Wind stress curl, JFM, '+CONFEXP+' ('+cy1+'-'+cy2+')'+cextra_crl,
lforce_lim=False, i_cb_subsamp=1,
cfig_type=fig_type, lat_min=-77., lat_max=75., lpix=True)
# JAS Curl:
bp.plot("2d")(xlon[0,:], xlat[:,ji_lat0], Xcurl_plot[2,:,:], Xmask, -Rmax, Rmax, dR,
corca=vdic['ORCA'], lkcont=False, cpal=CPAL_CURL,
cfignm=path_fig+'tau_curl_JAS_'+CONFEXP, cbunit=r'$(10^{-6}s^{-1})$',
ctitle='Wind stress curl, JAS, '+CONFEXP+' ('+cy1+'-'+cy2+')'+cextra_crl,
lforce_lim=False, i_cb_subsamp=1,
cfig_type=fig_type, lat_min=-77., lat_max=75., lpix=True)
# Annual Taum:
bp.plot("2d")(xlon[0,:], xlat[:,ji_lat0], Xtaum_plot[0,:,:], Xmask, 0., Tmax, dT,
corca=vdic['ORCA'], lkcont=False, cpal=CPAL_TAUM,
cfignm=path_fig+'taum_annual_'+CONFEXP, cbunit=r'$(N/m^2)$',
ctitle='Wind stress module, '+CONFEXP+' ('+cy1+'-'+cy2+')'+cextra_tau,
lforce_lim=False, i_cb_subsamp=1,
cfig_type=fig_type, lat_min=-77., lat_max=75., lpix=True)
if not l_tau_is_annual:
# JFM Taum:
bp.plot("2d")(xlon[0,:], xlat[:,ji_lat0], Xtaum_plot[1,:,:], Xmask, 0., Tmax, dT,
corca=vdic['ORCA'], lkcont=False, cpal=CPAL_TAUM,
cfignm=path_fig+'taum_JFM_'+CONFEXP, cbunit=r'$(N/m^2)$',
ctitle='Wind stress module, JFM, '+CONFEXP+' ('+cy1+'-'+cy2+')'+cextra_tau,
lforce_lim=False, i_cb_subsamp=1,
cfig_type=fig_type, lat_min=-77., lat_max=75., lpix=True)
# JAS Taum:
bp.plot("2d")(xlon[0,:], xlat[:,ji_lat0], Xtaum_plot[2,:,:], Xmask, 0., Tmax, dT,
corca=vdic['ORCA'], lkcont=False, cpal=CPAL_TAUM,
cfignm=path_fig+'taum_JAS_'+CONFEXP, cbunit=r'$(N/m^2)$',
ctitle='Wind stress module, JAS, '+CONFEXP+' ('+cy1+'-'+cy2+')'+cextra_tau,
lforce_lim=False, i_cb_subsamp=1,
cfig_type=fig_type, lat_min=-77., lat_max=75., lpix=True)
if l_do_wndm:
    # Annual Wndm:
    bp.plot("2d")(xlon[0,:], xlat[:,ji_lat0], Xwndm_plot[0,:,:], Xmask, Wmin, Wmax, dW,
                  corca=vdic['ORCA'], lkcont=False, cpal=CPAL_WNDM,
                  cfignm=path_fig+'wndm_annual_'+CONFEXP, cbunit=r'$(m/s)$',
                  ctitle='Surface wind speed module, '+CONFEXP+' ('+cy1+'-'+cy2+')'+cextra_tau,
                  lforce_lim=False, i_cb_subsamp=1,
                  cfig_type=fig_type, lat_min=-77., lat_max=75., lpix=True)
    if not l_tau_is_annual:
        # JFM Wndm:
        bp.plot("2d")(xlon[0,:], xlat[:,ji_lat0], Xwndm_plot[1,:,:], Xmask, Wmin, Wmax, dW,
                      corca=vdic['ORCA'], lkcont=False, cpal=CPAL_WNDM,
                      cfignm=path_fig+'wndm_JFM_'+CONFEXP, cbunit=r'$(m/s)$',
                      ctitle='Surface wind speed module, JFM, '+CONFEXP+' ('+cy1+'-'+cy2+')'+cextra_tau,
                      lforce_lim=False, i_cb_subsamp=1,
                      cfig_type=fig_type, lat_min=-77., lat_max=75., lpix=True)
        # JAS Wndm:
        bp.plot("2d")(xlon[0,:], xlat[:,ji_lat0], Xwndm_plot[2,:,:], Xmask, Wmin, Wmax, dW,
                      corca=vdic['ORCA'], lkcont=False, cpal=CPAL_WNDM,
                      cfignm=path_fig+'wndm_JAS_'+CONFEXP, cbunit=r'$(m/s)$',
                      ctitle='Surface wind speed module, JAS, '+CONFEXP+' ('+cy1+'-'+cy2+')'+cextra_tau,
                      lforce_lim=False, i_cb_subsamp=1,
                      cfig_type=fig_type, lat_min=-77., lat_max=75., lpix=True)
|
brodeau/barakuda
|
python/exec/wind.py
|
Python
|
gpl-2.0
| 8,877
|
[
"ORCA"
] |
4e6b6f97e398ebc790873c50b5901ab0eb311e50b5e2d8812f3764fb2198019a
|
#
#@BEGIN LICENSE
#
# PSI4: an ab initio quantum chemistry software package
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#@END LICENSE
#
from __future__ import print_function
"""Queries the PubChem database using a compound name (i.e. 1,3,5-hexatriene)
to obtain a molecule string that can be passed to Molecule. ::
results = getPubChemObj("1,3,5-hexatriene")
Results is an array of results from PubChem matches to your query.
for entry in results:
entry["CID"] => PubChem compound identifer
entry["IUPAC"] => IUPAC name for the resulting compound
entry["PubChemObj"] => instance of PubChemObj for this compound
entry["PubChemObj"].getMoleculeString() => returns a string compatible
with PSI4's Molecule creation
"""
try:
# Python 2 syntax
from urllib2 import urlopen
from urllib2 import quote
from urllib2 import URLError
except ImportError:
# Python 3 syntax
from urllib.request import urlopen
from urllib.parse import quote
from urllib.error import URLError
import re
import sys
class PubChemObj(object):
def __init__(self, cid, mf, iupac):
self.url = 'http://pubchem.ncbi.nlm.nih.gov/summary/summary.cgi'
self.cid = cid
self.mf = mf
self.iupac = iupac
self.natom = 0
self.dataSDF = ''
def __str__(self):
return "%17d %s\n" % (self.cid, self.iupac)
def getSDF(self):
"""Function to return the SDF (structure-data file) of the PubChem object."""
if (len(self.dataSDF) == 0):
# When completed uncomment the following:
url = self.url + '?cid=' + quote(str(self.cid)) + '&disopt=3DDisplaySDF'
try:
location = urlopen(url)
except URLError as e:
msg = "\tPubchemError\n%s\n\treceived when trying to open\n\t%s\n" % (str(e), url)
msg += "\tCheck your internet connection, and the above URL, and try again.\n"
raise Exception(msg)
print("\tRetrieved entry for chemical ID %d\n" % self.cid)
self.dataSDF = location.read().decode(sys.getdefaultencoding())
#f = open("TEST", "w")
#f.write(self.dataSDF)
return self.dataSDF
def name(self):
"""Function to return the IUPAC name of the PubChem object."""
return self.iupac
def getCartesian(self):
"""Function to return a string of the atom symbol and XYZ
coordinates of the PubChem object.
"""
        sdfText = self.getSDF()
# Find
# NA NB CONSTANT
# 14 13 0 0 0 0 0 0 0999 V2000
m = re.search(r'^\s*(\d+)\s+(?:\d+\s+){8}V2000$', sdfText, re.MULTILINE)
self.natom = 0
if (m):
self.natom = int(m.group(1))
if (self.natom == 0):
raise Exception("PubchemError\n Cannot find the number of atoms. 3D data doesn't appear\n" +
"to be available for %s.\n" % self.iupac)
lines = re.split('\n', sdfText)
# 3.7320 -0.2500 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0
NUMBER = "((?:[-+]?\\d*\\.\\d+(?:[DdEe][-+]?\\d+)?)|(?:[-+]?\\d+\\.\\d*(?:[DdEe][-+]?\\d+)?))"
atom_re = re.compile(r'^\s*' + NUMBER + r'\s+' + NUMBER + r'\s+' + NUMBER + r'\s*(\w+)(?:\s+\d+){12}')
molecule_string = "PubchemInput\n"
atom_count = 0
for line in lines:
if (not line or line.isspace()):
continue
atom_match = atom_re.match(line)
if atom_match:
x = float(atom_match.group(1))
y = float(atom_match.group(2))
z = float(atom_match.group(3))
sym = atom_match.group(4)
atom_count = atom_count + 1
molecule_string += "%s %10.6f %10.6f %10.6f\n" % (sym, x, y, z)
if (atom_count == self.natom):
break
return molecule_string
def getXYZFile(self):
"""Function to obtain preferentially a molecule string
through getCartesian() or a query string otherwise.
"""
        temp = self.getCartesian()
molstr = "%d\n%s\n%s" % (self.natom, self.iupac, temp)
return molstr
def getMoleculeString(self):
"""Function to obtain a molecule string through
getCartesian() or fail.
"""
try:
return self.getCartesian()
except Exception as e:
            return str(e)
def getPubChemResults(name):
"""Function to query the PubChem database for molecules matching the
input string. Builds a PubChem object if found.
"""
url = 'http://www.ncbi.nlm.nih.gov/sites/entrez?db=pccompound&term=%s&format=text' % quote(name)
print("\tSearching PubChem database for %s" % (name))
try:
loc = urlopen(url)
except URLError as e:
msg = "\tPubchemError\n%s\n\treceived when trying to open\n\t%s\n" % (str(e), url)
msg += "\tCheck your internet connection, and the above URL, and try again.\n"
raise Exception(msg)
data = loc.read()
ans = []
l = data.find(b"<pre>")
l = data.find(b"\n", l)
i = 1
while(True):
l = data.find(str("%d. " % i).encode(sys.getdefaultencoding()), l)
if l == -1:
break
tag = b"MF: "
l = data.find(tag, l) + len(tag)
mf = data[l:data.find(b'\n', l)].decode(sys.getdefaultencoding())
tag = b"IUPAC name: "
l = data.find(tag, l) + len(tag)
iupac = data[l:data.find(b'\n', l)].decode(sys.getdefaultencoding())
tag = b"CID:"
l = data.find(tag, l) + len(tag)
#if l == 4:
# break
cid = int(data[l:data.find(b"\n", l)])
l = data.find(b'\t', l) + 1
pubobj = PubChemObj(cid, mf, iupac)
ans.append(pubobj)
i += 1
print("\tFound %d results" % (len(ans)))
return ans
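# Note (added): the module docstring above refers to getPubChemObj(); in this
# file the corresponding query entry point is getPubChemResults().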
if __name__ == "__main__":
try:
obj = getPubChemResults("1-methoxy-4-[(E)-prop-1-enyl]benzene")
#obj = getPubChemResults("sodium benzenesulfonate")
    except Exception as e:
        print(e)
        sys.exit(1)
for r in obj:
print(r)
print(r.getMoleculeString())
|
spring01/libPSI
|
lib/python/pubchem.py
|
Python
|
gpl-2.0
| 7,182
|
[
"Psi4"
] |
7eff2ccb48476380246c3b8b783ec70c6d9407adf191d94f2bab15d7ffb0a5fb
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# from https://github.com/jameslz/benchmark
import sys
if len(sys.argv) != 4:
print "\nUsage: %s <alignment> <evalue> <bit_score>\n" % sys.argv[0]
sys.exit(1)
blast = sys.argv[1]
max_evalue = float(sys.argv[2])
min_bit_score = float(sys.argv[3])
with open(blast, 'r') as fp:
init = ""
for line in fp:
if not line.startswith("#"):
item = line.strip().split("\t")
evalue = float(item[10])
bit_score = float(item[11])
if init != item[0]:
if evalue <= max_evalue and bit_score >= min_bit_score:
print line.strip()
init = item[0]
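# Hedged usage sketch (added; the file name is illustrative):
#   python blast_best_hit_outfmt6.py hits.outfmt6 1e-5 50
# prints each query's top-ranked hit, provided it passes both the e-value and
# bit-score thresholds, assuming the tabular (outfmt 6) input keeps each
# query's hits grouped and ranked as BLAST emits them.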
|
shenwei356/bio_scripts
|
blast/blast_best_hit_outfmt6.py
|
Python
|
mit
| 726
|
[
"BLAST"
] |
083cda7c372e1726721298c377ac465bcb353f9cd73a83a21578b422bed02d80
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Generates molecules that satisfy two targets.
Used a single Q-function as policy
Target1: SAS
Target2: QED
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import os
from absl import app
from absl import flags
from rdkit import Chem
from rdkit.Chem import QED
from rdkit.Contrib import SA_Score
from tensorflow.compat.v1 import gfile
from mol_dqn.chemgraph.mcts import deep_q_networks
from mol_dqn.chemgraph.mcts import molecules as molecules_mdp
from mol_dqn.chemgraph.mcts import run_dqn
from mol_dqn.chemgraph.tensorflow import core
flags.DEFINE_float('target_sas', 1, 'The target SAS of the molecule.')
flags.DEFINE_float('target_qed', 0.5, 'The target QED of the molecule.')
flags.DEFINE_boolean('use_multiply', True,
                     'If True, multiply the two closeness terms; otherwise sum them.')
flags.DEFINE_float('gamma', 0.999, 'Discount factor applied per remaining step.')
FLAGS = flags.FLAGS
class MultiObjectiveRewardMolecule(molecules_mdp.Molecule):
"""Defines the subclass of generating a molecule with a specific reward.
The reward is defined as a 1-D vector with 2 entries: similarity and QED
reward = (similarity_score, qed_score)
"""
def _reward(self):
"""Calculates the reward of the current state.
The reward is defined as a tuple of the similarity and QED value.
Returns:
A tuple of the similarity and qed value
"""
# calculate similarity.
# if the current molecule does not contain the scaffold of the target,
# similarity is zero.
if self._state is None:
return 0.0, 0.0
mol = Chem.MolFromSmiles(self._state)
if mol is None:
return 0.0, 0.0
qed_value = QED.qed(mol)
sas = SA_Score.sascorer.calculateScore(mol)
return -abs(sas - FLAGS.target_sas), -abs(qed_value - FLAGS.target_qed)
def soft_cst(v, l, r):
if l <= v <= r:
return 1
return -min(abs(l - v), abs(r - v))
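# Note (added): soft_cst is a soft interval constraint (1 inside [l, r],
# negative distance to the interval outside); it is defined here but not
# referenced by the classes below.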
class Molecule(molecules_mdp.Molecule):
"""SAS and QED reward molecules."""
def _reward(self):
"""Calculates the reward of the current state.
The reward is defined as a tuple of the similarity and QED value.
Returns:
A tuple of the similarity and qed value
"""
# calculate similarity.
# if the current molecule does not contain the scaffold of the target,
# similarity is zero.
if self._state is None:
return 0.0
mol = Chem.MolFromSmiles(self._state)
if mol is None:
return 0.0
qed_value = QED.qed(mol)
sas = SA_Score.sascorer.calculateScore(mol)
c1 = -abs(sas - FLAGS.target_sas)
c2 = -abs(qed_value - FLAGS.target_qed)
if FLAGS.use_multiply:
if c1 < 0 and c2 < 0:
reward = -c1 * c2
else:
reward = c1 * c2
else:
reward = (c1 + c2)
return reward * FLAGS.gamma**(self.max_steps - self._counter)
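# Hedged sketch (added for illustration): the reward-combination rule used in
# Molecule._reward above, isolated as a pure function over toy closeness values.
def _example_reward_combination(c1=-0.4, c2=-0.1, use_multiply=True):
  if use_multiply:
    # both terms negative: flip the sign so the product stays negative
    # and approaches 0 as both objectives hit their targets
    return -c1 * c2 if (c1 < 0 and c2 < 0) else c1 * c2
  return c1 + c2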
def main(argv):
del argv
if FLAGS.hparams is not None:
with gfile.Open(FLAGS.hparams, 'r') as f:
hparams = deep_q_networks.get_hparams(**json.load(f))
else:
hparams = deep_q_networks.get_hparams()
hparams.add_hparam('target_qed', FLAGS.target_qed)
hparams.add_hparam('target_sas', FLAGS.target_sas)
environment = Molecule(
atom_types=set(hparams.atom_types),
init_mol='CCc1c(C)[nH]c2CCC(CN3CCOCC3)C(=O)c12',
allow_removal=hparams.allow_removal,
allow_no_modification=hparams.allow_no_modification,
allow_bonds_between_rings=False,
allowed_ring_sizes={3, 4, 5, 6},
max_steps=hparams.max_steps_per_episode)
dqn = deep_q_networks.DeepQNetwork(
input_shape=(hparams.batch_size, hparams.fingerprint_length + 1),
q_fn=functools.partial(
deep_q_networks.multi_layer_model, hparams=hparams),
optimizer=hparams.optimizer,
grad_clipping=hparams.grad_clipping,
num_bootstrap_heads=hparams.num_bootstrap_heads,
gamma=hparams.gamma,
epsilon=1.0)
run_dqn.run_training(
hparams=hparams,
environment=environment,
dqn=dqn,
)
core.write_hparams(hparams, os.path.join(FLAGS.model_dir, 'config.json'))
if __name__ == '__main__':
app.run(main)
|
google-research/google-research
|
mol_dqn/experimental/multi_obj_gen.py
|
Python
|
apache-2.0
| 4,724
|
[
"RDKit"
] |
dd19a4c3ef79a052027ebdaa6eda8dff8026e2755a1f7cbb5521ca31f35843a6
|
########################################################################
#
# (C) 2013, James Cammarata <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import re
import shutil
import sys
import time
import yaml
from jinja2 import Environment, FileSystemLoader
import ansible.constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.token import GalaxyToken
from ansible.module_utils._text import to_text
from ansible.playbook.role.requirement import RoleRequirement
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyCLI(CLI):
'''command to manage Ansible roles in shared repositories, the default of which is Ansible Galaxy *https://galaxy.ansible.com*.'''
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url")
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
def __init__(self, args):
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def set_action(self):
super(GalaxyCLI, self).set_action()
# specific to actions
if self.action == "delete":
self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
self.parser.set_description("Removes the role from Galaxy. It does not remove or alter the actual GitHub repository.")
elif self.action == "import":
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
self.parser.set_description("Import a role.")
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
self.parser.add_option('--branch', dest='reference',
help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
self.parser.add_option('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
help='Check the status of the most recent import request for given github_user/github_repo.')
elif self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
self.parser.set_description("View more details about a specific role.")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
self.parser.set_description("Initialize new role with the base structure of a role.")
self.parser.add_option('--init-path', dest='init_path', default="./",
help='The path in which the skeleton role will be created. The default is the current working directory.')
self.parser.add_option('--type', dest='role_type', action='store', default='default',
help="Initialize using an alternate role type. Valid types include: 'container', 'apb' and 'network'.")
self.parser.add_option('--role-skeleton', dest='role_skeleton', default=C.GALAXY_ROLE_SKELETON,
help='The path to a role skeleton that the new role should be based upon.')
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
self.parser.set_description("Install Roles from file(s), URL(s) or tar file(s)")
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
self.parser.add_option('-g', '--keep-scm-meta', dest='keep_scm_meta', action='store_true',
default=False, help='Use tar instead of the scm archive option when packaging the role')
elif self.action == "remove":
self.parser.set_usage("usage: %prog remove role1 role2 ...")
self.parser.set_description("Delete a role from roles_path.")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
self.parser.set_description("Show the name and version of each role installed in the roles_path.")
elif self.action == "login":
self.parser.set_usage("usage: %prog login [options]")
self.parser.set_description("Login to api.github.com server in order to use ansible-galaxy sub command such as 'import', 'delete' and 'setup'.")
self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
elif self.action == "search":
self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] "
"[--author username]")
self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author', help='GitHub username')
self.parser.set_description("Search the Galaxy database by tags, platforms, author and multiple keywords.")
elif self.action == "setup":
self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
self.parser.add_option('--remove', dest='remove_id', default=None,
help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
self.parser.set_description("Manage the integration between Galaxy and the given source.")
# options that apply to more than one action
if self.action in ['init', 'info']:
self.parser.add_option('--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")
if self.action not in ("delete", "import", "init", "login", "setup"):
# NOTE: while the option type=str, the default is a list, and the
# callback will set the value to a list.
self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.unfrack_paths, default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg'
' file (/etc/ansible/roles if not configured)', type='str')
if self.action in ("init", "install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
def parse(self):
''' create an options parser for bin/ansible '''
self.parser = CLI.base_parser(
usage="usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0]),
desc="Perform various Role related operations.",
)
# common
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS,
help='Ignore SSL certificate validation errors.')
self.set_action()
super(GalaxyCLI, self).parse()
display.verbosity = self.options.verbosity
self.galaxy = Galaxy(self.options)
def run(self):
super(GalaxyCLI, self).run()
self.api = GalaxyAPI(self.galaxy)
self.execute()
def exit_without_ignore(self, rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not self.options.ignore_errors:
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
def _display_role_info(self, role_info):
text = [u"", u"Role: %s" % to_text(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in self.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in self.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
############################
# execute actions
############################
def execute_init(self):
"""
creates the skeleton framework of a role that complies with the galaxy metadata format.
"""
init_path = self.options.init_path
force = self.options.force
role_skeleton = self.options.role_skeleton
role_name = self.args.pop(0).strip() if self.args else None
if not role_name:
raise AnsibleOptionsError("- no role name specified for init")
role_path = os.path.join(init_path, role_name)
if os.path.exists(role_path):
if os.path.isfile(role_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
elif not force:
raise AnsibleError("- the directory %s already exists."
"you can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % role_path)
inject_data = dict(
role_name=role_name,
author='your name',
description='your description',
company='your company (optional)',
license='license (GPLv2, CC-BY, etc)',
issue_tracker_url='http://example.com/issue/tracker',
min_ansible_version='2.4',
role_type=self.options.role_type
)
# create role directory
if not os.path.exists(role_path):
os.makedirs(role_path)
if role_skeleton is not None:
skeleton_ignore_expressions = C.GALAXY_ROLE_SKELETON_IGNORE
else:
role_skeleton = self.galaxy.default_role_skeleton_path
skeleton_ignore_expressions = ['^.*/.git_keep$']
role_skeleton = os.path.expanduser(role_skeleton)
skeleton_ignore_re = [re.compile(x) for x in skeleton_ignore_expressions]
template_env = Environment(loader=FileSystemLoader(role_skeleton))
for root, dirs, files in os.walk(role_skeleton, topdown=True):
rel_root = os.path.relpath(root, role_skeleton)
in_templates_dir = rel_root.split(os.sep, 1)[0] == 'templates'
dirs[:] = [d for d in dirs if not any(r.match(d) for r in skeleton_ignore_re)]
for f in files:
filename, ext = os.path.splitext(f)
if any(r.match(os.path.join(rel_root, f)) for r in skeleton_ignore_re):
continue
elif ext == ".j2" and not in_templates_dir:
src_template = os.path.join(rel_root, f)
dest_file = os.path.join(role_path, rel_root, filename)
template_env.get_template(src_template).stream(inject_data).dump(dest_file)
else:
f_rel_path = os.path.relpath(os.path.join(root, f), role_skeleton)
shutil.copyfile(os.path.join(root, f), os.path.join(role_path, f_rel_path))
for d in dirs:
dir_path = os.path.join(role_path, rel_root, d)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
display.display("- %s was created successfully" % role_name)
def execute_info(self):
"""
prints out detailed information about an installed role as well as info available from the galaxy API.
"""
if len(self.args) == 0:
# the user needs to specify a role
raise AnsibleOptionsError("- you must specify a user/role name")
roles_path = self.options.roles_path
data = ''
for role in self.args:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
                    install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not self.options.offline:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec = req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
# FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
        uses the args list of roles to be installed, unless -r/--role-file was specified. Each role
        can be a name (which will be downloaded via the galaxy API and github) or a local .tar.gz file.
"""
role_file = self.options.role_file
if len(self.args) == 0 and role_file is None:
# the user needs to specify one of either --role-file or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
no_deps = self.options.no_deps
force = self.options.force
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)
if required_roles is None:
raise AnsibleError("No roles found in file: %s" % role_file)
for role in required_roles:
if "include" not in role:
role = RoleRequirement.role_yaml_parse(role)
display.vvv("found role %s in yaml file" % str(role))
if "name" not in role and "scm" not in role:
raise AnsibleError("Must specify name or src for role")
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
with open(role["include"]) as f_include:
try:
roles_left += [
GalaxyRole(self.galaxy, **r) for r in
(RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))
]
except Exception as e:
msg = "Unable to load data from the include requirements file: %s %s"
raise AnsibleError(msg % (role_file, e))
else:
raise AnsibleError("Invalid role requirements file")
f.close()
except (IOError, OSError) as e:
raise AnsibleError('Unable to open %s: %s' % (role_file, str(e)))
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in self.args:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
# only process roles in roles files when names matches if given
if role_file and self.args and role.name not in self.args:
display.vvv('Skipping role %s' % role.name)
continue
display.vvv('Processing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None:
if role.install_info['version'] != role.version or force:
if force:
display.display('- changing role %s from %s to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
role.remove()
else:
display.warning('- %s (%s) is already installed - use --force to change version to %s' %
(role.name, role.install_info['version'], role.version or "unspecified"))
continue
else:
if not force:
display.display('- %s is already installed, skipping.' % str(role))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
if not role.metadata:
display.warning("Meta file %s is empty. Skipping dependencies." % role.path)
else:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % str(dep_role))
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
if dep_role.install_info['version'] != dep_role.version:
display.warning('- dependency %s from role %s differs from already installed version (%s), skipping' %
(str(dep_role), role.name, dep_role.install_info['version']))
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
removes the list of roles passed as arguments from the local system.
"""
if len(self.args) == 0:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in self.args:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))
return 0
def execute_list(self):
"""
lists the roles installed on the local system or matches a single role passed as an argument.
"""
if len(self.args) > 1:
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
if len(self.args) == 1:
# show only the request role, if it exists
name = self.args.pop()
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
# show some more info about single roles here
display.display("- %s, %s" % (name, version))
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = self.options.roles_path
path_found = False
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
display.warning("- the configured path %s does not exist." % role_path)
continue
elif not os.path.isdir(role_path):
display.warning("- the configured path %s, exists, but it is not a directory." % role_path)
continue
path_files = os.listdir(role_path)
path_found = True
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (path_file, version))
if not path_found:
raise AnsibleOptionsError("- None of the provided paths was usable. Please specify a valid path with --roles-path")
return 0
def execute_search(self):
''' searches for roles on the Ansible Galaxy server'''
page_size = 1000
search = None
if len(self.args):
terms = []
for i in range(len(self.args)):
terms.append(self.args.pop())
search = '+'.join(terms[::-1])
if not search and not self.options.platforms and not self.options.galaxy_tags and not self.options.author:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=self.options.platforms,
tags=self.options.galaxy_tags, author=self.options.author, page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
def execute_login(self):
"""
        verify the user's identity via GitHub and retrieve an auth token from Ansible Galaxy.
"""
# Authenticate with github and retrieve a token
if self.options.token is None:
if C.GALAXY_TOKEN:
github_token = C.GALAXY_TOKEN
else:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = self.options.token
galaxy_response = self.api.authenticate(github_token)
if self.options.token is None and C.GALAXY_TOKEN is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Successfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
""" used to import a role into Ansible Galaxy """
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
if len(self.args) < 2:
raise AnsibleError("Expected a github_username and github_repository. Use --help.")
github_repo = to_text(self.args.pop(), errors='surrogate_or_strict')
github_user = to_text(self.args.pop(), errors='surrogate_or_strict')
if self.options.check_status:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference, role_name=self.options.role_name)
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user, github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'], t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\nTo properly namespace this role, remove each of the above and re-import %s/%s from scratch' % (github_user, github_repo),
color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not self.options.wait:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'], task[0]['github_repo']))
if self.options.check_status or self.options.wait:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
def execute_setup(self):
""" Setup an integration from Github or Travis for Ansible Galaxy roles"""
if self.options.setup_list:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']), color=C.COLOR_OK)
return 0
if self.options.remove_id:
# Remove a secret
self.api.remove_secret(self.options.remove_id)
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
if len(self.args) < 4:
raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")
secret = self.args.pop()
github_repo = self.args.pop()
github_user = self.args.pop()
source = self.args.pop()
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
def execute_delete(self):
""" Delete a role from Ansible Galaxy. """
if len(self.args) < 2:
raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")
github_repo = self.args.pop()
github_user = self.args.pop()
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id, role.namespace, role.name))
display.display(resp['status'])
return True
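    # Hedged usage examples (added; names are illustrative):
    #   ansible-galaxy install geerlingguy.apache
    #   ansible-galaxy init --type container my_role
    #   ansible-galaxy search elasticsearch --author geerlingguy
    #   ansible-galaxy list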
|
jimi-c/ansible
|
lib/ansible/cli/galaxy.py
|
Python
|
gpl-3.0
| 33,077
|
[
"Galaxy"
] |
1e6693d707e0efbd25fccfac63ec81ec8727f3d394180c50fd48601d083db1e1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#################################################################################
# LAYMAN ACTIONS
#################################################################################
# File: cli.py
#
# Handles layman actions via the command line interface.
#
# Copyright:
# (c) 2010 - 2011
# Gunnar Wrobel
# Brian Dolbec
# Distributed under the terms of the GNU General Public License v2
#
# Author(s):
# Gunnar Wrobel <wrobel@gentoo.org>
#  Brian Dolbec <brian.dolbec@gmail.com>
#
''' Provides the command line actions that can be performed by layman.'''
__version__ = "$Id: cli.py 2011-01-15 23:52 PST Brian Dolbec$"
import os, sys
from layman.api import LaymanAPI
from layman.utils import (decode_selection, encoder, get_encoding,
pad, terminal_width)
from layman.constants import (NOT_OFFICIAL_MSG, NOT_SUPPORTED_MSG,
FAILURE, SUCCEED)
class ListPrinter(object):
def __init__(self, config):
self.config = config
self.output = self.config['output']
if not self.config['width']:
self.width = terminal_width()-1
else:
self.width = self.config['width']
self.srclen = self.width - 43
self._encoding_ = get_encoding(self.output)
if config['verbose']:
self.my_lister = self.short_list # self.long_list
else:
self.my_lister = self.short_list
def print_shortdict(self, info, complain):
#print "ListPrinter.print_shortdict()",info, "\n\n"
overlays = sorted(info)
#print "ids =======>", overlays, "\n"
for ovl in overlays:
overlay = info[ovl]
#print "overlay =", overlay
summary, supported, official = overlay
self.print_overlay(summary, supported, official, complain)
def print_shortlist(self, info, complain):
for summary, supported, official in info:
self.print_overlay(summary, supported, official, complain)
def print_fulldict(self, info, complain):
ids = sorted(info)
#print "ids =======>", ids, "\n"
for ovl in ids:
overlay = info[ovl]
#print overlay
self.print_overlay(self.my_lister(overlay),
overlay['supported'],
overlay['official'],
complain)
def print_overlay(self, summary, supported, official, complain):
# Is the overlay supported?
if supported:
# Is this an official overlay?
if official:
self.output.info(summary, 1)
# Unofficial overlays will only be listed if we are not
# checking or listing verbose
elif complain:
# Give a reason why this is marked yellow if it is a verbose
# listing
if self.config['verbose']:
self.output.warn(NOT_OFFICIAL_MSG, 1)
self.output.warn(summary, 1)
# Unsupported overlays will only be listed if we are not checking
# or listing verbose
elif complain:
# Give a reason why this is marked red if it is a verbose
# listing
prev_state = self.output.block_callback
self.output.block_callback = True
if self.config['verbose']:
self.output.error(NOT_SUPPORTED_MSG)
self.output.error(summary)
self.output.block_callback = prev_state
def short_list(self, overlay):
'''
>>> print short_list(overlay)
wrobel [Subversion] (https://o.g.o/svn/dev/wrobel )
'''
name = pad(overlay['name'], 25)
if len(set(e for e in overlay['src_types'])) == 1:
_type = overlay['src_types'][0]
else:
            _type = '%s/..' % overlay['src_types'][0]
mtype = ' [' + pad(_type, 10) + ']'
source = ', '.join(overlay['src_uris'])
if len(source) > self.srclen:
source = source.replace("overlays.gentoo.org", "o.g.o")
source = ' (' + pad(source, self.srclen) + ')'
return encoder(name + mtype + source, self._encoding_)
class Main(object):
'''Performs the actions the user selected.
'''
def __init__(self, config):
self.config = config
#print "config.keys()", config.keys()
self.output = config['output']
self.api = LaymanAPI(config,
report_errors=False,
output=config.output)
# Given in order of precedence
self.actions = [('fetch', 'Fetch'),
('add', 'Add'),
('sync', 'Sync'),
('info', 'Info'),
('sync_all', 'Sync'),
('delete', 'Delete'),
('list', 'ListRemote'),
('list_local', 'ListLocal'),]
def __call__(self):
self.output.debug("CLI.__call__(): self.config.keys()"
" %s" % str(self.config.keys()), 6)
# blank newline -- no " *"
self.output.notice('')
# Make fetching the overlay list a default action
        if 'nofetch' not in self.config.keys():
            # Actions that implicitly call the fetch operation first
            fetch_actions = ['sync', 'sync_all', 'list']
            for i in fetch_actions:
                if i in self.config.keys():
                    # Implicitly call fetch, break loop
                    self.Fetch()
                    break
result = 0
# Set the umask
umask = self.config['umask']
try:
new_umask = int(umask, 8)
old_umask = os.umask(new_umask)
except Exception, error:
self.output.die('Failed setting to umask "' + umask +
'"!\nError was: ' + str(error))
action_errors = []
results = []
        act = set([x[0] for x in self.actions])
        k = set([x for x in self.config.keys()])
        a = act.intersection(k)
self.output.debug('Actions = %s' % str(a), 4)
for action in self.actions:
self.output.debug('Checking for action %s' % action[0], 4)
if action[0] in self.config.keys():
result += getattr(self, action[1])()
_errors = self.api.get_errors()
if _errors:
self.output.debug("CLI: found errors performing "
"action %s" % action[0], 2)
action_errors.append((action[0], _errors))
result = -1 # So it cannot remain 0, i.e. success
results.append(result)
self.output.debug('Completed action %s, result %s'
% (action[0], result==0), 4)
self.output.debug('Checking for action errors', 4)
if action_errors:
for action, _errors in action_errors:
self.output.warn("CLI: Errors occurred processing action"
" %s" % action)
for _error in _errors:
self.output.error(_error)
self.output.notice("")
# Reset umask
os.umask(old_umask)
if -1 in results:
sys.exit(FAILURE)
else:
sys.exit(SUCCEED)
def Fetch(self):
''' Fetches the overlay listing.
'''
self.output.info("Fetching remote list,...", 2)
result = self.api.fetch_remote_list()
if result:
self.output.info('Fetch Ok', 2)
# blank newline -- no " *"
self.output.notice('')
return result
def Add(self):
''' Adds the selected overlays.
'''
self.output.info("Adding overlay,...", 2)
selection = decode_selection(self.config['add'])
if 'ALL' in selection:
selection = self.api.get_available()
self.output.debug('Adding selected overlays', 6)
result = self.api.add_repos(selection, update_news=True)
if result:
self.output.info('Successfully added overlay(s) '+\
', '.join(selection) +'.', 2)
# blank newline -- no " *"
self.output.notice('')
return result
def Sync(self):
''' Syncs the selected overlays.
'''
self.output.info("Syncing selected overlays,...", 2)
# Note api.sync() defaults to printing results
selection = decode_selection(self.config['sync'])
if self.config['sync_all'] or 'ALL' in selection:
selection = self.api.get_installed()
self.output.debug('Updating selected overlays', 6)
result = self.api.sync(selection, update_news=True)
# blank newline -- no " *"
self.output.notice('')
return result
def Delete(self):
''' Deletes the selected overlays.
'''
self.output.info('Deleting selected overlays,...', 2)
selection = decode_selection(self.config['delete'])
if 'ALL' in selection:
selection = self.api.get_installed()
result = self.api.delete_repos(selection)
if result:
self.output.info('Successfully deleted overlay(s) ' +\
', '.join(selection) + '.', 2)
# blank newline -- no " *"
self.output.notice('')
return result
def Info(self):
''' Print information about the specified overlays.
'''
selection = decode_selection(self.config['info'])
if 'ALL' in selection:
selection = self.api.get_available()
list_printer = ListPrinter(self.config)
_complain = self.config['nocheck'] or self.config['verbose']
info = self.api.get_info_str(selection, local=False,
verbose=True, width=list_printer.width)
list_printer.print_shortdict(info, complain=_complain)
# blank newline -- no " *"
self.output.notice('')
return info != {}
def ListRemote(self):
''' Lists the available overlays.
'''
self.output.debug('Printing remote overlays.', 6)
list_printer = ListPrinter(self.config)
_complain = self.config['nocheck'] or self.config['verbose']
info = self.api.get_info_list(local=False,
verbose=self.config['verbose'], width=list_printer.width)
list_printer.print_shortlist(info, complain=_complain)
# blank newline -- no " *"
self.output.notice('')
return info != {}
def ListLocal(self):
''' Lists the local overlays.
'''
#print "ListLocal()"
self.output.debug('Printing installed overlays.', 6)
list_printer = ListPrinter(self.config)
#
# fast way
info = self.api.get_info_list(verbose=self.config['verbose'],
width=list_printer.width)
#self.output.debug('CLI: ListLocal() info = %s' % len(info), 4)
#self.output.debug('\n'.join([ str(x) for x in info]), 4)
list_printer.print_shortlist(info, complain=True)
#
# slow way
#info = self.api.get_all_info(self.api.get_installed(), local=True)
#list_printer.print_fulldict(info, complain=_complain)
# blank newline -- no " *"
self.output.notice('')
return info != {}
if __name__ == '__main__':
import doctest
# Ignore warnings here. We are just testing
from warnings import filterwarnings, resetwarnings
filterwarnings('ignore')
doctest.testmod(sys.modules[__name__])
resetwarnings()
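# Hedged usage examples (added; the overlay name is illustrative):
#   layman -L            # fetch and list all remote overlays
#   layman -a wrobel     # add the 'wrobel' overlay
#   layman -S            # sync all installed overlays
#   layman -d wrobel     # delete the overlay again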
|
jmesmon/layman
|
layman/cli.py
|
Python
|
gpl-2.0
| 11,815
|
[
"Brian"
] |
6b88d821284e89415f22eb205d1316912887d29df7cd58ac6c0db54ab88b9a07
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Multiclass classification
# %% [markdown]
# The multiclass classification problem maps an input $x \in {\cal X}$ to a discrete label $y\in {\cal Y}$, where ${\cal Y}$ is a discrete set of size $C$ greater than two (for $C=2$ it reduces to the usual binary classification).
#
# Labels are encoded in a one-hot fashion: for example, if $C=4$ and $y=2$, we write $\bar{y} = [0,1,0,0]$.
#
# The generative model for this problem consists of:
#
# * $C$ latent functions $\mathbf{f} = [f_1,...,f_C]$ with independent Gaussian process priors
# * a deterministic function that builds a discrete distribution $\pi(\mathbf{f}) = [\pi_1(f_1),...,\pi_C(f_C)]$ from the latents such that $\sum_c \pi_c(f_c) = 1$
# * a discrete likelihood $p(y|\mathbf{f}) = Discrete(y;\pi(\mathbf{f})) = \prod_c \pi_c(f_c)^{\bar{y}_c}$
#
# A typical example of $\pi$ is the softmax function:
#
# \begin{equation}
# \pi_c (f_c) \propto \exp( f_c)
# \end{equation}
#
# Another convenient one is the robust max:
# \begin{equation}
# \pi_c(\mathbf{f}) = \begin{cases} 1 - \epsilon, & \mbox{if } c = \arg \max_{c'} f_{c'} \\
# \epsilon /(C-1), & \mbox{otherwise} \end{cases}
# \end{equation}
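# %% [markdown]
# A minimal NumPy sketch of the two link functions (an illustration, not part of
# the model code below; `eps` is an assumed robustness parameter, and both
# functions act on a single input's vector of latent values):

# %%
import numpy as np


def softmax_link(f):
    """Softmax probabilities for one input's C latent values."""
    e = np.exp(f - np.max(f))  # subtract the max for numerical stability
    return e / e.sum()


def robustmax_link(f, eps=1e-3):
    """Robust max: 1 - eps on the argmax entry, eps / (C - 1) elsewhere."""
    p = np.full(len(f), eps / (len(f) - 1))
    p[np.argmax(f)] = 1 - eps
    return p


f_example = np.array([0.3, 1.2, -0.5])
softmax_link(f_example), robustmax_link(f_example)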
# %%
import numpy as np
import tensorflow as tf
import warnings
warnings.filterwarnings("ignore") # ignore DeprecationWarnings from tensorflow
import matplotlib.pyplot as plt
# %matplotlib inline
import gpflow
from gpflow.utilities import print_summary, set_trainable
from gpflow.ci_utils import ci_niter
from multiclass_classification import plot_posterior_predictions, colors
# reproducibility:
np.random.seed(0)
tf.random.set_seed(123)
# %% [markdown]
# ## Sampling from the GP multiclass generative model
# %% [markdown]
# ### Declaring model parameters and input
# %%
# Number of functions and number of data points
C = 3
N = 100
# Lengthscale of the SquaredExponential kernel (isotropic -- change to `[0.1] * C` for ARD)
lengthscales = 0.1
# Jitter
jitter_eye = np.eye(N) * 1e-6
# Input
X = np.random.rand(N, 1)
# %% [markdown]
# ### Sampling
# %%
# SquaredExponential kernel matrix
kernel_se = gpflow.kernels.SquaredExponential(lengthscales=lengthscales)
K = kernel_se(X) + jitter_eye
# Latents prior sample
f = np.random.multivariate_normal(mean=np.zeros(N), cov=K, size=(C)).T
# Hard max observation
Y = np.argmax(f, 1).flatten().astype(int)
# One-hot encoding
Y_hot = np.zeros((N, C), dtype=bool)
Y_hot[np.arange(N), Y] = 1
data = (X, Y)
# %% [markdown]
# ### Plotting
# %%
plt.figure(figsize=(12, 6))
order = np.argsort(X.flatten())
for c in range(C):
plt.plot(X[order], f[order, c], ".", color=colors[c], label=str(c))
plt.plot(X[order], Y_hot[order, c], "-", color=colors[c])
plt.legend()
plt.xlabel("$X$")
plt.ylabel("Latent (dots) and one-hot labels (lines)")
plt.title("Sample from the joint $p(Y, \mathbf{f})$")
plt.grid()
plt.show()
# %% [markdown]
# ## Inference
#
# %% [markdown]
# Inference here consists of computing the posterior distribution over the latent functions given the data $p(\mathbf{f}|Y, X)$.
#
# You can use different inference methods. Here we perform variational inference.
# For a treatment of the multiclass classification problem using MCMC sampling, see [Markov Chain Monte Carlo (MCMC)](../advanced/mcmc.ipynb).
# %% [markdown]
# ### Approximate inference: Sparse Variational Gaussian Process
# %% [markdown]
# #### Declaring the SVGP model (see [GPs for big data](../advanced/gps_for_big_data.ipynb))
# %%
# sum kernel: Matern32 + White
kernel = gpflow.kernels.Matern32() + gpflow.kernels.White(variance=0.01)
# Robustmax Multiclass Likelihood
invlink = gpflow.likelihoods.RobustMax(C) # Robustmax inverse link function
likelihood = gpflow.likelihoods.MultiClass(C, invlink=invlink)  # Multiclass likelihood (C classes)
Z = X[::5].copy() # inducing inputs
m = gpflow.models.SVGP(
kernel=kernel,
likelihood=likelihood,
inducing_variable=Z,
num_latent_gps=C,
whiten=True,
q_diag=True,
)
# Only train the variational parameters
set_trainable(m.kernel.kernels[1].variance, False)
set_trainable(m.inducing_variable, False)
print_summary(m, fmt="notebook")
# %% [markdown]
# #### Running inference
# %%
opt = gpflow.optimizers.Scipy()
opt_logs = opt.minimize(
m.training_loss_closure(data), m.trainable_variables, options=dict(maxiter=ci_niter(1000))
)
print_summary(m, fmt="notebook")
# %%
plot_posterior_predictions(m, X, Y)
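# %% [markdown]
# As a quick illustrative check (`Xnew` below is an assumed grid of test inputs),
# `predict_y` gives the posterior probability of each class at new inputs:

# %%
Xnew = np.linspace(0, 1, 5)[:, None]
p, _ = m.predict_y(Xnew)  # shape [5, C]; each row is a distribution over the C classes
print(p.numpy().round(3))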
|
GPflow/GPflow
|
doc/source/notebooks/advanced/multiclass_classification.pct.py
|
Python
|
apache-2.0
| 4,736
|
[
"Gaussian"
] |
1529f08db013fbfba6a1b78931334ece4cb5abc233b039a6698792b95add76ab
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from loguru import logger
# import functions from another directory
import os.path
import os.path as op
# import copy
import unittest
import pytest
# sample_data_path = os.path.dirname(os.path.abspath(__file__))
# sample_data_path
# sys.path.append(os.path.join(path_to_script, "../extern/pyseg_base/src/"))
# sys.path.append(os.path.join(path_to_script, "../extern/py3DSeedEditor/"))
# sys.path.append(os.path.join(path_to_script, "../src/"))
# from PyQt4.QtGui import QFileDialog, QApplication, QMainWindow
import numpy as np
try:
import dicom
dicom.debug(False)
except ImportError:
import pydicom as dicom
dicom.config.debug(False)
#
import sys
import io3d
import io3d.dcmreaddata as dcmr
# sample_data_path = "~/data/medical/orig/sample_data/"
# sample_data_path = op.expanduser(sample_data_path)
from PyQt5.QtWidgets import QApplication
import io3d.outputqt
import io3d.datareaderqt
import io3d.datasets
class QtTest(unittest.TestCase):
    interactiveTest = False
# def setUp(self):
# import imtools
# import imtools.sample_data
# imtools.sample_data.get_sample_data(["jatra_5mm", "volumetrie"], SAMPLE_DATA_DIR)
# def setUp(self):
# self.dcmdir = os.path.join(path_to_script, '../sample_data/jatra_5mm')
# self.data3d, self.metadata = dcmr.dcm_read_from_dir(self.dcmdir)
# reader = dcmr.DicomReader(self.dcmdir)
# self.data3d = reader.get_3Ddata()
# self.metadata = reader.get_metaData()
    def test_select_output_path(self):
app = QApplication(sys.argv)
sopw = io3d.outputqt.SelectOutputPathWidget(
widget_label="widget label", path="~/lisa_data/sample.{}.pkl"
)
sopw.show()
in_path = "~/sample{}.vtk"
sopw.set_path(in_path)
out_path = sopw.get_path()
home = op.expanduser("~")
rp1 = op.relpath(home, in_path)
rp2 = op.relpath(home, out_path)
# app.exec_()
self.assertEqual(rp1, rp2)
    def test_select_output_filename(self):
app = QApplication(sys.argv)
sopw = io3d.outputqt.SelectOutputPathWidget(
widget_label="widget label", path="~/lisa_data/sample.{}.pkl"
)
sopw.show()
in_path = "~/sample{}.vtk"
sopw.set_path(in_path)
out_path = sopw.get_filename()
# app.exec_()
self.assertEqual(out_path, "sample{}.vtk")
def test_read_datareader(self):
sdp = io3d.datasets.join_path("sample_data")
dp = io3d.datasets.join_path("sample_data/jatra_5mm/")
app = QApplication(sys.argv)
drw = io3d.datareaderqt.DataReaderWidget(loaddir=sdp, qt_app=app)
# (widget_label="widget label", path="~/lisa_data/sample.{}.pkl")
drw.show()
drw.datapath = dp
drw.read_data_from_prepared_datapath()
# print(drw.datap["data3d"].shape)
error = np.sum(
np.abs(np.asarray([93, 512, 512]) - np.asarray(drw.datap["data3d"].shape))
)
# app.exec_()
self.assertEqual(error, 0)
def test_read_datareader_get_path_dialog_surround_with_inject_dirpath(self):
sdp = io3d.datasets.join_path("medical", "orig", "sample_data", get_root=True)
dp = io3d.datasets.join_path(
"medical/orig/sample_data/jatra_5mm/", get_root=True
)
app = QApplication(sys.argv)
drw = io3d.datareaderqt.DataReaderWidget(loaddir=sdp, qt_app=app)
# (widget_label="widget label", path="~/lisa_data/sample.{}.pkl")
drw.show()
drw.datapath = dp
drw._skip_get_path_dialog_for_tests = True
drw.read_data_dir_dialog()
# drw.read_data_from_prepared_datapath()
# print(drw.datap["data3d"].shape)
error = np.sum(
np.abs(np.asarray([93, 512, 512]) - np.asarray(drw.datap["data3d"].shape))
)
# app.exec_()
self.assertEqual(error, 0)
def test_read_datareader_get_path_dialog_surround_with_inject_filepath(self):
sdp = io3d.datasets.join_path("medical", "orig", "sample_data", get_root=True)
# dp = io3d.datasets.join_path("medical/orig/sample_data/ct_head.rawiv", get_root=True)
dp = io3d.datasets.join_path("medical/orig/liver-orig001.mhd", get_root=True)
app = QApplication(sys.argv)
drw = io3d.datareaderqt.DataReaderWidget(loaddir=sdp, qt_app=app)
# (widget_label="widget label", path="~/lisa_data/sample.{}.pkl")
drw.show()
drw.datapath = dp
drw._skip_get_path_dialog_for_tests = True
drw.read_data_dir_dialog()
# drw.read_data_from_prepared_datapath()
# print(drw.datap["data3d"].shape)
data3d = drw.datap["data3d"]
        self.assertTrue(np.array_equal(np.asarray([183, 512, 512]), data3d.shape))
# app.exec_()
@pytest.mark.interactive
def test_read_datareader_interactive(self):
sdp = io3d.datasets.join_path("sample_data")
dp = io3d.datasets.join_path("sample_data/jatra_5mm/")
app = QApplication(sys.argv)
drw = io3d.datareaderqt.DataReaderWidget(loaddir=sdp, qt_app=app)
# (widget_label="widget label", path="~/lisa_data/sample.{}.pkl")
drw.show()
# drw.datapath = dp
# drw.read_data_from_prepared_datapath()
# error = np.sum(np.abs(np.asarray([93, 512, 512]) - np.asarray(drw.datap["data3d"].shape)))
app.exec_()
# self.assertEqual(error, 0)
def test_qstring(self):
if sys.version_info.major == 2:
from PyQt5.QtCore import QString
text = QString("i am qstring")
else:
text = "i am str"
txt = io3d.datareaderqt.get_str_py2_compatibility(text)
self.assertTrue(type(txt) is str)
if __name__ == "__main__":
unittest.main()
|
mjirik/io3d
|
tests/qt_test.py
|
Python
|
mit
| 5,887
|
[
"VTK"
] |
86d4a2472a3956ac9b685f1ed8307406eb7bd2099ddc9dd36fd71904062baef7
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Grimoire general utils
#
# Copyright (C) 2015 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Alvaro del Castillo San Felix <acs@bitergia.com>
#
import argparse
from dateutil import parser
import logging
import sys
from grimoire.ocean.elastic import ElasticOcean
# Connectors for Ocean
from grimoire.ocean.bugzilla import BugzillaOcean
from grimoire.ocean.bugzillarest import BugzillaRESTOcean
from grimoire.ocean.confluence import ConfluenceOcean
from grimoire.ocean.discourse import DiscourseOcean
from grimoire.ocean.gerrit import GerritOcean
from grimoire.ocean.git import GitOcean
from grimoire.ocean.github import GitHubOcean
from grimoire.ocean.jenkins import JenkinsOcean
from grimoire.ocean.jira import JiraOcean
from grimoire.ocean.kitsune import KitsuneOcean
from grimoire.ocean.mbox import MBoxOcean
from grimoire.ocean.mediawiki import MediaWikiOcean
from grimoire.ocean.phabricator import PhabricatorOcean
from grimoire.ocean.remo2 import ReMoOcean
from grimoire.ocean.stackexchange import StackExchangeOcean
from grimoire.ocean.supybot import SupybotOcean
from grimoire.ocean.telegram import TelegramOcean
from grimoire.ocean.twitter import TwitterOcean
# Connectors for EnrichOcean
from grimoire.elk.bugzilla import BugzillaEnrich
from grimoire.elk.bugzillarest import BugzillaRESTEnrich
from grimoire.elk.confluence import ConfluenceEnrich
from grimoire.elk.discourse import DiscourseEnrich
from grimoire.elk.git import GitEnrich
from grimoire.elk.github import GitHubEnrich
from grimoire.elk.gerrit import GerritEnrich
from grimoire.elk.gmane import GmaneEnrich
from grimoire.elk.jenkins import JenkinsEnrich
from grimoire.elk.jira import JiraEnrich
from grimoire.elk.kitsune import KitsuneEnrich
from grimoire.elk.mbox import MBoxEnrich
from grimoire.elk.mediawiki import MediaWikiEnrich
from grimoire.elk.phabricator import PhabricatorEnrich
from grimoire.elk.remo2 import ReMoEnrich
from grimoire.elk.pipermail import PipermailEnrich
from grimoire.elk.stackexchange import StackExchangeEnrich
from grimoire.elk.supybot import SupybotEnrich
from grimoire.elk.telegram import TelegramEnrich
from grimoire.elk.twitter import TwitterEnrich
# Connectors for Perceval
from perceval.backends.bugzilla import Bugzilla, BugzillaCommand
from perceval.backends.bugzillarest import BugzillaREST, BugzillaRESTCommand
from perceval.backends.discourse import Discourse, DiscourseCommand
from perceval.backends.confluence import Confluence, ConfluenceCommand
from perceval.backends.gerrit import Gerrit, GerritCommand
from perceval.backends.git import Git, GitCommand
from perceval.backends.github import GitHub, GitHubCommand
from perceval.backends.gmane import Gmane, GmaneCommand
from perceval.backends.jenkins import Jenkins, JenkinsCommand
from perceval.backends.jira import Jira, JiraCommand
from perceval.backends.kitsune import Kitsune, KitsuneCommand
from perceval.backends.mbox import MBox, MBoxCommand
from perceval.backends.mediawiki import MediaWiki, MediaWikiCommand
from perceval.backends.phabricator import Phabricator, PhabricatorCommand
from perceval.backends.pipermail import Pipermail, PipermailCommand
from perceval.backends.remo2 import ReMo, ReMoCommand
from perceval.backends.stackexchange import StackExchange, StackExchangeCommand
from perceval.backends.supybot import Supybot, SupybotCommand
from perceval.backends.telegram import Telegram, TelegramCommand
from grimoire.elk.elastic import ElasticSearch
from grimoire.elk.elastic import ElasticConnectException
def get_connector_from_name(name):
found = None
connectors = get_connectors()
for cname in connectors:
if cname == name:
found = connectors[cname]
return found
def get_connector_name(cls):
found = None
connectors = get_connectors()
for cname in connectors:
for con in connectors[cname]:
if cls == con:
if found:
# The canonical name is included in the classname
if cname in cls.__name__.lower():
found = cname
else:
found = cname
return found
def get_connectors():
return {"bugzilla":[Bugzilla, BugzillaOcean, BugzillaEnrich, BugzillaCommand],
"bugzillarest":[BugzillaREST, BugzillaRESTOcean, BugzillaRESTEnrich, BugzillaRESTCommand],
"confluence":[Confluence, ConfluenceOcean, ConfluenceEnrich, ConfluenceCommand],
"discourse":[Discourse, DiscourseOcean, DiscourseEnrich, DiscourseCommand],
"gerrit":[Gerrit, GerritOcean, GerritEnrich, GerritCommand],
"git":[Git, GitOcean, GitEnrich, GitCommand],
"github":[GitHub, GitHubOcean, GitHubEnrich, GitHubCommand],
"gmane":[Gmane, MBoxOcean, GmaneEnrich, GmaneCommand],
"jenkins":[Jenkins, JenkinsOcean, JenkinsEnrich, JenkinsCommand],
"jira":[Jira, JiraOcean, JiraEnrich, JiraCommand],
"kitsune":[Kitsune, KitsuneOcean, KitsuneEnrich, KitsuneCommand],
"mbox":[MBox, MBoxOcean, MBoxEnrich, MBoxCommand],
"mediawiki":[MediaWiki, MediaWikiOcean, MediaWikiEnrich, MediaWikiCommand],
"phabricator":[Phabricator, PhabricatorOcean, PhabricatorEnrich, PhabricatorCommand],
"pipermail":[Pipermail, MBoxOcean, MBoxEnrich, PipermailCommand],
"pipermail":[Pipermail, MBoxOcean, PipermailEnrich, PipermailCommand],
"remo":[ReMo, ReMoOcean, ReMoEnrich, ReMoCommand],
"stackexchange":[StackExchange, StackExchangeOcean,
StackExchangeEnrich, StackExchangeCommand],
"supybot":[Supybot, SupybotOcean, SupybotEnrich, SupybotCommand],
"telegram":[Telegram, TelegramOcean, TelegramEnrich, TelegramCommand],
"twitter":[None, TwitterOcean, TwitterEnrich, None]
} # Will come from Registry
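# Illustrative lookup (comment only): each list pairs a Perceval backend with its
# Ocean, Enrich and Command classes, e.g.
#
#   get_connector_from_name('git')   # -> [Git, GitOcean, GitEnrich, GitCommand]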
def get_elastic(url, es_index, clean = None, backend = None):
    mapping = None
    analyzers = None
    if backend:
        mapping = backend.get_elastic_mappings()
        analyzers = backend.get_elastic_analyzers()
try:
insecure = True
elastic = ElasticSearch(url, es_index, mapping, clean, insecure, analyzers)
except ElasticConnectException:
logging.error("Can't connect to Elastic Search. Is it running?")
sys.exit(1)
return elastic
def config_logging(debug):
if debug:
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
logging.debug("Debug mode activated")
else:
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
ARTHUR_USAGE_MSG = ''
ARTHUR_DESC_MSG = ''
ARTHUR_EPILOG_MSG = ''
def get_params_parser():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(usage=ARTHUR_USAGE_MSG,
description=ARTHUR_DESC_MSG,
epilog=ARTHUR_EPILOG_MSG,
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False)
ElasticOcean.add_params(parser)
parser.add_argument('-h', '--help', action='help',
help=argparse.SUPPRESS)
parser.add_argument('-g', '--debug', dest='debug',
action='store_true',
help=argparse.SUPPRESS)
parser.add_argument("--no_incremental", action='store_true',
help="don't use last state for data source")
parser.add_argument("--fetch_cache", action='store_true',
help="Use cache for item retrieval")
parser.add_argument("--redis", default="redis",
help="url for the redis server")
parser.add_argument("--enrich", action='store_true',
help="Enrich items after retrieving")
parser.add_argument("--enrich_only", action='store_true',
help="Only enrich items (DEPRECATED, use --only-enrich)")
parser.add_argument("--only-enrich", dest='enrich_only', action='store_true',
help="Only enrich items (DEPRECATED, use --only-enrich)")
parser.add_argument("--events-enrich", dest='events_enrich', action='store_true',
help="Enrich events in items")
parser.add_argument('--index', help="Ocean index name")
parser.add_argument('--index-enrich', dest="index_enrich", help="Ocean enriched index name")
parser.add_argument('--db-user', help="User for db connection (default to root)",
default="root")
parser.add_argument('--db-password', help="Password for db connection (default empty)",
default="")
parser.add_argument('--db-host', help="Host for db connection (default to mariadb)",
default="mariadb")
parser.add_argument('--db-projects-map', help="Projects Mapping DB")
parser.add_argument('--json-projects-map', help="Projects Mapping JSON file")
parser.add_argument('--project', help="Project for the repository (origin)")
parser.add_argument('--refresh-projects', action='store_true', help="Refresh projects in enriched items")
parser.add_argument('--db-sortinghat', help="SortingHat DB")
parser.add_argument('--only-identities', action='store_true', help="Only add identities to SortingHat DB")
parser.add_argument('--refresh-identities', action='store_true', help="Refresh identities in enriched items")
parser.add_argument('--github-token', help="If provided, github usernames will be retrieved in git enrich.")
parser.add_argument('--studies', action='store_true', help="Execute studies after enrichment.")
parser.add_argument('--only-studies', action='store_true', help="Execute only studies.")
parser.add_argument('backend', help=argparse.SUPPRESS)
parser.add_argument('backend_args', nargs=argparse.REMAINDER,
help=argparse.SUPPRESS)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser
def get_params():
''' Get params definition from ElasticOcean and from all the backends '''
parser = get_params_parser()
args = parser.parse_args()
return args
def get_time_diff_days(start_txt, end_txt):
    ''' Number of days between two dates '''
if start_txt is None or end_txt is None:
return None
start = parser.parse(start_txt)
end = parser.parse(end_txt)
seconds_day = float(60*60*24)
diff_days = \
(end-start).total_seconds() / seconds_day
diff_days = float('%.2f' % diff_days)
return diff_days
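# Illustrative usage of get_time_diff_days (hypothetical dates):
#
#   >>> get_time_diff_days('2016-01-01', '2016-01-03')
#   2.0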
|
sanacl/GrimoireELK
|
grimoire/utils.py
|
Python
|
gpl-3.0
| 11,497
|
[
"Elk"
] |
031fe87597416ed420607032a9fc90cd9fe11ce65814799c93ee031a5e82e6e0
|
#!/usr/bin/env python
# encoding: utf-8
from gi.repository import Moose
from gi.repository import GLib
import sys
import logging
LOGGER = logging.getLogger('zeroconf')
def print_server(browser):
LOGGER.info('-- SERVER LIST --')
for server in browser:
for attr, value in server:
LOGGER.info('{:>10} : {}'.format(attr, value))
LOGGER.info('')
def zeroconf_state_changed(browser):
state = browser.get_state()
if state is Moose.ZeroconfState.CHANGED:
if browser.timeout_id is not None:
GLib.source_remove(browser.timeout_id)
browser.timeout_id = GLib.timeout_add(
500, lambda: print_server(browser)
)
elif state is Moose.ZeroconfState.ERROR:
        LOGGER.error('Error: %s', browser.get_error())
elif state is Moose.ZeroconfState.ALL_FOR_NOW:
LOGGER.info('-- ALL FOUND FOR NOW --')
elif state is Moose.ZeroconfState.UNCONNECTED:
LOGGER.error('-- CONNECTION LOST --')
else:
LOGGER.warning(
'Unknown state. ZeroconfBrowser, you\'re drunk, go home.'
)
def print_servers():
"""Search all registered MPD Servers in the network & log them to stdout.
Uses libmoosecat's ZeroconfBrowser() implementation.
"""
browser = Moose.ZeroconfBrowser()
if browser.get_state() is not Moose.ZeroconfState.CONNECTED:
logging.critical('No avahi running, eh?')
return
browser.timeout_id = None
browser.connect('state-changed', zeroconf_state_changed)
try:
loop = GLib.MainLoop()
GLib.timeout_add(4 * 1000, loop.quit)
loop.run()
except KeyboardInterrupt:
logging.warning('[Ctrl-C]')
if __name__ == '__main__':
print_servers()
|
studentkittens/snobaer
|
snobaer/zeroconf.py
|
Python
|
lgpl-3.0
| 1,748
|
[
"MOOSE"
] |
da62313ea5ac84e21c34bf8e676ae9d91a7d1bc72c938f00e088fadc585b7ed1
|
from django.db import models
from django.db.models import Q
from django.utils.dateformat import format as django_format
from stages.models import Corporation, CorpContact, Teacher
GENDER_CHOICES = (
('M', 'Masculin'),
('F', 'Féminin'),
('I', 'Inconnu')
)
SECTION_CHOICES = (
('ASA', 'Aide en soin et accompagnement AFP'),
('ASE', 'Assist. socio-éducatif-ve CFC'),
('ASSC', 'Assist. en soin et santé communautaire CFC'),
('EDE', "Education de l’enfance, dipl. ES"),
('EDS', 'Education sociale, dipl. ES'),
)
OPTION_CHOICES = (
('GEN', 'Généraliste'),
('ENF', 'Enfance'),
('PAG', 'Personnes âgées'),
('HAN', 'Handicap'),
('PE-5400h', 'Parcours Emploi 5400h.'),
('PE-3600h', 'Parcours Emploi 3600h.'),
('PS', 'Parcours stage 5400h.'),
)
DIPLOMA_CHOICES = (
(0, 'Aucun'),
(1, "CFC d'ASE"),
(2, "CFC autre domaine"),
(3, "Matu acad./spéc. ou dipl. ECG"),
(4, "Portfolio"),
)
DIPLOMA_STATUS_CHOICES = (
(0, 'Inconnu'),
(1, 'En cours'),
(2, 'OK'),
)
RESIDENCE_PERMITS_CHOICES = (
(0, 'Pas nécessaire'),
(1, 'Nécessaire - OK'),
(2, 'Manquante'),
)
AES_ACCORDS_CHOICES = (
(0, 'OK'),
(1, 'Demander accord du canton concerné'),
(2, 'Refus du canton concerné')
)
class Candidate(models.Model):
"""
Inscriptions for new students
"""
first_name = models.CharField('Prénom', max_length=40)
last_name = models.CharField('Nom', max_length=40)
gender = models.CharField('Genre', max_length=1, choices=GENDER_CHOICES)
birth_date = models.DateField('Date de naissance', blank=True, null=True)
street = models.CharField('Rue', max_length=150, blank=True)
pcode = models.CharField('Code postal', max_length=4)
city = models.CharField('Localité', max_length=40)
district = models.CharField('Canton', max_length=2, blank=True)
mobile = models.CharField('Portable', max_length=40, blank=True)
email = models.EmailField('Courriel', blank=True)
avs = models.CharField('No AVS', max_length=15, blank=True)
handicap = models.BooleanField('Handicap/besoins part.', default=False)
section = models.CharField('Filière', max_length=10, choices=SECTION_CHOICES)
option = models.CharField('Option', max_length=20, choices=OPTION_CHOICES, blank=True)
exemption_ecg = models.BooleanField(default=False)
validation_sfpo = models.DateField('Confirmation SFPO', blank=True, null=True)
integration_second_year = models.BooleanField('Intégration', default=False)
confirmation_date = models.DateTimeField('Envoi mail de confirmation', blank=True, null=True)
canceled_file = models.BooleanField('Dossier retiré', default=False)
has_photo = models.BooleanField(default=False, verbose_name='Photo passeport')
corporation = models.ForeignKey(
Corporation, null=True, blank=True, on_delete=models.SET_NULL, verbose_name='Employeur'
)
instructor = models.ForeignKey(
CorpContact, null=True, blank=True, on_delete=models.SET_NULL, verbose_name='FEE/FPP'
)
# Checking for registration file
registration_form = models.BooleanField("Formulaire d’inscription", default=False)
certificate_of_payement = models.BooleanField("Attest. de paiement", default=False)
police_record = models.BooleanField("Casier judic.", default=False)
cv = models.BooleanField("CV", default=False)
reflexive_text = models.BooleanField("Texte réflexif", default=False)
promise = models.BooleanField("Promesse d'eng.", default=False)
contract = models.BooleanField("Contrat valide", default=False)
comment = models.TextField('Remarques', blank=True)
work_certificate = models.BooleanField("Bilan act. prof./dernier stage", default=False)
marks_certificate = models.BooleanField("Bull. de notes", default=False)
deposite_date = models.DateField('Date dépôt dossier')
examination_teacher = models.ForeignKey(
Teacher, null=True, blank=True, on_delete=models.SET_NULL,
limit_choices_to={'can_examinate': True}, verbose_name='Correct. examen'
)
examination_result = models.PositiveSmallIntegerField('Points examen', blank=True, null=True)
interview_result = models.PositiveSmallIntegerField('Points entretien prof.', blank=True, null=True)
file_result = models.PositiveSmallIntegerField('Points dossier', blank=True, null=True)
inscr_other_school = models.CharField("Inscr. autre école", max_length=30, blank=True)
certif_of_800_childhood = models.BooleanField("Attest. 800h. enfance", default=False)
certif_of_800_general = models.BooleanField("Attest. 800h. général", default=False)
diploma = models.PositiveSmallIntegerField('Titre sec. II', choices=DIPLOMA_CHOICES, default=0)
diploma_detail = models.CharField('Détail titre', max_length=30, blank=True, default='')
diploma_status = models.PositiveSmallIntegerField("Statut titre", choices=DIPLOMA_STATUS_CHOICES, default=0)
activity_rate = models.CharField("Taux d'activité", max_length=50, blank=True, default='')
validation_date = models.DateTimeField('Envoi mail de validation', null=True, blank=True)
convocation_date = models.DateTimeField('Envoi mail de convocation', null=True, blank=True)
convoc_confirm_receipt = models.DateTimeField('Accusé de réception', null=True, blank=True)
aes_accords = models.PositiveSmallIntegerField("Accord AES", choices=AES_ACCORDS_CHOICES, default=0)
residence_permits = models.PositiveSmallIntegerField(
"Autor. de séjour (pour les pers. étrang.)",
choices=RESIDENCE_PERMITS_CHOICES, blank=True, null=True, default=0
)
accepted = models.BooleanField('Admis', default=False)
class Meta:
verbose_name = 'Candidat'
ordering = ('last_name',)
def __str__(self):
return "%s %s" % (self.last_name, self.first_name)
@property
def civility(self):
if self.gender == 'M':
return 'Monsieur'
if self.gender == 'F':
return 'Madame'
else:
return ''
@property
def section_option(self):
if not self.option:
return self.get_section_display()
else:
return '{0}, option «{1}»'.format(self.get_section_display(), self.get_option_display())
@property
def has_interview(self):
try:
self.interview
return True
except Interview.DoesNotExist:
return False
@property
def total_result(self):
return (self.examination_result or 0) + (self.interview_result or 0) + (self.file_result or 0)
def get_ok(self, fieldname):
return 'OK' if getattr(self, fieldname) is True else 'NON'
INTERVIEW_CHOICES = (
('N', 'Normal'),
('R', 'Réserve'),
('X', 'Attente confirmation enseignants'),
)
class Interview(models.Model):
date = models.DateTimeField('Date')
room = models.CharField("Salle d'entretien", max_length=25)
candidat = models.OneToOneField(Candidate, null=True, blank=True, on_delete=models.SET_NULL)
teacher_int = models.ForeignKey(
Teacher, null=True, blank=True, on_delete=models.SET_NULL, related_name='+',
verbose_name='Ens. entretien'
)
teacher_file = models.ForeignKey(
Teacher, null=True, blank=True, on_delete=models.SET_NULL, related_name='+',
verbose_name='Ens. dossier'
)
status = models.CharField('Statut', max_length=1, choices=INTERVIEW_CHOICES, default='N')
class Meta:
verbose_name = "Entretien d'admission"
verbose_name_plural = "Entretiens d'admission"
ordering = ('date',)
def __str__(self):
return '{0} : {1} (Ent.) / {2} (Dos.) - ({3}) -salle:{4}-{5}'.format(
self.date_formatted,
self.teacher_int.abrev if self.teacher_int else '?',
self.teacher_file.abrev if self.teacher_file else '?',
self.status, self.room, self.candidat or '???'
)
@property
def date_formatted(self):
return django_format(self.date, "l j F Y à H\hi")
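# Illustrative usage of the Candidate properties (hypothetical, unsaved instance):
#
#   >>> c = Candidate(first_name='Ana', last_name='Roy', gender='F', section='ASE',
#   ...               examination_result=40, interview_result=30, file_result=20)
#   >>> c.civility
#   'Madame'
#   >>> c.total_result
#   90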
|
epcoullery/epcstages
|
candidats/models.py
|
Python
|
agpl-3.0
| 8,114
|
[
"ASE"
] |
36fb4928881e8dff022b5f7ee0b6de9b800ca686e00f048db7b9a350c63c2312
|
# ---------------- User Configuration Settings for speed-track.py ---------------------------------
# Set display and log settings
verbose = True # display basic status information on console
display_fps = False # show average frame count every 100 loops
log_data_to_file = True # save comma delimited file to a text .log file
gui_window_on = False # Display desktop GUI openCV threshold window. Set to False if running from ssh console only.
calibrate = False # Create a speed_calibrate.jpg file with markers to calculate a px to FT conversion
# Motion Camera Settings
CAMERA_HFLIP = False # Flip the camera image horizontally if required
CAMERA_VFLIP = False # Flip the camera image vertically if required
CAMERA_WIDTH = 320 # Set the image stream width for opencv motion scanning
CAMERA_HEIGHT = 240 # Set the image stream height for opencv motion scanning
WINDOW_BIGGER = 3 # if gui_window_on=True then multiplies the opencv window size by this amount
IMAGE_VIEW_FT = 72 # Width, in feet, of the road area covered by the camera image
SPEED_MPH = True # Speed units: True = mph, False = kph
# Speed Photo Camera Image Settings
image_width = 1280 # Set width of trigger point image to save
image_height = 720 # Set height of trigger point image to save
image_path = "images" # folder name to store images
image_prefix = "speed-" # image name prefix
image_text_bottom = True # True = Show image text at bottom otherwise at top
# Motion Tracking Settings
MIN_AREA = 200 # excludes all contours less than or equal to this Area
CIRCLE_SIZE = 1 # diameter of circle to show motion location in window
BLUR_SIZE = 10 # OpenCV setting for Gaussian difference image blur
THRESHOLD_SENSITIVITY = 25 # OpenCV setting for difference image threshold
# Motion Event Settings
# ---------------------
event_timeout = 2 # Number of seconds to wait between motion events before clearing track
# Set valid range for next motion event before appending (excludes unrealistic speeds)
x_diff_min = 2
x_diff_max = 130
# Set cumulative track length trigger point for taking speed photo
# Note: Fast motion may not get captured due to camera lag.
track_trig_len = 160
# Set valid y Limits for motion events. This restricts valid motion to road area.
y_upper = 140 # Exclude motion events above this point to top of image.
y_lower = 200 # Exclude motion events below this point to bottom of image.
#--------------------------- End of User Settings -------------------------------------------------
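# Illustrative note (not part of the original settings): how these values combine
# into a speed estimate for an object that moved px_travelled pixels in secs seconds:
#
#   ft_per_px = IMAGE_VIEW_FT / float(CAMERA_WIDTH)   # 72 ft / 320 px = 0.225 ft per pixel
#   speed_fps = px_travelled * ft_per_px / secs       # feet per second
#   speed = speed_fps * 0.681818 if SPEED_MPH else speed_fps * 1.09728   # mph or kph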
|
xadnem/motion-track
|
speed-track/speed_settings.py
|
Python
|
mit
| 2,582
|
[
"Gaussian"
] |
5fe32247b2631ff7b78ffe04232e22c78457a592b14bbd6787cd94c3940552cf
|
# $HeadURL: $
''' TransferCommand module
'''
from datetime import datetime, timedelta
from DIRAC import S_OK, S_ERROR
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Utilities import CSHelpers
__RCSID__ = '$Id: $'
class TransferCommand( Command ):
'''
Transfer "master" Command
'''
def __init__( self, args = None, clients = None ):
super( TransferCommand, self ).__init__( args, clients )
if 'ReportsClient' in self.apis:
self.rClient = self.apis[ 'ReportsClient' ]
else:
self.rClient = ReportsClient()
if 'ReportGenerator' in self.apis:
self.rgClient = self.apis[ 'ReportGenerator' ]
else:
self.rgClient = RPCClient( 'Accounting/ReportGenerator' )
self.rClient.rpcClient = self.rgClient
if 'ResourceManagementClient' in self.apis:
self.rmClient = self.apis[ 'ResourceManagementClient' ]
else:
self.rmClient = ResourceManagementClient()
def _storeCommand( self, results ):
'''
Stores the results of doNew method on the database.
'''
for result in results:
resQuery = self.rmClient.addOrModifyTransferCache( result[ 'SourceName' ],
result[ 'DestinationName' ],
result[ 'Metric' ],
result[ 'Value' ] )
if not resQuery[ 'OK' ]:
return resQuery
return S_OK()
def _prepareCommand( self ):
'''
    TransferCommand requires four arguments:
- hours : <int>
- direction : Source | Destination
- elementName : <str>
- metric : Quality | FailedTransfers
GGUSTickets are associated with gocDB names, so we have to transform the
diracSiteName into a gocSiteName.
'''
if not 'hours' in self.args:
return S_ERROR( 'Number of hours not specified' )
hours = self.args[ 'hours' ]
if not 'direction' in self.args:
return S_ERROR( 'direction is missing' )
direction = self.args[ 'direction' ]
if direction not in [ 'Source', 'Destination' ]:
return S_ERROR( 'direction is not Source nor Destination' )
if not 'name' in self.args:
return S_ERROR( '"name" is missing' )
name = self.args[ 'name' ]
if not 'metric' in self.args:
return S_ERROR( 'metric is missing' )
metric = self.args[ 'metric' ]
if metric not in [ 'Quality', 'FailedTransfers' ]:
return S_ERROR( 'metric is not Quality nor FailedTransfers' )
return S_OK( ( hours, name, direction, metric ) )
def doNew( self, masterParams = None ):
'''
Gets the parameters to run, either from the master method or from its
own arguments.
    For every elementName (bulk queries cannot be processed) it contacts the
    accounting client, which returns dictionaries like { 'X -> Y' : { id : 100%.. } }.
    The results are stored in the cache table and then returned.
'''
if masterParams is not None:
hours, name, direction, metric = masterParams
else:
params = self._prepareCommand()
if not params[ 'OK' ]:
return params
hours, name, direction, metric = params[ 'Value' ]
toD = datetime.utcnow()
fromD = toD - timedelta( hours = hours )
# dictionary with conditions for the accounting
transferDict = {
'OperationType' : 'putAndRegister',
direction : name
}
if metric == 'FailedTransfers':
transferDict[ 'FinalStatus' ] = [ 'Failed' ]
transferResults = self.rClient.getReport( 'DataOperation', metric, fromD,
toD, transferDict, 'Channel' )
if not transferResults[ 'OK' ]:
return transferResults
transferResults = transferResults[ 'Value' ]
if not 'data' in transferResults:
return S_ERROR( 'Missing data key' )
transferResults = transferResults[ 'data' ]
uniformResult = []
for channel, elementDict in transferResults.items():
try:
source, destination = channel.split( ' -> ' )
except ValueError:
continue
channelDict = {}
channelDict[ 'SourceName' ] = source
channelDict[ 'DestinationName' ] = destination
channelDict[ 'Metric' ] = metric
channelDict[ 'Value' ] = sum( elementDict.values() ) / len( elementDict.values() )
uniformResult.append( channelDict )
storeRes = self._storeCommand( uniformResult )
if not storeRes[ 'OK' ]:
return storeRes
# Compute mean of all transfer channels
value = 0
for channelDict in uniformResult:
value += channelDict[ 'Value' ]
if uniformResult:
value = float( value ) / len( uniformResult )
else:
value = None
return S_OK( { 'Mean' : value, 'Name' : name } )
def doCache( self ):
'''
Method that reads the cache table and tries to read from it. It will
return a list of dictionaries if there are results.
'''
params = self._prepareCommand()
if not params[ 'OK' ]:
return params
_hours, name, direction, metric = params[ 'Value' ]
sourceName, destinationName = None, None
if direction == 'Source':
sourceName = name
if direction == 'Destination':
destinationName = name
result = self.rmClient.selectTransferCache( sourceName, destinationName, metric )
if not result[ 'OK' ]:
return result
result = [ dict( zip( result[ 'Columns' ], res ) ) for res in result[ 'Value' ] ]
# Compute mean of all transfer channels
value = 0
for channelDict in result:
value += channelDict[ 'Value' ]
if result:
value = float( value ) / len( result )
else:
value = None
return S_OK( { 'Mean' : value, 'Name' : name } )
def doMaster( self ):
'''
    Master method, which looks a little bit like spaghetti code, sorry!
- It gets all Sites.
- It gets all StorageElements
As there is no bulk query, it compares with what we have on the database.
It queries a portion of them.
'''
sites = CSHelpers.getSites()
if not sites[ 'OK' ]:
return sites
sites = sites[ 'Value' ]
ses = CSHelpers.getStorageElements()
if not ses[ 'OK' ]:
return ses
ses = ses[ 'Value' ]
elementNames = sites + ses
# sourceQuery = self.rmClient.selectTransferCache( meta = { 'columns' : [ 'SourceName' ] } )
# if not sourceQuery[ 'OK' ]:
# return sourceQuery
# sourceQuery = [ element[0] for element in sourceQuery[ 'Value' ] ]
#
# sourceElementsToQuery = list( set( elementNames ).difference( set( sourceQuery ) ) )
self.log.info( 'Processing %s' % ', '.join( elementNames ) )
for metric in [ 'Quality', 'FailedTransfers' ]:
for direction in [ 'Source', 'Destination' ]:
# 2 hours of window
result = self.doNew( ( 2, elementNames, direction, metric ) )
if not result[ 'OK' ]:
self.metrics[ 'failed' ].append( result )
return S_OK( self.metrics )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
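# Illustrative invocation (hypothetical element name; requires a configured DIRAC
# installation, so shown as a sketch only):
#
#   command = TransferCommand( args = { 'hours' : 2, 'direction' : 'Source',
#                                       'name' : 'LCG.CERN.ch', 'metric' : 'Quality' } )
#   result = command.doNew()   # or command.doCache() to read the cached values
#   if result[ 'OK' ]:
#     print( result[ 'Value' ] )   # { 'Mean' : ..., 'Name' : 'LCG.CERN.ch' }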
|
vmendez/DIRAC
|
ResourceStatusSystem/Command/TransferCommand.py
|
Python
|
gpl-3.0
| 8,006
|
[
"DIRAC"
] |
3099ad87e139dddc585663562ea30e1ec5a468147078a60f7b39bbfc72e0baf2
|
__all__ = [
'WriteRectilinearGridToUBC',
'WriteImageDataToUBC',
]
__displayname__ = 'Writers'
import os
import numpy as np
from .. import interface
from ..base import WriterBase
class ubcTensorMeshWriterBase(WriterBase):
"""A base class to assist in writing data bjects to the UBC Tensor Mesh
format.
"""
__displayname__ = 'UBC Format Writer Base'
__category__ = 'base'
def __init__(self, inputType='vtkRectilinearGrid'):
WriterBase.__init__(self, inputType=inputType, ext='msh')
# These MUST be set by children
self.xcells = None
self.ycells = None
self.zcells = None
self.origin = None
def write_mesh_3d(self, nx, ny, nz, filename):
"""Write 3D Tensor Mesh to the UBC format"""
def arr2str(arr):
return ' '.join(map(str, arr))
ox, oy, oz = self.origin
# Write out grid / mesh
with open(filename, 'w') as f:
f.write('%d %d %d\n' % (nx, ny, nz))
f.write('%d %d %d\n' % (ox, oy, oz))
f.write('%s\n' % arr2str(self.xcells))
f.write('%s\n' % arr2str(self.ycells))
f.write('%s\n' % arr2str(self.zcells))
return
def write_models(self, grd, filename):
"""Write cell data attributes to model files"""
nx, ny, nz = grd.GetDimensions()
nx -= 1
ny -= 1
nz -= 1
def reshape_model(model):
# Swap axes because VTK structures the coordinates a bit differently
# - This is absolutely crucial!
# - Do not play with unless you know what you are doing!
model = np.reshape(model, (nz, ny, nx))
model = np.swapaxes(model, 0, 2)
model = np.swapaxes(model, 0, 1)
# Now reverse Z axis
model = model[:, :, ::-1]
return model.flatten()
# make up file names for models
for i in range(grd.GetCellData().GetNumberOfArrays()):
vtkarr = grd.GetCellData().GetArray(i)
arr = interface.convert_array(vtkarr)
arr = reshape_model(arr)
path = os.path.dirname(filename)
filename = '%s/%s.mod' % (path, vtkarr.GetName().replace(' ', '_'))
np.savetxt(
filename,
arr,
comments='! ',
header='Mesh File: %s' % os.path.basename(filename),
fmt=self.get_format(),
)
return
class WriteRectilinearGridToUBC(ubcTensorMeshWriterBase):
"""Writes a ``vtkRectilinearGrid`` data object to the UBC Tensor Mesh format.
    This writer currently only handles 3D data.
"""
__displayname__ = 'Write ``vtkRectilinearGrid`` to UBC Tensor Mesh'
__category__ = 'writer'
def __init__(self):
ubcTensorMeshWriterBase.__init__(self, inputType='vtkRectilinearGrid')
def perform_write_out(self, input_data_object, filename, object_name):
"""Write out a ``vtkRectilinearGrid`` to the UBC file format"""
# Get the input data object
grd = input_data_object
# Get grid dimensions
nx, ny, nz = grd.GetDimensions()
# get the points and convert to spacings
xcoords = interface.convert_array(grd.GetXCoordinates())
ycoords = interface.convert_array(grd.GetYCoordinates())
zcoords = interface.convert_array(grd.GetZCoordinates())
# TODO: decide if 2D or 3D
# Now get the cell sizes
self.xcells = np.diff(xcoords)
self.ycells = np.diff(ycoords)
self.zcells = np.diff(zcoords)
# find origin (top southwest corner): this works because of input type
ox, oy, oz = np.min(xcoords), np.min(ycoords), np.max(zcoords)
self.origin = (ox, oy, oz)
# flip z
self.zcells = self.zcells[::-1]
# Write mesh
self.write_mesh_3d(nx - 1, ny - 1, nz - 1, filename)
# Now write out model data
self.write_models(grd, filename)
# Always return 1 from pipeline methods or seg-faults will occur
return 1
class WriteImageDataToUBC(ubcTensorMeshWriterBase):
"""Writes a ``vtkImageData`` (uniform grid) data object to the UBC Tensor
    Mesh format. This writer currently only handles 3D data.
"""
__displayname__ = 'Write ``vtkImageData`` to UBC Tensor Mesh'
__category__ = 'writer'
def __init__(self):
ubcTensorMeshWriterBase.__init__(self, inputType='vtkImageData')
def perform_write_out(self, input_data_object, filename, object_name):
"""Write out a ``vtkImageData`` to the UBC file format"""
# Get the input data object
grd = input_data_object
# Get grid dimensions
nx, ny, nz = grd.GetDimensions()
nx -= 1
ny -= 1
nz -= 1
# get the points and convert to spacings
dx, dy, dz = grd.GetSpacing()
# Now make the cell arrays
self.xcells = np.full(nx, dx)
self.ycells = np.full(ny, dy)
self.zcells = np.full(nz, dz)
# find origin (top southwest corner)
ox, oy, oz = grd.GetOrigin()
oz += nz * dz
self.origin = (ox, oy, oz)
# TODO: decide if 2D or 3D
# Write mesh
self.write_mesh_3d(nx, ny, nz, filename)
# Now write out model data
self.write_models(grd, filename)
# Always return 1 from pipeline methods or seg-faults will occur
return 1
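# Illustrative usage sketch (assumes the `vtk` package is available; attaching the
# cell-data arrays is abbreviated):
#
#   import vtk
#   grid = vtk.vtkImageData()
#   grid.SetDimensions(11, 11, 11)      # 10 x 10 x 10 cells
#   grid.SetSpacing(1.0, 1.0, 1.0)
#   # ... add one or more arrays to grid.GetCellData() ...
#   WriteImageDataToUBC().perform_write_out(grid, 'mesh.msh', None)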
|
banesullivan/ParaViewGeophysics
|
PVGeo/ubc/write.py
|
Python
|
bsd-3-clause
| 5,502
|
[
"VTK"
] |
1ff6756b9123da13044db9002ca9eb5bf8b4b3b7e34bcadee7cce426dc3f5146
|
# -*- coding: utf-8 -*-
#
# test_import_lobbyists.py - Test lobbyist importing.
# Copyright (C) 2008 by Drew Hess <dhess@bothan.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test lobbyist importing."""
import unittest
import lobbyists
import sqlite3
import util
class TestImportLobbyists(unittest.TestCase):
def test_import_lobbyists(self):
"""Lobbyist importing."""
filings = list(lobbyists.parse_filings(util.testpath('lobbyists.xml')))
con = sqlite3.connect(':memory:')
con = lobbyists.create_db(con)
cur = con.cursor()
self.failUnless(lobbyists.import_filings(cur, filings))
# Some of the other import tests just compare the parsed
# filings to the contents of the database, but for various
# reasons that's difficult for lobbyist records. Instead,
# this test has knowledge of the contents of the
# 'lobbyists.xml' test file, and checks the database contents
# explicitly, ala the parser tests in test_parser.py.
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT * FROM lobbyist")
rows = [row for row in cur]
row = rows.pop()
self.failUnlessEqual(row['id'], 16)
self.failUnlessEqual(row['name'], 'KNUTSON, KENT')
self.failUnlessEqual(row['indicator'], 'undetermined')
self.failUnlessEqual(row['official_position'], 'N/A')
row = rows.pop()
self.failUnlessEqual(row['id'], 15)
self.failUnlessEqual(row['name'], 'KNUTSON, KENT')
self.failUnlessEqual(row['indicator'], 'not covered')
self.failUnlessEqual(row['official_position'], 'N/A')
row = rows.pop()
self.failUnlessEqual(row['id'], 14)
self.failUnlessEqual(row['name'], 'CHAMPLIN, STEVEN')
self.failUnlessEqual(row['indicator'], 'not covered')
self.failUnlessEqual(row['official_position'], 'ExecFlrAsst, H. Maj. Whip; ExecDir, H.DemCauc.')
row = rows.pop()
self.failUnlessEqual(row['id'], 13)
self.failUnlessEqual(row['name'], 'GRIFFIN, BRIAN')
self.failUnlessEqual(row['indicator'], 'not covered')
self.failUnlessEqual(row['official_position'], 'StaffAsst, DemPolicyComm; FlrAsst, MinoritySec')
row = rows.pop()
self.failUnlessEqual(row['id'], 12)
self.failUnlessEqual(row['name'], 'DUBERSTEIN, KENNETH')
self.failUnlessEqual(row['indicator'], 'not covered')
self.failUnlessEqual(row['official_position'], 'Chief of Staff, President Reagan')
row = rows.pop()
self.failUnlessEqual(row['id'], 11)
self.failUnlessEqual(row['name'], 'UELAND, ERIC')
self.failUnlessEqual(row['indicator'], 'not covered')
self.failUnlessEqual(row['official_position'], 'AsstEditor/Ed./Res.Dir, Sen.Rep.PolicyComm;')
row = rows.pop()
self.failUnlessEqual(row['id'], 10)
self.failUnlessEqual(row['name'], 'BEDWELL, EDWARD T')
self.failUnlessEqual(row['indicator'], 'undetermined')
self.failUnlessEqual(row['official_position'], 'unspecified')
row = rows.pop()
self.failUnlessEqual(row['id'], 9)
self.failUnlessEqual(row['name'], 'LEHMAN (MY 2006), PATRICK')
self.failUnlessEqual(row['indicator'], 'undetermined')
self.failUnlessEqual(row['official_position'], 'unspecified')
row = rows.pop()
self.failUnlessEqual(row['id'], 8)
self.failUnlessEqual(row['name'], 'NEAL, KATIE')
self.failUnlessEqual(row['indicator'], 'covered')
self.failUnlessEqual(row['official_position'], 'COMM DIR/REP DINGELL')
row = rows.pop()
self.failUnlessEqual(row['id'], 7)
self.failUnlessEqual(row['name'], 'NEAL, KATIE')
self.failUnlessEqual(row['indicator'], 'not covered')
self.failUnlessEqual(row['official_position'], 'N/A')
row = rows.pop()
self.failUnlessEqual(row['id'], 6)
self.failUnlessEqual(row['name'], 'NEAL, KATIE')
self.failUnlessEqual(row['indicator'], 'undetermined')
self.failUnlessEqual(row['official_position'], 'unspecified')
row = rows.pop()
self.failUnlessEqual(row['id'], 5)
self.failUnlessEqual(row['name'], 'unspecified')
self.failUnlessEqual(row['indicator'], 'not covered')
self.failUnlessEqual(row['official_position'], 'unspecified')
row = rows.pop()
self.failUnlessEqual(row['id'], 4)
self.failUnlessEqual(row['name'], 'MCKENNEY, WILLIAM')
self.failUnlessEqual(row['indicator'], 'not covered')
self.failUnlessEqual(row['official_position'], 'Staff Director, Ways & Means Over Sub')
row = rows.pop()
self.failUnlessEqual(row['id'], 3)
self.failUnlessEqual(row['name'], 'DENNIS, JAMES')
self.failUnlessEqual(row['indicator'], 'not covered')
self.failUnlessEqual(row['official_position'], 'Tax Counsel, Sen Robb - Counsel, Sen Bingaman')
row = rows.pop()
self.failUnlessEqual(row['id'], 2)
self.failUnlessEqual(row['name'], 'GRAFMEYER, RICHARD')
self.failUnlessEqual(row['indicator'], 'not covered')
self.failUnlessEqual(row['official_position'], 'Deputy Chief of Staff, JCT')
row = rows.pop()
self.failUnlessEqual(row['id'], 1)
self.failUnlessEqual(row['name'], 'HARRIS, ROBERT L.')
self.failUnlessEqual(row['indicator'], 'undetermined')
self.failUnlessEqual(row['official_position'], 'unspecified')
self.failUnlessEqual(len(rows), 0)
def test_import_filings_to_lobbyists(self):
"""Lobbyists are matched up with filings in the database."""
filings = list(lobbyists.parse_filings(util.testpath('lobbyists.xml')))
con = sqlite3.connect(':memory:')
con = lobbyists.create_db(con)
cur = con.cursor()
self.failUnless(lobbyists.import_filings(cur, filings))
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT * FROM filing_lobbyists")
rows = [row for row in cur]
row = rows.pop()
self.failUnlessEqual(row['filing'],
'771F3B6A-315D-4190-88F3-2CE0F138B2B8')
self.failUnlessEqual(row['status'], 'active')
self.failUnlessEqual(row['lobbyist'], 16)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'771F3B6A-315D-4190-88F3-2CE0F138B2B8')
self.failUnlessEqual(row['status'], 'active')
self.failUnlessEqual(row['lobbyist'], 15)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'BD894C51-AA23-46AE-9802-006B8C91702B')
self.failUnlessEqual(row['status'], 'active')
self.failUnlessEqual(row['lobbyist'], 14)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'BD894C51-AA23-46AE-9802-006B8C91702B')
self.failUnlessEqual(row['status'], 'active')
self.failUnlessEqual(row['lobbyist'], 13)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'BD894C51-AA23-46AE-9802-006B8C91702B')
self.failUnlessEqual(row['status'], 'active')
self.failUnlessEqual(row['lobbyist'], 12)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'BD894C51-AA23-46AE-9802-006B8C91702B')
self.failUnlessEqual(row['status'], 'active')
self.failUnlessEqual(row['lobbyist'], 11)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'2164D6BB-EBBA-40D2-9C18-16A2D670030A')
self.failUnlessEqual(row['status'], 'terminated')
self.failUnlessEqual(row['lobbyist'], 10)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'87A30FA6-7C35-4294-BA43-4CE7B5B808B3')
self.failUnlessEqual(row['status'], 'terminated')
self.failUnlessEqual(row['lobbyist'], 9)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'0FC23296-F948-43FD-98D4-0912F6579E6A')
self.failUnlessEqual(row['status'], 'active')
self.failUnlessEqual(row['lobbyist'], 8)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'0FC23296-F948-43FD-98D4-0912F6579E6A')
self.failUnlessEqual(row['status'], 'active')
self.failUnlessEqual(row['lobbyist'], 7)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'0FC23296-F948-43FD-98D4-0912F6579E6A')
self.failUnlessEqual(row['status'], 'active')
self.failUnlessEqual(row['lobbyist'], 6)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'02DDA99B-725A-4DBA-8397-34892A6918D7')
self.failUnlessEqual(row['status'], 'terminated')
self.failUnlessEqual(row['lobbyist'], 5)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'02DDA99B-725A-4DBA-8397-34892A6918D7')
self.failUnlessEqual(row['status'], 'active')
self.failUnlessEqual(row['lobbyist'], 4)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'02DDA99B-725A-4DBA-8397-34892A6918D7')
self.failUnlessEqual(row['status'], 'active')
self.failUnlessEqual(row['lobbyist'], 3)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'02DDA99B-725A-4DBA-8397-34892A6918D7')
self.failUnlessEqual(row['status'], 'active')
self.failUnlessEqual(row['lobbyist'], 2)
row = rows.pop()
self.failUnlessEqual(row['filing'],
'04926911-8A12-4A0E-9DA4-510869446EAC')
self.failUnlessEqual(row['status'], 'undetermined')
self.failUnlessEqual(row['lobbyist'], 1)
def test_import_lobbyist_person(self):
"""Importing lobbyists should fill the 'person' table."""
filings = list(lobbyists.parse_filings(util.testpath('lobbyists.xml')))
con = sqlite3.connect(':memory:')
con = lobbyists.create_db(con)
cur = con.cursor()
self.failUnless(lobbyists.import_filings(cur, filings))
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT * FROM person")
rows = [row['name'] for row in cur]
lobbyers = util.flatten([x['lobbyists'] for x in filings if 'lobbyists' in x])
names = set([x['lobbyist']['name'] for x in lobbyers])
self.failUnlessEqual(len(rows), len(names))
for name in names:
self.failUnless(name in rows)
def test_import_lobbyist_lobbyist_status(self):
"""After import, lobbyist_status table should be unchanged (it's pre-loaded)."""
con = sqlite3.connect(':memory:')
con = lobbyists.create_db(con)
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT status FROM lobbyist_status")
rows = set([row[0] for row in cur])
self.failUnlessEqual(len(rows), 3)
self.failUnless('active' in rows)
self.failUnless('terminated' in rows)
self.failUnless('undetermined' in rows)
def test_import_lobbyist_lobbyist_indicator(self):
"""After import, lobbyist_indicator table should be unchanged (it's pre-loaded)."""
con = sqlite3.connect(':memory:')
con = lobbyists.create_db(con)
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT status FROM lobbyist_indicator")
rows = set([row[0] for row in cur])
self.failUnlessEqual(len(rows), 3)
self.failUnless('covered' in rows)
self.failUnless('not covered' in rows)
self.failUnless('undetermined' in rows)
def test_import_identical_lobbyists(self):
"""Identical lobbyists shouldn't be duplicated in the database."""
filings = list(lobbyists.parse_filings(util.testpath('lobbyists_dup.xml')))
con = sqlite3.connect(':memory:')
con = lobbyists.create_db(con)
cur = con.cursor()
self.failUnless(lobbyists.import_filings(cur, filings))
cur = con.cursor()
cur.execute('SELECT lobbyist FROM filing_lobbyists')
row1, row2 = cur.fetchall()
self.failUnlessEqual(row1[0], row2[0])
def test_import_identical_lobbyists2(self):
"""Identical lobbyists shouldn't be duplicated in the database (case 2)."""
# This test file contains a single filing with two
# lobbyists. The two lobbyists are exactly the same, except
# with different statuses. This should result in only a single
# entry in the filing_lobbyists table.
filings = list(lobbyists.parse_filings(util.testpath('lobbyists_dup2.xml')))
con = sqlite3.connect(':memory:')
con = lobbyists.create_db(con)
cur = con.cursor()
self.failUnless(lobbyists.import_filings(cur, filings))
cur = con.cursor()
cur.execute('SELECT * FROM lobbyist')
rows = cur.fetchall()
self.failUnlessEqual(len(rows), 1)
def test_import_similar_lobbyists(self):
"""Slightly different lobbyists are inserted into different rows."""
filings = list(lobbyists.parse_filings(util.testpath('lobbyists_slightly_different.xml')))
con = sqlite3.connect(':memory:')
con = lobbyists.create_db(con)
cur = con.cursor()
self.failUnless(lobbyists.import_filings(cur, filings))
cur = con.cursor()
cur.execute('SELECT id FROM lobbyist')
lobbyers = util.flatten([x['lobbyists'] for x in filings if 'lobbyists' in x])
self.failUnlessEqual(len(cur.fetchall()), len(lobbyers))
if __name__ == '__main__':
unittest.main()
|
dhess/lobbyists
|
lobbyists/tests/test_import_lobbyists.py
|
Python
|
gpl-3.0
| 14,553
|
[
"Brian"
] |
80dfb154d0797ccb8f176cd9e512bb173f34abd177660225b8fc88bd2db943c2
|
import ast
from collections import deque
import sys
import pytest
class FuncCallVisitor(ast.NodeVisitor):
def __init__(self):
self._name = deque()
@property
def name(self):
return '.'.join(self._name)
@name.deleter
def name(self):
self._name.clear()
def visit_Name(self, node):
self._name.appendleft(node.id)
def visit_Attribute(self, node):
try:
self._name.appendleft(node.attr)
self._name.appendleft(node.value.id)
except AttributeError:
self.generic_visit(node)
def get_func_calls(tree):
func_calls = []
for node in ast.walk(tree):
if isinstance(node, ast.Call):
callvisitor = FuncCallVisitor()
callvisitor.visit(node.func)
func_calls.append(callvisitor.name)
func_calls.append(node.func.lineno)
return func_calls
if __name__ == '__main__':
tree = ast.parse(open(sys.argv[1]).read())
calls = get_func_calls(tree)
assert 'print' not in calls, 'Found print at line {0} of {1}'.format(calls[calls.index('print') + 1], sys.argv[1])
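# Illustrative behaviour of get_func_calls (names interleaved with line numbers):
#
#   >>> get_func_calls(ast.parse("print('x')\nfoo.bar()"))
#   ['print', 1, 'foo.bar', 2]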
|
frc1418/2017-robot
|
tests/test_print.py
|
Python
|
mit
| 1,132
|
[
"VisIt"
] |
ab126fd9d416fe024d2179677b48d718e79cbbed940e784da39b5587634246c3
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Wrapper for netCDF readers."""
from __future__ import unicode_literals, division, print_function
import os.path
import warnings
import numpy as np
from collections import OrderedDict
from monty.dev import requires
from monty.collections import AttrDict
from monty.functools import lazy_property
from monty.string import marquee
from pymatgen.core.units import ArrayWithUnit
from pymatgen.core.xcfunc import XcFunc
from pymatgen.core.structure import Structure
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
__status__ = "Development"
__date__ = "$Feb 21, 2013M$"
__all__ = [
"as_ncreader",
"as_etsfreader",
"NetcdfReader",
"NetcdfReaderError",
"ETSF_Reader",
"NO_DEFAULT",
"structure_from_ncdata",
]
try:
import netCDF4
except ImportError as exc:
netCDF4 = None
warnings.warn("""\
`import netCDF4` failed with the following error:
%s
Please install netcdf4 with `conda install netcdf4`
If the conda version does not work, uninstall it with `conda uninstall hdf4 hdf5 netcdf4`
and use `pip install netcdf4`""" % str(exc))
def _asreader(file, cls):
closeit = False
if not isinstance(file, cls):
file, closeit = cls(file), True
return file, closeit
def as_ncreader(file):
"""
Convert file into a NetcdfReader instance.
Returns reader, closeit where closeit is set to True
if we have to close the file before leaving the procedure.
"""
return _asreader(file, NetcdfReader)
def as_etsfreader(file):
return _asreader(file, ETSF_Reader)
class NetcdfReaderError(Exception):
"""Base error class for NetcdfReader"""
class NO_DEFAULT(object):
"""Signal that read_value should raise an Error"""
class NetcdfReader(object):
"""
Wraps and extends netCDF4.Dataset. Read only mode. Supports with statements.
Additional documentation available at:
http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4-module.html
"""
Error = NetcdfReaderError
@requires(netCDF4 is not None, "netCDF4 must be installed to use this class")
def __init__(self, path):
"""Open the Netcdf file specified by path (read mode)."""
self.path = os.path.abspath(path)
try:
self.rootgrp = netCDF4.Dataset(self.path, mode="r")
except Exception as exc:
raise self.Error("In file %s: %s" % (self.path, str(exc)))
self.ngroups = len(list(self.walk_tree()))
#self.path2group = OrderedDict()
#for children in self.walk_tree():
# for child in children:
# #print(child.group, child.path)
# self.path2group[child.path] = child.group
def __enter__(self):
"""Activated when used in the with statement."""
return self
def __exit__(self, type, value, traceback):
"""Activated at the end of the with statement. It automatically closes the file."""
self.rootgrp.close()
def close(self):
try:
self.rootgrp.close()
except Exception as exc:
logger.warning("Exception %s while trying to close %s" % (exc, self.path))
def walk_tree(self, top=None):
"""
Navigate all the groups in the file starting from top.
If top is None, the root group is used.
"""
if top is None:
top = self.rootgrp
values = top.groups.values()
yield values
for value in top.groups.values():
for children in self.walk_tree(value):
yield children
def print_tree(self):
for children in self.walk_tree():
for child in children:
print(child)
def read_dimvalue(self, dimname, path="/", default=NO_DEFAULT):
"""
Returns the value of a dimension.
Args:
dimname: Name of the dimension
path: path to the group.
default: return `default` if `dimname` is not present and
`default` is not `NO_DEFAULT` else raise self.Error.
"""
try:
dim = self._read_dimensions(dimname, path=path)[0]
return len(dim)
except self.Error:
if default is NO_DEFAULT: raise
return default
def read_varnames(self, path="/"):
"""List of variable names stored in the group specified by path."""
if path == "/":
return self.rootgrp.variables.keys()
else:
group = self.path2group[path]
return group.variables.keys()
def read_value(self, varname, path="/", cmode=None, default=NO_DEFAULT):
"""
Returns the values of variable with name varname in the group specified by path.
Args:
varname: Name of the variable
path: path to the group.
cmode: if cmode=="c", a complex ndarray is constructed and returned
(netcdf does not provide native support for complex datatypes).
default: returns `default` if `varname` is not present and
`default` is not `NO_DEFAULT`, else self.Error is raised.
Returns:
numpy array if varname represents an array, scalar otherwise.
"""
try:
var = self.read_variable(varname, path=path)
except self.Error:
if default is NO_DEFAULT: raise
return default
if cmode is None:
# scalar or array
# getValue is not portable!
try:
return var.getValue()[0] if not var.shape else var[:]
except IndexError:
return var.getValue() if not var.shape else var[:]
else:
assert var.shape[-1] == 2
if cmode == "c":
return var[...,0] + 1j*var[...,1]
else:
raise ValueError("Wrong value for cmode %s" % cmode)
def read_variable(self, varname, path="/"):
"""Returns the variable with name varname in the group specified by path."""
return self._read_variables(varname, path=path)[0]
def _read_dimensions(self, *dimnames, **kwargs):
path = kwargs.get("path", "/")
try:
if path == "/":
return [self.rootgrp.dimensions[dname] for dname in dimnames]
else:
group = self.path2group[path]
return [group.dimensions[dname] for dname in dimnames]
except KeyError:
raise self.Error("In file %s:\nError while reading dimensions: `%s` with kwargs: `%s`" %
(self.path, dimnames, kwargs))
def _read_variables(self, *varnames, **kwargs):
path = kwargs.get("path", "/")
try:
if path == "/":
return [self.rootgrp.variables[vname] for vname in varnames]
else:
group = self.path2group[path]
return [group.variables[vname] for vname in varnames]
except KeyError:
raise self.Error("In file %s:\nError while reading variables: `%s` with kwargs `%s`." %
(self.path, varnames, kwargs))
def read_keys(self, keys, dict_cls=AttrDict, path="/"):
"""
Read a list of variables/dimensions from file. If a key is not present the corresponding
entry in the output dictionary is set to None.
"""
od = dict_cls()
for k in keys:
try:
# Try to read a variable.
od[k] = self.read_value(k, path=path)
except self.Error:
try:
# Try to read a dimension.
od[k] = self.read_dimvalue(k, path=path)
except self.Error:
od[k] = None
return od
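# A minimal sketch (illustrative; not part of pymatgen) of the cmode="c"
# convention handled by NetcdfReader.read_value: netCDF has no native complex
# type, so complex arrays are stored with a trailing axis of length 2 holding
# (real, imag) pairs and are rebuilt like this:
#
# >>> import numpy as np
# >>> raw = np.array([[1.0, 2.0], [3.0, 4.0]])  # shape (..., 2)
# >>> raw[..., 0] + 1j * raw[..., 1]
# array([1.+2.j, 3.+4.j])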
class ETSF_Reader(NetcdfReader):
"""
This object reads data from a file written according to the ETSF-IO specifications.
We assume that the netcdf file contains at least the crystallographic section.
"""
@lazy_property
def chemical_symbols(self):
"""Chemical symbols char [number of atom species][symbol length]."""
charr = self.read_value("chemical_symbols")
symbols = []
for v in charr:
symbols.append("".join(c.decode("utf-8") for c in v))
return symbols
def typeidx_from_symbol(self, symbol):
"""Returns the type index from the chemical symbol. Note python convention."""
return self.chemical_symbols.index(symbol)
def read_structure(self, cls=Structure):
"""Returns the crystalline structure."""
if self.ngroups != 1:
raise NotImplementedError("In file %s: ngroups != 1" % self.path)
return structure_from_ncdata(self, cls=cls)
def read_abinit_xcfunc(self):
"""
Read ixc from an Abinit file. Return :class:`XcFunc` object.
"""
ixc = int(self.read_value("ixc"))
return XcFunc.from_abinit_ixc(ixc)
def read_abinit_hdr(self):
"""
Read the variables associated to the Abinit header.
Return :class:`AbinitHeader`
"""
d = {}
for hvar in _HDR_VARIABLES.values():
ncname = hvar.etsf_name if hvar.etsf_name is not None else hvar.name
if ncname in self.rootgrp.variables:
d[hvar.name] = self.read_value(ncname)
elif ncname in self.rootgrp.dimensions:
d[hvar.name] = self.read_dimvalue(ncname)
else:
raise ValueError("Cannot find `%s` in `%s`" % (ncname, self.path))
# Convert scalars to (well) scalars.
if hasattr(d[hvar.name], "shape") and not d[hvar.name].shape:
d[hvar.name] = np.asscalar(d[hvar.name])
if hvar.name in ("title", "md5_pseudos", "codvsn"):
# Convert array of numpy bytes to list of strings
if hvar.name == "codvsn":
d[hvar.name] = "".join(bs.decode("utf-8").strip() for bs in d[hvar.name])
else:
d[hvar.name] = ["".join(bs.decode("utf-8") for bs in astr).strip()
for astr in d[hvar.name]]
return AbinitHeader(d)
def structure_from_ncdata(ncdata, site_properties=None, cls=Structure):
"""
Reads and returns a pymatgen structure from a NetCDF file
containing crystallographic data in the ETSF-IO format.
Args:
ncdata: filename or NetcdfReader instance.
site_properties: Dictionary with site properties.
cls: The Structure class to instantiate.
"""
ncdata, closeit = as_ncreader(ncdata)
# TODO check whether atomic units are used
lattice = ArrayWithUnit(ncdata.read_value("primitive_vectors"), "bohr").to("ang")
red_coords = ncdata.read_value("reduced_atom_positions")
natom = len(red_coords)
znucl_type = ncdata.read_value("atomic_numbers")
# type_atom[0:natom] --> index between 1 and number of atom species
type_atom = ncdata.read_value("atom_species")
# Fortran to C index and float --> int conversion.
species = natom * [None]
for atom in range(natom):
type_idx = type_atom[atom] - 1
species[atom] = int(znucl_type[type_idx])
d = {}
if site_properties is not None:
for prop in site_properties:
d[prop] = ncdata.read_value(prop)
structure = cls(lattice, species, red_coords, site_properties=d)
# Quick and dirty hack.
# I need an abipy structure since I need to_abivars and other methods.
try:
from abipy.core.structure import Structure as AbipyStructure
structure.__class__ = AbipyStructure
except ImportError:
pass
if closeit:
ncdata.close()
return structure
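# Worked example (illustrative only) of the Fortran-to-C index conversion in
# structure_from_ncdata: with znucl_type = [8, 1] (O, H) and
# atom_species = [1, 2, 2] for a water molecule, atom 0 maps to
# znucl_type[1 - 1] == 8 (oxygen) and atoms 1 and 2 map to
# znucl_type[2 - 1] == 1 (hydrogen).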
class _H(object):
__slots__ = ["name", "doc", "etsf_name"]
def __init__(self, name, doc, etsf_name=None):
self.name, self.doc, self.etsf_name = name, doc, etsf_name
_HDR_VARIABLES = (
# Scalars
_H("bantot", "total number of bands (sum of nband on all kpts and spins)"),
_H("date", "starting date"),
_H("headform", "format of the header"),
_H("intxc", "input variable"),
_H("ixc", "input variable"),
_H("mband", "maxval(hdr%nband)", etsf_name="max_number_of_states"),
_H("natom", "input variable", etsf_name="number_of_atoms"),
_H("nkpt", "input variable", etsf_name="number_of_kpoints"),
_H("npsp", "input variable"),
_H("nspden", "input variable", etsf_name="number_of_components"),
_H("nspinor", "input variable", etsf_name="number_of_spinor_components"),
_H("nsppol", "input variable", etsf_name="number_of_spins"),
_H("nsym", "input variable", etsf_name="number_of_symmetry_operations"),
_H("ntypat", "input variable", etsf_name="number_of_atom_species"),
_H("occopt", "input variable"),
_H("pertcase", "the index of the perturbation, 0 if GS calculation"),
_H("usepaw", "input variable (0=norm-conserving psps, 1=paw)"),
_H("usewvl", "input variable (0=plane-waves, 1=wavelets)"),
_H("kptopt", "input variable (defines symmetries used for k-point sampling)"),
_H("pawcpxocc", "input variable"),
_H("nshiftk_orig", "original number of shifts given in input (changed in inkpts, the actual value is nshiftk)"),
_H("nshiftk", "number of shifts after inkpts."),
_H("icoulomb", "input variable."),
_H("ecut", "input variable", etsf_name="kinetic_energy_cutoff"),
_H("ecutdg", "input variable (ecut for NC psps, pawecutdg for paw)"),
_H("ecutsm", "input variable"),
_H("ecut_eff", "ecut*dilatmx**2 (dilatmx is an input variable)"),
_H("etot", "EVOLVING variable"),
_H("fermie", "EVOLVING variable", etsf_name="fermi_energy"),
_H("residm", "EVOLVING variable"),
_H("stmbias", "input variable"),
_H("tphysel", "input variable"),
_H("tsmear", "input variable"),
_H("nelect", "number of electrons (computed from pseudos and charge)"),
_H("charge", "input variable"),
# Arrays
_H("qptn", "qptn(3) the wavevector, in case of a perturbation"),
#_H("rprimd", "rprimd(3,3) EVOLVING variables", etsf_name="primitive_vectors"),
#_H(ngfft, "ngfft(3) input variable", number_of_grid_points_vector1"
#_H("nwvlarr", "nwvlarr(2) the number of wavelets for each resolution.", etsf_name="number_of_wavelets"),
_H("kptrlatt_orig", "kptrlatt_orig(3,3) Original kptrlatt"),
_H("kptrlatt", "kptrlatt(3,3) kptrlatt after inkpts."),
_H("istwfk", "input variable istwfk(nkpt)"),
_H("lmn_size", "lmn_size(npsp) from psps"),
_H("nband", "input variable nband(nkpt*nsppol)", etsf_name="number_of_states"),
_H("npwarr", "npwarr(nkpt) array holding npw for each k point", etsf_name="number_of_coefficients"),
_H("pspcod", "pscod(npsp) from psps"),
_H("pspdat", "psdat(npsp) from psps"),
_H("pspso", "pspso(npsp) from psps"),
_H("pspxc", "pspxc(npsp) from psps"),
_H("so_psp", "input variable so_psp(npsp)"),
_H("symafm", "input variable symafm(nsym)"),
#_H(symrel="input variable symrel(3,3,nsym)", etsf_name="reduced_symmetry_matrices"),
_H("typat", "input variable typat(natom)", etsf_name="atom_species"),
_H("kptns", "input variable kptns(nkpt, 3)", etsf_name="reduced_coordinates_of_kpoints"),
_H("occ", "EVOLVING variable occ(mband, nkpt, nsppol)", etsf_name="occupations"),
_H("tnons", "input variable tnons(nsym, 3)", etsf_name="reduced_symmetry_translations"),
_H("wtk", "weight of kpoints wtk(nkpt)", etsf_name="kpoint_weights"),
_H("shiftk_orig", "original shifts given in input (changed in inkpts)."),
_H("shiftk", "shiftk(3,nshiftk), shiftks after inkpts"),
_H("amu", "amu(ntypat) ! EVOLVING variable"),
#_H("xred", "EVOLVING variable xred(3,natom)", etsf_name="reduced_atom_positions"),
_H("zionpsp", "zionpsp(npsp) from psps"),
_H("znuclpsp", "znuclpsp(npsp) from psps. Note the difference between (znucl|znucltypat) and znuclpsp"),
_H("znucltypat", "znucltypat(ntypat) from alchemy", etsf_name="atomic_numbers"),
_H("codvsn", "version of the code"),
_H("title", "title(npsp) from psps"),
_H("md5_pseudos", "md5pseudos(npsp), md5 checksums associated to pseudos (read from file)"),
#_H(type(pawrhoij_type), allocatable :: pawrhoij(:) ! EVOLVING variable, only for paw
)
_HDR_VARIABLES = OrderedDict([(h.name, h) for h in _HDR_VARIABLES])
class AbinitHeader(AttrDict):
"""Stores the values reported in the Abinit header."""
#def __init__(self, *args, **kwargs):
# super(AbinitHeader, self).__init__(*args, **kwargs)
# for k, v in self.items():
# v.__doc__ = _HDR_VARIABLES[k].doc
def __str__(self):
return self.to_string()
def to_string(self, verbose=0, title=None, **kwargs):
"""
String representation. kwargs are passed to `pprint.pformat`.
Args:
verbose: Verbosity level
title: Title string.
"""
from pprint import pformat
s = pformat(self, **kwargs)
if title is not None:
return "\n".join([marquee(title, mark="="), s])
return s
|
setten/pymatgen
|
pymatgen/io/abinit/netcdf.py
|
Python
|
mit
| 17,296
|
[
"ABINIT",
"NetCDF",
"pymatgen"
] |
178c4875be43e0eaf31cdccf98325b99b9e2d708cf24ee4c6a0fc5e76463b95f
|
# -*- coding: utf-8 -*-
"""
Functions related to fetching and manipulating skeletons.
"""
from io import StringIO
from itertools import combinations
from collections import namedtuple
import numpy as np
import pandas as pd
import networkx as nx
from scipy.spatial import cKDTree
from .client import inject_client
@inject_client
def fetch_skeleton(body, heal=False, export_path=None, format='pandas', with_distances=False, *, client=None):
"""
Equivalent to :py:meth:`.Client.fetch_skeleton()`. See that function for details.
"""
return client.fetch_skeleton(body, heal, export_path, format, with_distances)
def skeleton_df_to_nx(df, with_attributes=True, directed=True, with_distances=False):
"""
Create a ``networkx.Graph`` from a skeleton DataFrame.
Args:
df:
DataFrame as returned by :py:meth:`.Client.fetch_skeleton()`
with_attributes:
If True, store node attributes for x, y, z, radius
directed:
If True, return ``nx.DiGraph``, otherwise ``nx.Graph``.
Edges will point from child to parent.
with_distances:
If True, add an edge attribute 'distance' indicating the
euclidean distance between skeleton nodes.
Returns:
``nx.DiGraph`` or ``nx.Graph``
"""
if directed:
g = nx.DiGraph()
else:
g = nx.Graph()
if with_attributes:
for row in df.itertuples(index=False):
g.add_node(row.rowId, x=row.x, y=row.y, z=row.z, radius=row.radius)
else:
g.add_nodes_from(df['rowId'].sort_values())
if with_distances:
edges_df = df[['rowId', 'link']].copy()
edges_df['distance'] = calc_segment_distances(df)
edges_df = edges_df.query('link != -1').sort_values(['rowId', 'link'])
g.add_weighted_edges_from(edges_df.itertuples(index=False), 'distance')
else:
edges_df = df.query('link != -1')[['rowId', 'link']]
edges_df = edges_df.sort_values(['rowId', 'link'])
g.add_edges_from(edges_df.values)
return g
def calc_segment_distances(df):
"""
For each node (row) in the given skeleton DataFrame,
compute euclidean distance from the node to its parent (link) node.
Root nodes (i.e. when link == -1) will be assigned a distance of np.inf.
Returns:
np.ndarray
"""
# Append parent (link) columns to each row by matching
# each row's 'link' ID with the parent's 'rowId'.
edges_df = df[['rowId', 'link', *'xyz']].merge(
df[['rowId', *'xyz']], 'left',
left_on='link', right_on='rowId', suffixes=['', '_link'])
diff = edges_df[[*'xyz']] - edges_df[['x_link', 'y_link', 'z_link']].values
distances = np.linalg.norm(diff, axis=1).astype(np.float32)
distances[np.isnan(distances)] = np.inf
return distances
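# Hypothetical example (values made up for illustration):
#
# >>> df = pd.DataFrame({'rowId': [1, 2], 'link': [-1, 1],
# ...                    'x': [0., 3.], 'y': [0., 4.], 'z': [0., 0.]})
# >>> calc_segment_distances(df)  # root gets inf, child gets a 3-4-5 distance
# array([inf,  5.], dtype=float32)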
def skeleton_swc_to_df(swc):
"""
Create a DataFrame from an SWC file.
The 'node_type' column is discarded.
Args:
swc:
Either a filepath ending in '.swc', or a file object,
or the contents of an SWC file (as a string).
Returns:
``pd.DataFrame``
"""
if hasattr(swc, 'read'):
swc = swc.read()
else:
assert isinstance(swc, str)
if swc.endswith('.swc'):
with open(swc, 'r') as f:
swc = f.read()
cols = ['rowId', 'node_type', 'x', 'y', 'z', 'radius', 'link']
lines = swc.split('\n')
lines = filter(lambda line: '#' not in line, lines)
swc_csv = '\n'.join(lines)
# Compact dtypes save RAM when loading lots of skeletons
dtypes = {
'rowId': np.int32,
'node_type': np.int8,
'x': np.float32,
'y': np.float32,
'z': np.float32,
'radius': np.float32,
'link': np.int32,
}
df = pd.read_csv(StringIO(swc_csv), delimiter=' ', engine='c', names=cols, dtype=dtypes, header=None)
df = df.drop(columns=['node_type'])
return df
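# Illustrative SWC snippet (made up) accepted by skeleton_swc_to_df:
#
# >>> swc = "# header\n1 0 0.0 0.0 0.0 1.5 -1\n2 0 1.0 0.0 0.0 1.0 1\n"
# >>> skeleton_swc_to_df(swc)[['rowId', 'link']].values.tolist()
# [[1, -1], [2, 1]]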
def skeleton_df_to_swc(df, export_path=None):
"""
Create an SWC file from a skeleton DataFrame.
Args:
df:
DataFrame, as returned by :py:meth:`.Client.fetch_skeleton()`
export_path:
Optional. Write the SWC file to disk at the given location.
Returns:
``str``
"""
df = df.copy()
df['node_type'] = 0
df = df[['rowId', 'node_type', 'x', 'y', 'z', 'radius', 'link']]
swc = "# "
swc += df.to_csv(sep=' ', header=True, index=False)
if export_path:
with open(export_path, 'w') as f:
f.write(swc)
return swc
def heal_skeleton(skeleton_df, max_distance=np.inf):
"""
Attempt to repair a fragmented skeleton into a single connected component.
Rather than a single tree, skeletons from neuprint sometimes
consist of multiple fragments, i.e. multiple connected
components. That's due to artifacts in the underlying
segmentation from which the skeletons were generated.
In such skeletons, there will be multiple 'root' nodes
(SWC rows where ``link == -1``).
This function 'heals' a fragmented skeleton by joining its
fragments into a single tree.
First, each fragment is joined to every other fragment at
their nearest points. The resulting graph has unnecessary
edges, which are then removed by extracting the minimum
spanning tree. The MST is returned as the healed skeleton.
Args:
skeleton_df:
DataFrame as returned by :py:meth:`.Client.fetch_skeleton()`
max_distance:
If a skeleton's fragments are very spatially distant, it may
not be desirable to connect them with a new edge.
This parameter specifies the maximum length of new edges
introduced by the healing procedure. If a skeleton fragment
cannot be connected to the rest of the skeleton because it's
too far away, the skeleton will remain fragmented.
Returns:
DataFrame, with ``link`` column updated with updated edges.
"""
if max_distance is True:
max_distance = np.inf
if not max_distance:
max_distance = 0.0
skeleton_df = skeleton_df.sort_values('rowId').reset_index(drop=True)
g = skeleton_df_to_nx(skeleton_df, False, False)
# Extract each fragment's rows and construct a KD-Tree
Fragment = namedtuple('Fragment', ['frag_id', 'df', 'kd'])
fragments = []
for frag_id, cc in enumerate(nx.connected_components(g)):
if len(cc) == len(skeleton_df):
# There's only one component -- no healing necessary
return skeleton_df
df = skeleton_df.query('rowId in @cc')
kd = cKDTree(df[[*'xyz']].values)
fragments.append( Fragment(frag_id, df, kd) )
# Sort from big-to-small, so the calculations below use a
# KD tree for the larger point set in every fragment pair.
fragments = sorted(fragments, key=lambda frag: -len(frag.df))
# We could use the full graph and connect all
# fragment pairs at their nearest neighbors,
# but it's faster to treat each fragment as a
# single node and run MST on that quotient graph,
# which is tiny.
frag_graph = nx.Graph()
for frag_a, frag_b in combinations(fragments, 2):
coords_b = frag_b.df[[*'xyz']].values
distances, indexes = frag_a.kd.query(coords_b)
index_b = np.argmin(distances)
index_a = indexes[index_b]
node_a = frag_a.df['rowId'].iloc[index_a]
node_b = frag_b.df['rowId'].iloc[index_b]
dist_ab = distances[index_b]
# Add edge from one fragment to another,
# but keep track of which fine-grained skeleton
# nodes were used to calculate distance.
frag_graph.add_edge( frag_a.frag_id, frag_b.frag_id,
node_a=node_a, node_b=node_b,
distance=dist_ab )
# Compute inter-fragment MST edges
frag_edges = nx.minimum_spanning_edges(frag_graph, weight='distance', data=True)
# For each inter-fragment edge, add the corresponding
# fine-grained edge between skeleton nodes in the original graph.
omit_edges = []
for _u, _v, d in frag_edges:
g.add_edge(d['node_a'], d['node_b'])
if d['distance'] > max_distance:
omit_edges.append((d['node_a'], d['node_b']))
# Traverse in depth-first order to compute edges for final tree
root = skeleton_df['rowId'].iloc[0]
# Replace 'link' (parent) column using MST edges
_reorient_skeleton(skeleton_df, root, g)
assert (skeleton_df['link'] == -1).sum() == 1
assert skeleton_df['link'].iloc[0] == -1
# Delete edges that violated max_distance
for a,b in omit_edges:
q = '(rowId == @a and link == @b) or (rowId == @b and link == @a)'
idx = skeleton_df.query(q).index
skeleton_df.loc[idx, 'link'] = -1
return skeleton_df
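# Sketch of the fragment-level MST step above (toy numbers, illustrative):
# each fragment becomes one node in a small quotient graph, edges carry the
# nearest-neighbor distance plus the fine-grained endpoints, and the MST
# picks which inter-fragment edges survive.
#
# >>> import networkx as nx
# >>> fg = nx.Graph()
# >>> fg.add_edge(0, 1, node_a=3, node_b=7, distance=2.5)
# >>> list(nx.minimum_spanning_edges(fg, weight='distance', data=False))
# [(0, 1)]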
def _reorient_skeleton(skeleton_df, root, g=None):
"""
Replace the 'link' column in each row of the skeleton dataframe
so that its parent corresponds to a depth-first traversal from
the given root node.
Args:
skeleton_df:
A skeleton dataframe
root:
A rowId to use as the new root node
g:
Optional. A nx.Graph representation of the skeleton
Works in-place.
"""
g = g or skeleton_df_to_nx(skeleton_df, False, False)
assert isinstance(g, nx.Graph) and not isinstance(g, nx.DiGraph), \
"skeleton graph must be undirected"
edges = list(nx.dfs_edges(g, source=root))
# If the graph has more than one connected component,
# the remaining components have arbitrary roots
if len(edges) != len(g.edges):
for cc in nx.connected_components(g):
if root not in cc:
edges += list(nx.dfs_edges(g, source=cc.pop()))
edges = pd.DataFrame(edges, columns=['link', 'rowId']) # parent, child
edges = edges.set_index('rowId')['link']
# Replace 'link' (parent) column using DFS edges
skeleton_df['link'] = skeleton_df['rowId'].map(edges).fillna(-1).astype(int)
def reorient_skeleton(skeleton_df, rowId=None, xyz=None, use_max_radius=False):
"""
Change the root node of a skeleton.
In general, the root node of the skeletons stored in neuprint is
not particularly significant, so the directionality of the nodes
(parent to child or vice-versa) on any given neuron branch is arbitrary.
This function allows you to pick a different root node and reorient
the tree with respect to that node. Replaces the 'link' column in
each row of the skeleton dataframe so that its parent corresponds
to a depth-first traversal from the new root node.
You can specify the new root node either by its row, or by a coordinate
(the closest node to that coordinate will be selected) or by size
(the largest node will be selected).
Works in-place. Only the 'link' column is changed.
If the given skeleton has more than one connected component (and thus
more than one root node), the orientation of the edges in other components
will be arbitrary.
Args:
skeleton_df:
A skeleton dataframe, e.g. as returned by `py:func:fetch_skeleton(..., heal=True)`
rowId:
A rowId to use as the new root node
xyz:
If given, chooses the node closest to the given coordinate as the new root node.
use_max_radius:
If True, choose the largest node (by radius) to use as the new root node.
"""
assert rowId != 0, \
"rowId is never 0 in NeuTu skeletons"
assert bool(rowId) + (xyz is not None) + use_max_radius == 1, \
"Select either a rowId to use as the new root, or a coordinate, or use_max_radius=True"
if xyz is not None:
# Find closest node to the given coordinate
distances = np.linalg.norm(skeleton_df[[*'xyz']] - xyz, axis=1)
rowId = skeleton_df['rowId'].iloc[np.argmin(distances)]
elif use_max_radius:
# Find the node with the largest radius
idx = skeleton_df['radius'].idxmax()
rowId = skeleton_df.loc[idx, 'rowId']
assert rowId is not None, "You must specify a new root node"
_reorient_skeleton(skeleton_df, rowId)
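# Usage sketch (hypothetical skeleton_df): re-root the skeleton at its widest node.
#
# >>> reorient_skeleton(skeleton_df, use_max_radius=True)  # modifies in place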
def skeleton_segments(skeleton_df):
"""
Compute a table of skeleton segments.
A skeleton dataframe is a table of nodes (points) and their parent nodes.
This function computes a table of segments, where each row lists both the
child and parent point, along with some attributes describing the segment:
length, average radius, and segment volume.
"""
assert isinstance(skeleton_df, pd.DataFrame)
segment_df = skeleton_df.merge(skeleton_df[['rowId', 'link', *'xyz', 'radius']],
'inner',
left_on='link',
right_on='rowId',
suffixes=['', '_parent'])
child_points = segment_df[[*'xyz']].values
parent_points = segment_df[['x_parent', 'y_parent', 'z_parent']].values
segment_df['length'] = np.linalg.norm(child_points - parent_points, axis=1)
segment_df['avg_radius'] = segment_df.eval('(radius + radius_parent) / 2')
# Volume of a truncated cone:
# V = π * h * (R² + R·r + r²) / 3
PI = np.pi
e = '@PI * length * (radius_parent**2 + radius**2 + radius*radius_parent) / 3'
segment_df['volume'] = segment_df.eval(e)
return segment_df
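# Sanity check of the truncated-cone formula (illustrative): with
# radius == radius_parent == r it reduces to pi * length * (3 * r**2) / 3,
# i.e. pi * r**2 * length, the volume of a cylinder, as expected.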
|
connectome-neuprint/neuprint-python
|
neuprint/skeleton.py
|
Python
|
bsd-3-clause
| 13,578
|
[
"NEURON"
] |
231095b57c39446493ac2aa531c827757c128c9f13537c5da80b779fbdf8c2bd
|
import random, unittest, cPickle, collections
from copy import deepcopy, copy
from pylpsolve import LP, LPException
from numpy import array as ar, ones, eye, float64, uint, int
class TestErrorCatch(unittest.TestCase):
# test constraint adding by (wrong typed index array, value array)
def test01_constraint_rejects_float_idx(self):
lp = LP()
self.assertRaises(ValueError,
lambda: lp.addConstraint( (ar([0, 1.1, 2],dtype=float64), ar([1,1,1],dtype=float64) ), ">=", 1))
# test constraint adding by (wrong typed index array, value array)
def test01_objfunc_rejects_float_idx(self):
lp = LP()
self.assertRaises(ValueError,
lambda: lp.setObjective( (ar([0, 1.1, 2],dtype=float64), ar([1,1,1],dtype=float64) )))
def test02_constraint_rejects_neg_idx(self):
lp = LP()
self.assertRaises(ValueError,
lambda: lp.addConstraint( (ar([0, -1, 2]), ar([1,1,1],dtype=float64) ), ">=", 1))
def test02_objfunc_rejects_negative_idx(self):
lp = LP()
self.assertRaises(ValueError,
lambda: lp.setObjective( (ar([0, -1, 2]), ar([1,1,1],dtype=float64) )))
def checkBadSizingTooLarge(self, opts):
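# Decoding of the opts string (comment added for clarity, inferred from the
# lookups below): the leading chars pick the index container (t=tuple range,
# N=pre-registered named block, l=list, a=int array, f=float array, d=dict,
# T=list of tuples; for d/T a digit 2-5 selects a predefined block layout),
# the next char picks the weight container, and the final char selects the
# call under test ("c" = addConstraint, "o" = setObjective).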
lp = LP()
def run_test(c_arg, o_arg):
if opts[-1] == "c":
self.assertRaises(ValueError, lambda: lp.addConstraint(c_arg, ">", 1))
elif opts[-1] == "o":
self.assertRaises(ValueError, lambda: lp.setObjective(o_arg))
else:
assert False
indices = {}
indices["t"] = (0,3)
indices["N"] = "a"
indices["l"] = [0,1,2]
indices["a"] = ar([0,1,2])
indices["f"] = ar([0,1,2],dtype=float64)
weights = {}
weights["l"] = [1,1,1,1]
weights["a"] = ar([1,1,1,1])
weights["f"] = ar([1,1,1,1])
obj_func = {}
obj_func["l"] = [1,2,3,4]
obj_func["a"] = ar([1,2,3,4])
obj_func["f"] = ar([1,2,3,4],dtype=float64)
# Some ones used in the dict's case
il = indices["l"]
assert len(il) == 3
wl = weights["l"]
assert len(wl) == 4
ol = obj_func["l"]
assert len(ol) == 4
if opts[0] == "d" or opts[0] == "T":
if opts[1] == "2":
lp.getIndexBlock("b", 1)
cd = [ ("a", wl[:2]), ("b", wl[2:])]
od = [ ("a", ol[:2]), ("b", ol[2:])]
elif opts[1] == "3":
cd = [((0,2), wl[:2]), (2, wl[2:])]
od = [((0,2), ol[:2]), (2, ol[2:])]
elif opts[1] == "4":
cd = [((0,2), wl[:2]), ( (2,3), wl[2:])]
od = [((0,2), ol[:2]), ( (2,3), ol[2:])]
elif opts[1] == "5": # bad for out of order
cd = [("a", wl[:2]), ( (2,3), wl[2:])]
od = [("a", ol[:2]), ( (2,3), ol[2:])]
elif opts[1] in indices.keys() and opts[2] in weights.keys():
if "N" in opts:
lp.getIndexBlock(indices["N"], 3)
cd = [(indices[opts[1]], weights[opts[2]])]
od = [(indices[opts[1]], obj_func[opts[2]])]
else:
assert False
if opts[0] == "d":
run_test(dict(cd), dict(od))
return
elif opts[0] == "T":
run_test(cd, od)
return
else:
assert False
else:
assert len(opts) == 3
# No little n option here
if "N" in opts:
lp.getIndexBlock(indices["N"], 3)
run_test( (indices[opts[0]], weights[opts[1]]),
(indices[opts[0]], obj_func[opts[1]]))
return
def testBadSizingTooLarge_tlc(self): self.checkBadSizingTooLarge("tlc")
def testBadSizingTooLarge_tac(self): self.checkBadSizingTooLarge("tac")
def testBadSizingTooLarge_tfc(self): self.checkBadSizingTooLarge("tfc")
def testBadSizingTooLarge_Nlc(self): self.checkBadSizingTooLarge("Nlc")
def testBadSizingTooLarge_Nac(self): self.checkBadSizingTooLarge("Nac")
def testBadSizingTooLarge_Nfc(self): self.checkBadSizingTooLarge("Nfc")
def testBadSizingTooLarge_llc(self): self.checkBadSizingTooLarge("llc")
def testBadSizingTooLarge_lac(self): self.checkBadSizingTooLarge("lac")
def testBadSizingTooLarge_lfc(self): self.checkBadSizingTooLarge("lfc")
def testBadSizingTooLarge_alc(self): self.checkBadSizingTooLarge("alc")
def testBadSizingTooLarge_aac(self): self.checkBadSizingTooLarge("aac")
def testBadSizingTooLarge_afc(self): self.checkBadSizingTooLarge("afc")
def testBadSizingTooLarge_flc(self): self.checkBadSizingTooLarge("flc")
def testBadSizingTooLarge_fac(self): self.checkBadSizingTooLarge("fac")
def testBadSizingTooLarge_ffc(self): self.checkBadSizingTooLarge("ffc")
def testBadSizingTooLarge_d2c(self): self.checkBadSizingTooLarge("d2c")
def testBadSizingTooLarge_d3c(self): self.checkBadSizingTooLarge("d3c")
def testBadSizingTooLarge_d4c(self): self.checkBadSizingTooLarge("d4c")
def testBadSizingTooLarge_T2c(self): self.checkBadSizingTooLarge("T2c")
def testBadSizingTooLarge_T3c(self): self.checkBadSizingTooLarge("T3c")
def testBadSizingTooLarge_T4c(self): self.checkBadSizingTooLarge("T4c")
def testBadSizingTooLarge_T5c(self): self.checkBadSizingTooLarge("T5c")
def testBadSizingTooLarge_Ttlc(self): self.checkBadSizingTooLarge("Ttlc")
def testBadSizingTooLarge_Ttac(self): self.checkBadSizingTooLarge("Ttac")
def testBadSizingTooLarge_Ttfc(self): self.checkBadSizingTooLarge("Ttfc")
def testBadSizingTooLarge_TNlc(self): self.checkBadSizingTooLarge("TNlc")
def testBadSizingTooLarge_TNac(self): self.checkBadSizingTooLarge("TNac")
def testBadSizingTooLarge_TNfc(self): self.checkBadSizingTooLarge("TNfc")
def testBadSizingTooLarge_Tllc(self): self.checkBadSizingTooLarge("Tllc")
def testBadSizingTooLarge_Tlac(self): self.checkBadSizingTooLarge("Tlac")
def testBadSizingTooLarge_Tlfc(self): self.checkBadSizingTooLarge("Tlfc")
def testBadSizingTooLarge_Talc(self): self.checkBadSizingTooLarge("Talc")
def testBadSizingTooLarge_Taac(self): self.checkBadSizingTooLarge("Taac")
def testBadSizingTooLarge_Tafc(self): self.checkBadSizingTooLarge("Tafc")
def testBadSizingTooLarge_Tflc(self): self.checkBadSizingTooLarge("Tflc")
def testBadSizingTooLarge_Tfac(self): self.checkBadSizingTooLarge("Tfac")
def testBadSizingTooLarge_Tffc(self): self.checkBadSizingTooLarge("Tffc")
def testBadSizingTooLarge_tlo(self): self.checkBadSizingTooLarge("tlo")
def testBadSizingTooLarge_tao(self): self.checkBadSizingTooLarge("tao")
def testBadSizingTooLarge_tfo(self): self.checkBadSizingTooLarge("tfo")
def testBadSizingTooLarge_Nlo(self): self.checkBadSizingTooLarge("Nlo")
def testBadSizingTooLarge_Nao(self): self.checkBadSizingTooLarge("Nao")
def testBadSizingTooLarge_Nfo(self): self.checkBadSizingTooLarge("Nfo")
def testBadSizingTooLarge_llo(self): self.checkBadSizingTooLarge("llo")
def testBadSizingTooLarge_lao(self): self.checkBadSizingTooLarge("lao")
def testBadSizingTooLarge_lfo(self): self.checkBadSizingTooLarge("lfo")
def testBadSizingTooLarge_alo(self): self.checkBadSizingTooLarge("alo")
def testBadSizingTooLarge_aao(self): self.checkBadSizingTooLarge("aao")
def testBadSizingTooLarge_afo(self): self.checkBadSizingTooLarge("afo")
def testBadSizingTooLarge_flo(self): self.checkBadSizingTooLarge("flo")
def testBadSizingTooLarge_fao(self): self.checkBadSizingTooLarge("fao")
def testBadSizingTooLarge_ffo(self): self.checkBadSizingTooLarge("ffo")
def testBadSizingTooLarge_d2o(self): self.checkBadSizingTooLarge("d2o")
def testBadSizingTooLarge_d3o(self): self.checkBadSizingTooLarge("d3o")
def testBadSizingTooLarge_d4o(self): self.checkBadSizingTooLarge("d4o")
def testBadSizingTooLarge_T2o(self): self.checkBadSizingTooLarge("T2o")
def testBadSizingTooLarge_T3o(self): self.checkBadSizingTooLarge("T3o")
def testBadSizingTooLarge_T4o(self): self.checkBadSizingTooLarge("T4o")
def testBadSizingTooLarge_T5o(self): self.checkBadSizingTooLarge("T5o")
def testBadSizingTooLarge_Ttlo(self): self.checkBadSizingTooLarge("Ttlo")
def testBadSizingTooLarge_Ttao(self): self.checkBadSizingTooLarge("Ttao")
def testBadSizingTooLarge_Ttfo(self): self.checkBadSizingTooLarge("Ttfo")
def testBadSizingTooLarge_TNlo(self): self.checkBadSizingTooLarge("TNlo")
def testBadSizingTooLarge_TNao(self): self.checkBadSizingTooLarge("TNao")
def testBadSizingTooLarge_TNfo(self): self.checkBadSizingTooLarge("TNfo")
def testBadSizingTooLarge_Tllo(self): self.checkBadSizingTooLarge("Tllo")
def testBadSizingTooLarge_Tlao(self): self.checkBadSizingTooLarge("Tlao")
def testBadSizingTooLarge_Tlfo(self): self.checkBadSizingTooLarge("Tlfo")
def testBadSizingTooLarge_Talo(self): self.checkBadSizingTooLarge("Talo")
def testBadSizingTooLarge_Taao(self): self.checkBadSizingTooLarge("Taao")
def testBadSizingTooLarge_Tafo(self): self.checkBadSizingTooLarge("Tafo")
def testBadSizingTooLarge_Tflo(self): self.checkBadSizingTooLarge("Tflo")
def testBadSizingTooLarge_Tfao(self): self.checkBadSizingTooLarge("Tfao")
def testBadSizingTooLarge_Tffo(self): self.checkBadSizingTooLarge("Tffo")
def checkBadSizingTooSmall(self, opts):
lp = LP()
def run_test(c_arg, o_arg):
if opts[-1] == "c":
self.assertRaises(ValueError, lambda: lp.addConstraint(c_arg, ">", 1))
elif opts[-1] == "o":
self.assertRaises(ValueError, lambda: lp.setObjective(o_arg))
else:
assert False
indices = {}
indices["t"] = (0,5)
indices["N"] = "a"
indices["l"] = [0,1,2,3,4]
indices["a"] = ar([0,1,2,3,4])
indices["f"] = ar([0,1,2,3,4],dtype=float64)
weights = {}
weights["l"] = [1,1,1,1]
weights["a"] = ar([1,1,1,1])
weights["f"] = ar([1,1,1,1])
obj_func = {}
obj_func["l"] = [1,2,3,4]
obj_func["a"] = ar([1,2,3,4])
obj_func["f"] = ar([1,2,3,4],dtype=float64)
# Some ones used in the dict's case
il = indices["l"]
assert len(il) == 5
wl = weights["l"]
assert len(wl) == 4
ol = obj_func["l"]
assert len(ol) == 4
if opts[0] == "d" or opts[0] == "T":
if opts[1] == "2":
lp.getIndexBlock("b", 3)
cd = [ ("a", wl[:2]), ("b", wl[2:])]
od = [ ("a", ol[:2]), ("b", ol[2:])]
elif opts[1] == "4":
cd = [((0,2), wl[:2]), ( (2,5), wl[2:])]
od = [((0,2), ol[:2]), ( (2,5), ol[2:])]
elif opts[1] == "5": # bad for out of order
cd = [("a", wl[:2]), ( (2,5), wl[2:])]
od = [("a", ol[:2]), ( (2,5), ol[2:])]
elif opts[1] in indices.keys() and opts[2] in weights.keys():
if "N" in opts:
lp.getIndexBlock(indices["N"], 5)
cd = [(indices[opts[1]], weights[opts[2]])]
od = [(indices[opts[1]], obj_func[opts[2]])]
else:
assert False
if opts[0] == "d":
run_test(dict(cd), dict(od))
return
elif opts[0] == "T":
run_test(cd, od)
return
else:
assert False
else:
assert len(opts) == 3
# No little n option here
if "N" in opts:
lp.getIndexBlock(indices["N"], 5)
run_test( (indices[opts[0]], weights[opts[1]]),
(indices[opts[0]], obj_func[opts[1]]))
return
def testBadSizingTooSmall_tlc(self): self.checkBadSizingTooSmall("tlc")
def testBadSizingTooSmall_tac(self): self.checkBadSizingTooSmall("tac")
def testBadSizingTooSmall_tfc(self): self.checkBadSizingTooSmall("tfc")
def testBadSizingTooSmall_Nlc(self): self.checkBadSizingTooSmall("Nlc")
def testBadSizingTooSmall_Nac(self): self.checkBadSizingTooSmall("Nac")
def testBadSizingTooSmall_Nfc(self): self.checkBadSizingTooSmall("Nfc")
def testBadSizingTooSmall_llc(self): self.checkBadSizingTooSmall("llc")
def testBadSizingTooSmall_lac(self): self.checkBadSizingTooSmall("lac")
def testBadSizingTooSmall_lfc(self): self.checkBadSizingTooSmall("lfc")
def testBadSizingTooSmall_alc(self): self.checkBadSizingTooSmall("alc")
def testBadSizingTooSmall_aac(self): self.checkBadSizingTooSmall("aac")
def testBadSizingTooSmall_afc(self): self.checkBadSizingTooSmall("afc")
def testBadSizingTooSmall_flc(self): self.checkBadSizingTooSmall("flc")
def testBadSizingTooSmall_fac(self): self.checkBadSizingTooSmall("fac")
def testBadSizingTooSmall_ffc(self): self.checkBadSizingTooSmall("ffc")
def testBadSizingTooSmall_d2c(self): self.checkBadSizingTooSmall("d2c")
def testBadSizingTooSmall_d4c(self): self.checkBadSizingTooSmall("d4c")
def testBadSizingTooSmall_T2c(self): self.checkBadSizingTooSmall("T2c")
def testBadSizingTooSmall_T4c(self): self.checkBadSizingTooSmall("T4c")
def testBadSizingTooSmall_T5c(self): self.checkBadSizingTooSmall("T5c")
def testBadSizingTooSmall_Ttlc(self): self.checkBadSizingTooSmall("Ttlc")
def testBadSizingTooSmall_Ttac(self): self.checkBadSizingTooSmall("Ttac")
def testBadSizingTooSmall_Ttfc(self): self.checkBadSizingTooSmall("Ttfc")
def testBadSizingTooSmall_TNlc(self): self.checkBadSizingTooSmall("TNlc")
def testBadSizingTooSmall_TNac(self): self.checkBadSizingTooSmall("TNac")
def testBadSizingTooSmall_TNfc(self): self.checkBadSizingTooSmall("TNfc")
def testBadSizingTooSmall_Tllc(self): self.checkBadSizingTooSmall("Tllc")
def testBadSizingTooSmall_Tlac(self): self.checkBadSizingTooSmall("Tlac")
def testBadSizingTooSmall_Tlfc(self): self.checkBadSizingTooSmall("Tlfc")
def testBadSizingTooSmall_Talc(self): self.checkBadSizingTooSmall("Talc")
def testBadSizingTooSmall_Taac(self): self.checkBadSizingTooSmall("Taac")
def testBadSizingTooSmall_Tafc(self): self.checkBadSizingTooSmall("Tafc")
def testBadSizingTooSmall_Tflc(self): self.checkBadSizingTooSmall("Tflc")
def testBadSizingTooSmall_Tfac(self): self.checkBadSizingTooSmall("Tfac")
def testBadSizingTooSmall_Tffc(self): self.checkBadSizingTooSmall("Tffc")
def testBadSizingTooSmall_tlo(self): self.checkBadSizingTooSmall("tlo")
def testBadSizingTooSmall_tao(self): self.checkBadSizingTooSmall("tao")
def testBadSizingTooSmall_tfo(self): self.checkBadSizingTooSmall("tfo")
def testBadSizingTooSmall_Nlo(self): self.checkBadSizingTooSmall("Nlo")
def testBadSizingTooSmall_Nao(self): self.checkBadSizingTooSmall("Nao")
def testBadSizingTooSmall_Nfo(self): self.checkBadSizingTooSmall("Nfo")
def testBadSizingTooSmall_llo(self): self.checkBadSizingTooSmall("llo")
def testBadSizingTooSmall_lao(self): self.checkBadSizingTooSmall("lao")
def testBadSizingTooSmall_lfo(self): self.checkBadSizingTooSmall("lfo")
def testBadSizingTooSmall_alo(self): self.checkBadSizingTooSmall("alo")
def testBadSizingTooSmall_aao(self): self.checkBadSizingTooSmall("aao")
def testBadSizingTooSmall_afo(self): self.checkBadSizingTooSmall("afo")
def testBadSizingTooSmall_flo(self): self.checkBadSizingTooSmall("flo")
def testBadSizingTooSmall_fao(self): self.checkBadSizingTooSmall("fao")
def testBadSizingTooSmall_ffo(self): self.checkBadSizingTooSmall("ffo")
def testBadSizingTooSmall_d2o(self): self.checkBadSizingTooSmall("d2o")
def testBadSizingTooSmall_d4o(self): self.checkBadSizingTooSmall("d4o")
def testBadSizingTooSmall_T2o(self): self.checkBadSizingTooSmall("T2o")
def testBadSizingTooSmall_T4o(self): self.checkBadSizingTooSmall("T4o")
def testBadSizingTooSmall_T5o(self): self.checkBadSizingTooSmall("T5o")
def testBadSizingTooSmall_Ttlo(self): self.checkBadSizingTooSmall("Ttlo")
def testBadSizingTooSmall_Ttao(self): self.checkBadSizingTooSmall("Ttao")
def testBadSizingTooSmall_Ttfo(self): self.checkBadSizingTooSmall("Ttfo")
def testBadSizingTooSmall_TNlo(self): self.checkBadSizingTooSmall("TNlo")
def testBadSizingTooSmall_TNao(self): self.checkBadSizingTooSmall("TNao")
def testBadSizingTooSmall_TNfo(self): self.checkBadSizingTooSmall("TNfo")
def testBadSizingTooSmall_Tllo(self): self.checkBadSizingTooSmall("Tllo")
def testBadSizingTooSmall_Tlao(self): self.checkBadSizingTooSmall("Tlao")
def testBadSizingTooSmall_Tlfo(self): self.checkBadSizingTooSmall("Tlfo")
def testBadSizingTooSmall_Talo(self): self.checkBadSizingTooSmall("Talo")
def testBadSizingTooSmall_Taao(self): self.checkBadSizingTooSmall("Taao")
def testBadSizingTooSmall_Tafo(self): self.checkBadSizingTooSmall("Tafo")
def testBadSizingTooSmall_Tflo(self): self.checkBadSizingTooSmall("Tflo")
def testBadSizingTooSmall_Tfao(self): self.checkBadSizingTooSmall("Tfao")
def testBadSizingTooSmall_Tffo(self): self.checkBadSizingTooSmall("Tffo")
############################################################
# Now some specific cases
def checkInconsistentSubarrays(self, opts):
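# Decoding of the opts string (comment added for clarity, inferred from the
# lookups below): opts[0] picks the index spec (t=tuple range, n/N=named
# block, l=list, a=uint array, f=float array, e=empty), opts[1] picks the
# value matrix (L=ragged list, l=list of arrays, B/C/D/E=other malformed
# shapes), and opts[2] picks the target ("s"=scalar, "l"=list, "a"=array,
# "f"=float array).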
values = {}
indices = {}
indices["t"] = (0,3)
indices["n"] = "a"
indices["N"] = "a"
indices["l"] = [0,1,2]
indices["a"] = ar([0,1,2],dtype=uint)
indices["f"] = ar([0,1,2],dtype=float64)
indices["e"] = None # empty
A = [[1,0, 0],
[0,1], # inconsistent; does this get caught?
[0,0.5,0]]
values = {}
values["L"] = A
values["l"] = [ar(le) for le in A]
values["B"] = [[1, 0, 0], [[1,0,0]], [0,1,1]]
values["C"] = ones((1,3,3) )
values["D"] = [[1, 0, 0], [1,1,[1]], [0,1,1]]
values["E"] = [[1, 0, 0], (1,1,1), [0,1,1]]
targets = {}
targets["s"] = 1
targets["l"] = [1,1,1]
targets["a"] = ar([1,1,1],dtype=uint)
targets["f"] = ar([1,1,1],dtype=float64)
lp = LP()
if opts[0] == "N":
lp.getIndexBlock(indices["N"], 3)
io = indices[opts[0]]
vl = values [opts[1]]
tr = targets[opts[2]]
ob = [1,2,3]
if io is None:
self.assertRaises(ValueError, lambda: lp.addConstraint(vl, ">=", tr))
else:
self.assertRaises(ValueError, lambda: lp.addConstraint( (io, vl), ">=", tr))
def testInconsistentSubarrays_tLs(self): self.checkInconsistentSubarrays("tLs")
def testInconsistentSubarrays_tLl(self): self.checkInconsistentSubarrays("tLl")
def testInconsistentSubarrays_tLa(self): self.checkInconsistentSubarrays("tLa")
def testInconsistentSubarrays_tLf(self): self.checkInconsistentSubarrays("tLf")
def testInconsistentSubarrays_tls(self): self.checkInconsistentSubarrays("tls")
def testInconsistentSubarrays_tll(self): self.checkInconsistentSubarrays("tll")
def testInconsistentSubarrays_tla(self): self.checkInconsistentSubarrays("tla")
def testInconsistentSubarrays_tlf(self): self.checkInconsistentSubarrays("tlf")
def testInconsistentSubarrays_nLs(self): self.checkInconsistentSubarrays("nLs")
def testInconsistentSubarrays_nLl(self): self.checkInconsistentSubarrays("nLl")
def testInconsistentSubarrays_nLa(self): self.checkInconsistentSubarrays("nLa")
def testInconsistentSubarrays_nLf(self): self.checkInconsistentSubarrays("nLf")
def testInconsistentSubarrays_nls(self): self.checkInconsistentSubarrays("nls")
def testInconsistentSubarrays_nll(self): self.checkInconsistentSubarrays("nll")
def testInconsistentSubarrays_nla(self): self.checkInconsistentSubarrays("nla")
def testInconsistentSubarrays_nlf(self): self.checkInconsistentSubarrays("nlf")
def testInconsistentSubarrays_nBs(self): self.checkInconsistentSubarrays("nBs")
def testInconsistentSubarrays_nBl(self): self.checkInconsistentSubarrays("nBl")
def testInconsistentSubarrays_nBa(self): self.checkInconsistentSubarrays("nBa")
def testInconsistentSubarrays_nBf(self): self.checkInconsistentSubarrays("nBf")
def testInconsistentSubarrays_nCs(self): self.checkInconsistentSubarrays("nCs")
def testInconsistentSubarrays_nCl(self): self.checkInconsistentSubarrays("nCl")
def testInconsistentSubarrays_nCa(self): self.checkInconsistentSubarrays("nCa")
def testInconsistentSubarrays_nCf(self): self.checkInconsistentSubarrays("nCf")
def testInconsistentSubarrays_nDs(self): self.checkInconsistentSubarrays("nDs")
def testInconsistentSubarrays_nDl(self): self.checkInconsistentSubarrays("nDl")
def testInconsistentSubarrays_nDa(self): self.checkInconsistentSubarrays("nDa")
def testInconsistentSubarrays_nDf(self): self.checkInconsistentSubarrays("nDf")
def testInconsistentSubarrays_NLs(self): self.checkInconsistentSubarrays("NLs")
def testInconsistentSubarrays_NLl(self): self.checkInconsistentSubarrays("NLl")
def testInconsistentSubarrays_NLa(self): self.checkInconsistentSubarrays("NLa")
def testInconsistentSubarrays_NLf(self): self.checkInconsistentSubarrays("NLf")
def testInconsistentSubarrays_Nls(self): self.checkInconsistentSubarrays("Nls")
def testInconsistentSubarrays_Nll(self): self.checkInconsistentSubarrays("Nll")
def testInconsistentSubarrays_Nla(self): self.checkInconsistentSubarrays("Nla")
def testInconsistentSubarrays_Nlf(self): self.checkInconsistentSubarrays("Nlf")
def testInconsistentSubarrays_NBs(self): self.checkInconsistentSubarrays("NBs")
def testInconsistentSubarrays_NBl(self): self.checkInconsistentSubarrays("NBl")
def testInconsistentSubarrays_NBa(self): self.checkInconsistentSubarrays("NBa")
def testInconsistentSubarrays_NBf(self): self.checkInconsistentSubarrays("NBf")
def testInconsistentSubarrays_NCs(self): self.checkInconsistentSubarrays("NCs")
def testInconsistentSubarrays_NCl(self): self.checkInconsistentSubarrays("NCl")
def testInconsistentSubarrays_NCa(self): self.checkInconsistentSubarrays("NCa")
def testInconsistentSubarrays_NCf(self): self.checkInconsistentSubarrays("NCf")
def testInconsistentSubarrays_NDs(self): self.checkInconsistentSubarrays("NDs")
def testInconsistentSubarrays_NDl(self): self.checkInconsistentSubarrays("NDl")
def testInconsistentSubarrays_NDa(self): self.checkInconsistentSubarrays("NDa")
def testInconsistentSubarrays_NDf(self): self.checkInconsistentSubarrays("NDf")
def testInconsistentSubarrays_lLs(self): self.checkInconsistentSubarrays("lLs")
def testInconsistentSubarrays_lLl(self): self.checkInconsistentSubarrays("lLl")
def testInconsistentSubarrays_lLa(self): self.checkInconsistentSubarrays("lLa")
def testInconsistentSubarrays_lLf(self): self.checkInconsistentSubarrays("lLf")
def testInconsistentSubarrays_lls(self): self.checkInconsistentSubarrays("lls")
def testInconsistentSubarrays_lll(self): self.checkInconsistentSubarrays("lll")
def testInconsistentSubarrays_lla(self): self.checkInconsistentSubarrays("lla")
def testInconsistentSubarrays_llf(self): self.checkInconsistentSubarrays("llf")
def testInconsistentSubarrays_lBs(self): self.checkInconsistentSubarrays("lBs")
def testInconsistentSubarrays_lBl(self): self.checkInconsistentSubarrays("lBl")
def testInconsistentSubarrays_lBa(self): self.checkInconsistentSubarrays("lBa")
def testInconsistentSubarrays_lBf(self): self.checkInconsistentSubarrays("lBf")
def testInconsistentSubarrays_lCs(self): self.checkInconsistentSubarrays("lCs")
def testInconsistentSubarrays_lCl(self): self.checkInconsistentSubarrays("lCl")
def testInconsistentSubarrays_lCa(self): self.checkInconsistentSubarrays("lCa")
def testInconsistentSubarrays_lCf(self): self.checkInconsistentSubarrays("lCf")
def testInconsistentSubarrays_lDs(self): self.checkInconsistentSubarrays("lDs")
def testInconsistentSubarrays_lDl(self): self.checkInconsistentSubarrays("lDl")
def testInconsistentSubarrays_lDa(self): self.checkInconsistentSubarrays("lDa")
def testInconsistentSubarrays_lDf(self): self.checkInconsistentSubarrays("lDf")
def testInconsistentSubarrays_aLs(self): self.checkInconsistentSubarrays("aLs")
def testInconsistentSubarrays_aLl(self): self.checkInconsistentSubarrays("aLl")
def testInconsistentSubarrays_aLa(self): self.checkInconsistentSubarrays("aLa")
def testInconsistentSubarrays_aLf(self): self.checkInconsistentSubarrays("aLf")
def testInconsistentSubarrays_als(self): self.checkInconsistentSubarrays("als")
def testInconsistentSubarrays_all(self): self.checkInconsistentSubarrays("all")
def testInconsistentSubarrays_ala(self): self.checkInconsistentSubarrays("ala")
def testInconsistentSubarrays_alf(self): self.checkInconsistentSubarrays("alf")
def testInconsistentSubarrays_aBs(self): self.checkInconsistentSubarrays("aBs")
def testInconsistentSubarrays_aBl(self): self.checkInconsistentSubarrays("aBl")
def testInconsistentSubarrays_aBa(self): self.checkInconsistentSubarrays("aBa")
def testInconsistentSubarrays_aBf(self): self.checkInconsistentSubarrays("aBf")
def testInconsistentSubarrays_aCs(self): self.checkInconsistentSubarrays("aCs")
def testInconsistentSubarrays_aCl(self): self.checkInconsistentSubarrays("aCl")
def testInconsistentSubarrays_aCa(self): self.checkInconsistentSubarrays("aCa")
def testInconsistentSubarrays_aCf(self): self.checkInconsistentSubarrays("aCf")
def testInconsistentSubarrays_aDs(self): self.checkInconsistentSubarrays("aDs")
def testInconsistentSubarrays_aDl(self): self.checkInconsistentSubarrays("aDl")
def testInconsistentSubarrays_aDa(self): self.checkInconsistentSubarrays("aDa")
def testInconsistentSubarrays_aDf(self): self.checkInconsistentSubarrays("aDf")
def testInconsistentSubarrays_fLs(self): self.checkInconsistentSubarrays("fLs")
def testInconsistentSubarrays_fLl(self): self.checkInconsistentSubarrays("fLl")
def testInconsistentSubarrays_fLa(self): self.checkInconsistentSubarrays("fLa")
def testInconsistentSubarrays_fLf(self): self.checkInconsistentSubarrays("fLf")
def testInconsistentSubarrays_fls(self): self.checkInconsistentSubarrays("fls")
def testInconsistentSubarrays_fll(self): self.checkInconsistentSubarrays("fll")
def testInconsistentSubarrays_fla(self): self.checkInconsistentSubarrays("fla")
def testInconsistentSubarrays_flf(self): self.checkInconsistentSubarrays("flf")
def testInconsistentSubarrays_fBs(self): self.checkInconsistentSubarrays("fBs")
def testInconsistentSubarrays_fBl(self): self.checkInconsistentSubarrays("fBl")
def testInconsistentSubarrays_fBa(self): self.checkInconsistentSubarrays("fBa")
def testInconsistentSubarrays_fBf(self): self.checkInconsistentSubarrays("fBf")
def testInconsistentSubarrays_fCs(self): self.checkInconsistentSubarrays("fCs")
def testInconsistentSubarrays_fCl(self): self.checkInconsistentSubarrays("fCl")
def testInconsistentSubarrays_fCa(self): self.checkInconsistentSubarrays("fCa")
def testInconsistentSubarrays_fCf(self): self.checkInconsistentSubarrays("fCf")
def testInconsistentSubarrays_fDs(self): self.checkInconsistentSubarrays("fDs")
def testInconsistentSubarrays_fDl(self): self.checkInconsistentSubarrays("fDl")
def testInconsistentSubarrays_fDa(self): self.checkInconsistentSubarrays("fDa")
def testInconsistentSubarrays_fDf(self): self.checkInconsistentSubarrays("fDf")
def testInconsistentSubarrays_eLs(self): self.checkInconsistentSubarrays("eLs")
def testInconsistentSubarrays_eLl(self): self.checkInconsistentSubarrays("eLl")
def testInconsistentSubarrays_eLa(self): self.checkInconsistentSubarrays("eLa")
def testInconsistentSubarrays_eLf(self): self.checkInconsistentSubarrays("eLf")
def testInconsistentSubarrays_els(self): self.checkInconsistentSubarrays("els")
def testInconsistentSubarrays_ell(self): self.checkInconsistentSubarrays("ell")
def testInconsistentSubarrays_ela(self): self.checkInconsistentSubarrays("ela")
def testInconsistentSubarrays_elf(self): self.checkInconsistentSubarrays("elf")
def testInconsistentSubarrays_eBs(self): self.checkInconsistentSubarrays("eBs")
def testInconsistentSubarrays_eBl(self): self.checkInconsistentSubarrays("eBl")
def testInconsistentSubarrays_eBa(self): self.checkInconsistentSubarrays("eBa")
def testInconsistentSubarrays_eBf(self): self.checkInconsistentSubarrays("eBf")
def testInconsistentSubarrays_eCs(self): self.checkInconsistentSubarrays("eCs")
def testInconsistentSubarrays_eCl(self): self.checkInconsistentSubarrays("eCl")
def testInconsistentSubarrays_eCa(self): self.checkInconsistentSubarrays("eCa")
def testInconsistentSubarrays_eCf(self): self.checkInconsistentSubarrays("eCf")
def testInconsistentSubarrays_eDs(self): self.checkInconsistentSubarrays("eDs")
def testInconsistentSubarrays_eDl(self): self.checkInconsistentSubarrays("eDl")
def testInconsistentSubarrays_eDa(self): self.checkInconsistentSubarrays("eDa")
def testInconsistentSubarrays_eDf(self): self.checkInconsistentSubarrays("eDf")
if __name__ == '__main__':
unittest.main()
|
hoytak/pylpsolve
|
tests/test_errorcatch.py
|
Python
|
lgpl-2.1
| 33,500
|
[
"ADF"
] |
3a4797cb98f28ecddbe2e502deb160d27c088aaeae632c1ea3c0a517e3a2d35b
|
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Visit(models.Model):
when = models.DateTimeField(_('when'), auto_now_add=True)
# Who visited
visitor_content_type = models.ForeignKey(ContentType, related_name='visited_objects')
visitor_object_id = models.PositiveIntegerField(db_index=True)
visitor = generic.GenericForeignKey('visitor_content_type', 'visitor_object_id')
# Who was visited
visited_content_type = models.ForeignKey(ContentType, related_name='latest_visitors')
visited_object_id = models.PositiveIntegerField(db_index=True)
visited = generic.GenericForeignKey('visited_content_type', 'visited_object_id')
def __unicode__(self):
return u"%s watched %s" % (self.visitor, self.visited)
|
SpreadBand/django-visitors
|
visitors/models.py
|
Python
|
gpl-3.0
| 911
|
[
"VisIt"
] |
a830e2578f3937638160ce2debdb79ae1a8621037103a409f8a9f23d6c809edc
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from PyQt4 import QtGui
from openlp.core.lib import translate
from openlp.core.lib.ui import create_button_box
class Ui_AddGroupDialog(object):
def setupUi(self, add_group_dialog):
add_group_dialog.setObjectName('add_group_dialog')
add_group_dialog.resize(300, 10)
self.dialog_layout = QtGui.QVBoxLayout(add_group_dialog)
self.dialog_layout.setObjectName('dialog_layout')
self.name_layout = QtGui.QFormLayout()
self.name_layout.setObjectName('name_layout')
self.parent_group_label = QtGui.QLabel(add_group_dialog)
self.parent_group_label.setObjectName('parent_group_label')
self.parent_group_combobox = QtGui.QComboBox(add_group_dialog)
self.parent_group_combobox.setObjectName('parent_group_combobox')
self.name_layout.addRow(self.parent_group_label, self.parent_group_combobox)
self.name_label = QtGui.QLabel(add_group_dialog)
self.name_label.setObjectName('name_label')
self.name_edit = QtGui.QLineEdit(add_group_dialog)
self.name_edit.setObjectName('name_edit')
self.name_label.setBuddy(self.name_edit)
self.name_layout.addRow(self.name_label, self.name_edit)
self.dialog_layout.addLayout(self.name_layout)
self.button_box = create_button_box(add_group_dialog, 'button_box', ['cancel', 'save'])
self.dialog_layout.addWidget(self.button_box)
self.retranslateUi(add_group_dialog)
add_group_dialog.setMaximumHeight(add_group_dialog.sizeHint().height())
def retranslateUi(self, add_group_dialog):
add_group_dialog.setWindowTitle(translate('ImagePlugin.AddGroupForm', 'Add group'))
self.parent_group_label.setText(translate('ImagePlugin.AddGroupForm', 'Parent group:'))
self.name_label.setText(translate('ImagePlugin.AddGroupForm', 'Group name:'))
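# Typical wiring for this setupUi/retranslateUi pattern (a sketch; the
# concrete form class used by OpenLP is not shown in this file):
# >>> dialog = QtGui.QDialog()
# >>> ui = Ui_AddGroupDialog()
# >>> ui.setupUi(dialog)
# >>> dialog.exec_()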
|
marmyshev/item_title
|
openlp/plugins/images/forms/addgroupdialog.py
|
Python
|
gpl-2.0
| 3,966
|
[
"Brian"
] |
aca30d70477a822ef6777493c796f4aa18b8448efbd95d5939adbd4f85389155
|
from Sire.IO import *
from Sire.MM import *
from Sire.Maths import *
from Sire.Mol import *
(mols,space) = Amber().readCrdTop("../io/waterbox.crd", "../io/waterbox.top")
cljboxes = CLJBoxes()
idxs = []
natoms = 0
for i in range(0,mols.nMolecules()):
idxs.append( cljboxes.add( CLJAtoms(mols[MolIdx(i)]) ) )
natoms += mols[MolIdx(i)].molecule().nAtoms()
def test_cljdelta(verbose = False):
old_water = mols[MolIdx(0)].molecule()
old_cljatoms = CLJAtoms(old_water)
test_cljatoms = cljboxes.atoms(idxs[0])
if verbose:
print("\nMaking sure the box has the right number of atoms %s vs. %s",
cljboxes.nAtoms(), natoms)
assert( cljboxes.nAtoms() == natoms )
if verbose:
print("\nChecking I can get the old atoms back out again...")
print("OLD:\n%s" % old_cljatoms)
print("TEST:\n%s" % test_cljatoms)
assert(old_cljatoms == test_cljatoms)
new_water = old_water.move().translate( Vector(1) ).commit()
new_cljatoms = CLJAtoms(new_water)
cljdelta = CLJDelta(1, cljboxes, idxs[0], new_water)
test_cljatoms = cljdelta.newAtoms()
if verbose:
print("\nChecking changed atoms looks correct")
print("CHANGED:\n%s" % cljdelta.changedAtoms())
print("BOX: %s (%s,%s,%s) : %s" % (cljdelta.boxIndex(), \
cljdelta.nBoxX(), cljdelta.nBoxY(), cljdelta.nBoxZ(), \
cljdelta.isSingleBox()))
if cljdelta.isSingleBox():
assert( cljdelta.nBoxX() == 1 )
assert( cljdelta.nBoxY() == 1 )
assert( cljdelta.nBoxZ() == 1 )
else:
assert( cljdelta.nBoxX() > 1 or cljdelta.nBoxY() > 1 or cljdelta.nBoxZ() > 1 )
if verbose:
print("\nComparing new atoms are correctly in the delta")
print("NEW:\n%s" % new_cljatoms)
print("TEST:\n%s" % test_cljatoms)
assert(new_cljatoms == test_cljatoms)
test_cljatoms = cljdelta.oldAtoms()
if verbose:
print("\nComparing old atoms are correctly in the delta")
print("OLD:\n%s" % old_cljatoms)
print("TEST:\n%s" % test_cljatoms)
assert(old_cljatoms == test_cljatoms)
if verbose:
print("\nTesting that the old indicies are correctly stored in the delta")
print("OLD:\n%s" % idxs[0])
print("NEW:\n%s" % cljdelta.oldIndicies())
assert( idxs[0] == cljdelta.oldIndicies() )
# now apply the delta on a copy of the boxes
new_boxes = CLJBoxes(cljboxes)
new_idxs = new_boxes.apply(cljdelta)
if verbose:
print("\nChecking...")
print(new_idxs)
assert( new_idxs == idxs[0] )
test_atoms = new_boxes.atoms(new_idxs)
if verbose:
print("\nSeeing if the new atoms are in the box")
print("NEW:\n%s" % new_cljatoms)
print("TEST:\n%s" % test_atoms)
assert(new_cljatoms == test_atoms)
nold = cljboxes.nAtoms()
nnew = new_boxes.nAtoms()
assert( nold == nnew )
if __name__ == "__main__":
test_cljdelta(True)
|
chryswoods/SireTests
|
unittests/SireMM/test_cljdelta.py
|
Python
|
gpl-2.0
| 3,003
|
[
"Amber"
] |
5fb98dd07d875549458e96e62fc7bef6f4d23963d1acc753fdb83f1b83e351d9
|
from django.utils import timezone
from dateutil.relativedelta import relativedelta
from edc_constants.constants import SCREENED
from edc_registration.models import RegisteredSubject
from edc_identifier.models import SubjectIdentifier
from edc_constants.constants import FAILED_ELIGIBILITY, OFF_STUDY, SCHEDULED, POS, YES, NO, NOT_APPLICABLE
from edc_meta_data.models import RequisitionMetaData
from td_maternal.models import MaternalVisit
from tshilo_dikotla.constants import INFANT
from td_list.models import DeliveryComplications
from ..forms import MaternalLabourDelForm
from .base_test_case import BaseTestCase
from .factories import (MaternalUltraSoundIniFactory, MaternalEligibilityFactory, MaternalConsentFactory,
AntenatalEnrollmentFactory, AntenatalVisitMembershipFactory, MaternalLabourDelFactory)
class TestMaternalLabourDel(BaseTestCase):
def setUp(self):
super(TestMaternalLabourDel, self).setUp()
self.maternal_eligibility = MaternalEligibilityFactory()
self.maternal_consent = MaternalConsentFactory(
maternal_eligibility=self.maternal_eligibility)
self.registered_subject = self.maternal_eligibility.registered_subject
# maternal visit created here.
options = {'registered_subject': self.registered_subject,
'current_hiv_status': POS,
'evidence_hiv_status': YES,
'will_get_arvs': YES,
'is_diabetic': NO,
'will_remain_onstudy': YES,
'rapid_test_done': NOT_APPLICABLE,
'last_period_date': (timezone.datetime.now() - relativedelta(weeks=25)).date()}
self.antenatal_enrollment = AntenatalEnrollmentFactory(**options)
self.maternal_visit = MaternalVisit.objects.get(
appointment__registered_subject=self.registered_subject,
reason=SCHEDULED,
appointment__visit_definition__code='1000M')
self.maternal_ultrasound = MaternalUltraSoundIniFactory(maternal_visit=self.maternal_visit,
number_of_gestations=1,
)
self.maternal_visits_membership = AntenatalVisitMembershipFactory(registered_subject=self.registered_subject)
complications = DeliveryComplications.objects.create(
hostname_created="django", name="None",
short_name="None", created=timezone.datetime.now(),
user_modified="", modified=timezone.datetime.now(),
hostname_modified="django", version="1.0",
display_index=1, user_created="django", field_name=None,
revision=":develop:")
self.options = {
'registered_subject': self.registered_subject.id,
'report_datetime': timezone.now(),
'delivery_datetime': timezone.now(),
'delivery_time_estimated': NO,
'labour_hrs':'3',
'delivery_complications': [complications.id],
'delivery_hospital': 'Lesirane',
'mode_delivery': 'spontaneous vaginal',
'csection_reason': NOT_APPLICABLE,
'live_infants_to_register': 1,
'valid_regiment_duration': YES,
'arv_initiation_date': (timezone.datetime.now() - relativedelta(weeks=6)).date()
}
def test_new_infant_registration(self):
maternal_labour_del = MaternalLabourDelFactory(registered_subject=self.registered_subject,
live_infants_to_register=1)
self.assertEqual(SubjectIdentifier.objects.filter(
identifier=self.registered_subject.subject_identifier).count(), 1)
self.assertEqual(RegisteredSubject.objects.filter(
subject_type=INFANT,
registration_status='DELIVERED',
relative_identifier=self.maternal_consent.subject_identifier).count(), 1)
def test_on_therapy_for_atleast4weeks(self):
self.assertEqual(self.antenatal_enrollment.enrollment_hiv_status, POS)
maternal_labour_del = MaternalLabourDelFactory(registered_subject=self.registered_subject,
live_infants_to_register=1,
valid_regiment_duration=YES
)
self.assertTrue(maternal_labour_del.keep_on_study)
self.assertTrue(maternal_labour_del.antenatal_enrollment.is_eligible)
def test_not_therapy_for_atleast4weeks(self):
self.assertEqual(self.antenatal_enrollment.enrollment_hiv_status, POS)
maternal_labour_del = MaternalLabourDelFactory(registered_subject=self.registered_subject,
live_infants_to_register=1,
valid_regiment_duration=NO
)
self.assertFalse(maternal_labour_del.keep_on_study)
self.assertFalse(maternal_labour_del.antenatal_enrollment.is_eligible)
def test_valid_regimen_duration_hiv_pos_only_na(self):
self.options['valid_regiment_duration'] = NOT_APPLICABLE
form = MaternalLabourDelForm(data=self.options)
errors = ''.join(form.errors.get('__all__'))
self.assertIn(
'Participant is HIV+ valid regimen duration should be YES. Please correct.', errors)
def test_valid_regimen_duration_hiv_pos_only_no_init_date(self):
self.options['arv_initiation_date'] = None
form = MaternalLabourDelForm(data=self.options)
errors = ''.join(form.errors.get('__all__'))
self.assertIn(
'You indicated participant was on valid regimen, please give a valid arv initiation date.', errors)
def test_valid_regimen_duration_hiv_pos_only_invalid_init_date(self):
self.options['arv_initiation_date'] = (timezone.datetime.now() - relativedelta(weeks=1)).date()
form = MaternalLabourDelForm(data=self.options)
errors = ''.join(form.errors.get('__all__'))
self.assertIn(
'You indicated that the mother was on REGIMEN for a valid duration, but '
'delivery date is within 4weeks of art initiation date. Please correct.', errors)
|
botswana-harvard/tshilo-dikotla
|
td_maternal/tests/test_maternal_lab_del.py
|
Python
|
gpl-2.0
| 6,380
|
[
"VisIt"
] |
5eb1f23d4d670280b371b28695a2a78fe2f52b6b9383a55b370c56a949f4f6a2
|
import numpy as np
from gpaw.blacs import BlacsGrid, parallelprint
from gpaw.mpi import world, rank, size
from gpaw.utilities.lapack import diagonalize
from gpaw.utilities.scalapack import scalapack_diagonalize_dc
from gpaw.blacs import Redistributor
def scal_diagonalize(A, nodes='master'):
# Diagonalize matrix A (size N*N) with scalapack
# Usage: eps, B = scal_diagonalize(A)
# eps and B are the eigenvalues and eigenvectors
# nodes = 'master': eigenvectors only available on master node
# nodes = 'all': eigenvectors broadcast to all nodes
# make sure A is N*N, and hermitian
N = A.shape[0]
assert A.shape[0] == A.shape[1]
for i in range(N):
for j in range(i, N):
assert A[i,j] == A[j,i].conj()
# create blacs descriptor
mb = 64
g = BlacsGrid(world, 2, size//2)
nndesc1 = g.new_descriptor(N, N, N, N)
nndesc2 = g.new_descriptor(N, N, mb, mb)
# distribute A to blacs grid A_
if rank != 0:
A = nndesc1.zeros(dtype=A.dtype)
A_ = nndesc2.empty(dtype=A.dtype)
redistributor = Redistributor(world, nndesc1, nndesc2)
redistributor.redistribute(A, A_)
# diagonalize
B_ = nndesc2.zeros(dtype=A.dtype)
eps = np.zeros(N,dtype=A.dtype)
nndesc2.diagonalize_dc(A_, B_, eps, 'L')
# distribute the eigenvectors to master
B = np.zeros_like(A)
redistributor = Redistributor(world, nndesc2, nndesc1)
redistributor.redistribute(B_, B)
if nodes == 'master':
return eps, B
elif nodes == 'all':
if rank != 0:
B = np.zeros((N, N))
world.broadcast(B, 0)
return eps, B
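# The driver code below exercises scal_diagonalize; it is meant to be launched
# under MPI, e.g. (hypothetical command line):
#     mpirun -np 4 gpaw-python scalapack_diag.py
# With nodes='all', every rank receives a broadcast copy of the eigenvectors:
#     eps, B = scal_diagonalize(A, nodes='all')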
# generate a matrix
N = 512
A = np.arange(N**2,dtype=float).reshape(N,N)
for i in range(N):
for j in range(i,N):
A[i,j] = A[j,i]
# diagonalize
eps, B = scal_diagonalize(A)
check = 1
if check and rank == 0:
# check whether it gives the same result with lapack
eps1 = np.zeros(N)
diagonalize(A, eps1)
assert np.abs(eps-eps1).sum() < 1e-6
for i in range(N//size):
# the eigenvectors are rows of the matrix; each may differ by a minus sign.
if np.abs(A[i,:] - B[i,:]).sum() > 1e-6:
if np.abs(A[i,:] + B[i,:]).sum() > 1e-6:
raise ValueError('Check !')
|
robwarm/gpaw-symm
|
gpaw/test/parallel/scalapack_diag.py
|
Python
|
gpl-3.0
| 2,279
|
[
"GPAW"
] |
d83a8dee63824214f4b1454a12091e85a82ae05a92ae7564be6c89f945c630b8
|
import numpy as np
import orca
import pandas as pd
from activitysim import activitysim as asim
from activitysim.util import reindex
@orca.table(cache=True)
def households(set_random_seed, store, settings):
if "households_sample_size" in settings:
return asim.random_rows(store["households"],
settings["households_sample_size"])
return store["households"]
# this is a placeholder table for columns that get computed after the
# auto ownership model
@orca.table()
def households_autoown(households):
return pd.DataFrame(index=households.index)
# this is a common merge so might as well define it once here and use it
@orca.table()
def households_merged(households, land_use, accessibility):
return orca.merge_tables(households.name, tables=[
households, land_use, accessibility])
orca.broadcast('households', 'persons', cast_index=True, onto_on='household_id')
@orca.column("households")
def income_in_thousands(households):
return households.income / 1000
@orca.column("households")
def income_segment(households):
return pd.cut(households.income_in_thousands,
bins=[-np.inf, 30, 60, 100, np.inf],
labels=[1, 2, 3, 4])
@orca.column("households")
def non_workers(households, persons):
return persons.household_id.value_counts() - households.workers
@orca.column("households")
def drivers(households, persons):
# we assume that everyone 16 and older is a potential driver
return persons.local.query("16 <= age").\
groupby("household_id").size().\
reindex(households.index).fillna(0)
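# The pattern above (query -> groupby -> size -> reindex -> fillna) recurs in
# the age-bucket columns below; a generic sketch (helper name hypothetical):
# def count_persons_matching(persons, households, expr):
#     return persons.local.query(expr).\
#         groupby("household_id").size().\
#         reindex(households.index).fillna(0)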
@orca.column("households")
def num_young_children(households, persons):
return persons.local.query("age <= 4").\
groupby("household_id").size().\
reindex(households.index).fillna(0)
@orca.column("households")
def num_children(households, persons):
return persons.local.query("5 <= age <= 15").\
groupby("household_id").size().\
reindex(households.index).fillna(0)
@orca.column("households")
def num_adolescents(households, persons):
return persons.local.query("16 <= age <= 17").\
groupby("household_id").size().\
reindex(households.index).fillna(0)
@orca.column("households")
def num_college_age(households, persons):
return persons.local.query("18 <= age <= 24").\
groupby("household_id").size().\
reindex(households.index).fillna(0)
@orca.column("households")
def num_young_adults(households, persons):
return persons.local.query("25 <= age <= 34").\
groupby("household_id").size().\
reindex(households.index).fillna(0)
# just a rename / alias
@orca.column("households")
def home_taz(households):
return households.TAZ
# map household type ids to strings
@orca.column("households")
def household_type(households, settings):
return households.HHT.map(settings["household_type_map"])
@orca.column("households")
def non_family(households):
return households.household_type.isin(["nonfamily_male_alone",
"nonfamily_male_notalone",
"nonfamily_female_alone",
"nonfamily_female_notalone"])
# can't just invert these unfortunately because there's a null household type
@orca.column("households")
def family(households):
return households.household_type.isin(["family_married",
"family_male",
"family_female"])
@orca.column("households")
def num_under16_not_at_school(persons, households):
return persons.under16_not_at_school.groupby(persons.household_id).size().\
reindex(households.index).fillna(0)
@orca.column('households')
def auto_ownership(households):
return pd.Series(0, households.index)
@orca.column('households')
def hhsize(households):
return households.PERSONS
@orca.column('households_autoown')
def no_cars(households):
return (households.auto_ownership == 0)
@orca.column('households')
def home_is_urban(households, land_use, settings):
s = reindex(land_use.area_type, households.home_taz)
return s < settings['urban_threshold']
@orca.column('households')
def home_is_rural(households, land_use, settings):
s = reindex(land_use.area_type, households.home_taz)
return s > settings['rural_threshold']
@orca.column('households_autoown')
def car_sufficiency(households, persons):
return households.auto_ownership - persons.household_id.value_counts()
@orca.column('households')
def work_tour_auto_time_savings(households):
# TODO fix this variable from auto ownership model
return pd.Series(0, households.index)
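# Typical orca usage of these definitions (a sketch; the step name passed to
# orca.run is hypothetical and defined elsewhere in activitysim):
# >>> import orca
# >>> hh = orca.get_table('households').to_frame(['income_segment', 'drivers'])
# >>> orca.run(['auto_ownership'])  # steps pull these tables/columns lazily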
|
bhargavasana/activitysim
|
activitysim/defaults/tables/households.py
|
Python
|
agpl-3.0
| 4,771
|
[
"ORCA"
] |
1e1e35ae7500e98f47601cfcf96450f1c9cd024af4e68a133c528a1c7a81f0ba
|
import sys
import gzip
import threading
from glob import glob
from io import BytesIO
from .. import backends, conventions
from .common import ArrayWriter
from ..core.combine import auto_combine
from ..core.utils import close_on_error, is_remote_uri
from ..core.pycompat import basestring, OrderedDict, range
def _get_default_engine(path, allow_remote=False):
if allow_remote and is_remote_uri(path): # pragma: no cover
try:
import netCDF4
engine = 'netcdf4'
except ImportError:
try:
import pydap
engine = 'pydap'
except ImportError:
raise ValueError('netCDF4 or pydap is required for accessing '
'remote datasets via OPeNDAP')
else:
try:
import netCDF4
engine = 'netcdf4'
except ImportError: # pragma: no cover
try:
import scipy.io.netcdf
engine = 'scipy'
except ImportError:
raise ValueError('cannot read or write netCDF files without '
'netCDF4-python or scipy installed')
return engine
_global_lock = threading.Lock()
def _default_lock(filename, engine):
if filename.endswith('.gz'):
lock = False
else:
if engine is None:
engine = _get_default_engine(filename, allow_remote=True)
if engine == 'netcdf4':
if is_remote_uri(filename):
lock = False
else:
# TODO: identify netcdf3 files and don't use the global lock
# for them
lock = _global_lock
elif engine == 'h5netcdf':
lock = _global_lock
else:
lock = False
return lock
def open_dataset(filename_or_obj, group=None, decode_cf=True,
mask_and_scale=True, decode_times=True,
concat_characters=True, decode_coords=True, engine=None,
chunks=None, lock=None, drop_variables=None):
"""Load and decode a dataset from a file or file-like object.
Parameters
----------
filename_or_obj : str, file or xray.backends.*DataStore
Strings are interpreted as a path to a netCDF file or an OpenDAP URL
and opened with python-netCDF4, unless the filename ends with .gz, in
which case the file is gunzipped and opened with scipy.io.netcdf (only
netCDF3 supported). File-like objects are opened with scipy.io.netcdf
(only netCDF3 supported).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
netCDF4 files).
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf'}, optional
Engine to use when reading netCDF files. If not provided, the default
engine is chosen based on available dependencies, with a preference for
'netcdf4'.
chunks : dict, optional
If chunks is provided, it is used to load the new dataset into dask
arrays. This is an experimental feature; see the documentation for more
details.
lock : False, True or threading.Lock, optional
If chunks is provided, this argument is passed on to
:py:func:`dask.array.from_array`. By default, a per-variable lock is
used when reading data from netCDF files with the netcdf4 and h5netcdf
engines to avoid issues with concurrent access when using dask's
multithreaded backend.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
Returns
-------
dataset : Dataset
The newly created dataset.
See Also
--------
open_mfdataset
"""
if not decode_cf:
mask_and_scale = False
decode_times = False
concat_characters = False
decode_coords = False
def maybe_decode_store(store, lock=False):
ds = conventions.decode_cf(
store, mask_and_scale=mask_and_scale, decode_times=decode_times,
concat_characters=concat_characters, decode_coords=decode_coords,
drop_variables=drop_variables)
if chunks is not None:
ds = ds.chunk(chunks, lock=lock)
return ds
if isinstance(filename_or_obj, backends.AbstractDataStore):
store = filename_or_obj
elif isinstance(filename_or_obj, basestring):
if filename_or_obj.endswith('.gz'):
if engine is not None and engine != 'scipy':
raise ValueError('can only read gzipped netCDF files with '
"default engine or engine='scipy'")
# if the string ends with .gz, then gunzip and open as netcdf file
if sys.version_info[:2] < (2, 7):
raise ValueError('reading a gzipped netCDF not '
'supported on Python 2.6')
try:
store = backends.ScipyDataStore(gzip.open(filename_or_obj))
except TypeError as e:
# TODO: gzipped loading only works with NetCDF3 files.
if 'is not a valid NetCDF 3 file' in e.message:
raise ValueError('gzipped file loading only supports '
'NetCDF 3 files.')
else:
raise
else:
if engine is None:
engine = _get_default_engine(filename_or_obj,
allow_remote=True)
if engine == 'netcdf4':
store = backends.NetCDF4DataStore(filename_or_obj, group=group)
elif engine == 'scipy':
store = backends.ScipyDataStore(filename_or_obj)
elif engine == 'pydap':
store = backends.PydapDataStore(filename_or_obj)
elif engine == 'h5netcdf':
store = backends.H5NetCDFStore(filename_or_obj, group=group)
else:
raise ValueError('unrecognized engine for open_dataset: %r'
% engine)
if lock is None:
lock = _default_lock(filename_or_obj, engine)
with close_on_error(store):
return maybe_decode_store(store, lock)
else:
if engine is not None and engine != 'scipy':
raise ValueError('can only read file-like objects with '
"default engine or engine='scipy'")
# assume filename_or_obj is a file-like object
store = backends.ScipyDataStore(filename_or_obj)
return maybe_decode_store(store)
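# A minimal usage sketch (hypothetical file path; requires netCDF4-python or
# scipy for the default engines):
# >>> ds = open_dataset('observations.nc', chunks={'time': 100})
# >>> raw = open_dataset('observations.nc', decode_cf=False)  # skip CF decoding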
class _MultiFileCloser(object):
def __init__(self, file_objs):
self.file_objs = file_objs
def close(self):
for f in self.file_objs:
f.close()
def open_mfdataset(paths, chunks=None, concat_dim=None, preprocess=None,
engine=None, lock=None, **kwargs):
"""Open multiple files as a single dataset.
Experimental. Requires dask to be installed.
Parameters
----------
paths : str or sequence
Either a string glob in the form "path/to/my/files/*.nc" or an explicit
list of files to open.
chunks : dict, optional
Dictionary with keys given by dimension names and values given by chunk
sizes. In general, these should divide the dimensions of each dataset.
By default, chunks will be chosen to load entire input files into
memory at once. This has a major impact on performance: please see the
full documentation for more details.
concat_dim : str or DataArray or Index, optional
Dimension to concatenate files along. This argument is passed on to
:py:func:`xray.auto_combine` along with the dataset objects. You only
need to provide this argument if the dimension along which you want to
concatenate is not a dimension in the original datasets, e.g., if you
want to stack a collection of 2D arrays along a third dimension.
preprocess : callable, optional
If provided, call this function on each dataset prior to concatenation.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf'}, optional
Engine to use when reading netCDF files. If not provided, the default
engine is chosen based on available dependencies, with a preference for
'netcdf4'.
lock : False, True or threading.Lock, optional
This argument is passed on to :py:func:`dask.array.from_array`. By
default, a per-variable lock is used when reading data from netCDF
files with the netcdf4 and h5netcdf engines to avoid issues with
concurrent access when using dask's multithreaded backend.
**kwargs : optional
Additional arguments passed on to :py:func:`xray.open_dataset`.
Returns
-------
xray.Dataset
See Also
--------
auto_combine
open_dataset
"""
if isinstance(paths, basestring):
paths = sorted(glob(paths))
if not paths:
raise IOError('no files to open')
datasets = [open_dataset(p, engine=engine, **kwargs) for p in paths]
if lock is None:
lock = _default_lock(paths[0], engine)
file_objs = [ds._file_obj for ds in datasets]
datasets = [ds.chunk(chunks, lock=lock) for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
combined = auto_combine(datasets, concat_dim=concat_dim)
combined._file_obj = _MultiFileCloser(file_objs)
return combined
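# e.g., combining one file per year along an existing record dimension
# (hypothetical glob; requires dask):
# >>> ds = open_mfdataset('output/yearly.*.nc', concat_dim='time')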
WRITEABLE_STORES = {'netcdf4': backends.NetCDF4DataStore,
'scipy': backends.ScipyDataStore,
'h5netcdf': backends.H5NetCDFStore}
def to_netcdf(dataset, path=None, mode='w', format=None, group=None,
engine=None, writer=None):
"""This function creates an appropriate datastore for writing a dataset to
disk as a netCDF file
See `Dataset.to_netcdf` for full API docs.
The ``writer`` argument is only for the private use of save_mfdataset.
"""
if path is None:
path = BytesIO()
if engine is None:
engine = 'scipy'
elif engine is not None:
raise ValueError('invalid engine for creating bytes with '
'to_netcdf: %r. Only the default engine '
"or engine='scipy' is supported" % engine)
elif engine is None:
engine = _get_default_engine(path)
try:
store_cls = WRITEABLE_STORES[engine]
except KeyError:
raise ValueError('unrecognized engine for to_netcdf: %r' % engine)
if format is not None:
format = format.upper()
# if a writer is provided, store asynchronously
sync = writer is None
store = store_cls(path, mode, format, group, writer)
try:
dataset.dump_to_store(store, sync=sync)
if isinstance(path, BytesIO):
return path.getvalue()
finally:
if sync:
store.close()
if not sync:
return store
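# Note: with path=None the function serializes in memory, as described above
# (a sketch; falls back to the scipy engine, i.e. returns NETCDF3 bytes):
# >>> raw_bytes = to_netcdf(ds)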
def save_mfdataset(datasets, paths, mode='w', format=None, groups=None,
engine=None):
"""Write multiple datasets to disk as netCDF files simultaneously.
This function is intended for use with datasets consisting of dask.array
objects, in which case it can write the multiple datasets to disk
simultaneously using a shared thread pool.
When not using dask, it is no different than calling ``to_netcdf``
repeatedly.
Parameters
----------
datasets : list of xray.Dataset
List of datasets to save.
paths : list of str
List of paths to which to save each corresponding dataset.
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
these locations will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT', 'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
groups : list of str, optional
Paths to the netCDF4 group in each corresponding file to which to save
datasets (only works for format='NETCDF4'). The groups will be created
if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
Examples
--------
Save a dataset into one netCDF per year of data:
>>> years, datasets = zip(*ds.groupby('time.year'))
>>> paths = ['%s.nc' % y for y in years]
>>> xray.save_mfdataset(datasets, paths)
"""
if mode == 'w' and len(set(paths)) < len(paths):
raise ValueError("cannot use mode='w' when writing multiple "
'datasets to the same path')
if groups is None:
groups = [None] * len(datasets)
if len(set([len(datasets), len(paths), len(groups)])) > 1:
raise ValueError('must supply lists of the same length for the '
'datasets, paths and groups arguments to '
'save_mfdataset')
writer = ArrayWriter()
stores = [to_netcdf(ds, path, mode, format, group, engine, writer)
for ds, path, group in zip(datasets, paths, groups)]
try:
writer.sync()
for store in stores:
store.sync()
finally:
for store in stores:
store.close()
|
markelg/xray
|
xray/backends/api.py
|
Python
|
apache-2.0
| 15,889
|
[
"NetCDF"
] |
931585b6e4af3d6a8ef9983e224d9a96e908f90d8871583a21307fa3ca80a2ed
|
#!/usr/bin/python
"""
Copyright 2010 Paul Willworth <ioscode@gmail.com>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import Cookie
import dbSession
import dbShared
import cgi
import MySQLdb
#
form = cgi.FieldStorage()
galaxy = form.getfirst('galaxy', '')
# escape input to prevent sql injection
galaxy = dbShared.dbInsertSafe(galaxy)
# Main program
rowCount = 0
print 'Content-type: text/html\n'
print '<table class="userData" width="100%">'
conn = dbShared.ghConn()
cursor = conn.cursor()
if (cursor):
print '<thead><tr class="tableHead"><td>Rank</td><td>Member</td><td>Resources</td></th></thead>'
sqlStr = 'SELECT tUsers.userID, added, pictureName FROM tUsers LEFT JOIN tUserStats ON tUsers.userID = tUserStats.userID WHERE galaxy=' + galaxy + ' ORDER BY added DESC LIMIT 20'
cursor.execute(sqlStr)
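# A parameterized query is a safer sketch of the same statement and avoids
# relying on dbInsertSafe escaping (MySQLdb placeholder syntax):
# cursor.execute('SELECT tUsers.userID, added, pictureName FROM tUsers '
#                'LEFT JOIN tUserStats ON tUsers.userID = tUserStats.userID '
#                'WHERE galaxy=%s ORDER BY added DESC LIMIT 20', (galaxy,))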
row = cursor.fetchone()
while (row != None):
rowCount += 1
print ' <tr class="statRow"><td>' + str(rowCount) + '</td><td><a href="user.py?uid=' + row[0] + '" class="nameLink"><img src="/images/users/'+str(row[2])+'" class="tinyAvatar" /><span style="vertical-align:4px;">'+ row[0] + '</span></a></td><td>' + str(row[1]) + '</td>'
print ' </tr>'
row = cursor.fetchone()
cursor.close()
conn.close()
print ' </table>'
|
druss316/G-Harvestor
|
html/getMemberList.py
|
Python
|
gpl-3.0
| 1,927
|
[
"Galaxy"
] |
2a8cbb60c4b59a271c8091a6b2bfdb4fa79afab16eaf50c6070bccf8317e9453
|
"""
Routines for running a Columbus computation.
"""
import sys
import os
import shutil
import subprocess
import numpy as np
import nomad.math.constants as constants
import nomad.core.glbl as glbl
import nomad.core.atom_lib as atom_lib
import nomad.core.trajectory as trajectory
import nomad.core.surface as surface
import nomad.integrals.centroid as centroid
# path to columbus executables
columbus_path = ''
# set to true if we want to compute electronic structure properties
comp_properties = True
# path to columbus input files
input_path = ''
# path to location of 'work'/'restart' directories
work_path = ''
# path to the location of restart files (i.e. mocoef files and civfl)
restart_path = ''
# atom labels
a_sym = []
# atomic number
a_num = []
# atomic masses (amu)
a_mass = []
# by default, since this is the columbus module, assume atoms are
# 3-dimensional (i.e. Cartesian)
p_dim = 3
# number of atoms
n_atoms = 0
# total number of cartesian coordinates
n_cart = 0
# number of drts (should usually be "1" for C1)
n_drt = 1
# number of orbitals
n_orbs = 0
# number of states in state-averaged MCSCF
n_mcstates = 0
# number of CI roots
n_cistates = 0
# number of dummy atoms
n_dummy = 0
# list of dummy atom weights
dummy_lst = []
# if DE between two states greater than de_thresh, ignore coupling
coup_de_thresh = 100.
# maximum angular momentum in basis set
max_l = 1
# excitation level in CI
mrci_lvl = 0
# amount of memory per process, in MB
mem_str = ''
#----------------------------------------------------------------
#
# Functions called from interface object
#
#----------------------------------------------------------------
def init_interface():
"""Initializes the Columbus calculation from the Columbus input."""
global columbus_path, input_path, work_path, restart_path, log_file
global a_sym, a_num, a_mass, n_atoms, n_dummy, n_cart, p_dim
global n_orbs, n_mcstates, n_cistates, max_l, mrci_lvl, mem_str
global coup_de_thresh, dummy_lst
# setup working directories
# input and restart are shared
input_path = glbl.paths['cwd']+'/input'
restart_path = glbl.paths['cwd']+'/restart'
# ...but each process has its own work directory
work_path = glbl.paths['cwd']+'/work.'+str(glbl.mpi['rank'])
# set atomic symbol, number, mass,
natm = len(glbl.properties['crd_labels']) // p_dim
a_sym = glbl.properties['crd_labels'][::p_dim]
a_data = []
# we need to go through this to pull out the atomic numbers for
# correct writing of input
for i in range(natm):
if atom_lib.valid_atom(a_sym[i]):
a_data.append(atom_lib.atom_data(a_sym[i]))
else:
raise ValueError('Atom: '+str(a_sym[i])+' not found in library')
# masses are au -- columbus geom reads mass in amu
a_mass = [a_data[i][1]/constants.amu2au for i in range(natm)]
a_num = [a_data[i][2] for i in range(natm)]
# check to see if we have any dummy atoms to account for
if glbl.columbus['dummy_constrain'] is None:
dummy_lst = []
else:
dummy_lst = np.atleast_2d(glbl.columbus['dummy_constrain'])
# if we want to constrain dummy atom to the C.O.M.
if glbl.columbus['dummy_constrain_com'] and a_mass not in dummy_lst:
dummy_lst.append(a_mass)
# ensure dummy atom count is accurate.
n_dummy = len(dummy_lst)
if n_dummy != count_dummy(input_path+'/daltaoin'):
raise ValueError('Number of dummy atoms='+str(n_dummy)+
' is inconsistent with COLUMBUS input ='+
str(count_dummy(input_path+'/daltaoin')))
# confirm that we can see the COLUMBUS installation (pull the value
# COLUMBUS environment variable)
columbus_path = os.environ['COLUMBUS']
if not os.path.isfile(columbus_path + '/ciudg.x'):
raise FileNotFoundError('Cannot find COLUMBUS executables in: ' +
columbus_path)
# ensure COLUMBUS input files are present locally
if not os.path.exists('input'):
raise FileNotFoundError('Cannot find COLUMBUS input files in: input')
if os.path.exists(work_path):
shutil.rmtree(work_path)
os.makedirs(work_path)
if glbl.mpi['rank'] == 0:
if os.path.exists(restart_path):
shutil.rmtree(restart_path)
os.makedirs(restart_path)
# copy input directory to home and copy file contents to work directory
# we now assume the input directory is present in the current directory
for item in os.listdir('input'):
local_file = os.path.join('input', item)
work_file = os.path.join(work_path, item)
shutil.copy2(local_file, work_file)
# if glbl.mpi['rank'] == 0:
# input_file = os.path.join(input_path, item)
# shutil.copy2(local_file, input_file)
# make sure process 0 is finished populating the input directory
if glbl.mpi['parallel']:
glbl.mpi['comm'].barrier()
# now -- pull information from columbus input
n_atoms = natm
n_cart = natm * p_dim
n_orbs = int(read_pipe_keyword('input/cidrtmsin',
'orbitals per irrep'))
n_mcstates = int(read_nlist_keyword('input/mcscfin',
'NAVST'))
n_cistates = int(read_nlist_keyword('input/ciudgin.drt1',
'NROOT'))
mrci_lvl = int(read_pipe_keyword('input/cidrtmsin',
'maximum excitation level'))
max_l = ang_mom_dalton('input/daltaoin')
# all COLUMBUS modules will be run with the amount of memory specified by mem_per_core
mem_str = str(int(glbl.columbus['mem_per_core']))
coup_de_thresh = float(glbl.columbus['coup_de_thresh'])
# Do some error checking to makes sure COLUMBUS calc is consistent with trajectory
if n_cistates < int(glbl.properties['n_states']):
raise ValueError('n_cistates < n_states: t'+str(n_cistates)+' < '+str(glbl.properties['n_states']))
# generate one time input files for columbus calculations
make_one_time_input()
# always return to current working directory
os.chdir(glbl.paths['cwd'])
def evaluate_trajectory(traj, t=None):
"""Computes MCSCF/MRCI energy and computes all couplings.
For the columbus module, since gradients are not particularly
time consuming, it's easier (and probably faster) to compute
EVERYTHING at once (i.e. all energies, all gradients, all properties)
Thus, if electronic structure information is not up2date, all methods
call the same routine: run_single_point.
"""
global n_cart
label = traj.label
state = traj.state
nstates = traj.nstates
if label < 0:
print('evaluate_trajectory called with ' +
'id associated with centroid, label=' + str(label))
# create surface object to hold potential information
col_surf = surface.Surface()
col_surf.add_data('geom', traj.x())
# write geometry to file
write_col_geom(traj.x())
mo_restart, ci_restart = get_col_restart(traj)
if not mo_restart:
raise IOError('cannot find starting orbitals for mcscf')
# generate integrals
generate_integrals(label, t)
# run mcscf
run_col_mcscf(traj, t)
col_surf.add_data('mo', pack_mocoef())
# run mrci, if necessary
potential, atom_pop = run_col_mrci(traj, ci_restart, t)
col_surf.add_data('potential', potential + glbl.properties['pot_shift'])
col_surf.add_data('atom_pop', atom_pop)
# run properties, dipoles, etc.
[perm_dipoles, sec_moms] = run_col_multipole(traj)
col_surf.add_data('sec_mom', sec_moms)
dipoles = np.zeros((3, nstates, nstates))
for i in range(nstates):
dipoles[:,i,i] = perm_dipoles[:,i]
# run transition dipoles
init_states = [0, state]
for i in init_states:
for j in range(nstates):
# skip pairs already computed from the other initial state
if i != j and not (j in init_states and j < i):
tr_dip = run_col_tdipole(label, i, j)
dipoles[:,i,j] = tr_dip
dipoles[:,j,i] = tr_dip
col_surf.add_data('dipole',dipoles)
# compute gradient on current state
deriv = np.zeros((n_cart, nstates, nstates))
grads = run_col_gradient(traj, t)
deriv[:,state,state] = grads
# run coupling to other states
nad_coup = run_col_coupling(traj, potential, t)
for i in range(nstates):
if i != state:
state_i = min(i,state)
state_j = max(i,state)
deriv[:, state_i, state_j] = nad_coup[:, i]
deriv[:, state_j, state_i] = -nad_coup[:, i]
col_surf.add_data('derivative', deriv)
# save restart files
make_col_restart(traj)
# always return to current working directory
os.chdir(glbl.paths['cwd'])
return col_surf
def evaluate_centroid(cent, t=None):
"""Evaluates all requested electronic structure information at a
centroid."""
global n_cart
label = cent.label
nstates = cent.nstates
if label >= 0:
print('evaluate_centroid called with ' +
'id associated with trajectory, label=' + str(label))
state_i = min(cent.states)
state_j = max(cent.states)
# create surface object to hold potential information
col_surf = surface.Surface()
col_surf.add_data('geom', cent.x())
# write geometry to file
write_col_geom(cent.x())
mo_restart, ci_restart = get_col_restart(cent)
if not mo_restart:
raise IOError('cannot find starting orbitals for mcscf')
# generate integrals
generate_integrals(label, t)
# run mcscf
run_col_mcscf(cent, t)
col_surf.add_data('mo',pack_mocoef())
# run mrci, if necessary
potential, atom_pop = run_col_mrci(cent, ci_restart, t)
col_surf.add_data('potential', potential + glbl.properties['pot_shift'])
col_surf.add_data('atom_pop', atom_pop)
deriv = np.zeros((cent.dim, nstates, nstates))
if state_i != state_j:
# run coupling between states
nad_coup = run_col_coupling(cent, potential, t)
deriv[:,state_i, state_j] = nad_coup[:,state_j]
deriv[:,state_j, state_i] = -nad_coup[:,state_j]
col_surf.add_data('derivative', deriv)
# save restart files
make_col_restart(cent)
# always return to current working directory
os.chdir(glbl.paths['cwd'])
return col_surf
def evaluate_coupling(traj):
"""evaluate coupling between electronic states"""
nstates = traj.nstates
state = traj.state
# effective coupling is the nad projected onto velocity
coup = np.zeros((nstates, nstates))
vel = traj.velocity()
for i in range(nstates):
if i != state:
coup[state,i] = np.dot(vel, traj.derivative(state,i))
coup[i,state] = -coup[state,i]
traj.pes.add_data('coupling', coup)
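# In formula form (a sketch of what the loop above computes): for the occupied
# state s and each other state i, coup[s, i] = v . d_si, where d_si is the
# nonadiabatic coupling vector traj.derivative(s, i); this approximates the
# time-derivative coupling <psi_s | d/dt psi_i>.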
#----------------------------------------------------------------
#
# Routines for running columbus
#
#---------------------------------------------------------------
def make_one_time_input():
"""Creates a Columbus input for MRCI calculations."""
global mem_str
global columbus_path, work_path
# all calculations take place in work_dir
os.chdir(work_path)
# rotation matrix
with open('rotmax', 'w') as rfile:
rfile.write(' 1 0 0\n 0 1 0\n 0 0 1')
# cidrtfil files
if not os.path.isfile('cidrtfl.ci'):
with open('cidrtmsls', 'w') as cidrtmsls, open('cidrtmsin', 'r') as cidrtmsin:
run_prog('init', 'cidrtms.x', args=['-m',mem_str],
in_pipe=cidrtmsin,
out_pipe=cidrtmsls)
shutil.move('cidrtfl.1', 'cidrtfl.ci')
if not os.path.isfile('cidrtfl.cigrd'):
with open('cidrtmsls.cigrd', 'w') as cidrtmsls_grd, \
open('cidrtmsin.cigrd', 'r') as cidrtmsin_grd:
run_prog('init', 'cidrtms.x', args=['-m',mem_str],
in_pipe=cidrtmsin_grd,
out_pipe=cidrtmsls_grd)
shutil.move('cidrtfl.1', 'cidrtfl.cigrd')
# check if hermitin exists, if not, copy daltcomm
if not os.path.exists('hermitin'):
shutil.copy('daltcomm', 'hermitin')
# make sure ciudgin file exists
shutil.copy('ciudgin.drt1', 'ciudgin')
def generate_integrals(label, t):
"""Runs Dalton to generate AO integrals."""
global work_path
os.chdir(work_path)
# run unik.gets.x script
with open('unikls', 'w') as unikls:
run_prog(label, 'unik.gets.x', out_pipe=unikls)
# run hernew
run_prog(label, 'hernew.x')
shutil.move('daltaoin.new', 'daltaoin')
# run dalton.x
shutil.copy('hermitin', 'daltcomm')
with open('hermitls', 'w') as hermitls:
run_prog(label, 'dalton.x', args=['-m', mem_str], out_pipe=hermitls)
append_log(label, 'integral', t)
def run_col_mcscf(traj, t):
"""Runs MCSCF program."""
global n_mcstates, n_drt, mrci_lvl, mem_str
global work_path
label = traj.label
if type(traj) is trajectory.Trajectory:
state = traj.state
else:
state = min(traj.states)
os.chdir(work_path)
# allow for multiple DRTs in the mcscf part. For example, We may
# want to average over a' and a" in a tri-atomic case
for i in range(1, n_drt+1):
shutil.copy('mcdrtin.' + str(i), 'mcdrtin')
with open('mcdrtls', 'w') as mcdrtls, open('mcdrtin', 'r') as mcdrtin:
run_prog(label, 'mcdrt.x', args = ['-m', mem_str],
in_pipe = mcdrtin,
out_pipe = mcdrtls)
with open('mcuftls', 'w') as mcuftls:
run_prog(label, 'mcuft.x', out_pipe = mcuftls)
# save formula tape and log files for each DRT
shutil.copy('mcdrtfl', 'mcdrtfl.' + str(i))
shutil.copy('mcdftfl', 'mcdftfl.' + str(i))
shutil.copy('mcuftls', 'mcuftls.' + str(i))
shutil.copy('mcoftfl', 'mcoftfl.' + str(i))
# if running cas dynamics (i.e. no mrci), make sure we compute the
# mcscf/cas density (for gradients and couplings)
if mrci_lvl == 0:
with open('mcdenin', 'w', encoding='utf-8') as mcden:
mcden.write('MCSCF\n')
# diagonal densities (for gradients)
for i in range(n_mcstates):
mcden.write('1 {:2d} 1 {:2d}\n'.format(i, i))
# off-diagonal densities (for couplings)
for i in range(n_mcstates):
mcden.write('1 {:2d} 1 {:2d}\n'.format(min(i, state),
max(i, state)))
# try running mcscf a couple times this can be tweaked if one
# develops other strategies to deal with convergence problems
converged = False
run_max = 3
n_run = 0
while not converged and n_run < run_max:
n_run += 1
if n_run == 3:
# disable orbital-state coupling if convergence an issue
ncoupl = int(read_nlist_keyword('mcscfin', 'ncoupl'))
niter = int(read_nlist_keyword('mcscfin', 'niter'))
set_nlist_keyword('mcscfin', 'ncoupl', niter+1)
run_prog(label, 'mcscf.x', args=['-m', mem_str])
# check convergence
with open('mcscfls', 'r') as ofile:
for line in ofile:
if '*converged*' in line:
converged = True
break
# if not converged, we have to die here...
if not converged:
raise TimeoutError('MCSCF not converged.')
# save output
shutil.copy('mocoef_mc', 'mocoef')
# grab mcscfls output
append_log(label,'mcscf', t)
def run_col_mrci(traj, ci_restart, t):
"""Runs MRCI if running at that level of theory."""
global n_atoms, n_dummy, n_cistates, max_l, mem_str
global work_path
os.chdir(work_path)
label = traj.label
# get a fresh ciudgin file
shutil.copy(input_path + '/ciudgin.drt1', 'ciudgin')
# if old ci vectors are present, set NOLDV=n_cistates
if ci_restart:
set_nlist_keyword('ciudgin','NOLDV', n_cistates)
set_nlist_keyword('ciudgin','NBKITR', 0)
# determine if trajectory or centroid, and compute densities
# accordingly
if type(traj) is trajectory.Trajectory:
# perform density transformation for gradient computations
int_trans = True
# compute densities between all states and trajectory state
tran_den = []
init_states = [0, traj.state]
for i in init_states:
for j in range(traj.nstates):
if i != j and not (j in init_states and j < i):
tran_den.append([min(i,j)+1, max(i,j)+1])
else:
# this is a centroid, only need gradient if statei != statej
state_i = min(traj.states)
state_j = max(traj.states)
int_trans = (traj.states[0] != traj.states[1])
tran_den = [[state_i+1, state_j+1]]
# append entries in tran_den to ciudgin file
with open('ciudgin', 'r') as ciudgin:
ci_file = ciudgin.readlines()
with open('ciudgin', 'w') as ciudgin:
for line in ci_file:
ciudgin.write(line)
if '&end' in line:
break
ciudgin.write('transition\n')
for i in range(len(tran_den)):
ciudgin.write(' 1 {:2d} 1 {:2d}\n'.format(tran_den[i][0],
tran_den[i][1]))
# make sure we point to the correct formula tape file
link_force('cidrtfl.ci', 'cidrtfl')
link_force('cidrtfl.ci', 'cidrtfl.1')
# perform the integral transformation
with open('tranin', 'w') as ofile:
ofile.write('&input\nLUMORB=0\n&end')
run_prog(label, 'tran.x', args=['-m', mem_str])
# run mrci
run_prog(label, 'ciudg.x', args=['-m', mem_str])
ci_ener = []
ci_res = []
ci_tol = []
mrci_iter = False
converged = True
sys.stdout.flush()
with open('ciudgsm', 'r') as ofile:
for line in ofile:
if 'beginning the ci' in line:
mrci_iter = True
if 'final mr-sdci convergence information' in line and mrci_iter:
for i in range(n_cistates):
ci_info = ofile.readline().split()
try:
ci_info.remove('#') # necessary due to unfortunate columbus formatting
except ValueError:
pass
ci_ener.append(float(ci_info[3]))
ci_res.append(float(ci_info[6]))
ci_tol.append(float(ci_info[7]))
converged = converged and ci_res[-1] <= ci_tol[-1]
break
# determine convergence...
if not converged:
raise TimeoutError('MRCI did not converge for trajectory ' + str(label))
# if we're good, update energy array
energies = np.array([ci_ener[i] for i in range(traj.nstates)],dtype=float)
# now update atom_pops
ist = -1
atom_pops = np.zeros((n_atoms, traj.nstates))
# these are more trouble than they're worth, commenting out
# with open('ciudgls', 'r') as ciudgls:
# for line in ciudgls:
# if ' gross atomic populations' in line:
# ist += 1
# # only get populations for lowest traj.nstates states
# if ist == traj.nstates:
# break
# pops = []
# iatm = 0
# for i in range(int(np.ceil((n_atoms+n_dummy)/6.))):
# for j in range(max_l+3):
# nxtline = ciudgls.readline()
# if 'total' in line:
# break
# l_arr = nxtline.split()
# if i==1:
# pops.extend(l_arr[n_dummy+1:])
# else:
# pops.extend(l_arr[1:])
# atom_pops[:, ist] = np.array(pops, dtype=float)
# grab mrci output
append_log(label,'mrci', t)
# transform integrals using cidrtfl.cigrd
if int_trans:
frzn_core = int(read_nlist_keyword('cigrdin', 'assume_fc'))
if frzn_core == 1:
os.remove('moints')
os.remove('cidrtfl')
os.remove('cidrtfl.1')
link_force('cidrtfl.cigrd', 'cidrtfl')
link_force('cidrtfl.cigrd', 'cidrtfl.1')
shutil.copy(input_path + '/tranin', 'tranin')
run_prog(label, 'tran.x', args=['-m', mem_str])
return energies, atom_pops
def run_col_multipole(traj):
"""Runs dipoles / second moments."""
global p_dim, mrci_lvl, mem_str
global work_path
os.chdir(work_path)
nst = traj.nstates
dip_moms = np.zeros((p_dim, traj.nstates))
sec_moms = np.zeros((p_dim, p_dim, traj.nstates))
if mrci_lvl == 0:
type_str = 'mc'
else:
type_str = 'ci'
for istate in range(nst):
i1 = istate + 1
link_force('nocoef_' + str(type_str) + '.drt1.state' + str(i1),
'mocoef_prop')
run_prog(traj.label, 'exptvl.x', args=['-m', mem_str])
with open('propls', 'r') as prop_file:
for line in prop_file:
if 'Dipole moments' in line:
for j in range(5):
line = prop_file.readline()
l_arr = line.split()
dip_moms[:,istate] = np.array([float(l_arr[1]),
float(l_arr[2]),
float(l_arr[3])])
if 'Second moments' in line:
for j in range(5):
line = prop_file.readline()
l_arr = line.split()
for j in range(5):
line = prop_file.readline()
l_arr.extend(line.split())
# NOTE: we're only taking the diagonal elements
inds = [1,2,3,4,6,7]
raw_dat = np.array([float(l_arr[j]) for j in inds])
map_arr = [[0,1,2],[1,3,4],[2,4,5]]
for i in range(p_dim):
for j in range(i+1):
sec_moms[i,j,istate] = raw_dat[map_arr[i][j]]
sec_moms[j,i,istate] = raw_dat[map_arr[j][i]]
os.remove('mocoef_prop')
return dip_moms, sec_moms
def run_col_tdipole(label, state_i, state_j):
"""Computes transition dipoles between ground and excited state,
and between trajectory states and other state."""
global p_dim, mrci_lvl, mem_str
global work_path
os.chdir(work_path)
# make sure we point to the correct formula tape file
link_force('civfl', 'civfl.drt1')
link_force('civout', 'civout.drt1')
link_force('cirefv', 'cirefv.drt1')
i1 = min(state_i, state_j) + 1
j1 = max(state_i, state_j) + 1
if state_i == state_j:
return None
if mrci_lvl == 0:
with open('transftin', 'w') as ofile:
ofile.write('y\n1\n' + str(j1) + '\n1\n' + str(i1))
run_prog(label, 'transft.x', in_pipe='transftin', out_pipe='transftls')
with open('transmomin', 'w') as ofile:
ofile.write('MCSCF\n1 ' + str(j1) + '\n1\n' + str(i1))
run_prog(label, 'transmom.x', args=['-m', mem_str])
os.remove('mcoftfl')
shutil.copy('mcoftfl.1', 'mcoftfl')
else:
with open('trnciin', 'w') as ofile:
ofile.write(' &input\n lvlprt=1,\n nroot1=' + str(i1) + ',\n' +
' nroot2=' + str(j1) + ',\n drt1=1,\n drt2=1,\n &end')
run_prog(label, 'transci.x', args=['-m', mem_str])
shutil.move('cid1trfl', 'cid1trfl.' + str(i1) + '.' + str(j1))
tran_dip = np.zeros(p_dim)
with open('trncils', 'r') as trncils:
for line in trncils:
if 'total (elec)' in line:
line_arr = line.split()
for dim in range(p_dim):
tran_dip[dim] = float(line_arr[dim+2])
return tran_dip
def run_col_gradient(traj, t):
"""Performs integral transformation and determine gradient on
trajectory state."""
global n_dummy
global mrci_lvl, mem_str
global work_path
os.chdir(work_path)
shutil.copy(input_path + '/cigrdin', 'cigrdin')
tstate = traj.state + 1
if mrci_lvl > 0:
link_force('cid1fl.drt1.state' + str(tstate), 'cid1fl')
link_force('cid2fl.drt1.state' + str(tstate), 'cid2fl')
shutil.copy(input_path + '/trancidenin', 'tranin')
else:
link_force('mcsd1fl.' + str(tstate), 'cid1fl')
link_force('mcsd2fl.' + str(tstate), 'cid2fl')
set_nlist_keyword('cigrdin', 'samcflag', 1)
shutil.copy(input_path + '/tranmcdenin', 'tranin')
# run cigrd
set_nlist_keyword('cigrdin', 'nadcalc', 0)
run_prog(traj.label, 'cigrd.x', args=['-m', mem_str])
os.remove('cid1fl')
os.remove('cid2fl')
shutil.move('effd1fl', 'modens')
shutil.move('effd2fl', 'modens2')
# run tran
run_prog(traj.label, 'tran.x', args=['-m', mem_str])
os.remove('modens')
os.remove('modens2')
# run dalton
shutil.copy(input_path + '/abacusin', 'daltcomm')
with open('abacusls', 'w') as abacusls:
run_prog(traj.label, 'dalton.x', args=['-m', mem_str],
out_pipe=abacusls)
shutil.move('abacusls', 'abacusls.grad')
with open('cartgrd', 'r') as cartgrd:
lines = cartgrd.readlines()
# dummy atoms come first -- and aren't included in gradient
grad = [lines[i].split() for i in range(n_dummy,len(lines))]
gradient = np.array([item.replace('D', 'e') for row in grad
for item in row], dtype=float)
shutil.move('cartgrd', 'cartgrd.s'+str(traj.state)+'.'+str(traj.label))
# grab cigrdls output
append_log(traj.label, 'cigrd', t)
return gradient
def run_col_coupling(traj, ci_ener, t):
"""Computes couplings to states within prescribed DE window."""
global n_cart, n_dummy, coup_de_thresh, mrci_lvl, mem_str
global input_path, work_path
if type(traj) is trajectory.Trajectory:
t_state = traj.state
c_states = range(traj.nstates)
delta_e_max = coup_de_thresh
elif type(traj) is centroid.Centroid:
t_state = min(traj.states)
c_states = [max(traj.states)]
# if computing coupling regardless of delta e,
# set threshold to something we know won't trigger
# the ignoring of the coupling
delta_e_max = 2.*(ci_ener[-1] - ci_ener[0])
nad_coupl = np.zeros((n_cart, traj.nstates))
os.chdir(work_path)
# copy some clean files to the work directory
shutil.copy(input_path + '/cigrdin', 'cigrdin')
set_nlist_keyword('cigrdin', 'nadcalc', 1)
if mrci_lvl == 0:
set_nlist_keyword('cigrdin', 'samcflag', 1)
shutil.copy(input_path + '/tranmcdenin', 'tranin')
else:
shutil.copy(input_path + '/trancidenin', 'tranin')
shutil.copy(input_path + '/abacusin', 'daltcomm')
insert_dalton_key('daltcomm', 'COLBUS', '.NONUCG')
# loop over states to compute coupling to
for c_state in c_states:
if c_state == t_state or abs(ci_ener[c_state] -
ci_ener[t_state]) > delta_e_max:
continue
s1 = str(min(t_state, c_state) + 1).strip()
s2 = str(max(t_state, c_state) + 1).strip()
if mrci_lvl == 0:
link_force('mcsd1fl.trd' + s1 + 'to' + s2, 'cid1fl.tr')
link_force('mcsd2fl.trd' + s1 + 'to' + s2, 'cid2fl.tr')
link_force('mcad1fl.' + s1 + s2, 'cid1trfl')
else:
link_force('cid1fl.trd' + s1 + 'to' + s2, 'cid1fl.tr')
link_force('cid2fl.trd' + s1 + 'to' + s2, 'cid2fl.tr')
link_force('cid1trfl.' + s1 + '.' + s2, 'cid1trfl')
set_nlist_keyword('cigrdin', 'drt1', 1)
set_nlist_keyword('cigrdin', 'drt2', 1)
set_nlist_keyword('cigrdin', 'root1', s1)
set_nlist_keyword('cigrdin', 'root2', s2)
run_prog(traj.label, 'cigrd.x', args=['-m', mem_str])
shutil.move('effd1fl', 'modens')
shutil.move('effd2fl', 'modens2')
run_prog(traj.label, 'tran.x', args=['-m', mem_str])
with open('abacusls', 'w') as abacusls:
run_prog(traj.label, 'dalton.x', args=['-m', mem_str],
out_pipe=abacusls)
# read in cartesian gradient and save to array
with open('cartgrd', 'r') as cartgrd:
lines = cartgrd.read().splitlines()
grad = [lines[i].split() for i in range(n_dummy,len(lines))]
coup_vec = np.array([item.replace('D', 'e') for row in grad
for item in row], dtype=float)
delta_e = ci_ener[c_state] - ci_ener[t_state]
nad_coupl[:,c_state] = coup_vec / delta_e
shutil.move('cartgrd', 'cartgrd.nad.' + str(s1) + '.' + str(s2))
# grab mcscfls output
append_log(traj.label, 'nad', t)
# set the phase of the new coupling vectors using the cached data
nad_coupl_phased = get_adiabatic_phase(traj, nad_coupl)
return nad_coupl_phased
def make_col_restart(traj):
"""Saves mocoef and ci files to restart directory."""
global restart_path, work_path
os.chdir(work_path)
label = traj.label
# move orbitals
shutil.move(work_path+'/mocoef', restart_path+'/mocoef.'+str(label))
# move all ci vector, ci info files
# need to investigate the behavior of ciudg with respect to restarts and IO:
# in fortran, the symlink to the ci vector file is destroyed and replaced with
# a new file. Here, the ci vector is seemingly edited in place, meaning that
# when ciudg finishes, the symlink remains and points to an edited file.
# In that case, one simply removes the symlink; there is no need to edit the
# file in the restart directory.
if os.path.islink(work_path+'/civfl'):
os.unlink(work_path+'/civfl')
else:
shutil.move(work_path+'/civfl', restart_path+'/civfl.'+str(label))
if os.path.islink(work_path+'/civout'):
os.unlink(work_path+'/civout')
else:
shutil.move(work_path+'/civout', restart_path+'/civout.'+str(label))
if os.path.islink(work_path+'/cirefv'):
os.unlink(work_path+'/cirefv')
else:
shutil.move(work_path+'/cirefv', restart_path+'/cirefv.'+str(label))
# do some cleanup
    if os.path.isfile('cidrtfl'):   os.unlink('cidrtfl')
    if os.path.isfile('cidrtfl.1'): os.unlink('cidrtfl.1')
if os.path.isfile('aoints'): os.unlink('aoints')
if os.path.isfile('aoints2'): os.unlink('aoints2')
if os.path.isfile('modens'): os.unlink('modens')
if os.path.isfile('modens2'): os.unlink('modens2')
if os.path.isfile('cid1fl.tr'): os.unlink('cid1fl.tr')
if os.path.isfile('cid2fl.tr'): os.unlink('cid2fl.tr')
if os.path.isfile('cid1trfl'): os.unlink('cid1trfl')
if os.path.isfile('civfl.drt1'): os.unlink('civfl.drt1')
if os.path.isfile('civout.drt1'):os.unlink('civout.drt1')
if os.path.isfile('cirefv.drt1'):os.unlink('cirefv.drt1')
def get_col_restart(traj):
"""Gets restart mocoef file and ci vectors for columbus calculation.
1. failure to find mocoef file is fatal.
2. failure to find ci files is OK
MOCOEF
1. If first step and parent-less trajectory, take what's in input.
2. If first step of spawned trajectory, take parents restart info.
3. If first step of centroid, take one of parent's restart info.
CIUDG
1. Copys/links CI restart files to working directory.
2. If no ci vectors, simply start CI process from scratch
"""
global work_path, restart_path
os.chdir(work_path)
mocoef_file = restart_path + '/mocoef.'
lbl_str = str(traj.label) # string for trajectory label
par_str = '' # string for parent trajectory label
if type(traj) is centroid.Centroid:
# centroids have two parents
par_arr = [str(traj.parents[i]) for i in range(len(traj.parents))]
else:
# if trajectory, there is a single parent
par_arr = [str(traj.parent)]
mo_restart = False
ci_restart = False
# MOCOEF RESTART FILES
# if we have some orbitals in memory, write those out
if 'mo' in traj.pes.avail_data():
write_mocoef('mocoef', traj.pes.get_data('mo'))
mo_restart = True
# if restart file exists, create symbolic link to it
elif os.path.exists(mocoef_file+lbl_str):
shutil.copy(mocoef_file+lbl_str, 'mocoef')
mo_restart = True
    # if we still haven't found an mocoef file, check the restart files
    # of the parents (relevant if we've just spawned and this is the
    # first pes evaluation for the child)
if not mo_restart:
print('looking for parent restart...')
for i in range(len(par_arr)):
print('checking: '+mocoef_file+par_arr[i])
if os.path.exists(mocoef_file+par_arr[i]):
shutil.copy(mocoef_file+par_arr[i], 'mocoef')
mo_restart = True
print('found: '+mocoef_file+par_arr[i])
par_str = par_arr[i]
break
sys.stdout.flush()
if not mo_restart:
# else, just take the mocoef file we have lying around
if os.path.exists(work_path+'/mocoef'):
mo_restart = True
# else, we're out of luck
else:
mo_restart = False
# CI RESTART FILES
# if restart file exists, create symbolic link to it
civfl = restart_path + '/civfl.' + lbl_str
civout = restart_path + '/civout.' + lbl_str
cirefv = restart_path + '/cirefv.' + lbl_str
civfl_p = restart_path + '/civfl.' + par_str
civout_p = restart_path + '/civout.' + par_str
cirefv_p = restart_path + '/cirefv.' + par_str
# if restart file exists, create symbolic link to it
if (os.path.isfile(civfl) and os.path.isfile(civout)
and os.path.isfile(cirefv)):
ci_restart = True
# if parent restart files exists, create symbolic link to it
elif (os.path.isfile(civfl_p) and os.path.isfile(civout_p)
and os.path.isfile(cirefv_p)):
shutil.copy(civfl_p, civfl)
shutil.copy(civout_p, civout)
shutil.copy(cirefv_p, cirefv)
ci_restart = True
# else no ci restart
else:
ci_restart = False
if ci_restart:
link_force(civfl, work_path+'/civfl')
link_force(civout, work_path+'/civout')
link_force(cirefv, work_path+'/cirefv')
return mo_restart, ci_restart
def get_adiabatic_phase(traj, new_coup):
"""Determines the phase of the computed coupling that yields smallest
change from previous coupling."""
global n_cart
label = traj.label
if type(traj) is trajectory.Trajectory:
state = traj.state
else:
state = min(traj.states)
# pull data to make consistent
if 'derivative' in traj.pes.avail_data():
old_coup = np.transpose(
np.array([traj.derivative(min(state,i),max(state,i),geom_chk=False)
for i in range(traj.nstates)]))
else:
old_coup = np.zeros((n_cart, traj.nstates))
for i in range(traj.nstates):
# if the previous coupling is vanishing, phase of new coupling is arbitrary
if np.linalg.norm(old_coup[:,i]) > constants.fpzero:
# check the difference between the vectors assuming phases of +1/-1
norm_pos = np.linalg.norm( new_coup[:,i] - old_coup[:,i])
norm_neg = np.linalg.norm(-new_coup[:,i] - old_coup[:,i])
if norm_pos > norm_neg:
new_coup[:,i] *= -1.
return new_coup
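# A minimal sketch of the phase convention above (hypothetical numbers): if
# the previous coupling for state i was old = [0.1, 0.2] and the new run
# returns new = [-0.1, -0.2], then |new - old| > |-new - old|, so the new
# vector is flipped back to [0.1, 0.2], keeping the coupling continuous
# along the trajectory.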
#-----------------------------------------------------------------
#
# File parsing
#
#-----------------------------------------------------------------
def run_prog(tid, prog_name, args=None, in_pipe=None, out_pipe=None):
    """Runs a Columbus program executable, raising an exception if the
    program exits with a non-zero code or flags an error in the bummer
    file."""
arg = [str(prog_name)]
kwargs = dict()
# first argument is executable, plus any arguments passed to executable
if args:
arg.extend(args)
# if we need to pipe input
if in_pipe:
kwargs['stdin'] = in_pipe
# if we need to pipe output
if out_pipe:
kwargs['stdout'] = out_pipe
# append check for error code
kwargs['check'] = True
kwargs['universal_newlines'] = True
subprocess.run(arg, **kwargs)
    # if we got here, the return code was zero, but check the bummer
    # file to be sure Columbus itself did not flag an error
if not prog_status():
raise RuntimeError(str(prog_name)+' returned error, traj='+str(tid))
def prog_status():
"""Opens bummer file, checks to see if fatal error message
has been written. If so, return False, else, return True"""
try:
with open('bummer', 'r') as f:
bummer = f.readlines()
except EnvironmentError:
# if bummer not here, return True
return True
bstr = "".join(bummer)
return bstr.find('fatal') == -1 or bstr.find('nonfatal') != -1
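# A minimal sketch of the status check (hypothetical bummer contents): a
# bummer file reading 'nonfatal error in cigrd' contains 'fatal' only inside
# 'nonfatal', so prog_status() returns True; one reading 'fatal error in
# ciudg' makes prog_status() return False and run_prog() raise RuntimeError.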
def append_log(label, listing_file, time):
"""Grabs key output from columbus listing files.
Useful for diagnosing electronic structure problems.
"""
# check to see if time is given, if not -- this is a spawning
# situation
if time is None:
tstr = 'spawning'
else:
tstr = str(time)
# open the running log for this process
#log_file = open(glbl.home_path+'/columbus.log.'+str(glbl.mpi['rank']), 'a')
log_file = open('columbus.log.'+str(glbl.mpi['rank']), 'a')
log_file.write(' time='+tstr+' trajectory='+str(label)+
': '+str(listing_file)+' summary -------------\n')
if listing_file == 'integral':
with open('hermitls', 'r') as hermitls:
for line in hermitls:
if 'Bond distances' in line:
while 'Nuclear repulsion energy' not in line:
log_file.write(line)
line = hermitls.readline()
break
elif listing_file == 'mcscf':
with open('mcscfls', 'r') as mcscfls:
for line in mcscfls:
if 'final mcscf' in line:
while len(line.rstrip()) != 0:
log_file.write(line)
line = mcscfls.readline()
break
elif listing_file == 'mrci':
with open('ciudgsm', 'r') as ciudgls:
ci_iter = False
for line in ciudgls:
            if 'beginning the ci iterative' in line:
ci_iter = True
if 'final mr-sdci convergence information' in line and ci_iter:
while len(line.rstrip()) != 0:
log_file.write(line)
line = ciudgls.readline()
break
elif listing_file == 'cigrd':
with open('cigrdls', 'r') as cigrdls:
for line in cigrdls:
if 'RESULTS' in line:
while 'effective' not in line:
log_file.write(line)
line = cigrdls.readline()
break
elif listing_file == 'nad':
with open('cigrdls', 'r') as cigrdls_nad:
for line in cigrdls_nad:
if 'RESULTS' in line:
while 'effective' not in line:
log_file.write(line)
line = cigrdls_nad.readline()
break
else:
print('listing file: ' + str(listing_file) + ' not recognized.')
log_file.close()
def write_col_geom(geom):
    """Writes an array of atoms to a COLUMBUS style geom file."""
    global n_atoms, n_dummy, dummy_lst, p_dim, a_sym, a_num, a_mass
    global work_path
    os.chdir(work_path)
    fmt = '{:3s}{:6.1f}{:14.8f}{:14.8f}{:14.8f}{:14.8f}\n'
    with open('geom', 'w', encoding='utf-8') as f:
        for i in range(n_dummy):
            xyz = dummy_xyz(geom, dummy_lst[i])
            f.write(fmt.format('X', 0, xyz[0], xyz[1], xyz[2], 0.0))
        for i in range(n_atoms):
            f.write(fmt.format(a_sym[i], a_num[i], geom[p_dim*i],
                               geom[p_dim*i+1], geom[p_dim*i+2], a_mass[i]))
def read_pipe_keyword(infile, keyword):
    """Reads from a direct input file via keyword search."""
    with open(infile, 'r', encoding='utf-8') as f:
        for line in f:
            if keyword in line:
                return line.split()[0]
def read_nlist_keyword(infile, keyword):
    """Reads from a namelist style input."""
    with open(infile, 'r', encoding='utf-8') as f:
        for line in f:
            if keyword in line:
                line = line.rstrip("\r\n")
                return line.split('=', 1)[1].strip(' ,')
def set_nlist_keyword(file_name, keyword, value):
"""Writes a namelist style input."""
outfile = str(file_name) + '.tmp'
key_found = False
with open(file_name, 'r') as ifile, open(outfile, 'w') as ofile:
for line in ifile:
if keyword in line:
ofile.write(str(keyword) + ' = ' + str(value) + '\n')
key_found = True
elif '&end' in line and not key_found:
ofile.write(str(keyword) + ' = ' + str(value) + ',\n')
ofile.write(line)
else:
ofile.write(line)
shutil.move(outfile, file_name)
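# A minimal sketch of the rewrite above (hypothetical file contents): a
# cigrdin line 'nadcalc = 0,' becomes 'nadcalc = 1' after
# set_nlist_keyword('cigrdin', 'nadcalc', 1); if no line mentions the
# keyword, 'nadcalc = 1,' is inserted just before the '&end' terminator.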
def insert_dalton_key(infile, keyword, value):
"""Insert a Dalton keyword.
This is a pretty specialized function given the idiosyncracies of
dalton input. The keyword must already exist in the file.
"""
with open(infile, 'r') as ifile, open('tempfile', 'w') as ofile:
for line in ifile:
ofile.write(line)
if keyword in line:
ofile.write(value + '\n')
shutil.move('tempfile', infile)
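# A minimal usage sketch: insert_dalton_key('daltcomm', 'COLBUS', '.NONUCG')
# writes '.NONUCG' on a new line immediately after every line containing
# 'COLBUS', as done in the coupling setup above.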
def ang_mom_dalton(infile):
"""Finds maximum ang. mom. in basis set from dalton."""
max_l = 0
with open(infile, 'r') as daltaoin:
for i in range(4):
line = daltaoin.readline()
l_arr = line.split()
n_grps = int(l_arr[1])
for i in range(n_grps):
line = daltaoin.readline()
l_arr = line.split()
n_atm = int(l_arr[1])
n_ang = int(l_arr[2]) - 1
            # update the running maximum angular momentum from this group
max_l = max(max_l, n_ang)
n_con = [int(l_arr[j]) for j in range(3, 3+n_ang+1)]
for j in range(n_atm):
line = daltaoin.readline()
for j in range(len(n_con)):
for k in range(n_con[j]):
line = daltaoin.readline()
nprim = int(line.split()[1])
n_line = int(np.ceil(float(line.split()[2])/3.))
for l in range(nprim * n_line):
line = daltaoin.readline()
return max_l
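# A minimal sketch of the daltaoin layout assumed above: the fourth line
# carries the number of atom groups in its second field; each group header
# gives the atom count, (max angular momentum + 1) and per-shell contraction
# counts, followed by the atom coordinate lines and the primitive blocks
# that the loop skips over.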
def count_dummy(daltfile):
"""Determines the number of dummy atoms in a dalton input file"""
n_dum = 0
with open(daltfile, 'r') as daltaoin:
for line in daltaoin:
if line[0] == 'X':
n_dum += 1
return n_dum
def dummy_xyz(geom, dummy_wts):
"""Determines the xyz coordinates for a dummy atom given a
cartesian geometry and a set of wts"""
global n_atoms
xyz = np.zeros(3)
for i in range(n_atoms):
xyz += geom[3*i:3*i+3]*dummy_wts[i]
xyz /= sum(dummy_wts)
return xyz
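# A minimal sketch (hypothetical weights): with n_atoms = 2,
# geom = [0, 0, 0, 2, 0, 0] and dummy_wts = [1, 1], the dummy atom lands at
# the midpoint [1, 0, 0]; unequal weights pull it toward the more heavily
# weighted atom.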
def file_len(fname):
    """Returns the number of lines in a file."""
    i = -1  # handle empty files, where the loop body never runs
    with open(fname) as f:
        for i, l in enumerate(f):
            pass
    return i + 1
def link_force(target, link_name):
"""Creates a symbolic link, overwriting existing link if necessary."""
try:
os.symlink(target, link_name)
except FileExistsError:
os.unlink(link_name)
os.symlink(target, link_name)
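# A minimal usage sketch (hypothetical paths): link_force('/scratch/civfl.1',
# 'civfl') points 'civfl' at the restart file, silently replacing any stale
# link left over from a previous step.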
def pack_mocoef():
"""Loads orbitals from a mocoef file."""
    mos = np.loadtxt('mocoef', dtype=bytes, delimiter='\n').astype(str)
return mos
def write_mocoef(fname, mos):
"""Writes orbitals to mocoef file."""
np.savetxt(str(fname), mos, fmt="%s")
|
mschuurman/FMSpy
|
nomad/interfaces/columbus.py
|
Python
|
lgpl-3.0
| 44,712
|
[
"COLUMBUS",
"Dalton"
] |
fa6b931e252c7882334e3fd778676a88fc06dbe43cbf8ba7f2f4527cf1e532ed
|
# find the people from the same town
# use python 3.5 as default
"""
How to find the people from the same home town?
Xiao Ming is a new colleage student, he wants to know who is from the
same town as him. But many people just tell him that he or she is from the
same town with another student.
Can you find the all the people from the same town with XiaoMing?
Demo:
if 4 people invoved in the dialog
there are 3 relationship told
1 4
3 2
2 4
if we just image XiaoMing is 1
so from 1 4 we know 4 is from the same town with XiaoMing.
from 2 4, we know 2 is from the same town with XiaoMing
...
Finally we can get 2 3 4 are from the same town with XiaoMing
"""
# import
from collections import deque
class Graph(object):
    "store the undirected graph"
    def __init__(self):
        "init the graph"
        self.__graph = {}
    def __str__(self):
        "show the graph"
        return str(self.edges())
    def setSet(self, islands):
        "register the key set of the graph; each key is an island (a person)"
        for key in islands:
            self.add(key)
    def add(self, island_1, island_2=None):
        "add an island, or an edge between two islands"
        if island_2 is None:
            self.__graph[island_1] = []
        else:
            self.__graph[island_2].append(island_1)
            self.__graph[island_1].append(island_2)
def edges(self):
"return all the edges contained in the map"
edges = []
for key in self.__graph:
for island in self.__graph[key]: # not empty
edges.append((key, island))
return edges
    def isolate(self):
        "return only the isolated islands (those with no relationships)"
        isolate = []
        for island in self.__graph:
            if not self.__graph[island]:
                isolate.append(island)
        return isolate
def relate(self, island_3):
"find the other islands related to island_3"
visit = []
queue = deque([])
queue.append(island_3)
while queue:
current = queue.popleft()
if current not in visit:
visit.append(current)
for relate in self.__graph[current]:
if relate not in queue:
queue.append(relate)
return visit
def findship(islands, people, ship):
    "find all the people related to the given person"
    relation = Graph()
    # register the islands (people) first
    relation.setSet(islands)
# add
for edge in ship:
a, b = edge
relation.add(a, b)
# find
return relation.relate(people)
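# A minimal usage sketch mirroring the docstring demo: with 4 people and the
# relationships (1, 4), (3, 2) and (2, 4),
#     findship(range(1, 5), 1, [(1, 4), (3, 2), (2, 4)])
# returns [1, 4, 2, 3] -- Xiao Ming (person 1) plus everyone reachable from
# him through the told relationships.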
def main():
"simple demo"
print("""
Find People From Same Town
N -- number of people invoved in dialog
M -- number of ship got
if n = 0 and m =0 just quit the program
Example:
input:
5 4
1 3
1 5
2 4
3 5
0 0
output:
2
""")
while 1:
n, m = input().split()
n = int(n)
m = int(m)
if n == 0 and m == 0:
break
ship = []
while m:
a, b = input().split()
ship.append((int(a), int(b)))
m -= 1
        # people are numbered 1..n in the input; Xiao Ming is person 1
        ships = findship(range(1, n + 1), 1, ship)
        # print how many people share Xiao Ming's town (excluding himself)
        print(len(ships) - 1)
if __name__ == '__main__':
main()
|
smileboywtu/Code-Interview
|
find-people-from-same-town.py
|
Python
|
gpl-2.0
| 3,443
|
[
"VisIt"
] |
f751c1c70f4aa43216b00a1ebbd14d86db04d2cf1570983359dcf0f9919db2d6
|
"""Tests for the ArraySource data source for MayaVi.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
from os.path import abspath
from StringIO import StringIO
import copy
import numpy
# Enthought library imports.
from traits.api import TraitError
# Local imports.
from common import TestCase
class TestArraySource(TestCase):
def check_input_validation(self, obj):
"""Tests if only the correct forms of input arrays are supported."""
# These should work.
obj.scalar_data = numpy.zeros((2,2), 'd')
obj.scalar_data = numpy.zeros((2,2,2), 'd')
obj.scalar_data = None
obj.vector_data = numpy.zeros((2,2,3), 'd')
obj.vector_data = numpy.zeros((2,2,2,3), 'd')
obj.vector_data = None
# These should not.
self.assertRaises(TraitError, setattr, obj, 'scalar_data', [1,2,3])
self.assertRaises(TraitError, setattr, obj, 'scalar_data',
numpy.zeros((2,2,2,3), 'd'))
obj.scalar_data = None
self.assertRaises(TraitError, setattr, obj, 'vector_data', [[1,2,3]])
self.assertRaises(TraitError, setattr, obj, 'vector_data',
numpy.zeros((2,2,2,1), 'd'))
obj.vector_data = None
obj.scalar_data = numpy.zeros((2,2), 'd')
self.assertRaises(TraitError, setattr, obj, 'vector_data',
numpy.zeros((4,4,3), 'd'))
obj.vector_data = numpy.zeros((2,2,3), 'd')
self.assertRaises(TraitError, setattr, obj, 'scalar_data',
numpy.zeros((4,3), 'i'))
self.assertRaises(TraitError, setattr, obj, 'scalar_data',
numpy.zeros((2,2,2), 'i'))
obj.scalar_data = numpy.zeros((2,2), 'f')
# Clean up the object so it can be used for further testing.
obj.scalar_data = obj.vector_data = None
def make_2d_data(self):
s = numpy.array([[0, 1],[2, 3]], 'd')
v = numpy.array([[[1,1,1], [1,0,0]],[[0,1,0], [0,0,1]]], 'd')
tps = numpy.transpose
s, v = tps(s), tps(v, (1, 0, 2))
return s, v
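    # Note (presumed intent): the transposes above convert numpy's
    # (row, column) layout into the (x, y) point ordering ArraySource
    # expects; check() applies the inverse transposes before flattening
    # and comparing against the VTK point data.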
def make_3d_data(self):
s = numpy.array([[[0, 1],[2, 3]],
[[4, 5],[6, 7]]], 'd')
v = numpy.array([[[[0,0,0],
[1,0,0]],
[[0,1,0],
[1,1,0]]],
[[[0,0,1],
[1,0,1]],
[[0,1,1],
[1,1,1]]]], 'd')
tps = numpy.transpose
s, v = tps(s), tps(v, (2, 1, 0, 3))
return s, v
def check(self):
script = self.script
s = script.engine.current_scene
d1, d2 = s.children
s1, v1 = d1.children[0].children[1:]
expect = list(self.make_2d_data())
tps = numpy.transpose
expect[0] = tps(expect[0])
expect[1] = tps(expect[1], (1, 0, 2))
sc1 = s1.actor.mapper.input.point_data.scalars.to_array()
assert numpy.allclose(sc1.flatten(), expect[0].flatten())
vec1 = s1.actor.mapper.input.point_data.vectors.to_array()
assert numpy.allclose(vec1.flatten(), expect[1].flatten())
s2, v2 = d2.children[0].children[1:]
expect = list(self.make_3d_data())
tps = numpy.transpose
expect[0] = tps(expect[0])
expect[1] = tps(expect[1], (2, 1, 0, 3))
sc2 = s2.actor.mapper.input.point_data.scalars.to_array()
assert numpy.allclose(sc2.flatten(), expect[0].flatten())
vec2 = s2.actor.mapper.input.point_data.vectors.to_array()
assert numpy.allclose(vec2.flatten(), expect[1].flatten())
def test(self):
self.main()
def do(self):
############################################################
# Imports.
script = self.script
from mayavi.sources.array_source import ArraySource
from mayavi.modules.outline import Outline
from mayavi.modules.surface import Surface
from mayavi.modules.vectors import Vectors
############################################################
# Create a new scene and set up the visualization.
s = self.new_scene()
d = ArraySource()
self.check_input_validation(d)
sc, vec = self.make_2d_data()
d.origin = (-1, -1, 0)
d.scalar_data = sc
d.vector_data = vec
script.add_source(d)
# Create an outline for the data.
o = Outline()
script.add_module(o)
# View the data.
s = Surface()
script.add_module(s)
v = Vectors()
script.add_module(v)
# Add a 3D data source
d = ArraySource()
sc, vec = self.make_3d_data()
d.scalar_data = sc
d.vector_data = vec
script.add_source(d)
# Create an outline for the data.
o = Outline()
script.add_module(o)
# View a slice.
s = Surface()
script.add_module(s)
v = Vectors()
script.add_module(v)
# Set the scene to a suitable view.
s.scene.z_plus_view()
c = s.scene.camera
c.azimuth(-30)
c.elevation(30)
self.check()
############################################################
# Test if saving a visualization and restoring it works.
bg = s.scene.background
# Save visualization.
f = StringIO()
f.name = abspath('test.mv2') # We simulate a file.
script.save_visualization(f)
f.seek(0) # So we can read this saved data.
# Remove existing scene.
engine = script.engine
engine.close_scene(s)
# Load visualization
script.load_visualization(f)
s = engine.current_scene
# Set the scene to a suitable view.
s.scene.z_plus_view()
c = s.scene.camera
c.azimuth(-30)
c.elevation(30)
s.scene.background = bg
self.check()
############################################################
# Test if the MayaVi2 visualization can be deepcopied.
# Pop the source object.
sources = s.children
s.children = []
# Add it back to see if that works without error.
s.children.extend(sources)
s.scene.reset_zoom()
self.check()
# Now deepcopy the source and replace the existing one with
# the copy. This basically simulates cutting/copying the
# object from the UI via the right-click menu on the tree
# view, and pasting the copy back.
        sources1 = copy.deepcopy(sources)
        s.children[:] = sources1
s.scene.reset_zoom()
self.check()
# If we have come this far, we are golden!
if __name__ == "__main__":
t = TestArraySource()
t.test()
|
liulion/mayavi
|
integrationtests/mayavi/test_array_source.py
|
Python
|
bsd-3-clause
| 6,964
|
[
"Mayavi"
] |
a2dabc0d0e1e1c924c95aca9da7c9971910c7c0bc607c93fbe9e0c0d001c19a6
|