[Dataset viewer schema] text: string (12 to 1.05M chars) | repo_name: string (5 to 86 chars) | path: string (4 to 191 chars) | language: string (1 class) | license: string (15 classes) | size: int32 (12 to 1.05M) | keyword: list (1 to 23 items) | text_hash: string (64 chars)
#
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
import codecs
from collections import OrderedDict
import os
from os.path import dirname
from os.path import join
from unittest.case import expectedFailure
from commoncode import fileutils
from commoncode.testcase import FileBasedTesting
from commoncode import text
from licensedcode import saneyaml
from licensedcode import detect
test_data_dir = join(dirname(__file__), 'data/licenses')
"""
Data-driven tests using expectations stored in YAML files.
"""
class LicenseTest(object):
"""
A license detection test is used to verify that license detection works
correctly.
It consists of two files with the same base name: a .yml file with test data
and a test file with any other extension that needs to be tested for
detection.
The following data are loaded from the .yml file:
- a test file to scan for licenses,
- a list of expected licenses (with optional positions) to detect,
- optional notes,
- a boolean flag expected_failure set to True if a test is expected to fail
for now.
If the list of licenses is empty, then this test should not detect any
license in the test file.
"""
def __init__(self, data_file=None, test_file=None):
self.data_file = data_file
self.test_file = test_file
if self.test_file:
self.test_file_name = fileutils.file_name(test_file)
if self.data_file:
with codecs.open(data_file, mode='rb', encoding='utf-8') as df:
data = saneyaml.load(df.read())
self.licenses = data.get('licenses', [])
self.notes = data.get('notes')
self.sort = data.get('sort', False)
self.expected_failure = data.get('expected_failure', False)
def asdict(self):
dct = OrderedDict()
if self.licenses:
dct['licenses'] = self.licenses
if self.expected_failure:
dct['expected_failure'] = self.expected_failure
if self.sort and self.licenses and len(self.licenses) > 1:
dct['sort'] = self.sort
if self.notes:
dct['notes'] = self.notes
return dct
def dump(self):
"""
Dump a YAML representation of self to the .yml data file (self.data_file).
"""
as_yaml = saneyaml.dump(self.asdict())
with codecs.open(self.data_file, 'wb', encoding='utf-8') as df:
df.write(as_yaml)
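# Illustrative sketch (not part of the original module): a .yml data file as
# described in the LicenseTest docstring pairs with a test file of the same
# base name. For a hypothetical test file "apache_notice.txt", the companion
# "apache_notice.yml" might contain:
#
#   licenses:
#       - apache-2.0
#   notes: standard Apache notice header
#
# An empty or absent "licenses" list means no license should be detected in
# the test file.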
def load_license_tests(test_dir=test_data_dir):
"""
Yield LicenseTest objects loaded from the test data files in test_dir.
"""
# first collect files with .yml extension and files with other extensions
# in two maps keyed by file base_name
data_files = {}
test_files = {}
for top, _, files in os.walk(test_dir):
for yfile in files:
base_name = fileutils.file_base_name(yfile)
file_path = join(top, yfile)
if yfile.endswith('.yml'):
assert base_name not in data_files
data_files[base_name] = file_path
else:
assert base_name not in test_files
test_files[base_name] = file_path
# ensure that each data file has a corresponding test file
diff = set(data_files.keys()).symmetric_difference(set(test_files.keys()))
assert not diff
# second, create pairs of a data_file and the corresponding test file
# that have the same base_name
for base_name, data_file in data_files.items():
test_file = test_files[base_name]
yield LicenseTest(data_file, test_file)
def build_tests(license_tests, clazz):
"""
Dynamically build test methods from a sequence of LicenseTest objects and
attach these methods to the clazz test class.
"""
for test in license_tests:
# path relative to the data directory
tfn = 'licenses/' + test.test_file_name
test_name = 'test_detection_%(tfn)s' % locals()
test_name = text.python_safe_name(test_name)
# closure on the test params
test_method = make_test_function(test.licenses, tfn, test_name, sort=test.sort)
if test.expected_failure:
test_method = expectedFailure(test_method)
# attach that method to our test class
setattr(clazz, test_name, test_method)
def make_test_function(expected_licenses, test_file, test_name, sort=False):
"""
Build a test function closing over the test arguments.
"""
def data_driven_test_function(self):
test_loc = self.get_test_loc(test_file)
result = list(detect.detect_license(test_loc, perfect=True))
# the detected license is the first member of the returned tuple
license_result = [d[0] for d in result]
try:
if sort:
assert sorted(expected_licenses) == sorted(license_result)
else:
assert expected_licenses == license_result
except:
# on failure, we compare against the full results to get
# additional failure details, including the test_file
if sort:
assert sorted(expected_licenses) == ['test file: ' + test_file] + sorted(result)
else:
assert expected_licenses == ['test file: ' + test_file] + result
data_driven_test_function.__name__ = test_name
data_driven_test_function.funcname = test_name
return data_driven_test_function
class TestLicenseDataDriven(FileBasedTesting):
# test functions are attached to this class at module import time
test_data_dir = join(dirname(__file__), 'data')
build_tests(license_tests=load_license_tests(), clazz=TestLicenseDataDriven)
repo_name: ened/scancode-toolkit | path: tests/licensedcode/test_detection_datadriven.py | language: Python | license: apache-2.0 | size: 7,147 | keyword: ["VisIt"] | text_hash: 99fcfb76cbd9850c1a2d91d3ebb22138c846e8e93a62b0007323467a28fa9bed
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
# The purpose of this test is to detect a change in the _metric_json of MetricsBase objects. Many of the metric
# accessors require _metric_json to have a particular form.
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def metric_json_check():
df = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
# Regression metric json
reg_mod = H2OGradientBoostingEstimator(distribution="gaussian")
reg_mod.train(x=list(range(3,df.ncol)), y="CAPSULE", training_frame=df)
reg_met = reg_mod.model_performance()
reg_metric_json_keys_have = list(reg_met._metric_json.keys())
reg_metric_json_keys_desired = [u'model_category',
u'description',
u'r2',
u'frame',
u'model_checksum',
u'MSE',
u'RMSE',
u'mae',
u'rmsle',
u'__meta',
u'_exclude_fields',
u'scoring_time',
u'predictions',
u'model',
u'duration_in_ms',
u'frame_checksum',
u'nobs',
u'mean_residual_deviance',
u'custom_metric_name',
u'custom_metric_value']
reg_metric_diff = list(set(reg_metric_json_keys_have) - set(reg_metric_json_keys_desired))
assert not reg_metric_diff, "There's a difference between the current ({0}) and the desired ({1}) regression " \
"metric json. The difference is {2}".format(reg_metric_json_keys_have,
reg_metric_json_keys_desired,
reg_metric_diff)
# Regression metric json (GLM)
reg_mod = H2OGeneralizedLinearEstimator(family="gaussian")
reg_mod.train(x=list(range(3,df.ncol)), y="CAPSULE", training_frame=df)
reg_met = reg_mod.model_performance()
reg_metric_json_keys_have = list(reg_met._metric_json.keys())
reg_metric_json_keys_desired = [u'model_category',
u'description',
u'r2',
u'residual_degrees_of_freedom',
u'frame',
u'model_checksum',
u'MSE',
u'RMSE',
u'mae',
u'rmsle',
u'__meta',
u'_exclude_fields',
u'null_deviance',
u'scoring_time',
u'null_degrees_of_freedom',
u'predictions',
u'AIC',
u'model',
u'duration_in_ms',
u'frame_checksum',
u'nobs',
u'residual_deviance',
u'mean_residual_deviance',
u'custom_metric_name',
u'custom_metric_value']
reg_metric_diff = list(set(reg_metric_json_keys_have) - set(reg_metric_json_keys_desired))
assert not reg_metric_diff, "There's a difference between the current ({0}) and the desired ({1}) glm-regression " \
"metric json. The difference is {2}".format(reg_metric_json_keys_have,
reg_metric_json_keys_desired,
reg_metric_diff)
# Binomial metric json
bin_mod = H2OGradientBoostingEstimator(distribution="bernoulli")
df["CAPSULE"] = df["CAPSULE"].asfactor()
bin_mod.train(x=list(range(3,df.ncol)), y="CAPSULE", training_frame=df)
bin_met = bin_mod.model_performance()
bin_metric_json_keys_have = list(bin_met._metric_json.keys())
bin_metric_json_keys_desired = [u'cm',
u'AUC',
u'Gini',
u'model_category',
u'description',
u'mean_per_class_error',
u'r2',
u'frame',
u'model_checksum',
u'MSE',
u'RMSE',
u'__meta',
u'_exclude_fields',
u'gains_lift_table',
u'logloss',
u'scoring_time',
u'thresholds_and_metric_scores',
u'predictions',
u'max_criteria_and_metric_scores',
u'model',
u'duration_in_ms',
u'frame_checksum',
u'nobs',
u'domain',
u'custom_metric_name',
u'custom_metric_value',
u'pr_auc']
bin_metric_diff = list(set(bin_metric_json_keys_have) - set(bin_metric_json_keys_desired))
assert not bin_metric_diff, "There's a difference between the current ({0}) and the desired ({1}) binomial " \
"metric json. The difference is {2}".format(bin_metric_json_keys_have,
bin_metric_json_keys_desired,
bin_metric_diff)
# Binomial metric json (GLM)
bin_mod = H2OGeneralizedLinearEstimator(family="binomial")
bin_mod.train(x=list(range(3,df.ncol)), y="CAPSULE", training_frame=df)
bin_met = bin_mod.model_performance()  # recompute performance for the GLM model
bin_metric_json_keys_have = list(bin_met._metric_json.keys())
bin_metric_json_keys_desired = [u'cm',
u'frame',
u'residual_deviance',
u'max_criteria_and_metric_scores',
u'MSE',
u'RMSE',
u'frame_checksum',
u'nobs',
u'AIC',
u'logloss',
u'Gini',
u'predictions',
u'AUC',
u'description',
u'mean_per_class_error',
u'model_checksum',
u'duration_in_ms',
u'model_category',
u'gains_lift_table',
u'r2',
u'residual_degrees_of_freedom',
u'__meta',
u'_exclude_fields',
u'null_deviance',
u'scoring_time',
u'null_degrees_of_freedom',
u'model',
u'thresholds_and_metric_scores',
u'domain',
u'custom_metric_name',
u'custom_metric_value',
u'pr_auc']
bin_metric_diff = list(set(bin_metric_json_keys_have) - set(bin_metric_json_keys_desired))
assert not bin_metric_diff, "There's a difference between the current ({0}) and the desired ({1}) glm-binomial " \
"metric json. The difference is {2}".format(bin_metric_json_keys_have,
bin_metric_json_keys_desired,
bin_metric_diff)
# Multinomial metric json
df = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/AirlinesTrain.csv.zip"))
myX = ["Origin", "Dest", "IsDepDelayed", "UniqueCarrier", "Distance", "fDayofMonth", "fDayOfWeek"]
myY = "fYear"
mul_mod = H2OGradientBoostingEstimator(distribution="multinomial")
mul_mod.train(x=myX, y=myY, training_frame=df)
mul_met = mul_mod.model_performance()
mul_metric_json_keys_have = list(mul_met._metric_json.keys())
mul_metric_json_keys_desired = [u'cm',
u'model_category',
u'description',
u'mean_per_class_error',
u'AUC',
u'pr_auc',
u'multinomial_auc_table',
u'multinomial_aucpr_table',
u'r2',
u'frame',
u'nobs',
u'model_checksum',
u'MSE',
u'RMSE',
u'__meta',
u'_exclude_fields',
u'logloss',
u'scoring_time',
u'predictions',
u'hit_ratio_table',
u'model',
u'duration_in_ms',
u'frame_checksum',
u'custom_metric_name',
u'custom_metric_value']
mul_metric_diff = list(set(mul_metric_json_keys_have) - set(mul_metric_json_keys_desired))
assert not mul_metric_diff, "There's a difference between the current ({0}) and the desired ({1}) multinomial " \
"metric json. The difference is {2}".format(mul_metric_json_keys_have,
mul_metric_json_keys_desired,
mul_metric_diff)
# Clustering metric json
df = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))
from h2o.estimators.kmeans import H2OKMeansEstimator
clus_mod = H2OKMeansEstimator(k=3, standardize=False)
clus_mod.train(x=list(range(4)), training_frame=df)
clus_met = clus_mod.model_performance()
clus_metric_json_keys_have = list(clus_met._metric_json.keys())
clus_metric_json_keys_desired = [u'tot_withinss',
u'model_category',
u'description',
u'frame',
u'model_checksum',
u'MSE',
u'RMSE',
u'__meta',
u'_exclude_fields',
u'scoring_time',
u'betweenss',
u'predictions',
u'totss',
u'model',
u'duration_in_ms',
u'frame_checksum',
u'nobs',
u'centroid_stats',
u'custom_metric_name',
u'custom_metric_value']
clus_metric_diff = list(set(clus_metric_json_keys_have) - set(clus_metric_json_keys_desired))
assert not clus_metric_diff, "There's a difference between the current ({0}) and the desired ({1}) clustering " \
"metric json. The difference is {2}".format(clus_metric_json_keys_have,
clus_metric_json_keys_desired,
clus_metric_diff)
if __name__ == "__main__":
pyunit_utils.standalone_test(metric_json_check)
else:
metric_json_check()
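# Note (added commentary, not part of the original test): each check above only
# flags keys that are present but not in the desired list. A hypothetical
# symmetric check would also catch keys that were removed, for example:
#   diff = set(keys_have).symmetric_difference(keys_desired)
#   assert not diff, "metric json keys changed: {0}".format(sorted(diff))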
repo_name: michalkurka/h2o-3 | path: h2o-py/tests/testdir_misc/pyunit_metric_json_check.py | language: Python | license: apache-2.0 | size: 13,450 | keyword: ["Gaussian"] | text_hash: 6166adbe6bfb3149f702d87a09a3dba4c82525446f3ff5a5c6301b6a8c4e1919
|
import os
from astropy.io.fits import open as fits_open
import numpy as np
from astropy.convolution import convolve as ap_convolve
from astropy.convolution import Box1DKernel
from scipy.signal import medfilt # Median Filter from scipy
from scipy.interpolate import interp1d
def open_kepler(filename):
""" Opens a Kepler filename and retrieves corrected flux """
if os.path.exists(filename)==False:
print "{} not found".format(filename)
return [-1],[-1],[-1]
hdu = fits_open(filename)
#print hdu[1].data.dtype.names
time = hdu[1].data.field("TIME")
## Use the corrected flux, not raw
flux = hdu[1].data.field("PDCSAP_FLUX")
flux_err = hdu[1].data.field("PDCSAP_FLUX_ERR")
hdu.close()
return time,flux,flux_err
def calc_sigma(time,flux,flux_err):
"""
Following McQuillan et al. (2013), determines the standard deviation
of the flux by iteratively smoothing the flux and removing outliers,
then calculating the standard deviation of the residuals between the
smoothed flux and the original flux.
"""
x,y,yerr = np.float64(time),np.float64(flux),np.float64(flux_err)
#print len(x),len(y),len(yerr)
bad = (np.isnan(y) | np.isnan(yerr) | np.isnan(x))
good = np.where(bad==False)[0]
x,y,yerr = x[good],y[good],yerr[good]
#print len(x),len(y),len(yerr)
x1,y1,yerr1 = np.copy(x),np.copy(y),np.copy(yerr)
# Apply a median filter then a boxcar filter and clip 3-sigma outliers (a single pass here)
for i in range(1):
y_med = medfilt(y1,11)
y_boxed = ap_convolve(y_med, Box1DKernel(11),boundary="extend")
residuals = y_med - y1
sigma_residuals = np.std(residuals)
to_clip = (abs(residuals)>(3*sigma_residuals))
to_keep = np.where(to_clip==False)[0]
x1,y1,yerr1 = x1[to_keep],y1[to_keep],yerr1[to_keep]
#print len(x1),len(y1),len(yerr1)
# After outliers have been removed, apply the smoothing one last time
# then determine the residuals from this smoothed curve.
y_smoothed1 = medfilt(y1,11)
y_smoothed = ap_convolve(y_smoothed1, Box1DKernel(11))
# smoothing screws up the endpoints, which screws up the std(residuals)
residuals = (y_smoothed - y1)[10:-10]
sigma_res = np.std(residuals)
return x,y,yerr,sigma_res
def fill_gaps(time,flux,flux_err):
"""
patches gaps in the Kepler data by linearly interpolating between the
points on either side of the gap, and then adding noise drawn from a
Gaussian with width equal to the standard deviation of the residuals
calculated by calc_sigma()
"""
x,y,yerr,sigma_res = calc_sigma(time,flux,flux_err)
cadence = np.median(np.diff(x))
tolerance = 5e-4 # There's some very slight variation in cadence that I'm ignoring
missing = np.where(abs(np.diff(x)-cadence)>tolerance)[0]
# The actual missing points will be between missing and missing+1
number_missing = np.asarray(np.round(np.diff(x)[missing]/cadence) - 1,int)
single_missing = missing[number_missing==1]
mult_missing = missing[number_missing>1]
missing_times = (x[single_missing] + x[single_missing+1])/2.0
for i in mult_missing:
#print x[i]-x[i+1],cadence
num_missing = np.int((x[i+1] - x[i])/cadence)
#print num_missing,x[missing],x[missing+1]
addl_times = x[i] + [cadence*(j+1) for j in range(num_missing)]
#print addl_times
missing_times = np.append(missing_times, addl_times)
#print missing_times
# linearly interpolate between existing points
interp_function = interp1d(x,y,kind="linear")
replacement_flux = interp_function(missing_times)
#print replacement_flux
replacement_flux = replacement_flux + np.random.normal(loc=0,scale=sigma_res,size=np.shape(missing_times))
replacement_flux_errs = np.ones(len(missing_times))*sigma_res
#print np.median(y_smoothed),np.median(replacement_flux)
# Reassemble the time series. If a duplicate point was accidentally added, remove it
x2 = np.append(x,missing_times)
y2 = np.append(y,replacement_flux)
y2err = np.append(yerr,replacement_flux_errs)
sort_loc = np.argsort(x2)
#print len(x2),len(sort_loc),len(y2),len(y2err)
x3,y3,yerr3 = x2[sort_loc],y2[sort_loc],y2err[sort_loc]
extra = (abs(np.diff(x3))<tolerance)
good = np.where(extra==False)[0]
x,y,yerr = x3[good],y3[good],yerr3[good]
return x,y,yerr
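# Illustrative usage sketch (not part of the original script); the filename is
# hypothetical and only mimics the Kepler long-cadence naming convention:
#
#   time, flux, flux_err = open_kepler("kplr001234567-2009166043257_llc.fits")
#   x, y, yerr = fill_gaps(time, flux, flux_err)
#
# fill_gaps() returns an evenly sampled light curve in which gaps have been
# filled by linear interpolation plus Gaussian noise of width sigma_res.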
repo_name: stephtdouglas/k2spin | path: fix_kepler.py | language: Python | license: mit | size: 4,505 | keyword: ["Gaussian"] | text_hash: 4ad02c7a290b5720844827dd9a1d379a8954ba78424c6cd819a6361d6ffefbdf
|
try: paraview.simple
except: from paraview.simple import *
import numpy as np
from mpi4py import MPI
import os
import csv
from scipy import interpolate
import gc
import sys
gc.enable()
comm = MPI.COMM_WORLD
label = 'm_50_7'
label = label+'_3Db_particles'
labelo = label+'_particles'
basename = 'mli_checkpoint'
## READ archive (too many points... somehow)
# args: name, dayi, dayf, days
#label = sys.argv[1]
#basename = sys.argv[2]
#dayi = int(sys.argv[3])
#dayf = int(sys.argv[4])
#days = int(sys.argv[5])
tt = int(sys.argv[1]) - 1
path = '/scratch/jmensa/'+label+'/'
try: os.stat('./plot/'+label)
except OSError: os.mkdir('./plot/'+label)
Xlist = np.linspace(0,10000,200)
Ylist = np.linspace(0,10000,200)
Zlist = np.linspace(0,-50,51)
[X,Y] = np.meshgrid(Xlist,Ylist)
depths = [17]
size = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
nl = len(Zlist)/size
ll = len(Zlist)%size
for k in range(len(depths)):
kl = str(k+4)
mli_pvtu = XMLPartitionedUnstructuredGridReader( FileName=[path+'/mli_checkpoint_'+str(tt)+'.pvtu'] )
mli_pvtu.PointArrayStatus = ['Tracer_'+kl+'_CG']
sliceFilter = Slice(mli_pvtu)
sliceFilter.SliceType.Normal = [0,0,1]
if rank == 0:
Tr = np.zeros((len(Xlist),len(Ylist),len(Zlist)))
for n in range(nl+ll):
layer = n+rank*nl
print 'layer:', rank, layer
sliceFilter.SliceType.Origin = [0,0,-1*layer]
DataSliceFile = paraview.servermanager.Fetch(sliceFilter)
points = DataSliceFile.GetPoints()
numPoints = DataSliceFile.GetNumberOfPoints()
#
data=np.zeros((numPoints))
coords=np.zeros((numPoints,3))
#
for x in xrange(numPoints):
data[x] = DataSliceFile.GetPointData().GetArray('Tracer_'+kl+'_CG').GetValue(x)
coords[x] = points.GetPoint(x)
Tr[:,:,layer] = interpolate.griddata((coords[:,0],coords[:,1]),data,(X,Y),method='linear')
# print rank, Tr[:,:,:]
if rank > 0:
Tr = np.zeros((len(Xlist),len(Ylist),nl))
for n in xrange(nl):
layer = n+rank*nl
print 'layer:', rank, layer
sliceFilter.SliceType.Origin = [0,0,-1*layer]
DataSliceFile = paraview.servermanager.Fetch(sliceFilter)
points = DataSliceFile.GetPoints()
numPoints = DataSliceFile.GetNumberOfPoints()
#
data=np.zeros((numPoints))
coords=np.zeros((numPoints,3))
#
for x in xrange(numPoints):
data[x] = DataSliceFile.GetPointData().GetArray('Tracer_'+kl+'_CG').GetValue(x)
coords[x] = points.GetPoint(x)
Tr[:,:,n] = interpolate.griddata((coords[:,0],coords[:,1]),data,(X,Y),method='linear')
# print rank, Tr[:,:,:]
comm.send(nl*rank+ll, dest=0, tag=10)
comm.send(Tr, dest=0, tag=11)
if rank == 0:
for s in range(size-1):
print 's', s+1
l = comm.recv(source=s+1, tag=10)
print 'l', l
Tr[:,:,l:l+nl] = comm.recv(source=s+1, tag=11)
print Tr
fd = open('./csv/Tracer_'+labelo+'_Tr'+kl+'_'+str(tt)+'.csv','a')
print Tr[:,:,:]
for z in xrange(len(Zlist)):
print z
for j in xrange(len(Ylist)):
for i in xrange(len(Xlist)):
fd.write(str(Tr[i,j,z])+', ')
fd.write('\n')
fd.close()
del mli_pvtu, Tr, coords, data, numPoints, points, DataSliceFile, sliceFilter
gc.collect()
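# Presumed invocation (an assumption, not documented in the script): run under
# MPI with a ParaView-enabled Python so paraview.simple and mpi4py are both
# importable, passing the checkpoint index as the only argument, e.g.:
#   mpirun -np 4 pvbatch extract_Tr_3_day.py 10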
repo_name: jungla/ICOM-fluidity-toolbox | path: Detectors/extract_Tr_3_day.py | language: Python | license: gpl-2.0 | size: 3,166 | keyword: ["ParaView"] | text_hash: 469918f4f4c4d51c7f0c284bef0f92d57571f5dde4c8ecd8d909cc15f1ff9956
|
#
# Copyright (C) 2007, Mark Lee
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# $Revision: 446 $
# $Date: 2009-01-22 20:20:21 -0700 (Thu, 22 Jan 2009) $
# $Author: brian@tannerpages.com $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/rlglue/environment/EnvironmentLoaderScript.py $
import sys
import os
import rlglue.network.Network as Network
from ClientEnvironment import ClientEnvironment
import EnvironmentLoader as EnvironmentLoader
def main():
usage = "PYTHONPATH=<Path to RLGlue> python -c 'import rlglue.environment.EnvironmentLoaderScript' <Environment>";
envVars = "The following environment variables are used by the environment to control its function:\n" + \
"RLGLUE_HOST : If set the agent will use this ip or hostname to connect to rather than " + Network.kLocalHost + "\n" + \
"RLGLUE_PORT : If set the agent will use this port to connect on rather than " + str(Network.kDefaultPort) + "\n"
if (len(sys.argv) < 2):
print (usage)
print (envVars)
sys.exit(1)
EnvironmentLoader.loadEnvironmentLikeScript()
main()
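# Illustrative invocation matching the usage string above (the environment
# module name "MyEnvironment" is hypothetical):
#   RLGLUE_HOST=127.0.0.1 RLGLUE_PORT=4096 PYTHONPATH=/path/to/rlglue \
#   python -c 'import rlglue.environment.EnvironmentLoaderScript' MyEnvironment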
repo_name: okkhoy/mo-rlglue-python-codec | path: rlglue/environment/EnvironmentLoaderScript.py | language: Python | license: mit | size: 1,631 | keyword: ["Brian"] | text_hash: e8481c0589f9cc5a425d16f16381790638ea86fbcf98cdc32e03f4e43b746b21
|
# Copyright Anne M. Archibald 2008
# Released under the scipy license
import os
from numpy.testing import (assert_equal, assert_array_equal, assert_,
assert_almost_equal, assert_array_almost_equal,
assert_allclose)
from pytest import raises as assert_raises
import pytest
from platform import python_implementation
import numpy as np
from scipy.spatial import KDTree, Rectangle, distance_matrix, cKDTree
from scipy.spatial.ckdtree import cKDTreeNode
from scipy.spatial import minkowski_distance
import itertools
@pytest.fixture(params=[KDTree, cKDTree])
def kdtree_type(request):
return request.param
def KDTreeTest(kls):
"""Class decorator to create test cases for KDTree and cKDTree
Tests use the class variable ``kdtree_type`` as the tree constructor.
"""
if not kls.__name__.startswith('_Test'):
raise RuntimeError("Expected a class name starting with _Test")
for tree in (KDTree, cKDTree):
test_name = kls.__name__[1:] + '_' + tree.__name__
if test_name in globals():
raise RuntimeError("Duplicated test name: " + test_name)
# Create a new sub-class with kdtree_type defined
test_case = type(test_name, (kls,), {'kdtree_type': tree})
globals()[test_name] = test_case
return kls
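# For example (added illustration, not in the original file): decorating a class
# named _Test_random generates two concrete test classes in this module's
# globals, roughly equivalent to:
#
#   Test_random_KDTree = type('Test_random_KDTree', (_Test_random,),
#                             {'kdtree_type': KDTree})
#   Test_random_cKDTree = type('Test_random_cKDTree', (_Test_random,),
#                              {'kdtree_type': cKDTree})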
def distance_box(a, b, p, boxsize):
diff = a - b
diff[diff > 0.5 * boxsize] -= boxsize
diff[diff < -0.5 * boxsize] += boxsize
d = minkowski_distance(diff, 0, p)
return d
class ConsistencyTests:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
def test_nearest(self):
x = self.x
d, i = self.kdtree.query(x, 1)
assert_almost_equal(d**2, np.sum((x-self.data[i])**2))
eps = 1e-8
assert_(np.all(np.sum((self.data-x[np.newaxis, :])**2, axis=1) > d**2-eps))
def test_m_nearest(self):
x = self.x
m = self.m
dd, ii = self.kdtree.query(x, m)
d = np.amax(dd)
i = ii[np.argmax(dd)]
assert_almost_equal(d**2, np.sum((x-self.data[i])**2))
eps = 1e-8
assert_equal(np.sum(np.sum((self.data-x[np.newaxis, :])**2, axis=1) < d**2+eps), m)
def test_points_near(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd, ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d**2, np.sum((x-self.data[near_i])**2))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d, d))
assert_equal(np.sum(self.distance(self.data, x, 2) < d**2+eps), hits)
def test_points_near_l1(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=1, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd, ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d, self.distance(x, self.data[near_i], 1))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d, d))
assert_equal(np.sum(self.distance(self.data, x, 1) < d+eps), hits)
def test_points_near_linf(self):
x = self.x
d = self.d
dd, ii = self.kdtree.query(x, k=self.kdtree.n, p=np.inf, distance_upper_bound=d)
eps = 1e-8
hits = 0
for near_d, near_i in zip(dd, ii):
if near_d == np.inf:
continue
hits += 1
assert_almost_equal(near_d, self.distance(x, self.data[near_i], np.inf))
assert_(near_d < d+eps, "near_d=%g should be less than %g" % (near_d, d))
assert_equal(np.sum(self.distance(self.data, x, np.inf) < d+eps), hits)
def test_approx(self):
x = self.x
k = self.k
eps = 0.1
d_real, i_real = self.kdtree.query(x, k)
d, i = self.kdtree.query(x, k, eps=eps)
assert_(np.all(d <= d_real*(1+eps)))
@KDTreeTest
class _Test_random(ConsistencyTests):
def setup_method(self):
self.n = 100
self.m = 4
np.random.seed(1234)
self.data = np.random.randn(self.n, self.m)
self.kdtree = self.kdtree_type(self.data, leafsize=2)
self.x = np.random.randn(self.m)
self.d = 0.2
self.k = 10
@KDTreeTest
class _Test_random_far(_Test_random):
def setup_method(self):
super().setup_method()
self.x = np.random.randn(self.m)+10
@KDTreeTest
class _Test_small(ConsistencyTests):
def setup_method(self):
self.data = np.array([[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]])
self.kdtree = self.kdtree_type(self.data)
self.n = self.kdtree.n
self.m = self.kdtree.m
np.random.seed(1234)
self.x = np.random.randn(3)
self.d = 0.5
self.k = 4
def test_nearest(self):
assert_array_equal(
self.kdtree.query((0, 0, 0.1), 1),
(0.1, 0))
def test_nearest_two(self):
assert_array_equal(
self.kdtree.query((0, 0, 0.1), 2),
([0.1, 0.9], [0, 1]))
@KDTreeTest
class _Test_small_nonleaf(_Test_small):
def setup_method(self):
super().setup_method()
self.kdtree = self.kdtree_type(self.data, leafsize=1)
class Test_vectorization_KDTree:
def setup_method(self):
self.data = np.array([[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]])
self.kdtree = KDTree(self.data)
def test_single_query(self):
d, i = self.kdtree.query(np.array([0, 0, 0]))
assert_(isinstance(d, float))
assert_(np.issubdtype(i, np.signedinteger))
def test_vectorized_query(self):
d, i = self.kdtree.query(np.zeros((2, 4, 3)))
assert_equal(np.shape(d), (2, 4))
assert_equal(np.shape(i), (2, 4))
def test_single_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.array([0, 0, 0]), k=kk)
assert_equal(np.shape(d), (kk,))
assert_equal(np.shape(i), (kk,))
assert_(np.all(~np.isfinite(d[-s:])))
assert_(np.all(i[-s:] == self.kdtree.n))
def test_vectorized_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.zeros((2, 4, 3)), k=kk)
assert_equal(np.shape(d), (2, 4, kk))
assert_equal(np.shape(i), (2, 4, kk))
assert_(np.all(~np.isfinite(d[:, :, -s:])))
assert_(np.all(i[:, :, -s:] == self.kdtree.n))
@pytest.mark.parametrize('r', [0.8, 1.1])
def test_single_query_all_neighbors(self, r):
np.random.seed(1234)
point = np.random.rand(self.kdtree.m)
with pytest.warns(DeprecationWarning, match="k=None"):
d, i = self.kdtree.query(point, k=None, distance_upper_bound=r)
assert isinstance(d, list)
assert isinstance(i, list)
assert_array_equal(np.array(d) <= r, True) # All within bounds
# results are sorted by distance
assert all(a <= b for a, b in zip(d, d[1:]))
assert_allclose( # Distances are correct
d, minkowski_distance(point, self.kdtree.data[i, :]))
# Compare to brute force
dist = minkowski_distance(point, self.kdtree.data)
assert_array_equal(sorted(i), (dist <= r).nonzero()[0])
def test_vectorized_query_all_neighbors(self):
query_shape = (2, 4)
r = 1.1
np.random.seed(1234)
points = np.random.rand(*query_shape, self.kdtree.m)
with pytest.warns(DeprecationWarning, match="k=None"):
d, i = self.kdtree.query(points, k=None, distance_upper_bound=r)
assert_equal(np.shape(d), query_shape)
assert_equal(np.shape(i), query_shape)
for idx in np.ndindex(query_shape):
dist, ind = d[idx], i[idx]
assert isinstance(dist, list)
assert isinstance(ind, list)
assert_array_equal(np.array(dist) <= r, True) # All within bounds
# results are sorted by distance
assert all(a <= b for a, b in zip(dist, dist[1:]))
assert_allclose( # Distances are correct
dist, minkowski_distance(
points[idx], self.kdtree.data[ind]))
class Test_vectorization_cKDTree:
def setup_method(self):
self.data = np.array([[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1]])
self.kdtree = cKDTree(self.data)
def test_single_query(self):
d, i = self.kdtree.query([0, 0, 0])
assert_(isinstance(d, float))
assert_(isinstance(i, int))
def test_vectorized_query(self):
d, i = self.kdtree.query(np.zeros((2, 4, 3)))
assert_equal(np.shape(d), (2, 4))
assert_equal(np.shape(i), (2, 4))
def test_vectorized_query_noncontiguous_values(self):
np.random.seed(1234)
qs = np.random.randn(3, 1000).T
ds, i_s = self.kdtree.query(qs)
for q, d, i in zip(qs, ds, i_s):
assert_equal(self.kdtree.query(q), (d, i))
def test_single_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query([0, 0, 0], k=kk)
assert_equal(np.shape(d), (kk,))
assert_equal(np.shape(i), (kk,))
assert_(np.all(~np.isfinite(d[-s:])))
assert_(np.all(i[-s:] == self.kdtree.n))
def test_vectorized_query_multiple_neighbors(self):
s = 23
kk = self.kdtree.n+s
d, i = self.kdtree.query(np.zeros((2, 4, 3)), k=kk)
assert_equal(np.shape(d), (2, 4, kk))
assert_equal(np.shape(i), (2, 4, kk))
assert_(np.all(~np.isfinite(d[:, :, -s:])))
assert_(np.all(i[:, :, -s:] == self.kdtree.n))
class ball_consistency:
tol = 0.0
def distance(self, a, b, p):
return minkowski_distance(a * 1.0, b * 1.0, p)
def test_in_ball(self):
x = np.atleast_2d(self.x)
d = np.broadcast_to(self.d, x.shape[:-1])
l = self.T.query_ball_point(x, self.d, p=self.p, eps=self.eps)
for i, ind in enumerate(l):
dist = self.distance(self.data[ind], x[i], self.p) - d[i]*(1.+self.eps)
norm = self.distance(self.data[ind], x[i], self.p) + d[i]*(1.+self.eps)
assert_array_equal(dist < self.tol * norm, True)
def test_found_all(self):
x = np.atleast_2d(self.x)
d = np.broadcast_to(self.d, x.shape[:-1])
l = self.T.query_ball_point(x, self.d, p=self.p, eps=self.eps)
for i, ind in enumerate(l):
c = np.ones(self.T.n, dtype=bool)
c[ind] = False
dist = self.distance(self.data[c], x[i], self.p) - d[i]/(1.+self.eps)
norm = self.distance(self.data[c], x[i], self.p) + d[i]/(1.+self.eps)
assert_array_equal(dist > -self.tol * norm, True)
@KDTreeTest
class _Test_random_ball(ball_consistency):
def setup_method(self):
n = 100
m = 4
np.random.seed(1234)
self.data = np.random.randn(n, m)
self.T = self.kdtree_type(self.data, leafsize=2)
self.x = np.random.randn(m)
self.p = 2.
self.eps = 0
self.d = 0.2
@KDTreeTest
class _Test_random_ball_periodic(ball_consistency):
def distance(self, a, b, p):
return distance_box(a, b, p, 1.0)
def setup_method(self):
n = 10000
m = 4
np.random.seed(1234)
self.data = np.random.uniform(size=(n, m))
self.T = self.kdtree_type(self.data, leafsize=2, boxsize=1)
self.x = np.full(m, 0.1)
self.p = 2.
self.eps = 0
self.d = 0.2
def test_in_ball_outside(self):
l = self.T.query_ball_point(self.x + 1.0, self.d, p=self.p, eps=self.eps)
for i in l:
assert_(self.distance(self.data[i], self.x, self.p) <= self.d*(1.+self.eps))
l = self.T.query_ball_point(self.x - 1.0, self.d, p=self.p, eps=self.eps)
for i in l:
assert_(self.distance(self.data[i], self.x, self.p) <= self.d*(1.+self.eps))
def test_found_all_outside(self):
c = np.ones(self.T.n, dtype=bool)
l = self.T.query_ball_point(self.x + 1.0, self.d, p=self.p, eps=self.eps)
c[l] = False
assert_(np.all(self.distance(self.data[c], self.x, self.p) >= self.d/(1.+self.eps)))
l = self.T.query_ball_point(self.x - 1.0, self.d, p=self.p, eps=self.eps)
c[l] = False
assert_(np.all(self.distance(self.data[c], self.x, self.p) >= self.d/(1.+self.eps)))
@KDTreeTest
class _Test_random_ball_largep_issue9890(ball_consistency):
# allow some roundoff errors due to numerical issues
tol = 1e-13
def setup_method(self):
n = 1000
m = 2
np.random.seed(123)
self.data = np.random.randint(100, 1000, size=(n, m))
self.T = self.kdtree_type(self.data)
self.x = self.data
self.p = 100
self.eps = 0
self.d = 10
@KDTreeTest
class _Test_random_ball_approx(_Test_random_ball):
def setup_method(self):
super().setup_method()
self.eps = 0.1
@KDTreeTest
class _Test_random_ball_approx_periodic(_Test_random_ball):
def setup_method(self):
super().setup_method()
self.eps = 0.1
@KDTreeTest
class _Test_random_ball_far(_Test_random_ball):
def setup_method(self):
super().setup_method()
self.d = 2.
@KDTreeTest
class _Test_random_ball_far_periodic(_Test_random_ball_periodic):
def setup_method(self):
super().setup_method()
self.d = 2.
@KDTreeTest
class _Test_random_ball_l1(_Test_random_ball):
def setup_method(self):
super().setup_method()
self.p = 1
@KDTreeTest
class _Test_random_ball_linf(_Test_random_ball):
def setup_method(self):
super().setup_method()
self.p = np.inf
def test_random_ball_vectorized(kdtree_type):
n = 20
m = 5
np.random.seed(1234)
T = kdtree_type(np.random.randn(n, m))
r = T.query_ball_point(np.random.randn(2, 3, m), 1)
assert_equal(r.shape, (2, 3))
assert_(isinstance(r[0, 0], list))
def test_query_ball_point_multithreading(kdtree_type):
np.random.seed(0)
n = 5000
k = 2
points = np.random.randn(n, k)
T = kdtree_type(points)
l1 = T.query_ball_point(points, 0.003, workers=1)
l2 = T.query_ball_point(points, 0.003, workers=64)
l3 = T.query_ball_point(points, 0.003, workers=-1)
for i in range(n):
if l1[i] or l2[i]:
assert_array_equal(l1[i], l2[i])
for i in range(n):
if l1[i] or l3[i]:
assert_array_equal(l1[i], l3[i])
def test_n_jobs():
# Test for the deprecated argument name "n_jobs" aliasing "workers"
points = np.random.randn(50, 2)
T = cKDTree(points)
with pytest.deprecated_call(match="n_jobs argument has been renamed"):
T.query_ball_point(points, 0.003, n_jobs=1)
with pytest.deprecated_call(match="n_jobs argument has been renamed"):
T.query(points, 1, n_jobs=1)
with pytest.raises(TypeError, match="Unexpected keyword argument"):
T.query_ball_point(points, 0.003, workers=1, n_jobs=1)
with pytest.raises(TypeError, match="Unexpected keyword argument"):
T.query(points, 1, workers=1, n_jobs=1)
class two_trees_consistency:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
def test_all_in_ball(self):
r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
for i, l in enumerate(r):
for j in l:
assert_(self.distance(self.data1[i], self.data2[j], self.p) <= self.d*(1.+self.eps))
def test_found_all(self):
r = self.T1.query_ball_tree(self.T2, self.d, p=self.p, eps=self.eps)
for i, l in enumerate(r):
c = np.ones(self.T2.n, dtype=bool)
c[l] = False
assert_(np.all(self.distance(self.data2[c], self.data1[i], self.p) >= self.d/(1.+self.eps)))
@KDTreeTest
class _Test_two_random_trees(two_trees_consistency):
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.randn(n, m)
self.T1 = self.kdtree_type(self.data1, leafsize=2)
self.data2 = np.random.randn(n, m)
self.T2 = self.kdtree_type(self.data2, leafsize=2)
self.p = 2.
self.eps = 0
self.d = 0.2
@KDTreeTest
class _Test_two_random_trees_periodic(two_trees_consistency):
def distance(self, a, b, p):
return distance_box(a, b, p, 1.0)
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
self.data1 = np.random.uniform(size=(n, m))
self.T1 = self.kdtree_type(self.data1, leafsize=2, boxsize=1.0)
self.data2 = np.random.uniform(size=(n, m))
self.T2 = self.kdtree_type(self.data2, leafsize=2, boxsize=1.0)
self.p = 2.
self.eps = 0
self.d = 0.2
@KDTreeTest
class _Test_two_random_trees_far(_Test_two_random_trees):
def setup_method(self):
super().setup_method()
self.d = 2
@KDTreeTest
class _Test_two_random_trees_far_periodic(_Test_two_random_trees_periodic):
def setup_method(self):
super().setup_method()
self.d = 2
@KDTreeTest
class _Test_two_random_trees_linf(_Test_two_random_trees):
def setup_method(self):
super().setup_method()
self.p = np.inf
@KDTreeTest
class _Test_two_random_trees_linf_periodic(_Test_two_random_trees_periodic):
def setup_method(self):
super().setup_method()
self.p = np.inf
class Test_rectangle:
def setup_method(self):
self.rect = Rectangle([0, 0], [1, 1])
def test_min_inside(self):
assert_almost_equal(self.rect.min_distance_point([0.5, 0.5]), 0)
def test_min_one_side(self):
assert_almost_equal(self.rect.min_distance_point([0.5, 1.5]), 0.5)
def test_min_two_sides(self):
assert_almost_equal(self.rect.min_distance_point([2, 2]), np.sqrt(2))
def test_max_inside(self):
assert_almost_equal(self.rect.max_distance_point([0.5, 0.5]), 1/np.sqrt(2))
def test_max_one_side(self):
assert_almost_equal(self.rect.max_distance_point([0.5, 1.5]), np.hypot(0.5, 1.5))
def test_max_two_sides(self):
assert_almost_equal(self.rect.max_distance_point([2, 2]), 2*np.sqrt(2))
def test_split(self):
less, greater = self.rect.split(0, 0.1)
assert_array_equal(less.maxes, [0.1, 1])
assert_array_equal(less.mins, [0, 0])
assert_array_equal(greater.maxes, [1, 1])
assert_array_equal(greater.mins, [0.1, 0])
def test_distance_l2():
assert_almost_equal(minkowski_distance([0, 0], [1, 1], 2), np.sqrt(2))
def test_distance_l1():
assert_almost_equal(minkowski_distance([0, 0], [1, 1], 1), 2)
def test_distance_linf():
assert_almost_equal(minkowski_distance([0, 0], [1, 1], np.inf), 1)
def test_distance_vectorization():
np.random.seed(1234)
x = np.random.randn(10, 1, 3)
y = np.random.randn(1, 7, 3)
assert_equal(minkowski_distance(x, y).shape, (10, 7))
class count_neighbors_consistency:
def test_one_radius(self):
r = 0.2
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2, r)]))
def test_large_radius(self):
r = 1000
assert_equal(self.T1.count_neighbors(self.T2, r),
np.sum([len(l) for l in self.T1.query_ball_tree(self.T2, r)]))
def test_multiple_radius(self):
rs = np.exp(np.linspace(np.log(0.01), np.log(10), 3))
results = self.T1.count_neighbors(self.T2, rs)
assert_(np.all(np.diff(results) >= 0))
for r, result in zip(rs, results):
assert_equal(self.T1.count_neighbors(self.T2, r), result)
@KDTreeTest
class _Test_count_neighbors(count_neighbors_consistency):
def setup_method(self):
n = 50
m = 2
np.random.seed(1234)
self.T1 = self.kdtree_type(np.random.randn(n, m), leafsize=2)
self.T2 = self.kdtree_type(np.random.randn(n, m), leafsize=2)
class sparse_distance_matrix_consistency:
def distance(self, a, b, p):
return minkowski_distance(a, b, p)
def test_consistency_with_neighbors(self):
M = self.T1.sparse_distance_matrix(self.T2, self.r)
r = self.T1.query_ball_tree(self.T2, self.r)
for i, l in enumerate(r):
for j in l:
assert_almost_equal(M[i, j],
self.distance(self.T1.data[i], self.T2.data[j], self.p),
decimal=14)
for ((i, j), d) in M.items():
assert_(j in r[i])
def test_zero_distance(self):
# raises an exception for bug 870 (FIXME: Does it?)
self.T1.sparse_distance_matrix(self.T1, self.r)
def test_consistency(self):
# Test consistency with a distance_matrix
M1 = self.T1.sparse_distance_matrix(self.T2, self.r)
expected = distance_matrix(self.T1.data, self.T2.data)
expected[expected > self.r] = 0
assert_array_almost_equal(M1.todense(), expected, decimal=14)
def test_against_logic_error_regression(self):
# regression test for gh-5077 logic error
np.random.seed(0)
too_many = np.array(np.random.randn(18, 2), dtype=int)
tree = self.kdtree_type(
too_many, balanced_tree=False, compact_nodes=False)
d = tree.sparse_distance_matrix(tree, 3).todense()
assert_array_almost_equal(d, d.T, decimal=14)
def test_ckdtree_return_types(self):
# brute-force reference
ref = np.zeros((self.n, self.n))
for i in range(self.n):
for j in range(self.n):
v = self.data1[i, :] - self.data2[j, :]
ref[i, j] = np.dot(v, v)
ref = np.sqrt(ref)
ref[ref > self.r] = 0.
# test return type 'dict'
dist = np.zeros((self.n, self.n))
r = self.T1.sparse_distance_matrix(self.T2, self.r, output_type='dict')
for i, j in r.keys():
dist[i, j] = r[(i, j)]
assert_array_almost_equal(ref, dist, decimal=14)
# test return type 'ndarray'
dist = np.zeros((self.n, self.n))
r = self.T1.sparse_distance_matrix(self.T2, self.r,
output_type='ndarray')
for k in range(r.shape[0]):
i = r['i'][k]
j = r['j'][k]
v = r['v'][k]
dist[i, j] = v
assert_array_almost_equal(ref, dist, decimal=14)
# test return type 'dok_matrix'
r = self.T1.sparse_distance_matrix(self.T2, self.r,
output_type='dok_matrix')
assert_array_almost_equal(ref, r.todense(), decimal=14)
# test return type 'coo_matrix'
r = self.T1.sparse_distance_matrix(self.T2, self.r,
output_type='coo_matrix')
assert_array_almost_equal(ref, r.todense(), decimal=14)
@KDTreeTest
class _Test_sparse_distance_matrix(sparse_distance_matrix_consistency):
def setup_method(self):
n = 50
m = 4
np.random.seed(1234)
data1 = np.random.randn(n, m)
data2 = np.random.randn(n, m)
self.T1 = self.kdtree_type(data1, leafsize=2)
self.T2 = self.kdtree_type(data2, leafsize=2)
self.r = 0.5
self.p = 2
self.data1 = data1
self.data2 = data2
self.n = n
self.m = m
def test_distance_matrix():
m = 10
n = 11
k = 4
np.random.seed(1234)
xs = np.random.randn(m, k)
ys = np.random.randn(n, k)
ds = distance_matrix(xs, ys)
assert_equal(ds.shape, (m, n))
for i in range(m):
for j in range(n):
assert_almost_equal(minkowski_distance(xs[i], ys[j]), ds[i, j])
def test_distance_matrix_looping():
m = 10
n = 11
k = 4
np.random.seed(1234)
xs = np.random.randn(m, k)
ys = np.random.randn(n, k)
ds = distance_matrix(xs, ys)
dsl = distance_matrix(xs, ys, threshold=1)
assert_equal(ds, dsl)
def check_onetree_query(T, d):
r = T.query_ball_tree(T, d)
s = set()
for i, l in enumerate(r):
for j in l:
if i < j:
s.add((i, j))
assert_(s == T.query_pairs(d))
def test_onetree_query(kdtree_type):
np.random.seed(0)
n = 50
k = 4
points = np.random.randn(n, k)
T = kdtree_type(points)
check_onetree_query(T, 0.1)
points = np.random.randn(3*n, k)
points[:n] *= 0.001
points[n:2*n] += 2
T = kdtree_type(points)
check_onetree_query(T, 0.1)
check_onetree_query(T, 0.001)
check_onetree_query(T, 0.00001)
check_onetree_query(T, 1e-6)
def test_query_pairs_single_node(kdtree_type):
tree = kdtree_type([[0, 1]])
assert_equal(tree.query_pairs(0.5), set())
def test_kdtree_query_pairs(kdtree_type):
np.random.seed(0)
n = 50
k = 2
r = 0.1
r2 = r**2
points = np.random.randn(n, k)
T = kdtree_type(points)
# brute force reference
brute = set()
for i in range(n):
for j in range(i+1, n):
v = points[i, :] - points[j, :]
if np.dot(v, v) <= r2:
brute.add((i, j))
l0 = sorted(brute)
# test default return type
s = T.query_pairs(r)
l1 = sorted(s)
assert_array_equal(l0, l1)
# test return type 'set'
s = T.query_pairs(r, output_type='set')
l1 = sorted(s)
assert_array_equal(l0, l1)
# test return type 'ndarray'
s = set()
arr = T.query_pairs(r, output_type='ndarray')
for i in range(arr.shape[0]):
s.add((int(arr[i, 0]), int(arr[i, 1])))
l2 = sorted(s)
assert_array_equal(l0, l2)
def test_ball_point_ints(kdtree_type):
# Regression test for #1373.
x, y = np.mgrid[0:4, 0:4]
points = list(zip(x.ravel(), y.ravel()))
tree = kdtree_type(points)
assert_equal(sorted([4, 8, 9, 12]),
sorted(tree.query_ball_point((2, 0), 1)))
points = np.asarray(points, dtype=float)
tree = kdtree_type(points)
assert_equal(sorted([4, 8, 9, 12]),
sorted(tree.query_ball_point((2, 0), 1)))
def test_kdtree_comparisons():
# Regression test: node comparisons were done wrong in 0.12 w/Py3.
nodes = [KDTree.node() for _ in range(3)]
assert_equal(sorted(nodes), sorted(nodes[::-1]))
def test_kdtree_build_modes(kdtree_type):
# check if different build modes for KDTree give similar query results
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T1 = kdtree_type(points).query(points, k=5)[-1]
T2 = kdtree_type(points, compact_nodes=False).query(points, k=5)[-1]
T3 = kdtree_type(points, balanced_tree=False).query(points, k=5)[-1]
T4 = kdtree_type(points, compact_nodes=False,
balanced_tree=False).query(points, k=5)[-1]
assert_array_equal(T1, T2)
assert_array_equal(T1, T3)
assert_array_equal(T1, T4)
def test_kdtree_pickle(kdtree_type):
# test if it is possible to pickle a KDTree
try:
import cPickle as pickle # type: ignore[import]
except ImportError:
import pickle
np.random.seed(0)
n = 50
k = 4
points = np.random.randn(n, k)
T1 = kdtree_type(points)
tmp = pickle.dumps(T1)
T2 = pickle.loads(tmp)
T1 = T1.query(points, k=5)[-1]
T2 = T2.query(points, k=5)[-1]
assert_array_equal(T1, T2)
def test_kdtree_pickle_boxsize(kdtree_type):
# test if it is possible to pickle a periodic KDTree
try:
import cPickle as pickle
except ImportError:
import pickle
np.random.seed(0)
n = 50
k = 4
points = np.random.uniform(size=(n, k))
T1 = kdtree_type(points, boxsize=1.0)
tmp = pickle.dumps(T1)
T2 = pickle.loads(tmp)
T1 = T1.query(points, k=5)[-1]
T2 = T2.query(points, k=5)[-1]
assert_array_equal(T1, T2)
def test_kdtree_copy_data(kdtree_type):
# check if copy_data=True makes the kd-tree
# impervious to data corruption by modification of
# the data array
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T = kdtree_type(points, copy_data=True)
q = points.copy()
T1 = T.query(q, k=5)[-1]
points[...] = np.random.randn(n, k)
T2 = T.query(q, k=5)[-1]
assert_array_equal(T1, T2)
def test_ckdtree_parallel(kdtree_type, monkeypatch):
# check if parallel=True also generates correct query results
np.random.seed(0)
n = 5000
k = 4
points = np.random.randn(n, k)
T = kdtree_type(points)
T1 = T.query(points, k=5, workers=64)[-1]
T2 = T.query(points, k=5, workers=-1)[-1]
T3 = T.query(points, k=5)[-1]
assert_array_equal(T1, T2)
assert_array_equal(T1, T3)
monkeypatch.setattr(os, 'cpu_count', lambda: None)
with pytest.raises(NotImplementedError, match="Cannot determine the"):
T.query(points, 1, workers=-1)
def test_ckdtree_view():
# Check that the nodes can be correctly viewed from Python.
# This test also sanity checks each node in the cKDTree, and
# thus verifies the internal structure of the kd-tree.
np.random.seed(0)
n = 100
k = 4
points = np.random.randn(n, k)
kdtree = cKDTree(points)
# walk the whole kd-tree and sanity check each node
def recurse_tree(n):
assert_(isinstance(n, cKDTreeNode))
if n.split_dim == -1:
assert_(n.lesser is None)
assert_(n.greater is None)
assert_(n.indices.shape[0] <= kdtree.leafsize)
else:
recurse_tree(n.lesser)
recurse_tree(n.greater)
x = n.lesser.data_points[:, n.split_dim]
y = n.greater.data_points[:, n.split_dim]
assert_(x.max() < y.min())
recurse_tree(kdtree.tree)
# check that indices are correctly retrieved
n = kdtree.tree
assert_array_equal(np.sort(n.indices), range(100))
# check that data_points are correctly retrieved
assert_array_equal(kdtree.data[n.indices, :], n.data_points)
# KDTree is specialized to type double points, so no need to make
# a unit test corresponding to test_ball_point_ints()
def test_kdtree_list_k(kdtree_type):
# check that querying with a list of k values agrees with scalar-k queries
n = 200
m = 2
klist = [1, 2, 3]
kint = 3
np.random.seed(1234)
data = np.random.uniform(size=(n, m))
kdtree = kdtree_type(data, leafsize=1)
# check agreement between arange(1, k+1) and k
dd, ii = kdtree.query(data, klist)
dd1, ii1 = kdtree.query(data, kint)
assert_equal(dd, dd1)
assert_equal(ii, ii1)
# now check skipping one element
klist = np.array([1, 3])
kint = 3
dd, ii = kdtree.query(data, kint)
dd1, ii1 = kdtree.query(data, klist)
assert_equal(dd1, dd[..., klist - 1])
assert_equal(ii1, ii[..., klist - 1])
# check k == 1 special case
# and k == [1] non-special case
dd, ii = kdtree.query(data, 1)
dd1, ii1 = kdtree.query(data, [1])
assert_equal(len(dd.shape), 1)
assert_equal(len(dd1.shape), 2)
assert_equal(dd, np.ravel(dd1))
assert_equal(ii, np.ravel(ii1))
def test_kdtree_box(kdtree_type):
# check ckdtree periodic boundary
n = 2000
m = 3
k = 3
np.random.seed(1234)
data = np.random.uniform(size=(n, m))
kdtree = kdtree_type(data, leafsize=1, boxsize=1.0)
# use the standard python KDTree for the simulated periodic box
kdtree2 = kdtree_type(data, leafsize=1)
for p in [1, 2, 3.0, np.inf]:
dd, ii = kdtree.query(data, k, p=p)
dd1, ii1 = kdtree.query(data + 1.0, k, p=p)
assert_almost_equal(dd, dd1)
assert_equal(ii, ii1)
dd1, ii1 = kdtree.query(data - 1.0, k, p=p)
assert_almost_equal(dd, dd1)
assert_equal(ii, ii1)
dd2, ii2 = simulate_periodic_box(kdtree2, data, k, boxsize=1.0, p=p)
assert_almost_equal(dd, dd2)
assert_equal(ii, ii2)
def test_kdtree_box_0boxsize(kdtree_type):
# check ckdtree periodic boundary that mimics non-periodic
n = 2000
m = 2
k = 3
np.random.seed(1234)
data = np.random.uniform(size=(n, m))
kdtree = kdtree_type(data, leafsize=1, boxsize=0.0)
# use the standard python KDTree for the simulated periodic box
kdtree2 = kdtree_type(data, leafsize=1)
for p in [1, 2, np.inf]:
dd, ii = kdtree.query(data, k, p=p)
dd1, ii1 = kdtree2.query(data, k, p=p)
assert_almost_equal(dd, dd1)
assert_equal(ii, ii1)
def test_kdtree_box_upper_bounds(kdtree_type):
data = np.linspace(0, 2, 10).reshape(-1, 2)
data[:, 1] += 10
with pytest.raises(ValueError):
kdtree_type(data, leafsize=1, boxsize=1.0)
with pytest.raises(ValueError):
kdtree_type(data, leafsize=1, boxsize=(0.0, 2.0))
# skip a dimension.
kdtree_type(data, leafsize=1, boxsize=(2.0, 0.0))
def test_kdtree_box_lower_bounds(kdtree_type):
data = np.linspace(-1, 1, 10)
assert_raises(ValueError, kdtree_type, data, leafsize=1, boxsize=1.0)
def simulate_periodic_box(kdtree, data, k, boxsize, p):
dd = []
ii = []
x = np.arange(3 ** data.shape[1])
nn = np.array(np.unravel_index(x, [3] * data.shape[1])).T
nn = nn - 1.0
for n in nn:
image = data + n * 1.0 * boxsize
dd2, ii2 = kdtree.query(image, k, p=p)
dd2 = dd2.reshape(-1, k)
ii2 = ii2.reshape(-1, k)
dd.append(dd2)
ii.append(ii2)
dd = np.concatenate(dd, axis=-1)
ii = np.concatenate(ii, axis=-1)
result = np.empty([len(data), len(nn) * k], dtype=[
('ii', 'i8'),
('dd', 'f8')])
result['ii'][:] = ii
result['dd'][:] = dd
result.sort(order='dd')
return result['dd'][:, :k], result['ii'][:, :k]
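# Added note on the periodic reference above (not from the original file): each
# query point is shifted by every combination of {-1, 0, +1} * boxsize along
# each axis (3**d images) and queried against the plain tree; the per-image
# results are merged and re-sorted by distance so the closest k neighbours
# across all images are kept. In 1-D with boxsize=1.0, a data point at 0.95 and
# a query at 0.05 are 0.10 apart under wrapping; the query image at
# 0.05 + 1.0 = 1.05 realises that distance for the non-periodic tree.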
@pytest.mark.skipif(python_implementation() == 'PyPy',
reason="Fails on PyPy CI runs. See #9507")
def test_ckdtree_memuse():
# unit test adaptation of gh-5630
# NOTE: this will fail when run via valgrind,
# because rss is no longer a reliable memory usage indicator.
try:
import resource
except ImportError:
# resource is not available on Windows
return
# Make some data
dx, dy = 0.05, 0.05
y, x = np.mgrid[slice(1, 5 + dy, dy),
slice(1, 5 + dx, dx)]
z = np.sin(x)**10 + np.cos(10 + y*x) * np.cos(x)
z_copy = np.empty_like(z)
z_copy[:] = z
# Place FILLVAL in z_copy at random number of random locations
FILLVAL = 99.
mask = np.random.randint(0, z.size, np.random.randint(50) + 5)
z_copy.flat[mask] = FILLVAL
igood = np.vstack(np.nonzero(x != FILLVAL)).T
ibad = np.vstack(np.nonzero(x == FILLVAL)).T
mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
# burn-in
for i in range(10):
tree = cKDTree(igood)
# count memleaks while constructing and querying cKDTree
num_leaks = 0
for i in range(100):
mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
tree = cKDTree(igood)
dist, iquery = tree.query(ibad, k=4, p=2)
new_mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if new_mem_use > mem_use:
num_leaks += 1
# ideally zero leaks, but errors might accidentally happen
# outside cKDTree
assert_(num_leaks < 10)
def test_kdtree_weights(kdtree_type):
data = np.linspace(0, 1, 4).reshape(-1, 1)
tree1 = kdtree_type(data, leafsize=1)
weights = np.ones(len(data), dtype='f4')
nw = tree1._build_weights(weights)
assert_array_equal(nw, [4, 2, 1, 1, 2, 1, 1])
assert_raises(ValueError, tree1._build_weights, weights[:-1])
for i in range(10):
# since weights are uniform, these shall agree:
c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, i))
c2 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=(weights, weights))
c3 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=(weights, None))
c4 = tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=(None, weights))
tree1.count_neighbors(tree1, np.linspace(0, 10, i),
weights=weights)
assert_array_equal(c1, c2)
assert_array_equal(c1, c3)
assert_array_equal(c1, c4)
for i in range(len(data)):
# this tests removal of one data point by setting weight to 0
w1 = weights.copy()
w1[i] = 0
data2 = data[w1 != 0]
tree2 = kdtree_type(data2)
c1 = tree1.count_neighbors(tree1, np.linspace(0, 10, 100),
weights=(w1, w1))
# "c2 is correct"
c2 = tree2.count_neighbors(tree2, np.linspace(0, 10, 100))
assert_array_equal(c1, c2)
#this asserts for two different trees, singular weights
# crashes
assert_raises(ValueError, tree1.count_neighbors,
tree2, np.linspace(0, 10, 100), weights=w1)
def test_kdtree_count_neighbous_multiple_r(kdtree_type):
n = 2000
m = 2
np.random.seed(1234)
data = np.random.normal(size=(n, m))
kdtree = kdtree_type(data, leafsize=1)
r0 = [0, 0.01, 0.01, 0.02, 0.05]
i0 = np.arange(len(r0))
n0 = kdtree.count_neighbors(kdtree, r0)
nnc = kdtree.count_neighbors(kdtree, r0, cumulative=False)
assert_equal(n0, nnc.cumsum())
for i, r in zip(itertools.permutations(i0),
itertools.permutations(r0)):
# permute n0 by i and it shall agree
n = kdtree.count_neighbors(kdtree, r)
assert_array_equal(n, n0[list(i)])
def test_len0_arrays(kdtree_type):
# make sure len-0 arrays are handled correctly
# in range queries (gh-5639)
np.random.seed(1234)
X = np.random.rand(10, 2)
Y = np.random.rand(10, 2)
tree = kdtree_type(X)
# query_ball_point (single)
d, i = tree.query([.5, .5], k=1)
z = tree.query_ball_point([.5, .5], 0.1*d)
assert_array_equal(z, [])
# query_ball_point (multiple)
d, i = tree.query(Y, k=1)
mind = d.min()
z = tree.query_ball_point(Y, 0.1*mind)
y = np.empty(shape=(10, ), dtype=object)
y.fill([])
assert_array_equal(y, z)
# query_ball_tree
other = kdtree_type(Y)
y = tree.query_ball_tree(other, 0.1*mind)
assert_array_equal(10*[[]], y)
# count_neighbors
y = tree.count_neighbors(other, 0.1*mind)
assert_(y == 0)
# sparse_distance_matrix
y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='dok_matrix')
assert_array_equal(y == np.zeros((10, 10)), True)
y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='coo_matrix')
assert_array_equal(y == np.zeros((10, 10)), True)
y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='dict')
assert_equal(y, {})
y = tree.sparse_distance_matrix(other, 0.1*mind, output_type='ndarray')
_dtype = [('i', np.intp), ('j', np.intp), ('v', np.float64)]
res_dtype = np.dtype(_dtype, align=True)
z = np.empty(shape=(0, ), dtype=res_dtype)
assert_array_equal(y, z)
# query_pairs
d, i = tree.query(X, k=2)
mind = d[:, -1].min()
y = tree.query_pairs(0.1*mind, output_type='set')
assert_equal(y, set())
y = tree.query_pairs(0.1*mind, output_type='ndarray')
z = np.empty(shape=(0, 2), dtype=np.intp)
assert_array_equal(y, z)
def test_kdtree_duplicated_inputs(kdtree_type):
# check kdtree with duplicated inputs
n = 1024
for m in range(1, 8):
data = np.ones((n, m))
data[n//2:] = 2
for balanced, compact in itertools.product((False, True), repeat=2):
kdtree = kdtree_type(data, balanced_tree=balanced,
compact_nodes=compact, leafsize=1)
assert kdtree.size == 3
tree = (kdtree.tree if kdtree_type is cKDTree else
kdtree.tree._node)
assert_equal(
np.sort(tree.lesser.indices),
np.arange(0, n // 2))
assert_equal(
np.sort(tree.greater.indices),
np.arange(n // 2, n))
def test_kdtree_noncumulative_nondecreasing(kdtree_type):
# count_neighbors with cumulative=False requires non-decreasing r values,
# so a decreasing sequence shall raise a ValueError.
kdtree = kdtree_type([[0]], leafsize=1)
assert_raises(ValueError, kdtree.count_neighbors,
kdtree, [0.1, 0], cumulative=False)
def test_short_knn(kdtree_type):
# The test case is based on github: #6425 by @SteveDoyle2
xyz = np.array([
[0., 0., 0.],
[1.01, 0., 0.],
[0., 1., 0.],
[0., 1.01, 0.],
[1., 0., 0.],
[1., 1., 0.]],
dtype='float64')
ckdt = kdtree_type(xyz)
deq, ieq = ckdt.query(xyz, k=4, distance_upper_bound=0.2)
assert_array_almost_equal(deq,
[[0., np.inf, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., 0.01, np.inf, np.inf],
[0., np.inf, np.inf, np.inf]])
def test_query_ball_point_vector_r(kdtree_type):
np.random.seed(1234)
data = np.random.normal(size=(100, 3))
query = np.random.normal(size=(100, 3))
tree = kdtree_type(data)
d = np.random.uniform(0, 0.3, size=len(query))
rvector = tree.query_ball_point(query, d)
rscalar = [tree.query_ball_point(qi, di) for qi, di in zip(query, d)]
for a, b in zip(rvector, rscalar):
assert_array_equal(sorted(a), sorted(b))
def test_query_ball_point_length(kdtree_type):
np.random.seed(1234)
data = np.random.normal(size=(100, 3))
query = np.random.normal(size=(100, 3))
tree = kdtree_type(data)
d = 0.3
length = tree.query_ball_point(query, d, return_length=True)
length2 = [len(ind) for ind in tree.query_ball_point(query, d, return_length=False)]
length3 = [len(tree.query_ball_point(qi, d)) for qi in query]
length4 = [tree.query_ball_point(qi, d, return_length=True) for qi in query]
assert_array_equal(length, length2)
assert_array_equal(length, length3)
assert_array_equal(length, length4)
def test_discontiguous(kdtree_type):
np.random.seed(1234)
data = np.random.normal(size=(100, 3))
d_contiguous = np.arange(100) * 0.04
d_discontiguous = np.ascontiguousarray(
np.arange(100)[::-1] * 0.04)[::-1]
query_contiguous = np.random.normal(size=(100, 3))
query_discontiguous = np.ascontiguousarray(query_contiguous.T).T
assert query_discontiguous.strides[-1] != query_contiguous.strides[-1]
assert d_discontiguous.strides[-1] != d_contiguous.strides[-1]
tree = kdtree_type(data)
length1 = tree.query_ball_point(query_contiguous,
d_contiguous, return_length=True)
length2 = tree.query_ball_point(query_discontiguous,
d_discontiguous, return_length=True)
assert_array_equal(length1, length2)
d1, i1 = tree.query(query_contiguous, 1)
d2, i2 = tree.query(query_discontiguous, 1)
assert_array_equal(d1, d2)
assert_array_equal(i1, i2)
@pytest.mark.parametrize("balanced_tree, compact_nodes",
[(True, False),
(True, True),
(False, False),
(False, True)])
def test_kdtree_empty_input(kdtree_type, balanced_tree, compact_nodes):
# https://github.com/scipy/scipy/issues/5040
np.random.seed(1234)
empty_v3 = np.empty(shape=(0, 3))
query_v3 = np.ones(shape=(1, 3))
query_v2 = np.ones(shape=(2, 3))
tree = kdtree_type(empty_v3, balanced_tree=balanced_tree,
compact_nodes=compact_nodes)
length = tree.query_ball_point(query_v3, 0.3, return_length=True)
assert length == 0
dd, ii = tree.query(query_v2, 2)
assert ii.shape == (2, 2)
assert dd.shape == (2, 2)
assert np.isinf(dd).all()
N = tree.count_neighbors(tree, [0, 1])
assert_array_equal(N, [0, 0])
M = tree.sparse_distance_matrix(tree, 0.3)
assert M.shape == (0, 0)
@KDTreeTest
class _Test_sorted_query_ball_point:
def setup_method(self):
np.random.seed(1234)
self.x = np.random.randn(100, 1)
self.ckdt = self.kdtree_type(self.x)
def test_return_sorted_True(self):
idxs_list = self.ckdt.query_ball_point(self.x, 1., return_sorted=True)
for idxs in idxs_list:
assert_array_equal(idxs, sorted(idxs))
for xi in self.x:
idxs = self.ckdt.query_ball_point(xi, 1., return_sorted=True)
assert_array_equal(idxs, sorted(idxs))
def test_return_sorted_None(self):
"""Previous behavior was to sort the returned indices if there were
multiple points per query but not sort them if there was a single point
per query."""
idxs_list = self.ckdt.query_ball_point(self.x, 1.)
for idxs in idxs_list:
assert_array_equal(idxs, sorted(idxs))
idxs_list_single = [self.ckdt.query_ball_point(xi, 1.) for xi in self.x]
idxs_list_False = self.ckdt.query_ball_point(self.x, 1., return_sorted=False)
for idxs0, idxs1 in zip(idxs_list_False, idxs_list_single):
assert_array_equal(idxs0, idxs1)
def test_kdtree_complex_data():
# Test that KDTree rejects complex input points (gh-9108)
points = np.random.rand(10, 2).view(complex)
with pytest.raises(TypeError, match="complex data"):
t = KDTree(points)
t = KDTree(points.real)
with pytest.raises(TypeError, match="complex data"):
t.query(points)
with pytest.raises(TypeError, match="complex data"):
t.query_ball_point(points, r=1)
def test_kdtree_tree_access():
# Test KDTree.tree can be used to traverse the KDTree
np.random.seed(1234)
points = np.random.rand(100, 4)
t = KDTree(points)
root = t.tree
assert isinstance(root, KDTree.innernode)
assert root.children == points.shape[0]
# Visit the tree and assert some basic properties for each node
nodes = [root]
while nodes:
n = nodes.pop(-1)
if isinstance(n, KDTree.leafnode):
assert isinstance(n.children, int)
assert n.children == len(n.idx)
assert_array_equal(points[n.idx], n._node.data_points)
else:
assert isinstance(n, KDTree.innernode)
assert isinstance(n.split_dim, int)
assert 0 <= n.split_dim < t.m
assert isinstance(n.split, float)
assert isinstance(n.children, int)
assert n.children == n.less.children + n.greater.children
nodes.append(n.greater)
nodes.append(n.less)
def test_kdtree_attributes():
# Test KDTree's attributes are available
np.random.seed(1234)
points = np.random.rand(100, 4)
t = KDTree(points)
assert isinstance(t.m, int)
assert t.n == points.shape[0]
assert isinstance(t.n, int)
assert t.m == points.shape[1]
assert isinstance(t.leafsize, int)
assert t.leafsize == 10
assert_array_equal(t.maxes, np.amax(points, axis=0))
assert_array_equal(t.mins, np.amin(points, axis=0))
assert t.data is points
@pytest.mark.parametrize("kdtree_class", [KDTree, cKDTree])
def test_kdtree_count_neighbors_weighted(kdtree_class):
np.random.seed(1234)
r = np.arange(0.05, 1, 0.05)
A = np.random.random(21).reshape((7,3))
B = np.random.random(45).reshape((15,3))
wA = np.random.random(7)
wB = np.random.random(15)
kdA = kdtree_class(A)
kdB = kdtree_class(B)
nAB = kdA.count_neighbors(kdB, r, cumulative=False, weights=(wA,wB))
# Compare against brute-force
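    # With cumulative=False, count_neighbors returns, for each radius, the
    # weighted number of pairs whose distance falls in the shell (r[i-1], r[i]].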
weights = wA[None, :] * wB[:, None]
dist = np.linalg.norm(A[None, :, :] - B[:, None, :], axis=-1)
expect = [np.sum(weights[(prev_radius < dist) & (dist <= radius)])
for prev_radius, radius in zip(itertools.chain([0], r[:-1]), r)]
assert_allclose(nAB, expect)
|
e-q/scipy
|
scipy/spatial/tests/test_kdtree.py
|
Python
|
bsd-3-clause
| 48,897
|
[
"VisIt"
] |
af60e3593b8620c007e28e09c0bc9b151fc03c73c1b53a0efe56b0395a002b9f
|
"""
Views for user API
"""
from django.shortcuts import redirect
from django.utils import dateparse
from rest_framework import generics, views
from rest_framework.decorators import api_view
from rest_framework.response import Response
from opaque_keys.edx.keys import UsageKey
from opaque_keys import InvalidKeyError
from courseware.access import is_mobile_available_for_user
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from courseware.views.index import save_positions_recursively_up
from courseware.views.views import get_current_child
from student.models import CourseEnrollment, User
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .serializers import CourseEnrollmentSerializer, UserSerializer
from .. import errors
from ..utils import mobile_view, mobile_course_access
@mobile_view(is_user=True)
class UserDetail(generics.RetrieveAPIView):
"""
**Use Case**
Get information about the specified user and access other resources
the user has permissions for.
Users are redirected to this endpoint after they sign in.
You can use the **course_enrollments** value in the response to get a
list of courses the user is enrolled in.
**Example Request**
GET /api/mobile/v0.5/users/{username}
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK" response.
The HTTP 200 response has the following values.
* course_enrollments: The URI to list the courses the currently signed
in user is enrolled in.
* email: The email address of the currently signed in user.
* id: The ID of the user.
* name: The full name of the currently signed in user.
* username: The username of the currently signed in user.
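    **Example Response (illustrative values only; field names match the list above)**
        {
            "id": 42,
            "username": "staff",
            "email": "staff@example.com",
            "name": "Staff Member",
            "course_enrollments": "/api/mobile/v0.5/users/staff/course_enrollments/"
        }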
"""
queryset = (
User.objects.all()
.select_related('profile')
)
serializer_class = UserSerializer
lookup_field = 'username'
@mobile_view(is_user=True)
class UserCourseStatus(views.APIView):
"""
**Use Cases**
Get or update the ID of the module that the specified user last
visited in the specified course.
**Example Requests**
GET /api/mobile/v0.5/users/{username}/course_status_info/{course_id}
PATCH /api/mobile/v0.5/users/{username}/course_status_info/{course_id}
**PATCH Parameters**
The body of the PATCH request can include the following parameters.
* last_visited_module_id={module_id}
* modification_date={date}
The modification_date parameter is optional. If it is present, the
update will only take effect if the modification_date in the
request is later than the modification_date saved on the server.
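    **Example PATCH Body (illustrative values only)**
        last_visited_module_id=block-v1:Org+Course+Run+type@sequential+block@abc123
        modification_date=2014-06-11T14:30:00+00:00
    The modification_date must be timezone-aware; a value without timezone
    information is rejected with an HTTP 400 response.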
**Response Values**
If the request is successful, the request returns an HTTP 200 "OK" response.
The HTTP 200 response has the following values.
* last_visited_module_id: The ID of the last module that the user
visited in the course.
* last_visited_module_path: The ID of the modules in the path from the
last visited module to the course module.
"""
http_method_names = ["get", "patch"]
def _last_visited_module_path(self, request, course):
"""
Returns the path from the last module visited by the current user in the given course up to
the course module. If there is no such visit, the first item deep enough down the course
tree is used.
"""
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, course, depth=2)
course_module = get_module_for_descriptor(
request.user, request, course, field_data_cache, course.id, course=course
)
path = [course_module]
chapter = get_current_child(course_module, min_depth=2)
if chapter is not None:
path.append(chapter)
section = get_current_child(chapter, min_depth=1)
if section is not None:
path.append(section)
path.reverse()
return path
def _get_course_info(self, request, course):
"""
Returns the course status
"""
path = self._last_visited_module_path(request, course)
path_ids = [unicode(module.location) for module in path]
return Response({
"last_visited_module_id": path_ids[0],
"last_visited_module_path": path_ids,
})
def _update_last_visited_module_id(self, request, course, module_key, modification_date):
"""
Saves the module id if the found modification_date is less recent than the passed modification date
"""
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, course, depth=2)
try:
module_descriptor = modulestore().get_item(module_key)
except ItemNotFoundError:
return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
module = get_module_for_descriptor(
request.user, request, module_descriptor, field_data_cache, course.id, course=course
)
if modification_date:
key = KeyValueStore.Key(
scope=Scope.user_state,
user_id=request.user.id,
block_scope_id=course.location,
field_name='position'
)
original_store_date = field_data_cache.last_modified(key)
if original_store_date is not None and modification_date < original_store_date:
# old modification date so skip update
return self._get_course_info(request, course)
save_positions_recursively_up(request.user, request, field_data_cache, module, course=course)
return self._get_course_info(request, course)
@mobile_course_access(depth=2)
def get(self, request, course, *args, **kwargs): # pylint: disable=unused-argument
"""
Get the ID of the module that the specified user last visited in the specified course.
"""
return self._get_course_info(request, course)
@mobile_course_access(depth=2)
def patch(self, request, course, *args, **kwargs): # pylint: disable=unused-argument
"""
Update the ID of the module that the specified user last visited in the specified course.
"""
module_id = request.data.get("last_visited_module_id")
modification_date_string = request.data.get("modification_date")
modification_date = None
if modification_date_string:
modification_date = dateparse.parse_datetime(modification_date_string)
if not modification_date or not modification_date.tzinfo:
return Response(errors.ERROR_INVALID_MODIFICATION_DATE, status=400)
if module_id:
try:
module_key = UsageKey.from_string(module_id)
except InvalidKeyError:
return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
return self._update_last_visited_module_id(request, course, module_key, modification_date)
else:
# The arguments are optional, so if there's no argument just succeed
return self._get_course_info(request, course)
@mobile_view(is_user=True)
class UserCourseEnrollmentsList(generics.ListAPIView):
"""
**Use Case**
Get information about the courses that the currently signed in user is
enrolled in.
**Example Request**
GET /api/mobile/v0.5/users/{username}/course_enrollments/
**Response Values**
If the request for information about the user is successful, the
request returns an HTTP 200 "OK" response.
The HTTP 200 response has the following values.
* certificate: Information about the user's earned certificate in the
course.
* course: A collection of the following data about the course.
* courseware_access: A JSON representation with access information for the course,
including any access errors.
* course_about: The URL to the course about page.
* course_handouts: The URI to get data for course handouts.
* course_image: The path to the course image.
* course_updates: The URI to get data for course updates.
* discussion_url: The URI to access data for course discussions if
it is enabled, otherwise null.
* end: The end date of the course.
* id: The unique ID of the course.
* name: The name of the course.
* number: The course number.
* org: The organization that created the course.
* start: The date and time when the course starts.
* start_display:
If start_type is a string, then the advertised_start date for the course.
If start_type is a timestamp, then a formatted date for the start of the course.
If start_type is empty, then the value is None and it indicates that the course has not yet started.
* start_type: One of either "string", "timestamp", or "empty"
* subscription_id: A unique "clean" (alphanumeric with '_') ID of
the course.
* video_outline: The URI to get the list of all videos that the user
can access in the course.
* created: The date the course was created.
* is_active: Whether the course is currently active. Possible values
are true or false.
* mode: The type of certificate registration for this course (honor or
certified).
* url: URL to the downloadable version of the certificate, if exists.
"""
queryset = CourseEnrollment.objects.all()
serializer_class = CourseEnrollmentSerializer
lookup_field = 'username'
# In Django Rest Framework v3, there is a default pagination
# class that transmutes the response data into a dictionary
# with pagination information. The original response data (a list)
# is stored in a "results" value of the dictionary.
# For backwards compatibility with the existing API, we disable
# the default behavior by setting the pagination_class to None.
pagination_class = None
def get_queryset(self):
enrollments = self.queryset.filter(
user__username=self.kwargs['username'],
is_active=True
).order_by('created').reverse()
return [
enrollment for enrollment in enrollments
if enrollment.course_overview and
is_mobile_available_for_user(self.request.user, enrollment.course_overview)
]
@api_view(["GET"])
@mobile_view()
def my_user_info(request):
"""
Redirect to the currently-logged-in user's info page
"""
return redirect("user-detail", username=request.user.username)
|
ampax/edx-platform
|
lms/djangoapps/mobile_api/users/views.py
|
Python
|
agpl-3.0
| 11,135
|
[
"VisIt"
] |
fd70e9762efa81f90c831a4ca956d9207fdd33b117d68f30d0707cf0b9f05fb9
|
#!/usr/bin/env python -u
# Decompose each of the MAGMO spectra into Gaussian components.
#
# This program reads the previously generated spectra summaries and uses GaussPy to fit Gaussian components to
# the spectra. Comparison diagrams are produced for each spectrum.
# Author James Dempsey
# Date 5 Jan 2017
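#
# Example invocation (hypothetical file names; all flags are defined in parseargs below):
#   python decompose.py -i magmo-spectra.vot -o magmo-decomp.pickle --long_min 300 --long_max 350 -q A --alpha1 3.5 --alpha2 4.36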
from __future__ import print_function, division
from astropy.io import fits, votable
from astropy.io.votable import parse, from_table, writeto
from astropy.table import Table, Column
from matplotlib import gridspec
from string import Template
import argparse
import datetime
import gausspy.gp as gp
import magmo
import numpy as np
import pickle
import time
import matplotlib.pyplot as plt
import aplpy
FILENAME_DATA_GAUSSPY = 'spectra.pickle'
def parseargs():
"""
Parse the command line arguments
:return: An args map with the parsed arguments
"""
parser = argparse.ArgumentParser(
description="Decompose Galactic plane HI absorption spectra into Gaussian components")
parser.add_argument("-i", "--input", help="The input spectra catalogue",
default='magmo-spectra.vot')
parser.add_argument("--long_min", help="The smallest longitude to be decomposed",
type=int, default=-180)
parser.add_argument("--long_max", help="The largest longitude to be decomposed",
type=int, default=180)
parser.add_argument("-q", "--quality", help="The minimum quality level to include",
default='B')
parser.add_argument("-o", "--output", help="The file name of the decomposition result.",
default='magmo-decomp.pickle')
parser.add_argument("--plot_only", help="Produce plots for the result of a previous decomposition", default=False,
action='store_true')
parser.add_argument("--train", help="Train GaussPy using the selected spectra. The produced alpha value can then " +
"be used for later decomposition.", default=False, action='store_true')
parser.add_argument("--alpha1", help="The value for the first GaussPy smoothing parameter",
type=float, default=1.12)
parser.add_argument("--alpha2", help="The value for the second GaussPy smoothing parameter",
type=float, default=5)
parser.add_argument("--snr_thresh", help="The signal to noise ratio threshold",
type=float, default=5)
args = parser.parse_args()
return args
def read_spectra(filename):
votable = parse(filename, pedantic=False)
results = next(resource for resource in votable.resources if
resource.type == "results")
results_array = results.tables[0].array
return results_array
def read_opacity(filename):
votable = parse(filename, pedantic=False)
results = next(resource for resource in votable.resources if
resource.type == "results")
results_array = results.tables[0].array
return results_array
def filter_spectra(spectra, min_long, max_long, min_quality):
filtered = spectra[spectra['Longitude'] >= min_long]
filtered = filtered[filtered['Longitude'] <= max_long]
    num_filtered_long = len(spectra) - len(filtered)
    filtered = filtered[filtered['Rating'] <= min_quality]
    num_filtered_quality = len(spectra) - len(filtered) - num_filtered_long
    filtered = filtered[filtered['Duplicate'] == False]
    num_filtered_dupe = len(spectra) - len(filtered) - num_filtered_long - num_filtered_quality
    print("Filtered spectra from {} to {}, Longitude: {} Quality: {} Duplicates: {}".format(
        len(spectra), len(filtered), num_filtered_long, num_filtered_quality, num_filtered_dupe))
return filtered
def get_opacity_filename(day, field, source):
t = Template('day${day}/${field}_src${source}_opacity.votable.xml')
return t.substitute(day=day, field=field, source=source)
def convert_from_ratio(absorption):
"""
Convert an array of absorption values (I/I_0) to opacity values (tau).
    Note that the exact form -ln(I/I_0) is currently commented out; the linear
    approximation 1 - I/I_0 is returned instead.
:param absorption: The array of absorption values
:return: The equivalent tau values
"""
#return -1 * np.log(np.maximum(absorption, 1e-16))
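    # The linear form 1 - I/I_0 agrees with tau = -ln(I/I_0) to first order
    # (optically thin limit) and never produces NaN for non-positive ratios.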
return 1 - absorption
def convert_to_ratio(opacity):
"""
Convert an array of opacity values (tau) to absorption values (I/I_0).
:param opacity: The array of tau values
:return: The equivalent absorption ratio values
"""
#return np.exp(-1 * opacity)
return 1 - opacity
def prepare_spectra(spectra, data_filename):
data = {}
# Convert to GaussPy format
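    # GaussPy reads a pickled dict of parallel lists: 'data_list' (the spectra),
    # 'x_values' (velocity axes) and 'errors' (per-channel rms). The extra keys
    # built below are only used to map results back to the input catalogue.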
i = 0
for spectrum in spectra:
filename = get_opacity_filename(spectrum['Day'], spectrum['Field'], spectrum['Source'])
#print("Reading", filename)
opacity = read_opacity(filename)
#if spectrum['Day'] == 43 and spectrum['Rating'] == 'A' and spectrum['Field'] == '019.612-0.120': # and spectrum['Source'] == '25-0':
# print("Skipping ", spectrum['Rating'], spectrum['Day'], spectrum['Longitude'], spectrum['Field'], spectrum['Source'])
# continue
longitude = spectrum['Longitude']
if longitude < 0:
longitude += 360
# print (i, spectrum['Rating'], spectrum['Day'], longitude, spectrum['Field'], spectrum['Source'])
rms = spectrum['Continuum_SD']
errors = np.ones(opacity.shape[0]) * rms
        location = np.array([spectrum['Longitude'], spectrum['Latitude']])
tau = convert_from_ratio(opacity['opacity'])
#print (tau)
#inverted_opacity = 1 - opacity['opacity']
data['data_list'] = data.get('data_list', []) + [tau]
data['x_values'] = data.get('x_values', []) + [opacity['velocity'] / 1000]
data['errors'] = data.get('errors', []) + [errors]
data['location'] = data.get('location', []) + [location]
data['spectrum_idx'] = data.get('spectrum_idx', []) + [i]
data['rating'] = data.get('rating', []) + [spectrum['Rating']]
i += 1
# Save the file to be used by GaussPy
pickle.dump(data, open(data_filename, 'w'))
def decompose(spectra, out_filename, alpha1, alpha2, snr_thresh, data_filename):
start = time.time()
print("## Commenced decomposition at %s ##" %
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start)))
prepare_spectra(spectra, data_filename)
end_read = time.time()
print("## Finished conversion of %d spectra at %s taking %.02f s ##" %
(len(spectra), time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_read)), (end_read - start)))
# Load GaussPy
g = gp.GaussianDecomposer()
# Setting AGD parameters
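    # alpha1/alpha2 are the AGD smoothing scales for the one- and two-phase
    # decomposition; SNR_thresh drops candidate components below that
    # signal-to-noise ratio.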
g.set('mode','conv')
g.set('phase', 'two' if alpha2 else 'one')
g.set('SNR_thresh', [snr_thresh, snr_thresh])
g.set('alpha1', alpha1)
#g.set('verbose', True)
if alpha2:
g.set('alpha2', alpha2)
# Run GaussPy
decomposed_data = g.batch_decomposition(data_filename)
# Save decomposition information
pickle.dump(decomposed_data, open(out_filename, 'w'))
end = time.time()
if len(decomposed_data['means_fit']) != len(spectra):
print(
'###! WARNING: Original %d and decomposed spectra %d counts differ!' % (
len(spectra), len(decomposed_data['means_fit'])))
print("## Finished decomposition of %d spectra at %s taking %.02f s ##" %
(len(spectra), time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)), (end - end_read)))
def output_component_catalogue(spectra, data, data_decomposed, folder):
names = []
comp_names = []
days = []
field_names = []
sources = []
longitudes = []
latitudes = []
amps = []
fwhms = []
means = []
best_fit_rchi2s = []
amps_fit_errs = []
fwhms_fit_errs = []
means_fit_errs = []
num_no_comps = {}
for i in range(len(data_decomposed['fwhms_fit'])):
if i >= len(data['spectrum_idx']):
print("Error: data index of %d is invalid for data array of len %d" % (
i, len(data['spectrum_idx'])))
spectrum_idx = data['spectrum_idx'][i]
if spectrum_idx >= len(spectra):
print("Error: spectra index of %d at row %d is invalid for spectra array of len %d" % (
spectrum_idx, i, len(spectra)))
spectrum = spectra[spectrum_idx]
fit_fwhms = data_decomposed['fwhms_fit'][i]
fit_means = data_decomposed['means_fit'][i]
fit_amps = data_decomposed['amplitudes_fit'][i]
best_fit_rchi2 = data_decomposed['best_fit_rchi2'][i]
means_fit_err = data_decomposed['means_fit_err'][i]
fwhms_fit_err = data_decomposed['fwhms_fit_err'][i]
amplitudes_fit_err = data_decomposed['amplitudes_fit_err'][i]
if len(fit_amps) > 0.:
for j in range(len(fit_amps)):
days.append(int(spectrum['Day']))
field_names.append(spectrum['Field'])
sources.append(spectrum['Source'])
longitudes.append(spectrum['Longitude'])
latitudes.append(spectrum['Latitude'])
amps.append(fit_amps[j])
fwhms.append(fit_fwhms[j])
means.append(fit_means[j])
best_fit_rchi2s.append(best_fit_rchi2[0])
                amps_fit_errs.append(amplitudes_fit_err[j])
                fwhms_fit_errs.append(fwhms_fit_err[j])
                means_fit_errs.append(means_fit_err[j])
names.append(spectrum['Name'])
suffix = chr(ord('A') + j)
comp_names.append(spectrum['Name']+suffix)
else:
rating = spectrum['Rating']
num_no_comps[rating] = num_no_comps.get(rating, 0) + 1
print ("Unable to find components for ")
temp_table = Table(
[comp_names, names, days, field_names, sources, longitudes, latitudes, amps, fwhms, means, best_fit_rchi2s,
amps_fit_errs, fwhms_fit_errs, means_fit_errs],
names=['Comp_Name', 'Spectra_Name', 'Day', 'Field', 'Source', 'Longitude', 'Latitude', 'Amplitude', 'FWHM',
'Mean', 'Best_Fit_Rchi2', 'Amplitude_Fit_Err', 'FWHM_Fit_Err', 'Mean_Fit_Err'],
meta={'ID': 'magmo_components',
'name': 'MAGMO Components ' + str(datetime.date.today())})
votable = from_table(temp_table)
filename = "magmo-components.vot"
writeto(votable, filename)
filename = folder + "/magmo-components.vot"
writeto(votable, filename)
total_nc = 0
for rating, count in num_no_comps.items():
total_nc += count
print("Wrote out", len(fwhms), "components to", filename, "No components generated for", total_nc)
for rating in sorted(num_no_comps.keys()):
print("%s: %3d" % (rating, num_no_comps[rating]))
# , ".", num_no_comps, "spectra (of", len(spectra), ") had no components found")
def calc_residual(velo, opacity, fit_amps, fit_fwhms, fit_means):
g_sum = np.zeros(len(velo))
# Plot individual components
if len(fit_amps) > 0.:
for j in range(len(fit_amps)):
amp, fwhm, mean = fit_amps[j], fit_fwhms[j], fit_means[j]
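            # Gaussian written in terms of FWHM: 4*ln(2)/fwhm**2 == 1/(2*sigma**2)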
yy = amp * np.exp(-4. * np.log(2) * (velo - mean) ** 2 / fwhm ** 2)
g_sum += yy
residual = opacity - g_sum
return residual
def output_decomposition_catalogue(folder, spectra, data, data_decomposed, alpha1, alpha2):
names = []
days = []
field_names = []
sources = []
longitudes = []
latitudes = []
cont_sd = []
residual_rms = []
ratings = []
num_comps = []
for i in range(len(data_decomposed['fwhms_fit'])):
spectrum = spectra[data['spectrum_idx'][i]]
names.append(spectrum['Name'])
days.append(int(spectrum['Day']))
field_names.append(spectrum['Field'])
sources.append(spectrum['Source'])
longitudes.append(spectrum['Longitude'])
latitudes.append(spectrum['Latitude'])
ratings.append(spectrum['Rating'])
cont_sd.append(spectrum['Continuum_SD'])
fit_fwhms = data_decomposed['fwhms_fit'][i]
fit_means = data_decomposed['means_fit'][i]
fit_amps = data_decomposed['amplitudes_fit'][i]
num_comps.append(len(fit_fwhms))
velo = data['x_values'][i]
# opacity = convert_to_ratio(data['data_list'][i])
residual = calc_residual(velo, data['data_list'][i], fit_amps, fit_fwhms, fit_means)
residual_rms.append(np.sqrt(np.mean(np.square(residual))))
temp_table = Table(
[names, days, field_names, sources, longitudes, latitudes, residual_rms, ratings, num_comps, cont_sd],
names=['Spectra_Name', 'Day', 'Field', 'Source', 'Longitude', 'Latitude', 'Residual_RMS', 'Rating', 'Num_Comp',
'Continuum_SD'],
meta={'ID': 'magmo_decomposition',
'name': 'MAGMO Decomposition ' + str(datetime.date.today()),
'alpha1' : alpha1,
'alpha2' : alpha2})
votable = from_table(temp_table)
filename = folder + "/magmo-decomposition.vot"
writeto(votable, filename)
def plot_single_spectrum(ax, velo, opacity, fit_amps, fit_fwhms, fit_means, name):
g_sum = np.zeros(len(velo))
ax.plot(velo, opacity, color='grey')
# Plot individual components
if len(fit_amps) > 0.:
for j in range(len(fit_amps)):
amp, fwhm, mean = fit_amps[j], fit_fwhms[j], fit_means[j]
yy = amp * np.exp(-4. * np.log(2) * (velo - mean) ** 2 / fwhm ** 2)
g_sum += yy
yy = convert_to_ratio(yy)
ax.plot(velo, yy, '--', lw=0.5, color='purple')
g_sum = convert_to_ratio(g_sum)
ax.plot(velo, g_sum, '-', lw=1.0, color='blue')
plt.title(name)
residual = opacity - g_sum
return residual
def find_bounds(velo, fit_fwhms, fit_means):
buff = 25 # km/s
if len(fit_fwhms) == 0:
return 0, len(velo)-1
means = np.array(fit_means)
fwhms = np.array(fit_fwhms)
lowest_vals = means - fwhms
highest_vals = means + fwhms
low_velo = np.min(lowest_vals) - buff
high_velo = np.max(highest_vals) + buff
low_idx = (np.abs(velo - low_velo)).argmin()
high_idx = (np.abs(velo - high_velo)).argmin()
return low_idx, high_idx
def plot_spectrum(velo, opacity, fit_amps, fit_fwhms, fit_means, name, filename, formats):
fig = plt.figure(figsize=(8, 6))
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
ax = fig.add_subplot(gs[0])
y = convert_from_ratio(opacity)
min_bound, max_bound = find_bounds(velo, fit_fwhms, fit_means)
residual = plot_single_spectrum(ax, velo[min_bound:max_bound], y[min_bound:max_bound],
fit_amps, fit_fwhms, fit_means, name)
residual_rms = np.sqrt(np.mean(np.square(residual)))
ax.set_ylabel('$e^{-\\tau}$')
# Residual plot
ax = fig.add_subplot(gs[1])
ax.plot(velo[min_bound:max_bound], residual, 'or', markerfacecolor='None', markersize=2, markeredgecolor='blue')
ax.grid()
plt.xlabel('LSR Velocity (km/s) n=%d rms=%.4f' % (len(fit_amps), residual_rms))
for fmt in formats:
plt.savefig(filename + "." + fmt, bbox_inches="tight")
plt.close()
return residual_rms
def plot_spectra(spectra, data, data_decomposed, alpha1, alpha2, folder='.'):
plot_folder = folder + "/plots"
magmo.ensure_dir_exists(plot_folder)
residuals = np.zeros(len(data_decomposed['fwhms_fit']))
for rating in 'ABCDEF':
magmo.ensure_dir_exists(plot_folder + "/" + rating)
for i in range(len(data_decomposed['fwhms_fit'])):
spectrum = spectra[data['spectrum_idx'][i]]
velo = data['x_values'][i]
opacity = data['data_list'][i]
fit_fwhms = data_decomposed['fwhms_fit'][i]
fit_means = data_decomposed['means_fit'][i]
fit_amps = data_decomposed['amplitudes_fit'][i]
rating = spectrum['Rating']
field_name = spectrum['Field']
day = str(spectrum['Day'])
src_id = spectrum['Source']
name = spectrum['Name'] + " (" + rating + ")"
filename = plot_folder + "/" + rating + "/"
filename += field_name + "_" + day + "_src" + src_id + "_fit"
residuals[i] = plot_spectrum(velo, opacity, fit_amps, fit_fwhms, fit_means, name, filename, ('pdf', 'png'))
print("Residual RMS mean {:0.4} median {:0.4} sd {:0.4} for alphas {},{}".format(np.mean(residuals), np.median(residuals),
np.std(residuals), alpha1, alpha2))
def plot_components(spectra, data_decomposed):
fits_filename = 'hi4pi-total-car.fits'
hdulist = fits.open(fits_filename, memmap=True)
image = hdulist[0].data
header = hdulist[0].header
fig = aplpy.FITSFigure(image, header)
fig.set_theme('publication')
# fig.show_colorscale(vmin=vmin, vmax=np.max(image), cmap="jet")
fig.add_colorbar()
fig.save('magmo-map-comp.pdf')
fig.close()
def get_samples(data, rating_count=[4, 1, 1], ratings='ABC'):
indexes = []
found_spectra = [0,0,0]
print (len(data['rating']))
for i in range(len(data['rating'])):
print (data['rating'][i])
        # str.find returns -1 for ratings outside the requested set (index() would raise)
        key = ratings.find(data['rating'][i])
if key >= 0 and found_spectra[key] < rating_count[key]:
indexes.append(i)
found_spectra[key] += 1
return indexes
def output_decomposition(spectra, out_filename, folder, data_filename, alpha1, alpha2):
# For each spectra:
# Output plots
# Output component catalogue
max_results = 6 if len(spectra) > 6 else len(spectra)
print(max_results)
data = pickle.load(open(data_filename))
data_decomposed = pickle.load(open(out_filename))
index_values = get_samples(data)
print (index_values, len(data_decomposed['fwhms_fit']), len(spectra))
fig = plt.figure(0, [12, 12])
i = 0
for index in index_values:
ax = fig.add_subplot(4, 3, i + 1 + ((i // 3) * 3)) # , sharex=True)
#index = i # index_values[i]
x = data['x_values'][index]
#y = 1 - data['data_list'][index]
#y = convert_to_ratio(data['data_list'][index])
y = convert_from_ratio(data['data_list'][index])
fit_fwhms = data_decomposed['fwhms_fit'][index]
fit_means = data_decomposed['means_fit'][index]
fit_amps = data_decomposed['amplitudes_fit'][index]
spectrum = spectra[data['spectrum_idx'][index]]
rating = spectrum['Rating']
name = spectrum['Name'] + " (" + rating + ")"
residual = plot_single_spectrum(ax, x, y, fit_amps, fit_fwhms, fit_means, name)
residual_past_noise = np.maximum(np.absolute(residual) - spectrum['Continuum_SD'], np.zeros(residual.shape))
residual_rms = np.sqrt(np.mean(np.square(residual_past_noise)))
print (name, "has residual RMS (excluding noise) of", residual_rms)
if i % 3 == 0:
ax.set_ylabel('$1 - e^{-\\tau}$')
# Residual plot
ax = fig.add_subplot(4, 3, i + 4 + ((i // 3) * 3))
# frame2 = ax.add_axes((.1, .1, .8, .2))
ax.plot(x, residual, 'or', markerfacecolor='None', markersize=2, markeredgecolor='blue')
ax.plot(x, residual_past_noise, 'or', markerfacecolor='None', markersize=2, markeredgecolor='green')
ax.grid()
# ax.set_xlim(400, 600)
if i >= 3:
ax.set_xlabel('LSR Velocity (km/s)')
i += 1
plt.tight_layout()
magmo.ensure_dir_exists(folder)
plt.savefig(folder + "/magmo-decomp.pdf")
plt.close()
output_component_catalogue(spectra, data, data_decomposed, folder)
output_decomposition_catalogue(folder, spectra, data, data_decomposed, alpha1, alpha2)
plot_spectra(spectra, data, data_decomposed, alpha1, alpha2, folder=folder)
def main():
# Parse command line options
args = parseargs()
start = time.time()
print("#### Started processing MAGMO spectra at %s ####" %
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start)))
# Read in spectra
spectra = read_spectra(args.input)
spectra = filter_spectra(spectra, args.long_min, args.long_max, args.quality)
alpha1_range = (3.5, 4.36)
alpha2_range = (4.36, 9.37)
#for i in range(0,1): #len(alpha1_range)):
for i in range(len(alpha1_range)):
a1 = alpha1_range[i]
a2 = alpha2_range[i]
folder = "run"+ str(i+1)
magmo.ensure_dir_exists(folder)
data_filename = folder + "/" + FILENAME_DATA_GAUSSPY
# Decompose all spectra
if not args.plot_only:
decompose(spectra, folder+"/"+args.output, a1, a2, args.snr_thresh, data_filename)
# Read in result
output_decomposition(spectra, folder+"/"+args.output, folder, data_filename, a1, a2)
# Report
end = time.time()
print('#### Processing completed at %s ####' %
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end)))
print('Processed spectra in %.02f s' %
(end - start))
return 0
if __name__ == '__main__':
exit(main())
|
jd-au/magmo-HI
|
decompose.py
|
Python
|
apache-2.0
| 21,500
|
[
"Gaussian"
] |
f5a6f205b10532cfc6ce5476dfdf50bbfce6e96bc9fef9618b0a4acdf7f2a6c1
|
# pmx Copyright Notice
# ============================
#
# The pmx source code is copyrighted, but you can freely use and
# copy it as long as you don't change or remove any of the copyright
# notices.
#
# ----------------------------------------------------------------------
# pmx is Copyright (C) 2006-2013 by Daniel Seeliger
#
# All Rights Reserved
#
# Permission to use, copy, modify, distribute, and distribute modified
# versions of this software and its documentation for any purpose and
# without fee is hereby granted, provided that the above copyright
# notice appear in all copies and that both the copyright notice and
# this permission notice appear in supporting documentation, and that
# the name of Daniel Seeliger not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# DANIEL SEELIGER DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL DANIEL SEELIGER BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ----------------------------------------------------------------------
__doc__="""
Functions to read gromacs forcefield files
"""
import sys,os,re, copy
from parser import *
import cpp
from atom import Atom
from odict import *
from library import _aliases
from ffparser import *
import _pmx as _p
def get_bond_param(type1,type2,bond_lib):
for entr in bond_lib:
if (type1==entr[0] and type2==entr[1]) or \
(type2==entr[0] and type1==entr[1]):
return entr[2:]
return None
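# Example (hypothetical atom types and parameters):
#   get_bond_param('CT', 'HC', [['CT', 'HC', 0.109, 284512.0]]) -> [0.109, 284512.0]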
def get_angle_param(type1,type2,type3,ang_lib):
for entr in ang_lib:
if (type1 == entr[0] and \
type2 == entr[1] and \
type3 == entr[2]) or \
(type1 == entr[2] and \
type2 == entr[1] and \
type3 == entr[0]):
return entr[3:]
return None
def get_dihedral_param(type1,type2,type3,type4,dih_lib, func):
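    # Lookup order: exact type quadruple (forward or reversed), then entries with
    # a single terminal wildcard 'X', then 'X' at both ends, then two leading
    # wildcards, and finally wildcards on the two central atoms. The first match
    # returns entr[4:], i.e. the function type followed by its parameters.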
for entr in dih_lib:
if (type1 == entr[0] and \
type2 == entr[1] and \
type3 == entr[2] and \
type4 == entr[3] and func==entr[4]) or \
(type1 == entr[3] and \
type2 == entr[2] and \
type3 == entr[1] and \
type4 == entr[0] and func==entr[4]):
return entr[4:]
for entr in dih_lib:
if ('X' == entr[0] and \
type2 == entr[1] and \
type3 == entr[2] and \
type4 == entr[3] and func==entr[4]) or \
(type1 == entr[0] and \
type2 == entr[1] and \
type3 == entr[2] and \
'X' == entr[3] and func==entr[4]):
return entr[4:]
if ('X' == entr[3] and \
type2 == entr[2] and \
type3 == entr[1] and \
type4 == entr[0] and func==entr[4]) or \
(type1 == entr[3] and \
type2 == entr[2] and \
type3 == entr[1] and \
'X' == entr[0] and func==entr[4]):
return entr[4:]
for entr in dih_lib:
if ('X' == entr[0] and \
type2 == entr[1] and \
type3 == entr[2] and \
'X' == entr[3] and func==entr[4]) or \
('X' == entr[3] and \
type2 == entr[2] and \
type3 == entr[1] and \
'X' == entr[0] and func==entr[4]):
return entr[4:]
for entr in dih_lib:
if ('X' == entr[0] and \
'X' == entr[1] and \
type3 == entr[2] and \
type4 == entr[3] and func==entr[4]) or \
(type1 == entr[3] and \
type2 == entr[2] and \
'X' == entr[1] and \
'X' == entr[0] and func==entr[4]):
return entr[4:]
for entr in dih_lib:
        if (type1 == entr[0] and \
'X' == entr[1] and \
'X' == entr[2] and \
type4 == entr[3] and func==entr[4]) or \
(type1 == entr[3] and \
'X' == entr[2] and \
'X' == entr[1] and \
type4 == entr[0] and func==entr[4]):
return entr[4:]
return None
class ITPFile:
def __init__(self, fname = None, ff = 'amber99sb'):
self.name = 'MOL'
self.nrexcl = 3
self.atoms = []
self.pairs = []
self.angles = []
self.dihedrals = []
self.atomtypes = None
self.virtual_sites1 = []
self.virtual_sites2 = []
self.virtual_sites3 = []
self.virtual_sites4 = []
self.has_vsites1 = False
self.has_vsites2 = False
self.has_vsites3 = False
self.has_vsites4 = False
if fname:
self.read(fname, ff = ff)
def read(self,fname, ff = None):
if not hasattr(fname,"readlines"):
lines = open(fname).readlines()
else:
lines = fname.readlines()
lines = kickOutComments(lines,';')
lines = kickOutComments(lines,'#')
self.name, self.nrexcl = read_moleculetype(lines)
self.atoms = read_itp_atoms(lines)
self.bonds = read_itp_bonds(lines)
self.pairs = read_itp_pairs(lines)
self.angles = read_itp_angles(lines)
self.dihedrals = read_itp_dihedrals(lines)
self.atomtypes = read_atomtypes(lines,ff)
self.read_vsites2(lines)
def write(self,fname):
if not hasattr(fname,"write"):
fp = open(fname,"w")
else:
fp = fname
write_itp_moleculetype(fp,self.name,self.nrexcl)
write_itp_atoms(fp, self.atoms)
if self.bonds:
write_itp_bonds(fp, self.bonds)
if self.pairs:
write_itp_pairs(fp, self.pairs)
if self.angles:
write_itp_angles(fp, self.angles)
if self.dihedrals:
write_itp_dihedrals(fp, self.dihedrals)
if self.has_vsites2:
self.write_itp_vsites2(fp)
def write_itp_vsites2(self, fp ):
print >>fp, '[ virtual_sites2 ]'
for v in self.virtual_sites2:
print >>fp, "%8d %8d %8d %s %s" % (v[0].id, v[1].id, v[2].id, v[3], v[4])
def set_name(self, name):
self.name = name
for atom in self.atoms:
atom.resname = name
def as_rtp(self):
for i, bond in enumerate(self.bonds):
id1 = bond[0]
id2 = bond[1]
self.bonds[i][0] = self.atoms[id1-1]
self.bonds[i][1] = self.atoms[id2-1]
for i, angle in enumerate(self.angles):
id1 = angle[0]
id2 = angle[1]
id3 = angle[2]
self.angles[i][0] = self.atoms[id1-1]
self.angles[i][1] = self.atoms[id2-1]
self.angles[i][2] = self.atoms[id3-1]
for i, dih in enumerate(self.dihedrals):
id1 = dih[0]
id2 = dih[1]
id3 = dih[2]
id4 = dih[3]
self.dihedrals[i][0] = self.atoms[id1-1]
self.dihedrals[i][1] = self.atoms[id2-1]
self.dihedrals[i][2] = self.atoms[id3-1]
self.dihedrals[i][3] = self.atoms[id4-1]
for i, vs in enumerate(self.virtual_sites2):
            id1 = vs[0]
            id2 = vs[1]
            id3 = vs[2]
self.virtual_sites2[i][0] = self.atoms[id1-1]
self.virtual_sites2[i][1] = self.atoms[id2-1]
self.virtual_sites2[i][2] = self.atoms[id3-1]
def id2atoms(self):
for i, bond in enumerate(self.bonds):
id1 = bond[0]
id2 = bond[1]
self.bonds[i][0] = self.atoms[id1-1]
self.bonds[i][1] = self.atoms[id2-1]
for i, pairs in enumerate(self.pairs):
id1 = pairs[0]
id2 = pairs[1]
self.pairs[i][0] = self.atoms[id1-1]
self.pairs[i][1] = self.atoms[id2-1]
for i, angle in enumerate(self.angles):
id1 = angle[0]
id2 = angle[1]
id3 = angle[2]
self.angles[i][0] = self.atoms[id1-1]
self.angles[i][1] = self.atoms[id2-1]
self.angles[i][2] = self.atoms[id3-1]
for i, dih in enumerate(self.dihedrals):
id1 = dih[0]
id2 = dih[1]
id3 = dih[2]
id4 = dih[3]
self.dihedrals[i][0] = self.atoms[id1-1]
self.dihedrals[i][1] = self.atoms[id2-1]
self.dihedrals[i][2] = self.atoms[id3-1]
self.dihedrals[i][3] = self.atoms[id4-1]
def write_rtp(self, filename ='mol.rtp'):
fp = open(filename,'w')
print >>fp, '[ %s ]' % self.name
print >>fp, ' [ atoms ]'
for atom in self.atoms:
print >>fp, "%8s %-12s %8.6f %5d" % \
(atom.name,atom.atomtype,atom.q,atom.cgnr)
print >>fp, '\n [ bonds ]'
for bond in self.bonds:
if len(bond)<=3:
print >>fp, "%8s %8s "% \
(bond[0].name, bond[1].name)
else:
print >>fp, "%8s %8s %8.4f %8.4f "% \
(bond[0].name, bond[1].name, bond[3], bond[4])
print >>fp, '\n [ angles ]'
for angle in self.angles:
if len(angle)<=4:
print >>fp, "%8s %8s %8s "% \
(angle[0].name, angle[1].name,angle[2].name)
elif angle[3]==5: # U-B
print >>fp, "%8s %8s %8s %8.4f %8.4f %8.4f %8.4f "% \
(angle[0].name, angle[1].name,angle[2].name,
angle[4],angle[5],angle[6],angle[7])
else:
print >>fp, "%8s %8s %8s %8.4f %8.4f "% \
(angle[0].name, angle[1].name,angle[2].name,angle[4],angle[5])
print >>fp, '\n [ dihedrals ]'
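        # GROMACS dihedral function types written below: 1, 4 and 9 are periodic
        # dihedrals (phi0, k, multiplicity), 3 is Ryckaert-Bellemans (six C
        # coefficients) and 2 is a harmonic improper.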
for dih in self.dihedrals:
if len(dih)<=5: # no parameters
print >>fp, "%8s %8s %8s %s "% \
(dih[0].name, dih[1].name,dih[2].name, dih[3].name)
elif dih[4]==3:
print >>fp, "%8s %8s %8s %s %8.4f %8.4f %8.4f %8.4f %8.4f %8.4f "% \
(dih[0].name, dih[1].name,dih[2].name, dih[3].name,
dih[5], dih[6], dih[7], dih[8], dih[9], dih[10])
elif (dih[4]==1) or (dih[4]==4) or (dih[4]==9):
print >>fp, "%8s %8s %8s %s %8.4f %8.4f %8.4f "% \
(dih[0].name, dih[1].name,dih[2].name, dih[3].name,
dih[5], dih[6], dih[7])
elif (dih[4]==2) or (dih[4]==11):
print >>fp, "%8s %8s %8s %s %8.4f %8.4f "% \
(dih[0].name, dih[1].name,dih[2].name, dih[3].name,
dih[5], dih[6])
# print >>fp, '\n [ impropers ]'
# for dih in self.impropers:
# if len(dih)<=5: # no parameters
# print >>fp, "%8s %8s %8s %s "% \
# (dih[0].name, dih[1].name,dih[2].name, dih[3].name)
# elif dih[4]==2:
# print >>fp, "%8s %8s %8s %s %8.4f %8.4f "% \
# (dih[0].name, dih[1].name,dih[2].name, dih[3].name, dih[5], dih[6])
# elif dih[4]==4:
# print >>fp, "%8s %8s %8s %s %8.4f %8.4f %8.4f "% \
# (dih[0].name, dih[1].name,dih[2].name, dih[3].name, dih[5], dih[6], dih[7])
def read_vsites2(self, lines):
starts = []
dih = []
for i, line in enumerate(lines):
if line.strip().startswith('[ virtual_sites2 ]'):
starts.append(i)
if starts:
self.has_vsites2 = True
for s in starts:
lst = readSection(lines[s:],'[ virtual_sites2 ]','[')
for line in lst:
entr = line.split()
idx = [int(x) for x in entr[:3]]
func = int(entr[3])
try:
rest = ' '.join(entr[4:])
except:
rest = ''
self.virtual_sites2.append([self.atoms[idx[0]-1],\
self.atoms[idx[1]-1],\
self.atoms[idx[2]-1],\
func,rest])
def read_vsites3(self, lines):
starts = []
dih = []
for i, line in enumerate(lines):
if line.strip().startswith('[ virtual_sites3 ]'):
starts.append(i)
if starts:
self.has_vsites3 = True
for s in starts:
lst = readSection(lines[s:],'[ virtual_sites3 ]','[')
for line in lst:
entr = line.split()
idx = [int(x) for x in entr[:4]]
func = int(entr[4])
try:
rest = ' '.join(entr[5:])
except:
rest = ''
self.virtual_sites3.append([self.atoms[idx[0]-1],\
self.atoms[idx[1]-1],\
self.atoms[idx[2]-1],\
self.atoms[idx[3]-1],\
func,rest])
def read_vsites4(self, lines):
starts = []
dih = []
for i, line in enumerate(lines):
if line.strip().startswith('[ virtual_sites4 ]'):
starts.append(i)
if starts:
self.has_vsites4 = True
for s in starts:
lst = readSection(lines[s:],'[ virtual_sites4 ]','[')
for line in lst:
entr = line.split()
idx = [int(x) for x in entr[:5]]
func = int(entr[5])
try:
rest = ' '.join(entr[6:])
except:
rest = ''
self.virtual_sites4.append([self.atoms[idx[0]-1],\
self.atoms[idx[1]-1],\
self.atoms[idx[2]-1],\
self.atoms[idx[3]-1],\
self.atoms[idx[4]-1],\
func,rest])
class Topology:
def __init__(self, filename = None, ff = 'amber99sb', itp=False, top = None):
self.filename = filename
self.is_itp = itp
if self.is_itp and top == None:
print "Error:You have to provide the .top file if you read a .itp"
sys.exit(1)
if top is not None:
self.topfile = top
else:
self.topfile = self.filename
self.atoms = []
self.bonds = []
self.constraints = []
self.have_constraints = False
self.pairs = []
self.angles = []
self.dihedrals = []
self.virtual_sites3 = []
self.virtual_sites4 = []
self.has_vsites3 = False
self.has_vsites4 = False
self.molecules = []
self.system = ''
self.qA = 0.
self.qB = 0.
if filename is not None:
self.read_top(filename, ff = ff)
l = cpp_parse_file(self.topfile)
l = kickOutComments(l,'#')
l = kickOutComments(l,';')
self.BondedParams = BondedParser( l )
self.NBParams = NBParser( l )
# self.types, self.bond_lib, self.ang_lib, self.dih_lib = \
# read_ff(self.topfile,ff=ff)
self.assign_fftypes()
def read_top(self, fname, ff = 'amber99sb'):
if not hasattr(fname,"readlines"):
lines = open(fname).readlines()
else:
lines = fname.readlines()
lines = kickOutComments(lines,';')
if not self.is_itp:
self.read_header(lines)
self.read_footer(lines)
lines = kickOutComments(lines,'#')
self.read_moleculetype(lines)
self.read_atoms(lines)
if not self.atoms:
self.no_itp = False
else:
self.read_bonds(lines)
self.read_constraints(lines)
self.read_pairs(lines)
self.read_angles(lines)
self.read_dihedrals(lines)
self.read_vsites3(lines)
self.read_vsites4(lines)
self.no_itp = True
if not self.is_itp:
self.read_system(lines)
self.read_molecules(lines)
def assign_forcefield_parameters(self, eval_cpp = True):
if eval_cpp:
proc = cpp.PreProcessor()
proc(self.filename)
self.cpp_dic = proc.cpp_namespace
for d in self.dihedrals:
if len(d) == 6:
if not hasattr(d[5],"append"):
if self.cpp_dic.has_key(d[5]):
d[5] = [float(x) for x in self.cpp_dic[d[5]].split()]
elif len(d) == 7:
if not hasattr(d[5],"append"):
if self.cpp_dic.has_key(d[5]):
d[5] = [float(x) for x in self.cpp_dic[d[5]].split()]
if not hasattr(d[6],"append"):
if self.cpp_dic.has_key(d[6]):
d[6] = [float(x) for x in self.cpp_dic[d[6]].split()]
self.make_bond_params()
self.make_angle_params()
self.make_dihedral_params()
def get_qA(self):
qA = 0
for atom in self.atoms:
qA+=atom.q
return round(qA,3)
def get_qB(self):
qB = 0
for atom in self.atoms:
if atom.atomtypeB is not None:
qB+=atom.qB
else:
qB+=atom.q
return round(qB,3)
def make_Bstates(self, subset = None):
if subset:
atomlist = subset
else:
atomlist = self.atoms
for atom in atomlist:
atom.atomtypeB = atom.atomtype
atom.mB = atom.m
atom.qB = atom.q
for i, b in enumerate(self.bonds):
if len(b) > 3:
if self.bonds[i][0] in atomlist or \
self.bonds[i][1] in atomlist:
param = copy.deepcopy(b[-1])
self.bonds[i].append(param)
for i, ang in enumerate(self.angles):
if len(ang) > 4:
if self.angles[i][0] in atomlist or \
self.angles[i][1] in atomlist or \
self.angles[i][2] in atomlist:
param = copy.deepcopy(ang[-1])
self.angles[i].append(param)
for i, dih in enumerate(self.dihedrals):
if len(dih) > 5:
if self.dihedrals[i][0] in atomlist or \
self.dihedrals[i][1] in atomlist or \
self.dihedrals[i][2] in atomlist or \
self.dihedrals[i][3] in atomlist:
param = copy.deepcopy(dih[-1])
self.dihedrals[i].append(param)
def set_Astate_zero(self):
for i, b in enumerate(self.bonds):
for k in range(len(self.bonds[i][-2])):
self.bonds[i][-2][k] = 0
for i, ang in enumerate(self.angles):
for k in range(len(self.angles[i][-2])):
self.angles[i][-2][k] = 0
for i, dih in enumerate(self.dihedrals):
if dih[4] == 3:
for k in range(len(self.dihedrals[i][-2])):
self.dihedrals[i][-2][k] = 0
elif dih[4] == 1:
self.dihedrals[i][-2][1] = 0
def read_molecules(self,lines):
lst = readSection(lines,'[ molecules ]','[')
self.molecules = []
for line in lst:
entr = line.split()
self.molecules.append([entr[0],int(entr[1])])
def read_system(self,lines):
lst = readSection(lines,'[ system ]','[')
self.system = lst[0].strip()
def read_atoms(self,lines):
lst = readSection(lines,'[ atoms ]','[')
self.atoms = []
for line in lst:
a = topline2atom(line)
self.atoms.append(a)
def read_bonds(self,lines):
lst = readSection(lines,'[ bonds ]','[')
self.bonds = []
for line in lst:
entries = line.split()
if len(entries) == 3:
idx = [int(x) for x in line.split()]
self.bonds.append([self.atoms[idx[0]-1], self.atoms[idx[1]-1], idx[2]])
elif len(entries) == 5:
idx = [int(x) for x in entries[:3]]
l = float(entries[3])
k = float(entries[4])
self.bonds.append([self.atoms[idx[0]-1], self.atoms[idx[1]-1], idx[2], [l,k]])
elif len(entries) == 7:
idx = [int(x) for x in entries[:3]]
lA = float(entries[3])
kA = float(entries[4])
lB = float(entries[5])
kB = float(entries[6])
self.bonds.append([self.atoms[idx[0]-1], self.atoms[idx[1]-1], idx[2], [lA,kA],[lB,kB]])
def read_pairs(self,lines):
lst = readSection(lines,'[ pairs ]','[')
self.pairs = []
for line in lst:
idx = [int(x) for x in line.split()]
self.pairs.append([self.atoms[idx[0]-1], self.atoms[idx[1]-1], idx[2]])
def read_constraints(self,lines):
lst = readSection(lines,'[ constraints ]','[')
self.constraints = []
for line in lst:
idx = [int(x) for x in line.split()]
self.constraints.append([self.atoms[idx[0]-1], self.atoms[idx[1]-1], idx[2]])
if self.constraints:
self.have_constraints = True
def read_angles(self, lines):
lst = readSection(lines,'[ angles ]','[')
angles = []
for line in lst:
entries = line.split()
if len(entries) == 4:
idx = [int(x) for x in line.split()]
self.angles.append([self.atoms[idx[0]-1], self.atoms[idx[1]-1], \
self.atoms[idx[2]-1], idx[3]])
elif len(entries) == 6:
idx = [int(x) for x in entries[:4]]
l = float(entries[4])
k = float(entries[5])
self.angles.append([self.atoms[idx[0]-1], self.atoms[idx[1]-1], \
self.atoms[idx[2]-1], idx[3], [l,k]])
elif len(entries) == 8:
idx = [int(x) for x in entries[:4]]
lA = float(entries[4])
kA = float(entries[5])
lB = float(entries[6])
kB = float(entries[7])
self.angles.append([self.atoms[idx[0]-1], self.atoms[idx[1]-1], \
self.atoms[idx[2]-1], idx[3], [lA,kA],[lB,kB]])
def read_dihedrals(self, lines):
starts = []
dih = []
for i, line in enumerate(lines):
if line.strip().startswith('[ dihedrals ]'):
starts.append(i)
for s in starts:
lst = readSection(lines[s:],'[ dihedrals ]','[')
for line in lst:
entr = line.split()
idx = [int(x) for x in entr[:4]]
func = int(entr[4])
try:
rest = ' '.join(entr[5:])
except:
rest = ''
self.dihedrals.append([self.atoms[idx[0]-1],\
self.atoms[idx[1]-1],\
self.atoms[idx[2]-1],\
self.atoms[idx[3]-1],\
func,rest])
def read_vsites3(self, lines):
starts = []
dih = []
for i, line in enumerate(lines):
if line.strip().startswith('[ virtual_sites3 ]'):
starts.append(i)
if starts:
self.has_vsites3 = True
for s in starts:
lst = readSection(lines[s:],'[ virtual_sites3 ]','[')
for line in lst:
entr = line.split()
idx = [int(x) for x in entr[:4]]
func = int(entr[4])
try:
rest = ' '.join(entr[5:])
except:
rest = ''
self.virtual_sites3.append([self.atoms[idx[0]-1],\
self.atoms[idx[1]-1],\
self.atoms[idx[2]-1],\
self.atoms[idx[3]-1],\
func,rest])
def read_vsites4(self, lines):
starts = []
dih = []
for i, line in enumerate(lines):
if line.strip().startswith('[ virtual_sites4 ]'):
starts.append(i)
if starts:
self.has_vsites4 = True
for s in starts:
lst = readSection(lines[s:],'[ virtual_sites4 ]','[')
for line in lst:
entr = line.split()
idx = [int(x) for x in entr[:5]]
func = int(entr[5])
try:
rest = ' '.join(entr[6:])
except:
rest = ''
self.virtual_sites4.append([self.atoms[idx[0]-1],\
self.atoms[idx[1]-1],\
self.atoms[idx[2]-1],\
self.atoms[idx[3]-1],\
self.atoms[idx[4]-1],\
func,rest])
def read_header(self, lines):
ret = []
for line in lines:
if not line.strip().startswith('[') and \
not line.strip().startswith('#ifdef POSRES'):
ret.append(line.rstrip())
else:
break
self.header = ret
def read_footer(self, lines):
for line in lines:
if line.strip().startswith('#ifdef POSRES'):
idx = lines.index(line)
self.footer = [l.rstrip() for l in lines[idx:]]
break
idx = self.footer.index('[ system ]')
self.footer = self.footer[:idx]
def read_moleculetype(self, lines):
l = readSection(lines,'[ moleculetype ]','[')
if l:
            self.name, self.nrexcl = l[0].split()[0], int(l[0].split()[1])
def set_molecule(self, molname, n):
mol_exists = False
for i, mol in enumerate(self.molecules):
if mol[0] == molname:
self.molecules[i][1] = n
mol_exists = True
if not mol_exists:
self.molecules.append([molname,n])
def del_molecule(self, molname):
if not hasattr(molname,"append"):
molname = [molname]
new = []
for m in self.molecules:
if m[0] not in molname:
new.append(m)
self.molecules = new
def write_top(self, outfile, stateBonded = 'AB', stateTypes = 'AB', stateQ = 'AB',
scale_mass = False, dummy_qA = 'on', dummy_qB = 'on', target_qB = [],
full_morphe = True):
fp = open(outfile,'w')
if not self.is_itp:
self.write_header(fp)
if self.no_itp:
self.write_moleculetype(fp)
self.write_atoms(fp, charges = stateQ, atomtypes = stateTypes, dummy_qA = dummy_qA,
dummy_qB = dummy_qB, scale_mass = scale_mass,
target_qB = target_qB, full_morphe = full_morphe)
self.write_bonds(fp, state = stateBonded)
if self.have_constraints:
self.write_constraints(fp)
self.write_pairs(fp)
self.write_angles(fp, state = stateBonded)
self.write_dihedrals(fp, state = stateBonded)
if self.has_vsites3:
self.write_vsites3(fp)
if self.has_vsites4:
self.write_vsites4(fp)
if not self.is_itp:
self.write_footer(fp)
self.write_system(fp)
self.write_molecules(fp)
fp.close()
def write_header(self,fp):
for line in self.header:
print >>fp, line
def write_footer(self,fp):
for line in self.footer:
print >>fp, line
def write_moleculetype(self, fp):
print >>fp, '[ moleculetype ]'
print >>fp, '; Name nrexcl'
print >>fp, '%s %d' % (self.name,self.nrexcl)
def __atoms_morphe( self, atoms ):
for atom in atoms:
if atom.atomtypeB is not None and (atom.q!=atom.qB or atom.m != atom.mB): return True
return False
def __atomtypes_morphe(self, atoms):
for atom in atoms:
if atom.atomtypeB is not None and atom.atomtype != atom.atomtypeB: return True
return False
def __is_perturbed_residue( self, residue ):
if self.__atoms_morphe(residue.atoms): return True
return False
def __last_perturbed_atom(self, r):
max_order = 0
last_atom = None
for atom in r.atoms:
if self.__atoms_morphe([atom]) and atom.name not in ['N','CA','C','O','H']:
if not atom.atomtype.startswith('DUM') and not atom.atomtypeB.startswith('DUM'):
last_atom = atom
if last_atom == None:
print >>sys.stderr, 'Error: Could not find a perturbed atom to put rest charges on !'
sys.exit(1)
return last_atom
def check_special_dihedrals( self ):
for d in self.dihedrals:
A, B = self.check_case( d[:4] )
if ('D' not in A and 'D' in B) or ('D' in A and 'D' not in B):
print d[0].name, d[1].name, d[2].name, d[3].name, d[4:], A, B
def write_atoms(self, fp, charges = 'AB', atomtypes = 'AB', dummy_qA = 'on',\
dummy_qB = 'on', scale_mass=True, target_qB = [], full_morphe = True):
self.qA = 0
self.qB = 0
for r in self.residues:
if self.__is_perturbed_residue(r):
target_chargeB = target_qB.pop(0)
print 'Making target charge %g for residue %s' % (round(target_chargeB,5), r.resname)
for atom in r.atoms:
if self.__atoms_morphe([atom]):
if charges == 'AB': # we move the charges from state A to state B
atom.qqA = atom.q
atom.qqB = atom.qB
if not full_morphe and (atom.q*atom.qB < 0 or atom.atomtype!=atom.atomtypeB): # we change a charge from + to - or vice versa
atom.qqB = 0
atom.to_be_morphed = True
else:
atom.qqB = atom.qB
elif charges == 'AA': # we keep the charges
atom.qqA = atom.q
atom.qqB = atom.q
elif charges == 'BB': # take charges of state B
if not full_morphe:
if hasattr(atom,"contQ"):
atom.qqA = atom.contQ
atom.qqB = atom.qqA
if hasattr(atom,"to_be_morphed"): # this a big q morphe. has been set to zero before
if atomtypes == 'BB':
atom.qqA = 0
atom.qqB = atom.qB
elif atomtypes == 'AB':
atom.qqA = 0
atom.qqB = 0
elif not hasattr(atom,"contQ") and not hasattr(atom,"to_be_morphed") :
atom.qqA = atom.qB
atom.qqB = atom.qB
else:
atom.qqA = atom.qB
atom.qqB = atom.qB
if atom.atomtype.startswith('DUM') or atom.atomtypeB.startswith('DUM'):
if dummy_qA == 'off':
atom.qqA = 0.
if dummy_qB == 'off':
atom.qqB = 0.
else:
atom.qqA = atom.q
atom.qqB = atom.q
qA_tot = sum(map(lambda a: a.qqA, r.atoms))
qB_tot = sum(map(lambda a: a.qqB, r.atoms))
if qB_tot != target_chargeB:
print 'State B has total charge of %g' % round(qB_tot,5)
print 'Applying charge correction to ensure integer charges'
latom = self.__last_perturbed_atom(r)
print 'Selecting atom %d-%s (%s) as perturbed atom with highest order' % (latom.id,latom.name, latom.resname)
newqB = latom.qqB-(qB_tot-target_chargeB)
print 'Changing chargeB of atom %s from %g to %g' % (latom.name, latom.qqB,newqB)
latom.qqB = newqB
qB_tot = sum(map(lambda a: a.qqB, r.atoms))
print 'New total charge of B-state is %g' % round(qB_tot,5)
else:
print 'No corrections applied to ensure integer charges'
print >>fp,'\n [ atoms ]'
print >>fp, '; nr type resnr residue atom cgnr charge mass typeB chargeB massB'
al = self.atoms
for atom in al:
if self.__atoms_morphe([atom]):
if atomtypes == 'AB':
atA = atom.atomtype
atB = atom.atomtypeB
mA = atom.m
mB = atom.mB
elif atomtypes == 'AA':
atA = atom.atomtype
atB = atom.atomtype
mA = atom.m
mB = atom.m
elif atomtypes == 'BB':
atA = atom.atomtypeB
atB = atom.atomtypeB
mA = atom.mB
mB = atom.mB
if scale_mass:
if atA.startswith('DUM'):
mA = 1.
if atB.startswith('DUM'):
mB = 1.
if hasattr(atom,"qqB"):
qqB = atom.qqB
if hasattr(atom,"contQ") and not full_morphe:
qqA = atom.contQ
else:
qqA = atom.qqA
else:
qqA = atom.q
qqB = atom.qB
print >>fp , '%6d%11s%7d%7s%7s%7d%11.6f%11.4f%11s%11.6f%11.4f' % \
(atom.id, atA, atom.resnr, atom.resname, atom.name, \
atom.cgnr, qqA, mA, atB, qqB, mB)
self.qA+=qqA
self.qB+=qqB
else:
print >>fp , '%6d%11s%7d%7s%7s%7d%11.6f%11.4f' % \
(atom.id, atom.atomtype, atom.resnr, atom.resname, atom.name, \
atom.cgnr, atom.q, atom.m)
self.qA+=atom.q
self.qB+=atom.q
# remember the corrected B-state charge of latom; it is reused as the A-state charge (contQ) in a follow-up topology
if not full_morphe:
try:
latom.contQ = latom.qqB
except:
pass
## def write_atoms(self,fp):
## print >>fp, '[ atoms ]'
## print >>fp,'; nr type resnr residue atom cgnr charge mass typeB chargeB'
## for atom in self.atoms:
## if atom.atomtypeB is not None:
## print >>fp , '%6d%11s%7d%7s%7s%7d%11.6f%11.4f%11s%11.6f%11.4f' % \
## (atom.id, atom.atomtype, atom.resnr, atom.resname, atom.name, \
## atom.cgnr, atom.q, atom.m, atom.atomtypeB, atom.qB, atom.mB)
## else:
## print >>fp , '%6d%11s%7d%7s%7s%7d%11.6f%11.4f' % \
## (atom.id, atom.atomtype, atom.resnr, atom.resname, atom.name, \
## atom.cgnr, atom.q, atom.m)
def write_bonds(self,fp, state = 'AB'):
print >>fp,'\n [ bonds ]'
print >>fp, '; ai aj funct c0 c1 c2 c3'
for b in self.bonds:
if len(b) == 3:
print >>fp, '%6d %6d %6d' % (b[0].id, b[1].id, b[2])
elif len(b) == 4:
s = ' '+' '.join([str(x) for x in b[3]])
print >>fp, '%6d %6d %6d %s' % (b[0].id, b[1].id, b[2], s)
else:
lA = b[3][1]
kA = b[3][2]
lB = b[4][1]
kB = b[4][2]
if state == 'AB':
print >>fp, '%6d %6d %6d %14.6f %14.6f %14.6f %14.6f' % \
(b[0].id, b[1].id, b[2],lA,kA, lB, kB)
elif state == 'AA':
print >>fp, '%6d %6d %6d %14.6f %14.6f %14.6f %14.6f' % \
(b[0].id, b[1].id, b[2],lA, kA, lA, kA)
elif state == 'BB':
print >>fp, '%6d %6d %6d %14.6f %14.6f %14.6f %14.6f' % \
(b[0].id, b[1].id, b[2],lB, kB, lB, kB)
def write_pairs(self, fp):
# CHECK HOW THIS GOES WITH B-STATES
print >>fp,'\n [ pairs ]'
print >>fp, '; ai aj funct c0 c1 c2 c3'
for p in self.pairs:
print >>fp, '%6d %6d %6d' % (p[0].id, p[1].id, p[2])
def write_constraints(self, fp):
# CHECK HOW THIS GOES WITH B-STATES
print >>fp,'\n [ constraints ]'
print >>fp, '; ai aj funct c0 c1 c2 c3'
for p in self.constraints:
print >>fp, '%6d %6d %6d' % (p[0].id, p[1].id, p[2])
def write_angles(self,fp, state='AB'):
print >>fp,'\n [ angles ]'
print >>fp, '; ai aj ak funct c0 c1 c2 c3'
for ang in self.angles:
if len(ang) == 4:
print >>fp, '%6d %6d %6d %6d' % (ang[0].id, ang[1].id, ang[2].id,ang[3])
else:
if state == 'AB':
print >>fp, '%6d %6d %6d %6d %14.6f %14.6f %14.6f %14.6f ; %s %s %s' % \
(ang[0].id, ang[1].id, ang[2].id,ang[3], ang[4][1], \
ang[4][2], ang[5][1], ang[5][2], ang[0].name, ang[1].name, ang[2].name)
elif state == 'AA':
print >>fp, '%6d %6d %6d %6d %14.6f %14.6f %14.6f %14.6f ; %s %s %s' % \
(ang[0].id, ang[1].id, ang[2].id,ang[3], ang[4][1], \
ang[4][2], ang[4][1], ang[4][2], ang[0].name, ang[1].name, ang[2].name)
elif state == 'BB':
print >>fp, '%6d %6d %6d %6d %14.6f %14.6f %14.6f %14.6f ; %s %s %s' % \
(ang[0].id, ang[1].id, ang[2].id,ang[3], ang[5][1], \
ang[5][2], ang[5][1], ang[5][2], ang[0].name, ang[1].name, ang[2].name)
def write_dihedrals(self, fp, state='AB'):
print >>fp,'\n [ dihedrals ]'
print >>fp,'; ai aj ak al funct c0 c1 c2 c3 c4 c5'
for d in self.dihedrals:
if len(d) == 5:
print >>fp, "%6d %6d %6d %6d %4d" % ( d[0].id, d[1].id, d[2].id, d[3].id, d[4])
elif len(d) == 6:
print >>fp, "%6d %6d %6d %6d %4d %s" % ( d[0].id, d[1].id, d[2].id, d[3].id, d[4], d[5])
elif len(d) == 7:
A, B = self.check_case(d[:4])
ast = d[5]
bs = d[6]
if ast == None or bs == None:
print d[0].name, d[1].name, d[2].name, d[3].name, d[0].atomtype, d[1].atomtype, d[2].atomtype, d[3].atomtype, d[0].atomtypeB, d[1].atomtypeB, d[2].atomtypeB, d[3].atomtypeB
print d[0].type, d[1].type, d[2].type, d[3].type, d[0].typeB, d[1].typeB, d[2].typeB, d[3].typeB
if ast == 'NULL':
if d[4] == 3: # Ryckaert-Bellemans
ast = ' '.join(["%g" % x for x in [0,0,0,0,0,0]])
elif d[4] == 1:
ast = ' '.join(["%g" % x for x in [0,0,0]])
elif ast != 'NULL' and hasattr(ast,"append"):
ast = ' '.join(["%g" % x for x in d[5][1:]])
if bs == 'NULL':
if d[4] == 3:
bs = ' '.join(["%g" % x for x in [0,0,0,0,0,0]])
elif d[4] == 1:
bs = ' '.join(["%g" % x for x in [0,0,0]])
elif bs !='NULL' and hasattr(bs,"append"):
bs = ' '.join(["%g" % x for x in d[6][1:]])
if state == 'AB':
print >>fp, "%6d %6d %6d %6d %4d %s %s ; %s %s %s %s %s %s %s %s (%s->%s)" % \
( d[0].id, d[1].id, d[2].id, d[3].id, d[4], ast, bs, d[0].name,d[1].name,d[2].name,d[3].name, \
d[0].type,d[1].type,d[2].type,d[3].type,A,B)
elif state == 'AA':
print >>fp, "%6d %6d %6d %6d %4d %s %s ; %s %s %s %s %s %s %s %s (%s->%s)" % \
( d[0].id, d[1].id, d[2].id, d[3].id, d[4], ast, ast, d[0].name,d[1].name,d[2].name,d[3].name, \
d[0].type,d[1].type,d[2].type,d[3].type, A,B)
elif state == 'BB':
print >>fp, "%6d %6d %6d %6d %4d %s %s ; %s %s %s %s %s %s %s %s (%s->%s)" % \
( d[0].id, d[1].id, d[2].id, d[3].id, d[4], bs, bs, d[0].name,d[1].name,d[2].name,d[3].name, \
d[0].type,d[1].type,d[2].type,d[3].type, A,B)
def check_case(self, atoms):
A = ''
B = ''
for a in atoms:
if a.atomtype.startswith('DUM'): A += 'D'
else: A += 'A'
if a.atomtypeB is not None:
if a.atomtypeB.startswith('DUM'): B += 'D'
else: B += 'A'
else: B += 'A'
return A, B
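# Illustrative example of the state strings returned by check_case() (not
# original code; the dihedral d is assumed to be an entry of self.dihedrals):
#
#   A, B = self.check_case(d[:4])
#   # e.g. A == 'AADA', B == 'AAAA': one character per atom, 'D' if its type
#   # starts with 'DUM' in that state and 'A' otherwise, so here the third atom
#   # is a dummy in state A but a real atom in state B.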
def write_vsites3(self, fp):
print >>fp,'\n [ virtual_sites3 ]'
print >>fp,'; ai aj ak al funct c0 c1'
for vs in self.virtual_sites3:
if len(vs) == 6:
print >>fp, "%6d %6d %6d %6d %4d" % ( vs[0].id, vs[1].id, vs[2].id, vs[3].id, vs[4])
else:
sys.stderr.write('EEK! Something went wrong while writing virtual_sites3!!!!\n')
print vs
sys.exit(1)
def write_vsites4(self, fp):
print >>fp,'\n [ virtual_sites4 ]'
print >>fp,'; ai aj ak al am funct c0 c1 c2'
for vs in self.virtual_sites4:
if len(vs) == 7:
print >>fp, "%6d %6d %6d %6d %6d %4d" % ( vs[0].id, vs[1].id, vs[2].id, vs[3].id, vs[4].id, vs[5])
else:
sys.stderr.write('EEK! Something went wrong while writing virtual_sites4!!!!\n')
print vs
sys.exit(1)
def write_system(self,fp):
print >>fp, '[ system ]'
print >>fp, self.system
def write_molecules(self,fp):
print >>fp, '[ molecules ]'
for mol, num in self.molecules:
print >>fp, "%s %d" % (mol,num)
def assign_fftypes(self):
for atom in self.atoms:
atom.type = self.NBParams.atomtypes[atom.atomtype]['bond_type']
if atom.atomtypeB is not None:
atom.typeB = self.NBParams.atomtypes[atom.atomtypeB]['bond_type']
else:
atom.typeB = atom.type
def make_bond_params(self):
for i, (at1,at2,func) in enumerate(self.bonds):
param = get_bond_param(at1.type,at2.type,self.bond_lib)
if param is None:
print 'Error! No bonded parameters found! (%s-%s)' % \
(at1.type, at2.type)
sys.exit(1)
self.bonds[i].append(param[1:])
def make_angle_params(self):
for i, (at1, at2, at3, func) in enumerate(self.angles):
param = get_angle_param(at1.type, at2.type, at3.type, self.ang_lib)
if param is None:
print 'Error! No angle parameters found! (%s-%s-%s)' % \
(at1.type, at2.type, at3.type)
sys.exit(1)
self.angles[i].append(param[1:])
def make_dihedral_params(self):
for i, d in enumerate(self.dihedrals):
if d[5]!='': # we have a predefined dihedral
continue
else:
at1, at2, at3, at4, func, dih = d
param = get_dihedral_param(at1.type, at2.type, \
at3.type, at4.type, \
self.dih_lib, func)
if param is None:
print 'Error! No dihedral parameters found! (%s-%s-%s-%s)' % \
(at1.type, at2.type, at3.type, at4.type)
print func, dih
sys.exit(1)
del self.dihedrals[i][-1]
self.dihedrals[i].append(param[1:])
def cpp_parse_file(fn,cpp_defs=[],cpp_path=[os.environ.get('GMXLIB')] ):
defs = []
incs = []
for d in cpp_defs:
defs.append('-D%s' % d)
for i in cpp_path:
incs.append('-I%s' % i)
cmd = 'cpp -traditional %s %s %s ' % (' '.join(defs),' '.join(incs),fn)
return os.popen(cmd,'r').readlines()
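# Usage sketch for cpp_parse_file() (not part of the original module). It shells
# out to the C preprocessor, so 'cpp' must be on the PATH and GMXLIB should point
# at the force-field directory; the file name below is made up:
#
#   lines = cpp_parse_file('ffamber99sb.itp', cpp_defs=['FLEXIBLE'],
#                          cpp_path=[os.environ.get('GMXLIB')])
#   lines = kickOutComments(lines, '#')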
def read_atp(fn):
l = open(fn).readlines()
l = kickOutComments(l,';')
l = parseList('sf',l)
dic = {}
for type,mass in l:
dic[type] = mass
return dic
def read_atomtypes(l, ff= 'amber99sb'):
lst = readSection(l,'[ atomtypes ]','[')
if ff == 'oplsaa':
try:
lst = parseList('ssiffsff',lst)
except:
lst = parseList('sffsff',lst)
elif ff in ['amber03','amber99sb']:
lst = parseList('ssffsff',lst)
elif ('gaff' in ff) or ('cgenff' in ff):
lst = parseList('sffsff',lst)
dic = {}
for line in lst:
dic[line[0]]=line[1:]
return dic
def read_bondtypes(l):
res = []
starts = []
for i, line in enumerate(l):
if line.strip().startswith('[ bondtypes ]'):
starts.append(i)
for s in starts:
lst = readSection(l[s:],'[ bondtypes ]','[')
lst = parseList('ssiff',lst)
res.extend(lst)
return res
def read_angletypes(l):
res = []
starts = []
for i, line in enumerate(l):
if line.strip().startswith('[ angletypes ]'):
starts.append(i)
for s in starts:
lst = readSection(l[s:],'[ angletypes ]','[')
lst = parseList('sssiff',lst)
res.extend(lst)
return res
def read_dihedraltypes(l):
res = []
starts = []
for i, line in enumerate(l):
if line.strip().startswith('[ dihedraltypes ]'):
starts.append(i)
for s in starts:
lst = readSection(l[s:],'[ dihedraltypes ]','[')
try:
lst = parseList('ssssiffffff',lst)
except:
try:
lst = parseList('ssssiffi',lst)
except:
lst = parseList('ssiffi',lst)
res.extend(lst)
return res
def read_ff(fn,ff='amber99sb',cpp_defs = [], cpp_path = [os.environ.get('GMXLIB')]):
l = cpp_parse_file(fn,cpp_defs,cpp_path)
l = kickOutComments(l,'#')
l = kickOutComments(l,';')
atomtypes = read_atomtypes(l,ff=ff)
bt = read_bondtypes(l)
at = read_angletypes(l)
dt = read_dihedraltypes(l)
return (atomtypes,bt,at,dt)
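# Hedged example for read_ff() (not original code; assumes the file below is
# reachable through the cpp include path):
#
#   atomtypes, bondtypes, angletypes, dihedraltypes = read_ff('ffamber99sb.itp',
#                                                             ff='amber99sb')
#   # atomtypes is a dict keyed by atom type name; the other three are lists of
#   # parsed parameter rows in file order.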
def __get_rtp_resnames( lines ):
keys = []
for line in lines:
if line.strip().startswith('['):
if line.strip()[1:-1].strip() not in \
['atoms','bonds','dihedrals','impropers','bondedtypes']:
keys.append( line.strip()[1:-1].strip() )
return keys
def __get_rtp_entry( key, lines ):
r = []
for line in lines:
if line.strip()[1:-1].strip() == key:
idx = lines.index( line )
for line in lines[idx+1:]:
if line.strip().startswith('['):
if line.strip()[1:-1].strip() not in \
['atoms','bonds','dihedrals','impropers']:
break
else:
r.append(line)
else:
r.append(line)
return r
def __read_rtp_atoms(resname, lines ):
atoms = []
for line in lines:
entr = line.split()
if _aliases.has_key( resname ) :
if _aliases[resname].has_key(entr[0]):
entr[0] = _aliases[resname][entr[0]]
entr[2] = float(entr[2])
entr[3] = int(entr[3])
atoms.append(entr)
return atoms
def __read_rtp_bonds( resname, lines ):
bonds = []
for line in lines:
entr = line.split()
if entr[0] not in ['+','-']:
if _aliases.has_key( resname ) :
if _aliases[resname].has_key(entr[0]):
entr[0] = _aliases[resname][entr[0]]
if entr[1] not in ['+','-']:
if _aliases.has_key( resname ) :
if _aliases[resname].has_key(entr[1]):
entr[1] = _aliases[resname][entr[1]]
bonds.append(entr)
return bonds
def __read_rtp_dihedrals( resname, lines ):
diheds = []
for line in lines:
entr = line.split()
if entr[0] not in ['+','-']:
if _aliases.has_key( resname ) :
if _aliases[resname].has_key(entr[0]):
entr[0] = _aliases[resname][entr[0]]
if entr[1] not in ['+','-']:
if _aliases.has_key( resname ) :
if _aliases[resname].has_key(entr[1]):
entr[1] = _aliases[resname][entr[1]]
if entr[2] not in ['+','-']:
if _aliases.has_key( resname ) :
if _aliases[resname].has_key(entr[2]):
entr[2] = _aliases[resname][entr[2]]
if entr[3] not in ['+','-']:
if _aliases.has_key( resname ) :
if _aliases[resname].has_key(entr[3]):
entr[3] = _aliases[resname][entr[3]]
if len(entr) == 5:
diheds.append(entr)
else:
diheds.append(entr+[''])
return diheds
def __read_rtp_impropers( resname, lines ):
improps = []
for line in lines:
entr = line.split()
if entr[0] not in ['+','-']:
if _aliases.has_key( resname ) :
if _aliases[resname].has_key(entr[0]):
entr[0] = _aliases[resname][entr[0]]
if entr[1] not in ['+','-']:
if _aliases.has_key( resname ) :
if _aliases[resname].has_key(entr[1]):
entr[1] = _aliases[resname][entr[1]]
if entr[2] not in ['+','-']:
if _aliases.has_key( resname ) :
if _aliases[resname].has_key(entr[2]):
entr[2] = _aliases[resname][entr[2]]
if entr[3] not in ['+','-']:
if _aliases.has_key( resname ) :
if _aliases[resname].has_key(entr[3]):
entr[3] = _aliases[resname][entr[3]]
if len(entr) == 5:
improps.append(entr)
else:
improps.append(entr+[''])
return improps
def read_rtp( filename ):
rtp_entries = {}
l = open(filename,'r').readlines()
lines = kickOutComments(l,';')
keys = __get_rtp_resnames(lines)
for key in keys:
sys.stderr.write('%s -> %4s\r' % (filename, key))
rtp_lines = __get_rtp_entry( key, lines)
# read atoms
al = readSection(rtp_lines,'[ atoms ]','[')
atoms = __read_rtp_atoms(key, al )
# read bonds
bl = readSection(rtp_lines,'[ bonds ]','[')
bonds = __read_rtp_bonds( key, bl )
# read dihedrals
dl = readSection(rtp_lines,'[ dihedrals ]','[')
diheds = __read_rtp_dihedrals(key, dl)
# read impropers
il = readSection(rtp_lines,'[ impropers ]','[')
improps = __read_rtp_impropers(key, il)
rtp_entries[key] = {
'atoms': atoms,
'bonds': bonds,
'diheds': diheds,
'improps': improps
}
sys.stderr.write('\ndone...\n' )
return rtp_entries
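# Minimal sketch of read_rtp() usage (the file name is an assumption, not
# original code):
#
#   rtp = read_rtp('ffamber99sb.rtp')
#   ala = rtp['ALA']
#   # ala['atoms'], ala['bonds'], ala['diheds'] and ala['improps'] hold the raw
#   # entries of the corresponding .rtp sections for that residue.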
def get_rtp_entry(key, filename = 'ffamber99sb.rtp'):
l = open(filename,'r').readlines()
lines = kickOutComments(l,';')
r = []
idx = 0
for line in lines:
if line.strip()[1:-1].strip() == key:
idx = lines.index(line)
break
for line in lines[idx+1:]:
if line.strip().startswith('['):
if line.strip()[1:-1].strip() not in \
['atoms','bonds','dihedrals','impropers']:
break
else:
r.append(line)
else:
r.append(line)
# read atoms
atoms = []
al = readSection(r,'[ atoms ]','[')
for line in al:
entr = line.split()
entr[2] = float(entr[2])
entr[3] = int(entr[3])
atoms.append(entr)
# read bonds
bonds = []
bl = readSection(r,'[ bonds ]','[')
for line in bl:
entr = line.split()
bonds.append(entr)
# read dihedrals
diheds = []
dl = readSection(r,'[ dihedrals ]','[')
for line in dl:
entr = line.split()
if len(entr) == 5:
diheds.append(entr)
else:
diheds.append(entr+[''])
# read impropers
improps = []
il = readSection(r,'[ impropers ]','[')
for line in il:
entr = line.split()
if len(entr)==5:
improps.append(entr)
else:
improps.append(entr+[''])
return atoms, bonds, diheds, improps
def topline2atom(line):
entr = line.split()
idx = int(entr[0])
atomtype = entr[1]
resnr = int(entr[2])
resname = entr[3]
name = entr[4]
cgnr = int(entr[5])
q = float(entr[6])
m = float(entr[7])
try:
atomtypeB = entr[8]
qB = float(entr[9])
mB = float(entr[10])
except:
atomtypeB = None
qB = None
mB = None
a = Atom(id=idx,atomtype=atomtype,\
resnr = resnr, resname = resname,\
name = name, cgnr = cgnr, q = q, \
m = m, atomtypeB = atomtypeB, \
qB = qB, mB = mB)
return a
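# Example of parsing a single [ atoms ] line with topline2atom() (illustrative
# only; the line is made up and the Atom class is assumed to store the keyword
# arguments as attributes):
#
#   line = '  1  CT  1  ALA  CA  1  0.0337  12.01'
#   a = topline2atom(line)
#   # a.id == 1, a.atomtype == 'CT', a.q == 0.0337, a.atomtypeB is None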
def read_itp_atoms(lines):
lst = readSection(lines,'[ atoms ]','[')
al = []
for line in lst:
a = topline2atom(line)
al.append(a)
return al
def write_itp_atoms(fp,al):
print >>fp, '[ atoms ]'
print >>fp,'; nr type resnr residue atom cgnr charge mass typeB chargeB'
for atom in al:
if atom.atomtypeB is not None:
print >>fp , '%6d %11s %7d%7s%7s%7d%11.6f%11.4f %11s%11.6f%11.4f' % \
(atom.id, atom.atomtype, atom.resnr, atom.resname, atom.name, \
atom.cgnr, atom.q, atom.m, atom.atomtypeB, atom.qB, atom.mB)
else:
print >>fp , '%6d %11s %7d%7s%7s%7d%11.6f%11.4f' % \
(atom.id, atom.atomtype, atom.resnr, atom.resname, atom.name, \
atom.cgnr, atom.q, atom.m)
def write_itp_bonds(fp,bonds):
print >>fp, '[ bonds ]'
for b in bonds:
if isinstance(b[0],Atom):
tmp = [b[0].id, b[1].id]+b[2:]
else:
tmp = b
s = [str(item) for item in tmp]
out = ''
for item in s:
if len(item) > 6:
form="%%%ds " % len(item)
out += form % item
elif len(item) > 0:
out += "%6s " % item
print >>fp, out
def write_itp_pairs(fp,pairs):
print >>fp, '[ pairs ]'
for p in pairs:
if isinstance(p[0],Atom):
tmp = [p[0].id, p[1].id]+p[2:]
else:
tmp = p
s = [str(item) for item in tmp]
out = ''
for item in s:
if len(item) > 6:
form="%%%ds " % len(item)
out += form % item
elif len(item) > 0:
out += "%6s " % item
print >>fp, out
def write_itp_angles(fp,angles):
print >>fp, '[ angles ]'
for a in angles:
if isinstance(a[0],Atom):
tmp = [a[0].id, a[1].id, a[2].id]+a[3:]
else:
tmp = a
s = [str(item) for item in tmp]
out = ''
for item in s:
if len(item) > 6:
form="%%%ds " % len(item)
out += form % item
elif len(item) > 0:
out += "%6s " % item
print >>fp, out
def write_itp_dihedrals(fp,dihedrals):
print >>fp, '[ dihedrals ]'
for d in dihedrals:
if isinstance(d[0],Atom):
tmp = [d[0].id, d[1].id, d[2].id, d[3].id]+d[4:]
else:
tmp = d
s = [str(item) for item in tmp]
out = ''
for item in s:
if len(item) > 6:
form="%%%ds " % len(item)
out += form % item
elif len(item) > 0:
out += "%6s " % item
print >>fp, out
def write_itp_moleculetype(fp,name,nrexcl):
print >>fp, '[ moleculetype ]'
print >>fp, '; Name nrexcl'
print >>fp, '%s %d' % (name,nrexcl)
def read_itp_bonds(lines):
lst = readSection(lines,'[ bonds ]','[')
bonds = []
for line in lst:
entr = line.split()
b0 = int(entr[0])
b1 = int(entr[1])
bt = int(entr[2])
if len(entr) > 3:
params = [float(x) for x in entr[3:]]
else:
params = []
bonds.append([b0,b1,bt]+params)
return bonds
def read_itp_pairs(lines):
lst = readSection(lines,'[ pairs ]','[')
pairs = []
for line in lst:
entr = line.split()
b0 = int(entr[0])
b1 = int(entr[1])
bt = int(entr[2])
pairs.append([b0,b1,bt])
return pairs
def read_itp_angles(lines):
lst = readSection(lines,'[ angles ]','[')
angles = []
for line in lst:
entr = line.split()
b0 = int(entr[0])
b1 = int(entr[1])
b2 = int(entr[2])
bt = int(entr[3])
if len(entr) > 4:
params = [float(x) for x in entr[4:]]
else:
params = []
angles.append([b0,b1,b2,bt]+params)
return angles
def read_itp_dihedrals(lines):
starts = []
dihedrals = []
for i, line in enumerate(lines):
if line.strip().startswith('[ dihedrals ]'):
starts.append(i)
for s in starts:
lst = readSection(lines[s:],'[ dihedrals ]','[')
for line in lst:
entr = line.split()
b0 = int(entr[0])
b1 = int(entr[1])
b2 = int(entr[2])
b3 = int(entr[3])
bt = int(entr[4])
if len(entr) > 5:
try:
params = [float(x) for x in entr[5:]]
except:
params = entr[5:] # if we have defines
else:
params = []
dihedrals.append([b0,b1,b2,b3,bt]+params)
return dihedrals
def read_moleculetype(lines):
l = readSection(lines,'[ moleculetype ]','[')
if l:
return l[0].split()[0], int(l[0].split()[1])
else:
return None, None
def read_gaff_top(fname):
""" this function reads topology files from gaff """
lines = open(fname).readlines()
lines = kickOutComments(lines,';')
itp = ITPFile(fname, ff = 'amber03')
# atypes = read_atomtypes(lines, ff = 'amber03')
# itp.atomtypes = atypes
return itp
class MDPError(Exception):
def __init__(self, s):
self.s = s
def __str__(self):
return repr(self.s)
class MDP:
def __init__(self, fn = None):
self.parameters = OrderedDict([
['include' ,''],
['define' ,''],
['integrator' , 'md'],
['tinit' , 0],
['dt' , 0.002],
['nsteps' , 25000],
['simulation_part' , 1],
['init_step' , 0],
['comm-mode' , 'Linear'],
['nstcomm' , 100],
['nstcalcenergy' , 100],
['nstdhdl' , 100],
['comm-grps' ,''],
['bd-fric' , 0],
['ld-seed' , 1993],
['emtol' , 100],
['emstep' , 0.01],
['niter' , 0],
['fcstep' , 0],
['nstcgsteep' , 1000],
['nbfgscorr' , 10],
['rtpi' , 0.05],
['nstxout' , 10000],
['nstvout' , 10000],
['nstfout' , 0],
['nstlog' , 1000],
['nstenergy' , 100],
['nstxtcout' , 100],
['xtc-precision' , 1000],
['xtc-grps' ,''],
['energygrps' , ''],
['nstlist' , 10],
['ns-type' , 'Grid'],
['pbc' , 'xyz'],
['periodic_molecules' , 'no'],
['rlist' , 1.2],
['cutoff-scheme' , 'verlet'],
['coulombtype' , 'PME'],
['rcoulomb-switch' , 0],
['rcoulomb' , 1.1],
['epsilon-r' , 1],
['epsilon_rf' , 1],
['vdw-modifier' , 'Potential-switch'],
['rvdw-switch' , 1],
['rvdw' , 1.1],
['DispCorr' , 'EnerPres'],
['table-extension' , 1],
['energygrp_table' ,''],
['fourierspacing' , 0.12],
['fourier_nx' , 0],
['fourier_ny' , 0],
['fourier_nz' , 0],
['pme_order' , 4],
['ewald_rtol' , 1e-05],
['ewald_geometry' , '3d'],
['epsilon_surface' , 0],
['optimize_fft' , 'no'],
['implicit_solvent' , 'No'],
# ['gb_algorithm' , 'Still'],
# ['nstgbradii' , 1],
# ['rgbradii' , 2],
# ['gb_epsilon_solvent' , 80],
# ['gb_saltconc' , 0],
# ['gb_obc_alpha' , 1],
# ['gb_obc_beta' , 0.8],
# ['gb_obc_gamma' , 4.85],
# ['sa_surface_tension' , 2.092],
['tcoupl' , 'v-rescale'],
['tc-grps' , ['System']],
['tau-t' , [0.1]],
['ref-t' , [298]],
['Pcoupl' , 'Parrinello-Rahman'],
['Pcoupltype' , 'Isotropic'],
['tau-p' , 5],
['compressibility' , 4.6E-5],
['ref-p' , 1],
['refcoord_scaling' , 'all'],
['andersen_seed' , 815131],
['QMMM' , 'no'],
# ['QMMM-grps' ,''],
# ['QMmethod' ,''],
# ['QMMMscheme' , 'normal'],
# ['QMbasis' ,''],
# ['QMcharge' ,''],
# ['QMmult' ,''],
['annealing' , ['no']],
['annealing_npoints' , [2]],
['annealing_time' , [0, 50]],
['annealing_temp' , [0, 298]],
['gen-vel' , 'no'],
['gen-temp' , 298],
['gen-seed' , 173529],
['constraints' , 'all-bonds'],
['constraint-algorithm' , 'Lincs'],
# ['continuation' , 'yes'],
['Shake-SOR' , 'no'],
['shake-tol' , 1e-04],
['lincs-order' , 4],
['lincs-iter' , 1],
['lincs-warnangle' , 30],
['morse' , 'no'],
['energygrp_excl' ,''],
['nwall' , 0],
['wall_type' , '9-3'],
['wall_r_linpot' , -1],
['wall_atomtype' ,''],
['wall_density' ,''],
['wall_ewald_zfac' , 3],
['pull' , 'no'],
['disre' , 'No'],
['disre-weighting' , 'Equal'],
['disre-mixed' , 'no'],
['disre-fc' , 1000],
['disre-tau' , 0],
['nstdisreout' , 100],
['orire' , 'no'],
['orire-fc' , 0],
['orire-tau' , 0],
['orire-fitgrp' ,''],
['nstorireout' , 100],
['dihre' , 'No'],
['dihre-fc' , 1000],
['free-energy' , 'no'],
['init-lambda' , 0],
['delta-lambda' , 0],
['sc-alpha' , 0.3],
['sc-power' , 1],
['sc-sigma' , 0.25],
['sc-coul' , 'yes'],
['couple-moltype' ,''],
['couple-lambda0' , 'vdw-q'],
['couple-lambda1' , 'vdw-q'],
['couple-intramol' , 'no'],
['acc-grps' ,''],
['accelerate' ,''],
['freezegrps' ,''],
['freezedim' ,''],
['cos-acceleration' , 0],
['deform' ,''],
['E-x' ,''],
['E-xt' ,''],
['E-y' ,''],
['E-yt' ,''],
['E-z' ,''],
['E-zt' ,''],
['user1-grps' ,''],
['user2-grps' ,''],
['userint1' , 0],
['userint2' , 0],
['userint3' , 0],
['userint4' , 0],
['userreal1' , 0],
['userreal2' , 0],
['userreal3' , 0],
['userreal4' , 0]
])
if fn:
self.read(fn)
def __str__(self):
line = ''
for key, val in self.parameters.items():
if hasattr(val,"append"):
s = ''
for x in val:
s+=str(x)+' '
else:
s = str(val)
line+="%-25s = %s\n" % (key, s)
return line
def __setitem__(self,item,value):
if not self.parameters.has_key(item):
raise MDPError, "No such option %s" % item
self.parameters[item] = value
def __getitem__(self, item):
return self.parameters[item]
def write(self, fp = None):
if fp is None:
fp = sys.stdout
else:
if not hasattr(fp,"write"):
fp = open(fp,"w")
print >>fp, self
def read(self, filename):
lines = open(filename).readlines()
l = kickOutComments(lines,';')
for line in l:
entr = line.split('=')
key = entr[0].strip()
val = entr[1].strip().split()
if not self.parameters.has_key(key):
self.parameters[key] = val
# print 'Warning! Ignoring entry \'%s\'' % key
else:
if len(val) == 0:
self[key] = ''
elif len(val) == 1:
self[key] = val[0]
else:
self[key] = val
return self
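# Usage sketch for the MDP class (not original code; 'md.mdp' is a placeholder):
#
#   mdp = MDP()                  # start from the built-in defaults
#   mdp['nsteps'] = 50000
#   mdp['ref-t'] = [300]
#   mdp.write('md.mdp')          # write() also accepts an open file object
#   mdp2 = MDP('md.mdp')         # read back; unknown keys are kept as given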
def make_amber_residue_names(model):
cysl = model.fetch_residues('CYS') # get a list with all cysteines
# simple protonation-state check: if an HG is present the cysteine is protonated (CYN); otherwise it is either in a disulfide bridge (CYS2) or deprotonated (CYM)
for res in cysl:
hg = res.fetch_atoms('HG') # select HG atom from residue
sg1 = res.fetch_atoms('SG')[0]
if not hg: # no hydrogen
ss_bond = False
for r in cysl:
if r!=res:
sg2 = r.fetch_atoms('SG')[0]
d = sg1 - sg2
if d < 2.5:
ss_bond = True
break
if ss_bond:
# terminal cys2 is ccyx
rr = 'CYS2'
res.set_resname(rr)
else:
res.set_resname('CYM')
else:
res.set_resname('CYN')
lysl = model.fetch_residues('LYS')
for res in lysl:
at = res.fetch('HZ3')
at2 = res.fetch('HZ2')
if at or not at2:
res.set_resname('LYP')
# histidine
hisl = model.fetch_residues('HIS')
for res in hisl:
bHE2 = False
bHD1 = False
he2 = res.fetch('HE2')
if he2: bHE2 = True
hd1 = res.fetch('HD1')
if hd1: bHD1 = True
if hd1 and he2:
res.set_resname('HIP')
elif hd1 and not he2:
res.set_resname('HID')
elif he2 and not hd1:
res.set_resname('HIE')
else:
res.set_resname('HID')
aspl = model.fetch_residues('ASP')
for res in aspl:
bHD2 = False
hd2 = res.fetch('HD2')
if hd2:
res.set_resname('ASH')
glul = model.fetch_residues('GLU')
for res in glul:
bHD2 = False
hd2 = res.fetch('HE2')
if hd2:
res.set_resname('GLH')
for chain in model.chains:
if chain.residues[0].is_protein_residue():
first = chain.nterminus()
last = chain.cterminus()
first.set_resname('N'+first.resname) # rename e.g. ALA to NALA
if last.resname == 'CYS2':
last.set_resname('CCYX') # C-terminal CYS2 becomes CCYX
else:
last.set_resname('C'+last.resname) # rename e.g. ARG to CARG
try:
o1,o2 = last.fetchm(['O1','O2'])
o1.name = 'OC1'
o2.name = 'OC2'
except:
try:
o1,o2 = last.fetchm(['O','OXT'])
o1.name = 'OC1'
o2.name = 'OC2'
except:
print >>sys.stderr, 'pymacs_Warning_> No terminal oxygen atoms found in chain %s' % chain.id
def assign_ffamber99sb_params(m):
m.get_symbol()
m.rename_atoms()
for c in m.chains:
c.make_residue_tree()
make_amber_residue_names( m)
rtp = RTPParser('ffamber99sb.rtp')
rtp.assign_params(m)
bo = BondedParser('ffamber99sbbon.itp')
nb = NBParser('ffamber99sbnb.itp')
nb.assign_params( m )
bo.assign_params( m )
rtp.assign_dihedral_params( m, bo.directives )
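# Hedged usage note (not original code): assign_ffamber99sb_params() expects a
# pmx Model-like object and the ffamber99sb parameter files (ffamber99sb.rtp,
# ffamber99sbbon.itp, ffamber99sbnb.itp) to be readable by the parser classes;
# the Model constructor below is an assumption:
#
#   m = Model('protein.pdb')
#   assign_ffamber99sb_params(m)   # atoms now carry types, charges and bonded parameters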
def bond_energy(m):
return _p.total_bond_energy(m.bond_list)
def angle_energy(m):
return _p.total_angle_energy(m.angle_list)
def dihedral_energy(m):
return _p.total_dihedral_energy(m.dihedral_list)
def improper_energy(m):
return _p.total_improper_energy(m.improper_list)
def coul14_energy(m):
return _p.coul14_energy( m.atoms )
def lj14_energy( m ):
return _p.lj14_energy( m.atoms )
def nb_lj_energy( m ):
return _p.nb_lj_energy( m.atoms )
def nb_coul_energy( m ):
return _p.nb_coul_energy( m.atoms )
def nb_energy( m ):
return _p.nb_energy( m.atoms )
def energy(m):
bond_ene = bond_energy( m )
angle_ene = angle_energy( m )
dihedral_ene = dihedral_energy( m )
improper_ene = improper_energy ( m )
lj14_ene = lj14_energy( m )
coul14_ene = coul14_energy( m )
nb_ene = nb_energy( m )
## print 'bonds = ', bond_ene
## print 'angles = ',angle_ene
## print 'dihedrals = ',dihedral_ene
## print 'impropers = ',improper_ene
## print 'nb = ',nb_ene
## print 'lj14 = ',lj14_ene
## print 'coul14 = ',coul14_ene
return bond_ene + angle_ene + dihedral_ene + improper_ene + nb_ene + lj14_ene + coul14_ene
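# Sketch of the energy helpers above (not original code; assumes a model m that
# has already been through assign_ffamber99sb_params(), and that _p is the
# compiled extension these thin wrappers delegate to):
#
#   e_bond = bond_energy(m)
#   e_total = energy(m)            # bonded + 1-4 + non-bonded terms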
| dseeliger/pmx | pmx/forcefield.py | Python | lgpl-3.0 | 73,752 | ["Gromacs"] | d630e4de961c701794c32c1afdb143768e23dc54367092817f5c091ac1a22e9f |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Base classes to manage services informations """
# pylint: enable=E1101
from storm.expr import Join, LeftJoin
from storm.references import Reference
from zope.interface import implementer
from stoqlib.database.properties import IdCol
from stoqlib.database.viewable import Viewable
from stoqlib.domain.base import Domain
from stoqlib.domain.events import (ServiceCreateEvent, ServiceEditEvent,
ServiceRemoveEvent)
from stoqlib.domain.interfaces import IDescribable
from stoqlib.domain.sellable import (Sellable,
SellableUnit, SellableCategory)
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
#
# Base Domain Classes
#
@implementer(IDescribable)
class Service(Domain):
"""Class responsible to store basic service informations."""
__storm_table__ = 'service'
sellable_id = IdCol()
#: The |sellable| for this service
sellable = Reference(sellable_id, 'Sellable.id')
def remove(self):
"""Removes this service from the database."""
self.store.remove(self)
def close(self):
# We don't have to do anything special when closing a service.
pass
#
# Sellable helpers
#
def can_remove(self):
if sysparam.compare_object('DELIVERY_SERVICE', self):
# The delivery item cannot be deleted as it's important
# for creating deliveries.
return False
return super(Service, self).can_remove()
def can_close(self):
# The delivery item cannot be closed as it will be
# used for deliveries.
return not sysparam.compare_object('DELIVERY_SERVICE', self)
#
# IDescribable implementation
#
def get_description(self):
return self.sellable.get_description()
#
# Domain hooks
#
def on_create(self):
ServiceCreateEvent.emit(self)
def on_delete(self):
ServiceRemoveEvent.emit(self)
def on_update(self):
store = self.store
emitted_store_list = getattr(self, u'_emitted_store_list', set())
# Since other classes can propagate this event (like Sellable),
# emit the event only once for each store.
if store not in emitted_store_list:
ServiceEditEvent.emit(self)
emitted_store_list.add(store)
self._emitted_store_list = emitted_store_list
#
# Views
#
class ServiceView(Viewable):
"""Stores information about services
:attribute id: the id of the sellable table
:attribute barcode: the sellable barcode
:attribute status: the sellable status
:attribute cost: the sellable cost
:attribute price: the sellable price
:attribute description: the sellable description
:attribute unit: the unit in case the sellable is not a product
:attribute service_id: the id of the service table
"""
sellable = Sellable
id = Sellable.id
code = Sellable.code
barcode = Sellable.barcode
status = Sellable.status
cost = Sellable.cost
price = Sellable.base_price
description = Sellable.description
category_description = SellableCategory.description
unit = SellableUnit.description
service_id = Service.id
tables = [
Sellable,
Join(Service, Service.sellable_id == Sellable.id),
LeftJoin(SellableUnit, Sellable.unit_id == SellableUnit.id),
LeftJoin(SellableCategory, SellableCategory.id == Sellable.category_id),
]
def get_unit(self):
return self.unit or u""
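# Hedged usage sketch (not part of the original module): a Viewable is queried
# through the store like a regular table; 'store' below is assumed to be an open
# database store obtained elsewhere in stoqlib.
#
#   for view in store.find(ServiceView):
#       print("%s: %s" % (view.description, view.price))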
| andrebellafronte/stoq | stoqlib/domain/service.py | Python | gpl-2.0 | 4,520 | ["VisIt"] | 74416c0ee7ec1359b64f4a0a32f43b516aa1536e9c4c3e58f67c9ebf413e5490 |
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Christopher M. Bruns
# Contributors: Peter Eastman, Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation toolkit,
# copyright (c) 2012 Stanford University, Christopher M. Bruns and Peter Eastman,
# and are distributed under the following terms:
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
from __future__ import print_function, division
import sys
import warnings
import numpy as np
from mdtraj.core import element
class PdbStructure(object):
"""
PdbStructure object holds a parsed Protein Data Bank format file.
Examples:
Load a pdb structure from a file:
> pdb = PdbStructure(open("1ARJ.pdb"))
Fetch the first atom of the structure:
> print(pdb.iter_atoms().next())
ATOM 1 O5' G N 17 13.768 -8.431 11.865 1.00 0.00 O
Loop over all of the atoms of the structure
> for atom in pdb.iter_atoms():
> print(atom)
ATOM 1 O5' G N 17 13.768 -8.431 11.865 1.00 0.00 O
...
Get a list of all atoms in the structure:
> atoms = list(pdb.iter_atoms())
also:
residues = list(pdb.iter_residues())
positions = list(pdb.iter_positions())
chains = list(pdb.iter_chains())
models = list(pdb.iter_models())
Fetch atomic coordinates of first atom:
> print(pdb.iter_positions().next())
[13.768, -8.431, 11.865]
or
> print(pdb.iter_atoms().next().position)
[13.768, -8.431, 11.865]
Atomic positions are returned as plain numpy arrays; coordinates are kept
exactly as they appear in the PDB file, i.e. in angstroms.
The hierarchical structure of the parsed PDB structure is as follows:
PdbStructure
Model
Chain
Residue
Atom
Location
Model - A PDB structure consists of one or more Models. Each model corresponds to one version of
an NMR structure, or to one frame of a molecular dynamics trajectory.
Chain - A Model contains one or more Chains. Each chain corresponds to one molecule, although multiple
water molecules are frequently included in the same chain.
Residue - A Chain contains one or more Residues. One Residue corresponds to one of the repeating
unit that constitutes a polymer such as protein or DNA. For non-polymeric molecules, one Residue
represents one molecule.
Atom - A Residue contains one or more Atoms. Atoms are chemical atoms.
Location - An atom can sometimes have more that one position, due to static disorder in X-ray
crystal structures. To see all of the atom positions, use the atom.iter_positions() method,
or pass the parameter "include_alt_loc=True" to one of the other iter_positions() methods.
> for pos in pdb.iter_positions(include_alt_loc=True):
> ...
Will loop over all atom positions, including multiple alternate locations for atoms that have
multiple positions. The default value of include_alt_loc is False for the iter_positions()
methods.
"""
def __init__(self, input_stream, load_all_models=True):
"""Create a PDB model from a PDB file stream.
Parameters:
- self (PdbStructure) The new object that is created.
- input_stream (stream) An input file stream, probably created with
open().
- load_all_models (bool) Whether to load every model of an NMR
structure or trajectory, or just load the first model, to save memory.
"""
# initialize models
self.load_all_models = load_all_models
self.models = []
self._current_model = None
self.default_model = None
self.models_by_number = {}
self._unit_cell_lengths = None
self._unit_cell_angles = None
# read file
self._load(input_stream)
def _load(self, input_stream):
state = None
self._reset_atom_numbers()
self._reset_residue_numbers()
# Read one line at a time
for pdb_line in input_stream:
# Look for atoms
if (pdb_line.find("ATOM ") == 0) or (pdb_line.find("HETATM") == 0):
if state == 'NEW_MODEL':
new_number = self._current_model.number + 1
self._add_model(Model(new_number))
state = None
self._add_atom(Atom(pdb_line, self))
# Notice MODEL punctuation, for the next level of detail
# in the structure->model->chain->residue->atom->position hierarchy
elif (pdb_line.find("MODEL") == 0):
#model_number = int(pdb_line[10:14])
if self._current_model is None:
new_number = 0
else:
new_number = self._current_model.number + 1
self._add_model(Model(new_number))
self._reset_atom_numbers()
self._reset_residue_numbers()
state = None
elif (pdb_line.find("ENDMDL") == 0):
self._current_model._finalize()
if self.load_all_models:
state = 'NEW_MODEL'
else:
break
elif (pdb_line.find("END") == 0):
self._current_model._finalize()
if self.load_all_models:
state = 'NEW_MODEL'
else:
break
elif (pdb_line.find("TER") == 0 and pdb_line.split()[0] == "TER"):
self._current_model._current_chain._add_ter_record()
self._reset_residue_numbers()
elif (pdb_line.find("CRYST1") == 0):
self._unit_cell_lengths = (float(pdb_line[6:15]), float(pdb_line[15:24]), float(pdb_line[24:33]))
self._unit_cell_angles = (float(pdb_line[33:40]), float(pdb_line[40:47]), float(pdb_line[47:54]))
elif (pdb_line.find("CONECT") == 0):
atoms = [int(pdb_line[6:11])]
for pos in (11,16,21,26):
try:
atoms.append(int(pdb_line[pos:pos+5]))
except:
pass
self._current_model.connects.append(atoms)
self._finalize()
def _reset_atom_numbers(self):
self._atom_numbers_are_hex = False
self._next_atom_number = 1
def _reset_residue_numbers(self):
self._residue_numbers_are_hex = False
self._next_residue_number = 1
def write(self, output_stream=sys.stdout):
"""Write out structure in PDB format"""
for model in self.models:
if len(model.chains) == 0:
continue
if len(self.models) > 1:
print("MODEL %4d" % model.number, file=output_stream)
model.write(output_stream)
if len(self.models) > 1:
print("ENDMDL", file=output_stream)
print("END", file=output_stream)
def _add_model(self, model):
if self.default_model == None:
self.default_model = model
self.models.append(model)
self._current_model = model
if model.number not in self.models_by_number:
self.models_by_number[model.number] = model
def get_model(self, model_number):
return self.models_by_number[model_number]
def model_numbers(self):
return list(self.models_by_number.keys())
def __contains__(self, model_number):
return self.models_by_number.__contains__(model_number)
def __getitem__(self, model_number):
return self.models_by_number[model_number]
def __iter__(self):
for model in self.models:
yield model
def iter_models(self, use_all_models=False):
if use_all_models:
for model in self:
yield model
elif len(self.models) > 0:
yield self.models[0]
def iter_chains(self, use_all_models=False):
for model in self.iter_models(use_all_models):
for chain in model.iter_chains():
yield chain
def iter_residues(self, use_all_models=False):
for model in self.iter_models(use_all_models):
for res in model.iter_residues():
yield res
def iter_atoms(self, use_all_models=False):
for model in self.iter_models(use_all_models):
for atom in model.iter_atoms():
yield atom
def iter_positions(self, use_all_models=False, include_alt_loc=False):
"""
Iterate over atomic positions.
Parameters
- use_all_models (bool=False) Get positions from all models or just the first one.
- include_alt_loc (bool=False) Get all positions for each atom, or just the first one.
"""
for model in self.iter_models(use_all_models):
for loc in model.iter_positions(include_alt_loc):
yield loc
def __len__(self):
return len(self.models)
def _add_atom(self, atom):
"""
"""
if self._current_model == None:
self._add_model(Model(0))
atom.model_number = self._current_model.number
# Atom might be alternate position for existing atom
self._current_model._add_atom(atom)
def _finalize(self):
"""Establish first and last residues, atoms, etc."""
for model in self.models:
model._finalize()
def get_unit_cell_lengths(self):
"""Get the lengths of the crystallographic unit cell (may be None)."""
return self._unit_cell_lengths
def get_unit_cell_angles(self):
"""Get the angles of the crystallographic unit cell (may be None)."""
return self._unit_cell_angles
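# Minimal round-trip sketch for PdbStructure (not original code; the file names
# are placeholders):
#
#   with open('1ARJ.pdb') as f:
#       pdb = PdbStructure(f)
#   print(len(pdb.models), "model(s) read")
#   with open('out.pdb', 'w') as out:
#       pdb.write(out)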
class Model(object):
"""Model holds one model of a PDB structure.
NMR structures usually have multiple models. This represents one
of them.
"""
def __init__(self, model_number=1):
self.number = model_number
self.chains = []
self._current_chain = None
self.chains_by_id = {}
self.connects = []
def _add_atom(self, atom):
"""
"""
if len(self.chains) == 0:
self._add_chain(Chain(atom.chain_id))
# Create a new chain if the chain id has changed
if self._current_chain.chain_id != atom.chain_id:
self._add_chain(Chain(atom.chain_id))
# Create a new chain after TER record, even if ID is the same
elif self._current_chain.has_ter_record:
self._add_chain(Chain(atom.chain_id))
self._current_chain._add_atom(atom)
def _add_chain(self, chain):
self.chains.append(chain)
self._current_chain = chain
if not chain.chain_id in self.chains_by_id:
self.chains_by_id[chain.chain_id] = chain
def get_chain(self, chain_id):
return self.chains_by_id[chain_id]
def chain_ids(self):
return list(self.chains_by_id.keys())
def __contains__(self, chain_id):
return self.chains_by_id.__contains__(chain_id)
def __getitem__(self, chain_id):
return self.chains_by_id[chain_id]
def __iter__(self):
return iter(self.chains)
def iter_chains(self):
for chain in self:
yield chain
def iter_residues(self):
for chain in self:
for res in chain.iter_residues():
yield res
def iter_atoms(self):
for chain in self:
for atom in chain.iter_atoms():
yield atom
def iter_positions(self, include_alt_loc=False):
for chain in self:
for loc in chain.iter_positions(include_alt_loc):
yield loc
def __len__(self):
return len(self.chains)
def write(self, output_stream=sys.stdout):
# Start atom serial numbers at 1
sn = Model.AtomSerialNumber(1)
for chain in self.chains:
chain.write(sn, output_stream)
def _finalize(self):
for chain in self.chains:
chain._finalize()
class AtomSerialNumber(object):
"""pdb.Model inner class for pass-by-reference incrementable serial number"""
def __init__(self, val):
self.val = val
def increment(self):
self.val += 1
class Chain(object):
def __init__(self, chain_id=' '):
self.chain_id = chain_id
self.residues = []
self.has_ter_record = False
self._current_residue = None
self.residues_by_num_icode = {}
self.residues_by_number = {}
def _add_atom(self, atom):
"""
"""
# Create a residue if none have been created
if len(self.residues) == 0:
self._add_residue(Residue(atom.residue_name_with_spaces, atom.residue_number, atom.insertion_code, atom.alternate_location_indicator))
# Create a residue if the residue information has changed
elif self._current_residue.number != atom.residue_number:
self._add_residue(Residue(atom.residue_name_with_spaces, atom.residue_number, atom.insertion_code, atom.alternate_location_indicator))
elif self._current_residue.insertion_code != atom.insertion_code:
self._add_residue(Residue(atom.residue_name_with_spaces, atom.residue_number, atom.insertion_code, atom.alternate_location_indicator))
elif self._current_residue.name_with_spaces == atom.residue_name_with_spaces:
# This is a normal case: number, name, and iCode have not changed
pass
elif atom.alternate_location_indicator != ' ':
# OK - this is a point mutation, Residue._add_atom will know what to do
pass
else: # Residue name does not match
# Only residue name does not match
warnings.warn("WARNING: two consecutive residues with same number (%s, %s)" % (atom, self._current_residue.atoms[-1]))
self._add_residue(Residue(atom.residue_name_with_spaces, atom.residue_number, atom.insertion_code, atom.alternate_location_indicator))
self._current_residue._add_atom(atom)
def _add_residue(self, residue):
if len(self.residues) == 0:
residue.is_first_in_chain = True
self.residues.append(residue)
self._current_residue = residue
key = str(residue.number) + residue.insertion_code
# only store the first residue with a particular key
if key not in self.residues_by_num_icode:
self.residues_by_num_icode[key] = residue
if residue.number not in self.residues_by_number:
self.residues_by_number[residue.number] = residue
def write(self, next_serial_number, output_stream=sys.stdout):
for residue in self.residues:
residue.write(next_serial_number, output_stream)
if self.has_ter_record:
r = self.residues[-1]
print("TER %5d %3s %1s%4d%1s" % (next_serial_number.val, r.name_with_spaces, self.chain_id, r.number, r.insertion_code), file=output_stream)
next_serial_number.increment()
def _add_ter_record(self):
self.has_ter_record = True
self._finalize()
def get_residue(self, residue_number, insertion_code=' '):
return self.residues_by_num_icode[str(residue_number) + insertion_code]
def __contains__(self, residue_number):
return self.residues_by_number.__contains__(residue_number)
def __getitem__(self, residue_number):
"""Returns the FIRST residue in this chain with a particular residue number"""
return self.residues_by_number[residue_number]
def __iter__(self):
for res in self.residues:
yield res
def iter_residues(self):
for res in self:
yield res
def iter_atoms(self):
for res in self:
for atom in res:
yield atom
def iter_positions(self, include_alt_loc=False):
for res in self:
for loc in res.iter_positions(include_alt_loc):
yield loc
def __len__(self):
return len(self.residues)
def _finalize(self):
self.residues[0].is_first_in_chain = True
self.residues[-1].is_final_in_chain = True
for residue in self.residues:
residue._finalize()
class Residue(object):
def __init__(self, name, number, insertion_code=' ', primary_alternate_location_indicator=' '):
alt_loc = primary_alternate_location_indicator
self.primary_location_id = alt_loc
self.locations = {}
self.locations[alt_loc] = Residue.Location(alt_loc, name)
self.name_with_spaces = name
self.number = number
self.insertion_code = insertion_code
self.atoms = []
self.atoms_by_name = {}
self.is_first_in_chain = False
self.is_final_in_chain = False
self._current_atom = None
def _add_atom(self, atom):
"""
"""
alt_loc = atom.alternate_location_indicator
if not alt_loc in self.locations:
self.locations[alt_loc] = Residue.Location(alt_loc, atom.residue_name_with_spaces)
assert atom.residue_number == self.number
assert atom.insertion_code == self.insertion_code
# Check whether this is an existing atom with another position
if (atom.name_with_spaces in self.atoms_by_name):
old_atom = self.atoms_by_name[atom.name_with_spaces]
# Unless this is a duplicated atom (warn about file error)
if atom.alternate_location_indicator in old_atom.locations:
pass # TJL COMMENTED OUT
#warnings.warn("WARNING: duplicate atom (%s, %s)" % (atom, old_atom._pdb_string(old_atom.serial_number, atom.alternate_location_indicator)))
else:
for alt_loc, position in atom.locations.items():
old_atom.locations[alt_loc] = position
return # no new atom added
# actually use new atom
self.atoms_by_name[atom.name] = atom
self.atoms_by_name[atom.name_with_spaces] = atom
self.atoms.append(atom)
self._current_atom = atom
def write(self, next_serial_number, output_stream=sys.stdout, alt_loc = "*"):
for atom in self.atoms:
atom.write(next_serial_number, output_stream, alt_loc)
def _finalize(self):
if len(self.atoms) > 0:
self.atoms[0].is_first_atom_in_chain = self.is_first_in_chain
self.atoms[-1].is_final_atom_in_chain = self.is_final_in_chain
for atom in self.atoms:
atom.is_first_residue_in_chain = self.is_first_in_chain
atom.is_final_residue_in_chain = self.is_final_in_chain
def set_name_with_spaces(self, name, alt_loc=None):
# Gromacs ffamber PDB files can have 4-character residue names
# assert len(name) == 3
if alt_loc == None:
alt_loc = self.primary_location_id
loc = self.locations[alt_loc]
loc.name_with_spaces = name
loc.name = name.strip()
def get_name_with_spaces(self, alt_loc=None):
if alt_loc == None:
alt_loc = self.primary_location_id
loc = self.locations[alt_loc]
return loc.name_with_spaces
name_with_spaces = property(get_name_with_spaces, set_name_with_spaces, doc='four-character residue name including spaces')
def get_name(self, alt_loc=None):
if alt_loc == None:
alt_loc = self.primary_location_id
loc = self.locations[alt_loc]
return loc.name
name = property(get_name, doc='residue name')
def get_atom(self, atom_name):
return self.atoms_by_name[atom_name]
def __contains__(self, atom_name):
return self.atoms_by_name.__contains__(atom_name)
def __getitem__(self, atom_name):
"""Returns the FIRST atom in this residue with a particular atom name"""
return self.atoms_by_name[atom_name]
def __iter__(self):
"Iterator over atoms"
for atom in self.iter_atoms():
yield atom
# Three possibilities: primary alt_loc, certain alt_loc, or all alt_locs
def iter_atoms(self, alt_loc=None):
if alt_loc == None:
locs = [self.primary_location_id]
elif alt_loc == "":
locs = [self.primary_location_id]
elif alt_loc == "*":
locs = None
else:
locs = list(alt_loc)
# If an atom has any location in alt_loc, emit the atom
for atom in self.atoms:
use_atom = False # start pessimistic
for loc2 in atom.locations.keys():
if locs == None: # means all locations
use_atom = True
elif loc2 in locs:
use_atom = True
if use_atom:
yield atom
def iter_positions(self, include_alt_loc=False):
"""Returns one position per atom, even if an individual atom has multiple positions.
"""
for atom in self:
if include_alt_loc:
for loc in atom.iter_positions():
yield loc
else:
yield atom.position
def __len__(self):
return len(self.atoms)
# Residues can have multiple locations, based on alt_loc indicator
class Location:
"""
Inner class of residue to allow different residue names for different alternate_locations.
"""
def __init__(self, alternate_location_indicator, residue_name_with_spaces):
self.alternate_location_indicator = alternate_location_indicator
self.residue_name_with_spaces = residue_name_with_spaces
class Atom(object):
"""Atom represents one atom in a PDB structure.
"""
def __init__(self, pdb_line, pdbstructure=None):
"""Create a new pdb.Atom from an ATOM or HETATM line.
Example line:
ATOM 2209 CB TYR A 299 6.167 22.607 20.046 1.00 8.12 C
00000000011111111112222222222333333333344444444445555555555666666666677777777778
12345678901234567890123456789012345678901234567890123456789012345678901234567890
ATOM line format description from
http://deposit.rcsb.org/adit/docs/pdb_atom_format.html:
COLUMNS DATA TYPE CONTENTS
--------------------------------------------------------------------------------
1 - 6 Record name "ATOM "
7 - 11 Integer Atom serial number.
13 - 16 Atom Atom name.
17 Character Alternate location indicator.
18 - 20 Residue name Residue name.
22 Character Chain identifier.
23 - 26 Integer Residue sequence number.
27 AChar Code for insertion of residues.
31 - 38 Real(8.3) Orthogonal coordinates for X in Angstroms.
39 - 46 Real(8.3) Orthogonal coordinates for Y in Angstroms.
47 - 54 Real(8.3) Orthogonal coordinates for Z in Angstroms.
55 - 60 Real(6.2) Occupancy (Default = 1.0).
61 - 66 Real(6.2) Temperature factor (Default = 0.0).
73 - 76 LString(4) Segment identifier, left-justified.
77 - 78 LString(2) Element symbol, right-justified.
79 - 80 LString(2) Charge on the atom.
"""
# We might modify first/final status during _finalize() methods
self.is_first_atom_in_chain = False
self.is_final_atom_in_chain = False
self.is_first_residue_in_chain = False
self.is_final_residue_in_chain = False
# Start parsing fields from pdb line
self.record_name = pdb_line[0:6].strip()
# VMD sometimes uses hex for atom serial numbers greater than 99,999
if pdbstructure is not None and pdbstructure._atom_numbers_are_hex:
self.serial_number = int(pdb_line[6:11], 16)
else:
try:
self.serial_number = int(pdb_line[6:11])
except:
try:
self.serial_number = int(pdb_line[6:11], 16)
pdbstructure._atom_numbers_are_hex = True
except:
# Just give it the next number in sequence.
self.serial_number = pdbstructure._next_atom_number
self.name_with_spaces = pdb_line[12:16]
alternate_location_indicator = pdb_line[16]
self.residue_name_with_spaces = pdb_line[17:20]
# In some MD codes, notably ffamber in gromacs, residue name has a fourth character in
# column 21
possible_fourth_character = pdb_line[20:21]
if possible_fourth_character != " ":
# Fourth character should only be there if official 3 are already full
if len(self.residue_name_with_spaces.strip()) != 3:
raise ValueError('Misaligned residue name: %s' % pdb_line)
self.residue_name_with_spaces += possible_fourth_character
self.residue_name = self.residue_name_with_spaces.strip()
self.chain_id = pdb_line[21]
if pdbstructure is not None and pdbstructure._residue_numbers_are_hex:
self.residue_number = int(pdb_line[22:26], 16)
else:
try:
self.residue_number = int(pdb_line[22:26])
except:
try:
self.residue_number = int(pdb_line[22:26], 16)
pdbstructure._residue_numbers_are_hex = True
except:
# When VMD runs out of hex values it starts filling in the residue ID field with ****
# Look at the most recent atoms to figure out whether this is a new residue or not.
if pdbstructure._current_model is None or pdbstructure._current_model._current_chain is None or pdbstructure._current_model._current_chain._current_residue is None:
# This is the first residue in the model.
self.residue_number = pdbstructure._next_residue_number
else:
currentRes = pdbstructure._current_model._current_chain._current_residue
if currentRes.name_with_spaces != self.residue_name_with_spaces:
# The residue name has changed.
self.residue_number = pdbstructure._next_residue_number
elif self.name_with_spaces in currentRes.atoms_by_name:
# There is already an atom with this name.
self.residue_number = pdbstructure._next_residue_number
else:
self.residue_number = currentRes.number
self.insertion_code = pdb_line[26]
# coordinates, occupancy, and temperature factor belong in Atom.Location object
x = float(pdb_line[30:38])
y = float(pdb_line[38:46])
z = float(pdb_line[46:54])
try:
occupancy = float(pdb_line[54:60])
except:
occupancy = 1.0
try:
temperature_factor = float(pdb_line[60:66])
except:
temperature_factor = 0.0
self.locations = {}
loc = Atom.Location(alternate_location_indicator, np.array([x,y,z]), occupancy, temperature_factor, self.residue_name_with_spaces)
self.locations[alternate_location_indicator] = loc
self.default_location_id = alternate_location_indicator
# segment id, element_symbol, and formal_charge are not always present
self.segment_id = pdb_line[72:76].strip()
self.element_symbol = pdb_line[76:78].strip()
try: self.formal_charge = int(pdb_line[78:80])
except ValueError: self.formal_charge = None
# figure out atom element
try:
# First try to find a sensible element symbol from columns 76-77
self.element = element.get_by_symbol(self.element_symbol)
except KeyError:
# otherwise, deduce element from first two characters of atom name
# remove digits found in some hydrogen atom names
symbol = self.name_with_spaces[0:2].strip().lstrip("0123456789")
try:
# Some molecular dynamics PDB files, such as gromacs with ffamber force
# field, include 4-character hydrogen atom names beginning with "H".
# Hopefully elements like holmium (Ho) and mercury (Hg) will have fewer than four
# characters in the atom name. This problem is the fault of molecular
# dynamics code authors who feel the need to make up their own atom
# nomenclature because it is too tedious to read that provided by the PDB.
# These are the same folks who invent their own meanings for biochemical terms
# like "dipeptide". Clowntards.
if len(self.name) == 4 and self.name[0:1] == "H":
self.element = element.hydrogen
else:
self.element = element.get_by_symbol(symbol)
except KeyError:
# OK, I give up
self.element = None
if pdbstructure is not None:
pdbstructure._next_atom_number = self.serial_number+1
pdbstructure._next_residue_number = self.residue_number+1
def iter_locations(self):
"""
Iterate over Atom.Location objects for this atom, including primary location.
"""
for alt_loc in self.locations:
yield self.locations[alt_loc]
def iter_positions(self):
"""
Iterate over atomic positions. Returns Quantity(Vec3(), unit) objects, unlike
iter_locations, which returns Atom.Location objects.
"""
for loc in self.iter_locations():
yield loc.position
def iter_coordinates(self):
"""
Iterate over x, y, z values of primary atom position.
"""
for coord in self.position:
yield coord
# Hide existence of multiple alternate locations to avoid scaring casual users
def get_location(self, location_id=None):
id = location_id
if (id == None):
id = self.default_location_id
return self.locations[id]
def set_location(self, new_location, location_id=None):
id = location_id
if (id == None):
id = self.default_location_id
self.locations[id] = new_location
location = property(get_location, set_location, doc='default Atom.Location object')
def get_position(self):
return self.location.position
def set_position(self, coords):
self.location.position = coords
position = property(get_position, set_position, doc='orthogonal coordinates')
def get_alternate_location_indicator(self):
return self.location.alternate_location_indicator
alternate_location_indicator = property(get_alternate_location_indicator)
def get_occupancy(self):
return self.location.occupancy
occupancy = property(get_occupancy)
def get_temperature_factor(self):
return self.location.temperature_factor
temperature_factor = property(get_temperature_factor)
def get_x(self): return self.position[0]
x = property(get_x)
def get_y(self): return self.position[1]
y = property(get_y)
def get_z(self): return self.position[2]
z = property(get_z)
def _pdb_string(self, serial_number=None, alternate_location_indicator=None):
"""
Produce a PDB line for this atom using a particular serial number and alternate location
"""
if serial_number == None:
serial_number = self.serial_number
if alternate_location_indicator == None:
alternate_location_indicator = self.alternate_location_indicator
# produce PDB line in three parts: names, numbers, and end
        # Accommodate 4-character residue names that use column 21
long_res_name = self.residue_name_with_spaces
if len(long_res_name) == 3:
long_res_name += " "
assert len(long_res_name) == 4
names = "%-6s%5d %4s%1s%4s%1s%4d%1s " % (
self.record_name, serial_number, \
self.name_with_spaces, alternate_location_indicator, \
long_res_name, self.chain_id, \
self.residue_number, self.insertion_code)
numbers = "%8.3f%8.3f%8.3f%6.2f%6.2f " % (
self.x, self.y, self.z, self.occupancy, self.temperature_factor)
end = "%-4s%2s" % (\
self.segment_id, self.element_symbol)
formal_charge = " "
if (self.formal_charge != None): formal_charge = "%+2d" % self.formal_charge
return names+numbers+end+formal_charge
def __str__(self):
return self._pdb_string(self.serial_number, self.alternate_location_indicator)
def write(self, next_serial_number, output_stream=sys.stdout, alt_loc = "*"):
"""
alt_loc = "*" means write all alternate locations
alt_loc = None means write just the primary location
alt_loc = "AB" means write locations "A" and "B"
"""
if alt_loc == None:
locs = [self.default_location_id]
elif alt_loc == "":
locs = [self.default_location_id]
elif alt_loc == "*":
            locs = list(self.locations.keys())
            locs.sort()
else:
locs = list(alt_loc)
for loc_id in locs:
print(self._pdb_string(next_serial_number.val, loc_id), file=output_stream)
next_serial_number.increment()
def set_name_with_spaces(self, name):
assert len(name) == 4
self._name_with_spaces = name
self._name = name.strip()
def get_name_with_spaces(self):
return self._name_with_spaces
name_with_spaces = property(get_name_with_spaces, set_name_with_spaces, doc='four-character residue name including spaces')
def get_name(self):
return self._name
name = property(get_name, doc='residue name')
class Location(object):
"""
Inner class of Atom for holding alternate locations
"""
def __init__(self, alt_loc, position, occupancy, temperature_factor, residue_name):
self.alternate_location_indicator = alt_loc
self.position = position
self.occupancy = occupancy
self.temperature_factor = temperature_factor
self.residue_name = residue_name
def __iter__(self):
for coord in self.position:
yield coord
def __str__(self):
return str(self.position)
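# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module). Assuming `line`
# holds one fixed-column ATOM or HETATM record read from a PDB file and
# `structure` is the enclosing PdbStructure being populated, a caller could
# inspect the parsed atom roughly like this:
#
#     atom = Atom(line, structure)
#     print(atom.name, atom.residue_name, atom.residue_number)
#     print(atom.position)               # primary location, as a numpy array
#     for loc in atom.iter_locations():  # includes alternate locations
#         print(loc.alternate_location_indicator, loc.occupancy, loc.position)
#
# The Atom constructor arguments and PdbStructure bookkeeping shown here are
# assumptions inferred from the code above, not a documented API.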
|
kyleabeauchamp/mdtraj
|
mdtraj/formats/pdb/pdbstructure.py
|
Python
|
lgpl-2.1
| 37,034
|
[
"CRYSTAL",
"Gromacs",
"MDTraj",
"OpenMM",
"VMD"
] |
d075a3979e003e2dd5e810699d612d1795ef84f0fb395e10da6d537824c1d71d
|
try: paraview.simple
except: from paraview.simple import *
paraview.simple._DisableFirstRenderCameraReset()
ImageLuminance()
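# Editorial note: the try/except import above lets this test run both inside
# ParaView's built-in Python shell (where the paraview.simple module is
# already loaded) and under pvpython, where the explicit star import is
# needed. ImageLuminance() then instantiates the luminance filter on the
# currently active source, if any.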
|
jeromevelut/Peavip
|
Testing/ImageLuminance.py
|
Python
|
gpl-3.0
| 127
|
[
"ParaView"
] |
7499f41902a74a9e69a22b0db828c88319f1d017dd679f8a0555d6d0a2798ded
|
from pywps.Process import WPSProcess
from flyingpigeon.indices import indices, indices_description, calc_indice_single
from flyingpigeon.subset import countries, countries_longname
from flyingpigeon.utils import GROUPING
import logging
class SingleIndicesProcess(WPSProcess):
"""This process calculates a climate indice for the given input netcdf files."""
def __init__(self):
WPSProcess.__init__(
self,
identifier = "indices_single",
title="Calculation of climate indice (single variable)",
version = "0.3",
abstract="This process calculates climate indices based on one single variable.",
statusSupported=True,
storeSupported=True
)
self.resource = self.addComplexInput(
identifier="resource",
title="Resouce",
abstract="NetCDF File",
minOccurs=1,
maxOccurs=100,
maxmegabites=5000,
formats=[{"mimeType":"application/x-netcdf"}],
)
self.groupings = self.addLiteralInput(
identifier="groupings",
title="Grouping",
abstract="Select an time grouping (time aggregation)",
default='yr',
type=type(''),
minOccurs=1,
maxOccurs=len(GROUPING),
allowedValues=GROUPING
)
self.indices = self.addLiteralInput(
identifier="indices",
title="Indice",
abstract=indices_description(),
default='SU',
type=type(''),
minOccurs=1,
maxOccurs=len(indices()),
allowedValues=indices()
)
self.polygons = self.addLiteralInput(
identifier="polygons",
title="Country subset",
abstract= countries_longname(),
#default='FRA',
type=type(''),
minOccurs=0,
maxOccurs=len(countries()),
allowedValues=countries()
)
# complex output
# -------------
self.output = self.addComplexOutput(
identifier="output",
title="Indice",
abstract="Calculated indice as NetCDF file",
metadata=[],
formats=[{"mimeType":"application/x-netcdf"}],
asReference=True
)
def execute(self):
import os
import tarfile
from tempfile import mkstemp
from os import path
ncs = self.getInputValues(identifier='resource')
indices = self.indices.getValue()
polygons = self.polygons.getValue()
groupings = self.groupings.getValue() # getInputValues(identifier='groupings')
# if len(polygons)==0:
# polygons = None
self.status.set('starting: indices=%s, groupings=%s, countries=%s, num_files=%s' % (indices,
groupings, polygons, len(ncs)), 0)
results = calc_indice_single(
resource = ncs,
indices = indices,
polygons= polygons,
groupings = groupings,
dir_output = path.curdir,
)
self.status.set('result %s' % results, 90)
try:
(fp_tarf, tarf) = mkstemp(dir=".", suffix='.tar')
tar = tarfile.open(tarf, "w")
for result in results:
p , f = path.split(result)
tar.add( result , arcname = result.replace(p, ""))
tar.close()
logging.info('Tar file prepared')
except Exception as e:
logging.error('Tar file preparation failed %s' % e)
self.output.setValue( tarf )
self.status.set('done: indice=%s, num_files=%s' % (indices, len(ncs)), 100)
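# ---------------------------------------------------------------------------
# Editorial note: the archive step in execute() above is a generic stdlib
# pattern (mkstemp to reserve an output file, tarfile to collect the result
# NetCDF files). A standalone sketch of the same pattern, independent of
# PyWPS; `pack_results` is a hypothetical helper name used for illustration:
#
#     import os
#     import tarfile
#     from tempfile import mkstemp
#     from os import path
#
#     def pack_results(results, dir_output="."):
#         fd, tarf = mkstemp(dir=dir_output, suffix=".tar")
#         os.close(fd)  # tarfile reopens the path by name
#         with tarfile.open(tarf, "w") as tar:
#             for result in results:
#                 tar.add(result, arcname=path.basename(result))
#         return tarf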
|
sradanov/flyingpigeon
|
flyingpigeon/processes/wps_indices.py
|
Python
|
apache-2.0
| 3,870
|
[
"NetCDF"
] |
37ca0a6c83f8b825666395eca41714b7fb31d0047bdb15767cc0ef35e2bf5748
|
#build list of available data
import os, sys
microbe_info= {}
try:
orgs = {}
for line in open( "/depot/data2/galaxy/microbes/microbial_data.loc" ):
if line[0:1] == "#" : continue
fields = line.split('\t')
#read each line, if not enough fields, go to next line
try:
info_type = fields.pop(0)
if info_type.upper() == "ORG":
#ORG 12521 Clostridium perfringens SM101 bacteria Firmicutes CP000312,CP000313,CP000314,CP000315 http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=genomeprj&cmd=Retrieve&dopt=Overview&list_uids=12521
org_num = fields.pop(0)
name = fields.pop(0)
kingdom = fields.pop(0)
group = fields.pop(0)
chromosomes = fields.pop(0)
info_url = fields.pop(0)
link_site = fields.pop(0).replace("\r","").replace("\n","")
if org_num not in orgs:
orgs[org_num]={}
orgs[org_num]['chrs']={}
orgs[org_num]['name']= name
orgs[org_num]['kingdom']= kingdom
orgs[org_num]['group']= group
orgs[org_num]['chromosomes']= chromosomes
orgs[org_num]['info_url']= info_url
orgs[org_num]['link_site']= link_site
elif info_type.upper() == "CHR":
#CHR 12521 CP000315 Clostridium perfringens phage phiSM101, complete genome 38092 110684521 CP000315.1
org_num = fields.pop(0)
chr_acc = fields.pop(0)
name = fields.pop(0)
length = fields.pop(0)
gi = fields.pop(0)
gb = fields.pop(0)
info_url = fields.pop(0).replace("\r","").replace("\n","")
chr = {}
chr['name']=name
chr['length']=length
chr['gi']=gi
chr['gb']=gb
chr['info_url']=info_url
if org_num not in orgs:
orgs[org_num]={}
orgs[org_num]['chrs']={}
orgs[org_num]['chrs'][chr_acc] = chr
elif info_type.upper() == "DATA":
#DATA 12521_12521_CDS 12521 CP000315 CDS bed /home/djb396/alignments/playground/bacteria/12521/CP000315.CDS.bed
uid = fields.pop(0)
org_num = fields.pop(0)
chr_acc = fields.pop(0)
feature = fields.pop(0)
filetype = fields.pop(0)
path = fields.pop(0).replace("\r","").replace("\n","")
data = {}
data['filetype']=filetype
data['path']=path
data['feature']=feature
if org_num not in orgs:
orgs[org_num]={}
orgs[org_num]['chrs']={}
if 'data' not in orgs[org_num]['chrs'][chr_acc]:
orgs[org_num]['chrs'][chr_acc]['data']={}
orgs[org_num]['chrs'][chr_acc]['data'][uid] = data
else: continue
except:
continue
for org_num in orgs:
org = orgs[org_num]
if org['kingdom'] not in microbe_info:
microbe_info[org['kingdom']]={}
if org['group'] not in microbe_info[org['kingdom']]:
microbe_info[org['kingdom']][org['group']]={}
if org_num not in microbe_info[org['kingdom']][org['group']]:
microbe_info[org['kingdom']][org['group']][org_num]=org
except Exception, exc:
print >>sys.stdout, 'microbial_import_code.py initialization error -> %s' % exc
def get_kingdoms():
ret_val = []
kingdoms = microbe_info.keys()
kingdoms.sort()
for kingdom in kingdoms:
ret_val.append((kingdom,kingdom,False))
if ret_val:
ret_val[0]= (ret_val[0][0],ret_val[0][1],True)
return ret_val
def get_groups(kingdom):
ret_val = []
groups = microbe_info[kingdom].keys()
groups.sort()
for group in groups:
ret_val.append((group,group,False))
if ret_val:
ret_val[0]= (ret_val[0][0],ret_val[0][1],True)
return ret_val
def get_orgs(kingdom,group):
ret_val = []
orgs = microbe_info[kingdom][group].keys()
#need to sort by name
swap_test = False
for i in range(0, len(orgs) - 1):
for j in range(0, len(orgs) - i - 1):
if microbe_info[kingdom][group][orgs[j]]['name'] > microbe_info[kingdom][group][orgs[j + 1]]['name']:
orgs[j], orgs[j + 1] = orgs[j + 1], orgs[j]
swap_test = True
if swap_test == False:
break
for org in orgs:
if microbe_info[kingdom][group][org]['link_site'] == "UCSC":
ret_val.append(("<b>"+microbe_info[kingdom][group][org]['name']+"</b> <a href=\""+microbe_info[kingdom][group][org]['info_url']+"\" target=\"_blank\">(about)</a>",org,False))
else:
ret_val.append((microbe_info[kingdom][group][org]['name']+" <a href=\""+microbe_info[kingdom][group][org]['info_url']+"\" target=\"_blank\">(about)</a>",org,False))
if ret_val:
ret_val[0]= (ret_val[0][0],ret_val[0][1],True)
return ret_val
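#NOTE (editorial): the hand-rolled bubble sort above can be replaced by the
#built-in list sort with a key function, which orders the organism ids by
#display name in a single call, e.g.:
#    orgs.sort(key=lambda org: microbe_info[kingdom][group][org]['name'])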
def get_data(kingdom,group,org,feature):
ret_val = []
chroms = microbe_info[kingdom][group][org]['chrs'].keys()
chroms.sort()
for chr in chroms:
for data in microbe_info[kingdom][group][org]['chrs'][chr]['data']:
if microbe_info[kingdom][group][org]['chrs'][chr]['data'][data]['feature']==feature:
ret_val.append((microbe_info[kingdom][group][org]['chrs'][chr]['name']+" <a href=\""+microbe_info[kingdom][group][org]['chrs'][chr]['info_url']+"\" target=\"_blank\">(about)</a>",data,False))
return ret_val
#post processing, set build for data and add additional data to history
from galaxy import datatypes, config, jobs
from shutil import copyfile
def exec_after_process(app, inp_data, out_data, param_dict, tool, stdout, stderr):
history = out_data.items()[0][1].history
if history == None:
print "unknown history!"
return
kingdom = param_dict.get('kingdom',None)
group = param_dict.get('group',None)
org = param_dict.get('org',None)
if not (kingdom or group or org):
print "Parameters are not available."
new_stdout = ""
split_stdout = stdout.split("\n")
basic_name = ""
for line in split_stdout:
fields = line.split("\t")
if fields[0] == "#File1":
description = fields[1]
chr = fields[2]
dbkey = fields[3]
file_type = fields[4]
name, data = out_data.items()[0]
basic_name = data.name
data.name = data.name + " (" + microbe_info[kingdom][group][org]['chrs'][chr]['data'][description]['feature'] +" for "+microbe_info[kingdom][group][org]['name']+":"+chr + ")"
data.dbkey = dbkey
data.info = data.name
datatypes.change_datatype( data, file_type )
data.init_meta()
data.set_peek()
app.model.flush()
elif fields[0] == "#NewFile":
description = fields[1]
chr = fields[2]
dbkey = fields[3]
filepath = fields[4]
file_type = fields[5]
newdata = app.model.Dataset()
newdata.extension = file_type
newdata.name = basic_name + " (" + microbe_info[kingdom][group][org]['chrs'][chr]['data'][description]['feature'] +" for "+microbe_info[kingdom][group][org]['name']+":"+chr + ")"
newdata.flush()
history.add_dataset( newdata )
newdata.flush()
app.model.flush()
try:
copyfile(filepath,newdata.file_name)
newdata.info = newdata.name
newdata.state = jobs.JOB_OK
except:
newdata.info = "The requested file is missing from the system."
newdata.state = jobs.JOB_ERROR
newdata.dbkey = dbkey
newdata.init_meta()
newdata.set_peek()
#
app.model.flush()
|
jmchilton/galaxy-central
|
tools/data_source/microbial_import_code.py
|
Python
|
mit
| 8,633
|
[
"Galaxy"
] |
b2b28afcdff56ee0794b0dded1d1f11fc60e6a8b12f835eb827d17525339f5b9
|
"""Module for functions in the Vascular Modeling Toolkit (VMTK).
For more details on VMTK, go to www.vmtk.org. For more details specifically on
VMTK scripts, go to www.vmtk.org/vmtkscripts.
To a large extent, function names were chosen to match the names of
corresponding vmtk scripts. Parts of the documentation were taken from
www.vmtk.org.
"""
from vmtk import pypes, vmtkscripts
def vmtkbifurcationreferencesystems(centerlines):
"""Compute reference system for each bifurcation of a vessel tree.
Args:
centerlines: Centerlines split into branches.
Returns:
Reference system for each bifurcation.
Pointdata (selection):
Normal: Normal of the bifurcation plane.
UpNormal: Normal pointing toward the bifurcation apex.
"""
bifrefsystem = vmtkscripts.vmtkBifurcationReferenceSystems()
bifrefsystem.Centerlines = centerlines
bifrefsystem.RadiusArrayName = 'MaximumInscribedSphereRadius'
bifrefsystem.BlankingArrayName = 'Blanking'
bifrefsystem.GroupIdsArrayName = 'GroupIds'
bifrefsystem.Execute()
return bifrefsystem.ReferenceSystems
def vmtkbifurcationsections(surface, centerlines, distance=1):
"""Compute sections located a fixed number of maximally inscribed
sphere radii away from each bifurcation.
Args:
surface: Surface split into branches.
centerlines: Centerlines split into branches.
distance: Distance from bifurcation in number of maximally inscribed
spheres, where each sphere touches the center of the previous one.
Returns:
Polydata with one cross section per branch of each bifurcation.
Celldata (selection):
BifurcationSectionArea: Section area.
BifurcationSectionMinSize: Minimum diameter of section.
BifurcationSectionMaxSize: Maximum diameter of section.
"""
bifsections = vmtkscripts.vmtkBifurcationSections()
bifsections.Surface = surface
bifsections.Centerlines = centerlines
bifsections.NumberOfDistanceSpheres = distance
bifsections.RadiusArrayName = 'MaximumInscribedSphereRadius'
bifsections.GroupIdsArrayName = 'GroupIds'
bifsections.CenterlineIdsArrayName = 'CenterlineIds'
bifsections.TractIdsArrayName = 'TractIds'
bifsections.BlankingArrayName = 'Blanking'
bifsections.Execute()
return bifsections.BifurcationSections
def vmtkbifurcationvectors(centerlines, referencesystems):
"""Compute bifurcation vectors.
Args:
centerlines: Centerlines split into branches.
referencesystems: Bifurcation reference systems.
Returns:
Vectors in the direction of the parent and daughter branches of each
bifurcation.
Pointdata (selection):
BifurcationVectors: Bifurcation vectors.
InPlaneBifurcationVectors: BifurcationVectors projected onto the
bifurcation plane.
InPlaneBifurcationVectorAngles: Angles (in radians) between
InPlaneBifurcationVectors and the UpNormal.
"""
bifvectors = vmtkscripts.vmtkBifurcationVectors()
bifvectors.Centerlines = centerlines
bifvectors.ReferenceSystems = referencesystems
bifvectors.RadiusArrayName = 'MaximumInscribedSphereRadius'
bifvectors.GroupIdsArrayName = 'GroupIds'
bifvectors.CenterlineIdsArrayName = 'CenterlineIds'
bifvectors.TractIdsArrayName = 'TractIds'
bifvectors.BlankingArrayName = 'Blanking'
bifvectors.ReferenceSystemsNormalArrayName = 'Normal'
bifvectors.ReferenceSystemsUpNormalArrayName = 'UpNormal'
bifvectors.Execute()
return bifvectors.BifurcationVectors
def vmtkbranchclipper(surface, centerlines, groupidlist=[], insideout=0):
"""Split surface into branches.
Args:
surface: Surface mesh of vascular geometry.
centerlines: Centerlines split into branches.
groupidlist: List of branches (specified by their GroupIds) to extract.
insideout (bool): Either keep or remove the branches in groupidlist.
Returns:
Surface split into branches or, if specified, only the selected
branches.
"""
clipper = vmtkscripts.vmtkBranchClipper()
clipper.Surface = surface
clipper.Centerlines = centerlines
clipper.InsideOut = insideout
clipper.GroupIds = groupidlist
clipper.GroupIdsArrayName = 'GroupIds'
clipper.RadiusArrayName = 'MaximumInscribedSphereRadius'
clipper.BlankingArrayName = 'Blanking'
clipper.Execute()
return clipper.Surface
def vmtkbranchextractor(centerlines):
"""Split centerlines into branches.
Args:
centerlines: Centerlines.
Returns:
Centerlines split into branches.
Celldata (selection):
CenterlineId: Cellid of centerline from which the tract was split.
TractId: Id identifying each tract along one centerline.
GroupId: Id of the group to which the tract belongs.
Blanking: Boolean indicating whether tract belongs to bifurcation.
"""
extractor = vmtkscripts.vmtkBranchExtractor()
extractor.Centerlines = centerlines
extractor.RadiusArrayName = 'MaximumInscribedSphereRadius'
extractor.Execute()
return extractor.Centerlines
def vmtkbranchgeometry(centerlines, smoothing=0, iterations=100):
"""Compute geometric variables for each branch of a vessel tree.
Args:
centerlines: Centerlines split into branches.
smoothing (bool): Laplacian smooth branches before computing geometric
variables.
iterations: Number of smoothing iterations.
Returns:
Description of geometry for each branch. Vertices at (0, 0, 0), one per
branch, are used as placeholders to assign pointdata with values
for the geometric variables.
Pointdata (selection):
Length: Branch length.
Curvature: Average curvature of branch.
Torsion: Average torsion of branch.
Tortuosity: Tortuosity of branch.
Note:
Smoothing doesn't seem to work, it does work with
vmtkcenterlinegeometry.
"""
branchgeometry = vmtkscripts.vmtkBranchGeometry()
branchgeometry.Centerlines = centerlines
branchgeometry.GroupIdsArrayName = 'GroupIds'
branchgeometry.RadiusArrayName = 'MaximumInscribedSphereRadius'
branchgeometry.BlankingArrayName = 'Blanking'
branchgeometry.LineSmoothing = smoothing
branchgeometry.NumberOfSmoothingIterations = iterations
branchgeometry.Execute()
return branchgeometry.GeometryData
def vmtkbranchmapping(surface, centerlines, referencesystems):
"""Map and stretch the longitudinal metrics obtained with
vmtkbranchmetrics.
Args:
surface: Surface split into branches.
centerlines: Centerlines split into branches.
referencesystems: Bifurcation reference systems.
Returns:
Surface with the longitudinal metric mapped and stretched to correctly
account for the presence of insertion regions at bifurcations.
Pointdata (selection):
StretchedMapping: Corrected longitudinal metric.
"""
mapper = vmtkscripts.vmtkBranchMapping()
mapper.Surface = surface
mapper.Centerlines = centerlines
mapper.ReferenceSystems = referencesystems
mapper.AbscissasArrayName = 'Abscissas'
mapper.NormalsArrayName = 'ParallelTransportNormals'
mapper.GroupIdsArrayName = 'GroupIds'
mapper.CenterlineIdsArrayName = 'CenterlineIds'
mapper.TractIdsArrayName = 'TractIds'
mapper.ReferenceSystemsNormalArrayName = 'Normal'
mapper.RadiusArrayName = 'MaximumInscribedSphereRadius'
mapper.BlankingArrayName = 'Blanking'
mapper.AngularMetricArrayName = 'AngularMetric'
mapper.AbscissaMetricArrayName = 'AbscissaMetric'
mapper.Execute()
return mapper.Surface
def vmtkbranchmetrics(surface, centerlines):
"""Compute longitudinal and circumferential metrics for each branch.
Args:
surface: Surface split into branches.
centerlines: Centerlines split into branches.
Returns:
Surface with longitudinal and circumferential metrics for each
branch.
Pointdata (selection):
AbscissaMetric: Curvilinear abscissa of centerlines projected onto
the surface.
AngularMetric: Periodic circumferential coordinates of surface mesh
points around the centerlines, spanning the interval (-pi, pi).
The zero angle is derived from the ParallelTransportNormals
pointdata of the centerlines, which can be obtained using
vmtkcenterlineattributes.
"""
branchmetrics = vmtkscripts.vmtkBranchMetrics()
branchmetrics.Surface = surface
branchmetrics.Centerlines = centerlines
branchmetrics.AbscissasArrayName = 'Abscissas'
branchmetrics.NormalsArrayName = 'ParallelTransportNormals'
branchmetrics.RadiusArrayName = 'MaximumInscribedSphereRadius'
branchmetrics.GroupIdsArrayName = 'GroupIds'
branchmetrics.CenterlineIdsArrayName = 'CenterlineIds'
branchmetrics.TractIdsArrayName = 'TractIds'
branchmetrics.BlankingArrayName = 'Blanking'
branchmetrics.Execute()
return branchmetrics.Surface
def vmtkbranchpatching(surface, longitudinalpatchsize=1.0,
circularnumberofpatches=12):
"""Patch surface of each branch.
Patching means to 'cut' a set of contiguous rectangular regions on the
surface mesh that follow iso-contours in the StretchedMapping and
AngularMetric arrays. All the quantities of interest (e.g. wall shear
stress, oscillatory shear index) are averaged on each of these patches.
Args:
surface: Surface split into branches.
longitudinalpatchsize: 'Length' of the patch along the longitudinal
direction.
circularnumberofpatches: Number of patches along the circumference.
Returns:
Surface composed of disconnected patches.
Pointdata:
All the original pointdata
Celldata (selection):
Average of original pointdata on patch to which cell belongs
Slab: Patch coordinate (integer) in longitudinal direction.
Sector: Patch coordinate (integer) in circumferential direction.
PatchArea: Surface area of the patch
"""
patcher = vmtkscripts.vmtkBranchPatching()
patcher.Surface = surface
patcher.LongitudinalPatchSize = longitudinalpatchsize
patcher.CircularNumberOfPatches = circularnumberofpatches
patcher.UseConnectivity = 1
patcher.CircularPatching = 1
patcher.GroupIdsArrayName = 'GroupIds'
patcher.LongitudinalMappingArrayName = 'StretchedMapping'
patcher.CircularMappingArrayName = 'AngularMetric'
patcher.Execute()
#return (patcher.Surface, patcher.PatchedData)
return patcher.Surface
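# Editorial sketch (not part of the original module): the patching workflow
# described in the docstring above chains vmtkbranchmetrics ->
# vmtkbranchmapping -> vmtkbranchpatching on a surface and centerlines that
# have already been split into branches, roughly:
#
#     surface = vmtkbranchmetrics(surface, centerlines)
#     surface = vmtkbranchmapping(surface, centerlines, referencesystems)
#     patched = vmtkbranchpatching(surface, longitudinalpatchsize=1.0,
#                                  circularnumberofpatches=12)
#
# The variable names are illustrative; the inputs are assumed to carry the
# Abscissas and ParallelTransportNormals arrays (see vmtkcenterlineattributes)
# and the reference systems from vmtkbifurcationreferencesystems.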
def vmtkbranchsections(surface, centerlines, interval=1):
"""Compute sections at specified intervals (in maximally inscribed sphere
radii units) labeled by the GroupId of the corresponding branch.
Args:
surface: Surface split into branches.
centerlines: Centerlines split into branches.
interval: Distance between sections in maximally inscribed sphere radii
units.
Returns:
Polydata with sections.
Celldata (selection):
BranchSectionGroupIds: GroupId of corresponding branch
BranchSectionDistanceSpheres: Distance along the branch in maximally
inscribed sphere radii
"""
sectioner = vmtkscripts.vmtkBranchSections()
sectioner.Surface = surface
sectioner.Centerlines = centerlines
    sectioner.NumberOfDistanceSpheres = interval
sectioner.RadiusArrayName = 'MaximumInscribedSphereRadius'
sectioner.GroupIdsArrayName = 'GroupIds'
sectioner.CenterlineIdsArrayName = 'CenterlineIds'
sectioner.TractIdsArrayName = 'TractIds'
sectioner.BlankingArrayName = 'Blanking'
sectioner.Execute()
return sectioner.BranchSections
def vmtkcenterlineattributes(centerlines):
"""Compute centerline attributes.
Args:
centerlines: Centerlines.
Returns:
Centerlines with attributes.
Pointdata:
MaximumInscribedSphereRadius: If the point on the centerline is the
center of a sphere, this is the radius of the largest possible
sphere that does not intersect the surface.
Abscissas: Position along the centerlines. By default, the abscissa
is measured from the start of the centerlines.
ParallelTransportNormals: 'Normal' of the centerlines (perpendicular
to centerline direction).
"""
clattributes = vmtkscripts.vmtkCenterlineAttributes()
clattributes.Centerlines = centerlines
clattributes.Execute()
return clattributes.Centerlines
def vmtkcenterlinegeometry(centerlines, smoothing=0, iterations=100):
"""Compute the local geometry of centerlines.
Args:
centerlines: Centerlines.
smoothing (bool): Laplacian smooth centerlines before computing
geometric variables.
iterations: Number of smoothing iterations.
Returns:
Centerlines with geometric variables defined at each point.
Pointdata (selection):
Curvature: Local curvature.
Torsion: Local torsion.
Celldata (selection):
Tortuosity: Tortuosity of each centerline.
Length: Length of each centerline.
Note:
Since the computation of the geometric variables depends on first,
second and third derivatives of the line coordinates, and since such
derivatives are approximated using a simple finite difference scheme
along the line, it is very likely that such derivatives will be affected
by noise that is not appreciable when looking at the line itself. For
this reason, it might be necessary to run the Laplacian smoothing filter
before computing the derivatives and the related quantities.
"""
clgeometry = vmtkscripts.vmtkCenterlineGeometry()
clgeometry.Centerlines = centerlines
clgeometry.LineSmoothing = smoothing
clgeometry.NumberOfSmoothingIterations = iterations
clgeometry.Execute()
return clgeometry.Centerlines
def vmtkcenterlinemerge(centerlines, length=.1):
"""Merge centerline tracts belonging to the same groups.
Args:
centerlines: Centerlines split into branches.
length: Distance between centerline points after resampling.
Returns:
Centerlines with only one centerline branch per vessel tree branch. The
centerline branches meet at the bifurcation origins.
"""
merger = vmtkscripts.vmtkCenterlineMerge()
merger.Centerlines = centerlines
merger.Length = length
merger.RadiusArrayName = 'MaximumInscribedSphereRadius'
merger.GroupIdsArrayName = 'GroupIds'
merger.CenterlineIdsArrayName = 'CenterlineIds'
merger.BlankingArrayName = 'Blanking'
merger.TractIdsArrayName = 'TractIds'
merger.Execute()
return merger.Centerlines
def vmtkcenterlinemodeller(centerlines, size=[64, 64, 64]):
"""Convert a centerline to an image containing the tube function.
Args:
centerlines: Centerlines.
size: Image dimensions.
Returns:
Signed distance transform image, with the zero level set being (tapered)
tubes running from one centerline point to the next with a radius at
each end corresponding to the local MaximumInscribedSphereRadius.
"""
modeller = vmtkscripts.vmtkCenterlineModeller()
modeller.Centerlines = centerlines
modeller.RadiusArrayName = 'MaximumInscribedSphereRadius'
modeller.SampleDimensions = size
modeller.Execute()
return modeller.Image
def vmtkcenterlineoffsetattributes(centerlines, referencesystems,
refgroupid=1):
"""Offset centerline attributes to a bifurcation reference system.
Args:
centerlines: Centerlines with attributes.
referencesystems: Bifurcation reference systems.
refgroupid: GroupId of bifurcation to which to offset the attributes.
Returns:
Centerlines with the attributes offset in such a way that the abscissa
of the closest point to the bifurcation origin is zero and the
centerline normal at that point coincides with the bifurcation reference
system normal.
"""
offsetter = vmtkscripts.vmtkCenterlineOffsetAttributes()
offsetter.Centerlines = centerlines
offsetter.ReferenceSystems = referencesystems
offsetter.ReferenceGroupId = refgroupid
offsetter.AbscissasArrayName = 'Abscissas'
offsetter.NormalsArrayName = 'ParallelTransportNormals'
offsetter.GroupIdsArrayName = 'GroupIds'
offsetter.CenterlineIdsArrayName = 'CenterlineIds'
offsetter.ReferenceSystemsNormalArrayName = 'Normal'
offsetter.Execute()
return offsetter.Centerlines
def vmtkcenterlineresampling(centerlines, length=.1):
"""Resample input centerlines with a spline filter.
Args:
centerlines: Centerlines.
length: Space between centerline points after resampling.
Returns:
Centerlines with equal spacing between centerline points.
"""
resampler = vmtkscripts.vmtkCenterlineResampling()
resampler.Centerlines = centerlines
resampler.Length = length
resampler.Execute()
return resampler.Centerlines
def vmtkcenterlines(surface, endpoints=0, interactive=False,
sourcepoints=[0, 0, 0], targetpoints=[0, 0, 0]):
"""Compute centerlines of a vascular geometry.
Args:
surface: Surface mesh of a vascular geometry.
endpoints (bool): Include endpoints. By construction, centerlines do
not reach the source/targetpoints. endpoints=1 bridges the
start/end of the centerlines to the source/targetpoints.
interactive (bool): Select source/targetpoints interactively. This
pops up a VTK window. Follow instructions in terminal.
sourcepoints: Give barycenters of sourcepoints as in '[0.0, 1.0, 2.0]'.
In case of multiple sourcepoints, append the lists of each three
coordinates as in '[0.0, 1.0, 2.0, 3.0, 4.0, 5.0]'.
targetpoints: Give barycenters of targetpoints following same notation
as with sourcepoints.
Returns:
Centerlines. Each cell of the centerlines polydata is a centerline from
one of the sourcepoints to one of the targetpoints.
"""
centerliner = vmtkscripts.vmtkCenterlines()
centerliner.Surface = surface
centerliner.AppendEndPoints = endpoints
if interactive:
centerliner.SeedSelectorName = 'pickpoint'
else:
centerliner.SeedSelectorName = 'pointlist'
centerliner.SourcePoints = sourcepoints
centerliner.TargetPoints = targetpoints
centerliner.Execute()
return centerliner.Centerlines
def vmtkcenterlinesections(surface, centerlines):
"""Compute sections located at each point of the centerlines.
Args:
surface: Surface mesh of vascular geometry.
centerlines: Centerlines corresponding to surface.
Returns:
Polydata with one cross section per branch of each bifurcation.
Celldata (selection):
CenterlineSectionArea: Section area.
CenterlineSectionMinSize: Minimum diameter of section.
CenterlineSectionMaxSize: Maximum diameter of section.
"""
sectioner = vmtkscripts.vmtkCenterlineSections()
sectioner.Surface = surface
sectioner.Centerlines = centerlines
sectioner.Execute()
return sectioner.CenterlineSections
def vmtkcenterlinesmoothing(centerlines, iterations=100):
"""Smooth centerlines with a Laplacian smoothing filter.
Args:
centerlines: Centerlines.
iterations: Number of smoothing iterations.
Returns:
Smoothed centerlines.
"""
smoother = vmtkscripts.vmtkCenterlineSmoothing()
smoother.Centerlines = centerlines
smoother.NumberOfSmoothingIterations = iterations
smoother.Execute()
return smoother.Centerlines
def vmtkdelaunayvoronoi(surface, removesubresolution=1):
"""Compute Voronoi diagram corresponding to the surface.
Args:
surface: Surface mesh.
removesubresolution (bool): Remove Voronoi points with 'subresolution'
radii.
Returns:
Voronoi diagram.
Note:
The Voronoi diagram represents the lumen volume as a set of overlapping
spheres: the largest spheres correspond to the local minimum lumen
diameter, the smallest spheres correspond to finer details on the
surface.
"""
voronoi = vmtkscripts.vmtkDelaunayVoronoi()
voronoi.Surface = surface
voronoi.RemoveSubresolutionTetrahedra = removesubresolution
voronoi.Execute()
return voronoi.VoronoiDiagram
def vmtkdistancetocenterlines(surface, centerlines):
"""Compute distance from the surface to the centerlines.
Args:
surface: Surface mesh of vascular geometry.
centerlines: Centerlines corresponding to surface.
Returns:
Surface with DistanceToCenterlines as pointdata.
"""
distance = vmtkscripts.vmtkDistanceToCenterlines()
distance.Surface = surface
distance.Centerlines = centerlines
distance.RadiusArrayName = 'MaximumInscribedSphereRadius'
distance.Execute()
return distance.Surface
def vmtkflowextensions(surface, interactive=1, extensionlength=10,
transitionratio=.25):
"""Extrude inlets and outlets of a vascular geometry.
Args:
surface: Surface mesh of vascular geometry.
interactive (bool): Choose inlets/outlets to be extruded.
extensionlength: Length of extrusions.
transitionratio: Rate of transition from model section to circular
section.
Returns:
Surface with extruded inlets/outlets.
"""
extender = vmtkscripts.vmtkFlowExtensions()
extender.Surface = surface
extender.ExtensionMode = 'boundarynormal'
extender.TransitionRatio = transitionratio
extender.Interactive = interactive
extender.ExtensionLength = extensionlength
extender.Execute()
return extender.Surface
def vmtkicpregistration(surface, referencesurface):
"""Register a surface to a reference surface using the interative closest
point algorithm.
Args:
        surface: Surface mesh.
        referencesurface: Reference surface mesh.
Returns:
'surface' rigidly transformed to best match 'referencesurface'.
Pointdata:
Distance: Distance from 'surface' to 'referencesurface'.
"""
registration = vmtkscripts.vmtkICPRegistration()
registration.Surface = surface
registration.ReferenceSurface = referencesurface
registration.DistanceArrayName = 'Distance'
registration.Execute()
return registration.Surface
def vmtkimagereader(path):
"""Read an image and store it in a vtkImageData object.
Args:
path: Path to the image file.
Returns:
vtkImageData object.
Note:
Reads several image formats: vti, vtk, dcm, raw, mha, mhd, tif, png
"""
reader = vmtkscripts.vmtkImageReader()
reader.InputFileName = path
reader.Execute()
return reader.Image
def vmtkimagewriter(image, path):
"""Write a vtkImageData object to disk.
Args:
image: vtkImageData object.
path: Path to the image file.
Returns:
n/a
Note:
Writes several image formats: vti, vtk, mha, mhd, tif, png, dat
"""
writer = vmtkscripts.vmtkImageWriter()
writer.Image = image
writer.OutputFileName = path
writer.Execute()
def vmtkmarchingcubes(image, level=0.0):
"""Generate an isosurface of given level from a 3D image.
Args:
image: vtkImageData object.
level: Graylevel at which to generate the isosurface.
Returns:
Surface mesh of the isosurface.
"""
marcher = vmtkscripts.vmtkMarchingCubes()
marcher.Image = image
marcher.Level = level
marcher.Execute()
return marcher.Surface
def vmtkmeshreader(path):
"""Read a mesh and store it in a vtkUnstructuredGrid object.
Args:
path: Path to the mesh file.
Returns:
vtkUnstructuredGrid object.
Note:
Reads several mesh formats: vtu, vtk, FDNEUT, xda, neu (ngneut),
gneu (gambit), tec (tecplot), node (tetgen), ele (tetgen)
"""
reader = vmtkscripts.vmtkMeshReader()
reader.InputFileName = path
reader.Execute()
return reader.Mesh
def vmtkmeshtosurface(mesh, cleanoutput=1):
"""Convert a mesh to a surface by throwing out volume elements and (optionally) the relative points
Args:
mesh: Volumetric mesh.
cleanoutput (bool): Remove unused points.
Returns:
vtkPolyData object.
"""
extractor = vmtkscripts.vmtkMeshToSurface()
extractor.Mesh = mesh
extractor.CleanOutput = cleanoutput
extractor.Execute()
return extractor.Surface
def vmtkmeshvectorfromcomponents(mesh, vectorname='Velocity',
componentsnames=['VelocityX', 'VelocityY',
'VelocityZ'], removecomponents=False):
"""Create a vector array from a number of scalar arrays treated as vector
components.
Args:
mesh: vtkUnstructuredGrid object.
vectorname: Name to give to the vector array that will be created.
componentsnames: List of names of the three scalar arrays that will be
used as vector components.
removecomponents (bool): Remove the scalar arrays after creating the
vector array.
Returns:
vtkUnstructuredGrid object with vector array.
"""
vectorer = vmtkscripts.vmtkMeshVectorFromComponents()
vectorer.Mesh = mesh
vectorer.VectorArrayName = vectorname
vectorer.ComponentsArrayNames = componentsnames
vectorer.RemoveComponentArrays = removecomponents
vectorer.Execute()
return vectorer.Mesh
def vmtkmeshwallshearrate(mesh, velocityarrayname='vel'):
"""Compute wall shear rate from a velocity field
Args:
mesh: vtkUnstructuredGrid object.
velocityarrayname: Name of velocity array.
Returns:
vtkPolyData object with the surface of the mesh with the newly
created array named WallShearRate.
Note:
To obtain the wall shear stress, multiply WallShearRate by the
viscosity.
"""
wsrfilter = vmtkscripts.vmtkMeshWallShearRate()
wsrfilter.Mesh = mesh
wsrfilter.VelocityArrayName = velocityarrayname
wsrfilter.WallShearRateArrayName = 'wsr'
wsrfilter.Execute()
return wsrfilter.Surface
def vmtkmeshwriter(mesh, path):
"""Write a vtkUnstructuredGrid object to disk.
Args:
image: vtkUnstructuredGrid object.
path: Path to the mesh file.
Returns:
n/a
Note:
Writes several mesh formats: vtu, vtk, xda, FDNEUT, lifev, xml
(dolfin), msh (fluent), tec (tecplot), node (tetgen), ele (tetgen), dat
"""
writer = vmtkscripts.vmtkMeshWriter()
writer.Mesh = mesh
writer.OutputFileName = path
writer.Execute()
def vmtknetworkextraction(surface):
"""Extract a network of approximated centerlines from a surface.
Args:
surface: Surface mesh of vascular network.
Returns:
Network of centerlines.
Note:
The surface must have at least one opening.
"""
extractor = vmtkscripts.vmtkNetworkExtraction()
extractor.Surface = surface
extractor.Execute()
return extractor.Network
def vmtkpolyballmodeller(voronoi, size=[64, 64, 64]):
"""Converts a polyball to an image containing the tube function.
Args:
voronoi: Voronoi diagram.
size: Image dimensions.
Returns:
Signed distance transform image, with the zero level set being the lumen
surface represented by the Voronoi diagram.
Note:
The Voronoi diagram represents the lumen volume as a set of overlapping
spheres (also named 'polyball'): the largest spheres correspond to the
local minimum lumen diameter, the smallest spheres correspond to finer
details on the surface.
"""
modeller = vmtkscripts.vmtkPolyBallModeller()
modeller.Surface = voronoi
modeller.RadiusArrayName = 'MaximumInscribedSphereRadius'
modeller.SampleDimensions = size
modeller.Execute()
return modeller.Image
def vmtkpointsplitextractor(centerlines, splitpoint, gap=1.0):
"""Split centerlines at specified location.
Args:
centerlines: Centerlines.
splitpoint: Location where to split the centerlines.
gap: Length of 'Blanking=1' part of the centerlines.
Returns
Centerlines split at splitpoint, with the center of the gap at the
splitpoint. The output is similar to the output of
vmtkbranchextractor.
Celldata (selection):
CenterlineId: Cellid of centerline from which the tract was split.
TractId: Id identifying each tract along one centerline.
GroupId: Id of the group to which the tract belongs.
Blanking: Boolean indicating whether tract belongs to bifurcation.
"""
extractor = vmtkscripts.vmtkPointSplitExtractor()
extractor.Centerlines = centerlines
extractor.RadiusArrayName = 'MaximumInscribedSphereRadius'
extractor.GroupIdsArrayName = 'GroupIds'
extractor.SplitPoint = splitpoint
extractor.Execute()
return extractor.Centerlines
def vmtksurfacecapper(surface):
"""Caps the holes of a surface.
Args:
surface: Surface mesh of vascular geometry with holes at inlets and
outlets.
Returns:
Surface mesh with capped holes. Each cap has an ID assigned for easy
specification of boundary conditions.
Celldata:
CellEntityIds: ID assigned to caps.
"""
capper = vmtkscripts.vmtkSurfaceCapper()
capper.Surface = surface
capper.Method = 'centerpoint'
capper.Interactive = 0
capper.Execute()
return capper.Surface
def vmtksurfacecenterlineprojection(surface, centerlines):
"""Project pointdata from centerlines to a surface.
Args:
surface: Surface mesh of vascular geometry.
centerlines: Centerlines corresponding to surface.
Returns:
Surface mesh with centerlines pointdata projected onto it.
"""
projection = vmtkscripts.vmtkSurfaceCenterlineProjection()
projection.Surface = surface
projection.Centerlines = centerlines
projection.RadiusArrayName = 'MaximumInscribedSphereRadius'
projection.Execute()
return projection.Surface
def vmtksurfacecurvature(surface, curvature_type='mean', absolute_curvature=0,
median_filtering=0):
"""Compute curvature of an input surface.
Args:
surface: Surface mesh.
curvature_type ('mean', 'gaussian', 'maximum', 'minimum'): Type of
curvature to compute.
absolute_curvature (bool): Output the absolute value of curvature.
median_filtering (bool): Output curvature after median filtering to
suppress numerical noise speckles.
Returns:
Surface with curvature variable.
Pointdata:
Curvature: Local surface curvature.
"""
curvaturefilter = vmtkscripts.vmtkSurfaceCurvature()
curvaturefilter.Surface = surface
curvaturefilter.CurvatureType = curvature_type
curvaturefilter.AbsoluteCurvature = absolute_curvature
curvaturefilter.MedianFiltering = median_filtering
curvaturefilter.Execute()
return curvaturefilter.Surface
def vmtksurfacedecimation(surface, target=.9):
"""Reduce the number of triangles in a surface.
Args:
surface: Surface mesh.
target: Desired number of triangles relative to the input number of
triangles.
Returns:
Surface mesh with fewer triangles.
"""
decimator = vmtkscripts.vmtkSurfaceDecimation()
decimator.Surface = surface
decimator.TargetReduction = target
decimator.Execute()
return decimator.Surface
def vmtksurfacedistance(surface, referencesurface,
distance_arrayname='distance',
distancevector_arrayname='distance_vector',
signeddistance_arrayname='signed_distance'):
"""Compute the pointwise minimum distance from a surface to a reference
surface.
Args:
surface: Surface mesh.
referencesurface: Reference surface mesh.
Returns:
'surface' with 'Distance' pointdata.
"""
distancer = vmtkscripts.vmtkSurfaceDistance()
distancer.Surface = surface
distancer.ReferenceSurface = referencesurface
distancer.DistanceArrayName = distance_arrayname
distancer.DistanceVectorsArrayName = distancevector_arrayname
distancer.SignedDistanceArrayName = signeddistance_arrayname
distancer.Execute()
return distancer.Surface
def vmtksurfacenormals(surface):
"""Compute normals to a surface.
Args:
surface: Surface mesh.
Returns:
Surface mesh with 'Normals' vector pointdata.
"""
normaller = vmtkscripts.vmtkSurfaceNormals()
normaller.Surface = surface
normaller.Execute()
return normaller.Surface
def vmtksurfaceprojection(surface, referencesurface):
"""Interpolates the pointdata of a reference surface onto a surface based
on minimum distance criterion.
Args:
surface: Surface mesh.
referencesurface: Reference surface mesh.
Returns:
'surface' with projected pointdata from 'referencesurface'.
"""
projector = vmtkscripts.vmtkSurfaceProjection()
projector.Surface = surface
projector.ReferenceSurface = referencesurface
projector.Execute()
return projector.Surface
def vmtksurfacereader(path):
"""Read a polydata (surface or centerline) and store it in a vtkPolyData
object.
Args:
path: Path to the polydata file.
Returns:
vtkPolyData object.
Note:
Reads several polydata formats: vtp, vtk, stl, ply, tec (tecplot),
dat (tecplot)
"""
reader = vmtkscripts.vmtkSurfaceReader()
reader.InputFileName = path
reader.Execute()
return reader.Surface
def vmtksurfaceremeshing(surface, edgelength=1.0, iterations=10):
"""Remesh a surface using high quality triangles.
Args:
surface: Surface mesh.
edgelength: Target length of triangle edges.
iterations: Number of iterations to optimize the mesh quality.
Returns:
Remeshed surface.
"""
remesher = vmtkscripts.vmtkSurfaceRemeshing()
remesher.Surface = surface
remesher.NumberOfIterations = iterations
remesher.ElementSizeMode = 'edgelength'
remesher.TargetEdgeLength = edgelength
remesher.Execute()
return remesher.Surface
def vmtksurfacescaling(polydata, scalefactor):
"""Scale a polydata by an isotropic factor.
Args:
polydata: Surface mesh or centerlines.
scalefactor: Scaling factor.
Returns:
Scaled polydata.
"""
scaler = vmtkscripts.vmtkSurfaceScaling()
scaler.Surface = polydata
scaler.ScaleFactor = scalefactor
scaler.Execute()
return scaler.Surface
def vmtksurfacesmoothing(surface, iterations=100, method='taubin'):
"""Smooth a surface.
Args:
surface: Surface mesh.
iterations: Number of smoothing iterations.
method ('taubin', 'laplace'): Taubin's volume-preserving or a Laplacian
smoothing filter.
Returns:
Smoothed surface.
"""
smoother = vmtkscripts.vmtkSurfaceSmoothing()
smoother.Surface = surface
smoother.Method = method
smoother.NumberOfIterations = iterations
smoother.PassBand = 0.1
smoother.Execute()
return smoother.Surface
def vmtksurfacesubdivision(surface, numberofsubdivisions=1, method='linear'):
"""Subdivide a triangulated surface.
Args:
surface: Surface mesh.
numberofsubdivisions: Number of subdivisions.
method ('linear', 'butterfly', 'loop'): Subdivision method.
Returns:
Subdivided surface mesh.
"""
divider = vmtkscripts.vmtkSurfaceSubdivision()
divider.Surface = surface
divider.NumberOfSubdivisions = numberofsubdivisions
divider.Method = method
divider.Execute()
return divider.Surface
def vmtksurfacewriter(polydata, path):
"""Write a vtkPolyData object (e.g. surface and centerlines) to disk.
Args:
polydata: vtkPolyData object.
path: Path to the polydata file.
Returns:
n/a
Note:
Writes several polydata formats: vtp, vtk, stl (use only for
triangulated surface meshes), ply, tec (tecplot), dat
"""
writer = vmtkscripts.vmtkSurfaceWriter()
writer.Surface = polydata
writer.OutputFileName = path
writer.Execute()
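# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original module): a typical
# centerline-analysis pipeline with the wrappers above might look like this,
# with 'model.vtp' standing in for a real surface file:
#
#     surface = vmtksurfacereader('model.vtp')
#     centerlines = vmtkcenterlines(surface, interactive=True)
#     centerlines = vmtkcenterlineattributes(centerlines)
#     centerlines = vmtkbranchextractor(centerlines)
#     refsystems = vmtkbifurcationreferencesystems(centerlines)
#     branches = vmtkbranchclipper(surface, centerlines)
#     vmtksurfacewriter(branches, 'model_branches.vtp')
#
# Each wrapper returns a new VTK object, so intermediate results can be
# written to disk or inspected at any point.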
|
ajgeers/utils
|
utils/vmtklib.py
|
Python
|
bsd-2-clause
| 38,412
|
[
"Gaussian",
"VTK"
] |
f62742dcb2d1a00721764c73068ef640eb2feec712684cb5902469b2b664cf27
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
#
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import datetime as dt
class Lammps(CMakePackage):
"""LAMMPS stands for Large-scale Atomic/Molecular Massively
Parallel Simulator. This package uses patch releases, not
stable release.
See https://github.com/spack/spack/pull/5342 for a detailed
discussion.
"""
homepage = "http://lammps.sandia.gov/"
url = "https://github.com/lammps/lammps/archive/patch_1Sep2017.tar.gz"
tags = ['ecp', 'ecp-apps']
version('20180316', '25bad35679583e0dd8cb8753665bb84b')
version('20180222', '4d0513e3183bd57721814d217fdaf957')
version('20170922', '4306071f919ec7e759bda195c26cfd9a')
version('20170901', '767e7f07289663f033474dfe974974e7')
version('develop', git='https://github.com/lammps/lammps', branch='master')
def url_for_version(self, version):
vdate = dt.datetime.strptime(str(version), "%Y%m%d")
return "https://github.com/lammps/lammps/archive/patch_{0}.tar.gz".format(
vdate.strftime("%d%b%Y").lstrip('0'))
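    # For example (editorial note): version '20170901' maps to
    # ".../archive/patch_1Sep2017.tar.gz" and '20180316' to
    # ".../archive/patch_16Mar2018.tar.gz"; strftime('%d%b%Y') plus
    # lstrip('0') reproduces the zero-free day-of-month used in LAMMPS tags.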
supported_packages = ['asphere', 'body', 'class2', 'colloid', 'compress',
'coreshell', 'dipole', 'granular', 'kspace', 'latte',
'manybody', 'mc', 'meam', 'misc', 'molecule',
'mpiio', 'peri', 'poems', 'python', 'qeq', 'reax',
'replica', 'rigid', 'shock', 'snap', 'srd',
'user-atc', 'user-h5md', 'user-lb', 'user-misc',
'user-netcdf', 'user-omp', 'voronoi']
for pkg in supported_packages:
variant(pkg, default=False,
description='Activate the {0} package'.format(pkg))
variant('lib', default=True,
description='Build the liblammps in addition to the executable')
variant('mpi', default=True,
description='Build with mpi')
depends_on('mpi', when='+mpi')
depends_on('mpi', when='+mpiio')
depends_on('fftw', when='+kspace')
depends_on('voropp', when='+voronoi')
depends_on('netcdf+mpi', when='+user-netcdf')
depends_on('blas', when='+user-atc')
depends_on('lapack', when='+user-atc')
depends_on('latte@1.0.1', when='@:20180222+latte')
depends_on('latte@1.1.1:', when='@20180316:+latte')
depends_on('blas', when='+latte')
depends_on('lapack', when='+latte')
depends_on('python', when='+python')
depends_on('mpi', when='+user-lb')
depends_on('mpi', when='+user-h5md')
depends_on('hdf5', when='+user-h5md')
conflicts('+body', when='+poems')
conflicts('+latte', when='@:20170921')
conflicts('+python', when='~lib')
conflicts('+qeq', when='~manybody')
conflicts('+user-atc', when='~manybody')
conflicts('+user-misc', when='~manybody')
conflicts('+user-phonon', when='~kspace')
patch("lib.patch", when="@20170901")
patch("660.patch", when="@20170922")
root_cmakelists_dir = 'cmake'
def cmake_args(self):
spec = self.spec
args = [
'-DBUILD_SHARED_LIBS={0}'.format(
'ON' if '+lib' in spec else 'OFF'),
'-DENABLE_MPI={0}'.format(
'ON' if '+mpi' in spec else 'OFF')
]
for pkg in self.supported_packages:
opt = '-DENABLE_{0}'.format(pkg.upper())
if '+{0}'.format(pkg) in spec:
args.append('{0}=ON'.format(opt))
else:
args.append('{0}=OFF'.format(opt))
if '+kspace' in spec:
args.append('-DFFT=FFTW3')
return args
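    # Editorial note: with the variants above, a build enabling MPI and the
    # long-range electrostatics (kspace) package could be requested as, e.g.,
    # `spack install lammps +mpi +kspace`; any entry in supported_packages
    # can be toggled the same way. The spec shown is illustrative only.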
|
matthiasdiener/spack
|
var/spack/repos/builtin/packages/lammps/package.py
|
Python
|
lgpl-2.1
| 4,813
|
[
"LAMMPS",
"NetCDF"
] |
5ab1909bc56ef23e0186f6f6070ec96aeec310145dcb550740b08adde951dadf
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffyilm(RPackage):
"""affyILM is a preprocessing tool which estimates gene
expression levels for Affymetrix Gene Chips. Input from
physical chemistry is employed to first background subtract
intensities before calculating concentrations on behalf
of the Langmuir model."""
homepage = "https://www.bioconductor.org/packages/affyILM/"
url = "https://git.bioconductor.org/packages/affyILM"
version('1.28.0', git='https://git.bioconductor.org/packages/affyILM', commit='307bee3ebc599e0ea4a1d6fa8d5511ccf8bef7de')
depends_on('r@3.4.0:3.4.9', when='@1.28.0')
depends_on('r-gcrma', type=('build', 'run'))
depends_on('r-affxparser', type=('build', 'run'))
depends_on('r-affy', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-affyilm/package.py
|
Python
|
lgpl-2.1
| 2,063
|
[
"Bioconductor"
] |
1966cff23b21a535ac8029467f1862b45be2e097e6a3badfd12f57ac6df19218
|
import errno
import functools
import itertools
import multiprocessing
import os, os.path
import shlex
import subprocess
import sys
from ndmake import debug
from ndmake import dispatch
from ndmake import files
from ndmake import mtime
from ndmake import mux
from ndmake import space
from ndmake import template
from ndmake import threadpool
dprint = debug.dprint_factory(__name__)
dprint_traverse = debug.dprint_factory(__name__, "traverse")
dprint_update = debug.dprint_factory(__name__, "update")
dprint_undemarcated = debug.dprint_factory(__name__, "undemarcated")
dprint_unlink = debug.dprint_factory(__name__, "unlink")
#
# Exceptions
#
class UpdateException(Exception): pass
class MissingFileException(UpdateException): pass
class CalledProcessError(UpdateException): pass
#
# Graph façade
#
class Graph:
# Vertices and edges are managed by integer vertex ids. The Vertex objects
# themselves do not hold pointers to each other.
#
# The acyclicity of the directed graph is enforced during graph
# construction. XXX Might be more efficient to check later.
def __init__(self):
self._vertex_id_generator = itertools.count(0)
# Vertices.
self._vertex_id_map = {} # Vertex -> id
self._id_vertex_map = {} # id -> Vertex
self._name_id_map = {} # (name, type_) -> id
# Edges.
self._parents = {} # id -> set(ids)
self._children = {} # id -> set(ids)
# Dimensions.
self.dimensions = {} # name -> Dimension
# Templates.
self.template_environment = template.Environment()
def write_graphviz(self, filename):
with open(filename, "w") as file:
fprint = functools.partial(print, file=file)
fprint("digraph depgraph {")
for id, vertex in self._id_vertex_map.items():
label = vertex.name
shape = "box"
color = "black"
if isinstance(vertex, Computation):
shape = "box"
color = "black"
elif isinstance(vertex, Dataset):
shape = "folder"
color = "navy"
elif isinstance(vertex, Survey):
label = " ".join((vertex.__class__.__name__, vertex.name))
shape = "box"
color = "red"
fprint("v{:d} [label=\"{}\" shape=\"{}\" color=\"{}\"];".
format(id, label, shape, color))
for parent, children in self._parents.items():
for child in children:
fprint("v{:d} -> v{:d};".format(child, parent))
fprint("}")
def vertex_by_name(self, name, type_):
"""Return a vertex with the given name and type."""
try:
vertex_id = self._name_id_map[(name, type_)]
except KeyError:
raise KeyError("no {} vertex named {}".format(type_.__name__,
name))
return self._id_vertex_map[vertex_id]
def add_vertex(self, vertex):
"""Add an isolated vertex to the graph and return the vertex id."""
name_key = (vertex.name, vertex.namespace_type)
if name_key in self._name_id_map:
existing_id = self._name_id_map[name_key]
existing_vertex = self._id_vertex_map[existing_id]
if vertex is existing_vertex:
return existing_id
raise KeyError("a {} vertex named {} already exists".
format(vertex.namespace_type.__name__,
vertex.name))
vertex_id = next(self._vertex_id_generator)
self._vertex_id_map[vertex] = vertex_id
self._id_vertex_map[vertex_id] = vertex
self._name_id_map[name_key] = vertex_id
return vertex_id
def _vertex_id(self, vertex, add_if_not_member=False):
if add_if_not_member and vertex not in self._vertex_id_map:
return self.add_vertex(vertex)
return self._vertex_id_map[vertex]
def add_edge(self, from_vertex, to_vertex):
"""Add an edge between two vertices.
If either or both of the vertices do not belong to the graph, add them
as well.
"""
from_id = self._vertex_id(from_vertex, add_if_not_member=True)
to_id = self._vertex_id(to_vertex, add_if_not_member=True)
if from_id == to_id:
raise ValueError("attempt to create self-dependent vertex")
if self._is_ancestor(to_vertex, from_vertex):
raise ValueError("attmpt to create cycle in graph")
self._parents.setdefault(to_id, set()).add(from_id)
self._children.setdefault(from_id, set()).add(to_id)
def _is_ancestor(self, the_vertex, other_vertex):
# Not the most efficient traversal, but good enough for now.
for child in self.children_of(the_vertex):
if child is other_vertex or self._is_ancestor(child, other_vertex):
return True
def simplify_by_transitive_reduction(self):
visited_ids = set()
def visit(vertex_id, previous_vertex_id):
visited_ids.add(vertex_id)
# Remove shortcut edges from visited ancestors.
for parent_id in list(self._parents.get(vertex_id, [])):
if parent_id is previous_vertex_id:
continue
if parent_id in visited_ids:
# Remove this edge if its tail is a Survey.
if isinstance(self._id_vertex_map[parent_id], Survey):
self._parents[vertex_id].remove(parent_id)
self._children[parent_id].remove(vertex_id)
# Proceed to children.
for child_id in list(self._children.get(vertex_id, [])):
# Children may be removed during iteration.
if child_id in self._children.get(vertex_id, []):
visit(child_id, vertex_id)
visited_ids.remove(vertex_id)
for vertex_id in self._id_vertex_map:
visit(vertex_id, None)
def parents_of(self, vertex):
"""Return the parent vertices of the given vertex."""
return list(self._id_vertex_map[i] for i in
self._parents.get(self._vertex_id_map[vertex], []))
def children_of(self, vertex):
"""Return the child vertices of the given vertex."""
return list(self._id_vertex_map[i] for i in
self._children.get(self._vertex_id_map[vertex], []))
def sources(self):
"""Return all vertices that do not have parents.
Includes isolated vertices, if any.
"""
return list(self._id_vertex_map[id]
for id in self._id_vertex_map.keys()
if not len(self._parents.setdefault(id, set())))
def sinks(self):
"""Return all vertices that do not have children.
Includes isolated vertices, if any.
"""
return list(self._id_vertex_map[id]
for id in self._id_vertex_map.keys()
if not len(self._children.setdefault(id, set())))
@dispatch.tasklet
def update_vertices(self, vertices, options):
for vertex in vertices:
yield dispatch.Spawn(vertex.update(self, options))
# Wait for completion.
notification_chans = [] # Can't use generator expression here.
for vertex in vertices:
notification_chan = yield from vertex.get_notification_chan()
notification_chans.append(notification_chan)
if len(notification_chans):
completion_chan = yield dispatch.MakeChannel()
yield dispatch.Spawn(mux.demultiplex(notification_chans),
return_chan=completion_chan)
yield dispatch.Recv(completion_chan)
@dispatch.tasklet
def update_vertices_with_threadpool(self, vertices, options):
if options.get("parallel", False):
jobs = options.get("jobs")
if not jobs or jobs < 1:
jobs = multiprocessing.cpu_count()
options["jobs"] = jobs
task_chan = yield dispatch.MakeChannel()
yield dispatch.Spawn(threadpool.threadpool(task_chan, jobs))
options["threadpool"] = task_chan
yield from self.update_vertices(vertices, options)
if "threadpool" in options:
finish_chan = yield dispatch.MakeChannel()
yield dispatch.Send(task_chan, (..., None, finish_chan, None),
block=False)
yield dispatch.Recv(finish_chan)
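    # Usage sketch (hypothetical options dict; the dispatch scheduler runs
    # the returned tasklet):
    #
    #     tasklet = graph.update_vertices_with_threadpool(
    #         graph.sinks(), {"parallel": True, "jobs": 4})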
#
# Vertex
#
class Vertex:
def __init__(self, graph, name, scope):
self.name = name
self.scope = scope
self.update_started = False # Prevent duplicate update.
self.notification_request_chan = None # Request chan for mux.
def __repr__(self):
return "<{} \"{}\">".format(type(self).__name__, self.name)
def __str__(self):
return "{} {}".format(type(self).__name__.lower(), self.name)
@property
def namespace_type(self):
if isinstance(self, Dataset):
return Dataset
if isinstance(self, Computation):
return Computation
if isinstance(self, Survey):
return Survey
@dispatch.tasklet
def update_all_elements(self, graph, options):
# This method implements element-by-element update. Subclasses can
# override this method to do a full-scope check before calling super().
dprint_update(self, "updating all elements")
completion_chans = []
for element, is_full in self.scope.iterate():
completion_chan = yield dispatch.MakeChannel()
completion_chans.append(completion_chan)
yield dispatch.Spawn(self.update_element(graph, element, is_full,
options),
return_chan=completion_chan)
# With the current implementation of dispatch.py, having a large
# number of channels with pending messages slows down the scheduler
# significantly. Until this issue is fixed (if ever), we keep down
# the number of active channels by preemptively demultiplexing the
# completion notification channels. The chunk size of 11 has been
# determined empirically, but run time is roughly constant with
# chunk sizes of 2-32.
if len(completion_chans) > 11:
chunk_complete_chan = yield dispatch.MakeChannel()
                yield dispatch.Spawn(mux.demultiplex(completion_chans),
                                     return_chan=chunk_complete_chan)
completion_chans = [chunk_complete_chan]
all_complete_chan = yield dispatch.MakeChannel()
yield dispatch.Spawn(mux.demultiplex(completion_chans),
return_chan=all_complete_chan)
yield dispatch.Recv(all_complete_chan)
@dispatch.subtasklet
def get_notification_request_chan(self):
# Return the unique channel attached to each vertex to which
# notification requests can be sent. Notification is requested by
# sending the handle to the notification channel to the channel
# returned by this subtasklet. All notification channels receive a
# signal exactly once, after the vertex has been updated or as soon as
# the request is made, whichever comes later.
if self.notification_request_chan is None:
self.notification_request_chan = yield dispatch.MakeChannel()
return self.notification_request_chan
@dispatch.subtasklet
def get_notification_chan(self):
# Return a new channel that will receive notification of completion
        # of update of this vertex. The channel will receive a signal even if
# update has already been completed by the time this subtasklet is
# called.
request_chan = (yield from self.get_notification_request_chan())
# Create and register a new notification channel.
notification_chan = yield dispatch.MakeChannel()
yield dispatch.Send(request_chan, notification_chan, False)
return notification_chan
@dispatch.tasklet
def update(self, graph, options):
# Prevent duplicate execution.
if self.update_started:
return
self.update_started = True
# Set up a channel by which completion notification channels can be
# registered.
request_chan = yield from self.get_notification_request_chan()
# Set up notification for our completion.
completion_chan = yield dispatch.MakeChannel()
yield dispatch.Spawn(mux.multiplex(completion_chan, request_chan))
yield dispatch.Spawn(self._update(graph, options),
return_chan=completion_chan)
@dispatch.tasklet
def _update(self, graph, options):
dprint_traverse("tid {}".format((yield dispatch.GetTid())),
"traversing upward:", self)
# Update prerequisites.
parents = graph.parents_of(self)
yield from graph.update_vertices(parents, options)
# Perform the update action.
dprint_traverse("tid {}".format((yield dispatch.GetTid())),
"traversing downward:", self)
if options.get("print_traversed_vertices", True):
print("starting check/update of {}".format(self))
completion_chan = yield dispatch.MakeChannel()
yield dispatch.Spawn(self.update_all_elements(graph, options),
return_chan=completion_chan)
yield dispatch.Recv(completion_chan)
if options.get("print_traversed_vertices", True):
print("finished check/update of {}".format(self))
def invalidate_up_to_date_cache(self, graph, element):
# Propagate computation status invalidation to descendants.
# This generic implementation just propagates the call; Computation
# overrides this to implement the actual invalidation.
for child in graph.children_of(self):
child.invalidate_up_to_date_cache(graph, element)
#
# Concrete Vertices
#
class Dataset(Vertex):
def __init__(self, graph, name, scope, filename_template):
super().__init__(graph, name, scope)
self.filename_template = filename_template
self.mtimes = space.Cache(self.scope, self.read_mtimes, mtime.extrema)
persistence_path = os.path.join(files.ndmake_dir(), "data", self.name)
self.mtimes.set_persistence(mtime.reader, mtime.writer,
path=persistence_path, filename="mtimes",
level=2)
def __str__(self):
return "data {}".format(self.name)
def render_filename(self, element):
dict_ = {"__name__": self.name}
return element.render_template(self.filename_template,
extra_names=dict_)
def read_mtimes(self, element):
return mtime.get(self.render_filename(element))
@dispatch.tasklet
def update_all_elements(self, graph, options):
if options.get("survey_only", False):
return
oldest_mtime, newest_mtime = self.mtimes[space.Element()]
if oldest_mtime > 0:
dprint_update(self, "all elements up to date")
if options.get("cache", False):
self.mtimes.save_to_file()
return
yield from super().update_all_elements(graph, options)
if options.get("cache", False):
self.mtimes.save_to_file()
@dispatch.tasklet
def update_element(self, graph, element, is_full, options):
dprint_update(self,
"updating {} element:".
format("full" if is_full else "partial"),
element)
if not is_full:
dprint_undemarcated("undemarcated", self, element)
return
if options.get("survey_only", False):
return
oldest_mtime, newest_mtime = self.mtimes[element]
if oldest_mtime == 0 or newest_mtime == mtime.MAX_TIME:
# There are missing files.
# Unless this is a dry run or a keep-going run, we raise an error.
# XXX For now, we raise an error unconditionally.
filename = self.render_filename(element)
parents = graph.parents_of(self)
for parent in parents:
if isinstance(parent, Computation):
raise MissingFileException("file {filename} (member of "
"dataset {dataset}; output of "
"compute {compute}) missing".
format(filename=filename,
dataset=self.name,
compute=parent.name))
raise MissingFileException("file {filename} (member of source "
"dataset {dataset}) missing".
format(filename=filename,
dataset=self.name))
# In the future, there should be a way to associate with this
# exception the command that should have produced the file (if
# not a source dataset).
return
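        # The unreachable yield below makes this method a generator, as
        # dispatch tasklets must be.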
yield
def clean(self, graph, element, cache_only=False):
if cache_only:
del self.mtimes[element]
else:
self.delete_files(element)
for parent in graph.parents_of(self):
if isinstance(parent, Computation):
parent.invalidate_up_to_date_cache(graph, element)
def delete_files(self, element):
for full_element, is_full in self.scope.iterate(element):
assert is_full
filename = self.render_filename(full_element)
dprint_unlink("deleting", filename)
if os.path.exists(filename):
os.unlink(filename)
del self.mtimes[element]
def dirname(self, element):
element = self.scope.canonicalized_element(element)
if self.scope.is_full_element(element):
return os.path.dirname(self.render_filename(element))
else:
# Given a partial element, we still want to be able to create
# directories whose names are fixed. There might be a better way
# to do this, but for now, we empirically find the common prefix
# path by setting unassigned dimensions to different values.
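            # For a hypothetical template like "out/{a}/{b}/file.dat" with
            # only "a" assigned, the two renderings below differ only in the
            # "{b}" component, so the loop stops at the shared "out/<a>"
            # prefix.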
coords1, coords2 = {}, {}
for extent in self.scope.extents:
if extent.dimension in element.space.dimensions:
value = element[extent.dimension]
coords1[extent.dimension] = value
coords2[extent.dimension] = value
else:
coords1[extent.dimension] = extent.value_type(123456)
coords2[extent.dimension] = extent.value_type(654321)
full_element1 = space.Element(self.scope, coords1)
full_element2 = space.Element(self.scope, coords2)
dirname1 = os.path.dirname(self.render_filename(full_element1))
dirname2 = os.path.dirname(self.render_filename(full_element2))
while dirname1 != dirname2:
dirname1 = os.path.dirname(dirname1)
dirname2 = os.path.dirname(dirname2)
if not dirname1 or not dirname2:
return None
return dirname1
def create_dirs(self, element):
dirname = self.dirname(element)
if dirname:
os.makedirs(dirname, exist_ok=True)
def name_proxy(self, element):
return DatasetNameProxy(self, element)
class Computation(Vertex):
def __init__(self, graph, name, scope, command_template, occupancy=1):
super().__init__(graph, name, scope)
self.command_template = command_template
self.occupancy = occupancy
is_up_to_date = functools.partial(self.is_up_to_date, graph)
self.statuses = space.Cache(self.scope, is_up_to_date, all)
persistence_path = os.path.join(files.ndmake_dir(), "compute",
self.name)
self.statuses.set_persistence(lambda s: bool(int(s)),
lambda b: "1" if b else "0",
path=persistence_path, filename="status",
level=2)
def __str__(self):
return "compute {}".format(self.name)
def render_command(self, graph, element):
# Bind input and output dataset names.
io_vertices = (graph.parents_of(self) + graph.children_of(self))
dataset_name_proxies = dict((v.name, v.name_proxy(element))
for v in io_vertices
if hasattr(v, "name_proxy"))
# Note: filename-surveyed output datasets' names are not available in
# the command template.
dict_ = dataset_name_proxies
dict_["__name__"] = self.name
return element.render_template(self.command_template,
extra_names=dict_)
def is_up_to_date(self, graph, element):
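        # An element is up to date when every output file exists (oldest
        # child mtime > 0) and the newest input file is no newer than the
        # oldest output file.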
oldest_child_mtime, _ = mtime.extrema(child.mtimes[element]
for child
in graph.children_of(self))
if oldest_child_mtime > 0:
_, newest_input_mtime = mtime.extrema(parent.mtimes[element]
for parent
in graph.parents_of(self)
if isinstance(parent,
Dataset))
if newest_input_mtime <= oldest_child_mtime:
return True
return False
@dispatch.tasklet
def update_all_elements(self, graph, options):
if options.get("survey_only", False):
return
status = self.statuses[space.Element()]
if status:
dprint_update(self, "all elements up to date")
if options.get("cache", False):
self.statuses.save_to_file()
return
yield from super().update_all_elements(graph, options)
if options.get("cache", False):
self.statuses.save_to_file()
@dispatch.tasklet
def update_element(self, graph, element, is_full, options):
dprint_update(self,
"updating {} element:".
format("full" if is_full else "partial"),
element)
if not is_full:
dprint_undemarcated("undemarcated", self, element)
return
if options.get("survey_only", False):
return
if self.statuses[element]:
return
yield from self.execute(graph, element, options)
def clean(self, graph, element, cache_only=False):
self.invalidate_up_to_date_cache(graph, element)
if not cache_only:
for child in graph.children_of(self):
child.delete_files(element)
@dispatch.subtasklet
def execute(self, graph, element, options):
del self.statuses[element]
self.invalidate_up_to_date_cache(graph, element)
for child in graph.children_of(self):
child.delete_files(element)
child.create_dirs(element)
yield from self.run_command(graph, element, options)
@dispatch.subtasklet
def run_command(self, graph, element, options):
command = self.render_command(graph, element)
print_command = options.get("print_executed_commands", False)
def task_func():
# Avoid print(), which apparently flushes the output between the
# string and the newline.
if print_command:
sys.stdout.write(command + "\n")
try:
with subprocess.Popen(command, shell=True) as proc:
proc.wait()
return proc.returncode
except OSError as e:
if e.errno == errno.E2BIG: # Argument list too long.
# Fall back to piping to shell, which will help if the long
# command is e.g. a here document.
with subprocess.Popen("/bin/sh",
stdin=subprocess.PIPE) as shellproc:
shellproc.communicate(command.encode())
return shellproc.returncode
raise
outputs_are_valid = False
try:
if "threadpool" in options:
completion_chan = yield dispatch.MakeChannel()
yield dispatch.Send(options["threadpool"],
(task_func, None, completion_chan,
self.occupancy))
# Wait for task_func() to finish.
_, retval, exc_info = yield dispatch.Recv(completion_chan)
if exc_info:
raise exc_info
else:
# Run serially.
retval = task_func()
if retval:
# For now, we raise unconditionally. XXX If requested, outputs
# should be considered valid even if retval != 0.
raise CalledProcessError("command returned exit status of "
"{:d}: {}".format(retval, command))
else:
outputs_are_valid = True
finally:
# If the command failed, we need to delete any output files, which
# may be corrupt.
if not outputs_are_valid:
for child in graph.children_of(self):
child.delete_files(element)
def invalidate_up_to_date_cache(self, graph, element):
del self.statuses[element]
super().invalidate_up_to_date_cache(graph, element)
class Survey(Vertex):
def __init__(self, graph, name, scope, surveyer):
super().__init__(graph, name, scope)
self.surveyer = surveyer
self.results = {}
self.mtimes = space.Cache(self.scope, self.surveyer.read_mtimes,
mtime.extrema)
persistence_path = os.path.join(files.ndmake_dir(), "survey",
self.name)
self.mtimes.set_persistence(mtime.reader, mtime.writer,
path=persistence_path, filename="mtimes",
level=2)
def __str__(self):
return "survey for {}".format(self.name)
def is_result_available(self, element):
# Iff we've been updated, the results are stored in self.results.
element = self.scope.canonicalized_element(element)
return element in self.results
def result(self, element):
element = self.scope.canonicalized_element(element)
return self.results[element]
@dispatch.tasklet
def update_all_elements(self, graph, options):
yield from super().update_all_elements(graph, options)
if options.get("cache", False):
self.mtimes.save_to_file()
@dispatch.tasklet
def update_element(self, graph, element, is_full, options):
dprint_update(self,
"updating {} element:".
format("full" if is_full else "partial"),
element)
if not is_full:
dprint_undemarcated("undemarcated", self, element)
return
for parent in graph.parents_of(self):
if isinstance(parent, Dataset):
# Command survey with input(s).
break
if isinstance(parent, Computation):
# Filename survey with producer.
break
else:
# We have a command survey with no inputs or a filename survey on a
# non-computed dataset: always run survey.
yield from self.execute(graph, element, options)
return
our_mtime, _ = self.mtimes[element]
if our_mtime > 0:
if self.surveyer.mtimes_include_files:
self.load_result(element)
return
_, newest_input_mtime = mtime.extrema(parent.mtimes[element]
for parent
in graph.parents_of(self)
if isinstance(parent,
Dataset))
if newest_input_mtime <= our_mtime:
self.load_result(element)
return
yield from self.execute(graph, element, options)
@dispatch.subtasklet
def execute(self, graph, element, options):
self.surveyer.delete_files(element, delete_surveyed_files=False)
del self.mtimes[element]
self.invalidate_up_to_date_cache(graph, element)
# Bind input dataset names.
dataset_name_proxies = dict((parent.name, parent.name_proxy(element))
for parent in graph.parents_of(self)
if isinstance(parent, Dataset))
dict_ = dataset_name_proxies
dict_["__name__"] = self.name
self.results[element] = self.surveyer.run_survey(self, element,
dict_, options)
return
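        # The unreachable yield below makes this method a generator, as
        # dispatch subtasklets must be.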
yield
def load_result(self, element):
self.results[element] = self.surveyer.load_result(element)
def delete_files(self, graph, element):
self.surveyer.delete_files(element, delete_surveyed_files=True)
del self.mtimes[element]
def create_dirs(self, element):
self.surveyer.create_dirs(element)
#
# Templating support
#
class DatasetNameProxy:
# An object to be bound to a dataset name when rendering a command
# template.
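    # In a command template, a proxy bound to, say, "mydata" renders as the
    # shell-quoted filenames for the current element; calling it with
    # keyword arguments ("{{ mydata(i=0) }}" for a hypothetical dimension
    # "i") re-renders with those dimensions overridden.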
def __init__(self, dataset, default_element):
self.__dataset = dataset
default_element = dataset.scope.canonicalized_element(default_element)
self.__default_element = default_element
def __repr__(self):
# Use a summarized version of __quoted_filenames().
filenames = self.__filename_list(self.__default_element)
if len(filenames) > 3:
summary = " ".join((shlex.quote(filenames[0]),
"...",
shlex.quote(filenames[-1])))
else:
summary = " ".join(shlex.quote(name) for name in filenames)
return "<DatasetNameProxy default={}>".format(repr(summary))
def __filename_list(self, element):
if self.__dataset.scope.is_full_element(element):
return [self.__dataset.render_filename(element)]
# We have a partial element.
return list(self.__dataset.render_filename(full_element)
for full_element, is_full
in self.__dataset.scope.iterate(element)
if is_full)
def __quoted_filenames(self, element):
return " ".join(shlex.quote(name)
for name in self.__filename_list(element))
def __str__(self):
return self.__quoted_filenames(self.__default_element)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == other
def __call__(self, **kwargs):
# Override and/or extend the default element with the kwargs.
assigned_extents = []
coords = {}
for extent in self.__dataset.scope.extents:
if extent.dimension in self.__default_element.space.dimensions:
assigned_extents.append(extent)
coords[extent.dimension] = \
self.__default_element[extent.dimension]
if extent.dimension.name in kwargs:
if extent not in assigned_extents:
assigned_extents.append(extent)
coords[extent.dimension] = kwargs[extent.dimension.name]
new_element = space.Element(space.Space(assigned_extents), coords)
return self.__quoted_filenames(new_element)
|
marktsuchida/NDMake
|
ndmake/depgraph.py
|
Python
|
mit
| 32,848
|
[
"VisIt"
] |
57c2acf3adfe62ce49eeffe3505a7f179f3af6251b1af56312ad76021d00f043
|
from jinja2.visitor import NodeVisitor
from jinja2._compat import iteritems
VAR_LOAD_PARAMETER = 'param'
VAR_LOAD_RESOLVE = 'resolve'
VAR_LOAD_ALIAS = 'alias'
VAR_LOAD_UNDEFINED = 'undefined'
def find_symbols(nodes, parent_symbols=None):
sym = Symbols(parent=parent_symbols)
visitor = FrameSymbolVisitor(sym)
for node in nodes:
visitor.visit(node)
return sym
def symbols_for_node(node, parent_symbols=None):
sym = Symbols(parent=parent_symbols)
sym.analyze_node(node)
return sym
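# Usage sketch (assumes a template AST parsed with a jinja2 Environment):
#
#     env = jinja2.Environment()
#     tmpl_ast = env.parse('{% set x = 1 %}{{ x }}{{ y }}')
#     sym = symbols_for_node(tmpl_ast)
#     # 'x' ends up in sym.stores; 'y' is loaded via a runtime resolve.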
class Symbols(object):
def __init__(self, parent=None, level=None):
if level is None:
if parent is None:
level = 0
else:
level = parent.level + 1
self.level = level
self.parent = parent
self.refs = {}
self.loads = {}
self.stores = set()
def analyze_node(self, node, **kwargs):
visitor = RootVisitor(self)
visitor.visit(node, **kwargs)
def _define_ref(self, name, load=None):
ident = 'l_%d_%s' % (self.level, name)
self.refs[name] = ident
if load is not None:
self.loads[ident] = load
return ident
def find_load(self, target):
if target in self.loads:
return self.loads[target]
if self.parent is not None:
return self.parent.find_load(target)
def find_ref(self, name):
if name in self.refs:
return self.refs[name]
if self.parent is not None:
return self.parent.find_ref(name)
def ref(self, name):
rv = self.find_ref(name)
if rv is None:
raise AssertionError('Tried to resolve a name to a reference that '
'was unknown to the frame (%r)' % name)
return rv
def copy(self):
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.refs = self.refs.copy()
rv.loads = self.loads.copy()
rv.stores = self.stores.copy()
return rv
def store(self, name):
self.stores.add(name)
        # If we have not seen the name referenced yet, we need to figure
        # out what to set it to.
if name not in self.refs:
# If there is a parent scope we check if the name has a
# reference there. If it does it means we might have to alias
# to a variable there.
if self.parent is not None:
outer_ref = self.parent.find_ref(name)
if outer_ref is not None:
self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
return
# Otherwise we can just set it to undefined.
self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
def declare_parameter(self, name):
self.stores.add(name)
return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
def load(self, name):
target = self.find_ref(name)
if target is None:
self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
def branch_update(self, branch_symbols):
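        # Example: in "{% if c %}{% set x = 1 %}{% endif %}", 'x' is stored
        # in only one branch; its load is re-pointed to an outer alias when
        # one exists, otherwise to a runtime resolve, so 'x' is defined on
        # every code path.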
stores = {}
for branch in branch_symbols:
for target in branch.stores:
if target in self.stores:
continue
stores[target] = stores.get(target, 0) + 1
for sym in branch_symbols:
self.refs.update(sym.refs)
self.loads.update(sym.loads)
self.stores.update(sym.stores)
for name, branch_count in iteritems(stores):
if branch_count == len(branch_symbols):
continue
target = self.find_ref(name)
assert target is not None, 'should not happen'
if self.parent is not None:
outer_target = self.parent.find_ref(name)
if outer_target is not None:
self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
continue
self.loads[target] = (VAR_LOAD_RESOLVE, name)
def dump_stores(self):
rv = {}
node = self
while node is not None:
for name in node.stores:
if name not in rv:
rv[name] = self.find_ref(name)
node = node.parent
return rv
def dump_param_targets(self):
rv = set()
node = self
while node is not None:
for target, (instr, _) in iteritems(self.loads):
if instr == VAR_LOAD_PARAMETER:
rv.add(target)
node = node.parent
return rv
class RootVisitor(NodeVisitor):
def __init__(self, symbols):
self.sym_visitor = FrameSymbolVisitor(symbols)
def _simple_visit(self, node, **kwargs):
for child in node.iter_child_nodes():
self.sym_visitor.visit(child)
visit_Template = visit_Block = visit_Macro = visit_FilterBlock = \
visit_Scope = visit_If = visit_ScopedEvalContextModifier = \
_simple_visit
def visit_AssignBlock(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
def visit_CallBlock(self, node, **kwargs):
for child in node.iter_child_nodes(exclude=('call',)):
self.sym_visitor.visit(child)
def visit_OverlayScope(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
def visit_For(self, node, for_branch='body', **kwargs):
if for_branch == 'body':
self.sym_visitor.visit(node.target, store_as_param=True)
branch = node.body
elif for_branch == 'else':
branch = node.else_
elif for_branch == 'test':
self.sym_visitor.visit(node.target, store_as_param=True)
if node.test is not None:
self.sym_visitor.visit(node.test)
return
else:
raise RuntimeError('Unknown for branch')
for item in branch or ():
self.sym_visitor.visit(item)
def visit_With(self, node, **kwargs):
for target in node.targets:
self.sym_visitor.visit(target)
for child in node.body:
self.sym_visitor.visit(child)
def generic_visit(self, node, *args, **kwargs):
raise NotImplementedError('Cannot find symbols for %r' %
node.__class__.__name__)
class FrameSymbolVisitor(NodeVisitor):
"""A visitor for `Frame.inspect`."""
def __init__(self, symbols):
self.symbols = symbols
def visit_Name(self, node, store_as_param=False, **kwargs):
"""All assignments to names go through this function."""
if store_as_param or node.ctx == 'param':
self.symbols.declare_parameter(node.name)
elif node.ctx == 'store':
self.symbols.store(node.name)
elif node.ctx == 'load':
self.symbols.load(node.name)
def visit_NSRef(self, node, **kwargs):
self.symbols.load(node.name)
def visit_If(self, node, **kwargs):
self.visit(node.test, **kwargs)
original_symbols = self.symbols
def inner_visit(nodes):
self.symbols = rv = original_symbols.copy()
for subnode in nodes:
self.visit(subnode, **kwargs)
self.symbols = original_symbols
return rv
body_symbols = inner_visit(node.body)
elif_symbols = inner_visit(node.elif_)
else_symbols = inner_visit(node.else_ or ())
self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])
def visit_Macro(self, node, **kwargs):
self.symbols.store(node.name)
def visit_Import(self, node, **kwargs):
self.generic_visit(node, **kwargs)
self.symbols.store(node.target)
def visit_FromImport(self, node, **kwargs):
self.generic_visit(node, **kwargs)
for name in node.names:
if isinstance(name, tuple):
self.symbols.store(name[1])
else:
self.symbols.store(name)
def visit_Assign(self, node, **kwargs):
"""Visit assignments in the correct order."""
self.visit(node.node, **kwargs)
self.visit(node.target, **kwargs)
def visit_For(self, node, **kwargs):
"""Visiting stops at for blocks. However the block sequence
is visited as part of the outer scope.
"""
self.visit(node.iter, **kwargs)
def visit_CallBlock(self, node, **kwargs):
self.visit(node.call, **kwargs)
def visit_FilterBlock(self, node, **kwargs):
self.visit(node.filter, **kwargs)
def visit_With(self, node, **kwargs):
for target in node.values:
self.visit(target)
def visit_AssignBlock(self, node, **kwargs):
"""Stop visiting at block assigns."""
self.visit(node.target, **kwargs)
def visit_Scope(self, node, **kwargs):
"""Stop visiting at scopes."""
def visit_Block(self, node, **kwargs):
"""Stop visiting at blocks."""
def visit_OverlayScope(self, node, **kwargs):
"""Do not visit into overlay scopes."""
|
astaninger/speakout
|
venv/lib/python3.6/site-packages/jinja2/idtracking.py
|
Python
|
mit
| 9,197
|
[
"VisIt"
] |
d866c34b322f180ac40462e4a2f2e4a847e6631996b047fc737419c0ce2e36cc
|
# CoordinateMapper -- map between genomic, cds, and protein coordinates
# AUTHOR: Reece Hart <reecehart@gmail.com>
# Modifications: Lenna X. Peterson <arklenna@gmail.com>
# LICENSE: BioPython
# Examples:
# AB026906.1:g.7872G>T
# AB026906.1:c.274G>T
# BA...:p.Asp92Tyr
#
# All refer to congruent variants. A coordinate mapper is needed to
# translate between at least these three coordinate frames. The mapper
# should deal with specialized syntax for splicing and UTR (e.g., 88+1,
# 89-2, -14, *46). In addition, care should be taken to ensure consistent 0-
# or 1-based numbering (0 internally, as with Python/BioPython and
# Perl/BioPerl).
#
# g -----------00000000000-----1111111----------22222222222*222-----
# s0 e0 s1 e1 s2 e2
# \ \ | | / /
# +--+ +--+ | | +-------+ +-------+
# \ \| |/ /
# c 00000000000111111122222222222*222
# c0 c1 c2
# aaabbbcccdddeeefffggghhhiiijj*kkk
# p A B C D E F G H I J K
# p0 p1 p2 ... pn
#
#
# TODO:
# * g2c returns index + extended syntax
# * c2g accepts index + extended syntax
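# Worked example (0-based internal coordinates; cf. the __main__ block at
# the bottom of this file, which uses the AB026906.1 exons):
#
#     exons = [(5808, 5860), (6757, 6874), (7767, 7912), (13709, 13785)]
#     cm = CoordinateMapper(exons)
#     cm.g2c(7872)   # exon offset: 52 + 117 + (7872 - 7767) = 274
#     cm.c2p(274)    # codon index: 274 // 3 = 91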
from functools import wraps
from math import floor
import warnings
import MapPositions # required for pos_factory decorator
from MapPositions import CDSPositionError, ProteinPositionError
from MapPositions import GenomePosition, CDSPosition, ProteinPosition
from Bio import BiopythonParserWarning
# FIXME change to absolute import once CompoundLocation is merged to master
from Bio.SeqFeature import FeatureLocation
class CoordinateMapper(object):
"""Convert positions between genomic, CDS, and protein coordinates."""
def __init__(self, selist):
"""Set exons to be used for mapping.
Parameters
----------
selist : SeqRecord, SeqFeature, list
Object containing exon information
"""
self._exons = self._get_exons(selist)
@staticmethod
def _get_exons(seq):
"""Extract exons from SeqRecord, SeqFeature, or list of pairs.
Parameters
----------
seq : SeqRecord, SeqFeature, list
Object containing exon information.
Returns
-------
SeqFeature.FeatureLocation
"""
# Try as SeqRecord
if hasattr(seq, 'features'):
# generator
cdsf = (f for f in seq.features if f.type == 'CDS').next()
return cdsf.location
# Try as SeqFeature
elif hasattr(seq, 'location'):
if seq.type != 'CDS':
# FIXME should this be a fatal error?
warnings.warn("Provided SeqFeature should be CDS",
BiopythonParserWarning)
return seq.location
# Try as list of pairs
return sum([FeatureLocation(s, e) for s, e in seq])
@property # read-only
def exons(self):
return self._exons
@property # read-only
def exon_list(self):
return list(self.exons)
def pos_factory(pos_type):
"""
Convert string or int pos to appropriate Position object
Parameters
----------
pos_type : str
Position type (Genome, CDS, Protein)
"""
def wrapper(fn):
@wraps(fn)
def make_pos(self, pos, dialect=None):
# retrieve Position object
_obj = getattr(MapPositions, pos_type + "Position")
# if pos is not already Position object, make it one
if not isinstance(pos, _obj):
# no dialect: use default constructor
if dialect is None:
pos = _obj(pos)
# use dialect alternate constructor
else:
pos = _obj.from_dialect(dialect, pos)
# call function with new pos
return fn(self, pos, dialect)
return make_pos
return wrapper
@pos_factory("Genome")
def g2c(self, gpos, dialect=None):
"""Convert integer from genomic to CDS coordinates
Parameters
----------
gpos : int
Genomic position
dialect : str
Coordinate dialect (GenBank or HGVS, default None)
Returns
-------
CDSPosition
"""
gpos = int(gpos)
fmts = CDSPosition.fmt_dict
def _simple_g2c(g):
return CDSPosition(self.exon_list.index(g))
# within exon
if gpos in self.exons:
return _simple_g2c(gpos)
        # before CDS (negative offset from CDS start; cf. c2g)
        if gpos < self.exons.start:
            return CDSPosition(fmts['pre-CDS'] % (gpos - self.exons.start))
        # after CDS (positive offset from CDS end; cf. c2g)
        if gpos >= self.exons.end:
            return CDSPosition(fmts['post-CDS'] % (gpos - self.exons.end + 1))
# intron
# set start of first intron
prev_end = self.exons.parts[0].end
for part in self.exons.parts[1:]:
# not in this intron
if gpos > part.start:
prev_end = part.end
continue
len_intron = part.start - prev_end
# second half (exclusive) of intron
# offset > middle of intron
if gpos - prev_end > floor(len_intron / 2.0):
anchor = _simple_g2c(part.start)
offset = gpos - part.start
assert offset < 0
# first half (inclusive) of intron
else:
anchor = _simple_g2c(prev_end - 1)
offset = gpos - prev_end + 1
assert offset > 0
assert self.check_intron(anchor, offset)
return CDSPosition(fmts['intron'] % (anchor, offset))
assert False # function should return for every integer
# TODO verify that values of offset are sane
def check_intron(self, anchor, offset):
"""
Verify that CDS-relative intron position is valid with given exons.
Parameters
----------
anchor : int
Intron anchor (closest CDS position)
offset : int
Intron offset (distance to anchor)
Returns
-------
bool
"""
for exon in self.exons.parts:
start = int(self.g2c(exon.start))
if anchor == start:
if offset > 0:
raise CDSPositionError(
"Invalid intron: offset from exon start must be negative.")
return True
end = int(self.g2c(exon.end - 1))
if anchor == end:
if offset < 0:
raise CDSPositionError(
"Invalid intron: offset from exon end must be positive.")
return True
raise CDSPositionError(
"Invalid intron: anchor must be start or end of an exon.")
def get_strand(self, gpos):
for exon in self.exons.parts:
if gpos in exon:
return exon.strand
raise ValueError("Provided gpos must be exon")
@pos_factory("CDS")
def c2g(self, cpos, dialect=None):
"""Convert from CDS to genomic coordinates
Parameters
----------
cpos : int
CDS position
dialect : str
Coordinate dialect (GenBank or HGVS, default None)
Returns
-------
GenomePosition
"""
if cpos.pos_type == "pre-CDS":
return GenomePosition(self.exons.start + cpos.offset)
elif cpos.pos_type == "post-CDS":
return GenomePosition(self.exons.end - 1 + cpos.offset)
g_anchor = self.exon_list[cpos.anchor]
if cpos.pos_type == "exon":
strand = self.get_strand(g_anchor)
return GenomePosition(g_anchor, strand=strand)
elif cpos.pos_type == "intron":
offset = cpos.offset
if self.check_intron(cpos.anchor, offset):
return GenomePosition(g_anchor + offset)
assert False # all positions should be one of the 4 types
@pos_factory("CDS")
def c2p(self, cpos, dialect=None):
"""Convert from CDS to protein coordinates
Parameters
----------
cpos : int
CDS position
dialect : str
Coordinate dialect (GenBank or HGVS, default None)
Returns
-------
ProteinPosition
"""
try:
cpos = int(cpos)
except TypeError:
raise CDSPositionError("'%s' does not correspond to a protein"
% repr(cpos))
return ProteinPosition(int(cpos / 3.0))
@pos_factory("Genome")
def g2p(self, gpos, dialect=None):
"""Convert integer from genomic to protein coordinates
Parameters
----------
gpos : int
Genomic position
dialect : str
Coordinate dialect (GenBank or HGVS, default None)
Returns
-------
ProteinPosition
"""
return self.c2p(self.g2c(gpos))
@pos_factory("Protein")
def p2c(self, ppos, dialect=None):
"""Convert integer from protein coordinate to CDS closed range
Parameters
----------
ppos : int
Protein position
dialect : str
Coordinate dialect (GenBank or HGVS, default None)
Returns
-------
CDSPosition
"""
try:
ppos = int(ppos)
except TypeError:
return None
if ppos < 0:
raise ProteinPositionError("'%s' should not be negative")
# FIXME is CDS guaranteed to have len % 3 == 0?
first_base = (ppos) * 3
last_base = first_base + 2
if last_base > len(self.exons):
raise ProteinPositionError("'%s' is too large")
return (CDSPosition(first_base), CDSPosition(last_base))
@pos_factory("Protein")
def p2g(self, ppos, dialect=None):
"""Convert integer from protein to genomic coordinates
Parameters
----------
ppos : int
Protein position
dialect : str
Coordinate dialect (GenBank or HGVS, default None)
Returns
-------
GenomePosition
"""
return tuple(self.c2g(x) for x in self.p2c(ppos))
if __name__ == '__main__':
# The following exons are from AB026906.1.
# test case: g.7872 -> c.274 -> p.92
# N.B. These are python counting coordinates (0-based)
exons = [(5808, 5860), (6757, 6874), (7767, 7912), (13709, 13785)]
def test_list(g_range):
cm = CoordinateMapper(exons)
for g1 in g_range:
print g1,
c1 = cm.g2c(g1)
print c1,
p1 = cm.c2p(c1)
print p1,
if p1:
c2 = cm.p2c(p1)[0]
else:
c2 = c1
print ' | ', c2,
g2 = cm.c2g(c2)
print g2
print cm.g2p(7872)
print cm.p2g(92)
def test_simple():
        from Bio.SeqFeature import SeqFeature
location = sum([FeatureLocation(2, 4, +1),
FeatureLocation(8, 11, +1),
FeatureLocation(16, 18, +1)])
simple_exons = SeqFeature(location, type="CDS")
cm = CoordinateMapper(simple_exons)
print cm.exons
print list(cm.exons)
print range(len(cm.exons))
for i in range(len(cm.exons)):
print "%3s" % cm.c2g(i),
print
for i in xrange(20):
print "%3d" % i,
print
for i in xrange(20):
print "%3s" % cm.g2c(i),
print
r1 = (7870, 7871, 7872, 7873, 7874)
r2 = (5807, 5808, 5809,
6871, 6872, 6873, 6874, 6875,
7766, 7767, 7768, 7769,
13784, 13785, 13786)
test_list(r1)
#test_list(r2)
print
test_simple()
|
HaseloffLab/PartsDB
|
partsdb/tools/CoordinateMapper/CoordinateMapper.py
|
Python
|
mit
| 12,170
|
[
"BioPerl",
"Biopython"
] |
5760f34060c01f1c1b20e4d15b68a5cfc58ab7a166eed1465c2adcb3ddc79451
|
# Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Tests for linearized deformation operators."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
import numpy as np
import pytest
import odl
from odl.deform import LinDeformFixedTempl, LinDeformFixedDisp
from odl.util.testutils import almost_equal, simple_fixture
# Set up fixtures
dtype = simple_fixture('dtype', ['float', 'complex'])
interp = simple_fixture('interp', ['linear', 'nearest'])
ndim = simple_fixture('ndim', [1, 2, 3])
@pytest.fixture
def space(request, ndim, interp, dtype, fn_impl):
"""Example space.
Generates example spaces with various implementations, dimensions, dtypes
and interpolations.
"""
if np.dtype(dtype) not in odl.FN_IMPLS[fn_impl].available_dtypes():
pytest.skip('dtype not available for this backend')
return odl.uniform_discr([-1] * ndim, [1] * ndim, [20] * ndim,
interp=interp, impl=fn_impl, dtype=dtype)
# Set up constants and helper functions
SIGMA = 0.3 # width of the gaussian
EPS = 0.25 # scale of the displacement field
def error_bound(interp):
"""Error bound varies with interpolation (larger for "worse")."""
if interp == 'linear':
return 0.1
elif interp == 'nearest':
return 0.2
def prod(x):
"""Product of a sequence."""
prod = 1
for xi in x:
prod = prod * xi
return prod
def template_function(x):
"""Gaussian function with std SIGMA."""
return np.exp(-sum(xi ** 2 for xi in x) / SIGMA ** 2)
def template_grad_factory(n):
"""Gradient of the gaussian."""
def template_grad_i(i):
# Indirection for lambda capture
return lambda x: -2 * x[i] / SIGMA ** 2 * template_function(x)
return [template_grad_i(i) for i in range(n)]
def disp_field_factory(n):
"""Displacement field.
In 1d: (x)
In 2d: (xy, y)
In 3d: (xyz, y, z)
etc...
"""
def coordinate_projection_i(i):
# Indirection for lambda capture
return lambda x: EPS * x[i]
lst = [lambda x: EPS * prod(x)]
lst.extend(coordinate_projection_i(i) for i in range(1, n))
return lst
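# For example, disp_field_factory(2) returns callables equivalent to
# [lambda x: EPS * x[0] * x[1], lambda x: EPS * x[1]], i.e. the "(xy, y)"
# case from the docstring above.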
def exp_div_inv_disp(x):
"""Exponential of the divergence of the displacement field.
In 1d: exp(- EPS)
In 2d: exp(- EPS * (y + 1))
    In 3d: exp(- EPS * (yz + 2))
"""
return np.exp(- EPS * (prod(x[1:]) + (len(x) - 1)))
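# Derivation sketch for n = 2: the displacement field is (EPS*x*y, EPS*y),
# so its divergence is EPS*y + EPS = EPS*(y + 1), and the adjoint weight
# used in test_fixed_disp_adj below is exp(-EPS*(y + 1)).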
def displaced_points(x):
"""Displaced coordinate points."""
disp = [dsp(x) for dsp in disp_field_factory(len(x))]
return [xi + di for xi, di in zip(x, disp)]
def deformed_template(x):
"""Deformed template."""
return template_function(displaced_points(x))
def vector_field_factory(n):
"""Vector field for the gradient.
In 1d: (x)
In 2d: (x, y)
In 3d: (x, y, z)
etc...
"""
def vector_field_i(i):
return lambda x: x[i]
return [vector_field_i(i) for i in range(n)]
def template_deformed_grad_factory(n):
"""Deformed gradient."""
templ_grad = template_grad_factory(n)
def template_deformed_gradi(i):
# Indirection for lambda capture
return lambda x: templ_grad[i](displaced_points(x))
return [template_deformed_gradi(i) for i in range(n)]
def fixed_templ_deriv(x):
"""Derivative taken in disp_field and evaluated in vector_field."""
dg = [tdgf(x) for tdgf in template_deformed_grad_factory(len(x))]
v = [vff(x) for vff in vector_field_factory(len(x))]
return sum(dgi * vi for dgi, vi in zip(dg, v))
def inv_deformed_template(x):
"""Analytic inverse deformation of the template function."""
disp = [dsp(x) for dsp in disp_field_factory(len(x))]
disp_x = [xi - di for xi, di in zip(x, disp)]
return template_function(disp_x)
# --- LinDeformFixedTempl --- #
def test_fixed_templ_init():
"""Verify that the init method and checks work properly."""
space = odl.uniform_discr(0, 1, 5)
template = space.element(template_function)
# Valid input
print(LinDeformFixedTempl(template))
# Invalid input
with pytest.raises(TypeError):
# template not a DiscreteLpElement
LinDeformFixedTempl(template_function)
def test_fixed_templ_call(space):
"""Test deformation for LinDeformFixedTempl."""
# Define the analytic template as the hat function and its gradient
template = space.element(template_function)
deform_op = LinDeformFixedTempl(template)
# Calculate result and exact result
true_deformed_templ = space.element(deformed_template)
deformed_templ = deform_op(disp_field_factory(space.ndim))
# Verify that the result is within error limits
error = (true_deformed_templ - deformed_templ).norm()
rlt_err = error / deformed_templ.norm()
assert rlt_err < error_bound(space.interp)
def test_fixed_templ_deriv(space):
if not space.is_rn:
pytest.skip('derivative not implemented for complex dtypes')
# Set up template and displacement field
template = space.element(template_function)
disp_field = disp_field_factory(space.ndim)
vector_field = vector_field_factory(space.ndim)
fixed_templ_op = LinDeformFixedTempl(template)
# Calculate result
fixed_templ_op_deriv = fixed_templ_op.derivative(disp_field)
fixed_templ_deriv_comp = fixed_templ_op_deriv(vector_field)
# Calculate the analytic result
fixed_templ_deriv_exact = space.element(fixed_templ_deriv)
# Verify that the result is within error limits
error = (fixed_templ_deriv_exact - fixed_templ_deriv_comp).norm()
rlt_err = error / fixed_templ_deriv_comp.norm()
assert rlt_err < error_bound(space.interp)
# --- LinDeformFixedDisp --- #
def test_fixed_disp_init():
"""Verify that the init method and checks work properly."""
space = odl.uniform_discr(0, 1, 5)
disp_field = space.tangent_bundle.element(
disp_field_factory(space.ndim))
# Valid input
print(LinDeformFixedDisp(disp_field))
print(LinDeformFixedDisp(disp_field, templ_space=space))
# Non-valid input
with pytest.raises(TypeError): # displacement not ProductSpaceElement
LinDeformFixedDisp(space.one())
with pytest.raises(TypeError): # templ_space not DiscreteLp
LinDeformFixedDisp(disp_field, space.tangent_bundle)
with pytest.raises(TypeError): # templ_space not a power space
bad_pspace = odl.ProductSpace(space, odl.rn(3))
LinDeformFixedDisp(disp_field, bad_pspace)
with pytest.raises(TypeError): # templ_space not based on DiscreteLp
bad_pspace = odl.ProductSpace(odl.rn(2), 1)
LinDeformFixedDisp(disp_field, bad_pspace)
with pytest.raises(TypeError): # wrong dtype on templ_space
wrong_dtype = odl.ProductSpace(space.astype(complex), 1)
LinDeformFixedDisp(disp_field, wrong_dtype)
with pytest.raises(ValueError): # vector field spaces don't match
bad_space = odl.uniform_discr(0, 1, 10)
LinDeformFixedDisp(disp_field, bad_space)
def test_fixed_disp_call(space):
"""Verify that LinDeformFixedDisp produces the correct deformation."""
template = space.element(template_function)
disp_field = space.real_space.tangent_bundle.element(
disp_field_factory(space.ndim))
# Calculate result and exact result
deform_op = LinDeformFixedDisp(disp_field, templ_space=space)
deformed_templ = deform_op(template)
true_deformed_templ = space.element(deformed_template)
# Verify that the result is within error limits
error = (true_deformed_templ - deformed_templ).norm()
rlt_err = error / deformed_templ.norm()
assert rlt_err < error_bound(space.interp)
def test_fixed_disp_inv(space):
"""Verify that the inverse of LinDeformFixedDisp is correct."""
# Set up template and displacement field
template = space.element(template_function)
disp_field = space.real_space.tangent_bundle.element(
disp_field_factory(space.ndim))
# Verify that the inverse is in fact a (left and right) inverse
deform_op = LinDeformFixedDisp(disp_field, templ_space=space)
result_op_inv = deform_op(deform_op.inverse(template))
error = (result_op_inv - template).norm()
rel_err = error / template.norm()
assert rel_err < 2 * error_bound(space.interp) # need a bit more tolerance
result_inv_op = deform_op.inverse(deform_op(template))
error = (result_inv_op - template).norm()
rel_err = error / template.norm()
assert rel_err < 2 * error_bound(space.interp) # need a bit more tolerance
def test_fixed_disp_adj(space):
"""Verify that the adjoint of LinDeformFixedDisp is correct."""
# Set up template and displacement field
template = space.element(template_function)
disp_field = space.real_space.tangent_bundle.element(
disp_field_factory(space.ndim))
# Calculate result
deform_op = LinDeformFixedDisp(disp_field, templ_space=space)
deformed_templ_adj = deform_op.adjoint(template)
# Calculate the analytic result
true_deformed_templ_adj = space.element(inv_deformed_template)
exp_div = space.element(exp_div_inv_disp)
true_deformed_templ_adj *= exp_div
# Verify that the result is within error limits
error = (deformed_templ_adj - true_deformed_templ_adj).norm()
rel_err = error / true_deformed_templ_adj.norm()
assert rel_err < error_bound(space.interp)
# Verify the adjoint definition <Ax, x> = <x, A^* x>
deformed_templ = deform_op(template)
inner1 = deformed_templ.inner(template)
inner2 = template.inner(deformed_templ_adj)
assert almost_equal(inner1, inner2, places=1)
if __name__ == '__main__':
pytest.main([str(__file__.replace('\\', '/')), '-v'])
|
bgris/ODL_bgris
|
odl/test/deform/linearized_deform_test.py
|
Python
|
gpl-3.0
| 10,465
|
[
"Gaussian"
] |
9e4d6df3dfc9ea826f9a69d195cc641544c6a8fd3b255bb908c6a59cad2afd6a
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover BulkMutateJobService."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..'))
import unittest
from datetime import date
from adspygoogle.common import Utils
from tests.adspygoogle.adwords import HTTP_PROXY
from tests.adspygoogle.adwords import SERVER_V201109
from tests.adspygoogle.adwords import TEST_VERSION_V201109
from tests.adspygoogle.adwords import VERSION_V201109
from tests.adspygoogle.adwords import client
class BulkMutateJobServiceTestV201109(unittest.TestCase):
"""Unittest suite for BulkMutateJobService using v201109."""
SERVER = SERVER_V201109
VERSION = VERSION_V201109
client.debug = False
service = None
campaign_id = '0'
ad_group_id1 = '0'
ad_group_id2 = '0'
def setUp(self):
"""Prepare unittest."""
print self.id()
if not self.__class__.service:
self.__class__.service = client.GetBulkMutateJobService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
if self.__class__.campaign_id == '0':
operations = [
{
'operator': 'ADD',
'operand': {
'name': 'Campaign #%s' % Utils.GetUniqueName(),
'status': 'PAUSED',
'biddingStrategy': {
'xsi_type': 'ManualCPC'
},
'endDate': date(date.today().year + 1,
12, 31).strftime('%Y%m%d'),
'budget': {
'period': 'DAILY',
'amount': {
'microAmount': '1000000'
},
'deliveryMethod': 'STANDARD'
}
}
}
]
service = client.GetCampaignService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
self.__class__.campaign_id = service.Mutate(
operations)[0]['value'][0]['id']
if (self.__class__.ad_group_id1 == '0' or
self.__class__.ad_group_id2 == '0'):
operations = [
{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroup',
'campaignId': self.__class__.campaign_id,
'name': 'AdGroup #%s' % Utils.GetUniqueName(),
'status': 'ENABLED',
'bids': {
'xsi_type': 'ManualCPCAdGroupBids',
'keywordMaxCpc': {
'amount': {
'microAmount': '1000000'
}
}
}
}
},
{
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroup',
'campaignId': self.__class__.campaign_id,
'name': 'AdGroup #%s' % Utils.GetUniqueName(),
'status': 'ENABLED',
'bids': {
'xsi_type': 'ManualCPCAdGroupBids',
'keywordMaxCpc': {
'amount': {
'microAmount': '1000000'
}
}
}
}
}
]
service = client.GetAdGroupService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
ad_groups = service.Mutate(operations)[0]['value']
self.__class__.ad_group_id1 = ad_groups[0]['id']
self.__class__.ad_group_id2 = ad_groups[1]['id']
def testMultiplePartsMultipleStreams(self):
"""Test whether we can add ads and keywords using multiple part job with
multiple streams."""
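    # Part 0 is uploaded with operator ADD, which creates the job and
    # declares numRequestParts; the final part is uploaded with operator SET
    # against the returned job id, which completes the job.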
ad_stream1_ops = []
for _ in xrange(25):
ad_stream1_ops.append(
{
'xsi_type': 'AdGroupAd',
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': self.__class__.ad_group_id1,
'ad': {
'xsi_type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'headline': 'Luxury Cruise to Mars'
},
'status': 'ENABLED'
}
})
ad_stream1 = {
'scopingEntityId': {
'type': 'CAMPAIGN_ID',
'value': self.__class__.campaign_id,
},
'operations': ad_stream1_ops
}
ad_stream2_ops = []
for _ in xrange(25):
ad_stream2_ops.append(
{
'xsi_type': 'AdGroupAd',
'operator': 'ADD',
'operand': {
'xsi_type': 'AdGroupAd',
'adGroupId': self.__class__.ad_group_id2,
'ad': {
'xsi_type': 'TextAd',
'url': 'http://www.example.com',
'displayUrl': 'example.com',
'description1': 'Visit the Red Planet in style.',
'description2': 'Low-gravity fun for everyone!',
'headline': 'Luxury Cruise to Mars is here now!!!'
},
'status': 'ENABLED'
}
})
ad_stream2 = {
'scopingEntityId': {
'type': 'CAMPAIGN_ID',
'value': self.__class__.campaign_id,
},
'operations': ad_stream2_ops
}
part1 = {
'partIndex': '0',
'operationStreams': [ad_stream1, ad_stream2]
}
operation = {
'operator': 'ADD',
'operand': {
'xsi_type': 'BulkMutateJob',
'request': part1,
'numRequestParts': '2'
}
}
job = self.__class__.service.Mutate(operation)
self.assert_(isinstance(job, tuple))
kw_stream1_ops = []
for _ in xrange(25):
kw_stream1_ops.append(
{
'xsi_type': 'AdGroupCriterion',
'operator': 'ADD',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': self.__class__.ad_group_id1,
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'BROAD',
'text': 'mars cruise'
}
}
})
kw_stream1 = {
'scopingEntityId': {
'type': 'CAMPAIGN_ID',
'value': self.__class__.campaign_id,
},
'operations': kw_stream1_ops
}
kw_stream2_ops = []
for _ in xrange(25):
kw_stream2_ops.append(
{
'xsi_type': 'AdGroupCriterion',
'operator': 'ADD',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGroupId': self.__class__.ad_group_id2,
'criterion': {
'xsi_type': 'Keyword',
'matchType': 'BROAD',
'text': 'mars cruise'
}
}
})
kw_stream2 = {
'scopingEntityId': {
'type': 'CAMPAIGN_ID',
'value': self.__class__.campaign_id,
},
'operations': kw_stream2_ops
}
part2 = {
'partIndex': '1',
'operationStreams': [kw_stream1, kw_stream2]
}
operation = {
'operator': 'SET',
'operand': {
'xsi_type': 'BulkMutateJob',
'id': job[0]['id'],
'request': part2
}
}
job = self.__class__.service.Mutate(operation)
self.assert_(isinstance(job, tuple))
def makeTestSuiteV201109():
"""Set up test suite using v201109.
Returns:
TestSuite test suite using v201109.
"""
suite = unittest.TestSuite()
suite.addTests(unittest.makeSuite(BulkMutateJobServiceTestV201109))
return suite
if __name__ == '__main__':
suites = []
if TEST_VERSION_V201109:
suites.append(makeTestSuiteV201109())
if suites:
alltests = unittest.TestSuite(suites)
unittest.main(defaultTest='alltests')
|
nearlyfreeapps/python-googleadwords
|
tests/adspygoogle/adwords/bulk_mutate_job_service_unittest.py
|
Python
|
apache-2.0
| 8,918
|
[
"VisIt"
] |
019437651f5b01a2cb49e68780fecacb4ec3adce74147d3d98f6b3fb14e4b3d1
|
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""this module contains utilities for rebuilding a _ast tree in
order to get a single ASTNG representation
"""
import sys
from _ast import (Expr as Discard, Str,
# binary operators
Add, Div, FloorDiv, Mod, Mult, Pow, Sub, BitAnd, BitOr, BitXor,
LShift, RShift,
# logical operators
And, Or,
# unary operators
UAdd, USub, Not, Invert,
# comparison operators
Eq, Gt, GtE, In, Is, IsNot, Lt, LtE, NotEq, NotIn,
)
from . import nodes as new
_BIN_OP_CLASSES = {Add: '+',
BitAnd: '&',
BitOr: '|',
BitXor: '^',
Div: '/',
FloorDiv: '//',
Mod: '%',
Mult: '*',
Pow: '**',
Sub: '-',
LShift: '<<',
RShift: '>>'}
_BOOL_OP_CLASSES = {And: 'and',
Or: 'or'}
_UNARY_OP_CLASSES = {UAdd: '+',
USub: '-',
Not: 'not',
Invert: '~'}
_CMP_OP_CLASSES = {Eq: '==',
Gt: '>',
GtE: '>=',
In: 'in',
Is: 'is',
IsNot: 'is not',
Lt: '<',
LtE: '<=',
NotEq: '!=',
NotIn: 'not in'}
CONST_NAME_TRANSFORMS = {'None': None,
'True': True,
'False': False}
REDIRECT = {'arguments': 'Arguments',
'Attribute': 'Getattr',
'comprehension': 'Comprehension',
'Call': 'CallFunc',
'ClassDef': 'Class',
"ListCompFor": 'Comprehension',
"GenExprFor": 'Comprehension',
'excepthandler': 'ExceptHandler',
'Expr': 'Discard',
'FunctionDef': 'Function',
'GeneratorExp': 'GenExpr',
'ImportFrom': 'From',
'keyword': 'Keyword',
'Repr': 'Backquote',
}
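# REDIRECT is consulted by TreeRebuilder.visit() below: an _ast class name
# such as 'FunctionDef' maps to the ASTNG name 'Function', producing the
# visitor method name 'visit_function'.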
def _init_set_doc(node, newnode):
newnode.doc = None
try:
if isinstance(node.body[0], Discard) and isinstance(node.body[0].value, Str):
newnode.tolineno = node.body[0].lineno
newnode.doc = node.body[0].value.s
node.body = node.body[1:]
except IndexError:
pass # ast built from scratch
def _lineno_parent(oldnode, newnode, parent):
newnode.parent = parent
if hasattr(oldnode, 'lineno'):
newnode.lineno = oldnode.lineno
if hasattr(oldnode, 'col_offset'):
newnode.col_offset = oldnode.col_offset
def _set_infos(oldnode, newnode, parent):
newnode.parent = parent
if hasattr(oldnode, 'lineno'):
newnode.lineno = oldnode.lineno
if hasattr(oldnode, 'col_offset'):
newnode.col_offset = oldnode.col_offset
newnode.set_line_info(newnode.last_child()) # set_line_info accepts None
class TreeRebuilder(object):
"""Rebuilds the _ast tree to become an ASTNG tree"""
_visit_meths = {}
def __init__(self):
self.init()
def init(self):
self.asscontext = None
self._metaclass = ['']
self._global_names = []
self._from_nodes = []
self._delayed_assattr = []
def visit(self, node, parent):
cls = node.__class__
if cls in self._visit_meths:
return self._visit_meths[cls](node, parent)
else:
cls_name = cls.__name__
visit_name = 'visit_' + REDIRECT.get(cls_name, cls_name).lower()
visit_method = getattr(self, visit_name)
self._visit_meths[cls] = visit_method
return visit_method(node, parent)
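# Illustrative note (not in the original source): the _visit_meths cache above maps
# concrete _ast classes to bound visit_* methods, so e.g. an _ast.FunctionDef node
# is routed through REDIRECT ('FunctionDef' -> 'Function') to visit_function() on
# first encounter and dispatched directly from the cache afterwards.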
def _save_assignment(self, node, name=None):
"""save assignement situation since node.parent is not available yet"""
if self._global_names and node.name in self._global_names[-1]:
node.root().set_local(node.name, node)
else:
node.parent.set_local(node.name, node)
def visit_arguments(self, node, parent):
"""visit a Arguments node by returning a fresh instance of it"""
newnode = new.Arguments()
_lineno_parent(node, newnode, parent)
self.asscontext = "Ass"
newnode.args = [self.visit(child, newnode) for child in node.args]
self.asscontext = None
newnode.defaults = [self.visit(child, newnode) for child in node.defaults]
newnode.vararg = node.vararg
newnode.kwarg = node.kwarg
# save argument names in locals:
if node.vararg:
newnode.parent.set_local(newnode.vararg, newnode)
if node.kwarg:
newnode.parent.set_local(newnode.kwarg, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_assattr(self, node, parent):
"""visit a AssAttr node by returning a fresh instance of it"""
assc, self.asscontext = self.asscontext, None
newnode = new.AssAttr()
_lineno_parent(node, newnode, parent)
newnode.expr = self.visit(node.expr, newnode)
self.asscontext = assc
self._delayed_assattr.append(newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_assert(self, node, parent):
"""visit a Assert node by returning a fresh instance of it"""
newnode = new.Assert()
_lineno_parent(node, newnode, parent)
newnode.test = self.visit(node.test, newnode)
if node.msg is not None:
newnode.fail = self.visit(node.msg, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_assign(self, node, parent):
"""visit a Assign node by returning a fresh instance of it"""
newnode = new.Assign()
_lineno_parent(node, newnode, parent)
self.asscontext = "Ass"
newnode.targets = [self.visit(child, newnode) for child in node.targets]
self.asscontext = None
newnode.value = self.visit(node.value, newnode)
# set some function or metaclass infos XXX explain ?
klass = newnode.parent.frame()
if (isinstance(klass, new.Class)
and isinstance(newnode.value, new.CallFunc)
and isinstance(newnode.value.func, new.Name)):
func_name = newnode.value.func.name
for ass_node in newnode.targets:
try:
meth = klass[ass_node.name]
if isinstance(meth, new.Function):
if func_name in ('classmethod', 'staticmethod'):
meth.type = func_name
elif func_name == 'classproperty': # see lgc.decorators
meth.type = 'classmethod'
meth.extra_decorators.append(newnode.value)
except (AttributeError, KeyError):
continue
elif getattr(newnode.targets[0], 'name', None) == '__metaclass__':
# XXX check more...
self._metaclass[-1] = 'type' # XXX get the actual metaclass
newnode.set_line_info(newnode.last_child())
return newnode
def visit_assname(self, node, parent, node_name=None):
'''visit a node and return an AssName node'''
newnode = new.AssName()
_set_infos(node, newnode, parent)
newnode.name = node_name
self._save_assignment(newnode)
return newnode
def visit_augassign(self, node, parent):
"""visit a AugAssign node by returning a fresh instance of it"""
newnode = new.AugAssign()
_lineno_parent(node, newnode, parent)
newnode.op = _BIN_OP_CLASSES[node.op.__class__] + "="
self.asscontext = "Ass"
newnode.target = self.visit(node.target, newnode)
self.asscontext = None
newnode.value = self.visit(node.value, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_backquote(self, node, parent):
"""visit a Backquote node by returning a fresh instance of it"""
newnode = new.Backquote()
_lineno_parent(node, newnode, parent)
newnode.value = self.visit(node.value, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_binop(self, node, parent):
"""visit a BinOp node by returning a fresh instance of it"""
newnode = new.BinOp()
_lineno_parent(node, newnode, parent)
newnode.left = self.visit(node.left, newnode)
newnode.right = self.visit(node.right, newnode)
newnode.op = _BIN_OP_CLASSES[node.op.__class__]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_boolop(self, node, parent):
"""visit a BoolOp node by returning a fresh instance of it"""
newnode = new.BoolOp()
_lineno_parent(node, newnode, parent)
newnode.values = [self.visit(child, newnode) for child in node.values]
newnode.op = _BOOL_OP_CLASSES[node.op.__class__]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_break(self, node, parent):
"""visit a Break node by returning a fresh instance of it"""
newnode = new.Break()
_set_infos(node, newnode, parent)
return newnode
def visit_callfunc(self, node, parent):
"""visit a CallFunc node by returning a fresh instance of it"""
newnode = new.CallFunc()
_lineno_parent(node, newnode, parent)
newnode.func = self.visit(node.func, newnode)
newnode.args = [self.visit(child, newnode) for child in node.args]
if node.starargs is not None:
newnode.starargs = self.visit(node.starargs, newnode)
if node.kwargs is not None:
newnode.kwargs = self.visit(node.kwargs, newnode)
newnode.args.extend(self.visit(child, newnode) for child in node.keywords)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_class(self, node, parent):
"""visit a Class node to become astng"""
self._metaclass.append(self._metaclass[-1])
newnode = new.Class(node.name, None)
_lineno_parent(node, newnode, parent)
_init_set_doc(node, newnode)
newnode.bases = [self.visit(child, newnode) for child in node.bases]
newnode.body = [self.visit(child, newnode) for child in node.body]
if 'decorator_list' in node._fields and node.decorator_list:  # py >= 2.6
newnode.decorators = self.visit_decorators(node, newnode)
newnode.set_line_info(newnode.last_child())
metaclass = self._metaclass.pop()
if not newnode.bases:
# no base classes, detect new-style / old-style according to
# the current scope
newnode._newstyle = metaclass == 'type'
newnode.parent.frame().set_local(newnode.name, newnode)
return newnode
def visit_const(self, node, parent):
"""visit a Const node by returning a fresh instance of it"""
newnode = new.Const(node.value)
_set_infos(node, newnode, parent)
return newnode
def visit_continue(self, node, parent):
"""visit a Continue node by returning a fresh instance of it"""
newnode = new.Continue()
_set_infos(node, newnode, parent)
return newnode
def visit_compare(self, node, parent):
"""visit a Compare node by returning a fresh instance of it"""
newnode = new.Compare()
_lineno_parent(node, newnode, parent)
newnode.left = self.visit(node.left, newnode)
newnode.ops = [(_CMP_OP_CLASSES[op.__class__], self.visit(expr, newnode))
for (op, expr) in zip(node.ops, node.comparators)]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_comprehension(self, node, parent):
"""visit a Comprehension node by returning a fresh instance of it"""
newnode = new.Comprehension()
_lineno_parent(node, newnode, parent)
self.asscontext = "Ass"
newnode.target = self.visit(node.target, newnode)
self.asscontext = None
newnode.iter = self.visit(node.iter, newnode)
newnode.ifs = [self.visit(child, newnode) for child in node.ifs]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_decorators(self, node, parent):
"""visit a Decorators node by returning a fresh instance of it"""
# /!\ node is actually a _ast.Function node while
# parent is a astng.nodes.Function node
newnode = new.Decorators()
_lineno_parent(node, newnode, parent)
if 'decorators' in node._fields: # py < 2.6, i.e. 2.5
decorators = node.decorators
else:
decorators = node.decorator_list
newnode.nodes = [self.visit(child, newnode) for child in decorators]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_delete(self, node, parent):
"""visit a Delete node by returning a fresh instance of it"""
newnode = new.Delete()
_lineno_parent(node, newnode, parent)
self.asscontext = "Del"
newnode.targets = [self.visit(child, newnode) for child in node.targets]
self.asscontext = None
newnode.set_line_info(newnode.last_child())
return newnode
def visit_dict(self, node, parent):
"""visit a Dict node by returning a fresh instance of it"""
newnode = new.Dict()
_lineno_parent(node, newnode, parent)
newnode.items = [(self.visit(key, newnode), self.visit(value, newnode))
for key, value in zip(node.keys, node.values)]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_dictcomp(self, node, parent):
"""visit a DictComp node by returning a fresh instance of it"""
newnode = new.DictComp()
_lineno_parent(node, newnode, parent)
newnode.key = self.visit(node.key, newnode)
newnode.value = self.visit(node.value, newnode)
newnode.generators = [self.visit(child, newnode)
for child in node.generators]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_discard(self, node, parent):
"""visit a Discard node by returning a fresh instance of it"""
newnode = new.Discard()
_lineno_parent(node, newnode, parent)
newnode.value = self.visit(node.value, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_ellipsis(self, node, parent):
"""visit an Ellipsis node by returning a fresh instance of it"""
newnode = new.Ellipsis()
_set_infos(node, newnode, parent)
return newnode
def visit_emptynode(self, node, parent):
"""visit an EmptyNode node by returning a fresh instance of it"""
newnode = new.EmptyNode()
_set_infos(node, newnode, parent)
return newnode
def visit_excepthandler(self, node, parent):
"""visit an ExceptHandler node by returning a fresh instance of it"""
newnode = new.ExceptHandler()
_lineno_parent(node, newnode, parent)
if node.type is not None:
newnode.type = self.visit(node.type, newnode)
if node.name is not None:
# /!\ node.name can be a tuple
self.asscontext = "Ass"
newnode.name = self.visit(node.name, newnode)
self.asscontext = None
newnode.body = [self.visit(child, newnode) for child in node.body]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_exec(self, node, parent):
"""visit an Exec node by returning a fresh instance of it"""
newnode = new.Exec()
_lineno_parent(node, newnode, parent)
newnode.expr = self.visit(node.body, newnode)
if node.globals is not None:
newnode.globals = self.visit(node.globals, newnode)
if node.locals is not None:
newnode.locals = self.visit(node.locals, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_extslice(self, node, parent):
"""visit an ExtSlice node by returning a fresh instance of it"""
newnode = new.ExtSlice()
_lineno_parent(node, newnode, parent)
newnode.dims = [self.visit(dim, newnode) for dim in node.dims]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_for(self, node, parent):
"""visit a For node by returning a fresh instance of it"""
newnode = new.For()
_lineno_parent(node, newnode, parent)
self.asscontext = "Ass"
newnode.target = self.visit(node.target, newnode)
self.asscontext = None
newnode.iter = self.visit(node.iter, newnode)
newnode.body = [self.visit(child, newnode) for child in node.body]
newnode.orelse = [self.visit(child, newnode) for child in node.orelse]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_from(self, node, parent):
"""visit a From node by returning a fresh instance of it"""
names = [(alias.name, alias.asname) for alias in node.names]
newnode = new.From(node.module or '', names, node.level or None)
_set_infos(node, newnode, parent)
# store From names to add them to locals after building
self._from_nodes.append(newnode)
return newnode
def visit_function(self, node, parent):
"""visit an Function node to become astng"""
self._global_names.append({})
newnode = new.Function(node.name, None)
_lineno_parent(node, newnode, parent)
_init_set_doc(node, newnode)
newnode.args = self.visit(node.args, newnode)
newnode.body = [self.visit(child, newnode) for child in node.body]
if 'decorators' in node._fields: # py < 2.6
attr = 'decorators'
else:
attr = 'decorator_list'
decorators = getattr(node, attr)
if decorators:
newnode.decorators = self.visit_decorators(node, newnode)
newnode.set_line_info(newnode.last_child())
self._global_names.pop()
frame = newnode.parent.frame()
if isinstance(frame, new.Class):
if newnode.name == '__new__':
newnode.type = 'classmethod'
else:
newnode.type = 'method'
if newnode.decorators is not None:
for decorator_expr in newnode.decorators.nodes:
if isinstance(decorator_expr, new.Name):
if decorator_expr.name in ('classmethod', 'staticmethod'):
newnode.type = decorator_expr.name
elif decorator_expr.name == 'classproperty':
newnode.type = 'classmethod'
frame.set_local(newnode.name, newnode)
return newnode
def visit_genexpr(self, node, parent):
"""visit a GenExpr node by returning a fresh instance of it"""
newnode = new.GenExpr()
_lineno_parent(node, newnode, parent)
newnode.elt = self.visit(node.elt, newnode)
newnode.generators = [self.visit(child, newnode) for child in node.generators]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_getattr(self, node, parent):
"""visit a Getattr node by returning a fresh instance of it"""
if self.asscontext == "Del":
# FIXME: maybe we should reintroduce a visit_delattr?
# for instance, deactivating asscontext
newnode = new.DelAttr()
elif self.asscontext == "Ass":
# FIXME : maybe we should call visit_assattr ?
newnode = new.AssAttr()
self._delayed_assattr.append(newnode)
else:
newnode = new.Getattr()
_lineno_parent(node, newnode, parent)
asscontext, self.asscontext = self.asscontext, None
newnode.expr = self.visit(node.value, newnode)
self.asscontext = asscontext
newnode.attrname = node.attr
newnode.set_line_info(newnode.last_child())
return newnode
def visit_global(self, node, parent):
"""visit an Global node to become astng"""
newnode = new.Global(node.names)
_set_infos(node, newnode, parent)
if self._global_names: # global at the module level, no effect
for name in node.names:
self._global_names[-1].setdefault(name, []).append(newnode)
return newnode
def visit_if(self, node, parent):
"""visit a If node by returning a fresh instance of it"""
newnode = new.If()
_lineno_parent(node, newnode, parent)
newnode.test = self.visit(node.test, newnode)
newnode.body = [self.visit(child, newnode) for child in node.body]
newnode.orelse = [self.visit(child, newnode) for child in node.orelse]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_ifexp(self, node, parent):
"""visit a IfExp node by returning a fresh instance of it"""
newnode = new.IfExp()
_lineno_parent(node, newnode, parent)
newnode.test = self.visit(node.test, newnode)
newnode.body = self.visit(node.body, newnode)
newnode.orelse = self.visit(node.orelse, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_import(self, node, parent):
"""visit a Import node by returning a fresh instance of it"""
newnode = new.Import()
_set_infos(node, newnode, parent)
newnode.names = [(alias.name, alias.asname) for alias in node.names]
# save import names in parent's locals:
for (name, asname) in newnode.names:
name = asname or name
newnode.parent.set_local(name.split('.')[0], newnode)
return newnode
def visit_index(self, node, parent):
"""visit a Index node by returning a fresh instance of it"""
newnode = new.Index()
_lineno_parent(node, newnode, parent)
newnode.value = self.visit(node.value, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_keyword(self, node, parent):
"""visit a Keyword node by returning a fresh instance of it"""
newnode = new.Keyword()
_lineno_parent(node, newnode, parent)
newnode.arg = node.arg
newnode.value = self.visit(node.value, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_lambda(self, node, parent):
"""visit a Lambda node by returning a fresh instance of it"""
newnode = new.Lambda()
_lineno_parent(node, newnode, parent)
newnode.args = self.visit(node.args, newnode)
newnode.body = self.visit(node.body, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_list(self, node, parent):
"""visit a List node by returning a fresh instance of it"""
newnode = new.List()
_lineno_parent(node, newnode, parent)
newnode.elts = [self.visit(child, newnode) for child in node.elts]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_listcomp(self, node, parent):
"""visit a ListComp node by returning a fresh instance of it"""
newnode = new.ListComp()
_lineno_parent(node, newnode, parent)
newnode.elt = self.visit(node.elt, newnode)
newnode.generators = [self.visit(child, newnode)
for child in node.generators]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_module(self, node, modname, package):
"""visit a Module node by returning a fresh instance of it"""
newnode = new.Module(modname, None)
newnode.package = package
_lineno_parent(node, newnode, parent=None)
_init_set_doc(node, newnode)
newnode.body = [self.visit(child, newnode) for child in node.body]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_name(self, node, parent):
"""visit a Name node by returning a fresh instance of it"""
# True and False can be assigned to something in py2.x, so we have to
# check the asscontext first
if self.asscontext == "Del":
newnode = new.DelName()
elif self.asscontext is not None: # Ass
assert self.asscontext == "Ass"
newnode = new.AssName()
elif node.id in CONST_NAME_TRANSFORMS:
newnode = new.Const(CONST_NAME_TRANSFORMS[node.id])
_set_infos(node, newnode, parent)
return newnode
else:
newnode = new.Name()
_lineno_parent(node, newnode, parent)
newnode.name = node.id
# XXX REMOVE me :
if self.asscontext in ('Del', 'Ass'): # 'Aug' ??
self._save_assignment(newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_bytes(self, node, parent):
"""visit a Bytes node by returning a fresh instance of Const"""
newnode = new.Const(node.s)
_set_infos(node, newnode, parent)
return newnode
def visit_num(self, node, parent):
"""visit a Num node by returning a fresh instance of Const"""
newnode = new.Const(node.n)
_set_infos(node, newnode, parent)
return newnode
def visit_pass(self, node, parent):
"""visit a Pass node by returning a fresh instance of it"""
newnode = new.Pass()
_set_infos(node, newnode, parent)
return newnode
def visit_str(self, node, parent):
"""visit a Str node by returning a fresh instance of Const"""
newnode = new.Const(node.s)
_set_infos(node, newnode, parent)
return newnode
def visit_print(self, node, parent):
"""visit a Print node by returning a fresh instance of it"""
newnode = new.Print()
_lineno_parent(node, newnode, parent)
newnode.nl = node.nl
if node.dest is not None:
newnode.dest = self.visit(node.dest, newnode)
newnode.values = [self.visit(child, newnode) for child in node.values]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_raise(self, node, parent):
"""visit a Raise node by returning a fresh instance of it"""
newnode = new.Raise()
_lineno_parent(node, newnode, parent)
if node.type is not None:
newnode.exc = self.visit(node.type, newnode)
if node.inst is not None:
newnode.inst = self.visit(node.inst, newnode)
if node.tback is not None:
newnode.tback = self.visit(node.tback, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_return(self, node, parent):
"""visit a Return node by returning a fresh instance of it"""
newnode = new.Return()
_lineno_parent(node, newnode, parent)
if node.value is not None:
newnode.value = self.visit(node.value, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_set(self, node, parent):
"""visit a Set node by returning a fresh instance of it"""
newnode = new.Set()
_lineno_parent(node, newnode, parent)
newnode.elts = [self.visit(child, newnode) for child in node.elts]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_setcomp(self, node, parent):
"""visit a SetComp node by returning a fresh instance of it"""
newnode = new.SetComp()
_lineno_parent(node, newnode, parent)
newnode.elt = self.visit(node.elt, newnode)
newnode.generators = [self.visit(child, newnode)
for child in node.generators]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_slice(self, node, parent):
"""visit a Slice node by returning a fresh instance of it"""
newnode = new.Slice()
_lineno_parent(node, newnode, parent)
if node.lower is not None:
newnode.lower = self.visit(node.lower, newnode)
if node.upper is not None:
newnode.upper = self.visit(node.upper, newnode)
if node.step is not None:
newnode.step = self.visit(node.step, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_subscript(self, node, parent):
"""visit a Subscript node by returning a fresh instance of it"""
newnode = new.Subscript()
_lineno_parent(node, newnode, parent)
subcontext, self.asscontext = self.asscontext, None
newnode.value = self.visit(node.value, newnode)
newnode.slice = self.visit(node.slice, newnode)
self.asscontext = subcontext
newnode.set_line_info(newnode.last_child())
return newnode
def visit_tryexcept(self, node, parent):
"""visit a TryExcept node by returning a fresh instance of it"""
newnode = new.TryExcept()
_lineno_parent(node, newnode, parent)
newnode.body = [self.visit(child, newnode) for child in node.body]
newnode.handlers = [self.visit(child, newnode) for child in node.handlers]
newnode.orelse = [self.visit(child, newnode) for child in node.orelse]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_tryfinally(self, node, parent):
"""visit a TryFinally node by returning a fresh instance of it"""
newnode = new.TryFinally()
_lineno_parent(node, newnode, parent)
newnode.body = [self.visit(child, newnode) for child in node.body]
newnode.finalbody = [self.visit(n, newnode) for n in node.finalbody]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_tuple(self, node, parent):
"""visit a Tuple node by returning a fresh instance of it"""
newnode = new.Tuple()
_lineno_parent(node, newnode, parent)
newnode.elts = [self.visit(child, newnode) for child in node.elts]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_unaryop(self, node, parent):
"""visit a UnaryOp node by returning a fresh instance of it"""
newnode = new.UnaryOp()
_lineno_parent(node, newnode, parent)
newnode.operand = self.visit(node.operand, newnode)
newnode.op = _UNARY_OP_CLASSES[node.op.__class__]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_while(self, node, parent):
"""visit a While node by returning a fresh instance of it"""
newnode = new.While()
_lineno_parent(node, newnode, parent)
newnode.test = self.visit(node.test, newnode)
newnode.body = [self.visit(child, newnode) for child in node.body]
newnode.orelse = [self.visit(child, newnode) for child in node.orelse]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_with(self, node, parent):
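"""visit a With node by returning a fresh instance of it"""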
newnode = new.With()
_lineno_parent(node, newnode, parent)
_node = getattr(node, 'items', [node])[0] # python 3.3 XXX
newnode.expr = self.visit(_node.context_expr, newnode)
self.asscontext = "Ass"
if _node.optional_vars is not None:
newnode.vars = self.visit(_node.optional_vars, newnode)
self.asscontext = None
newnode.body = [self.visit(child, newnode) for child in node.body]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_yield(self, node, parent):
"""visit a Yield node by returning a fresh instance of it"""
newnode = new.Yield()
_lineno_parent(node, newnode, parent)
if node.value is not None:
newnode.value = self.visit(node.value, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
class TreeRebuilder3k(TreeRebuilder):
"""extend and overwrite TreeRebuilder for python3k"""
def visit_arg(self, node, parent):
"""visit a arg node by returning a fresh AssName instance"""
# the <arg> node is coming from py>=3.0, but we use AssName in py2.x
# XXX or should we instead introduce an Arg node in astng?
return self.visit_assname(node, parent, node.arg)
def visit_excepthandler(self, node, parent):
"""visit an ExceptHandler node by returning a fresh instance of it"""
newnode = new.ExceptHandler()
_lineno_parent(node, newnode, parent)
if node.type is not None:
newnode.type = self.visit(node.type, newnode)
if node.name is not None:
newnode.name = self.visit_assname(node, newnode, node.name)
newnode.body = [self.visit(child, newnode) for child in node.body]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_nonlocal(self, node, parent):
"""visit a Nonlocal node and return a new instance of it"""
newnode = new.Nonlocal(node.names)
_set_infos(node, newnode, parent)
return newnode
def visit_raise(self, node, parent):
"""visit a Raise node by returning a fresh instance of it"""
newnode = new.Raise()
_lineno_parent(node, newnode, parent)
# no traceback; anyway it is not used in Pylint
if node.exc is not None:
newnode.exc = self.visit(node.exc, newnode)
if node.cause is not None:
newnode.cause = self.visit(node.cause, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_starred(self, node, parent):
"""visit a Starred node and return a new instance of it"""
newnode = new.Starred()
_lineno_parent(node, newnode, parent)
newnode.value = self.visit(node.value, newnode)
newnode.set_line_info(newnode.last_child())
return newnode
def visit_try(self, node, parent):
# python 3.3 introduces a new Try node replacing the TryFinally/TryExcept nodes
if node.finalbody:
newnode = new.TryFinally()
_lineno_parent(node, newnode, parent)
newnode.finalbody = [self.visit(n, newnode) for n in node.finalbody]
if node.handlers:
excnode = new.TryExcept()
_lineno_parent(node, excnode, parent)
excnode.body = [self.visit(child, newnode) for child in node.body]
excnode.handlers = [self.visit(child, newnode) for child in node.handlers]
excnode.orelse = [self.visit(child, newnode) for child in node.orelse]
newnode.body = [excnode]
else:
newnode.body = [self.visit(child, newnode) for child in node.body]
elif node.handlers:
newnode = new.TryExcept()
_lineno_parent(node, newnode, parent)
newnode.body = [self.visit(child, newnode) for child in node.body]
newnode.handlers = [self.visit(child, newnode) for child in node.handlers]
newnode.orelse = [self.visit(child, newnode) for child in node.orelse]
newnode.set_line_info(newnode.last_child())
return newnode
def visit_yieldfrom(self, node, parent):
return self.visit_yield(node, parent)
if sys.version_info >= (3, 0):
TreeRebuilder = TreeRebuilder3k
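# Hedged usage sketch (not part of the original module): driving the rebuilder by
# hand, assuming only the visit_module() entry point defined above:
#
#   import _ast
#   tree = compile("x = 1", "<string>", "exec", _ast.PyCF_ONLY_AST)
#   astng_module = TreeRebuilder().visit_module(tree, modname="example", package=False)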
|
yorvic/.vim
|
bundle/python-mode/pylibs/pylama/checkers/pylint/logilab/astng/rebuilder.py
|
Python
|
gpl-3.0
| 36,633
|
[
"VisIt"
] |
672cbddeff303a13dfc7235cae46b1b84eb70122f9aac58006129cd87c10beb9
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 26 17:54:12 2017
@author: Eliot
skimage_segmentation.py
"""
import os
from skimage import color, util, filters, io, morphology, segmentation, measure, feature
import cv2
import numpy as np
import matplotlib.pylab as plt
thisfilepath = os.path.dirname(__file__)
loaddirpath = os.path.abspath(os.path.join(thisfilepath, "test_files/orig"))
savedirpath = os.path.abspath(os.path.join(thisfilepath, "test_files/skimage"))
image2 = io.imread(loaddirpath + "/001CROP11-17-59.jpg") #rgb
gray = io.imread(loaddirpath + "/medianmaskgray.jpg") #rgb
gray = filters.gaussian(gray, sigma=2)
edges = filters.sobel(gray)
print(edges.dtype)
'''
grid = util.regular_grid(gray.shape, 400)
seeds = np.zeros(gray.shape, "int")
seeds[grid] = np.arange(seeds[grid].size).reshape(seeds[grid].shape) + 1
w0 = morphology.watershed(edges, seeds)
w1 = morphology.watershed(edges, seeds, compactness=0.0001)
fig, (ax0, ax1) = plt.subplots(1,2)
ax0.imshow(color.label2rgb(w0, gray))
ax0.set_title("Classical watershed")
ax1.imshow(color.label2rgb(w1, gray))
ax1.set_title("Compact watershed")
plt.show()
# now try segmentation with superpixels:
seg2 = segmentation.slic(gray, n_segments = 117, max_iter=160, sigma=1,
compactness=0.1, multichannel=False)
fig2, (ax20, ax21) = plt.subplots(1,2)
ax20.imshow(gray)
ax20.set_title("Original Image")
ax21.imshow(color.label2rgb(seg2, gray, image_alpha=0.5))
ax21.set_title("Superpixels Segmentation")
plt.show()
'''
# trying measure contours according to skimage.find_contours
### >> POTENTIALLY USE ACTIVE_CONTOURS ON THIS!
'''
contours = measure.find_contours(gray, 80)
print(contours)
fig3, ax3 = plt.subplots()
ax3.imshow(gray, interpolation="lanczos", cmap=plt.cm.inferno)
for n, contour in enumerate(contours):
ax3.plot(contour[:,1], contour[:,0], linewidth=2)
ax3.axis("image")
ax3.set_xticks([])
ax3.set_yticks([])
plt.show()
contours = measure.find_contours(gray, 140)
print(contours)
fig4, ax4 = plt.subplots()
ax4.imshow(gray, interpolation="lanczos", cmap=plt.cm.inferno)
for n, contour in enumerate(contours):
ax4.plot(contour[:,1], contour[:,0], linewidth=2)
ax4.axis("image")
ax4.set_xticks([])
ax4.set_yticks([])
plt.show()
threshd = np.zeros_like(edges)
threshd[edges > 0.012] = 1
threshd = morphology.dilation(threshd)
skel = morphology.skeletonize(threshd)
contours = measure.find_contours(skel, 1)
print(contours)
fig4, ax4 = plt.subplots()
ax4.imshow(skel, interpolation="lanczos", cmap=plt.cm.inferno)
for n, contour in enumerate(contours):
ax4.plot(contour[:,1], contour[:,0], linewidth=2)
ax4.axis("image")
ax4.set_xticks([])
ax4.set_yticks([])
plt.show()
'''
# use canny rather than sobel
'''
edges1 = feature.canny(gray, low_threshold=0.585, high_threshold=0.875, use_quantiles=True) # first settings: low_threshold=0.035, high_threshold=0.145
edges2 = feature.canny(gray, low_threshold=0.585, high_threshold=0.875, sigma=3, use_quantiles=True)
print(edges1.dtype)
closed1 = morphology.closing(edges1, morphology.square(9))
closed2 = morphology.closing(edges2, morphology.square(13))
contours1 = measure.find_contours(closed1, False, fully_connected='high')#, positive_orientation='high')
contours2 = measure.find_contours(closed2, False, fully_connected='high', positive_orientation='high')
#print(contours2)
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8,3),
sharex=True, sharey=True)
ax1.imshow(gray, cmap=plt.cm.inferno)
ax1.axis('on')
ax1.set_title("original image")
ax2.imshow(edges1, cmap=plt.cm.inferno)
ax2.axis('off')
ax2.set_title("Canny filter, $\sigma=1$", fontsize=18)
for n, contour in enumerate(contours1):
ax2.plot(contour[:,1], contour[:,0], linewidth=2)
ax3.imshow(closed2, cmap=plt.cm.inferno)
ax3.axis('off')
ax3.set_title("Canny filter, $\sigma=3$", fontsize=18)
for n, contour in enumerate(contours2):
ax3.plot(contour[:,1], contour[:,0], linewidth=2)
fig.tight_layout()
plt.show()
'''
# chan-vese segmentation
## Can't be used as skimage 0.14 is needed for this
print(gray.dtype)
print(np.max(gray), np.min(gray))
cv = segmentation.chan_vese(gray, mu=0.2, lambda1=1, tol=1e-3, max_iter=200,
dt=0.5, init_level_set="checkerboard",
extended_output=True)
fig, axes = plt.subplots(2,2,figsize=(8,8))
ax = axes.flatten()
ax[0].imshow(gray, cmap="gray")
ax[0].set_axis_off()
ax[0].set_title("Original Image", fontsize=12)
ax[1].imshow(cv[0], cmap="gray")
ax[1].set_axis_off()
title = "Chan-Vese segmentation - {} iterations".format(len(cv[2]))
ax[1].set_title(title, fontsize=12)
ax[2].imshow(cv[1], cmap="gray")
ax[2].set_axis_off()
ax[2].set_title("Final Level Set", fontsize=12)
ax[3].imshow(cv[2], cmap="gray")
ax[3].set_title("Evolution of energy over iterations", fontsize=12)
fig.tight_layout()
plt.show()
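# Hedged note (not in the original script): with extended_output=True, chan_vese
# returns (segmentation, final level set, energies), so cv[0] is the binary mask
# plotted above and an equivalent mask could be recovered with, e.g., cv[1] > 0.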
|
EliotBryant/ShadDetector
|
shadDetector_testing/Gradient Based Methods/skimage_segmentation.py
|
Python
|
gpl-3.0
| 4,924
|
[
"Gaussian"
] |
c7ccb074d00472bfd1c1edf9c6707665d41941a4e6e37e370f53c66c1edcd7ca
|
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pickle
from pmdarima.arima import ARIMA
from pmdarima.arima import ndiffs
from pmdarima.arima import nsdiffs
from bigdl.chronos.metric.forecast_metrics import Evaluator
class ARIMAModel:
def __init__(self):
"""
Initialize Model
"""
self.seasonal = True
self.metric = 'mse'
self.model = None
self.model_init = False
def _build(self, **config):
"""
build the models and initialize.
:param config: hyperparameters for the model
"""
p = config.get('p', 2)
d = config.get('d', 0)
q = config.get('q', 2)
self.seasonal = config.get('seasonality_mode', True)
P = config.get('P', 1)
D = config.get('D', 0)
Q = config.get('Q', 1)
m = config.get('m', 7)
self.metric = config.get('metric', self.metric)
self.metric_func = config.get('metric_func', None)
order = (p, d, q)
if not self.seasonal:
seasonal_order = (0, 0, 0, 0)
else:
seasonal_order = (P, D, Q, m)
self.model = ARIMA(order=order, seasonal_order=seasonal_order, suppress_warnings=True)
def fit_eval(self, data, validation_data, **config):
"""
Fit on the training data from scratch.
:param data: A 1-D numpy array as the training data
:param validation_data: A 1-D numpy array as the evaluation data
:return: the evaluation metric value
"""
if not self.model_init:
# Estimating differencing term (d) and seasonal differencing term (D)
kpss_diffs = ndiffs(data, alpha=0.05, test='kpss', max_d=6)
adf_diffs = ndiffs(data, alpha=0.05, test='adf', max_d=6)
d = max(adf_diffs, kpss_diffs)
D = 0 if not self.seasonal else nsdiffs(data, m=7, max_D=12)
config.update(d=d, D=D)
self._build(**config)
self.model_init = True
self.model.fit(data)
if self.metric_func:
val_metric = self.evaluate(x=None, target=validation_data,
metrics=[self.metric_func])[0].item()
else:
val_metric = self.evaluate(x=None, target=validation_data,
metrics=[self.metric])[0].item()
if self.metric_func:
return {self.metric_func.__name__: val_metric}
else:
return {self.metric: val_metric}
def predict(self, x=None, horizon=24, update=False, rolling=False):
"""
Predict horizon time-points ahead of the input x in fit_eval
:param x: ARIMA predicts the horizon steps forward from the training data.
So x should be None as it is not used.
:param horizon: the number of steps forward to predict
:param update: whether to update the original model
:param rolling: whether to use rolling prediction
:return: predicted result of length horizon
"""
if x is not None:
raise ValueError("x should be None")
if update and not rolling:
raise Exception("We don't support updating model without rolling prediction currently")
if self.model is None:
raise Exception("Needs to call fit_eval or restore first before calling predict")
if not update and not rolling:
forecasts = self.model.predict(n_periods=horizon)
elif rolling:
if not update:
self.save("tmp.pkl")
forecasts = []
for step in range(horizon):
fc = self.model.predict(n_periods=1).item()
forecasts.append(fc)
# Updates the existing model with a small number of MLE steps for rolling prediction
self.model.update(fc)
if not update:
self.restore("tmp.pkl")
os.remove("tmp.pkl")
return forecasts
def evaluate(self, target, x=None, metrics=['mse'], rolling=False):
"""
Evaluate on the prediction results and y. We predict horizon time-points ahead of the input x
in fit_eval before evaluation, where the horizon length equals the second dimension size of
y.
:param target: target for evaluation.
:param x: ARIMA predicts the horizon steps forward from the training data.
So x should be None as it is not used.
:param metrics: a list of metrics in string format, or a callable function whose
signature should be func(y_true, y_pred), where y_true and y_pred are numpy
ndarrays. The function should return a float value as the evaluation result.
:param rolling: whether to use rolling prediction
:return: a list of metric evaluation results
"""
if x is not None:
raise ValueError("We don't support input x currently")
if target is None:
raise ValueError("Input invalid target of None")
if self.model is None:
raise Exception("Needs to call fit_eval or restore first before calling evaluate")
forecasts = self.predict(horizon=len(target), rolling=rolling)
return Evaluator.evaluate(metrics, target, forecasts, aggregate="mean")
def save(self, checkpoint_file):
if self.model is None:
raise Exception("Needs to call fit_eval or restore first before calling save")
with open(checkpoint_file, 'wb') as fout:
pickle.dump(self.model, fout)
def restore(self, checkpoint_file):
with open(checkpoint_file, 'rb') as fin:
self.model = pickle.load(fin)
self.model_init = True
class ARIMABuilder:
def __init__(self, **arima_config):
"""
Initialize ARIMA Model Builder
:param arima_config: Other ARIMA hyperparameters. You may refer to
https://alkaline-ml.com/pmdarima/modules/generated/pmdarima.arima.ARIMA.html#pmdarima.arima.ARIMA
for the parameter names to specify.
"""
self.model_config = arima_config.copy()
def build(self, config):
"""
Build ARIMA Model
:param config: Other ARIMA hyperparameters. You may refer to
https://alkaline-ml.com/pmdarima/modules/generated/pmdarima.arima.ARIMA.html#pmdarima.arima.ARIMA
for the parameter names to specify.
"""
from bigdl.chronos.model.arima import ARIMAModel
model = ARIMAModel()
model._build(**config)
return model
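# Hedged usage sketch (not part of the original module); assumes 1-D numpy arrays
# for the training and validation series:
#
#   import numpy as np
#   train = np.sin(np.arange(100) / 7.0)
#   val = np.sin(np.arange(100, 124) / 7.0)
#   model = ARIMAModel()
#   result = model.fit_eval(train, val, p=2, q=2)   # e.g. {'mse': ...}
#   forecast = model.predict(horizon=24)            # 24 steps ahead of the training data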
|
intel-analytics/BigDL
|
python/chronos/src/bigdl/chronos/model/arima.py
|
Python
|
apache-2.0
| 7,152
|
[
"ADF"
] |
f898c800219545723bb8093c4087bb821f5bd595326da5d0c7a45e33ac9e994d
|
"""Module managing errors."""
import logging
import os
import re
import sys
import numpy as np
import scooby
import pyvista
from pyvista import _vtk
import contextlib
import collections
def set_error_output_file(filename):
"""Set a file to write out the VTK errors."""
filename = os.path.abspath(os.path.expanduser(filename))
fileOutputWindow = _vtk.vtkFileOutputWindow()
fileOutputWindow.SetFileName(filename)
outputWindow = _vtk.vtkOutputWindow()
outputWindow.SetInstance(fileOutputWindow)
return fileOutputWindow, outputWindow
class VtkErrorCatcher:
"""Context manager to temporarily catch VTK errors.
Parameters
----------
raise_errors : bool, optional
Raise a ``RuntimeError`` when a VTK error is encountered. Defaults to ``False``.
send_to_logging : bool, optional
Determine whether VTK errors raised within the context should also be sent to logging. Defaults to ``True``.
Examples
--------
Catch VTK errors using the context manager.
>>> import pyvista
>>> with pyvista.VtkErrorCatcher() as error_catcher:
... sphere = pyvista.Sphere()
"""
def __init__(self, raise_errors=False, send_to_logging=True):
"""Initialize context manager."""
self.raise_errors = raise_errors
self.send_to_logging = send_to_logging
def __enter__(self):
"""Observe VTK string output window for errors."""
error_output = _vtk.vtkStringOutputWindow()
error_win = _vtk.vtkOutputWindow()
self._error_output_orig = error_win.GetInstance()
error_win.SetInstance(error_output)
obs = Observer(log=self.send_to_logging, store_history=True)
obs.observe(error_output)
self._observer = obs
def __exit__(self, type, val, traceback):
"""Stop observing VTK string output window."""
error_win = _vtk.vtkOutputWindow()
error_win.SetInstance(self._error_output_orig)
self.events = self._observer.event_history
if self.raise_errors and self.events:
errors = [RuntimeError(f'{e.kind}: {e.alert}', e.path, e.address) for e in self.events]
raise RuntimeError(errors)
class Observer:
"""A standard class for observing VTK objects."""
def __init__(self, event_type='ErrorEvent', log=True, store_history=False):
"""Initialize observer."""
self.__event_occurred = False
self.__message = None
self.__message_etc = None
self.CallDataType = 'string0'
self.__observing = False
self.event_type = event_type
self.__log = log
self.store_history = store_history
self.event_history = []
@staticmethod
def parse_message(message):
"""Parse the given message."""
# Message format
regex = re.compile(r'([A-Z]+):\sIn\s(.+),\sline\s.+\n\w+\s\((.+)\):\s(.+)')
try:
kind, path, address, alert = regex.findall(message)[0]
return kind, path, address, alert
except:
return '', '', '', message
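# Illustrative example (not in the original source): a raw VTK message such as
#   "ERROR: In /src/vtkXMLReader.cxx, line 263\nvtkXMLReader (0x55d2c0): bad file"
# parses to kind='ERROR', path='/src/vtkXMLReader.cxx', address='0x55d2c0',
# alert='bad file'; anything that does not match falls back to the bare message.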
def log_message(self, kind, alert):
"""Parse different event types and passes them to logging."""
if kind == 'ERROR':
logging.error(alert)
else:
logging.warning(alert)
return
def __call__(self, obj, event, message):
"""Declare standard call function for the observer.
On an event occurrence, this function executes.
"""
self.__event_occurred = True
self.__message_etc = message
kind, path, address, alert = self.parse_message(message)
self.__message = alert
if self.__log:
self.log_message(kind, alert)
if self.store_history:
VtkEvent = collections.namedtuple('VtkEvent', ['kind', 'path', 'address', 'alert'])
self.event_history.append(VtkEvent(kind, path, address, alert))
def has_event_occurred(self):
"""Ask self if an error has occurred since last queried.
This resets the observer's status.
"""
occ = self.__event_occurred
self.__event_occurred = False
return occ
def get_message(self, etc=False):
"""Get the last set error message.
Returns
-------
str: the last set error message
"""
if etc:
return self.__message_etc
return self.__message
def observe(self, algorithm):
"""Make this an observer of an algorithm."""
if self.__observing:
raise RuntimeError('This error observer is already observing an algorithm.')
if hasattr(algorithm, 'GetExecutive') and algorithm.GetExecutive() is not None:
algorithm.GetExecutive().AddObserver(self.event_type, self)
algorithm.AddObserver(self.event_type, self)
self.__observing = True
return
def send_errors_to_logging():
"""Send all VTK error/warning messages to Python's logging module."""
error_output = _vtk.vtkStringOutputWindow()
error_win = _vtk.vtkOutputWindow()
error_win.SetInstance(error_output)
obs = Observer()
return obs.observe(error_output)
def get_gpu_info():
"""Get all information about the GPU."""
# an OpenGL context MUST be opened before trying to do this.
plotter = pyvista.Plotter(notebook=False, off_screen=True)
plotter.add_mesh(pyvista.Sphere())
plotter.show(auto_close=False)
gpu_info = plotter.ren_win.ReportCapabilities()
plotter.close()
# Remove from list of Plotters
pyvista.plotting._ALL_PLOTTERS.pop(plotter._id_name)
return gpu_info
class GPUInfo():
"""A class to hold GPU details."""
def __init__(self):
"""Instantiate a container for the GPU information."""
self._gpu_info = get_gpu_info()
@property
def renderer(self):
"""GPU renderer name."""
regex = re.compile("OpenGL renderer string:(.+)\n")
try:
renderer = regex.findall(self._gpu_info)[0]
except IndexError:
raise RuntimeError("Unable to parse GPU information for the renderer.")
return renderer.strip()
@property
def version(self):
"""GPU renderer version."""
regex = re.compile("OpenGL version string:(.+)\n")
try:
version = regex.findall(self._gpu_info)[0]
except IndexError:
raise RuntimeError("Unable to parse GPU information for the version.")
return version.strip()
@property
def vendor(self):
"""GPU renderer vendor."""
regex = re.compile("OpenGL vendor string:(.+)\n")
try:
vendor = regex.findall(self._gpu_info)[0]
except IndexError:
raise RuntimeError("Unable to parse GPU information for the vendor.")
return vendor.strip()
def get_info(self):
"""All GPU information as tuple pairs."""
return (("GPU Vendor", self.vendor),
("GPU Renderer", self.renderer),
("GPU Version", self.version),
)
def _repr_html_(self):
"""HTML table representation."""
fmt = "<table>"
row = "<tr><th>{}</th><td>{}</td></tr>\n"
for meta in self.get_info():
fmt += row.format(*meta)
fmt += "</table>"
return fmt
def __repr__(self):
"""Representation method."""
content = "\n"
for k, v in self.get_info():
content += f"{k:>18} : {v}\n"
content += "\n"
return content
class Report(scooby.Report):
"""A class for custom scooby.Report."""
def __init__(self, additional=None, ncol=3, text_width=80, sort=False,
gpu=True):
"""Generate a :class:`scooby.Report` instance.
Parameters
----------
additional : list(ModuleType), list(str)
List of packages or package names to add to output information.
ncol : int, optional
Number of package-columns in html table; only has effect if
``mode='HTML'`` or ``mode='html'``. Defaults to 3.
text_width : int, optional
The text width for non-HTML display modes
sort : bool, optional
Alphabetically sort the packages
gpu : bool
Gather information about the GPU. Defaults to ``True`` but if
experiencing rendering issues, pass ``False`` to safely generate
a report.
"""
# Mandatory packages.
core = ['pyvista', 'vtk', 'numpy', 'imageio', 'appdirs', 'scooby',
'meshio']
# Optional packages.
optional = ['matplotlib', 'pyvistaqt', 'PyQt5', 'IPython', 'colorcet',
'cmocean', 'ipyvtklink', 'scipy', 'itkwidgets', 'tqdm']
# Information about the GPU - bare except in case there is a rendering
# bug that the user is trying to report.
if gpu:
try:
extra_meta = GPUInfo().get_info()
except:
extra_meta = ("GPU Details", "error")
else:
extra_meta = ("GPU Details", "None")
scooby.Report.__init__(self, additional=additional, core=core,
optional=optional, ncol=ncol,
text_width=text_width, sort=sort,
extra_meta=extra_meta)
def assert_empty_kwargs(**kwargs):
"""Assert that all keyword arguments have been used (internal helper).
If any keyword arguments are passed, a ``TypeError`` is raised.
"""
n = len(kwargs)
if n == 0:
return True
caller = sys._getframe(1).f_code.co_name
keys = list(kwargs.keys())
bad_arguments = ', '.join([f'"{key}"' for key in keys])
if n == 1:
grammar = "is an invalid keyword argument"
else:
grammar = "are invalid keyword arguments"
message = f"{bad_arguments} {grammar} for `{caller}`"
raise TypeError(message)
def check_valid_vector(point, name=''):
"""Check if a vector contains three components."""
if np.array(point).size != 3:
if name == '':
name = 'Vector'
raise TypeError(f'{name} must be a length three tuple of floats.')
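# Hedged usage sketch (not part of the original module): assert_empty_kwargs is
# intended to be called after a function has consumed the keyword arguments it
# understands, e.g. with a hypothetical helper:
#
#   def plot_something(mesh, color=None, **kwargs):
#       ...                               # pop/use the supported kwargs here
#       assert_empty_kwargs(**kwargs)     # raises TypeError naming any leftovers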
|
akaszynski/vtkInterface
|
pyvista/utilities/errors.py
|
Python
|
mit
| 10,267
|
[
"VTK"
] |
5d0cbdd077ccb2139f5af19aa69eac7db17182b6e41c0b81724829d7c8cf56b9
|
# -*- coding: utf-8 -*-
"""
* Copyright (C) 2010-2014 Loïc BLOT, CNRS <http://www.unix-experience.fr/>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy as _
# Temporary function to permit communication between Z-Eye PHP and Python locales
def get_locale(request):
if request.method == "GET":
if "locale" in request.GET:
if len(request.GET["locale"]) > 0:
return HttpResponse(_(request.GET["locale"]))
else:
return HttpResponse(_('Err-Wrong-Request'))
else:
return HttpResponse(_('Err-Wrong-Request'))
else:
return HttpResponse(_('Err-Wrong-Request'))
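# Hedged usage note (not in the original module): mapped to a URL pattern such as
# r'^locale$' (hypothetical), a request like GET /locale?locale=Error returns the
# translation of "Error" for the active Django language; any other method or a
# missing/empty parameter yields the 'Err-Wrong-Request' string.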
"""
TEMP calls to locales: before their migration to the Django app, they need to be
referenced somewhere, and this seems like a good place for it
"""
_("Action")
_("Add")
_('Cancel')
_('comma')
_("Confirm")
_("confirm-disconnect")
_("Connection")
_("Connect-to")
_("CSV-content")
_("day")
_("days")
_("Default")
_("Description")
_("Disconnection")
_("Done")
_("English")
_("Error")
_("err-bad-datas")
_("err-devel")
_("err-devel-locale")
_("err-must-be-connected")
_("err-no-rights")
_("err-sql-query-failed")
_("err-unk-module")
_("French")
_("hour")
_("hours")
_("Import")
_("Loading")
_("minute")
_("minutes")
_("Modify")
_("Name")
_("No")
_("None")
_("Notification")
_("OK")
_("Online")
_("Remove")
_("Replace-?")
_("rule-read-datas")
_("rule-write-datas")
_("Save")
_("Searching...")
_("second")
_("seconds")
_("semi-colon")
_("separator")
_("Settings")
_("Type")
_("unknown")
_('Yes')
_('Connect')
_('err-bad-user')
_('err-unk')
_('inactivity-disconnect')
_('Login')
_('menu-title')
_('Password')
_('connok-then-load')
_('title-conn')
_('alert-on')
_('alert-s')
_('Attack')
_('Availability')
_('CRITICAL')
_('critical-s')
_('DOWN')
_('Duration')
_('err-detect-atk')
_('err-icinga')
_('err-icinga-off')
_('err-net')
_('err-security')
_('Host')
_('inc-bw')
_('ipaddr')
_('Link')
_('menu-name')
_('menu-title')
_('out-bw')
_('sensors')
_('Service')
_('Since-icinga-start')
_('State')
_('state-net')
_('state-security')
_('state-srv')
_('Status-information')
_('WARN')
_('warning-s')
_('ACL')
_('ACL-Mgmt')
_('acl-name')
_('acls-to-include')
_('Addr')
_('add-acl')
_('add-cluster')
_('Add-Record')
_('add-server')
_('add-zone')
_('Advanced-tools')
_('algorithm')
_('allow-notify')
_('allow-query')
_('allow-recurse')
_('allow-transfer')
_('allow-update')
_('Alone')
_('Any')
_('caching-servers')
_('Clusters')
_('confirm-remove-acl')
_('confirm-remove-cluster')
_('confirm-remove-dnssrc')
_('confirm-remove-record')
_('confirm-remove-server')
_('confirm-remove-tsig')
_('confirm-remove-zone')
_('chroot-path')
_('define-tsig-key')
_('Desc')
_('DNSSec-Mgmt')
_('dns-to-include')
_('DNS-zone')
_('DNS-zones')
_('edit-server')
_('enable-dnssec')
_('enable-dnssec-validation')
_('err-acl-already-exists')
_('err-acl-name-protected')
_('err-acl-not-exists')
_('err-bad-login')
_('err-bad-zonetype')
_('err-cluster-already-exists')
_('err-cluster-member-only-one-category')
_('err-cluster-need-master')
_('err-cluster-not-exists')
_('err-invalid-db')
_('err-invalid-req')
_('err-invalid-zonename')
_('err-masterdir-not-readable')
_('err-miss-bad-fields')
_('err-namedconf-not-readable')
_('err-named-zeyenamed-different')
_('err-no-records')
_('err-no-rule-specified')
_('err-no-zone')
_('err-one-dns-server-required')
_('err-server-already-exists')
_('err-server-not-exists')
_('err-slavedir-not-readable')
_('err-some-dns-invalid')
_('err-some-ip-invalid')
_('err-tsig-key-algo-invalid')
_('err-tsig-key-already-exists')
_('err-tsig-key-exactly-same')
_('err-tsig-key-id-invalid')
_('err-tsig-key-not-exists')
_('err-tsig-not-base64')
_('err-unable-conn')
_('err-zeyenamedpath-together')
_('err-z-eye-not-writable')
_('err-zone-already-exists')
_('err-zone-not-exists')
_('expert-tools')
_('fail-tab')
_('Filter')
_('Forward-only')
_('Forwarders')
_('found-records')
_('Herited')
_('ip-addr')
_('ip-to-include')
_('key-alias')
_('key-id')
_('Login')
_('machine-FQDN')
_('master-servers')
_('Masters')
_('masterzone-path')
_('menu-name')
_('menu-title')
_('Modification')
_('modify-servers')
_('named-conf-path')
_('named-zeye-path')
_('no-data-found')
_('no-found-records')
_('Others')
_('Password')
_('Password-repeat')
_('Record')
_('Record-TTL')
_('Record-Type')
_('Return')
_('Save')
_('Search')
_('Server')
_('Server-Mgmt')
_('serverlist')
_('Servers')
_('Slave-only')
_('slave-servers')
_('slavezone-path')
_('soa-ttl-expire')
_('soa-ttl-minimum')
_('soa-ttl-refresh')
_('soa-ttl-retry')
_('ssh-user')
_('Stats')
_('subnets-to-include')
_('tip-dnsserver')
_('title-dns')
_('title-dns-cluster')
_('title-dns-server')
_('title-dns-tsig')
_('title-dns-zone')
_('title-old-records')
_('tooltip-chroot')
_('tooltip-machine-FQDN')
_('tooltip-masterzone-path')
_('tooltip-rights')
_('tooltip-slavezone-path')
_('tooltip-soattl-expire')
_('tooltip-soattl-minimum')
_('tooltip-soattl-refresh')
_('tooltip-soattl-retry')
_('tooltip-tsig-transfer')
_('tooltip-tsig-update')
_('tooltip-zeyenamed-path')
_('tsig-to-include')
_('tsig-transfer')
_('tsig-update')
_('Value')
_('Zone')
_('Zone-Mgmt')
_('Zone-type')
_('Classic')
_('clustername')
_('tip-dnscluster')
_('tooltip-dnsinclude')
_('tooltip-ipinclude')
_('Add')
_('confirm-removegrp')
_('Delete')
_('err-already-exist')
_('err-bad-data')
_('err-not-exist')
_('Groupname')
_('menu-name')
_('menu-title')
_('New-group')
_('Rule')
_('Save')
_('sure-delete')
_('title-edit')
_('title-mgmt')
_('title-opts')
_('User-nb')
_('active-check-en')
_('Add')
_('Address')
_('Alias')
_('alivecommand')
_('Availability')
_('checkcmd')
_('check-freshness')
_('check-interval')
_('checkperiod')
_('Commands')
_('Comment')
_('Confirm')
_('confirm-remove-command')
_('confirm-remove-contact')
_('confirm-remove-contactgroup')
_('confirm-remove-host')
_('confirm-remove-hostgroup')
_('confirm-remove-notif-strategy')
_('confirm-remove-service')
_('confirm-remove-timeperiod')
_('Contactgroups')
_('Contacts')
_('Critical')
_('Description')
_('DisplayName')
_('Down')
_('Email')
_('err-bad-data')
_('err-binary-not-found')
_('err-binary-used')
_('err-contact-used')
_('err-ctg-used')
_('err-data-exist')
_('err-data-not-exist')
_('err-fail-writecfg')
_('err-hg-used')
_('err-not-found')
_('err-no-cmd')
_('err-no-contact')
_('err-no-contactgroups')
_('err-no-host')
_('err-no-hostgroup')
_('err-no-hosts')
_('err-no-service')
_('err-no-sensor')
_('err-no-timeperiod')
_('err-notification-strategy-not-exists')
_('err-notification-strategy-used-contact')
_('err-notification-strategy-used-host')
_('err-notification-strategy-used-service')
_('err-timeperiod-not-exists')
_('eventhdl-en')
_('failpredict-en')
_('fail-tab')
_('flap-en')
_('Friday')
_('From')
_('General')
_('hostnotifcmd')
_('hostnotifperiod')
_('hostoptdown')
_('hostoptflap')
_('hostoptrec')
_('hostoptsched')
_('hostoptunreach')
_('Host')
_('Hosts')
_('Hostgroup')
_('Hostgroups')
_('Hosttype')
_('Icon')
_('is-template')
_('max-check')
_('Members')
_('menu-name')
_('menu-title')
_('Modification')
_('Monitor')
_('Monday')
_('new-cmd')
_('new-contact')
_('new-contactgroup')
_('new-host')
_('new-hostgroup')
_('new-service')
_('new-strategy')
_('new-timeperiod')
_('No')
_('None')
_('not-implemented')
_('notif-en')
_('notif-interval')
_('notifperiod')
_('Notification-strategy')
_('Notification-strategy-hosts')
_('Notification-strategy-services')
_('Notification-strategies')
_('obs-over-srv')
_('Option')
_('parallel-check')
_('Parent')
_('passive-check-en')
_('perfdata')
_('Periods')
_('retainstatus')
_('retainnonestatus')
_('retry-check-interval')
_('rule-modify-cmd')
_('rule-modify-ctg')
_('rule-modify-contact')
_('rule-modify-hg')
_('rule-modify-host')
_('rule-modify-notif')
_('rule-modify-service')
_('rule-modify-timeperiod')
_('Saturday')
_('Save')
_('Services')
_('Since-icinga-start')
_('srvnotifcmd')
_('srvnotifperiod')
_('srvoptcrit')
_('srvoptflap')
_('srvoptrec')
_('srvoptsched')
_('srvoptunreach')
_('srvoptwarn')
_('Sunday')
_('Template')
_('Thursday')
_('Timeperiods')
_('title-cmd-edit')
_('title-edit-contact')
_('title-edit-contactgroup')
_('title-edit-service')
_('title-host-edit')
_('title-hostgroup-edit')
_('title-icinga')
_('To')
_('tooltip-cmd')
_('tooltip-cmdname')
_('total-problems')
_('total-sensors')
_('Tuesday')
_('Value')
_('Warn')
_('Wednesday')
_('Yes')
_('Command')
_('retainnonstatus')
_('err-fields-missing')
_('err-mail-invalid')
_('err-mail-match')
_('err-name-invalid')
_('err-pwd-match')
_('err-pwd-too-weak')
_('err-step-invalid')
_('err-surname-invalid')
_('err-username-invalid')
_('Finish')
_('Lets-Go')
_('Mail')
_('Mail-repeat')
_('menu-title')
_('Option')
_('Password')
_('Password-repeat')
_('Send')
_('Surname')
_('title-admin-set')
_('title-master-install')
_('title-welcome')
_('Username')
_('Value')
_('text-admin-set')
_('text-finish')
_('text-welcome')
_('title-install-finished')
_('Add')
_('add-cluster')
_('add-to-dynamic-distrib')
_('Advanced-tools')
_('Available-s')
_('bad-filter')
_('Baux')
_('boolean')
_('change-interval')
_('choose-net')
_('Cluster-master')
_('Cluster-members')
_('Cluster-mode')
_('Cluster-name')
_('clustering-ip')
_('create-option')
_('create-option-group')
_('Comment')
_('configure-ip-range')
_('confirm-import-reserv')
_('confirm-remove-dhcp-cluster')
_('confirm-remove-custom-option')
_('confirm-remove-declared-subnet')
_('confirm-remove-dhcp')
_('confirm-remove-option')
_('Consult')
_('Contact')
_('create-custom-option')
_('crit-line')
_('days')
_('declare-subnet')
_('default-lease-time')
_('dhcp-cluster')
_('dhcp-clusters')
_('dhcp-type')
_('dhcpd-path')
_('Distributed')
_('Distributed-by-ipmanager')
_('DNS')
_('domain-name')
_('end-ip')
_('En-IP-history')
_('En-monitor')
_('err-bad-datas')
_('err-bad-hostname')
_('err-bad-ip-addr')
_('err-bad-mac-addr')
_('err-bad-range-action')
_('err-bad-subnet')
_('err-clustermaster-iscdhcp')
_('err-clustermaster-not-in-members')
_('err-clustermode-require-two')
_('err-cluster-already-exists')
_('err-cluster-need-members')
_('err-cluster-not-exists')
_('err-comment-too-long')
_('err-custom-option-already-exists')
_('err-custom-option-not-exists')
_('err-dlease-sup-mlease')
_('err-dhcpserver-invalid-alias')
_('err-dhcpserver-invalid-clusteraddr')
_('err-dhcpserver-dhcpdpath')
_('err-dhcpserver-leasepath')
_('err-dhcpserver-not-exists')
_('err-dhcpserver-reservconf')
_('err-dhcpserver-subnetconf')
_('err-dhcp-option-not-exists')
_('err-dhcp-opts-group-already-exists')
_('err-dhcp-opts-group-not-exists')
_('err-distrib-subnet-need-infos')
_('err-endip-not-in-range')
_('err-expiration-date-invalid')
_('err-expiration-date-must-be-today-or-more')
_('err-hostname-already-defined')
_('err-hostname-already-used')
_('err-invalid-csv')
_('err-invalid-csv-entry')
_('err-invalid-csv-entry-multiple-hostname')
_('err-invalid-csv-entry-multiple-ip')
_('err-invalid-csv-entry-multiple-mac')
_('err-invalid-hostname')
_('err-invalid-ip')
_('err-invalid-mac')
_('err-invalid-req')
_('err-ip-not-in-cache')
_('err-ip-already-used')
_('err-ip-not-in-subnet')
_('err-ip-not-in-declared-subnets')
_('err-mac-already-used')
_('err-mac-already-used-in-subnet')
_('err-miss-data')
_('err-need-dhcp-server')
_('err-no-dhcp-custom-option')
_('err-no-dhcp-option')
_('err-no-info-for-ip-addr')
_('err-option-already-exists')
_('err-option-code-lower-255')
_('err-option-code-protected')
_('err-option-not-exists')
_('err-option-value-invalid')
_('err-pwd-not-match')
_('err-remove-dhcpserver-master')
_('err-remove-dhcpserver-master2')
_('err-reserv-need-fields')
_('err-router-not-in-subnet')
_('err-subnet-already-exists')
_('err-subnet-bad-ipv6-infos')
_('err-subnet-not-exists')
_('err-ssh-conn-failed')
_('err-ssh-auth-failed')
_('err-startip-lower-endip')
_('err-startip-not-in-range')
_('err-unable-read')
_('err-vlan-already-used')
_('Expert-tools')
_('Expiration')
_('Failover')
_('fail-tab')
_('Free')
_('Free-s')
_('Groupname')
_('Group-DHCP-opts')
_('History')
_('Hostname')
_('import-dhcp-reserv')
_('integer-8')
_('integer-16')
_('integer-32')
_('intval-days')
_('in-cache')
_('IP-Addr')
_('last-view')
_('Lease-end')
_('lease-path')
_('Loadbalancing')
_('MAC-Addr')
_('Manage-DHCP-Opts')
_('Manage-Servers')
_('Manage-Subnets')
_('max-age')
_('max-lease-time')
_('member-of')
_('menu-name')
_('menu-title')
_('modif-record')
_('Monitoring')
_('netid')
_('netidv6')
_('netmask')
_('Network')
_('None')
_('note-needed')
_('no-net-found')
_('no-old-record')
_('no-tab')
_('not-usable')
_('options')
_('option-alias')
_('option-code')
_('option-group')
_('option-name')
_('option-type')
_('option-value')
_('os-name')
_('Port')
_('prefixlen')
_('Remove')
_('remove-from-dynamic-distrib')
_('remove-history')
_('required-if-cluster')
_('reservconf-path')
_('Reserv')
_('Reserved')
_('Reserved-by-ipmanager')
_('Reservations')
_('router')
_('rule-manage-server')
_('Save')
_('Search')
_('Server')
_('Servers')
_('server-addr')
_('server-alias')
_('server-desc')
_('Server-mgmt')
_('ssh-user')
_('ssh-pwd')
_('ssh-pwd-repeat')
_('start-ip')
_('Stats')
_('Status')
_('Stuck-IP')
_('subnetconf-path')
_('subnet-desc')
_('subnet-shortname')
_('Switch')
_('text')
_('tip-custom-dhcp-opts')
_('tip-dhcp-opts')
_('tip-dhcp-opt-group')
_('tip-import-reserv')
_('tip-inherit-if-null')
_('tip-range')
_('tip-reserv')
_('title-add-server')
_('title-custom-dhcp-opts')
_('title-declared-subnets')
_('title-dhcp-cluster-mgmt')
_('title-dhcp-opts')
_('title-dhcp-opts-group')
_('title-dhcp-server-mgmt')
_('title-history-1d')
_('title-history-1w')
_('title-history-1m')
_('title-history-1y')
_('title-history-since')
_('title-ip-management')
_('title-old-record')
_('title-remove-dhcp')
_('title-search-old')
_('title-subnet-management')
_('tooltip-clustermaster')
_('tooltip-clustermode')
_('tooltip-clustering-ip')
_('tooltip-comment')
_('tooltip-contact')
_('tooltip-default-lease-time')
_('tooltip-desc')
_('tooltip-dhcpdpath')
_('tooltip-dhcp-alias')
_('tooltip-dhcp-cluster-distrib')
_('tooltip-dhcp-desc')
_('tooltip-dhcp-option-code')
_('tooltip-dhcp-option-group')
_('tooltip-dhcp-option-value')
_('tooltip-dhcp-server-ip')
_('tooltip-domainname')
_('tooltip-import-reserv')
_('tooltip-ipv6')
_('tooltip-ip-expiration')
_('tooltip-ip-hostname')
_('tooltip-ip-reserv')
_('tooltip-leasepath')
_('tooltip-max-age')
_('tooltip-max-lease-time')
_('tooltip-prefixlen')
_('tooltip-reservconfpath')
_('tooltip-router')
_('tooltip-shortname')
_('tooltip-subnetconfpath')
_('tooltip-vlanid')
_('tooltip-%use')
_('uinteger-8')
_('uinteger-16')
_('uinteger-32')
_('Usable')
_('Used')
_('vlanid')
_('warn-line')
_('confirm-remove-reservation')
_('err-already-exists')
_('rule-manage-servers')
_('title-remove-server')
_('tooltip-ip-comment')
_('Add')
_('All')
_('confirm-remove-groupright')
_('confirm-remove-userright')
_('err-already-exist')
_('err-bad-datas')
_('err-no-subnet')
_('err-not-found')
_('Filter')
_('Go')
_('group-rights')
_('Groups')
_('ip-addr')
_('Login')
_('Modification')
_('None')
_('Return')
_('Right')
_('right-advancedtools')
_('right-history')
_('right-ipmgmt')
_('right-optionsgrpmgmt')
_('right-optionsmgmt')
_('right-rangemgmt')
_('right-read')
_('right-servermgmt')
_('right-subnetmgmt')
_('Save')
_('Server')
_('title-bysubnet')
_('title-globalrights')
_('title-ipmrightsmgmt')
_('Type')
_('User')
_('user-rights')
_('Users')
_('Writing')
_('Date')
_('Entry')
_('err-no-logs')
_('fail-tab')
_('Filter')
_('Level')
_('Module')
_('Service')
_('Stats')
_('User')
_('webapp')
_('Add-Edge')
_('Add-Node')
_('Color')
_('Dest-node')
_('err-edge-exists')
_('err-edge-not-exists')
_('err-no-node-found')
_('err-no-tab')
_('err-node-exists')
_('err-node-not-exists')
_('err-src-equal-dest')
_('fail-tab')
_('icinga-map')
_('Import')
_('Import-Nodes')
_('link-state')
_('net-map')
_('net-map-full')
_('PositionX')
_('PositionY')
_('Size')
_('Source-node')
_('title-maps')
_('Add-community')
_('database')
_('device-expiration')
_('dns-suffix')
_('err-community-already-exist')
_('err-read-netdisco')
_('err-readorwrite')
_('err-invalid-data')
_('err-community-not-exist')
_('err-no-snmp-community')
_('err-unable-read')
_('err-netdisco-write-fail')
_('General')
_('Go')
_('global-conf')
_('main-node')
_('mod-ok')
_('node-expiration')
_('pg-db')
_('pg-host')
_('pg-pwd')
_('pg-user')
_('Read')
_('snmp-community')
_('SNMP-communities')
_('snmp-conf')
_('snmp-read')
_('snmp-timeout')
_('snmp-try')
_('snmp-version')
_('snmp-write')
_('timer-conf')
_('title-netdisco')
_('tooltip-devicetimeout')
_('tooltip-dnssuffix')
_('tooltip-firstnode')
_('tooltip-nodetimeout')
_('tooltip-community-read')
_('tooltip-snmptimeout')
_('tooltip-snmptry')
_('tooltip-community-write')
_('Write')
_('Account')
_('Account-nb')
_('Acct-expiration-date')
_('Acct-start-date')
_('Add')
_('Administrate')
_('advanced-tools')
_('Alias')
_('Already-valid')
_('Auth-Type')
_('Attributes')
_('auto-import-dhcp')
_('confirm-remove-datasrc')
_('confirm-remove-group')
_('confirm-remove-subnetlink')
_('confirm-remove-user')
_('Creation-date')
_('db-name')
_('db-type')
_('Delete')
_('Delete-accounting')
_('Delete-logs')
_('Delete-profil')
_('Delete-subnet-import')
_('DHCP-zone')
_('enable-autoclean')
_('entitlement')
_('Error')
_('err-alias-already-used')
_('err-bad-server')
_('err-bad-tab')
_('err-db-conn-fail')
_('err-delete')
_('err-end-before-start')
_('err-exist')
_('err-exist2')
_('err-field-missing')
_('err-group-not-exists')
_('err-invalid-auth-server')
_('err-invalid-entry')
_('err-invalid-tab')
_('err-invalid-table')
_('err-miss-data')
_('err-no-server')
_('err-no-subnet-for-import')
_('err-no-user')
_('err-radius-not-exist')
_('expiration-field')
_('Expiration-Date')
_('fail-tab')
_('Filter')
_('From')
_('Generation-type')
_('Group')
_('Groups')
_('Host')
_('Identifier')
_('Infinite')
_('ip-addr-dns')
_('Login')
_('Mac-addr')
_('Manage')
_('Manage-radius-db')
_('mass-account-deleg')
_('mass-import')
_('mass-import-restriction')
_('menu-name')
_('menu-title')
_('mono-account-deleg')
_('New-Attribute')
_('New-Group')
_('New-Profil')
_('New-User')
_('None')
_('OK')
_('ok-user')
_('Other')
_('Password')
_('Password-repeat')
_('Permanent')
_('Period')
_('Port')
_('Prefix')
_('Profil')
_('Profilname')
_('Profils')
_('Pwd-Type')
_('Radius-profile')
_('random-name')
_('Return')
_('rule-db-mgmt')
_('rule-user-mgmt-deleg')
_('Save')
_('Server')
_('SQL-table')
_('Subname')
_('sure-delete-user')
_('Tables')
_('table-radacct')
_('table-radcheck')
_('table-radgrpchk')
_('table-radgrprep')
_('table-radreply')
_('table-radusrgrp')
_('Target')
_('Template')
_('Temporary')
_('title-add-radius')
_('title-auto-import')
_('title-auto-import2')
_('title-cleanusers')
_('title-deleg')
_('title-edit-radius')
_('title-groupmod')
_('title-mass-import')
_('title-profillist')
_('title-radius-db')
_('title-userlist')
_('title-usermod')
_('title-usermgmt')
_('To')
_('tooltip-ac-expirationfield')
_('tooltip-ac-sqltable')
_('tooltip-ac-sqluserfield')
_('tooltip-alias')
_('tooltip-dbname')
_('tooltip-port')
_('tooltip-radacct')
_('tooltip-radcheck')
_('tooltip-radgrpchk')
_('tooltip-radgrprep')
_('tooltip-radreply')
_('tooltip-radusrgrp')
_('tooltip-user')
_('Type')
_('User')
_('user-field')
_('User-nb')
_('Userlist-CSV')
_('User-type')
_('Users')
_('Value')
_('Validity')
_('Attribut')
_('mass-account')
_('mono-account')
_('tooltip-ac-sqlexpirationfield')
_('Active')
_('Accounting')
_('active-reserv')
_('Address')
_('Alias')
_('and-the')
_('attribution-type')
_('Between')
_('Bytes')
_('Classic')
_('comment')
_('Description')
_('Device')
_('DHCP-name')
_('DHCP-type')
_('dig-results')
_('dynamic')
_('dhcp-hostname')
_('Download')
_('end-session')
_('end-session-cause')
_('err-no-res')
_('err-no-search')
_('First-view')
_('Forward-only')
_('Groups')
_('inactive-reserv')
_('Informations')
_('ipv4-addr')
_('ipv6-addr')
_('Last-view')
_('link-ip')
_('link-mac-addr')
_('machine-FQDN')
_('Manufacturer')
_('menu-title')
_('Model')
_('Modify-IPM-Infos')
_('netbios-machine')
_('netbios-user')
_('netid')
_('netmask')
_('Network')
_('Network-device')
_('Node')
_('option-alias')
_('option-code')
_('option-group')
_('option-name')
_('option-type')
_('option-value')
_('os')
_('Other')
_('Plug')
_('Radius-Server')
_('Radius-user')
_('Ref-desc')
_('Ref-plug')
_('Ref-room')
_('Room')
_('Search')
_('Since')
_('start-session')
_('Static')
_('static')
_('subnet-shortname')
_('title-8021x-bw')
_('title-8021x-users')
_('title-dhcp-custom-options')
_('title-dhcp-distrib')
_('title-dhcp-distrib-z-eye')
_('title-dhcp-hostname')
_('title-dhcp-options')
_('title-dhcp-option-groups')
_('title-dhcp-servers')
_('title-dns-acl')
_('title-dns-assoc')
_('title-dns-cluster')
_('title-dns-records')
_('title-dns-resolution')
_('title-dns-server')
_('title-dns-zone')
_('title-ip-addr')
_('title-last-device')
_('title-mac-addr')
_('title-netbios')
_('title-netbios-name')
_('title-network-places')
_('title-res-nb')
_('title-subnet-ipmanager')
_('title-vlan-device')
_('title-vlan-ipmanager')
_('Total')
_('Unknown')
_('Upload')
_('User')
_('Username')
_('Validity')
_('vlanid')
_('Zone-type')
_('Active?')
_('DHCP-cluster')
_('Members')
_('title-dhcp-cluster')
_('Action-nb')
_('Alert')
_('Date')
_('Destination')
_('General')
_('IP-addr')
_('last-100')
_('Last-logs')
_('Last-visit')
_('Maximum')
_('nb-ip-atk')
_('nb-scan-port')
_('nb-ssh-atk')
_('nb-tse-atk')
_('No-alert-found')
_('Scans')
_('Security')
_('Source')
_('SSH')
_('SSH-atk')
_('The')
_('total-atk')
_('TSE')
_('TSE-atk')
_('title-attack-report')
_('title-z-eye-report')
_('Update')
_('violent-days')
_('Add-community')
_('confirm-remove-community')
_('err-already-exist')
_('err-read-fail')
_('err-readorwrite')
_('err-invalid-data')
_('err-not-exist')
_('err-no-snmp-community')
_('err-unable-read')
_('err-write-fail')
_('OK')
_('Read')
_('snmp-community')
_('snmp-communities')
_('snmp-conf')
_('snmp-read')
_('snmp-timeout')
_('snmp-try')
_('snmp-version')
_('snmp-write')
_('tooltip-read')
_('tooltip-write')
_('Activate')
_('bad-data')
_('data-storage')
_('Database')
_('en-smtp-sensor')
_('en-ssh-sensor')
_('en-telnet-sensor')
_('en-tse-sensor')
_('fail-cron-wr')
_('fail-snort-conf-wr')
_('fail-tab')
_('lan-list')
_('menu-name')
_('menu-title')
_('General')
_('mod-in-progress')
_('page-title')
_('Password')
_('pg-host')
_('port-ftp')
_('port-http')
_('port-imap')
_('port-oracle')
_('port-pop')
_('port-sip')
_('port-smtp')
_('port-ssh')
_('prev-hour')
_('Register')
_('Remote')
_('Reports')
_('sent-hour')
_('sql-oracle')
_('srv-dns')
_('srv-ftp')
_('srv-http')
_('srv-imap')
_('srv-oracle')
_('srv-pop')
_('srv-sip')
_('srv-smtp')
_('srv-snmp')
_('srv-sql')
_('srv-ssh')
_('srv-telnet')
_('srv-tse')
_('title-nightreport')
_('title-we')
_('tooltip-ipv4')
_('tooltip-snort-port')
_('tooltip-prev-hour')
_('User')
_('Active')
_('admin-duplex')
_('admin-speed')
_('Advanced-Functions')
_('Advanced-tools')
_('All')
_('Apply')
_('Apply-VLAN')
_('backuporder-launched')
_('backuporder-terminated')
_('backup-all-switches')
_('Building')
_('bw-stats')
_('cdp-enable')
_('cdp-tooltip')
_('Channel')
_('Configuration')
_('confirm-remove-device')
_('Connected-devices')
_('Contact')
_('Copy-in-progress')
_('creation-date')
_('crit-step')
_('Description')
_('Details')
_('Device')
_('Device-detail')
_('dhcp-snooping-rate')
_('dhcp-snooping-rate-tooltip')
_('dhcp-snooping-trust-enable')
_('dhcp-snooping-trust-tooltip')
_('Disabled')
_('Discover')
_('Discover-device')
_('Discovering-in-progress')
_('done-with-success')
_('Dot1x-hostm')
_('Duplex')
_('Enable')
_('Enabled')
_('enable-monitor')
_('encap-vlan')
_('Energy')
_('err-auth-fail')
_('err-bad-datas')
_('err-bad-ip')
_('err-conn-fail')
_('err-csv-replace-data')
_('err-enable-auth-fail')
_('err-fail-mod-switch')
_('err-fail-tab')
_('err-invalid-csv')
_('err-invalid-csv-device')
_('err-invalid-csv-entry')
_('err-invalid-csv-port')
_('err-invalid-export')
_('err-no-credentials')
_('err-no-device')
_('err-no-device2')
_('err-no-port-bw')
_('err-no-sshlink-configured')
_('err-one-bad-value')
_('err-output')
_('err-output-value')
_('err-pwd-mismatch')
_('err-not-implemented')
_('err-no-snmp-cache')
_('err-no-snmp-community')
_('err-no-tab')
_('err-no-vlan')
_('err-some-backup-fail')
_('err-some-field-missing')
_('err-thereis-errors')
_('err-transfer-abandonned')
_('err-transfer-apply')
_('err-transfer-no-mem')
_('err-transfer-not-ready')
_('err-transfer-protocol')
_('err-transfer-right')
_('err-transfer-timeout')
_('err-transfer-src')
_('err-transfer-unk')
_('err-unhandled-port-number')
_('err-unknown-action')
_('err-vlan-fail')
_('err-vlan-not-on-device')
_('Export')
_('Fail')
_('fail-vlan')
_('Field')
_('Filename')
_('frontview')
_('generated-mrtg')
_('Go')
_('iface-dev-cfg')
_('iface-dev-status')
_('Inactive')
_('Internal-mod')
_('IP-addr')
_('ip-network')
_('Label')
_('MAB-dead')
_('MAB-dead-tooltip')
_('MAB-fail-tooltip')
_('MAB-noresp')
_('MAB-noresp-tooltip')
_('MAB-opt')
_('MAC-addr')
_('MAC-addr-iface')
_('Match-MAC-addr')
_('menu-name')
_('menu-title')
_('mod-in-progress')
_('Model')
_('Monitoring')
_('MTU')
_('multi-auth')
_('multi-domain')
_('multi-host')
_('must-confirm')
_('must-verify-ports')
_('native-vlan')
_('Network')
_('new-vlanid')
_('None')
_('Offline')
_('old-vlanid')
_('on')
_('OS')
_('Others')
_('Password')
_('Place')
_('Plug')
_('Portlist')
_('portsecurity')
_('portsec-enable')
_('portsec-maxmac')
_('portsec-maxmac-tooltip')
_('portsec-status')
_('portsec-violmode')
_('portsec-viol-tooltip')
_('Power')
_('Protect')
_('Remove')
_('req-sent')
_('Restore')
_('restore-in-progress')
_('Restrict')
_('Room')
_('rule-discover-device')
_('rule-export-cfg')
_('rule-import-plugs')
_('rule-save-devices')
_('Running-Cfg')
_('Save')
_('save-all-switches')
_('saveorder-launched')
_('saveorder-terminated')
_('Save-switch')
_('search-ports')
_('Send')
_('Serialnb')
_('Server-addr')
_('Server-type')
_('Shut')
_('Shutdown')
_('Shutdown-ports')
_('single-host')
_('Speed')
_('SSH')
_('ssh-link-state')
_('Startup-Cfg')
_('State')
_('Success')
_('sure-remove-device')
_('switch-view')
_('Switchon-ports')
_('switchport-mode')
_('tip-discover-devices')
_('tip-import-plug-room')
_('title-dhcpsnooping')
_('title-global-fct')
_('title-import-plug-room')
_('title-managerights')
_('title-network-device-mgmt')
_('title-port-modiflist')
_('title-restore-startup')
_('title-retag')
_('title-router-switch')
_('title-transfer-conf')
_('title-WiFi-AP')
_('tooltip-backup')
_('tooltip-port-desc')
_('tooltip-dhcpsnoopingen')
_('tooltip-dhcpsnoopingmatch')
_('tooltip-dhcpsnoopingopt')
_('tooltip-dhcpsnoopingvlan')
_('tooltip-plug')
_('tooltip-room')
_('tooltip-save')
_('tooltip-saveone')
_('tooltip-shut')
_('tooltip-speed')
_('transfer-way')
_('Type')
_('Unavailable')
_('unk-answer')
_('Uptime')
_('User')
_('Use-DHCP-opt-82')
_('Value')
_('Verify-ports')
_('Version')
_('Violation')
_('voice-vlan')
_('Vlan')
_('VLANlist')
_('VTP-domain')
_('warn-step')
_('enable-pwd')
_('enable-pwd-repeat')
_('sent-req')
_('SSH-pwd')
_('SSH-pwd-repeat')
_('tooltip-voicevlan')
_('Add')
_('All')
_('confirm-remove-backupsrv')
_('confirm-remove-groupright')
_('confirm-remove-userright')
_('device')
_('DHCP-Snooping-mgmt')
_('err-already-exist')
_('err-bad-datas')
_('err-no-backup-found')
_('err-no-snmp-community')
_('err-not-found')
_('err-snmpgid-not-found')
_('Export-cfg')
_('Filter')
_('Go')
_('group-rights')
_('Groups')
_('ip-addr')
_('Login')
_('menu-name')
_('menu-title')
_('Modification')
_('New-Server')
_('None')
_('Password')
_('Password-repeat')
_('Portmod-cdp')
_('Portmod-dhcpsnooping')
_('Portmod-portsec')
_('Portmod-voicevlan')
_('Reading')
_('Read-port-stats')
_('Read-ssh-portinfos')
_('Read-ssh-showstart')
_('Read-ssh-showrun')
_('Read-switch-details')
_('Read-switch-modules')
_('Read-switch-vlan')
_('Remove-Switch')
_('Restore-startup-cfg')
_('Retag-vlan')
_('Return')
_('Right')
_('rule-manage-serverbackup')
_('Save')
_('Server')
_('server-path')
_('Set-switch-sshpwd')
_('snmp-community')
_('srv-type')
_('title-device-backup')
_('title-edit-backup-switch-server')
_('title-rightsbysnmp')
_('title-rightsbyswitch')
_('title-switchrightsmgmt')
_('Type')
_('User')
_('user-rights')
_('Users')
_('Writing')
_('Write-port-mon')
_('rule-manage-backupservers')
_('Adding')
_('Add-to-new-group')
_('attr-mail')
_('attr-name')
_('attr-subname')
_('attr-uid')
_('base-dn')
_('confirm-removedirectory')
_('confirm-removeuser')
_('Directory')
_('Editing')
_('err-group-not-exists')
_('err-invalid-bad-data')
_('err-invalid-user')
_('err-ldap-bad-data')
_('err-ldap-exist')
_('err-ldap-not-exist')
_('err-mail')
_('err-pwd-complex')
_('err-pwd-match')
_('err-pwd-short')
_('err-pwd-unk')
_('err-user-already-exists')
_('err-user-not-found')
_('Extern')
_('Group')
_('Groups')
_('Import')
_('import-user')
_('inscription')
_('Intern')
_('last-conn')
_('last-ip')
_('ldap-addr')
_('ldap-filter')
_('ldap-port')
_('menu-name')
_('menu-title')
_('Mail')
_('Modify')
_('new-directory')
_('None')
_('Password')
_('Password-repeat')
_('port')
_('Remove')
_('root-dn')
_('root-pwd')
_('Save')
_('Server')
_('SSL')
_('Subname')
_('rule-import-user')
_('rule-modify-directory')
_('rule-modify-user')
_('Template')
_('tip-password')
_('title-directory')
_('title-directorymgmt')
_('title-user-dont-exist')
_('title-usermgmt')
_('title-user-mod')
_('tooltip-attr-mail')
_('tooltip-attr-name')
_('tooltip-attr-subname')
_('tooltip-attr-uid')
_('tooltip-base-dn')
_('tooltip-ldap-filter')
_('tooltip-root-dn')
_('User')
_('User-type')
_('filter-ldap')
_('Account-parameters')
_('Android-options')
_('API-Key')
_('App-Lang')
_('Disconnect-after')
_('Enable-Monitoring')
_('err-bad-lang')
_('tooltip-disconnect-after')
_('Connection')
_('Supervision')
_('Speed reporting')
_('DNS management')
_('Hypervision')
_('Users and rights')
_('Z-Eye groups management')
_('Icinga sensors')
_('Install')
_('DHCP/IP management')
_('IP manager')
_('Z-Eye Engine')
_('Z-Eye logs')
_('Maps')
_('Netdisco collect engine')
_('RADIUS servers')
_('Search')
_('Security reports')
_('SNMP communities')
_('SNORT IDS engine')
_('Network devices management')
_('Network devices (rights & backup)')
_('User management')
_('Add')
_('All')
_('confirm-remove-groupright')
_('confirm-remove-userright')
_('err-already-exist')
_('err-bad-datas')
_('err-no-subnet')
_('err-not-found')
_('Filter')
_('Go')
_('group-rights')
_('Groups')
_('ip-addr')
_('Login')
_('menu-name')
_('menu-title')
_('Modification')
_('None')
_('Return')
_('Right')
_('right-advancedtools')
_('right-history')
_('right-ipmgmt')
_('right-optionsgrpmgmt')
_('right-optionsmgmt')
_('right-rangemgmt')
_('right-read')
_('right-servermgmt')
_('right-subnetmgmt')
_('Save')
_('Server')
_('title-bysubnet')
_('title-globalrights')
_('title-ipmrightsmgmt')
_('Type')
_('User')
_('user-rights')
_('Users')
_('Writing')
|
nerzhul/Z-Eye
|
service/WebApp/Z_Eye/InterfaceManager/__init__.py
|
Python
|
gpl-2.0
| 31,485
|
[
"VisIt"
] |
e6b542afe6bc3f93933b0fd1054031b72844e7ac55798efa21939a1e031f626b
|
"""
It needs as input the final folder of bcbio
python sv-report.py --run report /path/to/final/bcbio
12% chances to work :)
"""
import os
import os.path as op
import yaml
import glob
from argparse import ArgumentParser
from collections import Counter, defaultdict
import pybedtools
from bcbio.distributed.transaction import file_transaction
from bcbio.utils import file_exists, splitext_plus, tmpfile, safe_makedir
from bcbio.install import _get_data_dir
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import gzip
from collections import Counter
import pandas as pd
# from ggplot import *
import vcf
def _sv_dist(fn_in):
count = Counter()
vcf_reader = vcf.Reader(open(fn_in, 'r'))
samples = vcf_reader.samples
remove_sr = remove_common = 0
for record in vcf_reader:
if record.genotype(samples[0])['GT'] == '0/0':
continue
if record.genotype(samples[1])['GT'] == '0/0':
if record.genotype(samples[0])['SR'] > 5:
# print record.genotype(samples[0])['GT']
# print record.genotype(samples[1])['GT']
# print record.ALT
count[(record.CHROM, record.var_subtype)] += 1
else:
remove_sr += 1
else:
remove_common += 1
print "Removed common %s, removed SR %s" % (remove_sr, remove_common)
return count
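# A hypothetical sketch of the Counter returned by _sv_dist, keyed by
# (chromosome, SV subtype) for calls private to the first sample:
#
#   Counter({('1', 'DEL'): 12, ('2', 'DUP'): 3, ('X', 'INV'): 1})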
def _parse_count(count, sample):
tab = []
row = 0
for k in count:
row +=1
tab.append([sample, k[0], k[1], count[k]])
return tab
def simple_report(data, args):
summary = []
for sample in data:
print sample[0]['files']['lumpy-pair.vcf']
dt = _sv_dist(sample[0]['files']['lumpy-pair.vcf'])
dt = pd.DataFrame(_parse_count(dt, sample[0]['name']))
# print dt.columns
dt.columns = ['sample', 'chrom', 'sv', 'counts']
# p = ggplot(aes(x="chrom", y="counts", fill="sv"), data=dt) + geom_bar(position = 'dodge', stat = 'identity')
out_file = op.join(args.out, sample[0]['name'] + "_lumpy.tsv")
dt.to_csv(out_file, sep='\t', index=False)
summary.append(dt)
#ggsave(p, sample[0]['name'] + "_lumpy.png")
out_file = op.join(args.out, "lumpy.tsv")
pd.concat(summary).to_csv(out_file, sep='\t', index=False)
def _get_samples(out_dir):
data = defaultdict(dict)
for fn in glob.glob(op.join(out_dir,'*/*')):
if fn.endswith('tbi') or fn.endswith('bai'):
continue
rel_path = fn.split(out_dir)[1][1:]
sample = rel_path.split("/")[0]
fn_type, ext = splitext_plus(rel_path.split("/")[1].replace(sample + "-", ""))
if sample.find("Tumor") > -1:
data[sample][fn_type + ext.replace(".gz", "")] = fn
return data
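# Assuming the usual bcbio "final" layout (names below are illustrative), e.g.
#   final/Sample1-Tumor/Sample1-Tumor-lumpy-pair.vcf.gz
# _get_samples would produce:
#   data['Sample1-Tumor']['lumpy-pair.vcf'] = 'final/Sample1-Tumor/Sample1-Tumor-lumpy-pair.vcf.gz'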
def _prepare_samples(args):
"""
    create a dict for each sample holding all of its information
"""
# if args.galaxy:
# system_config = args.galaxy
# else:
# system_config = os.path.join(_get_data_dir(), "galaxy", "bcbio_system.yaml")
# config = yaml.load(open(system_config))
# config['algorithm'] = {}
data = []
raw = _get_samples(args.files[0])
for sample in raw:
dt = {}
dt['name'] = sample
dt['files'] = raw[sample]
# dt['config'] = config
data.append([dt])
return data
if __name__ == "__main__":
parser = ArgumentParser(description="clean SV VCF files.")
parser.add_argument("--region", help="bed file with regions.")
parser.add_argument("files", nargs="*", help="final folder of bcbio.")
parser.add_argument("--run", required=1, help="Clean VCF file.", choices=['report'])
    # args.out is used below but was never declared as an option; default to the current directory.
    parser.add_argument("--out", default=".", help="output directory for the report tables.")
    args = parser.parse_args()
safe_makedir(args.out)
if args.run == 'report':
data = _prepare_samples(args)
simple_report(data, args)
|
lpantano/eggs
|
svreport/sv-report.py
|
Python
|
gpl-2.0
| 3,843
|
[
"Galaxy"
] |
b6d5102c617d23003d29656756b50984a046dbd91e0db702d6b41770b026ff28
|
# -*- coding: utf-8 -*-
# Part of the psychopy.iohub library.
# Copyright (C) 2012-2016 iSolver Software Solutions
# Distributed under the terms of the GNU General Public License (GPL).
"""iohub wintab util objects / functions for stylus, position traces,
and validation process psychopy graphics.
"""
from __future__ import division, absolute_import
import math
from collections import OrderedDict
import numpy as np
from psychopy import visual, core
from psychopy.visual.basevisual import MinimalStim
class PenPositionStim(MinimalStim):
"""Draws the current pen x,y position with graphics that represent the
pressure, z axis, and tilt data for the wintab sample used."""
def __init__(self, win, min_opacity=0.0, hover_color=(255,0,0),
touching_color=(0,255,0), tiltline_color=(255,255, 0),
tiltline_width=2,
min_size=0.033, size_range=0.1666, tiltline_scalar=1.0,
name=None, autoLog=None, depth=-10000):
self.win = win
self.depth = depth
super(PenPositionStim, self).__init__(name, autoLog)
# Pen Hovering Related
        # Opacity is changed based on pen's z axis if data for z axis
# is available. Opacity of min_opacity is used when pen is at the
# furthest hover distance (z value) supported by the device.
# Opacity of 1.0 is used when z value == 0, meaning pen is touching
# digitizer surface.
self.min_opacity = min_opacity
# If z axis is supported, hover_color specifies the color of the pen
# position dot when z val > 0.
self.hover_color = hover_color
# Pen Pressure Related
# Smallest radius (in norm units) that the pen position gaussian blob
# will have, which occurs when pen pressure value is 0
self.min_size = min_size
# As pen pressure value increases, so does position gaussian blob
# radius (in norm units). Max radius is reached when pressure is
# at max device pressure value, and is equal to min_size+size_range
self.size_range = size_range
# Color of pen position blob when pressure > 0.
self.touching_color = touching_color
# Pen tilt Related
# Color of line graphic used to represent the pens tilt relative to
# the digitizer surface.
self.tiltline_color = tiltline_color
self.tiltline_width = tiltline_width
self.tiltline_scalar = tiltline_scalar
        # Create a Gaussian blob stim to use for pen position graphic
self.pen_guass = visual.PatchStim(win, units='norm', tex='none',
mask='gauss', pos=(0, 0), colorSpace='rgb255',
size=(self.min_size,self.min_size),
color=self.hover_color,
autoLog=False,
opacity=0.0)
# Create a line stim to use for pen position graphic
self.pen_tilt_line = visual.Line(win, units='norm', start=[0, 0],
lineWidth=self.tiltline_width,
end=[0.5, 0.5],
lineColorSpace='rgb255',
lineColor=self.tiltline_color,
opacity=0.0)
def updateFromEvent(self, evt):
"""Update the pen position and tilt graphics based on the data from
a wintab sample event.
:param evt: iohub wintab sample event
:return:
"""
# update the pen position stim based on
# the last tablet event's data
if evt.pressure > 0:
# pen is touching tablet surface
self.pen_guass.color = self.touching_color
else:
# pen is hovering just above tablet surface
self.pen_guass.color = self.hover_color
if evt.device.axis['pressure']['supported']:
# change size of pen position blob based on samples pressure
# value
pnorm = evt.pressure / evt.device.axis['pressure']['range']
self.pen_guass.size = self.min_size + pnorm * self.size_range
# set the position of the gauss blob to be the pen x,y value converted
# to norm screen coords.
self.pen_guass.pos = evt.getNormPos()
# if supported, update all graphics opacity based on the samples z value
# otherwise opacity is always 1.0
if evt.device.axis['z']['supported']:
z = evt.device.axis['z']['range'] - evt.z
znorm = z / evt.device.axis['z']['range']
sopacity = self.min_opacity + znorm * (1.0 - self.min_opacity)
self.pen_guass.opacity = self.pen_tilt_line.opacity = sopacity
else:
self.pen_guass.opacity = self.pen_tilt_line.opacity = 1.0
# Change the tilt line start position to == pen position
self.pen_tilt_line.start = self.pen_guass.pos
# Change the tilt line end position based on samples tilt value
# If tilt is not supported, it will always return 0,0
# so no line is drawn.
t1, t2 = evt.tilt
pen_tilt_xy = 0, 0
if t1 != t2 != 0:
pen_tilt_xy = t1 * math.sin(t2), t1 * math.cos(t2)
pen_pos = self.pen_guass.pos
tiltend = (pen_pos[0] + pen_tilt_xy[0]*self.tiltline_scalar,
pen_pos[1] + pen_tilt_xy[1]*self.tiltline_scalar)
self.pen_tilt_line.end = tiltend
def draw(self):
"""Draw the PenPositionStim to the opengl back buffer. This needs
        to be called prior to calling win.flip() if the stim is to be
displayed.
:return: None
"""
self.pen_guass.draw()
self.pen_tilt_line.draw()
def clear(self):
"""Hide the graphics on the screen, even if they are drawn, by
setting opacity to 0.
:return: None
"""
self.pen_guass.opacity = 0.0
self.pen_tilt_line.opacity = 0.0
def __del__(self):
self.win = None
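# A minimal usage sketch for PenPositionStim (assumes an iohub connection with a
# configured wintab device; `win`, `io` and `kb` are placeholders, not part of this module):
#
#   pen_pos = PenPositionStim(win)
#   pen = io.devices.tablet
#   pen.reporting = True
#   while not kb.getPresses():
#       for evt in pen.getSamples():
#           pen_pos.updateFromEvent(evt)
#       pen_pos.draw()
#       win.flip()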
class PenTracesStim(MinimalStim):
"""Graphics representing where the pen has been moved on the digitizer
surface. Positions where sample pressure > 0 are included.
Implemented as a list of visual.ShapeStim, each representing a
    single pen trace/segment (a series of pen samples with pressure >
    0). For improved performance, a single pen trace can have at most
    max_trace_len points before a new ShapeStim is created and made
    the 'current' pen trace.
"""
def __init__( self, win, lineWidth=2, lineColor=(0, 0, 0), opacity=1.0,
maxlen=256, name=None, autoLog=None, depth=-1000):
self.depth = depth
self.win = win
super(PenTracesStim, self).__init__(name, autoLog)
# A single pen trace can have at most max_trace_len points.
self.max_trace_len = maxlen
# The list of ShapeStim representing pen traces
self.pentracestim = []
        # The ShapeStim that new / upcoming position points will be added to.
self.current_pentrace = None
# A list representation of the current_pentrace.vertices
self.current_points = []
# The last pen position added to a pen trace.
self.last_pos = [0, 0]
self.lineWidth=lineWidth
self.lineColor=lineColor
self.opacity=opacity
@property
def traces(self):
"""List of np arrays, each np array is the set of vertices for one
pen trace.
:return: list
"""
return [pts.vertices for pts in self.pentracestim]
def updateFromEvents(self, sample_events):
"""
Update the stim graphics based on 0 - n pen sample events.
:param sample_events:
:return: None
"""
for pevt in sample_events:
if 'FIRST_ENTER' in pevt.status:
self.end()
if pevt.pressure > 0:
lpx, lpy = self.last_pos
px, py = pevt.getPixPos(self.win)
if lpx != px or lpy != py:
if len(self.current_points) >= self.max_trace_len:
self.end()
self.append((lpx, lpy))
self.last_pos = (px, py)
self.append(self.last_pos)
else:
self.end()
def draw(self):
"""Draws each pen trace ShapeStim to the opengl back buffer. This
method must be called prior to calling win.flip() if it is to
appear on the screen.
:return: None
"""
for pts in self.pentracestim:
pts.draw()
def start(self, first_point):
"""Start a new pen trace, by creating a new ShapeStim, adding it to
the pentracestim list, and making it the current_pentrace.
        :param first_point: the first point in the ShapeStim being created.
:return: None
"""
self.end()
self.current_points.append(first_point)
self.current_pentrace = visual.ShapeStim(self.win,
units='pix',
lineWidth=self.lineWidth,
lineColor=self.lineColor,
lineColorSpace='rgb255',
vertices=self.current_points,
closeShape=False,
pos=(0, 0),
size=1,
ori=0.0,
opacity=self.opacity,
interpolate=True)
self.pentracestim.append(self.current_pentrace)
def end(self):
"""Stop using the current_pentrace ShapeStim. Next time a pen
sample position is added to the PenTracesStim instance, a new
        ShapeStim will be created and added to the pentracestim list.
:return: None
"""
self.current_pentrace = None
self.current_points = []
self.last_pos = [0, 0]
def append(self, pos):
"""Add a pen position (in pix coords) to the current_pentrace
ShapeStim vertices.
:param pos: (x,y) tuple
:return: None
"""
if self.current_pentrace is None:
self.start(pos)
else:
self.current_points.append(pos)
self.current_pentrace.vertices = self.current_points
def clear(self):
"""Remove all ShapStim being used. Next time this stim is drawn, no
pen traces will exist.
:return:
"""
self.end()
del self.pentracestim[:]
def __del__(self):
self.clear()
self.win = None
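# A minimal usage sketch for PenTracesStim (same assumed iohub wintab setup as above;
# `pen`, `win` and `drawing_phase` are placeholders):
#
#   traces = PenTracesStim(win, lineWidth=2, lineColor=(0, 0, 0))
#   while drawing_phase:
#       traces.updateFromEvents(pen.getSamples())
#       traces.draw()
#       win.flip()
#   traces.clear()  # drop all accumulated pen trace ShapeStim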
#
# Pen position validation process code
#
class ScreenPositionValidation(object):
NUM_VALID_SAMPLES_PER_TARG = 100
TARGET_TIMEOUT = 10.0
def __init__(self, win, io, target_stim=None, pos_grid=None,
display_pen_pos=True, force_quit=True, intro_title=None,
intro_text1=None, intro_text2=None, intro_target_pos=None):
"""ScreenPositionValidation is used to perform a pen position
accuracy test for an iohub wintab device.
        :param win: psychopy Window instance to use for the validation graphics
:param io: iohub connection instance
:param target_stim: None to use default, or psychopy.iohub.util.targetpositionsequence.TargetStim instance
:param pos_grid: None to use default, or psychopy.iohub.util.targetpositionsequence.PositionGrid instance
:param display_pen_pos: True to add calculated pen position graphic
:param force_quit: Not Used
:param intro_title: None to use default, str or unicode to set the text used for the introduction screen title, or an instance of psychopy.visual.TextStim
:param intro_text1: None to use default, str or unicode to set the text used for the introduction text part 1, or an instance of psychopy.visual.TextStim
:param intro_text2: None to use default, str or unicode to set the text used for the introduction text part 2, or an instance of psychopy.visual.TextStim
:param intro_target_pos: None to use default, or (x,y) position to place the target graphic on the introduction screen. (x,y) position must be specified in 'norm' coordinate space.
:return:
"""
from psychopy.iohub.util.targetpositionsequence import TargetStim, PositionGrid
self.win = win
self.io = io
self._lastPenSample = None
self._targetStim = target_stim
self._positionGrid = pos_grid
self._forceQuit = force_quit
self._displayPenPosition = display_pen_pos
# IntroScreen Graphics
intro_graphics = self._introScreenGraphics = OrderedDict()
# Title Text
title_stim = visual.TextStim(self.win, units='norm',
pos=(0, .9),
height=0.1,
text='Pen Position Validation')
if isinstance(intro_title, basestring):
title_stim.setText(intro_title)
elif isinstance(intro_title, visual.TextStim):
title_stim = intro_title
intro_graphics['title'] = title_stim
# Intro Text part 1
text1_stim = visual.TextStim(self.win, units='norm',
pos=(0, .65),
height=0.05,
text='On the following screen, '
'press the pen on the target '
'graphic when it appears, '
'as accurately as '
'possible, until the target '
'moves to a different '
'location. Then press at the '
'next target location. '
'Hold the stylus in exactly '
'the same way as you would '
'hold a pen for normal '
'handwriting.',
wrapWidth=1.25
)
if isinstance(intro_text1, basestring):
text1_stim.setText(intro_text1)
elif isinstance(intro_text1, visual.TextStim):
text1_stim = intro_text1
intro_graphics['text1'] = text1_stim
# Intro Text part 2
text2_stim = visual.TextStim(self.win, units='norm',
pos=(0, -0.2),
height=0.066,
color='green',
text='Press the pen on the above '
'target to start the '
'validation, or the ESC key '
'to skip the procedure.')
if isinstance(intro_text2, basestring):
text2_stim.setText(intro_text2)
elif isinstance(intro_text2, visual.TextStim):
text2_stim = intro_text2
intro_graphics['text2'] = text2_stim
self._penStim = None
if self._displayPenPosition:
# Validation Screen Graphics
self._penStim = visual.Circle(self.win,
radius=4,
fillColor=[255, 0, 0],
lineColor=[255, 0, 0],
lineWidth=0,
edges=8, # int(np.pi*radius),
units='pix',
lineColorSpace='rgb255',
fillColorSpace='rgb255',
opacity=0.9,
contrast=1,
interpolate=True,
autoLog=False)
if self._targetStim is None:
self._targetStim = TargetStim(win,
radius=16,
fillcolor=[64, 64, 64],
edgecolor=[192, 192, 192],
edgewidth=1,
dotcolor=[255, 255, 255],
dotradius=3,
units='pix',
colorspace='rgb255',
opacity=1.0,
contrast=1.0
)
if intro_target_pos:
self._targetStim.setPos(intro_target_pos)
intro_graphics['target'] = self._targetStim
if self._positionGrid is None:
self._positionGrid = PositionGrid(
winSize=win.monitor.getSizePix(),
shape=[
3,
3],
scale=0.9,
posList=None,
noiseStd=None,
firstposindex=0,
repeatfirstpos=True)
# IntroScreen Graphics
finished_graphics = self._finsihedScreenGraphics = OrderedDict()
finished_graphics['title'] = visual.TextStim(
self.win, units='norm', pos=(
0, .9), height=0.1, text='Validation Complete')
finished_graphics['result_status'] = visual.TextStim(
self.win, units='norm', pos=(
0, .7), height=0.07, color='blue', text='Result: {}')
finished_graphics['result_stats'] = visual.TextStim(self.win, units='norm', pos=(
0, .6), height=0.05, text='{}/{} Points Validated. Min, Max, Mean Errors: {}, {}, {}')
finished_graphics['exit_text'] = visual.TextStim(
self.win, units='norm', pos=(
0, .5), height=0.05, text='Press any key to continue...')
@property
def targetStim(self):
return self._targetStim
@targetStim.setter
def targetStim(self, ts):
self._targetStim = ts
@property
def positionGrid(self):
return self._positionGrid
@positionGrid.setter
def positionGrid(self, ts):
self._positionGrid = ts
def _enterIntroScreen(self):
kb = self.io.devices.keyboard
pen = self.io.devices.tablet
exit_screen = False
hitcount = 0
pen.reporting = True
kb.getPresses()
while exit_screen is False:
for ig in self._introScreenGraphics.values():
ig.draw()
samples = pen.getSamples()
if samples:
self._drawPenStim(samples[-1])
spos = samples[-1].getPixPos(self.win)
if samples[-1].pressure > 0 and \
self._introScreenGraphics['target'].contains(spos):
if hitcount > 10:
exit_screen = True
hitcount = hitcount + 1
else:
hitcount = 0
self.win.flip()
if 'escape' in kb.getPresses():
exit_screen = True
pen.reporting = False
return False
pen.reporting = False
return True
def _enterValidationSequence(self):
val_results = dict(target_data=dict(), avg_err=0, min_err=1000,
max_err=-1000, status='PASSED', point_count=0,
ok_point_count=0)
self._lastPenSample = None
kb = self.io.devices.keyboard
pen = self.io.devices.tablet
self._positionGrid.randomize()
pen.reporting = True
for tp in self._positionGrid:
self._targetStim.setPos(tp)
self._targetStim.draw()
targ_onset_time = self.win.flip()
pen.clearEvents()
val_sample_list = []
while len(val_sample_list) < self.NUM_VALID_SAMPLES_PER_TARG:
if core.getTime() - targ_onset_time > self.TARGET_TIMEOUT:
break
self._targetStim.draw()
samples = pen.getSamples()
for s in samples:
spos = s.getPixPos(self.win)
if s.pressure > 0 and self.targetStim.contains(spos):
dx = math.fabs(tp[0] - spos[0])
dy = math.fabs(tp[1] - spos[1])
perr = math.sqrt(dx * dx + dy * dy)
val_sample_list.append((spos[0], spos[1], perr))
else:
val_sample_list = []
if samples:
self._drawPenStim(samples[-1])
self._lastPenSample = samples[-1]
elif self._lastPenSample:
self._drawPenStim(self._lastPenSample)
self.win.flip()
tp = int(tp[0]), int(tp[1])
val_results['target_data'][tp] = None
val_results['point_count'] = val_results['point_count'] + 1
if val_sample_list:
pos_acc_array = np.asarray(val_sample_list)
serr_array = pos_acc_array[:, 2]
targ_err_stats = val_results['target_data'][tp] = dict()
targ_err_stats['samples'] = pos_acc_array
targ_err_stats['count'] = len(val_sample_list)
targ_err_stats['min'] = serr_array.min()
targ_err_stats['max'] = serr_array.max()
targ_err_stats['mean'] = serr_array.mean()
targ_err_stats['median'] = np.median(serr_array)
targ_err_stats['stdev'] = serr_array.std()
val_results['min_err'] = min(
val_results['min_err'], targ_err_stats['min'])
val_results['max_err'] = max(
val_results['max_err'], targ_err_stats['max'])
val_results['avg_err'] = val_results[
'avg_err'] + targ_err_stats['mean']
val_results['ok_point_count'] = val_results[
'ok_point_count'] + 1
else:
val_results['status'] = 'FAILED'
self._lastPenSample = None
if val_results['ok_point_count'] > 0:
val_results['avg_err'] = val_results[
'avg_err'] / val_results['ok_point_count']
pen.reporting = False
return val_results
def _enterFinishedScreen(self, results):
kb = self.io.devices.keyboard
status = results['status']
ok_point_count = results['ok_point_count']
min_err = results['min_err']
max_err = results['max_err']
avg_err = results['avg_err']
point_count = results['point_count']
self._finsihedScreenGraphics['result_status'].setText(
'Result: {}'.format(status))
self._finsihedScreenGraphics['result_stats'].setText(
'%d/%d '
'Points Validated.'
'Min, Max, Mean '
'Errors: '
'%.3f, %.3f, %.3f'
'' %
(ok_point_count, point_count, min_err, max_err, avg_err))
for ig in self._finsihedScreenGraphics.values():
ig.draw()
self.win.flip()
kb.clearEvents()
while not kb.getPresses():
for ig in self._finsihedScreenGraphics.values():
ig.draw()
self.win.flip()
def _drawPenStim(self, s):
if self._displayPenPosition:
spos = s.getPixPos(self.win)
if spos:
self._penStim.setPos(spos)
if s.pressure == 0:
self._penStim.setFillColor([255, 0, 0])
self._penStim.setLineColor([255, 0, 0])
else:
self._penStim.setFillColor([0, 0, 255])
self._penStim.setLineColor([0, 0, 255])
self._penStim.draw()
def run(self):
"""Starts the validation process. This function will not return
until the validation is complete. The validation results are
returned in dict format.
        :return: dict containing validation results.
"""
continue_val = self._enterIntroScreen()
if continue_val is False:
return None
        # delay about 0.5 sec before starting validation
ftime = self.win.flip()
while core.getTime() - ftime < 0.5:
self.win.flip()
self.io.clearEvents()
val_results = self._enterValidationSequence()
# delay about 0.5 sec before showing validation end screen
ftime = self.win.flip()
while core.getTime() - ftime < 0.5:
self.win.flip()
self.io.clearEvents()
self._enterFinishedScreen(val_results)
self.io.clearEvents()
self.win.flip()
return val_results
        # returning None indicates to the experiment that the validation process
# was terminated by the user
# return None
def free(self):
self.win = None
self.io = None
self._finsihedScreenGraphics.clear()
self._introScreenGraphics.clear()
self._targetStim = None
self._penStim = None
def __del__(self):
self.free()
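# A minimal usage sketch for ScreenPositionValidation (assumes a psychopy window `win` and
# an iohub connection `io` with a wintab device; both are placeholders here):
#
#   validation = ScreenPositionValidation(win, io, display_pen_pos=True)
#   results = validation.run()  # None if the user pressed ESC on the intro screen
#   if results:
#       print(results['status'], results['ok_point_count'], results['avg_err'])
#   validation.free()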
|
isolver/OpenHandWrite
|
distribution/getwrite/wintabtest/wintabgraphics.py
|
Python
|
gpl-3.0
| 25,925
|
[
"Gaussian"
] |
42467077ed34a1ce891a6bf52c5c6a4ebb28791e0744e095b9f93c772c05b845
|
"""
=======================================================
Filter fastq files based on reads unmapped in bam file
=======================================================
:Author: Nick Ilott
:Release: $Id$
:Date: |today|
:Tags: Python
"""
# load modules
from ruffus import *
import CGAT.Experiment as E
import logging as L
import CGAT.Database as Database
import CGAT.CSV as CSV
import sys
import os
import re
import shutil
import itertools
import math
import glob
import time
import gzip
import collections
import random
import numpy
import sqlite3
import CGAT.GTF as GTF
import CGAT.IOTools as IOTools
import CGAT.IndexedFasta as IndexedFasta
from rpy2.robjects import r as R
import rpy2.robjects as ro
import rpy2.robjects.vectors as rovectors
from rpy2.rinterface import RRuntimeError
import CGATPipelines.PipelineMapping as PipelineMapping
#import CGATPipelines.PipelineMetagenomeAssembly as PipelineMetagenomeAssembly
import CGAT.FastaIterator as FastaIterator
import CGAT.Metaphlan as Metaphlan
import CGATPipelines.PipelineMapping as PipelineMapping
import CGATPipelines.PipelineMappingQC as PipelineMappingQC
import pysam
import CGAT.Fastq as Fastq
###################################################
###################################################
###################################################
# Pipeline configuration
###################################################
# load options from the config file
import CGATPipelines.Pipeline as P
P.getParameters(
["pipeline.ini"])
PARAMS = P.PARAMS
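# A sketch of the pipeline.ini keys read below; the section/option names assume the usual
# CGAT flattening (reads_type, reads_invert) and the values are illustrative:
#
#   [reads]
#   type=unmapped
#   invert=0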
###################################################################
###################################################################
###################################################################
@transform(glob.glob("*.fastq.gz"),
regex("(\S+).fastq.gz"),
add_inputs(glob.glob("*.bam")),
r"filtered.\1.fastq.gz")
def filterFastq(infiles, outfile):
'''
    filter fastq file based on read type
    specified in bam file
'''
t = PARAMS.get("reads_type")
fastq = infiles[0]
track = P.snip(fastq, ".fastq.gz")
bams = infiles[1]
if len(bams) == 1:
bam = bams[0]
else:
bam = [bam for bam in bams if bam.find(track) != -1][0]
job_options="-l mem_free=30G"
if PARAMS.get("reads_invert") == 1:
invert = "--invert"
else:
invert = ""
statement = '''zcat %(fastq)s | python /ifs/projects/proj029/src/scripts/fastq2filteredfastq.py
-b %(bam)s
%(invert)s
--reads=%(t)s
--log=%(outfile)s.log
| gzip > %(outfile)s'''
P.run()
#########################################
#########################################
#########################################
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
|
CGATOxford/proj029
|
Proj029Pipelines/pipeline_fastqfilter.py
|
Python
|
bsd-3-clause
| 2,937
|
[
"pysam"
] |
6857e0aa45a1bc89bdd0dc14b3d6d89f0a0e5873820f6dec0a0e65056a1ab5b0
|
########################################################################
# $HeadURL $
# File: ProcessPoolTests.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2012/02/13 07:55:31
########################################################################
""" :mod: ProcessPoolTests
=======================
.. module: ProcessPoolTests
:synopsis: unit tests for ProcessPool
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
unit tests for ProcessPool
"""
__RCSID__ = "$Id $"
##
# @file ProcessPoolTests.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2012/02/13 07:55:46
# @brief Definition of ProcessPoolTests class.
## imports
import os
import unittest
import random
import time
import threading
## from DIRAC
# from DIRAC.Core.Base import Script
# Script.parseCommandLine()
from DIRAC import gLogger
## SUT
from DIRAC.Core.Utilities.ProcessPool import ProcessPool
def ResultCallback( task, taskResult ):
""" dummy result callback """
print "callback for %s result is %s" % ( task.getTaskID(), taskResult )
def ExceptionCallback( task, exec_info ):
""" dummy exception callback """
print "callback for %s exception is %s" % ( task.getTaskID(), exec_info )
def CallableFunc( taskID, timeWait, raiseException = False ):
""" global function to be executed in task """
print "pid=%s task=%s will sleep for %s s" % ( os.getpid(), taskID, timeWait )
time.sleep( timeWait )
if raiseException:
raise Exception( "testException" )
return timeWait
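# A minimal sketch of how these callables are queued (mirrors the tests below; the pool
# sizes are illustrative):
#
#   pool = ProcessPool( 2, 4, 8 )
#   pool.daemonize()
#   pool.createAndQueueTask( CallableFunc, taskID = 0, args = ( 0, 1, False ),
#                            callback = ResultCallback,
#                            exceptionCallback = ExceptionCallback,
#                            blocking = True )
#   pool.finalize( 2 )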
class CallableClass( object ):
""" callable class to be executed in task """
def __init__( self, taskID, timeWait, raiseException=False ):
self.log = gLogger.getSubLogger( self.__class__.__name__ + "/%s" % taskID )
self.taskID = taskID
self.timeWait = timeWait
self.raiseException = raiseException
def __call__( self ):
self.log.always( "pid=%s task=%s will sleep for %s s" % ( os.getpid(), self.taskID, self.timeWait ) )
time.sleep( self.timeWait )
if self.raiseException:
raise Exception("testException")
return self.timeWait
## global locked lock
gLock = threading.Lock()
# make sure it is locked
gLock.acquire()
## dummy callable locked class
class LockedCallableClass( object ):
""" callable and locked class """
def __init__( self, taskID, timeWait, raiseException=False ):
self.log = gLogger.getSubLogger( self.__class__.__name__ + "/%s" % taskID )
self.taskID = taskID
self.log.always( "pid=%s task=%s I'm locked" % ( os.getpid(), self.taskID ) )
gLock.acquire()
self.log.always("you can't see that line, object is stuck by gLock" )
self.timeWait = timeWait
self.raiseException = raiseException
gLock.release()
def __call__( self ):
self.log.always("If you see this line, miracle had happened!")
self.log.always("will sleep for %s" % self.timeWait )
time.sleep( self.timeWait )
if self.raiseException:
raise Exception("testException")
return self.timeWait
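# Note: LockedCallableClass is intended to get stuck in __init__ on the module-level gLock
# (acquired at import time), so tasks built from it should be killed by the task timeOut
# (15 s) in TaskTimeOutTests below rather than complete normally.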
########################################################################
class TaskCallbacksTests(unittest.TestCase):
"""
.. class:: TaskCallbacksTests
test case for ProcessPool
"""
def setUp( self ):
gLogger.showHeaders( True )
self.log = gLogger.getSubLogger( self.__class__.__name__ )
self.processPool = ProcessPool( 4, 8, 8 )
self.processPool.daemonize()
def testCallableClass( self ):
""" CallableClass and task callbacks test """
i = 0
while True:
if self.processPool.getFreeSlots() > 0:
timeWait = random.randint(0, 5)
raiseException = False
if not timeWait:
raiseException = True
result = self.processPool.createAndQueueTask( CallableClass,
taskID = i,
args = ( i, timeWait, raiseException ),
callback = ResultCallback,
exceptionCallback = ExceptionCallback,
blocking = True )
if result["OK"]:
self.log.always("CallableClass enqueued to task %s" % i )
i += 1
else:
continue
if i == 10:
break
self.processPool.finalize( 2 )
def testCallableFunc( self ):
""" CallableFunc and task callbacks test """
i = 0
while True:
if self.processPool.getFreeSlots() > 0:
timeWait = random.randint(0, 5)
raiseException = False
if not timeWait:
raiseException = True
result = self.processPool.createAndQueueTask( CallableFunc,
taskID = i,
args = ( i, timeWait, raiseException ),
callback = ResultCallback,
exceptionCallback = ExceptionCallback,
blocking = True )
if result["OK"]:
self.log.always("CallableClass enqueued to task %s" % i )
i += 1
else:
continue
if i == 10:
break
self.processPool.finalize( 2 )
########################################################################
class ProcessPoolCallbacksTests( unittest.TestCase ):
"""
.. class:: ProcessPoolCallbacksTests
test case for ProcessPool
"""
def setUp( self ):
"""c'tor
:param self: self reference
"""
gLogger.showHeaders( True )
self.log = gLogger.getSubLogger( self.__class__.__name__ )
self.processPool = ProcessPool( 4, 8, 8,
poolCallback = self.poolCallback,
poolExceptionCallback = self.poolExceptionCallback )
self.processPool.daemonize()
def poolCallback( self, taskID, taskResult ):
self.log.always( "callback for %s result is %s" % ( taskID, taskResult ) )
def poolExceptionCallback( self, taskID, taskException ):
self.log.always( "callback for %s exception is %s" % ( taskID, taskException ) )
def testCallableClass( self ):
""" CallableClass and pool callbacks test """
i = 0
while True:
if self.processPool.getFreeSlots() > 0:
timeWait = random.randint(0, 5)
raiseException = False
if not timeWait:
raiseException = True
result = self.processPool.createAndQueueTask( CallableClass,
taskID = i,
args = ( i, timeWait, raiseException ),
usePoolCallbacks = True,
blocking = True )
if result["OK"]:
self.log.always("CallableClass enqueued to task %s" % i )
i += 1
else:
continue
if i == 10:
break
self.processPool.finalize( 2 )
def testCallableFunc( self ):
""" CallableFunc and pool callbacks test """
i = 0
while True:
if self.processPool.getFreeSlots() > 0:
timeWait = random.randint(0, 5)
raiseException = False
if not timeWait:
raiseException = True
result = self.processPool.createAndQueueTask( CallableFunc,
taskID = i,
args = ( i, timeWait, raiseException ),
usePoolCallbacks = True,
blocking = True )
if result["OK"]:
self.log.always("CallableFunc enqueued to task %s" % i )
i += 1
else:
continue
if i == 10:
break
self.processPool.finalize( 2 )
########################################################################
class TaskTimeOutTests( unittest.TestCase ):
"""
.. class:: TaskTimeOutTests
test case for ProcessPool
"""
def setUp( self ):
"""c'tor
:param self: self reference
"""
gLogger.showHeaders( True )
self.log = gLogger.getSubLogger( self.__class__.__name__ )
self.processPool = ProcessPool( 2,
4,
8,
poolCallback = self.poolCallback,
poolExceptionCallback = self.poolExceptionCallback )
self.processPool.daemonize()
def poolCallback( self, taskID, taskResult ):
self.log.always( "callback result for %s is %s" % ( taskID, taskResult ) )
def poolExceptionCallback( self, taskID, taskException ):
self.log.always( "callback exception for %s is %s" % ( taskID, taskException ) )
def testCallableClass( self ):
""" CallableClass and task time out test """
i = 0
while True:
if self.processPool.getFreeSlots() > 0:
timeWait = random.randint( 0, 5 ) * 10
raiseException = False
if not timeWait:
raiseException = True
result = self.processPool.createAndQueueTask( CallableClass,
taskID = i,
args = ( i, timeWait, raiseException ),
timeOut = 15,
usePoolCallbacks = True,
blocking = True )
if result["OK"]:
self.log.always("CallableClass enqueued to task %s timeWait=%s exception=%s" % ( i, timeWait, raiseException ) )
i += 1
else:
continue
if i == 16:
break
self.processPool.finalize( 2 )
def testCallableFunc( self ):
""" CallableFunc and task timeout test """
i = 0
while True:
if self.processPool.getFreeSlots() > 0:
timeWait = random.randint(0, 5) * 5
raiseException = False
if not timeWait:
raiseException = True
result = self.processPool.createAndQueueTask( CallableFunc,
taskID = i,
args = ( i, timeWait, raiseException ),
timeOut = 15,
usePoolCallbacks = True,
blocking = True )
if result["OK"]:
self.log.always("CallableFunc enqueued to task %s timeWait=%s exception=%s" % ( i, timeWait, raiseException ) )
i += 1
else:
continue
if i == 16:
break
self.processPool.finalize( 2 )
def testLockedClass( self ):
""" LockedCallableClass and task time out test """
for loop in range(2):
self.log.always( "loop %s" % loop )
i = 0
while i < 16:
if self.processPool.getFreeSlots() > 0:
timeWait = random.randint(0, 5) * 5
raiseException = False
if timeWait == 5:
raiseException = True
klass = CallableClass
if timeWait >= 20:
klass = LockedCallableClass
result = self.processPool.createAndQueueTask( klass,
taskID = i,
args = ( i, timeWait, raiseException ),
timeOut = 15,
usePoolCallbacks = True,
blocking = True )
if result["OK"]:
self.log.always("%s enqueued to task %s timeWait=%s exception=%s" % ( klass.__name__ , i, timeWait, raiseException ) )
i += 1
else:
continue
self.log.always("being idle for a while")
for _ in range( 100000 ):
for _ in range( 1000 ):
pass
self.log.always("finalizing...")
self.processPool.finalize( 10 )
## unlock
gLock.release()
## SUT suite execution
if __name__ == "__main__":
testLoader = unittest.TestLoader()
suitePPCT = testLoader.loadTestsFromTestCase( ProcessPoolCallbacksTests )
suiteTCT = testLoader.loadTestsFromTestCase( TaskCallbacksTests )
suiteTTOT = testLoader.loadTestsFromTestCase( TaskTimeOutTests )
suite = unittest.TestSuite( [ suitePPCT, suiteTCT, suiteTTOT ] )
unittest.TextTestRunner(verbosity=3).run(suite)
|
coberger/DIRAC
|
Core/Utilities/test/ProcessPoolTests.py
|
Python
|
gpl-3.0
| 12,777
|
[
"DIRAC"
] |
9997ec42969001875eb9a91c0ffd09d26d1480202af0f1018b3eb8d2eefb0b7b
|
from __future__ import division
from builtins import zip
import os.path
import tempfile
import shutil
import neo.io
import numpy as np
from pype9.cmd import simulate
from pype9.utils.units import parse_units
from pype9.utils.arguments import CATALOG_PREFIX
import ninemlcatalog
import quantities as pq
from pype9.simulate.neuron import (
Simulation as NeuronSimulation,
CellMetaClass as NeuronCellMetaClass,
Network as NetworkNEURON)
from pype9.simulate.nest import (
Simulation as NESTSimulation,
CellMetaClass as NESTCellMetaClass,
Network as NetworkNEST)
import nineml
import nineml.units as un
if __name__ == '__main__':
from pype9.utils.testing import DummyTestCase as TestCase # @UnusedImport
else:
from unittest import TestCase # @Reimport
class TestSimulateCell(TestCase):
ref_path = ''
# Izhikevich simulation params
t_stop = 100.0
dt = 0.001
U = (-1.625, 'pA') # (-14.0, 'mV/ms')
V = (-65.0, 'mV')
izhi_path = 'catalog://neuron/Izhikevich#SampleIzhikevichFastSpiking'
isyn_path = os.path.join(os.path.relpath(ninemlcatalog.root), 'input',
'StepCurrent.xml#StepCurrent')
isyn_amp = (100.0, 'pA')
isyn_onset = (50.0, 'ms')
isyn_init = (0.0, 'pA')
rec_t_start = (1.0, 'ms')
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_single_cell(self):
in_path = '{}/isyn.pkl'.format(self.tmpdir)
out_path = '{}/v.pkl'.format(self.tmpdir)
# First simulate input signal to have something to play into izhikevich
# cell
argv = ("{input_model} nest {t_stop} {dt} "
"--record current_output {out_path} {rec_t_start} "
"--prop amplitude {amp} "
"--prop onset {onset} "
"--init_value current_output {init} "
"--build_mode lazy "
"--build_version Cmd "
.format(input_model=self.isyn_path, out_path=in_path,
t_stop=self.t_stop, dt=self.dt,
amp='{} {}'.format(*self.isyn_amp),
onset='{} {}'.format(*self.isyn_onset),
init='{} {}'.format(*self.isyn_init),
rec_t_start='{} {}'.format(*self.rec_t_start)))
# Run input signal simulation
simulate.run(argv.split())
isyn = neo.io.PickleIO(in_path).read()[0].analogsignals[0]
# Check sanity of input signal
self.assertEqual(isyn.max(), self.isyn_amp[0],
"Max of isyn input signal {} ({}) did not match "
"specified amplitude, {}".format(
isyn.max(), in_path, self.isyn_amp[0]))
self.assertEqual(isyn.min(), self.isyn_init[0],
"Min of isyn input signal {} ({}) did not match "
"specified initial value, {}"
.format(isyn.min(), in_path, self.isyn_init[0]))
for simulator in ('neuron', 'nest'):
argv = (
"{nineml_model} {sim} {t_stop} {dt} "
"--record V {out_path} {rec_t_start} "
"--init_value U {U} "
"--init_value V {V} "
"--init_regime subVb "
"--play iSyn {in_path} "
"--build_mode force "
"--build_version Cmd "
"--device_delay 0.5 ms "
"--min_delay 0.5 ms "
.format(nineml_model=self.izhi_path, sim=simulator,
out_path=out_path, in_path=in_path, t_stop=self.t_stop,
dt=self.dt, U='{} {}'.format(*self.U),
V='{} {}'.format(*self.V),
isyn_amp='{} {}'.format(*self.isyn_amp),
isyn_onset='{} {}'.format(*self.isyn_onset),
isyn_init='{} {}'.format(*self.isyn_init),
rec_t_start='{} {}'.format(*self.rec_t_start)))
simulate.run(argv.split())
data_seg = neo.io.PickleIO(out_path).read()[0]
v = data_seg.analogsignals[0]
regimes = data_seg.epochs[0]
ref_v, ref_regimes = self._ref_single_cell(simulator, isyn)
self.assertTrue(all(v == ref_v),
"'simulate' command produced different results to"
" to api reference for izhikevich model using "
"'{}' simulator".format(simulator))
# FIXME: Need a better test
self.assertGreater(
v.max(), -60.0,
"No spikes generated for '{}' (max val: {}) version of Izhi "
"model. Probably error in 'play' method if all dynamics tests "
"pass ".format(simulator, v.max()))
self.assertTrue(all(regimes.times == ref_regimes.times))
self.assertTrue(all(regimes.durations == ref_regimes.durations))
self.assertEqual(regimes.labels, ref_regimes.labels)
self.assertEqual(len(regimes.times), 6)
self.assertEqual(regimes.labels[0], 'subVb')
self.assertTrue('subthreshold' in regimes.labels)
def _ref_single_cell(self, simulator, isyn):
if simulator == 'neuron':
metaclass = NeuronCellMetaClass
Simulation = NeuronSimulation
else:
metaclass = NESTCellMetaClass
Simulation = NESTSimulation
nineml_model = ninemlcatalog.load(self.izhi_path[len(CATALOG_PREFIX):])
Cell = metaclass(nineml_model.component_class, build_version='API',
external_currents=['iSyn'])
with Simulation(dt=self.dt * un.ms, min_delay=0.5 * un.ms,
device_delay=0.5 * un.ms) as sim:
cell = Cell(nineml_model, U=self.U[0] * parse_units(self.U[1]),
V=self.V[0] * parse_units(self.V[1]), regime_='subVb')
cell.record('V')
cell.record_regime()
cell.play('iSyn', isyn)
sim.run(self.t_stop * un.ms)
return (cell.recording('V', t_start=pq.Quantity(self.rec_t_start[0],
self.rec_t_start[1])),
cell.regime_epochs())
class TestSimulateNetwork(TestCase):
brunel_path = 'network/Brunel2000/AI'
brunel_name = 'Brunel_AI_reduced'
reduced_brunel_fname = 'reduced_brunel.xml'
recorded_pops = ('Exc', 'Inh')
reduced_brunel_order = 10
t_stop = 100.0
dt = 0.001
seed = 12345
rec_t_start = (1.0, 'ms')
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
# Create reduced version of Brunel network
model = ninemlcatalog.load(self.brunel_path).as_network(
self.brunel_name)
scale = float(self.reduced_brunel_order) / model.population('Inh').size
# rescale populations
reduced_model = model.clone()
for pop in reduced_model.populations:
pop.size = int(np.ceil(pop.size * scale))
for proj in reduced_model.projections:
connectivity = proj.connectivity
connectivity._src_size = proj.pre.size
connectivity._dest_size = proj.post.size
if proj.name in ('Excitation', 'Inhibition'):
props = connectivity.rule_properties
number = props.property('number')
props.set(nineml.Property(
number.name,
int(np.ceil(float(number.value) * scale)) * un.unitless))
self.reduced_brunel_path = os.path.join(self.tmpdir,
self.reduced_brunel_fname)
reduced_model.write(self.reduced_brunel_path) # , version=2)
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_network(self):
for simulator in ('nest', ): # , 'neuron'):
argv = (
"{model_url}#{model_name} {sim} {t_stop} {dt} "
"--record Exc.spike_output {tmpdir}/Exc-{sim}.neo.pkl "
"{rec_t_start} "
"--record Inh.spike_output {tmpdir}/Inh-{sim}.neo.pkl "
"{rec_t_start} "
"--build_mode force "
"--seed {seed}"
.format(model_url=self.reduced_brunel_path,
model_name=self.brunel_name, sim=simulator,
tmpdir=self.tmpdir, t_stop=self.t_stop, dt=self.dt,
seed=self.seed,
rec_t_start='{} {}'.format(*self.rec_t_start)))
simulate.run(argv.split())
ref_recs = self._ref_network(simulator)
for pop_name in self.recorded_pops:
rec_path = '{}/{}-{}.neo.pkl'.format(self.tmpdir, pop_name,
simulator)
rec = neo.io.PickleIO(rec_path).read()[0].spiketrains
ref = ref_recs[pop_name].spiketrains
self.assertTrue(
all(all(c == f) for c, f in zip(rec, ref)),
"'simulate' command produced different results to"
" to api reference for izhikevich model using "
"'{}' simulator".format(simulator))
# TODO: Need a better test
self.assertGreater(
len(rec), 0,
"No spikes generated for '{}' population using {}."
.format(pop_name, simulator))
def _ref_network(self, simulator, external_input=None, **kwargs):
if simulator == 'nest':
NetworkClass = NetworkNEST
Simulation = NESTSimulation
elif simulator == 'neuron':
NetworkClass = NetworkNEURON
Simulation = NeuronSimulation
else:
assert False
model = nineml.read(self.reduced_brunel_path).as_network(
'ReducedBrunel')
with Simulation(dt=self.dt * un.ms, seed=self.seed,
**model.delay_limits()) as sim:
network = NetworkClass(model, **kwargs)
if external_input is not None:
network.component_array('Ext').play('spike_input__cell',
external_input)
for pop_name in self.recorded_pops:
network.component_array(pop_name).record('spike_output')
sim.run(self.t_stop * un.ms)
recordings = {}
for pop_name in self.recorded_pops:
recordings[pop_name] = network.component_array(pop_name).recording(
'spike_output')
return recordings
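# Hypothetical usage note (not part of pype9): given the repository layout
# recorded for this file, the suite above would typically be run with the
# standard unittest runner, e.g.
#
#     python -m unittest test.unittests.test_cmds.test_simulate
#
# assuming NEURON/NEST, ninemlcatalog and their Python bindings are installed;
# the dotted module path is an assumption based on the file path below.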
|
CNS-OIST/PyPe9
|
test/unittests/test_cmds/test_simulate.py
|
Python
|
mit
| 10,784
|
[
"NEURON"
] |
79e12441cd32585f881ceae8872597e56b604da7507492a13ef276114390b2f8
|
# --------------------------------------------------------------------------
# Software:     InVesalius - 3D Medical Image Reconstruction Software
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
# --------------------------------------------------------------------------
#    This program is free software; you can redistribute it and/or
#    modify it under the terms of the GNU General Public License as
#    published by the Free Software Foundation, version 2 of the
#    License.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
# --------------------------------------------------------------------------
import gdcm
import numpy as np
import vtk
from vtk.util import numpy_support
def to_vtk(
n_array,
spacing=(1.0, 1.0, 1.0),
slice_number=0,
orientation="AXIAL",
origin=(0, 0, 0),
padding=(0, 0, 0),
):
if orientation == "SAGITTAL":
orientation = "SAGITAL"
try:
dz, dy, dx = n_array.shape
except ValueError:
dy, dx = n_array.shape
dz = 1
px, py, pz = padding
v_image = numpy_support.numpy_to_vtk(n_array.flat)
if orientation == "AXIAL":
extent = (
0 - px,
dx - 1 - px,
0 - py,
dy - 1 - py,
slice_number - pz,
slice_number + dz - 1 - pz,
)
elif orientation == "SAGITAL":
dx, dy, dz = dz, dx, dy
extent = (
slice_number - px,
slice_number + dx - 1 - px,
0 - py,
dy - 1 - py,
0 - pz,
dz - 1 - pz,
)
elif orientation == "CORONAL":
dx, dy, dz = dx, dz, dy
extent = (
0 - px,
dx - 1 - px,
slice_number - py,
slice_number + dy - 1 - py,
0 - pz,
dz - 1 - pz,
)
# Generating the vtkImageData
image = vtk.vtkImageData()
image.SetOrigin(origin)
image.SetSpacing(spacing)
image.SetDimensions(dx, dy, dz)
    # SetNumberOfScalarComponents and SetScalarType were replaced by
# AllocateScalars
# image.SetNumberOfScalarComponents(1)
# image.SetScalarType(numpy_support.get_vtk_array_type(n_array.dtype))
image.AllocateScalars(numpy_support.get_vtk_array_type(n_array.dtype), 1)
image.SetExtent(extent)
image.GetPointData().SetScalars(v_image)
image_copy = vtk.vtkImageData()
image_copy.DeepCopy(image)
return image_copy
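# Hypothetical usage sketch (not part of InVesalius): shows how a NumPy volume
# is converted to vtkImageData with to_vtk. The array shape and spacing values
# below are invented purely for illustration.
def _example_to_vtk_usage():  # pragma: no cover
    vol = np.zeros((10, 20, 30), dtype=np.int16)  # (z, y, x) ordering
    img = to_vtk(vol, spacing=(0.5, 0.5, 1.0), orientation="AXIAL")
    # SetDimensions receives (dx, dy, dz), so this prints (30, 20, 10)
    print(img.GetDimensions())
    return img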
def to_vtk_mask(n_array, spacing=(1.0, 1.0, 1.0), origin=(0.0, 0.0, 0.0)):
dz, dy, dx = n_array.shape
ox, oy, oz = origin
sx, sy, sz = spacing
ox -= sx
oy -= sy
oz -= sz
v_image = numpy_support.numpy_to_vtk(n_array.flat)
extent = (0, dx - 1, 0, dy - 1, 0, dz - 1)
# Generating the vtkImageData
image = vtk.vtkImageData()
image.SetOrigin(ox, oy, oz)
image.SetSpacing(sx, sy, sz)
image.SetDimensions(dx - 1, dy - 1, dz - 1)
    # SetNumberOfScalarComponents and SetScalarType were replaced by
# AllocateScalars
# image.SetNumberOfScalarComponents(1)
# image.SetScalarType(numpy_support.get_vtk_array_type(n_array.dtype))
image.AllocateScalars(numpy_support.get_vtk_array_type(n_array.dtype), 1)
image.SetExtent(extent)
image.GetPointData().SetScalars(v_image)
# image_copy = vtk.vtkImageData()
# image_copy.DeepCopy(image)
return image
def np_rgba_to_vtk(n_array, spacing=(1.0, 1.0, 1.0)):
dy, dx, dc = n_array.shape
v_image = numpy_support.numpy_to_vtk(n_array.reshape(dy * dx, dc))
extent = (0, dx - 1, 0, dy - 1, 0, 0)
# Generating the vtkImageData
image = vtk.vtkImageData()
image.SetOrigin(0, 0, 0)
image.SetSpacing(spacing)
image.SetDimensions(dx, dy, 1)
    # SetNumberOfScalarComponents and SetScalarType were replaced by
# AllocateScalars
# image.SetNumberOfScalarComponents(1)
# image.SetScalarType(numpy_support.get_vtk_array_type(n_array.dtype))
image.AllocateScalars(numpy_support.get_vtk_array_type(n_array.dtype), dc)
image.SetExtent(extent)
image.GetPointData().SetScalars(v_image)
return image
# Based on http://gdcm.sourceforge.net/html/ConvertNumpy_8py-example.html
def gdcm_to_numpy(image, apply_intercep_scale=True):
map_gdcm_np = {
gdcm.PixelFormat.SINGLEBIT: np.uint8,
gdcm.PixelFormat.UINT8: np.uint8,
gdcm.PixelFormat.INT8: np.int8,
gdcm.PixelFormat.UINT12: np.uint16,
gdcm.PixelFormat.INT12: np.int16,
gdcm.PixelFormat.UINT16: np.uint16,
gdcm.PixelFormat.INT16: np.int16,
gdcm.PixelFormat.UINT32: np.uint32,
gdcm.PixelFormat.INT32: np.int32,
# gdcm.PixelFormat.FLOAT16:np.float16,
gdcm.PixelFormat.FLOAT32: np.float32,
gdcm.PixelFormat.FLOAT64: np.float64,
}
pf = image.GetPixelFormat()
if image.GetNumberOfDimensions() == 3:
shape = (
image.GetDimension(2),
image.GetDimension(1),
image.GetDimension(0),
pf.GetSamplesPerPixel(),
)
else:
shape = image.GetDimension(1), image.GetDimension(0), pf.GetSamplesPerPixel()
dtype = map_gdcm_np[pf.GetScalarType()]
gdcm_array = image.GetBuffer()
np_array = np.frombuffer(
gdcm_array.encode("utf-8", errors="surrogateescape"), dtype=dtype
)
if pf.GetScalarType() == gdcm.PixelFormat.SINGLEBIT:
np_array = np.unpackbits(np_array)
np_array.shape = shape
np_array = np_array.squeeze()
if apply_intercep_scale:
shift = image.GetIntercept()
scale = image.GetSlope()
output = np.empty_like(np_array, np.int16)
output[:] = scale * np_array + shift
return output
else:
return np_array
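# Hypothetical usage sketch (not part of InVesalius): reading a DICOM slice
# with GDCM and converting it via gdcm_to_numpy. The file path is an
# assumption made for illustration only.
def _example_gdcm_to_numpy_usage(dicom_path="/path/to/slice.dcm"):  # pragma: no cover
    reader = gdcm.ImageReader()
    reader.SetFileName(dicom_path)
    if not reader.Read():
        raise IOError("GDCM could not read %s" % dicom_path)
    # With apply_intercep_scale=True the rescale slope/intercept are applied
    # and an int16 array is returned.
    return gdcm_to_numpy(reader.GetImage(), apply_intercep_scale=True)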
|
paulojamorim/invesalius3
|
invesalius/data/converters.py
|
Python
|
gpl-2.0
| 6,144
|
[
"VTK"
] |
e66ebcc02ceb20ada730882971eecda774ba31826c749314b6bc6c61ed98c849
|
r"""Solve Poisson's equation on a sphere using a mixed formulation
The Poisson equation is in strong form
.. math::
\nabla^2 u &= f \\
u(x, y=\pm 1) &= 0 \\
u(x=2\pi, y) &= u(x=0, y)
We solve using the mixed formulation
.. math::
g - \nabla(u) &= 0 \\
\nabla \cdot g &= f \\
u(x, y=\pm 1) &= 0 \\
u(x=2\pi, y) &= u(x=0, y) \\
g(x=2\pi, y) &= g(x=0, y)
The problem is solved without boundary conditions and in spherical
coordinates. The mixed equations are solved coupled and implicit.
"""
import numpy as np
import sympy as sp
from shenfun import *
config['basisvectors'] = 'normal'
# Define spherical coordinates
r = 1
theta, phi = psi = sp.symbols('x,y', real=True, positive=True)
rv = (r*sp.sin(theta)*sp.cos(phi), r*sp.sin(theta)*sp.sin(phi), r*sp.cos(theta))
# Define a manufactured solution
#ue = rv[0]*rv[1]*rv[2]
sph = sp.functions.special.spherical_harmonics.Ynm
ue = sph(6, 3, theta, phi)
#ue = sp.cos(4*(sp.sin(theta)*sp.cos(phi) + sp.sin(theta)*sp.sin(phi) + sp.cos(theta)))
N, M = 40, 40
L0 = FunctionSpace(N, 'L', domain=(0, np.pi))
F1 = FunctionSpace(M, 'F', dtype='D')
T = TensorProductSpace(comm, (L0, F1), coordinates=(psi, rv, sp.Q.positive(sp.sin(theta))))
VT = VectorSpace(T)
Q = CompositeSpace([VT, T])
gu = TrialFunction(Q)
pq = TestFunction(Q)
g, u = gu
p, q = pq
A00 = inner(p, g)
A01 = inner(div(p), u)
A10 = inner(q, div(g))
# Get f and g on quad points
gh = (div(grad(TrialFunction(T)))).tosympy(basis=ue, psi=psi)
vfj = Array(Q, buffer=(0, 0, gh))
vj, fj = vfj
vf_hat = Function(Q)
vf_hat[1] = inner(q, fj, output_array=vf_hat[1])
M = BlockMatrix(A00+A01+A10)
gu_hat = M.solve(vf_hat, constraints=((2, 0, 0),))
gu = gu_hat.backward()
g_, u_ = gu
# Exact Cartesian gradient
gradue = Array(VT, buffer=(ue.diff(theta, 1), ue.diff(phi, 1)/sp.sin(theta)**2))
uj = Array(T, buffer=ue)
error = [comm.reduce(np.linalg.norm(uj-u_)),
comm.reduce(np.linalg.norm(gradue[0]-g_[0])),
comm.reduce(np.linalg.norm(gradue[1]-g_[1]))]
if comm.Get_rank() == 0:
print('Error u dudx dudy')
print(' %2.4e %2.4e %2.4e' %(error[0], error[1], error[2]))
#assert np.all(abs(np.array(error)) < 1e-8), error
from mayavi import mlab
xx, yy, zz = T.local_cartesian_mesh(uniform=True)
gu = gu_hat.backward(kind='uniform')
g_, u_ = gu
# For plotting - get gradient as Cartesian vector
df = g_.get_cartesian_vector()
# plot real part of
fig = surf3D(u_.imag, [xx, yy, zz], backend='mayavi', wrapaxes=[1], kind='uniform')
#fig.show()
quiver3D(df.imag, [xx, yy, zz], wrapaxes=[1], kind='uniform', fig=fig)
mlab.show()
|
spectralDNS/shenfun
|
demo/MixedPoissonSphere.py
|
Python
|
bsd-2-clause
| 2,621
|
[
"Mayavi"
] |
51019646c328e1bd43f8e1766c2561ab86fbbb73ac27522142495fc8ceac098e
|
"""
Universe configuration builder.
"""
import sys, os
import logging, logging.config
from optparse import OptionParser
import ConfigParser
from galaxy.util import string_as_bool
from galaxy import eggs
import pkg_resources
log = logging.getLogger( __name__ )
def resolve_path( path, root ):
"""If 'path' is relative make absolute by prepending 'root'"""
if not( os.path.isabs( path ) ):
path = os.path.join( root, path )
return path
class ConfigurationError( Exception ):
pass
class Configuration( object ):
def __init__( self, **kwargs ):
self.config_dict = kwargs
self.root = kwargs.get( 'root_dir', '.' )
# Collect the umask and primary gid from the environment
self.umask = os.umask( 077 ) # get the current umask
os.umask( self.umask ) # can't get w/o set, so set it back
self.gid = os.getgid() # if running under newgrp(1) we'll need to fix the group of data created on the cluster
# Database related configuration
self.database = resolve_path( kwargs.get( "database_file", "database/universe.d" ), self.root )
self.database_connection = kwargs.get( "database_connection", False )
self.database_engine_options = get_database_engine_options( kwargs )
self.database_create_tables = string_as_bool( kwargs.get( "database_create_tables", "True" ) )
# Where dataset files are stored
self.file_path = resolve_path( kwargs.get( "file_path", "database/files" ), self.root )
self.new_file_path = resolve_path( kwargs.get( "new_file_path", "database/tmp" ), self.root )
# dataset Track files
self.track_store_path = kwargs.get( "track_store_path", "${extra_files_path}/tracks")
self.tool_path = resolve_path( kwargs.get( "tool_path", "tools" ), self.root )
self.tool_data_path = resolve_path( kwargs.get( "tool_data_path", "tool-data" ), os.getcwd() )
self.test_conf = resolve_path( kwargs.get( "test_conf", "" ), self.root )
self.tool_config = resolve_path( kwargs.get( 'tool_config_file', 'tool_conf.xml' ), self.root )
self.tool_secret = kwargs.get( "tool_secret", "" )
self.id_secret = kwargs.get( "id_secret", "USING THE DEFAULT IS NOT SECURE!" )
self.set_metadata_externally = string_as_bool( kwargs.get( "set_metadata_externally", "False" ) )
self.use_remote_user = string_as_bool( kwargs.get( "use_remote_user", "False" ) )
self.remote_user_maildomain = kwargs.get( "remote_user_maildomain", None )
self.remote_user_logout_href = kwargs.get( "remote_user_logout_href", None )
self.require_login = string_as_bool( kwargs.get( "require_login", "False" ) )
self.allow_user_creation = string_as_bool( kwargs.get( "allow_user_creation", "True" ) )
self.allow_user_deletion = string_as_bool( kwargs.get( "allow_user_deletion", "False" ) )
self.new_user_dataset_access_role_default_private = string_as_bool( kwargs.get( "new_user_dataset_access_role_default_private", "False" ) )
self.template_path = resolve_path( kwargs.get( "template_path", "templates" ), self.root )
self.template_cache = resolve_path( kwargs.get( "template_cache_path", "database/compiled_templates" ), self.root )
self.local_job_queue_workers = int( kwargs.get( "local_job_queue_workers", "5" ) )
self.cluster_job_queue_workers = int( kwargs.get( "cluster_job_queue_workers", "3" ) )
self.job_scheduler_policy = kwargs.get("job_scheduler_policy", "FIFO")
self.job_queue_cleanup_interval = int( kwargs.get("job_queue_cleanup_interval", "5") )
self.cluster_files_directory = os.path.abspath( kwargs.get( "cluster_files_directory", "database/pbs" ) )
self.job_working_directory = resolve_path( kwargs.get( "job_working_directory", "database/job_working_directory" ), self.root )
self.outputs_to_working_directory = string_as_bool( kwargs.get( 'outputs_to_working_directory', False ) )
self.output_size_limit = int( kwargs.get( 'output_size_limit', 0 ) )
self.job_walltime = kwargs.get( 'job_walltime', None )
self.admin_users = kwargs.get( "admin_users", "" )
self.sendmail_path = kwargs.get('sendmail_path',"/usr/sbin/sendmail")
self.mailing_join_addr = kwargs.get('mailing_join_addr',"galaxy-user-join@bx.psu.edu")
self.error_email_to = kwargs.get( 'error_email_to', None )
self.smtp_server = kwargs.get( 'smtp_server', None )
self.start_job_runners = kwargs.get( 'start_job_runners', None )
self.default_cluster_job_runner = kwargs.get( 'default_cluster_job_runner', 'local:///' )
self.pbs_application_server = kwargs.get('pbs_application_server', "" )
self.pbs_dataset_server = kwargs.get('pbs_dataset_server', "" )
self.pbs_dataset_path = kwargs.get('pbs_dataset_path', "" )
self.pbs_stage_path = kwargs.get('pbs_stage_path', "" )
self.use_heartbeat = string_as_bool( kwargs.get( 'use_heartbeat', 'False' ) )
self.use_memdump = string_as_bool( kwargs.get( 'use_memdump', 'False' ) )
self.log_memory_usage = string_as_bool( kwargs.get( 'log_memory_usage', 'False' ) )
self.log_actions = string_as_bool( kwargs.get( 'log_actions', 'False' ) )
self.log_events = string_as_bool( kwargs.get( 'log_events', 'False' ) )
self.ucsc_display_sites = kwargs.get( 'ucsc_display_sites', "main,test,archaea,ucla" ).lower().split(",")
self.gbrowse_display_sites = kwargs.get( 'gbrowse_display_sites', "main,test,tair" ).lower().split(",")
self.genetrack_display_sites = kwargs.get( 'genetrack_display_sites', "main,test" ).lower().split(",")
self.brand = kwargs.get( 'brand', None )
self.wiki_url = kwargs.get( 'wiki_url', 'http://g2.trac.bx.psu.edu/' )
self.bugs_email = kwargs.get( 'bugs_email', None )
self.blog_url = kwargs.get( 'blog_url', None )
self.screencasts_url = kwargs.get( 'screencasts_url', None )
self.library_import_dir = kwargs.get( 'library_import_dir', None )
if self.library_import_dir is not None and not os.path.exists( self.library_import_dir ):
raise ConfigurationError( "library_import_dir specified in config (%s) does not exist" % self.library_import_dir )
self.user_library_import_dir = kwargs.get( 'user_library_import_dir', None )
if self.user_library_import_dir is not None and not os.path.exists( self.user_library_import_dir ):
raise ConfigurationError( "user_library_import_dir specified in config (%s) does not exist" % self.user_library_import_dir )
self.allow_library_path_paste = kwargs.get( 'allow_library_path_paste', False )
# Configuration options for taking advantage of nginx features
self.nginx_x_accel_redirect_base = kwargs.get( 'nginx_x_accel_redirect_base', False )
self.nginx_upload_store = kwargs.get( 'nginx_upload_store', False )
self.nginx_upload_path = kwargs.get( 'nginx_upload_path', False )
if self.nginx_upload_store:
self.nginx_upload_store = os.path.abspath( self.nginx_upload_store )
# Parse global_conf and save the parser
global_conf = kwargs.get( 'global_conf', None )
global_conf_parser = ConfigParser.ConfigParser()
self.global_conf_parser = global_conf_parser
if global_conf and "__file__" in global_conf:
global_conf_parser.read(global_conf['__file__'])
#Store per-tool runner config
try:
self.tool_runners = global_conf_parser.items("galaxy:tool_runners")
except ConfigParser.NoSectionError:
self.tool_runners = []
self.datatypes_config = kwargs.get( 'datatypes_config_file', 'datatypes_conf.xml' )
# Cloud configuration options
self.cloud_controller_instance = string_as_bool( kwargs.get( 'cloud_controller_instance', 'False' ) )
if self.cloud_controller_instance == True:
self.enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', 'True' ) )
else:
self.enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', 'False' ) )
def get( self, key, default ):
return self.config_dict.get( key, default )
def get_bool( self, key, default ):
if key in self.config_dict:
return string_as_bool( self.config_dict[key] )
else:
return default
def check( self ):
# Check that required directories exist
for path in self.root, self.file_path, self.tool_path, self.tool_data_path, self.template_path, self.job_working_directory, self.cluster_files_directory:
if not os.path.isdir( path ):
raise ConfigurationError("Directory does not exist: %s" % path )
# Check that required files exist
for path in self.tool_config, self.datatypes_config:
if not os.path.isfile(path):
raise ConfigurationError("File not found: %s" % path )
        # Check job runners so the admin can scramble the dependent eggs.
if self.start_job_runners is not None:
runner_to_egg = dict( pbs = 'pbs_python', sge = 'DRMAA_python' )
for runner in self.start_job_runners.split( ',' ):
try:
pkg_resources.require( runner_to_egg[runner] )
except eggs.EggNotFetchable, e:
raise eggs.EggNotFetchable( 'You must scramble the %s egg to use the %s job runner. Instructions are available at:\n http://bitbucket.org/galaxy/galaxy-central/wiki/Config/Cluster' % ( runner_to_egg[runner], runner ) )
except KeyError:
raise Exception( 'No such job runner: %s. Please double-check the value of start_job_runners in universe_wsgi.ini' % runner )
def is_admin_user( self,user ):
"""
Determine if the provided user is listed in `admin_users`.
NOTE: This is temporary, admin users will likely be specified in the
database in the future.
"""
admin_users = self.get( "admin_users", "" ).split( "," )
return ( user is not None and user.email in admin_users )
def get_database_engine_options( kwargs ):
"""
Allow options for the SQLAlchemy database engine to be passed by using
the prefix "database_engine_option_".
"""
conversions = {
'convert_unicode': string_as_bool,
'pool_timeout': int,
'echo': string_as_bool,
'echo_pool': string_as_bool,
'pool_recycle': int,
'pool_size': int,
'max_overflow': int,
'pool_threadlocal': string_as_bool
}
prefix = "database_engine_option_"
prefix_len = len( prefix )
rval = {}
for key, value in kwargs.iteritems():
if key.startswith( prefix ):
key = key[prefix_len:]
if key in conversions:
value = conversions[key](value)
rval[ key ] = value
return rval
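def _example_database_engine_options():
    """
    Hypothetical illustration (not part of Galaxy): shows how the
    "database_engine_option_" prefix is stripped and values are converted,
    e.g. database_engine_option_pool_size = "10" yields {'pool_size': 10}.
    """
    kwargs = { 'database_engine_option_pool_size': '10',
               'database_engine_option_echo': 'True',
               'unrelated_option': 'ignored' }
    return get_database_engine_options( kwargs )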
def configure_logging( config ):
"""
    Allow some basic logging configuration to be read from the cherrypy
config.
"""
# PasteScript will have already configured the logger if the appropriate
# sections were found in the config file, so we do nothing if the
# config has a loggers section, otherwise we do some simple setup
# using the 'log_*' values from the config.
if config.global_conf_parser.has_section( "loggers" ):
return
format = config.get( "log_format", "%(name)s %(levelname)s %(asctime)s %(message)s" )
level = logging._levelNames[ config.get( "log_level", "DEBUG" ) ]
destination = config.get( "log_destination", "stdout" )
log.info( "Logging at '%s' level to '%s'" % ( level, destination ) )
# Get root logger
root = logging.getLogger()
# Set level
root.setLevel( level )
# Turn down paste httpserver logging
if level <= logging.DEBUG:
logging.getLogger( "paste.httpserver.ThreadPool" ).setLevel( logging.WARN )
# Remove old handlers
for h in root.handlers[:]:
root.removeHandler(h)
# Create handler
if destination == "stdout":
handler = logging.StreamHandler( sys.stdout )
else:
handler = logging.FileHandler( destination )
# Create formatter
formatter = logging.Formatter( format )
# Hook everything up
handler.setFormatter( formatter )
root.addHandler( handler )
|
volpino/Yeps-EURAC
|
lib/galaxy/config.py
|
Python
|
mit
| 12,546
|
[
"Galaxy"
] |
265670c354d21e36846879db4b79564c000c1d988b874627ae255b9eb869bccc
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import string
import os
import copy
import sys
import pandas as pds
import numpy as np
from . import _custom
from . import _files
from . import _orbits
from . import _meta
from . import utils
from pysat import data_dir
from pysat import DataFrame, Series
# main class for users
class Instrument(object):
"""Download, load, manage, modify and analyze science data.
Parameters
----------
platform : string
name of platform/satellite.
name : string
name of instrument.
tag : string, optional
identifies particular subset of instrument data.
sat_id : string, optional
identity within constellation
clean_level : {'clean','dusty','dirty','none'}, optional
level of data quality
pad : pandas.DateOffset, or dictionary, optional
        Length of time to pad the beginning and end of loaded data for
time-series processing. Extra data is removed after applying all
custom functions. Dictionary, if supplied, is simply passed to
pandas DateOffset.
orbit_info : dict
Orbit information, {'index':index, 'kind':kind, 'period':period}.
See pysat.Orbits for more information.
inst_module : module, optional
Provide instrument module directly.
Takes precedence over platform/name.
update_files : boolean, optional
If True, immediately query filesystem for instrument files and store.
temporary_file_list : boolean, optional
If true, the list of Instrument files will not be written to disk.
Prevents a race condition when running multiple pysat processes.
multi_file_day : boolean, optional
Set to True if Instrument data files for a day are spread across
multiple files and data for day n could be found in a file
with a timestamp of day n-1 or n+1.
manual_org : bool
if True, then pysat will look directly in pysat data directory
for data files and will not use default /platform/name/tag
directory_format : str
directory naming structure in string format. Variables such as
platform, name, and tag will be filled in as needed using python
string formatting. The default directory structure would be
expressed as '{platform}/{name}/{tag}'
file_format : str or NoneType
File naming structure in string format. Variables such as year,
month, and sat_id will be filled in as needed using python string
formatting. The default file format structure is supplied in the
instrument list_files routine.
Attributes
----------
data : pandas.DataFrame
loaded science data
date : pandas.datetime
date for loaded data
yr : int
year for loaded data
bounds : (datetime/filename/None, datetime/filename/None)
bounds for loading data, supply array_like for a season with gaps
doy : int
day of year for loaded data
files : pysat.Files
interface to instrument files
meta : pysat.Meta
interface to instrument metadata, similar to netCDF 1.6
orbits : pysat.Orbits
interface to extracting data orbit-by-orbit
custom : pysat.Custom
interface to instrument nano-kernel
kwargs : dictionary
keyword arguments passed to instrument loading routine
Note
----
Pysat attempts to load the module platform_name.py located in
the pysat/instruments directory. This module provides the underlying
functionality to download, load, and clean instrument data.
Alternatively, the module may be supplied directly
using keyword inst_module.
Examples
--------
::
# 1-second mag field data
vefi = pysat.Instrument(platform='cnofs',
name='vefi',
tag='dc_b',
clean_level='clean')
start = pysat.datetime(2009,1,1)
stop = pysat.datetime(2009,1,2)
vefi.download(start, stop)
vefi.load(date=start)
print(vefi['dB_mer'])
print(vefi.meta['db_mer'])
# 1-second thermal plasma parameters
ivm = pysat.Instrument(platform='cnofs',
name='ivm',
tag='',
clean_level='clean')
ivm.download(start,stop)
ivm.load(2009,1)
print(ivm['ionVelmeridional'])
# Ionosphere profiles from GPS occultation
cosmic = pysat.Instrument('cosmic2013',
'gps',
'ionprf',
altitude_bin=3)
# bins profile using 3 km step
cosmic.download(start, stop, user=user, password=password)
cosmic.load(date=start)
"""
def __init__(self, platform=None, name=None, tag=None, sat_id=None,
clean_level='clean', update_files=None, pad=None,
orbit_info=None, inst_module=None, multi_file_day=None,
manual_org=None, directory_format=None, file_format=None,
temporary_file_list=False,
*arg, **kwargs):
if inst_module is None:
# use strings to look up module name
if isinstance(platform, str) and isinstance(name, str):
self.platform = platform.lower()
self.name = name.lower()
# look to module for instrument functions and defaults
self._assign_funcs(by_name=True)
elif (platform is None) and (name is None):
# creating "empty" Instrument object with this path
self.name = ''
self.platform = ''
self._assign_funcs()
else:
raise ValueError('Inputs platform and name must both be strings, or both None.')
else:
# user has provided a module
try:
# platform and name are expected to be part of module
self.name = inst_module.name.lower()
self.platform = inst_module.platform.lower()
except AttributeError:
                raise AttributeError(''.join(('A name and platform attribute for the ',
                                    'instrument is required if supplying routine module directly.')))
# look to module for instrument functions and defaults
self._assign_funcs(inst_module=inst_module)
# more reasonable defaults for optional parameters
self.tag = tag.lower() if tag is not None else ''
self.sat_id = sat_id.lower() if sat_id is not None else ''
self.clean_level = (clean_level.lower() if clean_level is not None
else 'none')
# assign_func sets some instrument defaults, direct info rules all
if directory_format is not None:
self.directory_format = directory_format.lower()
# value not provided by user, check if there is a value provided by
# instrument module
elif self.directory_format is not None:
try:
# check if it is a function
self.directory_format = self.directory_format(tag, sat_id)
except TypeError:
pass
if file_format is not None:
self.file_format = file_format
# check to make sure value is reasonable
if self.file_format is not None:
# check if it is an iterable string. If it isn't formatted
# properly, raise Error
if (not isinstance(self.file_format, str) or
(self.file_format.find("{") < 0) or
(self.file_format.find("}") < 0)):
estr = 'file format set to default, supplied string must be '
                estr = '{:s}iterable [{:}]'.format(estr, self.file_format)
raise ValueError(estr)
# set up empty data and metadata
self.data = DataFrame(None)
self.meta = _meta.Meta()
# function processing class, processes data on load
self.custom = _custom.Custom()
# create arrays to store data around loaded day
# enables padding across day breaks with minimal loads
self._next_data = DataFrame(None)
self._next_data_track = []
self._prev_data = DataFrame(None)
self._prev_data_track = []
self._curr_data = DataFrame(None)
# multi file day, default set by assign_funcs
if multi_file_day is not None:
self.multi_file_day = multi_file_day
# arguments for padding
if isinstance(pad, pds.DateOffset):
self.pad = pad
elif isinstance(pad, dict):
self.pad = pds.DateOffset(**pad)
elif pad is None:
self.pad = None
else:
estr = 'pad must be a dictionary or a pandas.DateOffset instance.'
raise ValueError(estr)
# instantiate Files class
manual_org = False if manual_org is None else manual_org
temporary_file_list = not temporary_file_list
self.files = _files.Files(self, manual_org=manual_org,
directory_format=self.directory_format,
update_files=update_files,
file_format=self.file_format,
write_to_disk=temporary_file_list)
# set bounds for iteration
# self.bounds requires the Files class
# setting (None,None) loads default bounds
self.bounds = (None, None)
self.date = None
self._fid = None
self.yr = None
self.doy = None
self._load_by_date = False
# initialize orbit support
if orbit_info is None:
if self.orbit_info is None:
# if default info not provided, set None as default
orbit_info = {'index': None, 'kind': None, 'period': None}
else:
# default provided by instrument module
orbit_info = self.orbit_info
self.orbits = _orbits.Orbits(self, **orbit_info)
# store kwargs, passed to load routine
self.kwargs = kwargs
# run instrument init function, a basic pass function is used
# if user doesn't supply the init function
self._init_rtn(self)
# store base attributes, used in particular by Meta class
self._base_attr = dir(self)
def __getitem__(self, key):
"""
Convenience notation for accessing data; inst['name'] is inst.data.name
Examples
--------
::
# By name
inst['name']
# By position
inst[row_index, 'name']
# Slicing by row
inst[row1:row2, 'name']
# By Date
inst[datetime, 'name']
# Slicing by date, inclusive
inst[datetime1:datetime2, 'name']
# Slicing by name and row/date
inst[datetime1:datetime1, 'name1':'name2']
"""
if isinstance(key, tuple):
# support slicing
return self.data.ix[key[0], key[1]]
else:
return self.data[key]
def __setitem__(self, key, new):
"""Convenience method for adding data to instrument.
Examples
--------
::
# Simple Assignment, default metadata assigned
# 'long_name' = 'name'
# 'units' = ''
inst['name'] = newData
# Assignment with Metadata
inst['name'] = {'data':new_data,
'long_name':long_name,
'units':units}
Note
----
If no metadata provided and if metadata for 'name' not already stored
then default meta information is also added,
long_name = 'name', and units = ''.
"""
if isinstance(new, dict):
# metadata should be included in dict
self.data[key] = new.pop('data')
# pass the rest to meta
self.meta[key] = new
else:
if isinstance(key, tuple):
self.data.ix[key[0], key[1]] = new
self.meta[key[1]] = {}
elif isinstance(key, str):
self.data[key] = new
self.meta[key] = {}
elif isinstance(new, DataFrame):
self.data[key] = new[key]
for ke in key:
self.meta[ke] = {}
else:
raise ValueError("No support for supplied input key")
@property
def empty(self):
"""Boolean flag reflecting lack of data.
True if there is no Instrument data."""
return self.data.empty
def copy(self):
"""Deep copy of the entire Instrument object."""
return copy.deepcopy(self)
def _pass_func(*args, **kwargs):
pass
def _assign_funcs(self, by_name=False, inst_module=None):
"""Assign all external science instrument methods to Instrument object."""
import importlib
# set defaults
self._list_rtn = self._pass_func
self._load_rtn = self._pass_func
self._default_rtn = self._pass_func
self._clean_rtn = self._pass_func
self._init_rtn = self._pass_func
self._download_rtn = self._pass_func
# default params
self.directory_format = None
self.file_format = None
self.multi_file_day = False
self.orbit_info = None
if by_name:
# look for code with filename name, any errors passed up
inst = importlib.import_module(''.join(('.', self.platform, '_',
self.name)), package='pysat.instruments')
elif inst_module is not None:
# user supplied an object with relevant instrument routines
inst = inst_module
else:
# no module or name info, default pass functions assigned
return
try:
self._load_rtn = inst.load
self._list_rtn = inst.list_files
self._download_rtn = inst.download
except AttributeError:
estr = 'A load, file_list, and download routine are required for '
raise AttributeError('{:s}every instrument.'.format(estr))
try:
self._default_rtn = inst.default
except AttributeError:
pass
try:
self._init_rtn = inst.init
except AttributeError:
pass
try:
self._clean_rtn = inst.clean
except AttributeError:
pass
# look for instrument default parameters
try:
self.directory_format = inst.directory_format
except AttributeError:
pass
try:
            self.multi_file_day = inst.multi_file_day
except AttributeError:
pass
try:
self.orbit_info = inst.orbit_info
except AttributeError:
pass
return
def __repr__(self):
output_str = '\npysat Instrument object\n'
output_str += '-----------------------\n'
output_str += 'Platform: '+self.platform+'\n'
output_str += 'Name: '+self.name+'\n'
output_str += 'Tag: '+self.tag+'\n'
output_str += 'Satellite id: '+self.sat_id+'\n'
output_str += '\nData Processing\n'
output_str += '---------------\n'
output_str += 'Cleaning Level: ' + self.clean_level + '\n'
output_str += 'Data Padding: ' + self.pad.__repr__() + '\n'
output_str += 'Keyword Arguments Passed to load(): ' + self.kwargs.__repr__() +'\n'
output_str += 'Custom Functions : \n'
if len(self.custom._functions) > 0:
for func in self.custom._functions:
output_str += ' ' + func.__repr__()
else:
output_str += ' ' + 'No functions applied.\n'
output_str += '\nOrbit Settings' + '\n'
output_str += '--------------' + '\n'
if self.orbit_info is None:
output_str += 'Orbit properties not set.\n'
else:
output_str += 'Orbit Kind: ' + self.orbit_info['kind'] + '\n'
output_str += 'Orbit Index: ' + self.orbit_info['index'] + '\n'
output_str += 'Orbit Period: ' + self.orbit_info['period'].__str__() + '\n'
output_str += 'Number of Orbits: {:d}'.format(self.orbits.num) + '\n'
output_str += 'Loaded Orbit Number: {:d}'.format(self.orbits.current) + '\n'
output_str += '\nLocal File Statistics' + '\n'
output_str += '---------------------' + '\n'
output_str += 'Number of files: ' + str(len(self.files.files)) + '\n'
output_str += 'Date Range: '+self.files.files.index[0].strftime('%m/%d/%Y')
output_str += ' --- ' + self.files.files.index[-1].strftime('%m/%d/%Y') + '\n'
output_str += '\nLoaded Data Statistics'+'\n'
output_str += '----------------------'+'\n'
if not self.empty:
# if self._fid is not None:
# output_str += 'Filename: ' +
output_str += 'Date: ' + self.date.strftime('%m/%d/%Y') + '\n'
output_str += 'DOY: {:03d}'.format(self.doy) + '\n'
output_str += 'Time range: ' + self.data.index[0].strftime('%m/%d/%Y %H:%M:%S') + ' --- '
output_str += self.data.index[-1].strftime('%m/%d/%Y %H:%M:%S')+'\n'
output_str += 'Number of Times: ' + str(len(self.data.index)) + '\n'
output_str += 'Number of variables: ' + str(len(self.data.columns)) + '\n'
output_str += '\nVariable Names:'+'\n'
num = len(self.data.columns)//3
for i in np.arange(num):
output_str += self.data.columns[3 * i].ljust(30)
output_str += self.data.columns[3 * i + 1].ljust(30)
output_str += self.data.columns[3 * i + 2].ljust(30)+'\n'
for i in np.arange(len(self.data.columns) - 3 * num):
output_str += self.data.columns[i+3*num].ljust(30)
output_str += '\n'
else:
output_str += 'No loaded data.'+'\n'
output_str += '\n'
return output_str
def _load_data(self, date=None, fid=None):
"""
        Load data for an instrument on a given date or fid, depending upon input.
"""
if fid is not None:
# get filename based off of index value
fname = self.files[fid:fid+1]
elif date is not None:
fname = self.files[date: date+pds.DateOffset(days=1)]
else:
raise ValueError('Must supply either a date or file id number.')
if len(fname) > 0:
load_fname = [os.path.join(self.files.data_path, f) for f in fname]
data, mdata = self._load_rtn(load_fname, tag=self.tag,
sat_id=self.sat_id, **self.kwargs)
else:
data = DataFrame(None)
mdata = _meta.Meta()
output_str = '{platform} {name} {tag} {sat_id}'
output_str = output_str.format(platform=self.platform,
name=self.name, tag=self.tag,
sat_id=self.sat_id)
if not data.empty:
if not isinstance(data, DataFrame):
raise TypeError(' '.join(('Data returned by instrument load',
'routine must be a pandas.DataFrame')))
if not isinstance(mdata, _meta.Meta):
raise TypeError('Metadata returned must be a pysat.Meta object')
if date is not None:
output_str = ' '.join(('Returning', output_str, 'data for', date.strftime('%D')))
else:
if len(fname) == 1:
# this check was zero
output_str = ' '.join(('Returning', output_str, 'data from', fname[0]))
else:
output_str = ' '.join(('Returning', output_str, 'data from', fname[0], '::', fname[-1]))
else:
# no data signal
output_str = ' '.join(('No', output_str, 'data for', date.strftime('%D')))
# remove extra spaces, if any
output_str = " ".join(output_str.split())
print (output_str)
return data, mdata
def _load_next(self):
"""Load the next days data (or file) without incrementing the date.
Repeated calls will not advance date/file and will produce the same data
Uses info stored in object to either increment the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
next_date = self.date + pds.DateOffset(days=1)
return self._load_data(date=next_date)
else:
return self._load_data(fid=self._fid+1)
def _load_prev(self):
"""Load the next days data (or file) without decrementing the date.
Repeated calls will not decrement date/file and will produce the same data
Uses info stored in object to either decrement the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
prev_date = self.date - pds.DateOffset(days=1)
return self._load_data(date=prev_date)
else:
return self._load_data(fid=self._fid-1)
def _set_load_parameters(self, date=None, fid=None):
self.date = date
self._fid = fid
if date is not None:
year, doy = utils.getyrdoy(date)
self.yr = year
self.doy = doy
self._load_by_date = True
else:
self.yr = None
self.doy = None
self._load_by_date = False
def load(self, yr=None, doy=None, date=None, fname=None, fid=None,
verifyPad=False):
"""Load instrument data into Instrument object .data.
Parameters
----------
yr : integer
year for desired data
doy : integer
day of year
date : datetime object
date to load
fname : 'string'
filename to be loaded
verifyPad : boolean
if True, padding data not removed (debug purposes)
Returns
--------
Void. Data is added to self.data
Note
----
Loads data for a chosen instrument into .data. Any functions chosen
by the user and added to the custom processing queue (.custom.add)
are automatically applied to the data before it is available to
user in .data.
"""
# set options used by loading routine based upon user input
if date is not None:
self._set_load_parameters(date=date, fid=None)
# increment
inc = pds.DateOffset(days=1)
curr = date
elif (yr is not None) & (doy is not None):
date = pds.datetime(yr, 1, 1) + pds.DateOffset(days=(doy-1))
self._set_load_parameters(date=date, fid=None)
# increment
inc = pds.DateOffset(days=1)
curr = self.date
elif fname is not None:
# date will have to be set later by looking at the data
self._set_load_parameters(date=None, fid=self.files.get_index(fname))
# increment one file at a time
inc = 1
curr = self._fid.copy()
elif fid is not None:
self._set_load_parameters(date=None, fid=fid)
# increment one file at a time
inc = 1
curr = fid
else:
estr = 'Must supply a yr,doy pair, or datetime object, or filename'
estr = '{:s} to load data from.'.format(estr)
raise TypeError(estr)
self.orbits._reset()
# if pad or multi_file_day is true, need to have a three day/file load
loop_pad = self.pad if self.pad is not None else pds.DateOffset(seconds=0)
if (self.pad is not None) | self.multi_file_day:
if self._next_data.empty & self._prev_data.empty:
# data has not already been loaded for previous and next days
# load data for all three
print('Initializing three day/file window')
# using current date or fid
self._prev_data, self._prev_meta = self._load_prev()
self._curr_data, self._curr_meta = \
self._load_data(date=self.date, fid=self._fid)
self._next_data, self._next_meta = self._load_next()
else:
# moving forward in time
if self._next_data_track == curr:
self._prev_data = self._curr_data
self._prev_meta = self._curr_meta
self._curr_data = self._next_data
self._curr_meta = self._next_meta
self._next_data, self._next_meta = self._load_next()
# moving backward in time
elif self._prev_data_track == curr:
self._next_data = self._curr_data
self._next_meta = self._curr_meta
self._curr_data = self._prev_data
self._curr_meta = self._prev_meta
self._prev_data, self._prev_meta = self._load_prev()
# jumped in time/or switched from filebased to date based access
else:
self._prev_data, self._prev_meta = self._load_prev()
self._curr_data, self._curr_meta = \
self._load_data(date=self.date, fid=self._fid)
self._next_data, self._next_meta = self._load_next()
# make sure datetime indices for all data is monotonic
if not self._prev_data.index.is_monotonic_increasing:
self._prev_data.sort_index(inplace=True)
if not self._curr_data.index.is_monotonic_increasing:
self._curr_data.sort_index(inplace=True)
if not self._next_data.index.is_monotonic_increasing:
self._next_data.sort_index(inplace=True)
# make tracking indexes consistent with new loads
self._next_data_track = curr + inc
self._prev_data_track = curr - inc
# attach data to object
if not self._curr_data.empty:
self.data = self._curr_data.copy()
self.meta = self._curr_meta.copy()
else:
self.data = DataFrame(None)
# line below removed as it would delete previous meta, if any
# if you end a seasonal analysis with a day with no data, then
# no meta: self.meta = _meta.Meta()
# multi file days can extend past a single day, only want data from
# specific date if loading by day
# set up times for the possible data padding coming up
if self._load_by_date:
#print ('double trouble')
first_time = self.date
first_pad = self.date - loop_pad
last_time = self.date + pds.DateOffset(days=1)
last_pad = self.date + pds.DateOffset(days=1) + loop_pad
want_last_pad = False
# loading by file, can't be a multi_file-day flag situation
elif (not self._load_by_date) and (not self.multi_file_day):
#print ('single trouble')
first_time = self._curr_data.index[0]
first_pad = first_time - loop_pad
last_time = self._curr_data.index[-1]
last_pad = last_time + loop_pad
want_last_pad = True
else:
            raise ValueError("multi_file_day and loading by date are effectively equivalent. " +
"Can't have multi_file_day and load by file.")
#print (first_pad, first_time, last_time, last_pad)
# pad data based upon passed parameter
if (not self._prev_data.empty) & (not self.data.empty):
padLeft = self._prev_data.loc[first_pad : self.data.index[0]]
if len(padLeft) > 0:
if (padLeft.index[-1] == self.data.index[0]) :
padLeft = padLeft.iloc[:-1, :]
self.data = pds.concat([padLeft, self.data])
if (not self._next_data.empty) & (not self.data.empty):
padRight = self._next_data.loc[self.data.index[-1] : last_pad]
if len(padRight) > 0:
if (padRight.index[0] == self.data.index[-1]) :
padRight = padRight.iloc[1:, :]
self.data = pds.concat([self.data, padRight])
self.data = self.data.ix[first_pad : last_pad]
# want exclusive end slicing behavior from above
if (self.data.index[-1] == last_pad) & (not want_last_pad):
self.data = self.data.iloc[:-1, :]
## drop any possible duplicate index times
##self.data.drop_duplicates(inplace=True)
#self.data = self.data[~self.data.index.duplicated()]
# if self.pad is False, load single day
else:
self.data, meta = self._load_data(date=self.date, fid=self._fid)
if not self.data.empty:
self.meta = meta
# check if load routine actually returns meta
if self.meta.data.empty:
self.meta[self.data.columns] = {'long_name': self.data.columns,
'units': ['']*len(self.data.columns)}
# if loading by file set the yr, doy, and date
if not self._load_by_date:
if self.pad is not None:
temp = first_time
else:
temp = self.data.index[0]
self.date = pds.datetime(temp.year, temp.month, temp.day)
self.yr, self.doy = utils.getyrdoy(self.date)
if not self.data.empty:
self._default_rtn(self)
# clean
if (not self.data.empty) & (self.clean_level != 'none'):
self._clean_rtn(self)
# apply custom functions
if not self.data.empty:
self.custom._apply_all(self)
# remove the excess padding, if any applied
if (self.pad is not None) & (not self.data.empty) & (not verifyPad):
self.data = self.data[first_time : last_time]
if (self.data.index[-1] == last_time) & (not want_last_pad):
self.data = self.data.iloc[:-1, :]
# transfer any extra attributes in meta to the Instrument object
self.meta.transfer_attributes_to_instrument(self)
sys.stdout.flush()
return
def download(self, start, stop, freq='D', user=None, password=None):
"""Download data for given Instrument object from start to stop.
Parameters
----------
start : pandas.datetime
start date to download data
stop : pandas.datetime
stop date to download data
freq : string
Stepsize between dates for season, 'D' for daily, 'M' monthly
(see pandas)
user : string
username, if required by instrument data archive
password : string
password, if required by instrument data archive
Note
----
        Data will be downloaded to pysat_data_dir/platform/name/tag
If Instrument bounds are set to defaults they are updated
after files are downloaded.
"""
import errno
# make sure directories are there, otherwise create them
try:
os.makedirs(self.files.data_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
print('Downloading data to: ', self.files.data_path)
date_array = utils.season_date_range(start, stop, freq=freq)
if user is None:
self._download_rtn(date_array,
tag=self.tag,
sat_id=self.sat_id,
data_path=self.files.data_path)
else:
self._download_rtn(date_array,
tag=self.tag,
sat_id=self.sat_id,
data_path=self.files.data_path,
user=user,
password=password)
# get current file date range
first_date = self.files.start_date
last_date = self.files.stop_date
print('Updating pysat file list')
self.files.refresh()
# if instrument object has default bounds, update them
if len(self.bounds[0]) == 1:
if(self.bounds[0][0] == first_date and
self.bounds[1][0] == last_date):
print('Updating instrument object bounds.')
self.bounds = None
@property
def bounds(self):
"""Boundaries for iterating over instrument object by date or file.
Parameters
----------
start : datetime object, filename, or None (default)
start of iteration, if None uses first data date.
list-like collection also accepted
end : datetime object, filename, or None (default)
end of iteration, inclusive. If None uses last data date.
list-like collection also accepted
Note
----
Both start and stop must be the same type (date, or filename) or None
Examples
--------
::
inst = pysat.Instrument(platform=platform,
name=name,
tag=tag)
start = pysat.datetime(2009,1,1)
stop = pysat.datetime(2009,1,31)
inst.bounds = (start,stop)
            start2 = pysat.datetime(2010,1,1)
stop2 = pysat.datetime(2010,2,14)
inst.bounds = ([start, start2], [stop, stop2])
"""
return self._iter_start, self._iter_stop
@bounds.setter
def bounds(self, value=None):
if value is None:
value = (None, None)
if len(value) < 2:
raise ValueError('Must supply both a start and end date/file' +
'Supply None if you want the first/last possible')
start = value[0]
end = value[1]
# get the frequency, or step size, of season
if len(value) == 3:
step = value[2]
else:
# default do daily
step = 'D'
if (start is None) and (end is None):
# set default
self._iter_start = [self.files.start_date]
self._iter_stop = [self.files.stop_date]
self._iter_type = 'date'
if self._iter_start[0] is not None:
# check here in case Instrument is initialized with no input
self._iter_list = utils.season_date_range(self._iter_start, self._iter_stop, freq=step)
elif (hasattr(start, '__iter__') and not isinstance(start,str)) and (hasattr(end, '__iter__') and not isinstance(end,str)):
base = type(start[0])
for s, t in zip(start, end):
if (type(s) != type(t)) or (type(s) != base):
raise ValueError('Start and end items must all be of the same type')
if isinstance(start[0], str):
self._iter_type = 'file'
self._iter_list = self.files.get_file_array(start, end)
elif isinstance(start[0], pds.datetime):
self._iter_type = 'date'
self._iter_list = utils.season_date_range(start, end, freq=step)
else:
raise ValueError('Input is not a known type, string or datetime')
self._iter_start = start
self._iter_stop = end
elif (hasattr(start, '__iter__') and not isinstance(start,str)) or (hasattr(end, '__iter__') and not isinstance(end,str)):
raise ValueError('Both start and end must be iterable if one bound is iterable')
elif isinstance(start, str) or isinstance(end, str):
if isinstance(start, pds.datetime) or isinstance(end, pds.datetime):
raise ValueError('Not allowed to mix file and date bounds')
if start is None:
start = self.files[0]
if end is None:
end = self.files.files[-1]
self._iter_start = [start]
self._iter_stop = [end]
self._iter_list = self.files.get_file_array(self._iter_start, self._iter_stop)
self._iter_type = 'file'
elif isinstance(start, pds.datetime) or isinstance(end, pds.datetime):
if start is None:
start = self.files.start_date
if end is None:
end = self.files.stop_date
self._iter_start = [start]
self._iter_stop = [end]
self._iter_list = utils.season_date_range(start, end, freq=step)
self._iter_type = 'date'
else:
raise ValueError('Provided an invalid combination of bounds. ' +
'if specifying by file, both bounds must be by file. Other ' +
'combinations of datetime objects and None are allowed.')
def __iter__(self):
"""Iterates instrument object by loading subsequent days or files.
Note
----
Limits of iteration, and iteration type (date/file)
set by `bounds` attribute.
Default bounds are the first and last dates from files on local system.
Examples
--------
::
inst = pysat.Instrument(platform=platform,
name=name,
tag=tag)
start = pysat.datetime(2009,1,1)
stop = pysat.datetime(2009,1,31)
inst.bounds = (start,stop)
for inst in inst:
print('Another day loaded', inst.date)
"""
if self._iter_type == 'file':
for fname in self._iter_list:
self.load(fname=fname)
yield self
elif self._iter_type == 'date':
for date in self._iter_list:
self.load(date=date)
yield self
def next(self, verifyPad=False):
"""Manually iterate through the data loaded in Instrument object.
Bounds of iteration and iteration type (day/file) are set by
`bounds` attribute.
Note
----
If there were no previous calls to load then the
first day(default)/file will be loaded.
"""
if self._iter_type == 'date':
if self.date is not None:
idx, = np.where(self._iter_list == self.date)
if (len(idx) == 0) | (idx+1 >= len(self._iter_list)):
raise StopIteration('Outside the set date boundaries.')
else:
idx += 1
self.load(date=self._iter_list[idx[0]], verifyPad=verifyPad)
else:
self.load(date=self._iter_list[0], verifyPad=verifyPad)
elif self._iter_type == 'file':
if self._fid is not None:
first = self.files.get_index(self._iter_list[0])
last = self.files.get_index(self._iter_list[-1])
if (self._fid < first) | (self._fid+1 > last):
raise StopIteration('Outside the set file boundaries.')
else:
self.load(fname=self._iter_list[self._fid+1-first], verifyPad=verifyPad)
else:
self.load(fname=self._iter_list[0], verifyPad=verifyPad)
def prev(self, verifyPad=False):
"""Manually iterate backwards through the data in Instrument object.
Bounds of iteration and iteration type (day/file)
are set by `bounds` attribute.
Note
----
If there were no previous calls to load then the
        last day (default)/file will be loaded.
"""
if self._iter_type == 'date':
if self.date is not None:
idx, = np.where(self._iter_list == self.date)
if (len(idx) == 0) | (idx-1 < 0):
raise StopIteration('Outside the set date boundaries.')
else:
idx -= 1
self.load(date=self._iter_list[idx[0]], verifyPad=verifyPad)
else:
self.load(date=self._iter_list[-1], verifyPad=verifyPad)
elif self._iter_type == 'file':
if self._fid is not None:
first = self.files.get_index(self._iter_list[0])
last = self.files.get_index(self._iter_list[-1])
if (self._fid-1 < first) | (self._fid > last):
raise StopIteration('Outside the set file boundaries.')
else:
self.load(fname=self._iter_list[self._fid-1-first], verifyPad=verifyPad)
else:
self.load(fname=self._iter_list[-1], verifyPad=verifyPad)
def _get_data_info(self, data, format):
# get type of data
data_type = data.dtype
# check if older format
if format[:7] == 'NETCDF3':
old_format = True
else:
old_format = False
# check for object type
if data_type != np.dtype('O'):
# simple data, not an object
# no 64bit ints in netCDF3
if (data_type == np.int64) & old_format:
data = data.astype(np.int32)
if data_type == np.dtype('<M8[ns]'):
if not old_format:
data_type = np.int64
else:
data_type = np.float
datetime_flag = True
else:
datetime_flag = False
        else:
            # dealing with a more complicated object
            sub_d = data.loc[0]
            # default the flag so the return below does not fail for objects
            datetime_flag = False
        return data, data_type, datetime_flag
def to_netcdf4(self, fname=None, format=None, base_instrument=None):
"""Stores loaded data into a netCDF3/4 file.
Parameters
----------
fname : string
full path to save instrument object to
format : string
format keyword passed to netCDF4 routine
NETCDF3_CLASSIC, NETCDF3_64BIT, NETCDF4_CLASSIC, and NETCDF4
base_instrument : pysat.Instrument
used as a comparison, only attributes that are present with
self and not on base_instrument are written to netCDF
Note
----
Stores 1-D data along dimension 'epoch' - the date time index.
Stores object data (e.g. dataframes within series) separately
- The name of the series is used to prepend extra variable
dimensions within netCDF, key_2, key_3; first dimension time
- The index organizing the data stored as key_sample_index
- from_netcdf3 uses this naming scheme to reconstruct data structure
The datetime index is stored as 'UNIX time'. netCDF-3 doesn't support
64-bit integers so it is stored as a 64-bit float. This results in a
loss of datetime precision when converted back to datetime index
up to hundreds of nanoseconds. Use netCDF4 if this is a problem.
All attributes attached to instrument meta are written to netCDF attrs.
"""
import netCDF4
import pysat
if format is None:
format = 'NETCDF4'
else:
format = format.upper()
base_instrument = Instrument() if base_instrument is None else base_instrument
with netCDF4.Dataset(fname, mode='w', format=format) as out_data:
num = len(self.data.index)
out_data.createDimension('epoch', num)
# write out the datetime index
if format == 'NETCDF4':
cdfkey = out_data.createVariable('epoch', 'i8', dimensions=('epoch'),)
cdfkey.units = 'Microseconds since 1970-1-1 00:00:00'
cdfkey[:] = (self.data.index.values.astype(np.int64)*1E-3).astype(np.int64)
else:
# can't store full time resolution
cdfkey = out_data.createVariable('epoch', 'f8', dimensions=('epoch'),)
cdfkey.units = 'Milliseconds since 1970-1-1 00:00:00'
cdfkey[:] = (self.data.index.values.astype(int)*1.E-6).astype(np.float)
cdfkey.long_name = 'UNIX time'
cdfkey.calendar = 'standard'
# store all of the data in dataframe columns
for key in self.data.columns:
# print ('key', key)
# print (self[key])
if self[key].dtype != np.dtype('O'):
# not an object, simple column of data, write it out
if ((self[key].dtype == np.int64) & (format[:7] == 'NETCDF3')):
self[key] = self[key].astype(np.int32)
# check if it is a datetime column
datetime_flag = False
coltype = self[key].dtype
# check for datetime index
if coltype == np.dtype('<M8[ns]'):
if format == 'NETCDF4':
coltype = np.int64
else:
coltype = np.float
datetime_flag = True
# print ('gonna create variable')
cdfkey = out_data.createVariable(key,
coltype,
dimensions=('epoch'), )
# print ('created')
# attach any meta data
try:
new_dict = self.meta[key].to_dict()
if u'_FillValue' in new_dict.keys():
# make sure _FillValue is the same type as the data
new_dict['_FillValue'] = np.array(new_dict['_FillValue']).astype(self[key].dtype)
if u'FillVal' in new_dict.keys():
# make sure FillVal is the same type as the data
new_dict['FillVal'] = np.array(new_dict['FillVal']).astype(self[key].dtype)
# really attach metadata now
cdfkey.setncatts(new_dict)
except:
print(', '.join(('Unable to find MetaData for',key)) )
if datetime_flag:
if format == 'NETCDF4':
# cdfkey.units = 'Microseconds since 1970-1-1 00:00:00'
cdfkey[:] = (self[key].values.astype(coltype)*1.E-3).astype(coltype)
else:
# cdfkey.units = 'Milliseconds since 1970-1-1 00:00:00'
cdfkey[:] = (self[key].values.astype(coltype)*1.E-6).astype(coltype)
# cdfkey.long_name = 'UNIX time'
else:
# #cdfkey.units = ''
# if self[key].iloc[0].index.name is not None:
# cdfkey.long_name = self[key].iloc[0].index.name
# else:
# cdfkey.long_name = key
cdfkey[:] = self[key].values #.to_native_types()
# attach the data
# cdfkey[:] = self[key].values
else:
if not isinstance(self[0, key], pysat.DataFrame):
# dealing with a string
cdfkey = out_data.createVariable(key,
'S30',
dimensions=('epoch'), )
# attach any meta data
try:
new_dict = self.meta[key].to_dict()
if u'_FillValue' in new_dict.keys():
# make sure _FillValue is the same type as the data
new_dict['_FillValue'] = np.array(new_dict['_FillValue']).astype(self[key].dtype)
if u'FillVal' in new_dict.keys():
# make sure FillVal is the same type as the data
new_dict['FillVal'] = np.array(new_dict['FillVal']).astype(self[key].dtype)
# really attach metadata now
cdfkey.setncatts(new_dict)
except:
print(', '.join(('Unable to find MetaData for',key)) )
else:
# we are dealing with a more complicated object
# presuming a series with a dataframe in each location
dims = np.shape(self[key].iloc[0])
obj_dim_names = []
for i, dim in enumerate(dims[:-1]):
# don't need to go over last dimension value,
# it covers number of columns
obj_dim_names.append(key+'_dim_%i' % (i+1))
out_data.createDimension(obj_dim_names[-1], dim)
# total dimensions stored for object are epoch plus ones just above
var_dim = tuple(['epoch']+obj_dim_names)
#print (key, var_dim)
# iterate over columns and store
try:
iterable = self[key].iloc[0].columns
is_frame = True
except AttributeError:
# looking at a series, which doesn't have columns
iterable = self[key].iloc[0].name
is_frame = False
for col in iterable:
if is_frame:
coltype = self[key].iloc[0][col].dtype
else:
coltype = self[key].iloc[0].dtype
if ((coltype == np.int64) & (format[:7] == 'NETCDF3')):
coltype = np.int32
#elif coltype == np.dtype('O'):
# if isinstance(self[key].iloc[0][col][0], basestring):
# coltype = 'S1'
#print (key+'_' +col, var_dim, coltype)
cdfkey = out_data.createVariable(key + '_' +col,
coltype,
dimensions=var_dim)
#cdfkey.long_name = col
#cdfkey.units = ''
if is_frame:
# attach any meta data
try:
cdfkey.setncatts(self.meta[key][col].to_dict())
except:
print(', '.join(('Unable to find MetaData for',key,col)) )
# attach data
for i in range(num):
cdfkey[i, :] = self[key].iloc[i][col].values.astype(coltype)
else:
# attach any meta data
cdfkey.setncatts(self.meta[key].to_dict())
# attach data
for i in range(num):
cdfkey[i, :] = self[key].iloc[i].values.astype(coltype)
# store the dataframe index for each time of main dataframe
datetime_flag = False
coltype = self[key].iloc[0].index.dtype
# check for datetime index
if coltype == np.dtype('<M8[ns]'):
if format == 'NETCDF4':
coltype = np.int64
else:
coltype = np.float
datetime_flag = True
#if coltype == np.int64:
# coltype = np.int32
#print (key+'_' + '_ample', var_dim, coltype)
cdfkey = out_data.createVariable(key+'_dim_1',
coltype, dimensions=var_dim)
if datetime_flag:
#print('datetime flag')
if format == 'NETCDF4':
cdfkey.units = 'Microseconds since 1970-1-1 00:00:00'
for i in range(num):
cdfkey[i, :] = (self[key].iloc[i].index.values.astype(coltype)*1.E-3).astype(coltype)
else:
cdfkey.units = 'Milliseconds since 1970-1-1 00:00:00'
for i in range(num):
cdfkey[i, :] = (self[key].iloc[i].index.values.astype(coltype)*1.E-6).astype(coltype)
cdfkey.long_name = 'UNIX time'
else:
#cdfkey.units = ''
if self[key].iloc[0].index.name is not None:
cdfkey.long_name = self[key].iloc[0].index.name
else:
cdfkey.long_name = key
for i in range(num):
cdfkey[i, :] = self[key].iloc[i].index.to_native_types()
# store any non-standard attributes
base_attrb = dir(base_instrument)
this_attrb = dir(self)
adict = {}
for key in this_attrb:
if key not in base_attrb:
if key[0] != '_':
adict[key] = self.__getattribute__(key)
# store any non-standard attributes attached to meta
base_attrb = dir(base_instrument.meta)
this_attrb = dir(self.meta)
for key in this_attrb:
if key not in base_attrb:
if key[0] != '_':
adict[key] = self.meta.__getattribute__(key)
adict['pysat_version'] = pysat.__version__
adict['Conventions'] = 'CF-1.6'
# check for binary types
for key in adict.keys():
if isinstance(adict[key], bool):
adict[key] = int(adict[key])
out_data.setncatts(adict)
return
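# Illustrative sketch (not part of the original source): writing loaded data
# out with to_netcdf4. The platform/name/tag values are placeholders; the
# format keyword defaults to 'NETCDF4', while 'NETCDF3_64BIT' trades datetime
# precision for netCDF-3 compatibility as described in the docstring above.
#
#   inst = Instrument(platform=platform, name=name, tag=tag)
#   inst.load(date=pysat.datetime(2009, 1, 1))
#   inst.to_netcdf4(fname='example_output.nc', format='NETCDF4')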
|
aburrell/pysat
|
pysat/_instrument.py
|
Python
|
bsd-3-clause
| 56,546
|
[
"NetCDF"
] |
46bdca0dd68c99237a1cfc2f57bbc5a448a1fb0ea6dc7da283dff9c014dc151c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Public Domain
"""
CairoSVG - A Simple SVG Converter for Cairo
===========================================
CairoSVG is a SVG converter based on Cairo. It can export SVG files to PDF,
PostScript and PNG files.
For further information, please visit the `CairoSVG Website
<http://www.cairosvg.org/>`_.
"""
import codecs
import re
from os import path
from distutils.core import setup
init_path = path.join(path.dirname(__file__), 'cairosvg', '__init__.py')
with codecs.open(init_path, 'r', 'utf-8') as fd:
VERSION = re.search("VERSION = '([^']+)'", fd.read().strip()).group(1)
# When the version is updated, ``cairosvg.VERSION`` must be modified.
# A new section in the ``NEWS`` file must be added too.
setup(
name="CairoSVG",
version=VERSION,
description="A Simple SVG Converter for Cairo",
long_description=__doc__,
author="Kozea",
author_email="guillaume.ayoub@kozea.fr",
url="http://www.cairosvg.org/",
license="GNU LGPL v3+",
platforms="Any",
packages=["cairosvg", "cairosvg.surface"],
provides=["cairosvg"],
install_requires=["cairocffi"],
scripts=["bin/cairosvg"],
keywords=["svg", "cairo", "pdf", "png", "postscript"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: "
"GNU Lesser General Public License v3 or later (LGPLv3+)",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Multimedia :: Graphics :: Graphics Conversion"])
|
tgaillar/CairoSVG
|
setup.py
|
Python
|
lgpl-3.0
| 1,955
|
[
"VisIt"
] |
6d2966c6cf8d08a03a830e143f37857dc4987c01444bb1959b6d41c90db9aab3
|
"""
Helper functions which obtain forces and energies
corresponding to atoms in structures. These functions automatically
cast atoms into their respective atomic environments.
"""
import numpy as np
import multiprocessing as mp
from typing import Tuple, List, Union
from flare.env import AtomicEnvironment
from flare.gp import GaussianProcess
from flare.mgp import MappedGaussianProcess
from flare.struc import Structure
from math import nan
def predict_on_atom(
param: Tuple[Structure, int, GaussianProcess]
) -> ("np.ndarray", "np.ndarray"):
"""
Return the forces/std. dev. uncertainty associated with an individual atom
in a structure, without necessarily having cast it to a chemical
environment. In order to work with other functions,
all arguments are passed in as a tuple.
:param param: tuple of FLARE Structure, atom index, and Gaussian Process
object
:type param: Tuple(Structure, integer, GaussianProcess)
:return: 3-element force array and associated uncertainties
:rtype: (np.ndarray, np.ndarray)
"""
# Unpack the input tuple, convert a chemical environment
structure, atom, gp = param
# Obtain the associated chemical environment
chemenv = AtomicEnvironment(structure, atom, gp.cutoffs, cutoffs_mask=gp.hyps_mask)
# predict force components and standard deviations
force, var = gp.predict_force_xyz(chemenv)
std = np.sqrt(np.abs(var))
return force, std
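# Illustrative sketch (not part of the original source): predict_on_atom takes
# a single (structure, atom_index, gp) tuple so it can be handed directly to
# multiprocessing workers. 'structure' and 'gp' here are assumed, pre-built
# FLARE objects.
#
#   force, std = predict_on_atom((structure, 0, gp))
#   # force.shape == (3,), std.shape == (3,)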
def predict_on_atom_en(
param: Tuple[Structure, int, GaussianProcess]
) -> ("np.ndarray", "np.ndarray", float):
"""
Return the forces/std. dev. uncertainty / energy associated with an
individual atom in a structure, without necessarily having cast it to a
chemical environment. In order to work with other functions,
all arguments are passed in as a tuple.
:param param: tuple of FLARE Structure, atom index, and Gaussian Process
object
:type param: Tuple(Structure, integer, GaussianProcess)
:return: 3-element force array, associated uncertainties, and local energy
:rtype: (np.ndarray, np.ndarray, float)
"""
# Unpack the input tuple, convert a chemical environment
structure, atom, gp = param
# Obtain the associated chemical environment
chemenv = AtomicEnvironment(structure, atom, gp.cutoffs, cutoffs_mask=gp.hyps_mask)
# Predict forces / std. dev / energy
force, var = gp.predict_force_xyz(chemenv)
std = np.sqrt(np.abs(var))
local_energy = gp.predict_local_energy(chemenv)
return force, std, local_energy
def predict_on_atom_en_std(param):
"""Predict local energy and predictive std of a chemical environment."""
structure, atom, gp = param
chemenv = AtomicEnvironment(structure, atom, gp.cutoffs, cutoffs_mask=gp.hyps_mask)
# predict local energy
loc_en, loc_en_var = gp.predict_local_energy_and_var(chemenv)
loc_en_std = np.sqrt(np.abs(loc_en_var))
return loc_en, loc_en_std
def predict_on_atom_efs(param):
"""Predict the local energy, forces, and partial stresses and predictive
variances of a chemical environment."""
structure, atom, gp = param
chemenv = AtomicEnvironment(structure, atom, gp.cutoffs)
return gp.predict_efs(chemenv)
def predict_on_structure(
structure: Structure,
gp: GaussianProcess,
n_cpus: int = None,
write_to_structure: bool = True,
selective_atoms: List[int] = None,
skipped_atom_value=0,
) -> ("np.ndarray", "np.ndarray"):
"""
Return the forces/std. dev. uncertainty associated with each
individual atom in a structure. Forces are stored directly to the
structure and are also returned.
:param structure: FLARE structure to obtain forces for, with N atoms
:param gp: Gaussian Process model
:param write_to_structure: Write results to structure's forces,
std attributes
:param selective_atoms: Only predict on these atoms; e.g. [0,1,2] will
only predict and return for those atoms
:param skipped_atom_value: What value to use for atoms that are skipped.
Defaults to 0 but other options could be e.g. NaN. This value will NOT
be written to the structure even if write_to_structure is True.
:return: N x 3 numpy array of forces, N x 3 numpy array of uncertainties
:rtype: (np.ndarray, np.ndarray)
"""
forces = np.zeros((structure.nat, 3))
stds = np.zeros((structure.nat, 3))
if selective_atoms:
forces.fill(skipped_atom_value)
stds.fill(skipped_atom_value)
else:
selective_atoms = []
for n in range(structure.nat):
# Skip the atoms which we aren't predicting on if
# selective atoms is on.
if n not in selective_atoms and selective_atoms:
continue
chemenv = AtomicEnvironment(structure, n, gp.cutoffs, cutoffs_mask=gp.hyps_mask)
force, var = gp.predict_force_xyz(chemenv)
std = np.sqrt(np.abs(var))
forces[n] = force
stds[n] = std
if write_to_structure:
structure.forces[n] = force
structure.stds[n] = std
return forces, stds
def predict_on_structure_par(
structure: Structure,
gp: GaussianProcess,
n_cpus: int = None,
write_to_structure: bool = True,
selective_atoms: List[int] = None,
skipped_atom_value=0,
) -> ("np.ndarray", "np.ndarray"):
"""
Return the forces/std. dev. uncertainty associated with each
individual atom in a structure. Forces are stored directly to the
structure and are also returned.
:param structure: FLARE structure to obtain forces for, with N atoms
:param gp: Gaussian Process model
:param n_cpus: Number of cores to parallelize over
:param write_to_structure: Write results to structure's forces,
std attributes
:param selective_atoms: Only predict on these atoms; e.g. [0,1,2] will
only predict and return for those atoms
:param skipped_atom_value: What value to use for atoms that are skipped.
Defaults to 0 but other options could be e.g. NaN. This value will NOT
be written to the structure even if write_to_structure is True.
:return: N x 3 array of forces, N x 3 array of uncertainties
:rtype: (np.ndarray, np.ndarray)
"""
# Just work in serial if the number of cpus is 1
# or the gp is not parallelized by atoms
if n_cpus == 1 or not gp.per_atom_par:
return predict_on_structure(
structure=structure,
gp=gp,
n_cpus=n_cpus,
write_to_structure=write_to_structure,
selective_atoms=selective_atoms,
skipped_atom_value=skipped_atom_value,
)
forces = np.zeros(shape=(structure.nat, 3))
stds = np.zeros(shape=(structure.nat, 3))
if selective_atoms:
forces.fill(skipped_atom_value)
stds.fill(skipped_atom_value)
else:
selective_atoms = []
# Automatically detect number of cpus available.
if n_cpus is None:
pool = mp.Pool(processes=mp.cpu_count())
else:
pool = mp.Pool(processes=n_cpus)
# Parallelize over atoms in structure.
results = []
for atom in range(structure.nat):
# If selective_atoms is on, skip atoms that are not selected.
if atom not in selective_atoms and selective_atoms:
# Keep length of results equal to nat.
results.append(None)
continue
results.append(pool.apply_async(predict_on_atom, args=[(structure, atom, gp)]))
pool.close()
pool.join()
for i in range(structure.nat):
if i not in selective_atoms and selective_atoms:
continue
r = results[i].get()
forces[i] = r[0]
stds[i] = r[1]
if write_to_structure:
structure.forces[i] = r[0]
structure.stds[i] = r[1]
return forces, stds
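# Illustrative sketch (not part of the original source): the parallel variant
# falls back to the serial path when n_cpus == 1 or gp.per_atom_par is False,
# otherwise it dispatches one predict_on_atom task per atom via
# multiprocessing.Pool.apply_async.
#
#   forces, stds = predict_on_structure_par(structure, gp, n_cpus=4)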
def predict_on_structure_efs(
structure: Structure,
gp: GaussianProcess,
n_cpus: int = None,
write_to_structure: bool = True,
selective_atoms: List[int] = None,
skipped_atom_value=0,
):
local_energies = np.zeros(structure.nat)
forces = np.zeros((structure.nat, 3))
partial_stresses = np.zeros((structure.nat, 6))
local_energy_stds = np.zeros(structure.nat)
force_stds = np.zeros((structure.nat, 3))
partial_stress_stds = np.zeros((structure.nat, 6))
for n in range(structure.nat):
chemenv = AtomicEnvironment(structure, n, gp.cutoffs)
(
en_pred,
force_pred,
stress_pred,
en_var,
force_var,
stress_var,
) = gp.predict_efs(chemenv)
local_energies[n] = en_pred
forces[n] = force_pred
partial_stresses[n] = stress_pred
local_energy_stds[n] = en_var
force_stds[n] = force_var
partial_stress_stds[n] = stress_var
# Convert variances to standard deviations.
local_energy_stds = np.sqrt(np.abs(local_energy_stds))
force_stds = np.sqrt(np.abs(force_stds))
partial_stress_stds = np.sqrt(np.abs(partial_stress_stds))
if write_to_structure:
write_efs_to_structure(
structure,
local_energies,
forces,
partial_stresses,
local_energy_stds,
force_stds,
partial_stress_stds,
)
return (
local_energies,
forces,
partial_stresses,
local_energy_stds,
force_stds,
partial_stress_stds,
)
def predict_on_structure_efs_par(
structure: Structure,
gp: GaussianProcess,
n_cpus: int = None,
write_to_structure: bool = True,
selective_atoms: List[int] = None,
skipped_atom_value=0,
):
# Just work in serial if the number of cpus is 1,
# or the gp is not parallelized by atoms
if (n_cpus == 1) or (not gp.per_atom_par):
return predict_on_structure_efs(
structure=structure,
gp=gp,
n_cpus=n_cpus,
write_to_structure=write_to_structure,
selective_atoms=selective_atoms,
skipped_atom_value=skipped_atom_value,
)
local_energies = np.zeros(structure.nat)
forces = np.zeros((structure.nat, 3))
partial_stresses = np.zeros((structure.nat, 6))
local_energy_stds = np.zeros(structure.nat)
force_stds = np.zeros((structure.nat, 3))
partial_stress_stds = np.zeros((structure.nat, 6))
# Set the number of cpus.
if n_cpus is None:
pool = mp.Pool(processes=mp.cpu_count())
else:
pool = mp.Pool(processes=n_cpus)
# Parallelize over atoms in structure.
results = []
for atom in range(structure.nat):
results.append(
pool.apply_async(predict_on_atom_efs, args=[(structure, atom, gp)])
)
pool.close()
pool.join()
for i in range(structure.nat):
r = results[i].get()
local_energies[i] = r[0]
forces[i] = r[1]
partial_stresses[i] = r[2]
local_energy_stds[i] = r[3]
force_stds[i] = r[4]
partial_stress_stds[i] = r[5]
# Convert variances to standard deviations.
local_energy_stds = np.sqrt(np.abs(local_energy_stds))
force_stds = np.sqrt(np.abs(force_stds))
partial_stress_stds = np.sqrt(np.abs(partial_stress_stds))
if write_to_structure:
write_efs_to_structure(
structure,
local_energies,
forces,
partial_stresses,
local_energy_stds,
force_stds,
partial_stress_stds,
)
return (
local_energies,
forces,
partial_stresses,
local_energy_stds,
force_stds,
partial_stress_stds,
)
def write_efs_to_structure(
structure,
local_energies,
forces,
partial_stresses,
local_energy_stds,
force_stds,
partial_stress_stds,
):
structure.local_energies = local_energies
structure.forces = forces
structure.partial_stresses = partial_stresses
structure.local_energy_stds = local_energy_stds
structure.stds = force_stds
structure.partial_stress_stds = partial_stress_stds
# Record potential energy
structure.potential_energy = np.sum(structure.local_energies)
# Compute stress tensor.
# FLARE format: xx, xy, xz, yy, yz, zz
current_volume = np.linalg.det(structure.cell)
flare_stress = np.sum(partial_stresses, 0) / current_volume
# Convert stress tensor to ASE format: xx yy zz yz xz xy
structure.stress = -np.array(
[
flare_stress[0],
flare_stress[3],
flare_stress[5],
flare_stress[4],
flare_stress[2],
flare_stress[1],
]
)
# Record stress uncertainties.
stress_stds = (
np.sqrt(np.sum(structure.partial_stress_stds ** 2, 0)) / current_volume
)
structure.stress_stds = np.array(
[
stress_stds[0],
stress_stds[3],
stress_stds[5],
stress_stds[4],
stress_stds[2],
stress_stds[1],
]
)
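# Illustrative mapping (not part of the original source): reordering the summed
# partial stresses from the FLARE convention to the ASE Voigt convention.
#   FLARE order: [xx, xy, xz, yy, yz, zz]  (indices 0..5)
#   ASE order:   [xx, yy, zz, yz, xz, xy]  -> indices [0, 3, 5, 4, 2, 1]
# which is exactly the index pattern used when building structure.stress and
# structure.stress_stds above; the leading minus sign follows the ASE sign
# convention for stress.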
def predict_on_structure_en(
structure: Structure,
gp: GaussianProcess,
n_cpus: int = None,
write_to_structure: bool = True,
selective_atoms: List[int] = None,
skipped_atom_value=0,
) -> ("np.ndarray", "np.ndarray", "np.ndarray"):
"""
Return the forces/std. dev. uncertainty / local energy associated with each
individual atom in a structure. Forces are stored directly to the
structure and are also returned.
:param structure: FLARE structure to obtain forces for, with N atoms
:param gp: Gaussian Process model
:param n_cpus: Dummy parameter passed as an argument to allow for
flexibility when the callable may or may not be parallelized
:return: N x 3 array of forces, N x 3 array of uncertainties,
N-length array of energies
:rtype: (np.ndarray, np.ndarray, np.ndarray)
"""
# Set up local energy array
forces = np.zeros((structure.nat, 3))
stds = np.zeros((structure.nat, 3))
local_energies = np.zeros(structure.nat)
if selective_atoms:
forces.fill(skipped_atom_value)
stds.fill(skipped_atom_value)
local_energies.fill(skipped_atom_value)
else:
selective_atoms = []
# Loop through atoms in structure and predict forces, uncertainties,
# and energies
for n in range(structure.nat):
if selective_atoms and n not in selective_atoms:
continue
chemenv = AtomicEnvironment(structure, n, gp.cutoffs, cutoffs_mask=gp.hyps_mask)
force, var = gp.predict_force_xyz(chemenv)
std = np.sqrt(np.abs(var))
forces[n] = force
stds[n] = std
if write_to_structure and structure.forces is not None:
structure.forces[n] = force
structure.stds[n] = std
local_energies[n] = gp.predict_local_energy(chemenv)
return forces, stds, local_energies
def predict_on_structure_par_en(
structure: Structure,
gp: GaussianProcess,
n_cpus: int = None,
write_to_structure: bool = True,
selective_atoms: List[int] = None,
skipped_atom_value=0,
) -> ("np.ndarray", "np.ndarray", "np.ndarray"):
"""
Return the forces/std. dev. uncertainty / local energy associated with each
individual atom in a structure, parallelized over atoms. Forces are
stored directly to the structure and are also returned.
:param structure: FLARE structure to obtain forces for, with N atoms
:param gp: Gaussian Process model
:param n_cpus: Number of cores to parallelize over
:return: N x 3 array of forces, N x 3 array of uncertainties,
N-length array of energies
:rtype: (np.ndarray, np.ndarray, np.ndarray)
"""
# Work in serial if the number of cpus is 1
# or the gp is not parallelized by atoms
if (n_cpus == 1) or (not gp.per_atom_par):
return predict_on_structure_en(
structure,
gp,
n_cpus,
write_to_structure,
selective_atoms,
skipped_atom_value,
)
forces = np.zeros((structure.nat, 3))
stds = np.zeros((structure.nat, 3))
local_energies = np.zeros(structure.nat)
if selective_atoms:
forces.fill(skipped_atom_value)
stds.fill(skipped_atom_value)
local_energies.fill(skipped_atom_value)
else:
selective_atoms = []
if n_cpus is None:
pool = mp.Pool(processes=mp.cpu_count())
else:
pool = mp.Pool(processes=n_cpus)
# Parallelize over atoms in structure
results = []
for atom_i in range(structure.nat):
if atom_i not in selective_atoms and selective_atoms:
results.append(None)
continue
results.append(
pool.apply_async(predict_on_atom_en, args=[(structure, atom_i, gp)])
)
pool.close()
pool.join()
# Compile results
for i in range(structure.nat):
if i not in selective_atoms and selective_atoms:
continue
r = results[i].get()
forces[i][:] = r[0]
stds[i][:] = r[1]
local_energies[i] = r[2]
if write_to_structure:
structure.forces[i] = forces[i]
structure.stds[i] = stds[i]
return forces, stds, local_energies
def predict_on_atom_mgp(atom: int, structure, mgp, write_to_structure=False):
chemenv = AtomicEnvironment(
structure, atom, mgp.cutoffs, cutoffs_mask=mgp.hyps_mask
)
# predict force components and standard deviations
force, var, virial, local_energy = mgp.predict(chemenv)
comps = force
stds = np.sqrt(np.absolute(var))
if write_to_structure:
structure.forces[atom][:] = force
structure.stds[atom][:] = stds
if structure.local_energy is None:
structure.local_energy = np.zeros(structure.nat)
structure.local_energy[atom] = local_energy
return comps, stds, local_energy
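# Illustrative sketch (not part of the original source): predicting on a single
# atom with a mapped GP. 'structure' and 'mgp' are assumed, pre-built objects.
#
#   force, stds, local_en = predict_on_atom_mgp(0, structure, mgp)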
def predict_on_structure_mgp(
structure: Structure,
mgp: MappedGaussianProcess,
output=None,
output_name=None,
n_cpus: int = None,
write_to_structure: bool = True,
selective_atoms: List[int] = None,
skipped_atom_value: Union[float, int] = 0,
energy: bool = False,
) -> Union[Tuple["np.ndarray", "np.ndarray", float], Tuple["np.ndarray", "np.ndarray"]]:
"""
Assign forces to structure based on an mgp
"""
if output and output_name:
output.write_to_output("\npredict with mapping:\n", output_name)
forces = np.zeros(shape=(structure.nat, 3))
stds = np.zeros(shape=(structure.nat, 3))
local_energy = np.zeros(shape=(structure.nat))
if selective_atoms:
forces.fill(skipped_atom_value)
stds.fill(skipped_atom_value)
else:
selective_atoms = []
for n in range(structure.nat):
if n not in selective_atoms and selective_atoms:
continue
forces[n, :], stds[n, :], local_energy[n] = predict_on_atom_mgp(
n, structure, mgp, write_to_structure
)
if energy:
return forces, stds, local_energy
else:
return forces, stds
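# Illustrative sketch (not part of the original source): choosing between the
# GP and mapped-GP entry points. 'structure', 'gp', and 'mgp' are assumed,
# pre-built objects.
#
#   forces, stds = predict_on_structure(structure, gp)               # full GP
#   forces, stds = predict_on_structure_mgp(structure, mgp)          # mapped GP
#   forces, stds, en = predict_on_structure_mgp(structure, mgp, energy=True)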
|
mir-group/flare
|
flare/predict.py
|
Python
|
mit
| 19,367
|
[
"ASE",
"Gaussian"
] |
bdd83539eec63e4d5c7662be6595c716ad86575aba04847141953d51decc66d7
|
import numpy as np
from numpy import zeros, array, eye
from numpy.random import multivariate_normal, uniform, random_integers
from sklearn.covariance import GraphLasso
import sklearn as skl
import matplotlib.pyplot as plt
from neighbor_select import NeighborSelect
from solver import SklearnCDSolver, ActiveSetCDSolver, ProximalGradientSolver, AccelProximalGradientSolver
from screening_rules import *
if __name__ == '__main__':
""" Sequential (standalone) version for testing graph-lasso
implementation against scipy version.
"""
N = 200 # number of training examples
DIMS = 10 # number of features
COV = eye(DIMS) # covariance matrix
CORR_RANGE = [0.3, 0.6] # min and max off-diagonal correlations
CORR_FRAC = 0.4 # fraction of correlated features (0.0 <= CORR_FRAC < 1.0)
# Well, this is not a very good way of generating a specific covariance matrix:
# For certain parameters, it could result in negative semi-definite matrices.
NUM_CORR = int(float(DIMS)*CORR_FRAC)
for i in range(NUM_CORR):
val = uniform(CORR_RANGE[0], CORR_RANGE[1])
a = b = 0
while a==b or COV[a,b]>0.0:
(a,b) = random_integers(low=0, high=DIMS-1, size=2)
COV[a,b] = val
COV[b,a] = val
# draw samples from a multivariate Gaussian
X = multivariate_normal(mean=zeros(COV.shape[0]), cov=COV, size=N)
X = skl.preprocessing.normalize(X, norm='l2').T
# compute the empirical covariance matrix
C_emp = X.dot(X.T)/float(N)
print('Empirical Cov:')
print C_emp
# neighborhood selection
nhs = NeighborSelect(EDPP(), ProximalGradientSolver(), path_lb=0.2, path_steps=5, path_scale='log')
Cb = nhs.fit(np.ascontiguousarray(X))
print Cb
glasso = GraphLasso(alpha=0.005, tol=0.0001, max_iter=1000, verbose=False)
glasso.fit(X.T)
C = glasso.get_precision()
print glasso.error_norm(COV)
print('GraphLasso Cov:')
print C
# plot some example network
plt.figure()
plt.subplot(2, len(Cb), 1)
plt.title('Cov')
plt.pcolor(COV)
plt.subplot(2, len(Cb), 2)
plt.title('Emp. Cov')
plt.pcolor(C_emp)
plt.subplot(2, len(Cb), 3)
plt.title('GraphLasso')
plt.pcolor(C)
for i in range(len(Cb)):
plt.subplot(2, len(Cb), len(Cb)+1+i)
foo = Cb[i].todense()
foo[foo < 2.0] = 0.0
plt.pcolor(np.array(foo))
plt.show()
print('Done.')
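# Illustrative alternative (not part of the original source): the ad-hoc
# correlation filling above can yield a matrix that is not positive
# semi-definite. A simple guaranteed-PSD construction draws a random factor
# and forms A*A^T plus a small diagonal ridge, e.g.:
#
#   A = uniform(-1.0, 1.0, size=(DIMS, DIMS))
#   COV = A.dot(A.T) + 0.1 * eye(DIMS)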
|
nicococo/AdaScreen
|
scripts/test_neighorhood_selection.py
|
Python
|
mit
| 2,431
|
[
"Gaussian"
] |
2fd541d6077372f0053ee6c0426077f51be2b6a96d6c92e9797998d6dd19d5a9
|
def start(forceOptimizer, debug=False):
if debug:
print ""
print "============="
print "Initial Phase"
print "============="
print ""
# the main group is the only group on the highest level, so the queue starts with it
forceOptimizer.groups = sorted(forceOptimizer.groups, cmp=group_compare_negative)
forceOptimizer.wide_search_queue.append(forceOptimizer.group_main)
wide_search(forceOptimizer, debug)
set_block_relation_to_group(forceOptimizer, debug)
calculate_groups_frame_position2(forceOptimizer, debug)
def wide_search(forceOptimizer, debug):
'''
Description: Sorts the groups into the gnd / vcc / out lists according to their distance to out
'''
if debug:
print "\nWide Search"
print "Wide Search Queue count:", len(forceOptimizer.wide_search_queue)
# get the first group of the queue to start a wide search on her over her subgroups
group = forceOptimizer.wide_search_queue.pop(0)
if debug:
print ""
print "Group ID:", group.group_id, " Count Group Childs: ", len(group.childs)
if len(group.childs) == 0:
if len(forceOptimizer.wide_search_queue) > 0:
wide_search(forceOptimizer, debug)
else:
# looking for a start child with connection to the parent's east neighbors
start_child = None
# a sub wide search queue to start a classic wide search on the current group
queue = []
for child in group.childs:
if debug:
print str(child.group_id) + " Connected to parent's east neighbor:" + str(child.connected_parent_east)
print child
print child.parent
if child.connected_parent_east > 0:
if start_child is None:
start_child = child
queue.insert(0, start_child)
else:
queue.append(child)
else:
if len(group.childs) == 1:
if start_child is None:
start_child = child
queue.insert(0, start_child)
else:
queue.append(child)
if debug:
print "Start Child:", start_child.group_id
# classic wide search
start_child.wide_search_flag = 1
while len(queue) > 0:
visited_child = queue.pop(0)
if debug:
print "Visited Child:", visited_child.group_id
if visited_child not in forceOptimizer.wide_search_queue and visited_child not in group.childs_east_sorted:
forceOptimizer.wide_search_queue.append(visited_child)
group.childs_east_sorted.append(visited_child)
if visited_child.connected_parent_east and visited_child not in group.child_east:
group.child_east.append(visited_child)
if visited_child.connected_parent_north and visited_child not in group.child_north:
group.child_north.append(visited_child)
if visited_child.connected_parent_south and visited_child not in group.child_south:
group.child_south.append(visited_child)
if visited_child.connected_parent_west and visited_child not in group.child_west:
group.child_west.append(visited_child)
for neighbor in visited_child.neighbor_unsorted:
if debug:
print "Neighbor:", neighbor.group_id
# only look for neighbors in the same group which are not already discovered
if neighbor.parent == visited_child.parent and neighbor.wide_search_flag == 0:
neighbor.wide_search_flag = 1
queue.append(neighbor)
visited_child.wide_search_flag = 2
# increment the number of connected to parent north/south/east/west
sort_extern_neighbors(group.childs_east_sorted, debug, forceOptimizer)
# when all children / subgroups are visited
# then we can start to sort the neighborhood of these children in the group
sort_unsorted_neighbor(group.childs_east_sorted, debug)
# when the wide search is finished with one group and its subgroups,
# a wide search starts on a group at the same level,
# or, when all groups on one level have been visited, it moves on to the next lower level
# the algorithm produces a sequence in the wide_search_queue,
# where groups of a higher level are in the first positions
# and the groups of a lower level come in the last part
if len(forceOptimizer.wide_search_queue) > 0:
wide_search(forceOptimizer, debug)
def sort_extern_neighbors(east_list, debug, forceOptimizer):
for group in east_list:
if debug:
print "SORT EXTERN NEIGHBOR for:", str(group.group_id), "Neighbors count:", len(group.neighbor_extern)
for neigh in group.neighbor_extern:
print " ", neigh.group_id
print "Group PArent Neighbor East count:", len(group.parent.neighbor_east)
for neigh in group.parent.neighbor_east:
print " ", neigh.group_id
print "Group PArent Neighbor WEST count:", len(group.parent.neighbor_west)
for neigh in group.parent.neighbor_west:
print " ", neigh.group_id
print "Group PArent Neighbor SOUTH count:", len(group.parent.neighbor_south)
for neigh in group.parent.neighbor_south:
print " ", neigh.group_id
print "Group PArent Neighbor NORTH count:", len(group.parent.neighbor_north)
for neigh in group.parent.neighbor_north:
print " ", neigh.group_id
print ""
for neighbor in group.neighbor_extern:
if neighbor.parent in group.parent.neighbor_east:
find_blocks(group, neighbor, 2, debug)
if neighbor.parent in group.parent.neighbor_west:
find_blocks(group, neighbor, 4, debug)
if neighbor.parent in group.parent.neighbor_north:
find_blocks(group, neighbor, 1, debug)
if neighbor.parent in group.parent.neighbor_south:
find_blocks(group, neighbor, 3, debug)
if debug:
print "Group connected parent east:", group.connected_parent_east
print "Group connected parent west:", group.connected_parent_west
print "Group connected parent south:", group.connected_parent_south
print "Group connected parent north:", group.connected_parent_north
print ""
def find_blocks(group, neighbor, orientation, debug):
if debug:
print ""
print "Find Blocks connected to extern Block"
print "Group:", group.group_id, " Neighbor:", neighbor.group_id, " Border:", orientation
for block in group.blocks:
for n_block in neighbor.blocks:
for pin in block.pins.values():
for n_pin in n_block.pins.values():
# pin out is ok
if pin.net not in ['vdd', 'gnd', 'vss'] and pin.net == n_pin.net:
if debug:
print block.name, " -> ", pin.net," -> ", n_block.name
if orientation == 1:
# Neighbor is NORTH
if not group.is_bias:
group.block_north.add(block);
if not neighbor.is_bias:
neighbor.block_south.add(n_block)
if orientation == 2:
# Neighbor is EAST
if not group.is_bias:
group.block_east.add(block);
if not neighbor.is_bias:
neighbor.block_west.add(n_block)
if orientation == 3:
# Neighbor is SOUTH
if not neighbor.is_bias:
neighbor.block_north.add(n_block);
if not group.is_bias:
group.block_south.add(block)
if orientation == 4:
# Neighbor is WEST
if not neighbor.is_bias:
neighbor.block_east.add(n_block);
if not group.is_bias:
group.block_west.add(block)
def sort_unsorted_neighbor( east_list, debug):
'''
'''
groups = []
for group in east_list:
groups.append(group.group_id)
if debug:
print "Sort Unsorted Neighbor:", groups
#go through all groups in their relative distance to out
for group in east_list:
if debug:
print "Group in East List:", group.group_id
#if the group has neighbors which are not sorted to north, south, east, west
if len(group.neighbor_unsorted) > 0:
if debug:
print "Group connected to parent north:", group.connected_parent_north
print "Group connected to parent south:", group.connected_parent_south
print "Group connected to parent east:", group.connected_parent_east
print "Group connected to parent west:", group.connected_parent_west
#go through all unsorted neighbors
for neighbor in group.neighbor_unsorted:
#if the neighbor is connected to vcc and gnd
if neighbor.connected_parent_north and neighbor.connected_parent_south:
#then the only legal position for the neighbor is west
add_neighbor_east_west(group, neighbor)
#such a neighbor is dominant and the west list has to be closed
group.listfull_west = True
if neighbor.connected_parent_north and neighbor.connected_parent_south == 0 and group.connected_parent_north == 0:
# the neighbor has a parent NORTH connection but no parent SOUTH, and the group itself has no connection to the parent NORTH
# then add the neighbor to the NORTH of the group
add_neighbor_north_south(neighbor, group)
if neighbor.connected_parent_east and neighbor.connected_parent_west == 0 and group.connected_parent_east == 0:
# the neighbor has a parent EAST connection but no parent WEST connection, and the group itself has no parent EAST connection
# then add
add_neighbor_east_west(neighbor, group)
if neighbor.connected_parent_south and neighbor.connected_parent_north == 0 and group.connected_parent_south == 0:
# the neighbor has a parent SOUTH connection but no parent NORTH, and the group itself has no connection to the parent SOUTH
# then add the neighbor to the SOUTH of the group
add_neighbor_north_south(group, neighbor)
if neighbor.connected_parent_west and neighbor.connected_parent_east == 0 and group.connected_parent_west == 0:
# the neighbor has a parent WEST connection but no parent EAST connection, and the group itself has no parent WEST connection
# then add
add_neighbor_east_west(group, neighbor)
'''
if neighbor.connected_parent_north and neighbor.connected_parent_south and len(group.blocks) and len(neighbor.blocks):
for block in group.neighbors[neighbor]:
group.block_west.add(block)
for block in neighbor.neighbors[group]:
neighbor.block_east.add(block)
if neighbor.connected_parent_north and neighbor.connected_parent_south == 0 and group.connected_parent_north == 0 and len(group.blocks) and len(neighbor.blocks):
for block in neighbor.neighbors[group]:
neighbor.block_south.add(block)
for block in group.neighbors[neighbor]:
group.add(block)
'''
if debug:
print group
def group_compare(x, y):
'''
Sort groups by their group_id
groups on the low level with long IDs came first
'''
return len(y.group_id) - len(x.group_id)
def group_compare_negative(x, y):
'''
Sort groups by their group_id
groups on the high level with short IDs came first
'''
return len(x.group_id) - len(y.group_id)
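# Illustrative example (not part of the original source), using hypothetical
# group_ids: with Python 2 cmp-style sorting, group_compare puts the longest
# (deepest) ids first and group_compare_negative puts the shortest
# (highest-level) ids first, e.g. ids ['1', '1_2', '1_2_3'] sort to
# ['1_2_3', '1_2', '1'] and ['1', '1_2', '1_2_3'] respectively.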
def search_group(group_id,forceOptimizer):
'''
PARAMETER: group_ids is an array with the IDs of the parent Groups and the ID of the searched group
return the group if it exists, else None
STATE: not finished
'''
for group in forceOptimizer.groups:
if group.group_id == group_id:
return group
return None
def set_block_relation_to_group(forceOptimizer, debug):
if debug:
print ""
print "============="
print "Block relation to group"
print "============="
print ""
for block in forceOptimizer.blocks:
if debug:
print "BLOCK:",block.name
group = search_group(block.groups, forceOptimizer)
if debug:
print "Group:", group.group_id
for key in forceOptimizer.dictionary_net_blocks:
if block in forceOptimizer.dictionary_net_blocks[key]:
if debug:
print "Net:", key
for neighbor in forceOptimizer.dictionary_net_blocks[key]:
if debug:
print "Block_Neighbor:", neighbor.name
if neighbor != block and neighbor.groups != block.groups:
group_neighbor = search_group(neighbor.groups, forceOptimizer)
if debug:
print "Group_Neighbor:", group_neighbor.group_id
# group_neighbor => internal neighbors
# group_neighbor.parent => external neighbors
for needle, obj in [(group_neighbor, group), (group_neighbor.parent, group.parent)]:
pairs = [("north", obj.neighbor_north, obj.block_north), ("south", obj.neighbor_south, obj.block_south),
("east", obj.neighbor_east, obj.block_east), ("west", obj.neighbor_west, obj.block_west)]
for desc, haystack, bucket in pairs:
if group_neighbor in haystack:
bucket.add(block)
if debug:
print "add block: {name} to group: {gid} with orientation: {desc}".format(
name=block.name, gid=group.group_id, desc=desc)
# .... instead of:
#intern group_neighbors
#if group_neighbor in group.neighbor_north:
#
# group.block_north.add(block)
# if debug:
# print "add ", block.name, " to ", group.group_id, ".block_north:", block in group.block_north
#
#if group_neighbor in group.neighbor_south:
#
# group.block_south.add(block)
# if debug:
# print "add ", block.name, " to ", group.group_id, ".block_south:", block in group.block_south
#
#if group_neighbor in group.neighbor_east:
#
# group.block_east.add(block)
# if debug:
# print "add ", block.name, " to ", group.group_id, ".block_east:", block in group.block_east
#
#if group_neighbor in group.neighbor_west:
#
# group.block_west.add(block)
# if debug:
# print "add ", block.name, " to ", group.group_id, ".block_west:", block in group.block_west
#
#extern group_neighbors
#if group_neighbor.parent in group.parent.neighbor_north:
# group.block_north.add(block)
# if debug:
# print "add ", block.name, " to ", group.group_id, ".block_north:", block in group.block_north
#
#if group_neighbor.parent in group.parent.neighbor_south:
#
# group.block_south.add(block)
# if debug:
# print "add ", block.name, " to ", group.group_id, ".block_south:", block in group.block_south
#
#if group_neighbor.parent in group.parent.neighbor_east:
#
# group.block_east.add(block)
# if debug:
# print "add ", block.name, " to ", group.group_id, ".block_east:", block in group.block_east
#
#if group_neighbor.parent in group.parent.neighbor_west:
#
# group.block_west.add(block)
# if debug:
# print "add ", block.name, " to ", group.group_id, ".block_west:", block in group.block_west
if debug: #
print ""
for group in forceOptimizer.groups:
print group
def calculate_groups_frame_position2(forceOptimizer, debug):
'''
'''
if debug:
print ""
print "========================================="
print " Calculate Group Position and Frame Size"
print "========================================="
print ""
forceOptimizer.groups = sorted(forceOptimizer.groups, cmp=group_compare)
if debug:
for group in forceOptimizer.groups:
print "Group:", group.group_id
groups = forceOptimizer.groups[:]
groups.append(forceOptimizer.group_main)
for group in groups:
if debug:
print group
width_south = 0
width_north = 0
height_east = 0
height_west = 0
if debug:
print "GROUP:", group.group_id
for child in group.child_south:
print " Child SOUTH:", child.group_id
if len(group.childs):
# for the group width
north_border = 0
south_border = 0
east_west_max_width = 0
# for the group height
south_max_height = 0
north_max_height = 0
north_south_max_height = 0
north_south_border = 0
east_border = 0
west_border = 0
eas_west_max_width = 0
for child in group.childs_east_sorted:
child.position_x = -1
child.position_y = -1
if child in group.child_north and child not in group.child_south:
north_border += child.size_width
if north_max_height < child.size_height:
north_max_height = child.size_height
if child in group.child_south and child not in group.child_north:
south_border += child.size_width
if south_max_height < child.size_height:
south_max_height = child.size_height
if child in group.child_south and child in group.child_north:
north_south_border += child.size_width
if north_south_max_height < child.size_height:
north_south_max_height = child.size_height
if child not in group.child_south and child not in group.child_north and child in group.child_east:
east_border += child.size_height
if child not in group.child_south and child not in group.child_north and child in group.child_west:
west_border += child.size_height
if child in group.child_east and child in group.child_west:
if east_west_max_width < child.size_width:
east_west_max_width = child.size_width
horizontal_border = max(north_border+north_south_border, south_border+north_south_border)
horizontal_border = max(horizontal_border, east_west_max_width)
vertical_border = max(east_border, west_border)
vertical_border = max(north_max_height + south_max_height + vertical_border, north_south_max_height)
group.size_height = vertical_border
group.size_width = horizontal_border
if debug:
print group.group_id, "Size:", (group.size_width, group.size_height)
#new height for children with north + south connection
north_south_childs = set(group.child_north) & set(group.child_south)
north_south_child_update(group, north_south_childs, debug)
#new width for children with east + west connection
#east_west_childs = set(group.child_east) & set(group.child_west)
#east_west_child_update(group, east_west_childs, debug)
# start positioning the childs
# position of north childs
north_heights = calculate_child_position_north_south(group, group.child_north, False, debug)
# position of south childs
south_heights = calculate_child_position_north_south(group, group.child_south, True, debug)
calculate_child_position_east_west(group, group.child_east, False, debug, north_heights)
calculate_child_position_east_west(group, group.child_west, True, debug, north_heights)
calculate_child_position_center(group, debug)
elif len(group.blocks):
if group.is_bias_connected:
group.size_height = 1
group.size_width = len(group.blocks)
else:
east_blocks = set(group.block_east) - (set(group.block_north) | set(group.block_south) )
west_blocks = set(group.block_west) - (set(group.block_north) | set(group.block_south) )
width_north += len(group.block_north)
width_south += len(group.block_south)
height_west += len(west_blocks)
height_east += len(east_blocks)
if debug:
print "Group:", group.group_id, "North:", width_north, "South:", width_south, "East:", height_east, "West:", height_west
#the bigger width of north and south is the width for the group
group.size_width = max({width_north, width_south})
#group.size_height = max({height_east, height_west})
# only for low level groups
if len(group.blocks) > 0:
group.size_height = group.size_height * 1
#if the group area is too small to place all blocks without overlapping
while (group.size_height * group.size_width) < len(group.blocks):
#increment the group width and height by 1
#group.size_width = group.size_width + 1
group.size_height = group.size_height + 1
if debug:
print group
for group in groups:
if len(group.blocks):
for block in group.blocks:
block.pos[0] = group.size_width / 2 - 0.5
block.pos[1] = group.size_height / 2 -0.5
for block in group.block_north:
block.pos[1] = 0
for block in group.block_south:
block.pos[1] = group.size_height-1
for block in group.block_east:
block.pos[0] = group.size_width-1
for block in group.block_west:
block.pos[0] = 0
def north_south_child_update(group, north_south_childs, debug):
for child in north_south_childs:
child.size_height = group.size_height
if len(child.childs):
calculate_child_position_north_south(child, child.child_north, False, debug)
calculate_child_position_north_south(child, child.child_south, True, debug)
def east_west_child_update(group, east_west_childs, debug):
for child in east_west_childs:
child.size_width = group.size_width
def calculate_child_position_center(group, debug):
center_childs = set(group.childs) - (set(group.child_north) | set(group.child_south) | set(group.child_east) | set(group.child_west))
for child in center_childs:
child.position_x = group.size_width - child.size_width
child.position_y = 0
for neighbor in child.neighbor_north:
if neighbor.position_x < child.position_x:
child.position_x = neighbor.position_x
if neighbor.position_y + neighbor.size_height > child.position_y:
child.position_y = neighbor.position_y + neighbor.size_height
if len(child.neighbor_north) == 0:
for neighbor in child.neighbor_south:
if neighbor.position_x < child.position_x:
child.position_x = neighbor.position_x
if neighbor.position_y - child.size_height < child.position_y:
child.position_y = neighbor.position_y + neighbor.size_height
def calculate_child_position_east_west(group, group_border, is_west, debug, north_heights):
center_childs = set(group_border) - (set(group.child_north) | set(group.child_south))
north_y = max(north_heights[0], max(north_heights[1], north_heights[2]))
for child in center_childs:
if is_west:
child.position_x = 0
else:
child.position_x = group.size_width - child.size_width
if child.position_y == -1:
child.position_y = north_y
north_y = child.position_y + child.size_height
def calculate_child_position_north_south(group, group_border, is_south, debug):
east_childs = set(group.child_east) & set(group_border)
west_childs = set(group.child_west) & set(group_border)
center_childs = set(group_border) - (set(group.child_east) | set(group.child_west))
east_size = [0,0]
west_size = [0,0]
center_size = [0,0]
east_x = group.size_width
west_x = 0
if debug:
if is_south:
print "Child Positions South:"
else:
print "Child Positions North:"
for child in east_childs:
child.position_x = east_x - child.size_width
if is_south:
child.position_y = group.size_height - child.size_height
else:
child.position_y = 0
east_x = child.position_x
east_size = [east_size[0] + child.size_width, max(east_size[0], child.size_height)]
if debug:
print " EAST:", child.group_id, (child.position_x, child.position_y)
for child in west_childs:
child.position_x = west_x
if is_south:
child.position_y = group.size_height - child.size_height
else:
child.position_y = 0
west_x = child.position_x + child.size_width
west_size = [west_size[0] + child.size_width, max(west_size[0], child.size_height)]
if debug:
print " WEST:", child.group_id, (child.position_x, child.position_y)
center_size[0] = group.size_width - east_size[0] + west_size[0]
for child in center_childs:
if is_south:
child.position_y = group.size_height - child.size_height
else:
child.position_y = 0
for neighbor in child.neighbor_east:
if neighbor in east_childs:
child.position_x = east_x -child.size_width
east_x = child.position_x
center_size[1] = max(center_size[0], child.size_height)
break
for neighbor in child.neighbor_west:
if neighbor in west_childs:
child.position_x = west_x
west_x = child.position_x + child.size_width
center_size[1] = max(center_size[0], child.size_height)
break
if child.position_x == -1:
child.position_x = west_x
west_x = child.position_x +child.size_width
center_size[1] = max(center_size[0], child.size_height)
if debug:
print " CENTER:", child.group_id, (child.position_x, child.position_y)
return [west_size[1], center_size[1], east_size[1]]
def calculate_groups_frame_position(forceOptimizer, debug):
'''
'''
if debug:
print ""
print "========================================="
print " Calculate Group Position and Frame Size"
print "========================================="
print ""
if debug:
for group in forceOptimizer.groups:
print "Group:", group.group_id
forceOptimizer.groups = sorted(forceOptimizer.groups, cmp=group_compare)
groups = forceOptimizer.groups[:]
groups.append( forceOptimizer.group_main)
#print "#########"
#print [x.group_id for x in groups]
#for group in groups:
#if len(group.blocks)==0:
#group.position_x = 0
#group.position_y = 0
#if debug:
#print "SortedGroup:", group.group_id
for group in groups:
if debug:
print group
width_south = 0
width_north = 0
height_east = 0
height_west = 0
if debug:
print "GROUP:", group.group_id
if len(group.childs):
not_visited = group.childs_east_sorted[:]
start_child = group.childs_east_sorted[0]
group.size_height = start_child.size_height
group.size_width = start_child.size_width
visit_next = []
visited = []
while len(not_visited):
if debug:
print " START:",start_child.group_id
not_visited.remove(start_child)
for neighbor in start_child.neighbor_south:
if debug:
print " Neighbor:",neighbor.group_id
if neighbor in not_visited:
if start_child.position_y + start_child.size_width > neighbor.position_y:
neighbor.position_y = start_child.position_y + start_child.size_width
if neighbor not in visit_next and neighbor in not_visited and neighbor not in visited:
visit_next.append(neighbor)
for neighbor in start_child.neighbor_north:
if debug:
print " Neighbor:",neighbor.group_id
if neighbor in not_visited:
if start_child.position_y-neighbor.size_height < neighbor.position_y:
neighbor.position_y = start_child.position_y-neighbor.size_height
if neighbor not in visit_next and neighbor in not_visited and neighbor not in visited:
visit_next.append(neighbor)
for neighbor in start_child.neighbor_west:
if debug:
print " Neighbor:",neighbor.group_id
if neighbor in not_visited:
if start_child.position_x - neighbor.size_width < neighbor.position_x:
neighbor.position_x = start_child.position_x - neighbor.size_width
if neighbor not in visit_next and neighbor in not_visited and neighbor not in visited:
visit_next.append(neighbor)
for neighbor in start_child.neighbor_east:
if debug:
print " Neighbor:",neighbor.group_id
if neighbor in not_visited:
if neighbor not in visit_next and neighbor in not_visited and neighbor not in visited:
visit_next.append(neighbor)
rebuild_group_size(group, start_child, visited)
if debug:
print " VISITED"
for child in visited:
print " ", child.group_id, (child.position_x, child.position_y), (child.size_width, child.size_height)
if debug:
print " VISIT NEXT"
for child in visit_next:
print " ", child.group_id, (child.position_x, child.position_y), (child.size_width, child.size_height)
if len(visit_next):
start_child = visit_next[0]
del visit_next[0]
if len(group.blocks):
width_north += len(group.block_north)
width_south += len(group.block_south)
height_west += len(group.block_west)
height_east += len(group.block_east)
if debug:
print "Group:", group.group_id, "North:", width_north, "South:", width_south, "East:", height_east, "West:", height_west
#the bigger width of north and south is the width for the group
group.size_width = max({width_north, width_south})
group.size_height = max({height_east, height_west})
# only for low level groups
if len(group.blocks) > 0:
group.size_height = group.size_height * 1
#if the group area is too small to place all blocks without overlapping
while (group.size_height * group.size_width) < len(group.blocks):
#increment the group width and height by 1
#group.size_width = group.size_width + 1
group.size_height = group.size_height + 1
if debug:
print group
for group in groups:
if len(group.blocks):
for block in group.blocks:
block.pos[0] = group.size_width / 2 - 0.5
block.pos[1] = group.size_height / 2 -0.5
for block in group.block_north:
block.pos[1] = 0
for block in group.block_south:
block.pos[1] = group.size_height-1
for block in group.block_east:
block.pos[0] = group.size_width-1
for block in group.block_west:
block.pos[0] = 0
def rebuild_group_size(group, new_child, fixed_childs):
width_diff = 0
height_diff = 0
x_diff = 0
y_diff = 0
if new_child.position_x < 0:
x_diff = abs(new_child.position_x)
if new_child.position_y < 0:
y_diff = abs(new_child.position_y)
group.size_height += y_diff
group.size_width += x_diff
if new_child.size_width + new_child.position_x > group.size_width:
width_diff = new_child.size_width + new_child.position_x - group.size_width
if new_child.size_height + new_child.position_y > group.size_height:
height_diff = new_child.size_height + new_child.position_y - group.size_height
group.size_height += height_diff
group.size_width += width_diff
fixed_childs.append(new_child)
for child in fixed_childs:
child.position_x += x_diff
child.position_y += y_diff
set_childs_position(group,fixed_childs)
def set_childs_position(group,fixed_childs):
for child in fixed_childs:
if child in group.child_north:
child.position_y = 0
if child in group.child_south:
child.size_height = group.size_height
elif child in group.child_south:
child.position_y = group.size_height - child.size_height
if child in group.child_west:
child.position_x = 0
if child in group.child_east:
child.size_width = group.size_width
elif child in group.child_east:
child.position_x = group.size_width - child.size_width
if len(child.childs):
set_childs_position(child, child.childs)
def calculate_groups_position(forceOptimizer, debug):
'''
Only the position of the groups will be calculated,
starting with the high level group and setting the positions of its children.
Positions are relative to the parent's upper left corner.
'''
if debug:
print ""
print "========================"
print "Calculate Group Position"
print "========================"
print ""
for group in forceOptimizer.groups:
if debug:
print "Group:", group.group_id
forceOptimizer.groups = sorted(forceOptimizer.groups, cmp=group_compare)
print "#########"
print [x.group_id for x in forceOptimizer.groups]
for group in forceOptimizer.groups:
group.position_x = 0
group.position_y = 0
if debug:
print "SortedGroup:", group.group_id
groups = forceOptimizer.groups[:]
groups.insert(0, forceOptimizer.group_main)
#go through every group
for group in groups:
for child in group.childs:
# children connected to NORTH and SOUTH have position y = 0 and are as tall as the group
if child in group.child_north and child in group.child_south:
child.position_y = group.size_height / 2 - child.size_height / 2
#child.size_height = group.size_height
# children only connected to NORTH (not to SOUTH) have position y = 0
elif child in group.child_north:
child.position_y = 0
# children only connected to SOUTH (not to NORTH) touching the lower bound of the group
elif child in group.child_south:
child.position_y = group.size_height - child.size_height
#children with no connection to NORTH or SOUTH
else:
#search for neighbors with north connection
#find the highest and set the child under that neighbor
max_height = 0
for neighbor in child.neighbors:
if neighbor in group.child_north and max_height < neighbor.size_height:
max_height = 0 + neighbor.size_height
if max_height:
child.position_y = max_height
else:
#no neighbor in North, so check South
#search for neighbors with south connection
#find the highest and set the child over that neighbor
for neighbor in child.neighbors:
if neighbor in group.child_south and max_height < neighbor.size_height:
max_height = 0 + neighbor.size_height
if max_height:
child.position_y = group.size_height - max_height - child.size_height
else:
# place the child in the center
# overlapping is allowed and will be fixed in the force algorithm
child.position_y = group.size_height / 2 - child.size_height / 2
# children connected to both WEST and EAST are centered horizontally (stretching them to the full group width is disabled below)
if child in group.child_west and child in group.child_east:
child.position_x = group.size_width / 2 - child.size_width / 2
#child.size_width = group.size_width
# children only connected to WEST (not to EAST) have position x = 0
elif child in group.child_west:
child.position_x = 0
# children only connected to EAST (not to WEST) touching the right bound of the group
elif child in group.child_east:
child.position_x = group.size_width - child.size_width
#children with no connection to WEST or EAST
else:
#search for neighbors with west connection
#find the biggest and set the child right to that neighbor
max_width = 0
for neighbor in child.neighbors:
if neighbor in group.child_west and max_width < neighbor.size_width:
max_width = 0 + neighbor.size_width
if max_width:
child.position_x = max_width
else:
#no neighbor in west, so check east
#search for neighbors with east connection
#find the biggest and set the child left to that neighbor
max_width = 0
for neighbor in child.neighbors:
if neighbor in group.child_east and max_width < neighbor.size_width:
max_width = 0 + neighbor.size_width
if max_width:
child.position_x = group.size_width - max_width - child.size_width
else:
# place the child in the center
# overlapping is allowed and will be fixed in the force algorithm
child.position_x = group.size_width / 2 - child.size_width / 2
for group in groups:
for child in group.child_north:
if child not in group.child_east:
child.position_x = calculate_border_north_south_position(child, group.child_north)
for child in group.child_south:
if child not in group.child_east:
child.position_x = calculate_border_north_south_position(child, group.child_south)
for child in group.child_east:
if child not in group.child_south and child not in group.child_north:
child.position_y = calculate_border_east_west_position(child, group.child_east)
for child in group.child_west:
if child not in group.child_south and child not in group.child_north:
child.position_y = calculate_border_east_west_position(child, group.child_west)
if debug:
for group in groups:
print group
def calculate_border_north_south_position(group,neighbors):
right = group.parent.size_width
for neighbor in neighbors:
if neighbor.position_x > group.position_x:
right = right - neighbor.size_width
return right-group.size_width
def calculate_border_east_west_position(group,neighbors):
down = group.parent.size_height
for neighbor in neighbors:
if neighbor.position_y > group.position_y:
down = down - neighbor.size_height
return down-group.size_height
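# Hedged illustration (editorial addition, not part of the original module): a
# minimal sketch of how calculate_border_north_south_position packs siblings
# against the right edge of their parent. The _Stub class and this helper are
# hypothetical and are never called by the module itself.
def _example_border_position():
    class _Stub(object):
        def __init__(self, position_x, size_width, parent=None):
            self.position_x = position_x
            self.size_width = size_width
            self.parent = parent
    parent = _Stub(0, 10)
    a = _Stub(0, 3, parent)  # left-most child
    b = _Stub(5, 2, parent)  # sits to the right of a
    # a ends up at x = 10 - 2 (width of b) - 3 (own width) = 5
    assert calculate_border_north_south_position(a, [a, b]) == 5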
def add_neighbor_north_south( group_north, group_south):
'''
Register group_south as the southern neighbor of group_north (and vice versa),
and propagate the connection to their children and blocks.
'''
group_north.neighbor_south.append(group_south)
if group_south in group_north.neighbor_unsorted:
group_north.neighbor_unsorted.remove(group_south)
group_south.neighbor_north.append(group_north)
if group_north in group_south.neighbor_unsorted:
group_south.neighbor_unsorted.remove(group_north)
for child in group_north.childs:
for child_neighbor in child.neighbors:
if child_neighbor.parent is group_south:
child.connected_parent_south = child_neighbor.connected_parent_south + 1
child_neighbor.connected_parent_north = child_neighbor.connected_parent_north + 1
for block_north in group_north.blocks:
for block_south in group_south.blocks:
for pin_north in block_north.pins.values():
for pin_south in block_south.pins.values():
if pin_north.net not in ['vdd', 'gnd', 'vss'] and pin_north.net == pin_south.net:
group_north.block_south.add(block_north)
group_south.block_north.add(block_south)
def add_neighbor_east_west( group_east, group_west):
'''
Register group_west as the western neighbor of group_east (and vice versa),
and propagate the connection to their children and blocks.
'''
group_east.neighbor_west.append(group_west)
if group_west in group_east.neighbor_unsorted:
group_east.neighbor_unsorted.remove(group_west)
group_west.neighbor_east.append(group_east)
if group_east in group_west.neighbor_unsorted:
group_west.neighbor_unsorted.remove(group_east)
for child in group_east.childs:
for child_neighbor in child.neighbors:
if child_neighbor.parent is group_west:
child.connected_parent_west = child_neighbor.connected_parent_west + 1
child_neighbor.connected_parent_east = child_neighbor.connected_parent_east + 1
for block_east in group_east.blocks:
for block_west in group_west.blocks:
for pin_east in block_east.pins.values():
for pin_west in block_west.pins.values():
if pin_east.net not in ['vdd', 'gnd', 'vss'] and pin_east.net == pin_west.net:
group_east.block_west.add(block_east)
group_west.block_east.add(block_west)
|
daringer/schemmaker
|
src/force_optimizer/initial_step.py
|
Python
|
apache-2.0
| 45,727
|
[
"VisIt"
] |
299b57baf25ae632735dfbc56b5249235262a918c222d19ad6bae5d5174d6ea2
|
# -*- coding: utf-8 -*-
#
desc = 'Noise with a horizontal color bar'
phash = '5f5ca33da3816983'
def plot():
from matplotlib import pyplot as pp
import numpy as np
# Make plot with horizontal colorbar
fig = pp.figure()
ax = fig.add_subplot(111)
np.random.seed(123)
data = np.clip(np.random.randn(250, 250), -1, 1)
cax = ax.imshow(data, interpolation='nearest')
ax.set_title('Gaussian noise with horizontal colorbar')
cbar = fig.colorbar(cax, ticks=[-1, 0, 1], orientation='horizontal')
# horizontal colorbar
cbar.ax.set_xticklabels(['Low', 'Medium', 'High'])
return fig
|
dougnd/matplotlib2tikz
|
test/testfunctions/noise2.py
|
Python
|
mit
| 628
|
[
"Gaussian"
] |
ac825d2abb7df46a49f0bf448d835810205bff816878bf2ebdb3cbac8f35657e
|
from __future__ import division
import numpy as np
'''
This file contains implementations of various activation functions.
'''
#Random seed for consistency in results.
np.random.seed(32)
def sigmoid(tensor):
'''
Sigmoid transfer.
:param tensor: numpy array
:return: numpy array of same shape, where each input element has been modified.
'''
return 1/(1+np.exp(-tensor))
def tanh(tensor):
'''
Hyperbolic tangent.
:param tensor: numpy array.
:return: numpy array of same shape, where each input element has been modified.
'''
return np.tanh(tensor)
def ReLU(tensor):
'''
Rectified Linear Unit
:param tensor: numpy array
:return: numpy array of same shape, where each input element has been rectified.
'''
return np.maximum(0,tensor)
def Noisy_ReLU(tensor):
'''
Noisy Rectified Linear Unit. The noise is drawn from a
Gaussian distribution with zero mean and unit variance.
:param tensor: numpy array
:return: numpy array of same shape, where each input element has been rectified with added noise.
'''
return np.maximum(0,tensor+np.random.normal(0,1))
def Leaky_ReLU(tensor):
'''
Leaky Rectified Linear Unit.
Note: the input array is modified in place.
:param tensor: numpy array
:return: numpy array of same shape.
'''
tensor[tensor<0] *= 0.01
return tensor
def softmax(tensor):
'''
Softmax function (normalized exponential): maps an array of arbitrary real
values to an array of real values in the range (0, 1) that add up to 1.
Softmax can overflow for large inputs, so we shift by the row maximum before exponentiating.
:param tensor: 2D numpy array (one sample per row).
:return: softmax transferred numpy array of same dimension.
'''
tensor = tensor -np.max(tensor, axis=1,keepdims=True)
exp = np.exp(tensor)
return exp/np.sum(exp, axis=1,keepdims=True)
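# Hedged usage sketch (editorial addition): shows that the row-max shift keeps
# softmax finite for large inputs and that each row sums to 1. The input values
# are arbitrary and this helper is never called by the module itself.
def _example_softmax():
    logits = np.array([[1000.0, 1001.0, 1002.0]])
    probs = softmax(logits)
    assert np.isfinite(probs).all()
    assert np.allclose(probs.sum(axis=1), 1.0)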
|
prateekbhat91/Neural-Network
|
neuralnetwork/activation_functions.py
|
Python
|
bsd-3-clause
| 1,854
|
[
"Gaussian"
] |
68f26a81938fa1f93bc17e90f41620c6a4582dbb6f888ea4e385e69ab30e2639
|
# lots to do:
# __ native drawLines
# __ add native drawCurve method
# __ native rectangle/round rect method
# __ native drawEllipse
# __ native drawArc
# __ drawImage support (work on Pyart side of things)
from __future__ import print_function
import pyart
from rdkit.sping.pid import *
from rdkit.sping.PDF import pdfmetrics
import Fontmapping # helps by mapping pid font classes to Pyart font names
# note for now I'm just going to do the standard PDF fonts & forget the rest
class PyartCanvas(Canvas):
"note the default face is 'times' and is set in Fontmapping.py"
def __init__(self,size=(300,300),name='PyartCanvas.png'):
self._pycan = pyart.Canvas(size[0], size[1], dpi=72)
self.filename = name
Canvas.__init__(self, size, name)
# self.defaultFillColor = transparent
# now we need to setup our tracking of the defaults vs the current state
# see if the __setattr__ approach is any better than the _updateXX strategy
def __setattr__(self, name, value):
if name == 'defaultLineColor':
if value:
# print('setting defaultLineColor to %s, 0x%x' % (value, value.toHexRGB()))
if value != transparent:
self._pycan.gstate.stroke = value.toHexRGB()
self.__dict__[name] = value
elif name == 'defaultFillColor':
if value:
if value != transparent:
self._pycan.gstate.fill = value.toHexRGB()
self.__dict__[name] = value
elif name == 'defaultLineWidth' :
if value:
self._pycan.gstate.stroke_width = value
self.__dict__[name] = value
elif name == 'defaultFont':
if value:
self.__dict__[name] = value
self._setPyartFont(value)
else: # received None so set to default font face & size=12
self.__dict__[name] = Font(face='times')
self._setPyartFont(self.__dict__[name])
else:
self.__dict__[name] = value
## Private methods ##
def _protectArtState(self, bool):
if bool:
self._pycan.gsave()
return bool
def _restoreArtState(self, bool):
if bool:
self._pycan.grestore()
def _setPyartFont(self, fontInstance):
# accounts for "None" option
# does not act on self.defaultFont at all
fontsize = fontInstance.size
self._pycan.gstate.font_size = fontsize
# map pid name for font to Pyart name
pyartname = Fontmapping.getPyartName(fontInstance)
self._pycan.gstate.setfont(pyartname)
# # # # #
### public PID Canvas methods ##
def clear(self):
pass
def flush(self):
pass
def save(self, file=None, format=None):
# fileobj = getFileObject(file)
if not file:
file = self.filename
if isinstance(file, StringType):
self._pycan.save(file)
else:
raise NotImplementedError
def _findExternalFontName(self, font): #copied from piddlePDF by cwl- hack away!
"""Attempts to return proper font name.
PDF uses a standard 14 fonts referred to
by name. Default to self.defaultFont('Helvetica').
The dictionary allows a layer of indirection to
support a standard set of PIDDLE font names."""
piddle_font_map = {
'Times':'Times',
'times':'Times',
'Courier':'Courier',
'courier':'Courier',
'helvetica':'Helvetica',
'Helvetica':'Helvetica',
'symbol':'Symbol',
'Symbol':'Symbol',
'monospaced':'Courier',
'serif':'Times',
'sansserif':'Helvetica',
'ZapfDingbats':'ZapfDingbats',
'zapfdingbats':'ZapfDingbats',
'arial':'Helvetica'
}
try:
face = piddle_font_map[string.lower(font.face)]
except:
return 'Helvetica'
name = face + '-'
if font.bold and face in ['Courier','Helvetica','Times']:
name = name + 'Bold'
if font.italic and face in ['Courier', 'Helvetica']:
name = name + 'Oblique'
elif font.italic and face == 'Times':
name = name + 'Italic'
if name == 'Times-':
name = name + 'Roman'
# symbol and ZapfDingbats cannot be modified!
#trim and return
if name[-1] == '-':
name = name[0:-1]
return name
def stringWidth(self, s, font=None):
if not font:
font = self.defaultFont
fontname = Fontmapping.getPdfName(font)
return pdfmetrics.stringwidth(s, fontname) * font.size * 0.001
def fontAscent(self, font=None):
if not font:
font = self.defaultFont
fontname = Fontmapping.getPdfName(font)
return pdfmetrics.ascent_descent[fontname][0] * 0.001 * font.size
def fontDescent(self, font=None):
if not font:
font = self.defaultFont
fontname = Fontmapping.getPdfName(font)
return -pdfmetrics.ascent_descent[fontname][1] * 0.001 * font.size
def drawLine(self, x1, y1, x2, y2, color=None, width=None):
## standard code ##
color = color or self.defaultLineColor
width = width or self.defaultLineWidth
if color != transparent:
changed = self._protectArtState( (color != self.defaultLineColor) or
(width != self.defaultLineWidth) )
if color != self.defaultLineColor:
self._pycan.gstate.stroke = color.toHexRGB()
# print("color is %s <-> %s" % (color, color.toHexStr()))
if width != self.defaultLineWidth:
self._pycan.gstate.stroke_width = width
###################
# actual drawing
p = pyart.VectorPath(3)
p.moveto_open(x1,y1)
p.lineto(x2,y2)
self._pycan.stroke(p)
## standard code ##
if changed:
self._pycan.grestore()
###################
# def drawLines(self, lineList, color=None, width=None):
# pass
def drawString(self, s, x, y, font=None, color=None, angle=0):
# start w/ the basics
self._pycan.drawString(x,y, s)
def drawPolygon(self, pointlist,
edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
eColor = edgeColor or self.defaultLineColor
fColor = fillColor or self.defaultFillColor
eWidth = edgeWidth or self.defaultLineWidth
changed = self._protectArtState( (eColor != self.defaultLineColor) or
(eWidth != self.defaultLineWidth) or
(fColor != self.defaultFillColor) )
if eColor != self.defaultLineColor:
self._pycan.gstate.stroke = eColor.toHexRGB()
if fColor != self.defaultFillColor:
self._pycan.gstate.fill = fColor.toHexRGB()
if eWidth != self.defaultLineWidth:
self._pycan.gstate.stroke_width = eWidth
path = pyart.VectorPath(len(pointlist)+1)
if closed:
path.moveto_closed(pointlist[0][0], pointlist[0][1])
else:
path.moveto_open(pointlist[0][0], pointlist[0][1])
for pt in pointlist[1:]:
path.lineto(pt[0],pt[1])
if closed:
path.close()
if fColor != transparent and closed:
self._pycan.fill(path)
if eColor != transparent:
self._pycan.stroke(path)
self._restoreArtState(changed)
#def drawCurve(self, x1, y1, x2, y2, x3, y3, x4, y4,
# edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
# pass
# def drawRoundRect(self, x1,y1, x2,y2, rx=8, ry=8,
# edgeColor=None, edgeWidth=None, fillColor=None):
# pass
# def drawEllipse(self, x1,y1, x2,y2, edgeColor=None, edgeWidth=None,
# fillColor=None):
# pass
# def drawArc(self, x1,y1, x2,y2, startAng=0, extent=360, edgeColor=None,
# edgeWidth=None, fillColor=None):
# pass
# def drawFigure(self, partList,
# edgeColor=None, edgeWidth=None, fillColor=None, closed=0):
# pass
# def drawImage(self, image, x1, y1, x2=None,y2=None):
# pass
## basic tests ##
if __name__=='__main__':
import rdkit.sping.tests.pidtest
can = PyartCanvas(size=(300,300), name='basictest.png')
#can.defaultLineColor = Color(0.7, 0.7, 1.0)
#can.drawLine(10,10, 290,290)
#can.drawLine(10,10, 50, 10, color=green, width = 4.5)
rdkit.sping.tests.pidtest.drawBasics(can)
can.save(file='basicTest.png')
print('saving basicTest.png')
can = PyartCanvas(size=(400,400), name='test-strings.png')
rdkit.sping.tests.pidtest.drawStrings(can)
can.save()
|
soerendip42/rdkit
|
rdkit/sping/Pyart/pidPyart.py
|
Python
|
bsd-3-clause
| 9,314
|
[
"RDKit"
] |
6c7f8e39d4caa30f19c65cab1483c7a705e8e259e12cf1dec02df3495ea14444
|
"""
Computes putative binding pockets on protein.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "GPL"
import os
import tempfile
import numpy as np
import openbabel as ob
from rdkit import Chem
from subprocess import call
from scipy.spatial import ConvexHull
from deepchem.feat import hydrogenate_and_compute_partial_charges
from deepchem.feat.atomic_coordinates import AtomicCoordinates
from deepchem.feat.grid_featurizer import load_molecule
from deepchem.feat.binding_pocket_features import BindingPocketFeaturizer
from deepchem.feat.fingerprints import CircularFingerprint
from deepchem.models.sklearn_models import SklearnModel
from deepchem.data.datasets import NumpyDataset
def extract_active_site(protein_file, ligand_file, cutoff=4):
"""Extracts a box for the active site."""
protein_coords = load_molecule(protein_file, add_hydrogens=False)[0]
ligand_coords = load_molecule(ligand_file, add_hydrogens=False)[0]
num_ligand_atoms = len(ligand_coords)
num_protein_atoms = len(protein_coords)
pocket_inds = []
pocket_atoms = set([])
for lig_atom_ind in range(num_ligand_atoms):
lig_atom = ligand_coords[lig_atom_ind]
for protein_atom_ind in range(num_protein_atoms):
protein_atom = protein_coords[protein_atom_ind]
if np.linalg.norm(lig_atom - protein_atom) < cutoff:
if protein_atom_ind not in pocket_atoms:
pocket_atoms = pocket_atoms.union(set([protein_atom_ind]))
# Should be an array of size (n_pocket_atoms, 3)
pocket_atoms = list(pocket_atoms)
n_pocket_atoms = len(pocket_atoms)
pocket_coords = np.zeros((n_pocket_atoms, 3))
for ind, pocket_ind in enumerate(pocket_atoms):
pocket_coords[ind] = protein_coords[pocket_ind]
x_min = int(np.floor(np.amin(pocket_coords[:, 0])))
x_max = int(np.ceil(np.amax(pocket_coords[:, 0])))
y_min = int(np.floor(np.amin(pocket_coords[:, 1])))
y_max = int(np.ceil(np.amax(pocket_coords[:, 1])))
z_min = int(np.floor(np.amin(pocket_coords[:, 2])))
z_max = int(np.ceil(np.amax(pocket_coords[:, 2])))
return (((x_min, x_max), (y_min, y_max), (z_min, z_max)),
pocket_atoms, pocket_coords)
def compute_overlap(mapping, box1, box2):
"""Computes overlap between the two boxes.
Overlap is defined as % atoms of box1 in box2. Note that
overlap is not a symmetric measurement.
"""
atom1 = set(mapping[box1])
atom2 = set(mapping[box2])
return len(atom1.intersection(atom2))/float(len(atom1))
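# Hedged example (editorial addition): a tiny worked case showing that
# compute_overlap is not symmetric. The box keys and atom indices are made up
# purely for illustration; this helper is never called by the module itself.
def _example_compute_overlap():
    box_a = ((0, 1), (0, 1), (0, 1))
    box_b = ((0, 2), (0, 2), (0, 2))
    mapping = {box_a: [0, 1], box_b: [0, 1, 2, 3]}
    assert compute_overlap(mapping, box_a, box_b) == 1.0  # all of box_a's atoms lie in box_b
    assert compute_overlap(mapping, box_b, box_a) == 0.5  # only half of box_b's atoms lie in box_a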
def get_all_boxes(coords, pad=5):
"""Get all pocket boxes for protein coords.
We pad all boxes the prescribed number of angstroms.
TODO(rbharath): It looks like this may perhaps be non-deterministic?
"""
hull = ConvexHull(coords)
boxes = []
for triangle in hull.simplices:
# coords[triangle, 0] gives the x-dimension of all triangle points
# Take transpose to make sure rows correspond to atoms.
points = np.array(
[coords[triangle, 0], coords[triangle, 1], coords[triangle, 2]]).T
# We voxelize so all grids have integral coordinates (convenience)
x_min, x_max = np.amin(points[:, 0]), np.amax(points[:, 0])
x_min, x_max = int(np.floor(x_min))-pad, int(np.ceil(x_max))+pad
y_min, y_max = np.amin(points[:, 1]), np.amax(points[:, 1])
y_min, y_max = int(np.floor(y_min))-pad, int(np.ceil(y_max))+pad
z_min, z_max = np.amin(points[:, 2]), np.amax(points[:, 2])
z_min, z_max = int(np.floor(z_min))-pad, int(np.ceil(z_max))+pad
boxes.append(((x_min, x_max), (y_min, y_max), (z_min, z_max)))
return boxes
def boxes_to_atoms(atom_coords, boxes):
"""Maps each box to a list of atoms in that box.
TODO(rbharath): This does a num_atoms x num_boxes computations. Is
there a reasonable heuristic we can use to speed this up?
"""
mapping = {}
for box_ind, box in enumerate(boxes):
box_atoms = []
(x_min, x_max), (y_min, y_max), (z_min, z_max) = box
print("Handing box %d/%d" % (box_ind, len(boxes)))
for atom_ind in range(len(atom_coords)):
atom = atom_coords[atom_ind]
x_cont = x_min <= atom[0] and atom[0] <= x_max
y_cont = y_min <= atom[1] and atom[1] <= y_max
z_cont = z_min <= atom[2] and atom[2] <= z_max
if x_cont and y_cont and z_cont:
box_atoms.append(atom_ind)
mapping[box] = box_atoms
return mapping
def merge_boxes(box1, box2):
"""Merges two boxes."""
(x_min1, x_max1), (y_min1, y_max1), (z_min1, z_max1) = box1
(x_min2, x_max2), (y_min2, y_max2), (z_min2, z_max2) = box2
x_min = min(x_min1, x_min2)
y_min = min(y_min1, y_min2)
z_min = min(z_min1, z_min2)
x_max = max(x_max1, x_max2)
y_max = max(y_max1, y_max2)
z_max = max(z_max1, z_max2)
return ((x_min, x_max), (y_min, y_max), (z_min, z_max))
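# Hedged example (editorial addition): merge_boxes simply takes the axis-aligned
# bounding box of its two arguments. The coordinates below are illustrative only
# and this helper is never called by the module itself.
def _example_merge_boxes():
    box1 = ((0, 2), (0, 2), (0, 2))
    box2 = ((1, 3), (-1, 1), (0, 4))
    assert merge_boxes(box1, box2) == ((0, 3), (-1, 2), (0, 4))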
def merge_overlapping_boxes(mapping, boxes, threshold=.8):
"""Merge boxes which have an overlap greater than threshold.
TODO(rbharath): This merge code is terribly inelegant. It's also quadratic
in number of boxes. It feels like there ought to be an elegant divide and
conquer approach here. Figure out later...
"""
num_boxes = len(boxes)
outputs = []
for i in range(num_boxes):
box = boxes[0]
new_boxes = []
new_mapping = {}
# If overlap of box with previously generated output boxes, return
contained = False
for output_box in outputs:
# Carry forward mappings
new_mapping[output_box] = mapping[output_box]
if compute_overlap(mapping, box, output_box) == 1:
contained = True
if contained:
continue
# We know that box has at least one atom not in outputs
unique_box = True
for merge_box in boxes[1:]:
overlap = compute_overlap(mapping, box, merge_box)
if overlap < threshold:
new_boxes.append(merge_box)
new_mapping[merge_box] = mapping[merge_box]
else:
# Current box has been merged into box further down list.
# No need to output current box
unique_box = False
merged = merge_boxes(box, merge_box)
new_boxes.append(merged)
new_mapping[merged] = list(
set(mapping[box]).union(set(mapping[merge_box])))
if unique_box:
outputs.append(box)
new_mapping[box] = mapping[box]
boxes = new_boxes
mapping = new_mapping
return outputs, mapping
class BindingPocketFinder(object):
"""Abstract superclass for binding pocket detectors"""
def find_pockets(self, protein_file, ligand_file):
"""Finds potential binding pockets in proteins."""
raise NotImplementedError
class ConvexHullPocketFinder(BindingPocketFinder):
"""Implementation that uses convex hull of protein to find pockets.
Based on https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4112621/pdf/1472-6807-14-18.pdf
"""
def __init__(self, pad=5):
self.pad = pad
def find_all_pockets(self, protein_file):
"""Find list of binding pockets on protein."""
# protein_coords is (N, 3) tensor
coords = load_molecule(protein_file, add_hydrogens=False)[0]
return get_all_boxes(coords, self.pad)
def find_pockets(self, protein_file, ligand_file):
"""Find list of suitable binding pockets on protein."""
protein_coords = load_molecule(protein_file, add_hydrogens=False)[0]
ligand_coords = load_molecule(ligand_file, add_hydrogens=False)[0]
boxes = get_all_boxes(protein_coords, self.pad)
mapping = boxes_to_atoms(protein_coords, boxes)
pockets, pocket_atoms_map = merge_overlapping_boxes(mapping, boxes)
pocket_coords = []
for pocket in pockets:
atoms = pocket_atoms_map[pocket]
coords = np.zeros((len(atoms), 3))
for ind, atom in enumerate(atoms):
coords[ind] = protein_coords[atom]
pocket_coords.append(coords)
return pockets, pocket_atoms_map, pocket_coords
class RFConvexHullPocketFinder(BindingPocketFinder):
"""Uses pre-trained RF model + ConvexHulPocketFinder to select pockets."""
def __init__(self, pad=5):
self.pad = pad
self.convex_finder = ConvexHullPocketFinder(pad)
# Load binding pocket model
self.base_dir = tempfile.mkdtemp()
print("About to download trained model.")
# TODO(rbharath): Shift refined to full once trained.
call(("wget -c http://deepchem.io.s3-website-us-west-1.amazonaws.com/trained_models/pocket_random_refined_RF.tar.gz").split())
call(("tar -zxvf pocket_random_refined_RF.tar.gz").split())
call(("mv pocket_random_refined_RF %s" % (self.base_dir)).split())
self.model_dir = os.path.join(self.base_dir, "pocket_random_refined_RF")
# Fit model on dataset
self.model = SklearnModel(model_dir=self.model_dir)
self.model.reload()
# Create featurizers
self.pocket_featurizer = BindingPocketFeaturizer()
self.ligand_featurizer = CircularFingerprint(size=1024)
def find_pockets(self, protein_file, ligand_file):
"""Compute features for a given complex
TODO(rbharath): This has a lot of code overlap with
compute_binding_pocket_features in
examples/binding_pockets/binding_pocket_datasets.py. Find way to refactor
to avoid code duplication.
"""
if not ligand_file.endswith(".sdf"):
raise ValueError("Only .sdf ligand files can be featurized.")
ligand_basename = os.path.basename(ligand_file).split(".")[0]
ligand_mol2 = os.path.join(
self.base_dir, ligand_basename + ".mol2")
# Write mol2 file for ligand
obConversion = ob.OBConversion()
conv_out = obConversion.SetInAndOutFormats(str("sdf"), str("mol2"))
ob_mol = ob.OBMol()
obConversion.ReadFile(ob_mol, str(ligand_file))
obConversion.WriteFile(ob_mol, str(ligand_mol2))
# Featurize ligand
mol = Chem.MolFromMol2File(str(ligand_mol2), removeHs=False)
if mol is None:
return None, None
# Default for CircularFingerprint
n_ligand_features = 1024
ligand_features = self.ligand_featurizer.featurize([mol])
# Featurize pocket
pockets, pocket_atoms_map, pocket_coords = self.convex_finder.find_pockets(
protein_file, ligand_file)
n_pockets = len(pockets)
n_pocket_features = BindingPocketFeaturizer.n_features
features = np.zeros((n_pockets, n_pocket_features+n_ligand_features))
pocket_features = self.pocket_featurizer.featurize(
protein_file, pockets, pocket_atoms_map, pocket_coords)
# Note broadcast operation
features[:, :n_pocket_features] = pocket_features
features[:, n_pocket_features:] = ligand_features
dataset = NumpyDataset(X=features)
pocket_preds = self.model.predict(dataset)
pocket_pred_proba = np.squeeze(self.model.predict_proba(dataset))
# Find pockets which are active
active_pockets = []
active_pocket_atoms_map = {}
active_pocket_coords = []
for pocket_ind in range(len(pockets)):
#################################################### DEBUG
# TODO(rbharath): For now, using a weak cutoff. Fix later.
#if pocket_preds[pocket_ind] == 1:
if pocket_pred_proba[pocket_ind][1] > .15:
#################################################### DEBUG
pocket = pockets[pocket_ind]
active_pockets.append(pocket)
active_pocket_atoms_map[pocket] = pocket_atoms_map[pocket]
active_pocket_coords.append(pocket_coords[pocket_ind])
return active_pockets, active_pocket_atoms_map, active_pocket_coords
|
bowenliu16/deepchem
|
deepchem/dock/binding_pocket.py
|
Python
|
gpl-3.0
| 11,449
|
[
"RDKit"
] |
9126bed4ec105a92c7022a22875d1469e6e3c21c889a3c9bf65014b7c4439075
|
import unittest
from catkit import Gratoms
from catkit.gen.utils import to_gratoms
from ase.constraints import FixAtoms
from ase.build import molecule
import networkx as nx
class TestGratoms(unittest.TestCase):
"""Test features of the gratoms module."""
def test_edge_addition(self):
"""Test that edges are added correctly."""
edges = [(0, 1), (0, 2)]
atoms = Gratoms(edges=edges)
for n in atoms.edges:
assert(n in edges)
def test_attributes(self):
"""Test get_neighbor_symbols, get_chemical_tags
and set_node_attributes functions.
"""
mol = molecule('H2O')
atoms = to_gratoms(mol)
atoms.graph.add_edges_from([(0, 1), (0, 2)])
sym_test = atoms.get_neighbor_symbols(0)
assert(sym_test.tolist() == ['H', 'H'])
test_tags = atoms.get_chemical_tags(rank=1)
assert(test_tags == '2,0,0,0,0,0,0,1')
test_comp, test_bonds = atoms.get_chemical_tags()
assert(test_comp == '2,0,0,0,0,0,0,1')
assert(test_bonds == '4,0,0,0,0,0,0,3')
atoms.set_constraint(FixAtoms(indices=[0]))
del atoms[2]
assert(len(atoms) == 2)
nx.set_node_attributes(
atoms.graph, name='valence', values={0: 1, 1: 0})
test_nodes = atoms.get_unsaturated_nodes(screen=1)
assert(test_nodes == [0])
if __name__ == '__main__':
unittest.main()
|
jboes/CatKit
|
catkit/tests/test_gratoms.py
|
Python
|
gpl-3.0
| 1,432
|
[
"ASE"
] |
9052d3a519e580da40b50b70ed1682e095495aa43bd328c61f8938da17956982
|
# Attribute basis to MRL - (do not individually apply to MTP)
# http://www.music.mcgill.ca/~jason/mumt621/papers5/fujishima_1999.pdf ?
# https://www.jstor.org/stable/pdf/40285717.pdf
# Zicheng (Brian) Gao
from MusicRoll import *
import TensionModule
import PPMBasis
import numpy as np
import pickle
import pprint
from tkinter import Tk, Button, Frame, Canvas, Scrollbar
import tkinter.constants as Tkconstants
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
# from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
__verbose = False
__block = True
__report_interval = 100
__hard_limit = 1000
# parameters:
attn_decay = 0.3 # decay by this much from note onsets
n_smoothing = 0.0 # forward smoothing on notes
# candidates must be above b_ambiguous * most_likely_likelihood
# if there are multiple candidates, it is an ambiguous section
# lesser cuts more; higher makes ambiguous sections more likely
# thus, higher is actually more stringent
b_ambiguous = 0.4
max_gap = 4
thresh_conf = 0.25
# lower bound for continuation of current hypothesis
persistence = 0.25
def debug_print(*args):
if __verbose:
print(args)
def ema(seq, inertia):
# mutating operation: exponential moving average applied over an array
for i in np.r_[1:np.size(seq, 0)]:
seq[i] = inertia * seq[i-1] + (1 - inertia) * seq[i]
return seq
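# Hedged example (editorial addition): ema mutates its argument in place,
# applying an exponential moving average with the given inertia. The values
# below are illustrative only and this helper is never called by the module.
def _example_ema():
    seq = np.array([1.0, 0.0, 0.0, 0.0])
    ema(seq, 0.5)
    assert np.allclose(seq, [1.0, 0.5, 0.25, 0.125])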
def apply_with_window(origin, target, vfunction, width, wdirection = 'backwards'):
# wdirection must either be 'forwards' or 'backwards'
assert np.size(origin, 0) == np.size(target, 0)
# branching for efficiency? save calculations in loop?
if wdirection == 'backwards':
for i in np.r_[0:width]:
target[i] = vfunction(origin[ 0 : i ])
for i in np.r_[width:np.size(origin, 0)]:
target[i] = vfunction(origin[ i - width : i ])
def trim(vector, factor):
return 1.0 * (vector > ((1 - factor) * np.max(vector)))
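# Hedged example (editorial addition): trim returns a 1.0/0.0 mask keeping only
# the entries within `factor` of the maximum. The values are illustrative only
# and this helper is never called by the module.
def _example_trim():
    v = np.array([0.1, 0.5, 0.9, 1.0])
    assert np.allclose(trim(v, 0.2), [0.0, 0.0, 1.0, 1.0])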
def matches(belief, actual):
# How much does the observed value match the hypothesis
return 1 - np.linalg.norm(belief - actual)
def do_basis_label(filename, metric = TensionModule.metric.dissonance):
pp = pprint.PrettyPrinter(indent=4)
roll = pickle.load(open(filename, 'rb'))
pp.pprint(vars(roll))
tens_mod = TensionModule.TensionModule(metric)
for tempo, group in roll.get_tape_groups().items():
max_gap = 2 * max([tape.min_common for tape in group])
note_data = MusicRoll.combine_notes(group)
# Array - newness matters - simulate attentional decay
# @newness = 0 -> 1-k
# @newness = 1 -> 1
# factor = (1-k) + n * k
# = 1 - k + nk = 1 - k * (n - 1)
orig_notes = note_data[...,0] * (1 - attn_decay * (note_data[...,1] - 1) )
duration = min(np.size(orig_notes, 0), __hard_limit)
notes = ema(np.tile(orig_notes, 1), n_smoothing)[:duration]
# smoothing?
keys = np.r_[:np.size(notes, 1)]
fig1 = plt.figure(1, figsize = (5, 5))
grid = AxesGrid(fig1, 111,
nrows_ncols = (4, 1),
axes_pad = 0.05,
label_mode = "1",
)
Plot_Bases = grid[3]
basis_prob = np.zeros((duration, 12)) # likelihoods
basis_label = np.zeros((duration, 1)) - 1 # labels
tension = np.zeros(duration)
def axis_basis(quanta):
actives = keys[quanta > 0.0001] + 3 # due to midi pitch nonsense - 0 is C
weights = quanta[quanta > 0.0001]
return tens_mod.basis_likelihood(np.vstack((actives, weights)).T)
def label(base, start, end):
if start == 0:
start -= 1
Plot_Bases.broken_barh([(start + 1, end - start - 1)], (base + 0.25 , 0.5), facecolors = 'red', alpha = 0.3, linewidth = 0)
basis_label[start + 1:end] = base
def bar(time):
Plot_Bases.broken_barh([(time, 0.1)], (0, 12), facecolors = 'red', alpha = 0.7, linewidth = 0)
def block(base, start, end, color):
if __block:
Plot_Bases.broken_barh([(start + 1, end - start)], (base + 0.25, 0.5), facecolors = color, alpha = 1.0, linewidth = 0)
# go through the time slices...
left = 0
right = 0
reach = 0
candidate = None
confidence = 0 # SHOULD BE USED
def notes_in_time(start, end):
return np.sum(note_data[start:end,:,1])
def label_basis():
if candidate == None:
for base in TensionModule.v_basis[b_curr == np.max(b_curr)]:
label(base, left - 1, right + 1)
else:
label(candidate, left - 1, right + 1)
def get_cand(notes):
# return (candidate, confidence)
return (trim(b_curr, b_ambiguous), np.max(b_curr))
while right < duration and right < __hard_limit:
# report
if right % __report_interval == 0:
print('{0}/{1}...'.format(right, duration))
# If we didn't have a candidate, try to check for one
if candidate == None:
# get current hypothesis from accumulated notes
confidence_factor = 1 - 1/(notes_in_time(left, right + 1) + 1)
section = np.sum(notes[left:right+1], 0)
b_curr = axis_basis(section) * confidence_factor
basis_prob[right] = b_curr
tension[right] = tens_mod.selfTension(section)
(try_cand, try_conf) = get_cand(b_curr)
# make sure there is only one candidate, and that it is confident enough
if np.sum(try_cand) == 1 and try_conf > thresh_conf:
# found a candidate
debug_print('got', right, try_cand, try_conf)
block(-0.5, right - 1, right, 'purple')
candidate = TensionModule.v_basis[try_cand > b_ambiguous][0]
confidence = try_conf
else:
# no candidate / still ambiguous
if np.sum(try_cand) > 1:
debug_print('non', right, 'multiple')
else:
debug_print('non', right, try_conf, '<', thresh_conf)
block(-0.5, right - 1, right, 'blue')
# If there is a candidate, check to see if the next observed slice follows
else:
reach = 0
similarity = -1
# attempt to bridge gap if dissimilarity is seen
while reach < max_gap and right + reach < duration and similarity < persistence:
# check if the following slice fits the hypothesis
# section = np.vstack((notes[left:right+1], notes[right + reach]))
# b_next = axis_basis(np.sum(section, 0))
b_next = axis_basis(notes[right + reach])
(try_cand, try_conf) = get_cand(b_next)
# similarity = matches(trim(b_curr, b_ambiguous, 1), b_next)
similarity = b_next[candidate]
debug_print('chk', right, right + reach, 'cnd', candidate, similarity)
reach += 1
# exited due to similarity - can extend
if similarity >= persistence or right + reach >= duration:
block(-0.5, right - 1, right, 'green')
debug_print('ext', right, similarity)
# all's right - we can aggregate this slice
section = np.sum(notes[left:right+1], 0)
# basis_prob[right] = b_curr = axis_basis(section)
basis_prob[right] = b_curr = axis_basis(notes[right])
tension[right] = tens_mod.selfTension(section)
# a gap was found and was too large
elif similarity < persistence and reach >= max_gap:
block(0, right - 1, right, 'yellow')
debug_print('rev', right, similarity, list(b_next))
bar(right)
right -= 1
label_basis()
candidate = None
left = right + 1
right += 1
# back-label
# label when hitting end
label_basis()
# basis_prob = np.apply_along_axis(axis_basis, 1, notes)
# ema(basis_prob, 0.2)
grid[0].set_title("{0} group {1}".format(roll.filepath, tempo))
Plot_Bases.locator_params(axis='y', nbins = 12)
min_note = min([tape.min_note for tape in group])
max_note = max([tape.max_note for tape in group])
grid[0].plot(np.r_[:duration] + 0.5, 2 * tension / np.max(tension), 'k')
grid[1].imshow(notes.T[min_note:max_note + 1],
interpolation = 'none',
cmap = plt.cm.Oranges,
origin = 'lower',
extent=[0, duration, 0, 12],
aspect = 0.5 * duration / 24)
Plot_Bases.imshow(basis_prob.T,
interpolation = 'none',
cmap = plt.cm.Greys,
origin = 'lower',
extent=[0, duration, 0, 12],
aspect = 0.5 * duration / 24)
grid[2].imshow(basis_label.T,
interpolation = 'none',
cmap = plt.cm.jet,
origin = 'lower',
extent=[0, duration, 0, 12],
aspect = 0.5 * duration / (24 * 3))
# print(basis_label)
plt.show()
# TODO label afterwards, and re-pickle
# This is really a classification problem that ought to be addressed with the proper tools
# get it to mark what actions it took - exploring, backlabelling
# prevent "consecutive self-labelling" for example
# and also smooth away hiccups
# from pycallgraph import PyCallGraph
# from pycallgraph.output import GraphvizOutput
if __name__ == '__main__':
# with PyCallGraph(output=GraphvizOutput(output_file = "BASIS.png")):
# do_basis_label('./mid/bach/aof/can1.mrl', dissonance_metric)
do_basis_label('./mid/moldau_single.mrl', TensionModule.metric.western)
do_basis_label('./mid/moldau_accomp.mrl', TensionModule.metric.western)
# do_basis_label('./mid/bach/aof/can1.mrl', TensionModule.metric.western)
# do_basis_label('./mid/oldyuanxian2.mrl', TensionModule.metric.western)
# do_basis_label('./mid/mary.mrl')
# do_basis_label('./mid/ambigious_test.mrl', TensionModule.metric.western) # lowsim 0.3 - 0.35
# do_basis_label('./mid/ivivi.mrl', TensionModule.metric.western) # lowsim 0.5
"""
TODO: Confidence in labelling - should be used
TODO: Make sure no OTHER candidate surpasses the first before extending
"""
"""
Entirely hopeless; needs restart.
ON NEXT TRY:
1: Interleave note activation times in MusicRoll instead of producing the actual timeseries.
(Timeseries for use with neural model)
2: Bias towards "sensible" shifts from previous basis (circle of fifths distance)
"""
|
ichaelm/MusicPrediction
|
src/midibasis.py
|
Python
|
mit
| 9,495
|
[
"Brian"
] |
fcd3b042ea18dcc223255215c05e748c0d9fe09e7e8046c550b9175473c8400a
|
#!/usr/bin/python
"""
" @section DESCRIPTION
" Functions for generating various stimuli
"""
import numpy as np
from scipy.special import erf
from scipy.integrate import cumtrapz
from functools import reduce  # reduce is used below; importing it keeps the module Python 3 compatible
def create_step_current_sequence(params):
""" Generates a step current sequence
Args:
params: dict with the keys
'sim_time': total simulation time (ms)
'dt': time bin length (ms)
'stimuli_params': stimulus name and its parameters
(e.g. amplitude, dimensions, and rho for the RC and DRC stimuli)
Returns:
step_current: generated current values and time points
Raises:
Exception: if the requested stimulus type is not implemented
"""
sim_time = params['sim_time']
dt = params['dt']
input_params = params['stimuli_params']
# Generate a time vector starting from 1 up to time_tot
# in increments of time_bin_resolution
time = np.arange(1.0, int(sim_time), dt)  # built-in int: np.int has been removed from recent NumPy releases
# Generate values for each time point based on the selected model
# Uniform
if input_params['name'] == 'uniform':
values = _uniform_distribution(time, input_params['params'])
# Gaussian
elif input_params['name'] == 'gaussian':
values = _gaussian_distribution(time, input_params['params'])
# Random chords ('RC')
elif input_params['name'] == 'rc':
values = _rc_sequence(time, input_params['params'])
# Dynamic random chords ('DRC')
elif input_params['name'] == 'drc':
values = _drc_sequence(time, input_params['params'])
# Dynamic moving ripples ('DMR')
elif input_params['name'] == 'dmr':
values = _dynamic_moving_ripple(time, input_params['params'])
# Ripple noise ('RN')
elif input_params['name'] == 'rn':
values = _ripple_noise(time, input_params['params'])
# FM tones ('FM')
elif input_params['name'] == 'fm':
values = _fm_sweeps(time, input_params['params'])
# Modulated noise ('MN')
elif input_params['name'] == 'mn':
values = _modulated_noise(time, input_params['params'])
else:
raise Exception("No input named: {0}".format(input_params['name']))
step_current = {'name': 'step_current_generator',
'time': time,
'values': values}
return step_current
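# Hedged usage sketch (editorial addition): the key layout below mirrors how
# create_step_current_sequence unpacks its `params` argument (and assumes the
# functools.reduce import added above). The numbers are arbitrary and this
# helper is never called by the module itself.
def _example_step_current():
    params = {
        'sim_time': 100.0,  # ms
        'dt': 1.0,          # ms
        'stimuli_params': {
            'name': 'uniform',
            'params': {'amplitude': 2.0, 'dimensions': [4, 1]},
        },
    }
    stim = create_step_current_sequence(params)
    assert stim['values'].shape[0] == stim['time'].size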
def _uniform_distribution(time, params):
""" stimuli generated from a uniform distribution
:param time:
:param params:
:return values:
"""
# Parameters
min_val = 0.0
max_val = params['amplitude']
range = max_val - min_val
stimulus_dims = params['dimensions']
n_stimulus_elements = reduce(lambda i, j: i * j, stimulus_dims)
# Values
values = np.random.rand(time.size * n_stimulus_elements) * range + min_val
values = values.reshape([len(time)] + stimulus_dims)
return values
def _gaussian_distribution(time, params):
""" stimuli generated from a gaussian distribution
:param time:
:param params:
:return values:
"""
stimulus_dims = params['dimensions']
n_stimulus_elements = reduce(lambda i, j: i * j, stimulus_dims)
mean = params['amplitude']
std = mean / 4
values = mean + np.random.randn(len(time) * n_stimulus_elements) * std
values = values.reshape([len(time)] + stimulus_dims)
return values
def _rc_sequence(time, params):
""" Random chord sequence
:param time:
:param params:
:return values:
"""
stimulus_dims = params['dimensions']
n_stimulus_elements = reduce(lambda i, j: i * j, stimulus_dims)
amplitude = params['amplitude']
rho = params['rho']
rand = np.random.rand(time.size * n_stimulus_elements)
values = amplitude * np.ones(time.size * n_stimulus_elements)
values[rand > rho] = 0
values = values.reshape([len(time)] + stimulus_dims)
return values
def _drc_sequence(time, params):
""" Dynamic random chord sequence
:param time:
:param params:
:return values:
"""
stimulus_dims = params['dimensions']
n_stimulus_elements = reduce(lambda i, j: i * j, stimulus_dims)
amplitude = params['amplitude']
rho = params['rho']
min_val = amplitude / 2
max_val = amplitude
range = max_val - min_val
values = np.random.rand(time.size * n_stimulus_elements) * range + min_val
rand = np.random.rand(time.size * n_stimulus_elements)
values[rand > rho] = 0
values = values.reshape([len(time)] + stimulus_dims)
return values
def _dynamic_moving_ripple(time, params):
"""Generates a dynamic random ripple stimuli
See. Escabi & Schreiner (2002)
Args:
time: Time vector
n_freq: Number of frequencies
octave_range: Octave spacing between min and max frequency
Returns:
s_lin: matrix containing the DMR stimuli
Raises:
"""
# Only used for estimating spectro-temporal receptive fields
assert params['dimensions'][1] == 1
n_freq = params['dimensions'][0]
octave_range = params['octave_range']
amplitude = params['amplitude']
# Parameters
m = 30
f_lim = [-350, 350]
f_rate = 3
sigma_lim = [0 , 4]
sigma_rate = 6
x = np.linspace(0, octave_range, n_freq)
t_tot = time.max() / 1e3
n_f = np.int64(f_rate*t_tot)
n_sigma = np.int64(sigma_rate*t_tot)
# F
t_tmp = np.linspace(0, t_tot, n_f+1)
f_tmp = np.random.randn(n_f+1)
# fun = interp1d(t_tmp, f_tmp, 'quadratic')
# f = fun(time/1e3)
f = np.interp(time/1e3, t_tmp, f_tmp)
# Sigma
t_tmp = np.linspace(0, t_tot, n_sigma+1)
sigma_tmp = np.random.randn(n_sigma+1)
# fun = interp1d(t_tmp, sigma_tmp, 'quadratic')
# sigma = fun(time/1e3)
sigma = np.interp(time/1e3, t_tmp, sigma_tmp)
# Rescaling
# First to the range [-1, 1]
f = erf(f)
sigma = erf(sigma)
# Then to provided range
f += 1
f *= (f_lim[1]-f_lim[0])/2
f += f_lim[0]
sigma += 1
sigma *= (sigma_lim[1]-sigma_lim[0])/2
sigma += sigma_lim[0]
# Finalizing
f_int = cumtrapz(f, time/1e3)
f_int = np.insert(f_int, 0, 0)
s_dmr = m/2 * np.sin(2*np.pi*np.outer(x, sigma) +
np.outer(np.ones(n_freq), f_int))
# s_lin = 10**((s_dmr-m/2)/20)
s_lin = (s_dmr + m/2) / m
s_lin *= amplitude
return s_lin.T
def _ripple_noise(time, params):
"""Generates a radnom ripple stimuli
See. Escabi & Schreiner (2002)
Args:
time: Time vector
n_freq: Number of frequencies
octave_range: Octave spacing between min and max frequency
Returns:
s_lin: matrix containing the RN stimuli
Raises:
"""
# Only used for estimating spectro-temporal receptive fields
assert params['dimensions'][1] == 1
amplitude = params['amplitude']
# Parameters
n_dmrs = 16
s_rn = _dynamic_moving_ripple(time, params)
for i in range(n_dmrs-1):
s_rn += _dynamic_moving_ripple(time, params)
s_rn /= n_dmrs
s_mean = s_rn.mean()
s_std = s_rn.std()
s_rn = 0.5*erf((s_rn-s_mean)/s_std) + 0.5
scaling = amplitude / s_rn.max()
s_rn *= scaling
return s_rn
def _fm_sweeps(time, params):
"""Generates block-design FM tones
See Meyer et al. (2014)
Args:
time: Time vector
n_freq: Number of frequencies
n_sweeps: Number of frequency sweeps in each block
block_length: block_length in time bins
Returns:
fm_tones: matrix containing the block-design
Raises:
"""
# Only used for estimating spectro-temporal receptive fields
assert params['dimensions'][1] == 1
n_freq = params['dimensions'][0]
n_sweeps = params['octave_range']
block_length = params['block_length']
amplitude = params['amplitude']
values = np.zeros([time.size, n_freq])
n_blocks = int(np.ceil(time.size / block_length))
for sweep in range(n_sweeps):
# freq = [np.linspace(2**(8+np.random.rand()*6),
# 2**(8+np.random.rand()*6),
# block_length) for i in range(n_blocks)]
# freq = np.hstack(freq)
# freq = np.log2(freq) - 8
# freq = freq / 6 * (n_freq-1)
freq = [np.linspace(np.random.rand(),
np.random.rand(),
block_length) for i in range(n_blocks)]
freq = np.hstack(freq)
freq = freq / freq.max() * (n_freq-1)
freq = np.int64(np.round(freq))
values[np.arange(time.size), freq[0:time.size]] = 1.0
values *= amplitude
return values
def _modulated_noise(time, params):
"""Generates a modulated noise stimuli
See. Woolley et.a. (2005)
Args:
time: Time vector
n_freq: Number of frequencies
Returns:
s_lin: matrix containing the MN stimuli
Raises:
"""
# Only used for estimating spectro-temporal receptive fields
assert params['dimensions'][1] == 1
n_freq = params['dimensions'][0]
amplitude = params['amplitude']
# Parameters
n_ripples = 100
m = 30 # taken from Escabi & Schreiner (2002)
frequencies = np.arange(n_freq)
s_mn = np.zeros([n_freq, time.size])
for ripple in range(n_ripples):
omega = (2*np.random.rand()-1) / 0.5
theta = (2*np.random.rand()-1) / 0.5
phase = 2*np.pi*np.random.rand()
for freq in frequencies:
s_mn[freq, :] += np.cos(theta*time + omega*freq + phase)
s_mean = s_mn.mean()
s_std = s_mn.std()
s_mn = m/2*erf((s_mn-s_mean)/s_std)
s_lin = 10**((s_mn-m/2)/20)
# s_lin = (s_dmr + m/2) / m
s_lin *= amplitude
return s_lin.T
|
JohanWesto/receptive-field-models
|
simulation/stimulus_generator.py
|
Python
|
mit
| 9,656
|
[
"Gaussian"
] |
1b8450c3f89ca0a9a3ccfdae7baafe8f99b6f264e9156e3cd0360bddb9160a43
|
# TODO: By PySCF-1.5 release
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 1. code style
# * Indent: 3 -> 4
# * Line wrap around 80 columns
#
# 2. Move to pyscf/examples/tools
#
# Developed by Elvira Sayfutyarova:
# using wmme program provided by Gerald Knizia
#
# Comment: you need to get .xml file after running SCF or MCSCF in Molpro before running this program
#=====================================================================================================
# Note, that right now the default value is SkipVirtual =True, that means that Orbitals COrb include only those,
# which have non-zero occupation numbers in XML file. If you want virtual orbitals too, change SkipVirtual to False.
from __future__ import print_function
import numpy as np
from wmme import mdot
from pyscf import gto
from pyscf import scf
import wmme
import MolproXml
import MolproXmlToPyscf
from pyscf import ao2mo
from pyscf import mcscf
from functools import reduce
from pyscf import fci
def rmsd(X):
return np.mean(X.flatten()**2)**.5
def PrintMatrix(Caption, M):
print("Matrix {} [{} x {}]:\n".format(Caption, M.shape[0], M.shape[1]))
ColsFmt = M.shape[1] * " {:11.5f}"
for iRow in range(M.shape[0]):
print(ColsFmt.format(tuple(M[iRow,:])))
def _run_with_pyscf(FileNameXml):
# read Molpro XML file exported via {put,xml,filename}
print("\n* Reading: '{}'".format(FileNameXml))
XmlData = MolproXml.ReadMolproXml(FileNameXml, SkipVirtual=True)
# Note, that right now the default value is SkipVirtual =True, that means that Orbitals COrb include only those,
# which have non-zero occupation numbers in XML file. If you want virtual orbitals too, change SkipVirtual to False.
print("Atoms from file [a.u.]:\n{}".format(XmlData.Atoms.MakeXyz(NumFmt="%20.15f",Scale=1/wmme.ToAng)))
# convert data from XML file (atom positions, basis sets, MO coeffs) into format compatible
# with PySCF.
Atoms, Basis, COrb = MolproXmlToPyscf.ConvertMolproXmlToPyscfInput(XmlData)
# make pyscf Mole object
mol = gto.Mole()
mol.build(
verbose = 0,
atom = Atoms,
basis = Basis,
spin = 0
)
# compute overlap matrix with PySCF
S = mol.intor_symmetric('cint1e_ovlp_sph')
# compute overlap matrix of MO basis overlap, using the MOs imported from the XML,
# and the overlap matrix computed with PySCF to check that MO were imported properly.
SMo = mdot(COrb.T, S, COrb)
PrintMatrix("MO-Basis overlap (should be unity!)", SMo)
print("RMSD(SMo-id): {:8.2e}".format(rmsd(SMo - np.eye(SMo.shape[0]))))
print()
def get_1e_integrals_in_MOs_from_Molpro_for_SOC(FileNameXml):
# read Molpro XML file exported via {put,xml,filename}
print("\n* Reading: '{}'".format(FileNameXml))
XmlData = MolproXml.ReadMolproXml(FileNameXml, SkipVirtual=True)
print("Atoms from file [a.u.]:\n{}".format(XmlData.Atoms.MakeXyz(NumFmt="%20.15f",Scale=1/wmme.ToAng)))
# convert data from XML file (atom positions, basis sets, MO coeffs) into format compatible # with PySCF.
Atoms, Basis, COrb = MolproXmlToPyscf.ConvertMolproXmlToPyscfInput(XmlData)
# make pyscf Mole object
mol = gto.Mole()
mol.build(
verbose = 0,
atom = Atoms,
basis = Basis,
# symmetry = 'D2h',
spin = 0
)
natoms =1
norb =11
nelec =10
all_orbs = len(COrb)
# COrb is a list of orbs, you get the info about orbs in the output when reading orbs with a scheme above :
# # of an orb in COrb list, irrep in the point group used in Molpro calcs, # of orb in a given irrep in Molpro output
Orblist = [6,7,8,9,10,18,19,24,25,27,28]
ActOrb = np.zeros(shape=(all_orbs,norb))
ActOrb2 = np.zeros(shape=(all_orbs,norb))
for o1 in range(all_orbs):
for o2 in range(norb):
ActOrb[o1,o2]= COrb[o1, Orblist[o2]]
print("=================================================")
print(" Now print So1e integrals")
for id in range(natoms):
chg = mol.atom_charge(id)
mol.set_rinv_origin_(mol.atom_coord(id)) # set the gauge origin to first atom
h1ao = abs(chg) *mol.intor('cint1e_prinvxp_sph', comp=3) # comp=3 for x,y,z directions
h1 = []
for i in range(3):
h1.append(reduce(np.dot, (ActOrb.T, h1ao[i], ActOrb)))
for i in range(3):
for j in range(h1[i].shape[0]):
for k in range(h1[i].shape[1]):
print(id, i+1, j+1, k+1, h1[i][j,k])
if __name__ == "__main__":
_run_with_pyscf("pd_3d.xml")
get_1e_integrals_in_MOs_from_Molpro_for_SOC("pd_3d.xml")
|
gkc1000/pyscf
|
pyscf/tools/Molpro2Pyscf/example_Pd_atom.py
|
Python
|
apache-2.0
| 5,097
|
[
"Molpro",
"PySCF"
] |
cbd483940cc1fdefd3807c386e784bbddff7effe1a24bbe1e7394e05f2c29119
|
import numpy as np
import scipy.sparse
import os
import sys
import emcee
import copy
from astropy.cosmology import Planck15
from .class_utils import *
from .lensing import *
from .utils import *
from .calc_likelihood import calc_vis_lnlike
arcsec2rad = np.pi/180/3600
def LensModelMCMC(data,lens,source,
xmax=10.,highresbox=[-2.,2.,-2.,2.],emitres=None,fieldres=None,
sourcedatamap=None, scaleamp=False, shiftphase=False,
modelcal=True,cosmo=Planck15,
nwalkers=1e3,nburn=1e3,nstep=1e3,pool=None,nthreads=1,mpirun=False):
"""
Wrapper function which basically takes what the user wants and turns it into the
format needed for the actual MCMC lens modeling.
Inputs:
data:
One or more visdata objects; if multiple datasets are being
fit to, should be a list of visdata objects.
lens:
Any of the currently implemented lens objects or ExternalShear.
source:
One or more of the currently implemented source objects; if more than
one source to be fit, should be a list of multiple sources.
xmax:
(Half-)Grid size, in arcseconds; the grid will span +/-xmax in x&y
highresbox:
The region to model at higher resolution (to account for high-magnification
and differential lensing effects), as [xmin, xmax, ymin, ymax].
Note the sign convention is: +x = West, +y = North, like the lens
positions.
sourcedatamap:
A list of length the number of datasets which tells which source(s)
are to be fit to which dataset(s). Eg, if two sources are to be fit
to two datasets jointly, should be [[0,1],[0,1]]. If we have four
sources and three datasets, could be [[0,1],[0,1],[2,3]] to say that the
first two sources should both be fit to the first two datasets, while the
second two should be fit to the third dataset. If None, will assume
all sources should be fit to all datasets.
scaleamp:
A list of length the number of datasets which tells whether a flux
rescaling is allowed and which dataset the scaling should be relative to.
False indicates no scaling should be done, while True indicates that
amplitude scaling should be allowed.
shiftphase:
Similar to scaleamp above, but allowing for positional/astrometric offsets.
modelcal:
Whether or not to perform the pseudo-selfcal procedure of H+13
cosmo:
The cosmology to use, as an astropy object, e.g.,
from astropy.cosmology import WMAP9; cosmo=WMAP9
Default is Planck15.
nwalkers:
Number of walkers to use in the mcmc process; see dan.iel.fm/emcee/current
for more details.
nburn:
Number of burn-in steps to take with the chain.
nstep:
Number of actual steps to take in the mcmc chains after the burn-in
nthreads:
Number of threads (read: cores) to use during the fitting, default 1.
mpirun:
Whether to parallelize using MPI instead of multiprocessing. If True,
nthreads has no effect, and your script should be run with, eg,
mpirun -np 16 python lensmodel.py.
Returns:
mcmcresult:
A nested dict containing the chains requested. Will have all the MCMC
chain results, plus metadata about the run (initial params, data used,
etc.). Formatting still a work in progress (esp. for modelcal phases).
chains:
The raw chain data, for testing.
blobs:
Everything else returned by the likelihood function; will have
magnifications and any modelcal phase offsets at each step; eventually
will remove this once get everything packaged up for mcmcresult nicely.
colnames:
Basically all the keys to the mcmcresult dict; eventually won't need
to return this once mcmcresult is packaged up nicely.
"""
if pool: nthreads = 1
elif mpirun:
nthreads = 1
from emcee.utils import MPIPool
pool = MPIPool(debug=False,loadbalance=True)
if not pool.is_master():
pool.wait()
sys.exit(0)
else: pool = None
# Making these lists just makes later stuff easier since we now know the dtype
lens = list(np.array([lens]).flatten())
source = list(np.array([source]).flatten()) # Ensure source(s) are a list
data = list(np.array([data]).flatten()) # Same for dataset(s)
scaleamp = list(np.array([scaleamp]).flatten())
shiftphase = list(np.array([shiftphase]).flatten())
modelcal = list(np.array([modelcal]).flatten())
if len(scaleamp)==1 and len(scaleamp)<len(data): scaleamp *= len(data)
if len(shiftphase)==1 and len(shiftphase)<len(data): shiftphase *= len(data)
if len(modelcal)==1 and len(modelcal)<len(data): modelcal *= len(data)
if sourcedatamap is None: sourcedatamap = [None]*len(data)
# emcee isn't very flexible in terms of how it gets initialized; start by
# assembling the user-provided info into a form it likes
ndim, p0, colnames = 0, [], []
# Lens(es) first
for i,ilens in enumerate(lens):
if ilens.__class__.__name__=='SIELens':
for key in ['x','y','M','e','PA']:
if not vars(ilens)[key]['fixed']:
ndim += 1
p0.append(vars(ilens)[key]['value'])
colnames.append(key+'L'+str(i))
elif ilens.__class__.__name__=='ExternalShear':
for key in ['shear','shearangle']:
if not vars(ilens)[key]['fixed']:
ndim += 1
p0.append(vars(ilens)[key]['value'])
colnames.append(key)
# Then source(s)
for i,src in enumerate(source):
if src.__class__.__name__=='GaussSource':
for key in ['xoff','yoff','flux','width']:
if not vars(src)[key]['fixed']:
ndim += 1
p0.append(vars(src)[key]['value'])
colnames.append(key+'S'+str(i))
elif src.__class__.__name__=='SersicSource':
for key in ['xoff','yoff','flux','majax','index','axisratio','PA']:
if not vars(src)[key]['fixed']:
ndim += 1
p0.append(vars(src)[key]['value'])
colnames.append(key+'S'+str(i))
elif src.__class__.__name__=='PointSource':
for key in ['xoff','yoff','flux']:
if not vars(src)[key]['fixed']:
ndim += 1
p0.append(vars(src)[key]['value'])
colnames.append(key+'S'+str(i))
# Then flux rescaling; only matters if >1 dataset
for i,t in enumerate(scaleamp[1:]):
if t:
ndim += 1
p0.append(1.) # Assume 1.0 scale factor to start
colnames.append('ampscale_dset'+str(i+1))
# Then phase/astrometric shift; each has two vals for a shift in x&y
for i,t in enumerate(shiftphase[1:]):
if t:
ndim += 2
p0.append(0.); p0.append(0.) # Assume zero initial offset
colnames.append('astromshift_x_dset'+str(i+1))
colnames.append('astromshift_y_dset'+str(i+1))
# Get any model-cal parameters set up. The process involves some expensive
# matrix inversions, but these only need to be done once, so we'll do them
# now and pass the results as arguments to the likelihood function. See docs
# in calc_likelihood.model_cal for more info.
for i,dset in enumerate(data):
if modelcal[i]:
uniqant = np.unique(np.asarray([dset.ant1,dset.ant2]).flatten())
dPhi_dphi = np.zeros((uniqant.size-1,dset.u.size))
for j in range(1,uniqant.size):
dPhi_dphi[j-1,:]=(dset.ant1==uniqant[j])-1*(dset.ant2==uniqant[j])
C = scipy.sparse.diags((dset.sigma/dset.amp)**-2.,0)
F = np.dot(dPhi_dphi,C*dPhi_dphi.T)
Finv = np.linalg.inv(F)
FdPC = np.dot(-Finv,dPhi_dphi*C)
modelcal[i] = [dPhi_dphi,FdPC]
# Create our lensing grid coordinates now, since those shouldn't be
# recalculated with every call to the likelihood function
xmap,ymap,xemit,yemit,indices = GenerateLensingGrid(data,xmax,highresbox,
fieldres,emitres)
# Calculate the uv coordinates we'll interpolate onto; only need to calculate
# this once, so do it here.
kmax = 0.5/((xmap[0,1]-xmap[0,0])*arcsec2rad)
ug = np.linspace(-kmax,kmax,xmap.shape[0])
# Calculate some distances; we only need to calculate these once.
# This assumes multiple sources are all at same z; should be this
# way anyway or else we'd have to deal with multiple lensing planes
if cosmo is None: cosmo = Planck15
Dd = cosmo.angular_diameter_distance(lens[0].z).value
Ds = cosmo.angular_diameter_distance(source[0].z).value
Dds= cosmo.angular_diameter_distance_z1z2(lens[0].z,source[0].z).value
p0 = np.array(p0)
    # Create a ball of starting points for the walkers: a Gaussian ball of 10% width;
    # if the initial value is 0 (e.g., an astrometric shift), use a small fixed sigma.
    # Angles generally need more spread than 10% to sample well, so use 30% for those
    # cases (~0.5% of draws land >180deg away for p0=100deg).
isangle = np.array([0.30 if 'PA' in s or 'angle' in s else 0.1 for s in colnames])
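    # emcee.utils.sample_ball(p, std, N) draws N walker positions from independent
    # Gaussians centered on p with per-parameter widths std; parameters starting at
    # exactly 0 get a fixed sigma of 0.05 so the walkers don't all start identical.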
initials = emcee.utils.sample_ball(p0,np.asarray([isangle[i]*x if x else 0.05 for i,x in enumerate(p0)]),int(nwalkers))
# All the lens objects know if their parameters have been altered since the last time
# we calculated the deflections. If all the lens pars are fixed, we only need to do the
# deflections once. This step ensures that the lens object we create the sampler with
# has these initial deflections.
for i,ilens in enumerate(lens):
if ilens.__class__.__name__ == 'SIELens': ilens.deflect(xemit,yemit,Dd,Ds,Dds)
elif ilens.__class__.__name__ == 'ExternalShear': ilens.deflect(xemit,yemit,lens[0])
# Create the sampler object; uses calc_likelihood function defined elsewhere
lenssampler = emcee.EnsembleSampler(nwalkers,ndim,calc_vis_lnlike,
args = [data,lens,source,Dd,Ds,Dds,ug,
xmap,ymap,xemit,yemit,indices,
sourcedatamap,scaleamp,shiftphase,modelcal],
threads=nthreads,pool=pool)
# Run burn-in phase
print("Running burn-in... ")
#pos,prob,rstate,mus = lenssampler.run_mcmc(initials,nburn,storechain=False)
for i,result in enumerate(lenssampler.sample(initials,iterations=nburn,storechain=False)):
if i%20==0: print('Burn-in step ',i,'/',nburn)
pos,prob,rstate,blob = result
lenssampler.reset()
# Run actual chains
print("Done. Running chains... ")
for i,result in enumerate(lenssampler.sample(pos,rstate0=rstate,iterations=nstep,storechain=True)):
if i%20==0: print('Chain step ',i,'/',nstep)
#lenssampler.run_mcmc(pos,nstep,rstate0=rstate)
if mpirun: pool.close()
print("Mean acceptance fraction: ",np.mean(lenssampler.acceptance_fraction))
#return lenssampler.flatchain,lenssampler.blobs,colnames
# Package up the magnifications and modelcal phases; disregards nan points (where
# we failed the prior, usu. because a periodic angle wrapped).
blobs = lenssampler.blobs
mus = np.asarray([[a[0] for a in l] for l in blobs]).flatten(order='F')
bad = np.where(np.asarray([np.any(np.isnan(m)) for m in mus],dtype=bool))[0]
for k in bad: mus[k] = np.array([np.nan]*len(source))
mus = np.asarray(list(mus),dtype=float).reshape((-1,len(source)),order='F') # stupid-ass hack
bad = np.isnan(mus)[:,0]
#bad = bad.reshape((-1,len(source)),order='F')[:,0]
#mus = np.atleast_2d(np.asarray([mus[i] if not bad[i] else [np.nan]*len(source) for i in range(mus.size)])).T
colnames.extend(['mu{0:.0f}'.format(i) for i in range(len(source))])
# Assemble the output. Want to return something that contains both the MCMC chains
# themselves, but also metadata about the run.
mcmcresult = {}
# keep track of git revision, for reproducibility's sake
# if run under mpi, this will spew some scaremongering warning text,
# but it's fine. use --mca mpi_warn_on_fork 0 in the mpirun statement to disable
try:
import subprocess
gitd = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir))
mcmcresult['githash'] = subprocess.check_output('git --git-dir={0:s} --work-tree={1:s} '\
'rev-parse HEAD'.format(gitd+'/.git',gitd),shell=True).rstrip()
    except Exception:
mcmcresult['githash'] = 'No repo found'
mcmcresult['datasets'] = [dset.filename for dset in data] # Data files used
mcmcresult['lens_p0'] = lens # Initial params for lens,src(s),shear; also tells if fixed, priors, etc.
mcmcresult['source_p0'] = source
if sourcedatamap: mcmcresult['sourcedatamap'] = sourcedatamap
mcmcresult['xmax'] = xmax
mcmcresult['highresbox'] = highresbox
mcmcresult['fieldres'] = fieldres
mcmcresult['emitres'] = emitres
if any(scaleamp): mcmcresult['scaleamp'] = scaleamp
if any(shiftphase): mcmcresult['shiftphase'] = shiftphase
mcmcresult['chains'] = np.core.records.fromarrays(np.hstack((lenssampler.flatchain[~bad],mus[~bad])).T,names=colnames)
mcmcresult['lnlike'] = lenssampler.flatlnprobability[~bad]
# Keep track of best-fit params, derived from chains.
c = copy.deepcopy(mcmcresult['chains'])
mcmcresult['best-fit'] = {}
pbest = []
# Calculate the best fit values as medians of each param
lens,source = copy.deepcopy(mcmcresult['lens_p0']), copy.deepcopy(mcmcresult['source_p0'])
for i,ilens in enumerate(lens):
if ilens.__class__.__name__ == 'SIELens':
ilens.__dict__['_altered'] = True
for key in ['x','y','M','e','PA']:
if not vars(ilens)[key]['fixed']:
ilens.__dict__[key]['value'] = np.median(c[key+'L'+str(i)])
pbest.append(np.median(c[key+'L'+str(i)]))
elif ilens.__class__.__name__ == 'ExternalShear':
for key in ['shear','shearangle']:
if not vars(ilens)[key]['fixed']:
ilens.__dict__[key]['value'] = np.median(c[key])
pbest.append(np.median(c[key]))
mcmcresult['best-fit']['lens'] = lens
# now do the source(s)
for i,src in enumerate(source): # Source is a list of source objects
if src.__class__.__name__ == 'GaussSource':
for key in ['xoff','yoff','flux','width']:
if not vars(src)[key]['fixed']:
src.__dict__[key]['value'] = np.median(c[key+'S'+str(i)])
pbest.append(np.median(c[key+'S'+str(i)]))
elif src.__class__.__name__ == 'SersicSource':
for key in ['xoff','yoff','flux','majax','index','axisratio','PA']:
if not vars(src)[key]['fixed']:
src.__dict__[key]['value'] = np.median(c[key+'S'+str(i)])
pbest.append(np.median(c[key+'S'+str(i)]))
elif src.__class__.__name__ == 'PointSource':
for key in ['xoff','yoff','flux']:
if not vars(src)[key]['fixed']:
src.__dict__[key]['value'] = np.median(c[key+'S'+str(i)])
pbest.append(np.median(c[key+'S'+str(i)]))
mcmcresult['best-fit']['source'] = source
mcmcresult['best-fit']['magnification'] = np.median(mus[~bad],axis=0)
# Any amplitude scaling or astrometric shifts
bfscaleamp = np.ones(len(data))
if 'scaleamp' in mcmcresult.keys():
for i,t in enumerate(mcmcresult['scaleamp']): # only matters if >1 datasets
if i==0: pass
elif t:
bfscaleamp[i] = np.median(c['ampscale_dset'+str(i)])
pbest.append(np.median(c['ampscale_dset'+str(i)]))
else: pass
mcmcresult['best-fit']['scaleamp'] = bfscaleamp
bfshiftphase = np.zeros((len(data),2))
if 'shiftphase' in mcmcresult.keys():
for i,t in enumerate(mcmcresult['shiftphase']):
if i==0: pass # only matters if >1 datasets
elif t:
bfshiftphase[i][0] = np.median(c['astromshift_x_dset'+str(i)])
bfshiftphase[i][1] = np.median(c['astromshift_y_dset'+str(i)])
pbest.append(np.median(c['astromshift_x_dset'+str(i)]))
pbest.append(np.median(c['astromshift_y_dset'+str(i)]))
else: pass # no shifting
mcmcresult['best-fit']['shiftphase'] = bfshiftphase
mcmcresult['best-fit']['lnlike'] = calc_vis_lnlike(pbest,data,mcmcresult['best-fit']['lens'],
mcmcresult['best-fit']['source'],
Dd,Ds,Dds,ug,xmap,ymap,xemit,yemit,indices,
sourcedatamap,scaleamp,shiftphase,modelcal)[0]
# Calculate the deviance information criterion, using the Spiegelhalter+02 definition (cf Gelman+04)
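    # (DIC = Dbar + pD with deviance D = -2*lnL and pD = Dbar - D(best fit), which
    # reduces to 2*Dbar - D(best) = -4*<lnL> + 2*lnL_best, as computed below.)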
mcmcresult['best-fit']['DIC'] = -4*np.mean(mcmcresult['lnlike']) + 2*mcmcresult['best-fit']['lnlike']
# If we did any modelcal stuff, keep the antenna phase offsets here
if any(modelcal):
mcmcresult['modelcal'] = [True if j else False for j in modelcal]
dp = np.squeeze(np.asarray([[a[1] for a in l if ~np.any(np.isnan(a[0]))] for l in blobs]))
a = [x for l in dp for x in l] # Have to dick around with this if we had any nan's
dphases = np.squeeze(np.reshape(a,(nwalkers*nstep-bad.sum(),len(data),-1),order='F'))
if len(data) > 1:
for i in range(len(data)):
if modelcal[i]: mcmcresult['calphases_dset'+str(i)] = np.vstack(dphases[:,i])
else:
if any(modelcal): mcmcresult['calphases_dset0'] = dphases
return mcmcresult
|
jspilker/visilens
|
visilens/LensModelMCMC.py
|
Python
|
mit
| 19,225
|
[
"Gaussian"
] |
e8797edfc4712ef142987289f617d763a8e7242ab7790b2faad5e97188be303a
|
"""The suite of window functions."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import special, linalg
from scipy.fftpack import fft
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'hann', 'get_window']
def boxcar(M, sym=True):
"""Return a boxcar or rectangular window.
Included for completeness, this is equivalent to no window at all.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
Whether the window is symmetric. (Has no effect for boxcar.)
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.boxcar(51)
>>> plt.plot(window)
>>> plt.title("Boxcar window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the boxcar window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return np.ones(M, float)
def triang(M, sym=True):
"""Return a triangular window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.triang(51)
>>> plt.plot(window)
>>> plt.title("Triangular window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the triangular window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
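    # Periodic ("DFT-even") windows are built by designing a symmetric window of
    # length M+1 and dropping the last sample at the end (w = w[:-1] below); the
    # same pattern is used by the other window functions in this module.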
if not sym and not odd:
M = M + 1
n = np.arange(1, (M + 1) // 2 + 1)
if M % 2 == 0:
w = (2 * n - 1.0) / M
w = np.r_[w, w[::-1]]
else:
w = 2 * n / (M + 1.0)
w = np.r_[w, w[-2::-1]]
if not sym and not odd:
w = w[:-1]
return w
def parzen(M, sym=True):
"""Return a Parzen window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.parzen(51)
>>> plt.plot(window)
>>> plt.title("Parzen window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Parzen window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
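    # Piecewise-cubic definition: na covers the outer quarter |n| > (M-1)/4 (only
    # the negative half is built here and mirrored below), nb the central half.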
na = np.extract(n < -(M - 1) / 4.0, n)
nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
w = np.r_[wa, wb, wa[::-1]]
if not sym and not odd:
w = w[:-1]
return w
def bohman(M, sym=True):
"""Return a Bohman window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bohman(51)
>>> plt.plot(window)
>>> plt.title("Bohman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bohman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
fac = np.abs(np.linspace(-1, 1, M)[1:-1])
w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
w = np.r_[0, w, 0]
if not sym and not odd:
w = w[:-1]
return w
def blackman(M, sym=True):
"""
Return a Blackman window.
    The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = (0.42 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1)) +
0.08 * np.cos(4.0 * np.pi * n / (M - 1)))
if not sym and not odd:
w = w[:-1]
return w
def nuttall(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window according to Nuttall.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.nuttall(51)
>>> plt.plot(window)
>>> plt.title("Nuttall window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Nuttall window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.3635819, 0.4891775, 0.1365995, 0.0106411]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def blackmanharris(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackmanharris(51)
>>> plt.plot(window)
>>> plt.title("Blackman-Harris window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman-Harris window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.35875, 0.48829, 0.14128, 0.01168]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac))
if not sym and not odd:
w = w[:-1]
return w
def flattop(M, sym=True):
"""Return a flat top window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.flattop(51)
>>> plt.plot(window)
>>> plt.title("Flat top window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the flat top window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
a = [0.2156, 0.4160, 0.2781, 0.0836, 0.0069]
n = np.arange(0, M)
fac = n * 2 * np.pi / (M - 1.0)
w = (a[0] - a[1] * np.cos(fac) +
a[2] * np.cos(2 * fac) - a[3] * np.cos(3 * fac) +
a[4] * np.cos(4 * fac))
if not sym and not odd:
w = w[:-1]
return w
def bartlett(M, sym=True):
"""
Return a Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The triangular window, with the maximum value normalized to 1
(though the value 1 does not appear if the number of samples is even
and sym is True), with the first and last samples equal to zero.
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett window is the
    product of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's bartlett function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = np.where(np.less_equal(n, (M - 1) / 2.0),
2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def hann(M, sym=True):
"""
Return a Hann window.
The Hann window is a taper formed by using a raised cosine or sine-squared
with ends that touch zero.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hann window is defined as
.. math:: w(n) = 0.5 - 0.5 \\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The window was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. It is sometimes erroneously referred to as
the "Hanning" window, from the use of "hann" as a verb in the original
paper and confusion with the very similar Hamming window.
Most references to the Hann window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hann(51)
>>> plt.plot(window)
>>> plt.title("Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hanning function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.5 - 0.5 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
hanning = hann
def barthann(M, sym=True):
"""Return a modified Bartlett-Hann window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.barthann(51)
>>> plt.plot(window)
>>> plt.title("Bartlett-Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett-Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
fac = np.abs(n / (M - 1.0) - 0.5)
w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
if not sym and not odd:
w = w[:-1]
return w
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hamming function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
w = 0.54 - 0.46 * np.cos(2.0 * np.pi * n / (M - 1))
if not sym and not odd:
w = w[:-1]
return w
def kaiser(M, beta, sym=True):
r"""Return a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
beta : float
Shape parameter, determines trade-off between main-lobe width and
side lobe level. As beta gets large, the window narrows.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
\right)/I_0(\beta)
with
.. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.kaiser(51, beta=14)
>>> plt.plot(window)
>>> plt.title(r"Kaiser window ($\beta$=14)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's kaiser function
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M)
alpha = (M - 1) / 2.0
w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
special.i0(beta))
if not sym and not odd:
w = w[:-1]
return w
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
if not sym and not odd:
w = w[:-1]
return w
def general_gaussian(M, p, sig, sym=True):
r"""Return a window with a generalized Gaussian shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
p : float
Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
the same shape as the Laplace distribution.
sig : float
The standard deviation, sigma.
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if the number of samples is even and sym is True).
Notes
-----
The generalized Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
the half-power point is at
.. math:: (2 \log(2))^{1/(2 p)} \sigma
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.general_gaussian(51, p=1.5, sig=7)
>>> plt.plot(window)
>>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Freq. resp. of the gen. Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
n = np.arange(0, M) - (M - 1.0) / 2.0
w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
if not sym and not odd:
w = w[:-1]
return w
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
"""Return a Dolph-Chebyshev window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
at : float
Attenuation (in dB).
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.chebwin(51, at=100)
>>> plt.plot(window)
>>> plt.title("Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
# compute the parameter beta
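    # beta sets the Chebyshev equiripple level so that, in the ideal case, all side
    # lobes sit `at` dB below the main lobe, per the Dolph-Chebyshev design.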
order = M - 1.0
beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
k = np.r_[0:M] * 1.0
x = beta * np.cos(np.pi * k / M)
# Find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = np.zeros(x.shape)
p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = np.real(fft(p))
n = (M + 1) // 2
w = w[:n] / w[0]
w = np.concatenate((w[n - 1:0:-1], w))
else:
p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
w = np.real(fft(p))
n = M // 2 + 1
w = w / w[1]
w = np.concatenate((w[n - 1:0:-1], w[1:n]))
if not sym and not odd:
w = w[:-1]
return w
def slepian(M, width, sym=True):
"""Return a digital Slepian (DPSS) window.
Used to maximize the energy concentration in the main lobe. Also called
the digital prolate spheroidal sequence (DPSS).
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
width : float
Bandwidth
sym : bool, optional
When True, generates a symmetric window, for use in filter design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fftpack import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.slepian(51, width=0.3)
>>> plt.plot(window)
>>> plt.title("Slepian (DPSS) window (BW=0.3)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Slepian window (BW=0.3)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if (M * width > 27.38):
raise ValueError("Cannot reliably obtain slepian sequences for"
" M*width > 27.38.")
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, 'd')
odd = M % 2
if not sym and not odd:
M = M + 1
twoF = width / 2.0
alpha = (M - 1) / 2.0
m = np.arange(0, M) - alpha
n = m[:, np.newaxis]
k = m[np.newaxis, :]
AF = twoF * special.sinc(twoF * (n - k))
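    # AF is the M x M sinc concentration kernel; the eigenvector belonging to its
    # largest eigenvalue is the DPSS taper (np.abs below removes the arbitrary sign
    # returned by the eigensolver).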
[lam, vec] = linalg.eig(AF)
ind = np.argmax(abs(lam), axis=-1)
w = np.abs(vec[:, ind])
w = w / max(w)
if not sym and not odd:
w = w[:-1]
return w
def get_window(window, Nx, fftbins=True):
"""
Return a window.
Parameters
----------
window : string, float, or tuple
The type of window to create. See below for more details.
Nx : int
The number of samples in the window.
fftbins : bool, optional
If True, create a "periodic" window ready to use with ifftshift
and be multiplied by the result of an fft (SEE ALSO fftfreq).
Returns
-------
get_window : ndarray
Returns a window of length `Nx` and type `window`
Notes
-----
Window types:
boxcar, triang, blackman, hamming, hann, bartlett, flattop,
parzen, bohman, blackmanharris, nuttall, barthann,
kaiser (needs beta), gaussian (needs std),
general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation)
If the window requires no parameters, then `window` can be a string.
If the window requires parameters, then `window` must be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If `window` is a floating point number, it is interpreted as the beta
parameter of the kaiser window.
Each of the window types listed above is also the name of
a function that can be called directly to create a window of
that type.
Examples
--------
>>> from scipy import signal
>>> signal.get_window('triang', 7)
array([ 0.25, 0.5 , 0.75, 1. , 0.75, 0.5 , 0.25])
>>> signal.get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
>>> signal.get_window(4.0, 9)
array([ 0.08848053, 0.32578323, 0.63343178, 0.89640418, 1. ,
0.89640418, 0.63343178, 0.32578323, 0.08848053])
"""
sym = not fftbins
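    # (fftbins=True requests a periodic, "DFT-even" window, which the individual
    # window functions express as sym=False)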
try:
beta = float(window)
except (TypeError, ValueError):
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, str):
if window in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss',
'general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs',
'slepian', 'optimal', 'slep', 'dss',
'chebwin', 'cheb']:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
if winstr in ['blackman', 'black', 'blk']:
winfunc = blackman
elif winstr in ['triangle', 'triang', 'tri']:
winfunc = triang
elif winstr in ['hamming', 'hamm', 'ham']:
winfunc = hamming
elif winstr in ['bartlett', 'bart', 'brt']:
winfunc = bartlett
elif winstr in ['hanning', 'hann', 'han']:
winfunc = hann
elif winstr in ['blackmanharris', 'blackharr', 'bkh']:
winfunc = blackmanharris
elif winstr in ['parzen', 'parz', 'par']:
winfunc = parzen
elif winstr in ['bohman', 'bman', 'bmn']:
winfunc = bohman
elif winstr in ['nuttall', 'nutl', 'nut']:
winfunc = nuttall
elif winstr in ['barthann', 'brthan', 'bth']:
winfunc = barthann
elif winstr in ['flattop', 'flat', 'flt']:
winfunc = flattop
elif winstr in ['kaiser', 'ksr']:
winfunc = kaiser
elif winstr in ['gaussian', 'gauss', 'gss']:
winfunc = gaussian
elif winstr in ['general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs']:
winfunc = general_gaussian
elif winstr in ['boxcar', 'box', 'ones', 'rect', 'rectangular']:
winfunc = boxcar
elif winstr in ['slepian', 'slep', 'optimal', 'dpss', 'dss']:
winfunc = slepian
elif winstr in ['chebwin', 'cheb']:
winfunc = chebwin
else:
raise ValueError("Unknown window type.")
params = (Nx,) + args + (sym,)
else:
winfunc = kaiser
params = (Nx, beta, sym)
return winfunc(*params)
|
sargas/scipy
|
scipy/signal/windows.py
|
Python
|
bsd-3-clause
| 44,546
|
[
"Gaussian"
] |
bcff2623493f8c3b020b3b7a1e6c22b9bcabebf8b6686a3fcc19edbf66177e81
|
#!/usr/bin/python3
# Usage example:
# python captions.py --videoid='<video_id>' --name='<name>' --file='<file>' --language='<language>' --action='action'
import httplib2, os, sys, re, datetime
from pprint import pprint
from googleapiclient.discovery import build_from_document
from googleapiclient.errors import HttpError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
import logging
logging.basicConfig()
def get_isosplit(s, split):
if split in s:
n, s = s.split(split)
else:
n = 0
return n, s
# https://stackoverflow.com/a/64232786/3682277
def parse_isoduration(s):
"""Helper function for parsing video durations"""
# Remove prefix
s = s.split('P')[-1]
# Step through letter dividers
days, s = get_isosplit(s, 'D')
_, s = get_isosplit(s, 'T')
hours, s = get_isosplit(s, 'H')
minutes, s = get_isosplit(s, 'M')
seconds, s = get_isosplit(s, 'S')
# Convert all to seconds
dt = datetime.timedelta(days=int(days), hours=int(hours), minutes=int(minutes), seconds=int(seconds))
return int(dt.total_seconds())
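# Example: parse_isoduration('PT1H2M3S') returns 3723 (seconds).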
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the {{ Google Dev Console }} at
# {{ https://console.developers.google.com/ }}.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
SECRETS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../SECRETS"))
CLIENT_SECRETS_FILE = "%s/google_client_secrets.json" % (SECRETS_DIR)
# This OAuth 2.0 access scope allows for full read/write access to the
# authenticated user's account and requires requests to use an SSL connection.
YOUTUBE_READ_WRITE_SSL_SCOPE = "https://www.googleapis.com/auth/youtube.force-ssl"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console
https://console.developers.google.com
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
# Authorize the request and store authorization credentials.
def get_authenticated_service(args):
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE,
message=MISSING_CLIENT_SECRETS_MESSAGE)
# The credentials will be saved to this file,
# so we need to sign in only once
storage = Storage("%s/%s-oauth2.json" % (SECRETS_DIR, os.path.basename(sys.argv[0])))
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage, args)
# https://stackoverflow.com/questions/29762529/where-can-i-find-the-youtube-v3-api-captions-json-discovery-document
with open("%s/youtube-v3-api.json" % os.path.dirname(__file__), "r", encoding = "utf-8") as f:
doc = f.read()
return build_from_document(doc, http=credentials.authorize(httplib2.Http()))
# Call the API's captions.list method to list the existing caption tracks.
def list_captions(youtube, video_id, verbose=True):
results = youtube.captions().list(
part="snippet",
videoId=video_id
).execute()
for item in results["items"]:
id = item["id"]
name = item["snippet"]["name"]
language = item["snippet"]["language"]
if verbose:
print("Caption track '%s(%s)' in '%s' language." % (name, id, language))
return results["items"]
# Call the API's captions.insert method to upload a caption track in draft status.
def upload_caption(youtube, video_id, language, name, is_draft, file):
try:
insert_result = youtube.captions().insert(
part="snippet",
body=dict(
snippet=dict(
videoId=video_id,
language=language,
name=name,
isDraft=is_draft
),
),
media_mime_type = 'text/xml',
media_body = file,
).execute()
except HttpError as e:
print("Got the following error during sub upload, YTID = ", video_id)
print(e)
raise
id = insert_result["id"]
name = insert_result["snippet"]["name"]
language = insert_result["snippet"]["language"]
status = insert_result["snippet"]["status"]
print("Uploaded caption track '%s(%s) in '%s' language, '%s' status." % (name,
id, language, status) )
return True
# Call the API's captions.update method to update an existing caption track's draft status
# and publish it. If a new binary file is present, update the track with the file as well.
def update_caption(youtube, video_id, language, caption_id, is_draft, file):
try:
update_result = youtube.captions().update(
part="snippet",
body=dict(
id=caption_id,
snippet=dict(
isDraft=is_draft
)
),
media_mime_type = 'text/xml',
media_body=file
).execute()
except HttpError as e:
print("Got the following error during subtitle update")
print(e)
raise
name = update_result["snippet"]["name"]
isDraft = update_result["snippet"]["isDraft"]
print("Updated caption track '%s' draft status to be: '%s'" % (name, isDraft))
if file:
print("and updated the track with the new uploaded file.")
return True
# Call the API's captions.download method to download an existing caption track.
def download_caption(youtube, caption_id, tfmt):
subtitle = youtube.captions().download(
id=caption_id,
tfmt=tfmt
).execute()
#print("First line of caption track: %s" % (subtitle))
with open(caption_id, "wb") as f:
f.write(subtitle)
# Get full API information about a YT video
def list_video(youtube, youtube_id):
"""https://developers.google.com/youtube/v3/docs/videos/list"""
response = youtube.videos().list(
part='snippet,contentDetails,status',
id=youtube_id).execute()
snippet = response['items'][0]['snippet']
pprint(response['items'][0])
return snippet
def update_video_language(youtube, youtube_id, lang):
"""https://developers.google.com/youtube/v3/docs/videos/update"""
    # Stupidly, YouTube requires snippet.title to be present in the update
    # body, so we first need to fetch it (and not change it).
response = youtube.videos().list(
part='snippet', id=youtube_id).execute()
snippet = response['items'][0]['snippet']
print("%s BEFORE UPDATE: lang=%s audioLang=%s\n" %
(youtube_id,
snippet.get('defaultLanguage',''),
snippet.get('defaultAudioLanguage', ''))
)
updated_snippet = {
'defaultLanguage': lang,
'defaultAudioLanguage': lang,
'title': snippet['title'],
'categoryId': snippet['categoryId'],
}
response = youtube.videos().update(
part='id,snippet',
body={
'id': youtube_id,
'snippet': updated_snippet,
}).execute()
snippet = response['snippet']
print("%s AFTER UPDATE: lang=%s audioLang=%s\n" %
(youtube_id, snippet['defaultLanguage'],snippet['defaultAudioLanguage'])
)
return snippet
def update_video_description(youtube, youtube_id):
"""https://developers.google.com/youtube/v3/docs/videos/update"""
response = youtube.videos().list(
part='snippet', id=youtube_id).execute()
snippet = response['items'][0]['snippet']
desc = snippet.get('description')
print("ERROR: You need to modify the script to change video descriptions!")
sys.exit(1)
# TODO: This needs to be customized for each use case!
#new_desc = desc.replace('Přihlašte','Přihlaste');
if desc == new_desc:
print("Nothing to do for video %s" % youtube_id)
return snippet
# Title and categoryId are always required, even though
# they were not changed.
updated_snippet = {
'description': new_desc,
'title': snippet['title'],
'categoryId': snippet['categoryId'],
}
response = youtube.videos().update(
part='id,snippet',
body={
'id': youtube_id,
'snippet': updated_snippet,
}).execute()
snippet = response['snippet']
print("%s updated" % youtube_id)
return snippet
# Get specific information for a list of videos
def list_videos(youtube, youtube_ids):
"""https://developers.google.com/youtube/v3/docs/videos/list
Adapted from Khan codebase in:
webapp/gcloud/youtube/youtube_api.py
"""
# Youtube service returns results for ids with trailing
# whitespaces. We need to strip it here to make sure that we
# keep a canonical youtube_id for each video.
all_youtube_ids = [ytid.strip() for ytid in youtube_ids]
# The YouTube API will only let us fetch 50 IDs at a time.
max_results = 50
data = []
for i in range(0, len(all_youtube_ids), max_results):
response = youtube.videos().list(
part='id,snippet,contentDetails,status',
id=",".join(all_youtube_ids[i:i + max_results]),
maxResults=max_results).execute()
data.extend(response["items"])
fields = ('title', 'video_id', 'video_url', 'published_at', 'duration', 'lang',
'has_captions', 'privacy_status', 'license', 'made_for_kids')
header = "\t".join(fields)
fmtstring = "\t".join(["%s" for i in fields])
print(header)
for video in data:
snippet = video['snippet']
details = video['contentDetails']
status = video['status']
to_print = {
'video_id': video['id'],
'video_url': "https://www.youtube.com/watch?v=%s" % video['id'],
'title': snippet['title'],
'has_captions': details['caption'],
'published_at': snippet['publishedAt'],
'duration': parse_isoduration(details['duration']),
'privacy_status': status['privacyStatus'],
'license': status['license'],
'made_for_kids': status['madeForKids'],
}
        # For some reason, some videos are missing this param
to_print['lang'] = snippet.get('defaultAudioLanguage') or snippet.get('defaultLanguage', '')
print(fmtstring % tuple([to_print[key] for key in fields]))
return data
# Get API information about a YT channel
def list_channel(youtube, channel_id):
"""https://developers.google.com/youtube/v3/docs/channels/list"""
response = youtube.channels().list(
part='id,snippet,contentDetails',
id=channel_id).execute()
snippet = response['items'][0]['snippet']
pprint(response)
return snippet
# Get API information about a YT playlist
def list_playlist(youtube, playlist_id):
"""https://developers.google.com/youtube/v3/docs/channels/list"""
response = youtube.playlists().list(
part='id,snippet,contentDetails',
id=playlist_id).execute()
snippet = response['items'][0]['snippet']
pprint(response)
return snippet
# List auto-generated playlists for a given channel
def list_channel_playlists(youtube, channel_id):
"""https://developers.google.com/youtube/v3/docs/channels/list"""
all_playlists = {}
response = youtube.channels().list(
part='contentDetails',
id=channel_id).execute()
playlists = response['items'][0]['contentDetails']['relatedPlaylists']
for pl in playlists:
title = pl
playlist_id = playlists[pl]
all_playlists[title] = playlist_id
print("%s\t%s" % (playlist_id, title))
return all_playlists
# List custom playlists
def list_custom_playlists(youtube, channel_id, nextPageToken=None):
"""https://developers.google.com/youtube/v3/docs/channels/list"""
all_playlists = {}
response = youtube.playlists().list(
part='id,snippet',
maxResults=50,
pageToken=nextPageToken,
channelId=channel_id).execute()
playlists = response['items']
for pl in playlists:
title = pl['snippet']['title']
playlist_id = pl['id']
all_playlists[title] = playlist_id
print("%s\t%s" % (playlist_id, title))
if 'nextPageToken' in response.keys():
all_playlists.update(list_custom_playlists(
youtube, channel_id, response['nextPageToken']
)
)
return all_playlists
# List all uploaded videos for a given channel
# Use action=list_video to get a channel ID.
# Unlisted and private videos will be included only
# if we are authenticated as a manager of the channel.
def list_all_videos_in_channel(youtube, channel_id):
playlists = list_channel_playlists(youtube, channel_id)
playlist_id = playlists['uploads']
list_all_videos_in_playlist(youtube, playlist_id)
def list_all_videos_in_playlist(youtube, playlist_id, nextPageToken=None):
"""https://developers.google.com/youtube/v3/docs/playlistItems/list"""
if nextPageToken is None:
print("Printing videos in playlist %s" % playlist_id)
youtube_ids = set()
response = youtube.playlistItems().list(
part='id,snippet',
maxResults=50,
pageToken=nextPageToken,
playlistId=playlist_id).execute()
for video in response['items']:
snippet = video['snippet']
title = snippet['title']
video_id = snippet['resourceId']['videoId']
youtube_ids.add(video_id)
print("%s\t%s" % (video_id, title))
if 'nextPageToken' in response.keys():
youtube_ids.update(list_all_videos_in_playlist(
youtube, playlist_id, response['nextPageToken']
)
)
return youtube_ids
if __name__ == "__main__":
# The "videoid" option specifies the YouTube video ID that uniquely
# identifies the video for which the caption track will be uploaded.
argparser.add_argument("--videoid",
help="Required; ID for video for which the caption track will be uploaded.")
argparser.add_argument("--videoids-file",
help="Input file with one ID per row.")
# The "name" option specifies the name of the caption trackto be used.
argparser.add_argument("--name", help="Caption track name", default="")
# The "file" option specifies the binary file to be uploaded as a caption track.
argparser.add_argument("--file", help="Captions track file to upload")
# The "language" option specifies the language of the caption track to be uploaded.
argparser.add_argument("--language", help="Caption track language", default="en")
# The "captionid" option specifies the ID of the caption track to be processed.
argparser.add_argument("--captionid", help="Required; ID of the caption track to be processed")
# The "action" option specifies the action to be processed.
argparser.add_argument("--action", help="Action: list|list_video|upload|update|download")
# The "action" option specifies the action to be processed.
argparser.add_argument("--draft", help="Publish subtitles?", default=False, action='store_true')
argparser.add_argument("--channelid", help="YouTube Channel ID")
argparser.add_argument("--playlistid", help="YouTube playlist ID")
argparser.add_argument("--videolang", help="Language of video")
args = argparser.parse_args()
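# Illustrative invocations (the IDs and file names below are hypothetical placeholders;
# authentication flags depend on the argparser/OAuth setup defined earlier in this file):
#   python api/youtube_oauth.py --action=list_video --videoid=YOUTUBE_ID_
#   python api/youtube_oauth.py --action=list_many_videos --videoids-file=ids.txt
#   python api/youtube_oauth.py --action=upload_captions --videoid=YOUTUBE_ID_ --language=cs --file=captions.srt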
SUPPORTED_ACTIONS = (
# actions related to captions
'upload_captions', 'download_captions', 'update_captions',
'list_captions',
# actions related to videos
'list_video', 'list_many_videos', 'update_video_language',
'update_video_description',
# actions related to channels
'list_channel', 'list_channel_videos', 'list_channel_playlists',
# actions related to playlists
'list_playlist', 'list_playlist_videos')
if args.action not in SUPPORTED_ACTIONS:
print("Available actions:", SUPPORTED_ACTIONS)
exit("Unsupported action = %s" % args.action)
if (args.action in ('upload_captions', 'list_captions', 'list_video',
'update_video_language', 'update_video_description')):
if not args.videoid:
exit("Please specify videoid using the --videoid= parameter.")
if (args.action in ('update_video_language',)):
if not args.videolang:
exit("Please specify video language using the --videolang parameter.")
if args.action in ('list_many_videos',):
if not args.videoids_file:
exit("Please specify videoids in a file (one per line), using the --videoids-file=fname parameter.")
if (args.action in ('update_captions', 'download_captions', 'delete_captions')):
if not args.captionid:
exit("Please specify captionid using the --captionid= parameter.")
if (args.action in ('list_channel', 'list_channel_videos', 'list_channel_playlists')):
if not args.channelid:
exit("Please specify channel ID using the --channelid= parameter.")
if (args.action in ('list_playlist', 'list_playlist_videos')):
if not args.playlistid:
exit("Please specify playlist ID using the --playlistid= parameter.")
if args.action == 'upload_captions':
if not args.file:
exit("Please specify a caption track file using the --file= parameter.")
if not os.path.exists(args.file):
exit("Please specify a valid file using the --file= parameter.")
if args.action in ('upload_captions', 'update_captions', 'delete_captions'):
# NOTE(danielhollas): this is just a precautionary measure
if args.language != 'cs':
exit("We do not support upload to other languages besides Czech!")
youtube = get_authenticated_service(args)
youtube_ids = set()
if args.videoids_file:
with open(args.videoids_file, 'r') as f:
youtube_ids = set(f.read().split('\n'))
youtube_ids.discard('')  # discard() avoids a KeyError when there is no trailing empty line
elif args.videoid:
youtube_ids = set([args.videoid])
YTID_REGEX = r'^[a-zA-Z0-9_-]{11}$'
for youtube_id in youtube_ids:
if not re.fullmatch(YTID_REGEX, youtube_id):
exit("Invalid YouTube ID: %s" % youtube_id)
try:
# Channel actions
if args.action == 'list_channel':
list_channel(youtube, args.channelid);
elif args.action == 'list_channel_playlists':
list_channel_playlists(youtube, args.channelid);
list_custom_playlists(youtube, args.channelid);
elif args.action == 'list_channel_videos':
list_all_videos_in_channel(youtube, args.channelid);
# Playlist actions
elif args.action == 'list_playlist':
list_playlist(youtube, args.playlistid);
elif args.action == 'list_playlist_videos':
list_all_videos_in_playlist(youtube, args.playlistid);
# Video actions
elif args.action == 'list_video':
list_video(youtube, args.videoid)
# Bulk listing specific data for videos
elif args.action == 'list_many_videos':
list_videos(youtube, youtube_ids)
elif args.action == 'update_video_language':
update_video_language(youtube, args.videoid, args.videolang)
elif args.action == 'update_video_description':
update_video_description(youtube, args.videoid)
# Caption actions
elif args.action == 'upload_captions':
upload_caption(youtube, args.videoid, args.language, args.name, args.draft, args.file)
elif args.action == 'download_captions':
download_caption(youtube, args.captionid, 'srt')
elif args.action == 'update_captions':
update_caption(youtube, args.videoid, args.language, args.captionid, args.draft, args.file);
elif args.action == 'list_captions':
list_captions(youtube, args.videoid)
except HttpError as e:
print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
|
danielhollas/AmaraUpload
|
api/youtube_oauth.py
|
Python
|
mit
| 20,326
|
[
"VisIt"
] |
9affec3fde25bd9eac631bbd38c251b2d1768902ce690e17fd0d816c2463e69e
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
# !! This is the configuration of Nikola. !! #
# !! You should edit it to your liking. !! #
# ! Some settings can be different in different languages.
# ! A comment stating (translatable) is used to denote those.
# ! There are two ways to specify a translatable setting:
# ! (a) BLOG_TITLE = "My Blog"
# ! (b) BLOG_TITLE = {"en": "My Blog", "es": "Mi Blog"}
# ! Option (a) is used when you don't want that setting translated.
# ! Option (b) is used for settings that are different in different languages.
# Data about this site
BLOG_AUTHOR = "lennyh" # (translatable)
BLOG_TITLE = "v2h" # (translatable)
# This is the main URL for your site. It will be used
# in a prominent link. Don't forget the protocol (http/https)!
SITE_URL = "http://qytz.github.io/"
# This is the URL where Nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "https://github.com/lennyhbt/v2h/"
AUTHOR_GITHUB = "https://github.com/qytz"
BLOG_EMAIL = "hhhhhf@foxmail.com"
BLOG_DESCRIPTION = "All about my history, now and future." # (translatable)
# Nikola is multilingual!
#
# Currently supported languages are:
#
# en English
# ar Arabic
# az Azerbaijani
# bg Bulgarian
# bs Bosnian
# ca Catalan
# cs Czech [ALTERNATIVELY cz]
# da Danish
# de German
# el Greek [NOT gr]
# eo Esperanto
# es Spanish
# et Estonian
# eu Basque
# fa Persian
# fi Finnish
# fr French
# hi Hindi
# hr Croatian
# hu Hungarian
# id Indonesian
# it Italian
# ja Japanese [NOT jp]
# ko Korean
# nb Norwegian Bokmål
# nl Dutch
# pa Punjabi
# pl Polish
# pt Portuguese
# pt_br Portuguese (Brazil)
# ru Russian
# sk Slovak
# sl Slovene
# sr Serbian (Cyrillic)
# sr_latin Serbian (Latin)
# sv Swedish
# tr Turkish [NOT tr_TR]
# uk Ukrainian
# ur Urdu
# zh_cn Chinese (Simplified)
#
# If you want to use Nikola with a non-supported language you have to provide
# a module containing the necessary translations
# (cf. the modules at nikola/data/themes/base/messages/).
# If a specific post is not translated to a language, then the version
# in the default language will be shown instead.
# What is the default language?
DEFAULT_LANG = "zh_cn"
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
DEFAULT_LANG: "",
# Example for another language:
# "es": "./es",
}
# What will translated input files be named like?
# If you have a page something.rst, then something.pl.rst will be considered
# its Polish translation.
# (in the above example: path == "something", ext == "rst", lang == "pl")
# this pattern is also used for metadata:
# something.meta -> something.pl.meta
TRANSLATIONS_PATTERN = "{path}.{lang}.{ext}"
# Links for the sidebar / navigation bar. (translatable)
# This is a dict. The keys are languages, and values are tuples.
#
# For regular links:
# ('https://getnikola.com/', 'Nikola Homepage')
#
# For submenus:
# (
# (
# ('https://apple.com/', 'Apple'),
# ('https://orange.com/', 'Orange'),
# ),
# 'Fruits'
# )
#
# WARNING: Support for submenus is theme-dependent.
# Only one level of submenus is supported.
# WARNING: Some themes, including the default Bootstrap 3 theme,
# may present issues if the menu is too large.
# (in bootstrap3, the navbar can grow too large and cover contents.)
# WARNING: If you link to directories, make sure to follow
# ``STRIP_INDEXES``. If it’s set to ``True``, end your links
# with a ``/``, otherwise end them with ``/index.html`` — or
# else they won’t be highlighted when active.
NAVIGATION_LINKS = {
DEFAULT_LANG: (
("/archive.html", "文章存档"),
("/categories/", "标签"),
("/rss.xml", "RSS feed"),
),
}
# Name of the theme to use.
THEME = "v2h-theme"
#THEME = "material-theme"
#THEME = "yesplease"
# Primary color of your theme. This will be used to customize your theme and
# auto-generate related colors in POSTS_SECTION_COLORS. Must be a HEX value.
THEME_COLOR = '#5670d4'
# POSTS and PAGES contains (wildcard, destination, template) tuples.
#
# The wildcard is used to generate a list of reSt source files
# (whatever/thing.txt).
#
# That fragment could have an associated metadata file (whatever/thing.meta),
# and optionally translated files (example for Spanish, with code "es"):
# whatever/thing.es.txt and whatever/thing.es.meta
#
# This assumes you use the default TRANSLATIONS_PATTERN.
#
# From those files, a set of HTML fragment files will be generated:
# cache/whatever/thing.html (and maybe cache/whatever/thing.html.es)
#
# These files are combined with the template to produce rendered
# pages, which will be placed at
# output / TRANSLATIONS[lang] / destination / pagename.html
#
# where "pagename" is the "slug" specified in the metadata file.
#
# The difference between POSTS and PAGES is that POSTS are added
# to feeds and are considered part of a blog, while PAGES are
# just independent HTML pages.
#
POSTS = (
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.rst", "stories", "story.tmpl"),
("stories/*.txt", "stories", "story.tmpl"),
)
# Below this point, everything is optional
# Post's dates are considered in UTC by default, if you want to use
# another time zone, please set TIMEZONE to match. Check the available
# list from Wikipedia:
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# (e.g. 'Europe/Zurich')
# Also, if you want to use a different time zone in some of your posts,
# you can use the ISO 8601/RFC 3339 format (ex. 2012-03-30T23:00:00+02:00)
TIMEZONE = "Asia/Shanghai"
# If you want to use ISO 8601 (also valid RFC 3339) throughout Nikola
# (especially in new_post), set this to True.
# Note that this does not affect DATE_FORMAT.
# FORCE_ISO8601 = False
# Date format used to display post dates. (translatable)
# (str used by datetime.datetime.strftime)
DATE_FORMAT = '%Y-%m-%d %H:%M'
# Date format used to display post dates, if local dates are used. (translatable)
# (str used by moment.js)
JS_DATE_FORMAT = 'YYYY-MM-DD HH:mm'
# Date fanciness.
#
# 0 = using DATE_FORMAT and TIMEZONE
# 1 = using JS_DATE_FORMAT and local user time (via moment.js)
# 2 = using a string like “2 days ago”
#
# Your theme must support it, bootstrap and bootstrap3 already do.
DATE_FANCINESS = 0
# While Nikola can select a sensible locale for each language,
# sometimes explicit control can come handy.
# In this file we express locales in the string form that
# python's locales will accept in your OS, by example
# "en_US.utf8" in Unix-like OS, "English_United States" in Windows.
# LOCALES = dict mapping language --> explicit locale for the languages
# in TRANSLATIONS. You can omit one or more keys.
# LOCALE_FALLBACK = locale to use when an explicit locale is unavailable
# LOCALE_DEFAULT = locale to use for languages not mentioned in LOCALES; if
# not set the default Nikola mapping is used.
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of {source: relative destination}.
# Default is:
# FILES_FOLDERS = {'files': ''}
# Which means copy 'files' into 'output'
# One or more folders containing listings to be processed and stored into
# the output. The format is a dictionary of {source: relative destination}.
# Default is:
# LISTINGS_FOLDERS = {'listings': 'listings'}
# Which means process listings from 'listings' into 'output/listings'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# 'rest' is reStructuredText
# 'markdown' is MarkDown
# 'html' assumes the file is HTML and just copies it
COMPILERS = {
"rest": ('.rst', '.txt'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm'),
# PHP files are rendered the usual way (i.e. with the full templates).
# The resulting files have .php extensions, making it possible to run
# them without reconfiguring your server to recognize them.
"php": ('.php',),
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ('.rst', '.md', '.txt'),
}
# Create by default posts in one file format?
# Set to False for two-file posts, with separate metadata.
# ONE_FILE_POSTS = True
# If this is set to True, the DEFAULT_LANG version will be displayed for
# untranslated posts.
# If this is set to False, then posts that are not translated to a language
# LANG will not be visible at all in the pages in that language.
# Formerly known as HIDE_UNTRANSLATED_POSTS (inverse)
# SHOW_UNTRANSLATED_POSTS = True
# Nikola supports logo display. If you have one, you can put the URL here.
# Final output is <img src="LOGO_URL" id="logo" alt="BLOG_TITLE">.
# The URL may be relative to the site root.
# LOGO_URL = ''
# If you want to hide the title of your website (for example, if your logo
# already contains the text), set this to False.
# SHOW_BLOG_TITLE = True
# Writes tag cloud data in form of tag_cloud_data.json.
# Warning: this option will change its default value to False in v8!
WRITE_TAG_CLOUD = True
# Generate pages for each section. The site must have at least two sections
# for this option to take effect. It wouldn't build for just one section.
POSTS_SECTIONS = True
# Setting this to False generates a list page instead of an index. Indexes
# are the default and will apply GENERATE_ATOM if set.
# POSTS_SECTIONS_ARE_INDEXES = True
# Each post and section page will have an associated color that can be used
# to style them with a recognizable color detail across your site. A color
# is assigned to each section based on shifting the hue of your THEME_COLOR
# at least 7.5 % while leaving the lightness and saturation untouched in the
# HUSL colorspace. You can overwrite colors by assigning them colors in HEX.
# POSTS_SECTION_COLORS = {
# DEFAULT_LANG: {
# 'posts': '#49b11bf',
# 'reviews': '#ffe200',
# },
# }
# Associate a description with a section. For use in meta description on
# section index pages or elsewhere in themes.
# POSTS_SECTION_DESCRIPTIONS = {
# DEFAULT_LANG: {
# 'how-to': 'Learn how-to things properly with these amazing tutorials.',
# },
# }
# Sections are determined by their output directory as set in POSTS by default,
# but can alternatively be determined from file metadata instead.
# POSTS_SECTION_FROM_META = False
# Names are determined from the output directory name automatically or the
# metadata label. Unless overwritten below, names will be title-cased with
# hyphens replaced by spaces.
# POSTS_SECTION_NAME = {
# DEFAULT_LANG: {
# 'posts': 'Blog Posts',
# 'uncategorized': 'Odds and Ends',
# },
# }
# Titles for per-section index pages. Can be either one string where "{name}"
# is substituted or the POSTS_SECTION_NAME, or a dict of sections. Note
# that the INDEX_PAGES option is also applied to section page titles.
# POSTS_SECTION_TITLE = {
# DEFAULT_LANG: {
# 'how-to': 'How-to and Tutorials',
# },
# }
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag.xml (RSS feed for a tag)
# (translatable)
# TAG_PATH = "categories"
# See TAG_PATH's "list of tags" for the default setting value. Can be overwritten
# here with any path relative to the output directory.
# (translatable)
# TAGS_INDEX_PATH = "tags.html"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = False
# Set descriptions for tag pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the tag list or index page’s title.
# TAG_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
# }
# Set special titles for tag pages. The default is "Posts about TAG".
# TAG_PAGES_TITLES = {
# DEFAULT_LANG: {
# "blogging": "Meta-posts about blogging",
# "open source": "Posts about open source software"
# },
# }
# If you do not want to display a tag publicly, you can mark it as hidden.
# The tag will not be displayed on the tag list page, the tag cloud and posts.
# Tag pages will still be generated.
HIDDEN_TAGS = ['mathjax']
# Only include tags on the tag list/overview page if there are at least
# TAGLIST_MINIMUM_POSTS number of posts or more with every tag. Every tag
# page is still generated, linked from posts, and included in the sitemap.
# However, more obscure tags can be hidden from the tag index page.
# TAGLIST_MINIMUM_POSTS = 1
# Final locations are:
# output / TRANSLATION[lang] / CATEGORY_PATH / index.html (list of categories)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.html (list of posts for a category)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.xml (RSS feed for a category)
# (translatable)
# CATEGORY_PATH = "categories"
# CATEGORY_PREFIX = "cat_"
# If CATEGORY_ALLOW_HIERARCHIES is set to True, categories can be organized in
# hierarchies. For a post, the whole path in the hierarchy must be specified,
# using a forward slash ('/') to separate paths. Use a backslash ('\') to escape
# a forward slash or a backslash (i.e. '\//\\' is a path specifying the
# subcategory called '\' of the top-level category called '/').
CATEGORY_ALLOW_HIERARCHIES = False
# If CATEGORY_OUTPUT_FLAT_HIERARCHY is set to True, the output written to output
# contains only the name of the leaf category and not the whole path.
CATEGORY_OUTPUT_FLAT_HIERARCHY = False
# If CATEGORY_PAGES_ARE_INDEXES is set to True, each category's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# CATEGORY_PAGES_ARE_INDEXES = False
# Set descriptions for category pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the category list or index page’s title.
# CATEGORY_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
# }
# Set special titles for category pages. The default is "Posts about CATEGORY".
# CATEGORY_PAGES_TITLES = {
# DEFAULT_LANG: {
# "blogging": "Meta-posts about blogging",
# "open source": "Posts about open source software"
# },
# }
# If you do not want to display a category publicly, you can mark it as hidden.
# The category will not be displayed on the category list page.
# Category pages will still be generated.
HIDDEN_CATEGORIES = []
# If ENABLE_AUTHOR_PAGES is set to True and there is more than one
# author, author pages are generated.
# ENABLE_AUTHOR_PAGES = True
# Final locations are:
# output / TRANSLATION[lang] / AUTHOR_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / AUTHOR_PATH / author.html (list of posts for a tag)
# output / TRANSLATION[lang] / AUTHOR_PATH / author.xml (RSS feed for a tag)
# AUTHOR_PATH = "authors"
# If AUTHOR_PAGES_ARE_INDEXES is set to True, each author's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# AUTHOR_PAGES_ARE_INDEXES = False
# Set descriptions for author pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the author list or index page’s title.
# AUTHOR_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "Juanjo Conti": "Python coder and writer.",
# "Roberto Alsina": "Nikola father."
# },
# }
# If you do not want to display an author publicly, you can mark it as hidden.
# The author will not be displayed on the author list page and posts.
# Tag pages will still be generated.
HIDDEN_AUTHORS = ['Guest']
# Final location for the main blog page and sibling paginated pages is
# output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# INDEX_PATH = ""
# Create per-month archives instead of per-year
# CREATE_MONTHLY_ARCHIVE = False
# Create one large archive instead of per-year
# CREATE_SINGLE_ARCHIVE = False
# Create year, month, and day archives each with a (long) list of posts
# (overrides both CREATE_MONTHLY_ARCHIVE and CREATE_SINGLE_ARCHIVE)
# CREATE_FULL_ARCHIVES = False
# If monthly archives or full archives are created, adds also one archive per day
# CREATE_DAILY_ARCHIVE = False
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / DAY / index.html
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# If ARCHIVES_ARE_INDEXES is set to True, each archive page which contains a list
# of posts will contain the posts themselves. If set to False, it will be just a
# list of links.
# ARCHIVES_ARE_INDEXES = False
# URLs to other posts/pages can take 3 forms:
# rel_path: a relative URL to the current page/post (default)
# full_path: a URL with the full path from the root
# absolute: a complete URL (that includes the SITE_URL)
# URL_TYPE = 'rel_path'
# If USE_BASE_TAG is True, then all HTML files will include
# something like <base href=http://foo.var.com/baz/bat> to help
# the browser resolve relative links.
# In some rare cases, this will be a problem, and you can
# disable it by setting USE_BASE_TAG to False.
# USE_BASE_TAG = True
# Final location for the blog main RSS feed is:
# output / TRANSLATION[lang] / RSS_PATH / rss.xml
# RSS_PATH = ""
# Slug the Tag URL. Easier for users to type, special characters are
# often removed or replaced as well.
# SLUG_TAG_PATH = True
# Slug the Author URL. Easier for users to type, special characters are
# often removed or replaced as well.
# SLUG_AUTHOR_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# A HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
REDIRECTIONS = []
# Presets of commands to execute to deploy. Can be anything, for
# example, you may use rsync:
# "rsync -rav --delete output/ joe@my.site:/srv/www/site"
# And then do a backup, or run `nikola ping` from the `ping`
# plugin (`nikola plugin -i ping`). Or run `nikola check -l`.
# You may also want to use github_deploy (see below).
# You can define multiple presets and specify them as arguments
# to `nikola deploy`. If no arguments are specified, a preset
# named `default` will be executed. You can use as many presets
# in a `nikola deploy` command as you like.
# DEPLOY_COMMANDS = {
# 'default': [
# "rsync -rav --delete output/ joe@my.site:/srv/www/site",
# ]
# }
# For user.github.io OR organization.github.io pages, the DEPLOY branch
# MUST be 'master', and 'gh-pages' for other repositories.
GITHUB_SOURCE_BRANCH = 'src'
GITHUB_DEPLOY_BRANCH = 'master'
# The name of the remote where you wish to push to, using github_deploy.
GITHUB_REMOTE_NAME = 'origin'
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# Filters to apply to the output.
# A dictionary where the keys are either a file extension or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, only .php files use filters to inject PHP into
# Nikola’s templates. All other filters must be enabled through FILTERS.
#
# Many filters are shipped with Nikola. A list is available in the manual:
# <https://getnikola.com/handbook.html#post-processing-filters>
#
# from nikola import filters
# FILTERS = {
# ".html": [filters.typogrify],
# ".js": [filters.closure_compiler],
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
# }
# Expert setting! Create a gzipped copy of each generated file. Cheap server-
# side optimization for very high traffic sites or low memory servers.
# GZIP_FILES = False
# File extensions that will be compressed
# GZIP_EXTENSIONS = ('.txt', '.htm', '.html', '.css', '.js', '.json', '.atom', '.xml')
# Use an external gzip command? None means no.
# Example: GZIP_COMMAND = "pigz -k {filename}"
# GZIP_COMMAND = None
# Make sure the server does not return an "Accept-Ranges: bytes" header for
# files compressed by this option! OR make sure that a ranged request does not
# return partial content of another representation for these resources. Do not
# use this feature if you do not understand what this means.
# Compiler to process LESS files.
# LESS_COMPILER = 'lessc'
# A list of options to pass to the LESS compiler.
# Final command is: LESS_COMPILER LESS_OPTIONS file.less
# LESS_OPTIONS = []
# Compiler to process Sass files.
# SASS_COMPILER = 'sass'
# A list of options to pass to the Sass compiler.
# Final command is: SASS_COMPILER SASS_OPTIONS file.s(a|c)ss
# SASS_OPTIONS = []
# #############################################################################
# Image Gallery Options
# #############################################################################
# One or more folders containing galleries. The format is a dictionary of
# {"source": "relative_destination"}, where galleries are looked for in
# "source/" and the results will be located in
# "OUTPUT_PATH/relative_destination/gallery_name"
# Default is:
# GALLERY_FOLDERS = {"galleries": "galleries"}
# More gallery options:
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
# USE_FILENAME_AS_TITLE = True
# EXTRA_IMAGE_EXTENSIONS = []
#
# If set to False, it will sort by filename instead. Defaults to True
# GALLERY_SORT_BY_DATE = True
#
# Folders containing images to be used in normal posts or pages. Images will be
# scaled down according to IMAGE_THUMBNAIL_SIZE and MAX_IMAGE_SIZE options, but
# will have to be referenced manually to be visible on the site
# (the thumbnail has ``.thumbnail`` added before the file extension).
# The format is a dictionary of {source: relative destination}.
IMAGE_FOLDERS = {'images': 'images'}
# IMAGE_THUMBNAIL_SIZE = 400
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes.
# INDEXES_PAGES defaults to ' old posts, page %d' or ' page %d' (translated),
# depending on the value of INDEXES_PAGES_MAIN.
#
# (translatable) If the following is empty, defaults to BLOG_TITLE:
# INDEXES_TITLE = ""
#
# (translatable) If the following is empty, defaults to ' [old posts,] page %d' (see above):
# INDEXES_PAGES = ""
#
# If the following is True, INDEXES_PAGES is also displayed on the main (the
# newest) index page (index.html):
# INDEXES_PAGES_MAIN = False
#
# If the following is True, index-1.html has the oldest posts, index-2.html the
# second-oldest posts, etc., and index.html has the newest posts. This ensures
# that all posts on index-x.html will forever stay on that page, no matter how
# many new posts are added.
# If False, index-1.html has the second-newest posts, index-2.html the third-newest,
# and index-n.html the oldest posts. When this is active, old posts can be moved
# to other index pages when new posts are added.
# INDEXES_STATIC = True
#
# (translatable) If PRETTY_URLS is set to True, this setting will be used to create
# prettier URLs for index pages, such as page/2/index.html instead of index-2.html.
# Valid values for this settings are:
# * False,
# * a list or tuple, specifying the path to be generated,
# * a dictionary mapping languages to lists or tuples.
# Every list or tuple must consist of strings which are used to combine the path;
# for example:
# ['page', '{number}', '{index_file}']
# The replacements
# {number} --> (logical) page number;
# {old_number} --> the page number inserted into index-n.html before (zero for
# the main page);
# {index_file} --> value of option INDEX_FILE
# are made.
# Note that in case INDEXES_PAGES_MAIN is set to True, a redirection will be created
# for the full URL with the page number of the main page to the normal (shorter) main
# page URL.
# INDEXES_PRETTY_PAGE_URL = False
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored.
# Can be any of:
# algol
# algol_nu
# arduino
# autumn
# borland
# bw
# colorful
# default
# emacs
# friendly
# fruity
# igor
# lovelace
# manni
# monokai
# murphy
# native
# paraiso_dark
# paraiso_light
# pastie
# perldoc
# rrt
# tango
# trac
# vim
# vs
# xcode
# This list MAY be incomplete since pygments adds styles every now and then.
# CODE_COLOR_SCHEME = 'default'
# If you use 'site-reveal' theme you can select several subthemes
# THEME_REVEAL_CONFIG_SUBTHEME = 'sky'
# You can also use: beige/serif/simple/night/default
# Again, if you use 'site-reveal' theme you can select several transitions
# between the slides
# THEME_REVEAL_CONFIG_TRANSITION = 'cube'
# You can also use: page/concave/linear/none/default
# FAVICONS contains (name, file, size) tuples.
# Used to create favicon link like this:
# <link rel="name" href="file" sizes="size"/>
# FAVICONS = (
# ("icon", "/favicon.ico", "16x16"),
# ("icon", "/icon_128x128.png", "128x128"),
# )
# Show teasers (instead of full posts) in indexes? Defaults to False.
# INDEX_TEASERS = False
# HTML fragments with the Read more... links.
# The following tags exist and are replaced for you:
# {link} A link to the full post page.
# {read_more} The string “Read more” in the current language.
# {reading_time} An estimate of how long it will take to read the post.
# {remaining_reading_time} An estimate of how long it will take to read the post, sans the teaser.
# {min_remaining_read} The string “{remaining_reading_time} min remaining to read” in the current language.
# {paragraph_count} The amount of paragraphs in the post.
# {remaining_paragraph_count} The amount of paragraphs in the post, sans the teaser.
# {{ A literal { (U+007B LEFT CURLY BRACKET)
# }} A literal } (U+007D RIGHT CURLY BRACKET)
# 'Read more...' for the index page, if INDEX_TEASERS is True (translatable)
INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# 'Read more...' for the feeds, if FEED_TEASERS is True (translatable)
FEED_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>'
# Append a URL query to the FEED_READ_MORE_LINK in Atom and RSS feeds. Advanced
# option used for traffic source tracking.
# Minimum example for use with Piwik: "pk_campaign=feed"
# The following tags exist and are replaced for you:
# {feedRelUri} A relative link to the feed.
# {feedFormat} The name of the syndication format.
# Example using replacement for use with Google Analytics:
# "utm_source={feedRelUri}&utm_medium=nikola_feed&utm_campaign={feedFormat}_feed"
FEED_LINKS_APPEND_QUERY = False
# An HTML fragment describing the license, for the sidebar.
# (translatable)
LICENSE = ""
# I recommend using the Creative Commons' wizard:
# https://creativecommons.org/choose/
LICENSE = """
<a rel="license" href="https://creativecommons.org/licenses/by-nc-sa/4.0/">
<img alt="Creative Commons License BY-NC-SA"
style="border-width:0; margin-bottom:12px;"
src="https://i.creativecommons.org/l/by-nc-sa/4.0/88x31.png"></a>
<br />This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-nc/4.0/">Creative Commons Attribution-NonCommercial 4.0 International License</a>.
"""
# A small copyright notice for the page footer (in HTML).
# (translatable)
CONTENT_FOOTER = 'Contents © {date} <a href="mailto:{email}">{author}</a> | <a href="{author_github}">{author}</a> - Powered by <a href="https://getnikola.com" rel="nofollow">Nikola</a> {license}'
# Things that will be passed to CONTENT_FOOTER.format(). This is done
# for translatability, as dicts are not formattable. Nikola will
# intelligently format the setting properly.
# The setting takes a dict. The keys are languages. The values are
# tuples of tuples of positional arguments and dicts of keyword arguments
# to format(). For example, {'en': (('Hello',), {'target': 'World'})}
# results in CONTENT_FOOTER['en'].format('Hello', target='World').
# WARNING: If you do not use multiple languages with CONTENT_FOOTER, this
# still needs to be a dict of this format. (it can be empty if you
# do not need formatting)
# (translatable)
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author_github": AUTHOR_GITHUB,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE
}
)
}
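# For illustration (derived from the settings above), the rendered footer is roughly:
# Contents © <current year> <a href="mailto:hhhhhf@foxmail.com">lennyh</a> |
# <a href="https://github.com/qytz">lennyh</a> - Powered by Nikola, followed by the LICENSE badge.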
# To use comments, you can choose between different third party comment
# systems. The following comment systems are supported by Nikola:
# disqus, facebook, googleplus, intensedebate, isso, livefyre, muut
# You can leave this option blank to disable comments.
COMMENT_SYSTEM = "disqus"
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = "v2h"
# Enable annotations using annotateit.org?
# If set to False, you can still enable them for individual posts and pages
# setting the "annotations" metadata.
# If set to True, you can disable them for individual posts and pages using
# the "noannotations" metadata.
# ANNOTATIONS = False
# Create index.html for page (story) folders?
# WARNING: if a page would conflict with the index file (usually
# caused by setting slug to `index`), the STORY_INDEX
# will not be generated for that directory.
# STORY_INDEX = False
# Enable comments on story pages?
# COMMENTS_IN_STORIES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead strip /foo/default.html => /foo)
# (Note: This was briefly STRIP_INDEX_HTML in v 5.4.3 and 5.4.4)
STRIP_INDEXES = True
# Should the sitemap list directories which only include other directories
# and no files?
# Defaults to True.
# If this is False
# e.g. /2012 includes only /01, /02, /03, /04, ...: don't add it to the sitemap
# if /2012 includes any files (including index.html)... add it to the sitemap
# SITEMAP_INCLUDE_FILELESS_DIRS = True
# List of files relative to the server root (!) that will be asked to be excluded
# from indexing and other robotic spidering. * is supported. Will only be effective
# if SITE_URL points to server root. The list is used to exclude resources from
# /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml.
# ROBOTS_EXCLUSIONS = ["/archive.html", "/category/*.html"]
# Instead of putting files in <slug>.html, put them in <slug>/index.html.
# No web server configuration is required. Also enables STRIP_INDEXES.
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata.
PRETTY_URLS = True
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html
# SCHEDULE_RULE = ''
# If True, use the scheduling rule to all posts by default
# SCHEDULE_ALL = False
# Do you want to add a MathJax config file?
# MATHJAX_CONFIG = ""
# If you are using the compile-ipynb plugin, just add this one:
# MATHJAX_CONFIG = """
# <script type="text/x-mathjax-config">
# MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ],
# processEscapes: true
# },
# displayAlign: 'left', // Change this to 'center' to center equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
# });
# </script>
# """
# Want to use KaTeX instead of MathJax? While KaTeX is less featureful,
# it's faster and the output looks better.
# If you set USE_KATEX to True, you also need to add an extra CSS file
# like this:
# EXTRA_HEAD_DATA = """<link rel="stylesheet" href="//cdnjs.cloudflare.com/ajax/libs/KaTeX/0.3.0/katex.min.css">"""
EXTRA_HEAD_DATA = """
<link rel="stylesheet" type="text/css" href="/assets/css/tipuesearch.css">
<link rel="stylesheet" type="text/css" href="/assets/css/chinese.css">
"""
# USE_KATEX = False
# Do you want to customize the nbconversion of your IPython notebook?
# IPYNB_CONFIG = {}
# With the following example configuration you can use a custom jinja template
# called `toggle.tpl` which has to be located in your site/blog main folder:
# IPYNB_CONFIG = {'Exporter':{'template_file': 'toggle'}}
# What Markdown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# Note: most Nikola-specific extensions are done via the Nikola plugin system,
# with the MarkdownExtension class and should not be added here.
# The default is ['fenced_code', 'codehilite']
MARKDOWN_EXTENSIONS = ['fenced_code', 'codehilite', 'extra']
# Extra options to pass to the pandoc command.
# By default it's empty; it is a list of strings, for example
# ['-F', 'pandoc-citeproc', '--bibliography=/Users/foo/references.bib']
# PANDOC_OPTIONS = []
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty (which is
# the default right now)
# (translatable)
SOCIAL_BUTTONS_CODE = """
<script>window._bd_share_config={"common":{"bdSnsKey":{},"bdText":"","bdMini":"2","bdMiniList":["weixin","tsina","sqq","qzone","douban","tieba","hx","youdao","fbook","twi","linkedin","mail","copy","print"],"bdPic":"","bdStyle":"0","bdSize":"16"},"slide":{"type":"slide","bdImg":"0","bdPos":"right","bdTop":"100"}};with(document)0[(getElementsByTagName('head')[0]||body).appendChild(createElement('script')).src='http://bdimg.share.baidu.com/static/api/js/share.js?v=89860593.js?cdnversion='+~(-new Date()/36e5)];</script>
"""
#SOCIAL_BUTTONS_CODE = """
#<!-- Social buttons -->
#<div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style">
#<a class="addthis_button_more">Share</a>
#<ul><li><a class="addthis_button_facebook"></a>
#<li><a class="addthis_button_google_plusone_share"></a>
#<li><a class="addthis_button_linkedin"></a>
#<li><a class="addthis_button_twitter"></a>
#</ul>
#</div>
#<script src="//s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
#<!-- End of social buttons -->
#"""
# Show link to source for the posts?
# Formerly known as HIDE_SOURCELINK (inverse)
# SHOW_SOURCELINK = True
# Copy the source files for your pages?
# Setting it to False implies SHOW_SOURCELINK = False
# COPY_SOURCES = True
# Modify the number of Post per Index Page
# Defaults to 10
# INDEX_DISPLAY_POST_COUNT = 10
# By default, Nikola generates RSS files for the website and for tags, and
# links to it. Set this to False to disable everything RSS-related.
# GENERATE_RSS = True
# By default, Nikola does not generate Atom files for indexes and links to
# them. Generate Atom for tags by setting TAG_PAGES_ARE_INDEXES to True.
# Atom feeds are built based on INDEX_DISPLAY_POST_COUNT and not FEED_LENGTH
# Switch between plain-text summaries and full HTML content using the
# FEED_TEASER option. FEED_LINKS_APPEND_QUERY is also respected. Atom feeds
# are generated even for old indexes and have pagination link relations
# between each other. Old Atom feeds with no changes are marked as archived.
# GENERATE_ATOM = False
# Only include teasers in Atom and RSS feeds. Disabling this includes the full
# content. Defaults to True.
# FEED_TEASERS = True
# Strip HTML from Atom and RSS feed summaries and content. Defaults to False.
# FEED_PLAIN = False
# Number of posts in Atom and RSS feeds.
# FEED_LENGTH = 10
# Include preview image as a <figure><img></figure> at the top of the entry.
# Requires FEED_PLAIN = False. If the preview image is found in the content,
# it will not be included again. The image will be included as-is; aim to optimize
# the image source for Feedly, Apple News, Flipboard, and other popular clients.
# FEED_PREVIEWIMAGE = True
# RSS_LINK is an HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a FeedBurner feed or something else.
# RSS_LINK = None
# A search form to search this site, for the sidebar. You can use a Google
# custom search (https://www.google.com/cse/)
# Or a DuckDuckGo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# (translatable)
# SEARCH_FORM = ""
#
# This search form works for any site and looks good in the "site" theme where
# it appears on the navigation bar:
#
# SEARCH_FORM = """
# <!-- DuckDuckGo custom search -->
# <form method="get" id="search" action="//duckduckgo.com/"
# class="navbar-form pull-left">
# <input type="hidden" name="sites" value="%s">
# <input type="hidden" name="k8" value="#444444">
# <input type="hidden" name="k9" value="#D51920">
# <input type="hidden" name="kt" value="h">
# <input type="text" name="q" maxlength="255"
# placeholder="Search…" class="span2" style="margin-top: 4px;">
# <input type="submit" value="DuckDuckGo Search" style="visibility: hidden;">
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
#
# If you prefer a Google search form, here's an example that should just work:
# SEARCH_FORM = """
# <!-- Google custom search -->
# <form method="get" action="https://www.google.com/search" class="navbar-form navbar-right" role="search">
# <div class="form-group">
# <input type="text" name="q" class="form-control" placeholder="Search">
# </div>
# <button type="submit" class="btn btn-primary">
# <span class="glyphicon glyphicon-search"></span>
# </button>
# <input type="hidden" name="sitesearch" value="%s">
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
SEARCH_FORM = """
<span class="navbar-form pull-left">
<input type="text" class="form-control" placeholder="Search" id="tipue_search_input">
</span>"""
# Use content distribution networks for jQuery, twitter-bootstrap css and js,
# and html5shiv (for older versions of Internet Explorer)
# If this is True, jQuery and html5shiv are served from the Google CDN and
# Bootstrap is served from BootstrapCDN (provided by MaxCDN)
# Set this to False if you want to host your site without requiring access to
# external resources.
# USE_CDN = False
# Check for USE_CDN compatibility.
# If you are using custom themes, have configured the CSS properly and are
# receiving warnings about incompatibility but believe they are incorrect, you
# can set this to False.
# USE_CDN_WARNING = True
# Extra things you want in the pages HEAD tag. This will be added right
# before </head>
# (translatable)
# EXTRA_HEAD_DATA = ""
# Google Analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# (translatable)
# BODY_END = ""
#BODY_END = """
#<script src="/assets/js/tipuesearch_set.js"></script>
#<script src="/assets/js/tipuesearch.js"></script>
#<script>
#$(document).ready(function() {
# $('#tipue_search_input').tipuesearch({
# 'mode': 'json',
# 'contentLocation': '/assets/js/tipuesearch_content.json',
# 'showUrl': false
# });
#});
#</script>
#"""
BODY_END = """
<!-- Modal -->
<div id="search-results" class="modal fade" role="dialog" style="height: 80%;">
<div class="modal-dialog">
<!-- Modal content-->
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal">×</button>
<h4 class="modal-title">Search Results:</h4>
</div>
<div class="modal-body" id="tipue_search_content" style="max-height: 600px; overflow-y: auto;">
</div>
<div class="modal-footer">
<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
<script>
$(document).ready(function() {
$.when(
$.getScript( "/assets/js/tipuesearch_set.js" ),
$.getScript( "/assets/js/tipuesearch.js" ),
$.Deferred(function( deferred ){
$( deferred.resolve );
})
).done(function() {
$('#tipue_search_input').tipuesearch({
'mode': 'json',
'contentLocation': '/assets/js/tipuesearch_content.json'
});
$('#tipue_search_input').keyup(function (e) {
if (e.keyCode == 13) {
$('#search-results').modal()
}
});
});
});
</script>
"""
# The possibility to extract metadata from the filename by using a
# regular expression.
# To make it work you need to name parts of your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example re is the following:
# '(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md'
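# With that pattern, an illustrative filename '2016-01-05-hello-world-Hello World.md'
# would yield date='2016-01-05', slug='hello-world', title='Hello World'.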
# FILE_METADATA_REGEXP = None
# If you hate "Filenames with Capital Letters and Spaces.md", you should
# set this to true.
UNSLUGIFY_TITLES = True
# Additional metadata that is added to a post when creating a new_post
ADDITIONAL_METADATA = {
'author': BLOG_AUTHOR
}
# Nikola supports Open Graph Protocol data for enhancing link sharing and
# discoverability of your site on Facebook, Google+, and other services.
# Open Graph is enabled by default.
# USE_OPEN_GRAPH = True
# Nikola supports Twitter Card summaries, but they are disabled by default.
# They make it possible for you to attach media to Tweets that link
# to your content.
#
# IMPORTANT:
# Please note, that you need to opt-in for using Twitter Cards!
# To do this please visit https://cards-dev.twitter.com/validator
#
# Uncomment and modify the following lines to match your accounts.
# Images displayed come from the `previewimage` meta tag.
# You can specify the card type by using the `card` parameter in TWITTER_CARD.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards
# # 'card': 'summary', # Card type, you can also use 'summary_large_image',
# # see https://dev.twitter.com/cards/types
# # 'site': '@website', # twitter nick for the website
# # 'creator': '@username', # Username for the content creator / author.
# }
# If webassets is installed, bundle JS and CSS into single files to make
# site loading faster in an HTTP/1.1 environment, but it is not recommended for
# HTTP/2.0 when caching is used. Defaults to True.
# USE_BUNDLES = True
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Add the absolute paths to directories containing plugins to use them.
# For example, the `plugins` directory of your clone of the Nikola plugins
# repository.
# EXTRA_PLUGINS_DIRS = []
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# If set to True, enable optional hyphenation in your posts (requires pyphen)
# Enabling hyphenation has been shown to break math support in some cases,
# use with caution.
# HYPHENATE = False
# The <hN> tags in HTML generated by certain compilers (reST/Markdown)
# will be demoted by that much (1 → h1 will become h2 and so on)
# This was a hidden feature of the Markdown and reST compilers in the
# past. Useful especially if your post titles are in <h1> tags too, for
# example.
# (defaults to 1.)
# DEMOTE_HEADERS = 1
# If you don’t like slugified file names ([a-z0-9] and a literal dash),
# and would prefer to use all the characters your file system allows.
# USE WITH CARE! This is also not guaranteed to be perfect, and may
# sometimes crash Nikola, your web server, or eat your cat.
# USE_SLUGIFY = True
# Templates will use those filters, along with the defaults.
# Consult your engine's documentation on filters if you need help defining
# those.
# TEMPLATE_FILTERS = {}
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
# Add functions here and they will be called with template
# GLOBAL_CONTEXT as parameter when the template is about to be
# rendered
GLOBAL_CONTEXT_FILLER = []
|
qytz/qytz.github.io
|
conf.py
|
Python
|
cc0-1.0
| 47,282
|
[
"VisIt"
] |
b8f65208e95b43f6f54e32ec0b13f5ef8e3ce1ca6693aa0659a2bd36e60d623f
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import numpy as np
import espressomd.lb
import espressomd.lbboundaries
import espressomd.shapes
"""
Check the Lattice Boltzmann 'pressure' driven flow in a slab system
by comparing to the analytical solution.
"""
AGRID = .25
EXT_FORCE = .1
VISC = .7
DENS = 1.7
TIME_STEP = 0.1
LB_PARAMS = {'agrid': AGRID,
'dens': DENS,
'visc': VISC,
'fric': 1.0,
'tau': TIME_STEP,
'ext_force_density': [0.0, 0.0, EXT_FORCE]}
def poiseuille_flow(z, H, ext_force_density, dyn_visc):
"""
Analytical solution for plane Poiseuille flow.
Parameters
----------
z : :obj:`float`
Distance to the mid plane of the channel.
H : :obj:`float`
Distance between the boundaries.
ext_force_density : :obj:`float`
Force density on the fluid normal to the boundaries.
dyn_visc : :obj:`float`
Dynamic viscosity of the LB fluid.
"""
return ext_force_density * 1. / (2 * dyn_visc) * (H**2.0 / 4.0 - z**2.0)
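# Illustrative sanity check of the analytical profile above (not part of the test):
# with the slab geometry used below, H = box_l[0] - 2 * AGRID = 12.0 - 0.5 = 11.5,
# the dynamic viscosity is VISC * DENS = 0.7 * 1.7 = 1.19, and the centre-line velocity is
# poiseuille_flow(0.0, 11.5, EXT_FORCE, 1.19) = 0.1 / (2 * 1.19) * 11.5**2 / 4 ≈ 1.39.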
class LBPoiseuilleCommon(object):
"""Base class of the test that holds the test logic."""
lbf = None
system = espressomd.System(box_l=[12.0, 3.0, 3.0])
system.time_step = TIME_STEP
system.cell_system.skin = 0.4 * AGRID
def prepare(self):
"""
Integrate the LB fluid until steady state is reached within a certain
accuracy.
"""
self.system.actors.clear()
self.system.actors.add(self.lbf)
wall_shape1 = espressomd.shapes.Wall(normal=[1, 0, 0], dist=AGRID)
wall_shape2 = espressomd.shapes.Wall(
normal=[-1, 0, 0], dist=-(self.system.box_l[0] - AGRID))
wall1 = espressomd.lbboundaries.LBBoundary(shape=wall_shape1)
wall2 = espressomd.lbboundaries.LBBoundary(shape=wall_shape2)
self.system.lbboundaries.add(wall1)
self.system.lbboundaries.add(wall2)
mid_indices = [int((self.system.box_l[0] / AGRID) / 2),
int((self.system.box_l[1] / AGRID) / 2),
int((self.system.box_l[2] / AGRID) / 2)]
diff = float("inf")
old_val = self.lbf[mid_indices].velocity[2]
while diff > 0.01:
self.system.integrator.run(100)
new_val = self.lbf[mid_indices].velocity[2]
diff = abs(new_val - old_val)
old_val = new_val
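# The loop above integrates in chunks of 100 LB steps and stops once the
# mid-channel velocity changes by less than 0.01 between chunks, which is
# taken as the steady-state criterion.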
def test_profile(self):
"""
Compare against analytical function by calculating the RMSD.
"""
self.prepare()
velocities = np.zeros((int(self.system.box_l[0] / AGRID), 2))
for x in range(velocities.shape[0]):
v_tmp = []
for y in range(int(self.system.box_l[1] + 1)):
for z in range(int(self.system.box_l[2] + 1)):
v_tmp.append(self.lbf[x, y, z].velocity[2])
velocities[x, 1] = np.mean(np.array(v_tmp))
velocities[x, 0] = (x + 0.5) * AGRID
v_measured = velocities[1:-1, 1]
v_expected = poiseuille_flow(velocities[1:-1,
0] - 0.5 * self.system.box_l[
0],
self.system.box_l[0] - 2.0 * AGRID,
EXT_FORCE,
VISC * DENS)
rmsd = np.sqrt(np.sum(np.square(v_expected - v_measured)))
self.assertLess(rmsd, 0.02 * AGRID / TIME_STEP)
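# The tolerance 0.02 * AGRID / TIME_STEP is 2% of the lattice speed
# (0.25 / 0.1 = 2.5), i.e. the RMSD must stay below 0.05 in simulation units.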
@ut.skipIf(not espressomd.has_features(
['LB', 'LB_BOUNDARIES', 'EXTERNAL_FORCES']), "Skipping test due to missing features.")
class LBCPUPoiseuille(ut.TestCase, LBPoiseuilleCommon):
"""Test for the CPU implementation of the LB."""
def setUp(self):
self.lbf = espressomd.lb.LBFluid(**LB_PARAMS)
@ut.skipIf(not espressomd.has_features(
['LB_GPU', 'LB_BOUNDARIES_GPU', 'EXTERNAL_FORCES']), "Skipping test due to missing features.")
class LBGPUPoiseuille(ut.TestCase, LBPoiseuilleCommon):
"""Test for the GPU implementation of the LB."""
def setUp(self):
self.lbf = espressomd.lb.LBFluidGPU(**LB_PARAMS)
if __name__ == '__main__':
ut.main()
|
hmenke/espresso
|
testsuite/python/lb_poiseuille.py
|
Python
|
gpl-3.0
| 4,895
|
[
"ESPResSo"
] |
d6fba8b93ca732cebcf79b46c702d025a49c0fc3a8ed0b86346e8da9bb329c97
|
# -*- coding: utf-8 -*-
#
# mpitest_issue_578_sp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
This test is called from test_mpitests.py
"""
import nest
import sys
import traceback
HAVE_GSL = nest.ll_api.sli_func("statusdict/have_gsl ::")
class TestIssue578():
def test_targets(self):
nest.ResetKernel()
nest.set_verbosity('M_ALL')
# Testing with 2 MPI processes
nest.SetKernelStatus(
{
'resolution': 0.1,
'total_num_virtual_procs': 2
}
)
# Update the SP interval
nest.EnableStructuralPlasticity()
nest.SetKernelStatus({
'structural_plasticity_update_interval': 1000.,
})
growth_curve = {
'growth_curve': "gaussian",
'growth_rate': 0.0001, # Beta (elements/ms)
'continuous': False,
'eta': 0.1,
'eps': 0.7,
}
structural_p_elements_E = {
'Den_ex': growth_curve,
'Den_in': growth_curve,
'Axon_ex': growth_curve
}
neuronDict = {'V_m': -60.,
't_ref': 5.0, 'V_reset': -60.,
'V_th': -50., 'C_m': 200.,
'E_L': -60., 'g_L': 10.,
'E_ex': 0., 'E_in': -80.,
'tau_syn_ex': 5., 'tau_syn_in': 10.,
'I_e': 220.}
nest.SetDefaults("iaf_cond_exp", neuronDict)
neuronsE = nest.Create('iaf_cond_exp', 1, {
'synaptic_elements': structural_p_elements_E})
# synapses
synDictE = {'synapse_model': 'static_synapse',
'weight': 3.,
'pre_synaptic_element': 'Axon_ex',
'post_synaptic_element': 'Den_ex'}
nest.SetKernelStatus({
'structural_plasticity_synapses': {
'synapseEE': synDictE,
}
})
try:
nest.Simulate(200 * 1000)
except Exception:
print(sys.exc_info()[0])
self.fail("Exception during simulation")
# We cannot define the regular suite() and runner() functions here, because
# the test would not show up as failed in the testsuite if it failed. This is
# because the test is called from test_mpitests, and the unittest system in
# test_mpitests will only register the failing test if we call this test
# directly.
if HAVE_GSL:
mpitest = TestIssue578()
mpitest.test_targets()
else:
print("Skipping because GSL is not available")
|
SepehrMN/nest-simulator
|
pynest/nest/tests/test_sp/mpitest_issue_578_sp.py
|
Python
|
gpl-2.0
| 3,195
|
[
"Gaussian"
] |
e0963d0fd0cd1ee628029dcffdcc4693023a5a8d6da6a91889c560fdbf4f8b2f
|
#!/usr/bin/python
#
# This program is called mconvert2gpx.py. It finds the
# files that match *.TES or *.CSV and converts them to
# gpx and kmz files.
#
# Import modules
#
# 2015 Aug 13 Frank Monaldo Add *CSV files from Columbus v990 receiver
import glob
import os
#
# Find TES and CSV files
inputfiles = glob.glob('*.TES') + glob.glob('*.CSV')
#
# Convert files to gpx and kmz
#
for file in inputfiles:
cmd = 'convert2gpx.py ' + file
os.system(cmd)
|
fmonaldo/gpssoftware
|
mconvert2gpx.py
|
Python
|
cc0-1.0
| 445
|
[
"COLUMBUS"
] |
ad921de49b8b36b1a48fcd8269b8dac95a5abb4dda493b7650d218c87c4e114a
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pickle
import argparse
from pyspark.sql.functions import array
from zoo.orca import init_orca_context
from zoo.orca.learn.tf2.estimator import Estimator
from zoo.friesian.feature import FeatureTable
from model import *
spark_conf = {"spark.network.timeout": "10000000",
"spark.sql.broadcastTimeout": "7200",
"spark.sql.shuffle.partitions": "2000",
"spark.locality.wait": "0s",
"spark.sql.hive.filesourcePartitionFileCacheSize": "4096000000",
"spark.sql.crossJoin.enabled": "true",
"spark.serializer": "org.apache.spark.serializer.KryoSerializer",
"spark.kryo.unsafe": "true",
"spark.kryoserializer.buffer.max": "1024m",
"spark.task.cpus": "1",
"spark.executor.heartbeatInterval": "200s",
"spark.driver.maxResultSize": "40G",
"spark.eventLog.enabled": "true",
"spark.app.name": "recsys-2tower",
"spark.executor.memoryOverhead": "120g"}
def train(config, train_tbl, test_tbl, epochs=1, batch_size=128, model_dir='.'):
two_tower = TwoTowerModel(config["user_col_info"], config["item_col_info"])
def model_creator(config):
model = two_tower.build_model()
print(model.summary())
optimizer = tf.keras.optimizers.Adam(config["lr"])
model.compile(optimizer=optimizer,
loss='binary_crossentropy',
metrics=['binary_accuracy', 'Recall', 'AUC'])
return model
estimator = Estimator.from_keras(model_creator=model_creator,
verbose=False,
config=config)
callbacks = []
from tensorflow.keras.callbacks import EarlyStopping
callbacks.append(EarlyStopping(monitor='val_auc', mode='max', verbose=1, patience=5))
train_count, test_count = train_tbl.size(), test_tbl.size()
train_df, test_df = train_tbl.df, test_tbl.df
steps_per_epoch = math.ceil(train_count / batch_size)
val_steps = math.ceil(test_count / batch_size)
feature_cols = config["user_col_info"].get_name_list() + config["item_col_info"].get_name_list()
print("Total number of train records: {}".format(train_count))
print("Total number of val records: {}".format(test_count))
estimator.fit(train_df, epochs=epochs, batch_size=batch_size,
feature_cols=feature_cols,
label_cols=['label'],
callbacks=callbacks,
validation_data=test_df,
steps_per_epoch=steps_per_epoch,
validation_steps=val_steps)
model = estimator.get_model()
user_model = get_1tower_model(model, two_tower.user_col_info)
item_model = get_1tower_model(model, two_tower.item_col_info)
tf.saved_model.save(model, os.path.join(model_dir, "twotower-model"))
tf.saved_model.save(user_model, os.path.join(model_dir, "user-model"))
tf.saved_model.save(item_model, os.path.join(model_dir, "item-model"))
estimator.save(os.path.join(model_dir, "twotower_model.ckpt"))
print("saved models")
return estimator
def prepare_features(train_tbl, test_tbl, reindex_tbls):
def add_ratio_features(tbl):
cal_ratio = (lambda x: x[1] / x[0] if x[0] > 0 else 0.0)
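# For example (hypothetical values), cal_ratio([200.0, 50.0]) == 0.25, i.e.
# the following/follower ratio; the guard avoids division by zero when the
# follower count is 0.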
tbl = tbl.apply(["engaged_with_user_follower_count", "engaged_with_user_following_count"],
"engaged_with_user_follower_following_ratio", cal_ratio, "float")\
.apply(["enaging_user_follower_count", "enaging_user_following_count"],
"enaging_user_follower_following_ratio", cal_ratio, "float")
return tbl
def organize_cols(tbl):
tbl = tbl.select(array("enaging_user_follower_count", "enaging_user_following_count",
"enaging_user_follower_following_ratio").alias("user_num"),
array("len_hashtags", "len_domains", "len_links",
"engaged_with_user_follower_count",
"engaged_with_user_following_count",
"engaged_with_user_follower_following_ratio").alias("item_num"),
*cat_cols, *embed_cols, "label")
return tbl
print("reindexing embedding cols")
train_tbl = train_tbl.reindex(embed_cols, reindex_tbls)
test_tbl = test_tbl.reindex(embed_cols, reindex_tbls)
embed_in_dims = {}
for i, c in enumerate(embed_cols):
embed_in_dims[c] = max(reindex_tbls[i].df.agg({c+"_new": "max"}).collect()[0])
print("add ratio features")
train_tbl = add_ratio_features(train_tbl)
test_tbl = add_ratio_features(test_tbl)
print("scale numerical features")
train_tbl, min_max_dic = train_tbl.min_max_scale(num_cols + ratio_cols)
test_tbl = test_tbl.transform_min_max_scale(num_cols + ratio_cols, min_max_dic)
with open(os.path.join(args.model_dir, "stats/min_max.pkl"), 'wb') as f:
pickle.dump(min_max_dic, f)
user_col_info = ColumnInfoTower(indicator_cols=["enaging_user_is_verified"],
indicator_dims=[2],
embed_cols=["enaging_user_id"],
embed_in_dims=[embed_in_dims["enaging_user_id"]],
embed_out_dims=[16],
numerical_cols=["user_num"],
numerical_dims=[3],
name="user")
item_col_info = ColumnInfoTower(indicator_cols=["engaged_with_user_is_verified",
"present_media", "tweet_type", "language"],
indicator_dims=[2, 13, 3, 67], # max + 1
embed_cols=["engaged_with_user_id", "hashtags",
"present_links", "present_domains"],
embed_in_dims=[embed_in_dims["engaged_with_user_id"],
embed_in_dims["hashtags"],
embed_in_dims["present_links"],
embed_in_dims["present_domains"]],
embed_out_dims=[16, 16, 16, 16],
numerical_cols=["item_num"],
numerical_dims=[6],
name="item")
print("organize columns and specify user_col_info and item_col_info")
train_tbl = organize_cols(train_tbl)
test_tbl = organize_cols(test_tbl)
return train_tbl, test_tbl, user_col_info, item_col_info
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Two Tower Training/Inference')
parser.add_argument('--cluster_mode', type=str, default="local",
help='The cluster mode, such as local, yarn or standalone.')
parser.add_argument('--master', type=str, default=None,
help='The master url, only used when cluster mode is standalone.')
parser.add_argument('--executor_cores', type=int, default=8,
help='The executor core number.')
parser.add_argument('--executor_memory', type=str, default="160g",
help='The executor memory.')
parser.add_argument('--num_executor', type=int, default=8,
help='The number of executor.')
parser.add_argument('--driver_cores', type=int, default=4,
help='The driver core number.')
parser.add_argument('--driver_memory', type=str, default="36g",
help='The driver memory.')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
parser.add_argument('--epochs', default=1, type=int, help='train epoch')
parser.add_argument('--batch_size', default=8000, type=int, help='batch size')
parser.add_argument('--model_dir', default='snapshot', type=str,
help='snapshot directory name (default: snapshot)')
parser.add_argument('--data_dir', type=str, help='data directory')
parser.add_argument('--frequency_limit', type=int, default=25, help='frequency limit')
args = parser.parse_args()
if args.cluster_mode == "local":
sc = init_orca_context("local", init_ray_on_spark=True)
elif args.cluster_mode == "standalone":
sc = init_orca_context("standalone", master=args.master,
cores=args.executor_cores, num_nodes=args.num_executor,
memory=args.executor_memory,
driver_cores=args.driver_cores, driver_memory=args.driver_memory,
conf=spark_conf,
init_ray_on_spark=True)
elif args.cluster_mode == "yarn":
sc = init_orca_context("yarn-client", cores=args.executor_cores,
num_nodes=args.num_executor, memory=args.executor_memory,
driver_cores=args.driver_cores, driver_memory=args.driver_memory,
conf=spark_conf, extra_python_lib="two_tower.py",
object_store_memory="80g",
init_ray_on_spark=True)
elif args.cluster_mode == "spark-submit":
sc = init_orca_context("spark-submit")
num_cols = ["enaging_user_follower_count", 'enaging_user_following_count',
"engaged_with_user_follower_count", "engaged_with_user_following_count",
"len_hashtags", "len_domains", "len_links", "hashtags", "present_links",
"present_domains"]
cat_cols = ["engaged_with_user_is_verified", "enaging_user_is_verified",
"present_media", "tweet_type", "language"]
ratio_cols = ["engaged_with_user_follower_following_ratio",
"enaging_user_follower_following_ratio"]
embed_cols = ["enaging_user_id", "engaged_with_user_id", "hashtags", "present_links",
"present_domains"]
useful_cols = num_cols + cat_cols + embed_cols
train_tbl = FeatureTable.read_parquet(args.data_dir + "/train_parquet")
test_tbl = FeatureTable.read_parquet(args.data_dir + "/test_parquet")
full_tbl = train_tbl.concat(test_tbl, "outer")
reindex_tbls = full_tbl.gen_reindex_mapping(embed_cols, freq_limit=args.frequency_limit)
train_tbl, test_tbl, user_info, item_info = prepare_features(train_tbl, test_tbl, reindex_tbls)
output_dir = args.data_dir + "/embed_reindex"
for i, c in enumerate(embed_cols):
reindex_tbls[i].write_parquet(output_dir + "_" + c)
train_config = {"lr": 1e-3,
"user_col_info": user_info,
"item_col_info": item_info,
"inter_op_parallelism": 4,
"intra_op_parallelism": args.executor_cores}
train(train_config, train_tbl, test_tbl, epochs=args.epochs, batch_size=args.batch_size,
model_dir=args.model_dir)
|
intel-analytics/analytics-zoo
|
pyzoo/zoo/examples/friesian/two_tower/train_2tower.py
|
Python
|
apache-2.0
| 11,763
|
[
"ORCA"
] |
629f66db7b9dd313b11f9940196169cbe3095e37415070764758c4aafea91ba4
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import requests
import logging
import time
import ConfigParser
import os
import base64
import re
logger = logging.getLogger('data')
USER_AGENT = "Andrew Head (for academic research) <andrewhead@eecs.berekeley.edu>"
default_requests_session = requests.Session()
default_requests_session.headers['User-Agent'] = USER_AGENT
GITHUB_PAGE_SIZE = 100 # the maximum page size for many GitHub queries
def make_request(method, *args, **kwargs):
# We read (and remove) the max_attempts and retry_delay arguments from the
# kwargs dictionary instead of named kwargs because we want to preserve the
# order of the "request" method's positional arguments for clients of this
# method. Popping them keeps them from leaking into the wrapped call.
max_attempts = kwargs.pop('max_attempts', 2)
retry_delay = kwargs.pop('retry_delay', 10)
try_again = True
attempts = 0
res = None
def log_error(err_msg):
logger.warn(
"Error (%s) For API call %s, Args: %s, Kwargs: %s",
str(err_msg), str(method), str(args), str(kwargs)
)
while try_again and attempts < max_attempts:
try:
res = method(*args, **kwargs)
if hasattr(res, 'status_code') and res.status_code not in [200]:
log_error(str(res.status_code))
res = None
try_again = False
except requests.exceptions.ConnectionError:
log_error("ConnectionError")
except requests.exceptions.ReadTimeout:
log_error("ReadTimeout")
if try_again:
logger.warn("Waiting %d seconds before retrying.", int(retry_delay))
time.sleep(retry_delay)
attempts += 1
return res
'''
A class for connecting to the GitHub API.
Users must have their GitHub API credentials stored at ~/.github/github.cfg
'''
github_config = ConfigParser.ConfigParser()
github_config.read(os.path.expanduser(os.path.join('~', '.github', 'github.cfg')))
github_username = github_config.get('auth', 'username')
github_password = github_config.get('auth', 'password')
# Define a dedicated session for GitHub API calls, for
# which we can set parameters like the page size.
GITHUB_API_URL = 'https://api.github.com'
GITHUB_DELAY = 1 # Max request rate: 5000 / hour -> (1 request / .72s)
github_session = requests.Session()
github_session.headers['User-Agent'] = USER_AGENT
github_session.headers['Authorization'] =\
"Basic " + base64.b64encode(github_username + ':' + github_password)
github_session.params = {
'per_page': GITHUB_PAGE_SIZE
}
def _get_next_page_url(response):
# If there is no "Link" header, then there is no next page
if 'Link' not in response.headers:
return None
# Extract the next URL from the Link header.
next_url = None
next_url_match = re.match("<([^>]*)>; rel=\"next\",", response.headers['Link'])
if next_url_match is not None:
next_url = next_url_match.group(1)
return next_url
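# Hedged illustration (the fake response class and header value below are
# ours, modeled on GitHub's documented Link header format): shows what
# _get_next_page_url extracts from a paginated response.
def _next_page_url_example():
    class _FakeResponse(object):
        headers = {'Link': '<https://api.github.com/repos?page=2>; rel="next", '
                           '<https://api.github.com/repos?page=5>; rel="last"'}
    assert _get_next_page_url(_FakeResponse()) == 'https://api.github.com/repos?page=2'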
def github_get(start_url, results_callback, delay=GITHUB_DELAY, *args, **kwargs):
# Make the first request to the GitHub API
response = make_request(github_session.get, start_url, *args, **kwargs)
# Notify the calling routine via a callback that results have been returned
if response is not None:
results_callback(response.json())
# While there is another page to visit, continue to query the GitHub API
# until there are no more links to follow. After each round of results,
# notify the caller of the partial results from that page.
next_url = _get_next_page_url(response)
while response is not None and next_url is not None:
response = make_request(github_session.get, next_url)
if response is not None:
results_callback(response.json())
next_url = _get_next_page_url(response)
time.sleep(delay)
|
andrewhead/Package-Qualifiers
|
fetch/api.py
|
Python
|
mit
| 3,957
|
[
"VisIt"
] |
c43a4a5313febe4a2a60a14843f583f9655dbd5cc9bf2d5da25a188bf57c977d
|
#! /usr/bin/env python
# ==========================================================================
# This script generates the TS distribution for a particular model based
# on Monte-Carlo simulations.
#
# Copyright (C) 2011-2013 Jurgen Knodlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
from ctools import *
from gammalib import *
import obsutils
import sys
import csv
import math
# ============== #
# cstsdist class #
# ============== #
class cstsdist(GApplication):
"""
This class implements the TS distribution generation script. It derives
from the GammaLib::GApplication class which provides support for parameter
files, command line arguments, and logging. In that way the Python
script behaves just as a regular ctool.
"""
def __init__(self, *argv):
"""
Constructor.
"""
# Set name
self.name = "cstsdist"
self.version = "0.1.0"
# Initialise some members
self.obs = None
self.bkg_model = None
self.full_model = None
# Make sure that parfile exists
file = self.parfile()
# Initialise application
if len(argv) == 0:
GApplication.__init__(self, self.name, self.version)
elif len(argv) == 1:
GApplication.__init__(self, self.name, self.version, *argv)
else:
raise TypeError("Invalid number of arguments given.")
# Set logger properties
self.log_header()
self.log.date(True)
# Return
return
def __del__(self):
"""
Destructor.
"""
# Write separator into logger
if self.logTerse():
self.log("\n")
# Return
return
def parfile(self):
"""
Check if parfile exists. If parfile does not exist then create a
default parfile. This kluge avoids shipping the cscript with a parfile.
"""
# Set parfile name
parfile = self.name+".par"
try:
pars = GPars(parfile)
except:
# Signal if parfile was not found
sys.stdout.write("Parfile "+parfile+" not found. Create default parfile.\n")
# Create default parfile
pars = GPars()
pars.append(GPar("outfile","f","h","ts.dat","","","Output file name"))
pars.append(GPar("ntrials","i","a","10","","","Number of trials"))
pars.append(GPar("caldb","s","a","$GAMMALIB/share/caldb/cta","","","Calibration database"))
pars.append(GPar("irf","s","a","cta_dummy_irf","","","Instrument response function"))
pars.append(GPar("type","s","a","point","","","Source model type (point/gauss/shell/disk)"))
pars.append(GPar("index","r","h","-2.48","","","Spectral index"))
pars.append(GPar("offset","r","a","0.0","0.0","","Source offset angle (deg)"))
pars.append(GPar("bkg","s","a","$GAMMALIB/share/models/bkg_dummy.txt","","","Background model file function (none=power law for E)"))
pars.append(GPar("emin","r","a","0.1","0.0","","Lower energy limit (TeV)"))
pars.append(GPar("emax","r","a","100.0","0.0","","Upper energy limit (TeV)"))
pars.append(GPar("enumbins","i","a","0","","","Number of energy bins (0=unbinned)"))
pars.append(GPar("duration","r","a","180000.0","","","Effective exposure time (s)"))
pars.append(GPar("rad","r","h","5.0","","","Radius of ROI (deg)"))
pars.append(GPar("npix","i","h","200","","","Number of pixels for binned"))
pars.append(GPar("binsz","r","h","0.05","","","Pixel size for binned (deg/pixel)"))
pars.append_standard()
pars.save(parfile)
# Return
return
def get_parameters(self):
"""
Get parameters from parfile and setup the observation.
"""
# Get parameters
self.m_outfile = self["outfile"].filename()
self.m_ntrials = self["ntrials"].integer()
self.m_caldb = self["caldb"].string()
self.m_irf = self["irf"].string()
self.m_type = self["type"].string()
self.m_index = self["index"].real()
self.m_offset = self["offset"].real()
self.m_bkg = self["bkg"].string()
self.m_emin = self["emin"].real()
self.m_emax = self["emax"].real()
self.m_enumbins = self["enumbins"].integer()
self.m_duration = self["duration"].real()
self.m_rad = self["rad"].real()
self.m_npix = self["npix"].integer()
self.m_binsz = self["binsz"].real()
# Set some fixed parameters
self.m_log = False # Logging in client tools
self.m_debug = False # Debugging in client tools
# Setup observation
self.obs = self.set_obs(emin=self.m_emin, emax=self.m_emax)
# Initialise models. Note that we centre the point source at the Galactic
# centre as our observation is also centred at the Galactic centre, so
# we're on-axis.
self.bkg_model = GModels()
self.full_model = GModels()
self.bkg_model.append(self.set_bkg_model())
self.full_model.append(self.set_bkg_model())
self.full_model.append(self.set_src_model(0.0, self.m_offset, \
flux=0.010, \
type=self.m_type, \
index=self.m_index))
# Attach background model to observation container
self.obs.models(self.bkg_model)
# Return
return
def models(self, models):
"""
Set model.
"""
# Copy models
self.model = models.copy()
# Return
return
def execute(self):
"""
Execute the script.
"""
# Run the script
self.run()
# Return
return
def run(self):
"""
Run the script.
"""
# Switch screen logging on in debug mode
if self.logDebug():
self.log.cout(True)
# Get parameters
self.get_parameters()
# Write input parameters into logger
if self.logTerse():
self.log_parameters()
self.log("\n")
# Write observation into logger
if self.logTerse():
self.log("\n")
self.log.header1("Observation")
self.log(str(self.obs))
self.log("\n")
# Write models into logger
if self.logTerse():
self.log("\n")
self.log.header1("Test model")
self.log(str(self.full_model))
self.log("\n")
# Write header
if self.logTerse():
self.log("\n")
self.log.header1("Generate TS distribution")
# Loop over trials
for seed in range(self.m_ntrials):
# Make a trial
result = self.trial(seed)
# Write out result immediately. Create the file and write the header row
# on the first trial; append the values row on every trial (including the
# first).
if seed == 0:
    file = open(self.m_outfile, 'w')
    writer = csv.DictWriter(file, result['colnames'])
    writer.writerow(dict((_, _) for _ in result['colnames']))
else:
    file = open(self.m_outfile, 'a')
    writer = csv.DictWriter(file, result['colnames'])
writer.writerow(result['values'])
file.close()
# Return
return
def set_obs(self, lpnt=0.0, bpnt=0.0, emin=0.1, emax=100.0):
"""
Returns an observation container with a single CTA observation.
Keywords:
lpnt - Galactic longitude of pointing [deg] (default: 0.0)
bpnt - Galactic latitude of pointing [deg] (default: 0.0)
emin - Minimum energy [TeV] (default: 0.1)
emax - Maximum energy [TeV] (default: 100.0)
"""
# Allocate observation container
obs = GObservations()
# Set single pointing
pntdir = GSkyDir()
pntdir.lb_deg(lpnt, bpnt)
# Create CTA observation
run = obsutils.set(pntdir, caldb=self.m_caldb, irf=self.m_irf, \
duration=self.m_duration, \
emin=emin, emax=emax, rad=self.m_rad)
# Append observation to container
obs.append(run)
# Return observation container
return obs
def set_bkg_model(self, fitsigma=False):
"""
Setup CTA background model.
"""
# Define radial component
radial = GCTAModelRadialGauss(3.0)
if fitsigma:
radial["Sigma"].free()
else:
radial["Sigma"].fix()
# Define spectral component
spectrum = GModelSpectralFunc(self.m_bkg, 1.0)
# Create background model
model = GCTAModelRadialAcceptance(radial, spectrum)
model.name("Background")
model.instruments("CTA")
# Return background model
return model
def set_src_model(self, l, b, flux=1.0, index=-2.48, \
type="point", sigma=1.0, radius=1.0, width=0.1, \
fitpos=False, fitidx=False):
"""
Returns a single source with Crab-like spectrum. The source flux
can be scaled in Crab units. The Crab spectrum is based on MAGIC
observations (Albert et al. 2008, ApJ, 674, 1037).
Parameters:
l - Galactic longitude of source location [deg]
b - Galactic latitude of source location [deg]
Keywords:
flux - Source flux [Crabs]
index - Spectral index
type - Source type ("point", "gauss", "disk", "shell")
sigma - Gaussian sigma (for type="gauss")
radius - Disk or shell inner radius [deg] (for type="disk" and type="shell")
width - Shell width [deg] (for type="shell")
fitpos - Fit position? (default: False)
fitidx - Fit index? (default: False)
"""
# Set source location
location = GSkyDir()
location.lb_deg(l, b)
# Set source spectrum
spectrum = GModelSpectralPlaw(flux*5.7e-16, index, GEnergy(0.3, "TeV"))
if fitidx:
spectrum["Index"].free()
else:
spectrum["Index"].fix()
# Set source
if type == "point":
spatial = GModelSpatialPointSource(location)
if fitpos:
spatial[0].free()
spatial[1].free()
elif type == "gauss":
spatial = GModelSpatialRadialGauss(location, sigma)
if fitpos:
spatial[0].free()
spatial[1].free()
elif type == "disk":
spatial = GModelSpatialRadialDisk(location, radius)
if fitpos:
spatial[0].free()
spatial[1].free()
elif type == "shell":
spatial = GModelSpatialRadialShell(location, radius, width)
if fitpos:
spatial[0].free()
spatial[1].free()
else:
self.log("ERROR: Unknown source type '"+type+"'.\n")
return None
source = GModelSky(spatial, spectrum)
# Set source name
source.name("Test")
# Return source
return source
def trial(self, seed):
"""
Create the TS for a single trial.
Parameters:
seed - Random number generator seed
"""
# Write header
if self.logExplicit():
self.log.header2("Trial "+str(seed+1))
# Simulate events
sim = obsutils.sim(self.obs, \
nbins=self.m_enumbins, \
seed=seed, \
binsz=self.m_binsz, \
npix=self.m_npix, \
log=self.m_log, debug=self.m_debug)
# Determine number of events in simulation
nevents = 0.0
for run in sim:
nevents += run.events().number()
# Write simulation results
if self.logExplicit():
self.log.header3("Simulation")
self.log.parformat("Number of simulated events")
self.log(nevents)
self.log("\n")
# Fit background only
sim.models(self.bkg_model)
like_bgm = obsutils.fit(sim, log=self.m_log, debug=self.m_debug)
result_bgm = like_bgm.obs().models()
LogL_bgm = like_bgm.opt().value()
npred_bgm = like_bgm.obs().npred()
# Write background fit results
if self.logExplicit():
self.log.header3("Background model fit")
self.log.parformat("log likelihood")
self.log(LogL_bgm)
self.log("\n")
self.log.parformat("Number of predicted events")
self.log(npred_bgm)
self.log("\n")
for model in result_bgm:
self.log.parformat("Model")
self.log(model.name())
self.log("\n")
for par in model:
self.log(str(par)+"\n")
# Fit background and test source
sim.models(self.full_model)
like_all = obsutils.fit(sim, log=self.m_log, debug=self.m_debug)
result_all = like_all.obs().models()
LogL_all = like_all.opt().value()
npred_all = like_all.obs().npred()
ts = 2.0*(LogL_bgm-LogL_all)
# Write background and test source fit results
if self.logExplicit():
self.log.header3("Background and test source model fit")
self.log.parformat("Test statistics")
self.log(ts)
self.log("\n")
self.log.parformat("log likelihood")
self.log(LogL_all)
self.log("\n")
self.log.parformat("Number of predicted events")
self.log(npred_all)
self.log("\n")
for model in result_all:
self.log.parformat("Model")
self.log(model.name())
self.log("\n")
for par in model:
self.log(str(par)+"\n")
# Write result
elif self.logTerse():
self.log.parformat("Trial "+str(seed))
self.log("TS=")
self.log(ts)
self.log(" Prefactor=")
self.log(result_all["Test"]["Prefactor"].value())
self.log("+/-")
self.log(result_all["Test"]["Prefactor"].error())
self.log("\n")
# Initialise results
colnames = []
values = {}
# Set TS value
colnames.append("TS")
values["TS"] = ts
# Set logL for background fit
colnames.append("LogL_bgm")
values["LogL_bgm"] = LogL_bgm
# Set logL for full fit
colnames.append("LogL_all")
values["LogL_all"] = LogL_all
# Set Nevents
colnames.append("Nevents")
values["Nevents"] = nevents
# Set Npred for background fit
colnames.append("Npred_bkg")
values["Npred_bkg"] = npred_bgm
# Set Npred for full fit
colnames.append("Npred_all")
values["Npred_all"] = npred_all
# Gather free full fit parameters
for i in range(result_all.size()):
model = result_all[i]
model_name = model.name()
for k in range(model.size()):
par = model[k]
if par.isfree():
# Set parameter name
name = model_name+"_"+par.name()
# Append value
colnames.append(name)
values[name] = par.value()
# Append error
name = "Unc_"+name
colnames.append(name)
values[name] = par.error()
# Bundle together results
result = {'colnames': colnames, 'values': values}
# Return
return result
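# Illustrative helper sketch (ours, not part of the original script): trial()
# computes the test statistic as twice the difference between the optimiser
# values of the background-only and background-plus-source fits.
def _ts_from_loglikelihoods(logl_bgm, logl_all):
    return 2.0 * (logl_bgm - logl_all)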
# ======================== #
# Main routine entry point #
# ======================== #
if __name__ == '__main__':
"""
Generates TS distribution.
"""
# Create instance of application
app = cstsdist(sys.argv)
# Open logfile
app.logFileOpen()
# Execute application
app.execute()
|
cdeil/ctools
|
scripts/cstsdist.py
|
Python
|
gpl-3.0
| 14,295
|
[
"Gaussian"
] |
f6602f81e0943db22f87694f968e8bea75ce6cdd24af0acf5b9a9fe6a56407c9
|
#!/usr/bin/env python
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
"""@brief Example script for generating the examples/input/galsim_default_input.asc default input
catalog file.
Generates a 10 x 10 grid of galaxy postage stamps, each of size 48 pixels.
"""
import os
import sys
import numpy as np
# This machinery lets us run Python examples even though they aren't positioned
# properly to find galsim as a package in the current directory.
try:
import galsim
except ImportError:
path, filename = os.path.split(__file__)
sys.path.append(os.path.abspath(os.path.join(path, "..", "..")))
import galsim
# Some fixed parameter values for the catalogue
STAMPSIZE = 48 # Postage stamp size in pixels
GRIDSIZE = 10 # Number of postage stamps per square side of image
MOFFAT_BETA = 3.5 # } Adopt GREAT08 PSF values for historicity!
MOFFAT_FWHM = 2.85 # }
MOFFAT_E1 = -0.019 # }
MOFFAT_E2 = -0.007 # }
MOFFAT_TRUNCATIONFWHM = 2. # }
EXPONENTIAL_HLR = 0.82 * MOFFAT_FWHM # } Again, things are slightly more complex than this for
DEVAUCOULEURS_HLR = 1.59 * MOFFAT_FWHM # } actual GREAT08 images, but this is a starting example
# } to adopt.
EXPONENTIAL_DEVAUCOULEURS_SIGMA_E = 0.3 # } Approximate the ellipticity distribution as a Gaussian
# } with this sigma.
GAL_CENTROID_SHIFT_RADIUS = 1.0 # Set the radius of centroid shifts (uniform within unit circle).
GAL_CENTROID_SHIFT_RADIUS_SQUARED = GAL_CENTROID_SHIFT_RADIUS**2
RNG_SEED = 1848
NOBJECTS = GRIDSIZE * GRIDSIZE
def make_default_input():
# Set the PSF catalogue values
moffat_beta = np.zeros(NOBJECTS) + MOFFAT_BETA
moffat_fwhm = np.zeros(NOBJECTS) + MOFFAT_FWHM
moffat_e1 = np.zeros(NOBJECTS) + MOFFAT_E1
moffat_e2 = np.zeros(NOBJECTS) + MOFFAT_E2
moffat_trunc = np.zeros(NOBJECTS) + MOFFAT_TRUNCATIONFWHM * MOFFAT_FWHM
# Then set the exponential disc catalogue fixed values
exponential_hlr = np.zeros(NOBJECTS) + EXPONENTIAL_HLR
# Then set the dVc bulge catalogue fixed values
devaucouleurs_hlr = np.zeros(NOBJECTS) + DEVAUCOULEURS_HLR
# Then set up the Gaussian RNG for making the ellipticity values
urng = galsim.UniformDeviate(RNG_SEED)
edist = galsim.GaussianDeviate(urng, sigma=EXPONENTIAL_DEVAUCOULEURS_SIGMA_E)
# Slightly hokey way of making vectors of Gaussian deviates, using images... No direct NumPy
# array-filling with galsim RNGs at the moment.
#
# In GREAT08 these galaxy ellipticies were made in rotated pairs to reduce shape noise, but for
# this illustrative default file we do not do this.
ime1 = galsim.ImageD(NOBJECTS, 1)
ime1.addNoise(edist)
exponential_e1 = ime1.array.flatten()
ime2 = galsim.ImageD(NOBJECTS, 1)
ime2.addNoise(edist)
exponential_e2 = ime2.array.flatten()
# Make galaxies co-elliptical
devaucouleurs_e1 = exponential_e1
devaucouleurs_e2 = exponential_e2
# Add a centroid shift drawn uniformly at random from the unit circle around (0., 0.)
dx = np.empty(NOBJECTS)
dy = np.empty(NOBJECTS)
for i in xrange(NOBJECTS):
# Apply a random centroid shift:
rsq = 2 * GAL_CENTROID_SHIFT_RADIUS_SQUARED
while (rsq > GAL_CENTROID_SHIFT_RADIUS_SQUARED):
dx[i] = (2. * urng() - 1.) * GAL_CENTROID_SHIFT_RADIUS
dy[i] = (2. * urng() - 1.) * GAL_CENTROID_SHIFT_RADIUS
rsq = dx[i]**2 + dy[i]**2
# Then write this to file
path, modfile = os.path.split(__file__)
outfile = os.path.join(path, "galsim_default_input.asc")
# Make a nice header with the default fields described
header = ("# psf.beta psf.fwhm psf.e1 psf.e2 psf.trunc"+
" disk.hlr disk.e1 disk.e2"+
" bulge.hlr bulge.e1 bulge.e2"+
" gal.shift.dx gal.shift.dy \n")
# Open the file and output the columns in the correct order, row-by-row
output = open(outfile, "w")
output.write("# galsim_default_input.asc : illustrative default input catalog for GalSim\n")
output.write("#\n")
output.write(header)
for i in xrange(NOBJECTS):
outline = (" %6.2f %6.2f %7.3f %7.3f %6.2f %6.2f %14.7f %14.7f "+
"%6.2f %14.7f %14.7f %14.7f %14.7f\n") % \
(moffat_beta[i], moffat_fwhm[i], moffat_e1[i], moffat_e2[i], moffat_trunc[i],
exponential_hlr[i], exponential_e1[i], exponential_e2[i],
devaucouleurs_hlr[i], devaucouleurs_e1[i], devaucouleurs_e2[i], dx[i], dy[i])
output.write(outline)
output.close()
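# Hedged sketch of the image-based workaround used above (the helper name is
# ours; it reuses only GalSim calls already present in this script): fill a
# 1-D image with Gaussian noise and flatten it to get a vector of deviates.
def _gaussian_deviate_vector(n, sigma, seed=RNG_SEED):
    urng = galsim.UniformDeviate(seed)
    gd = galsim.GaussianDeviate(urng, sigma=sigma)
    im = galsim.ImageD(n, 1)
    im.addNoise(gd)
    return im.array.flatten()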
if __name__ == "__main__":
make_default_input()
|
mardom/GalSim
|
examples/input/make_default_input.py
|
Python
|
gpl-3.0
| 5,442
|
[
"Galaxy",
"Gaussian"
] |
880cef3694f101f203887bb5b42480d57830801474b8bcff9562a8a8fd04ef92
|
'''
=========================================
Inference for Non-Linear Gaussian Systems
=========================================
This module contains "Square Root" implementations of the Unscented Kalman
Filter. Square Root implementations typically propagate the mean and Cholesky
factorization of the covariance matrix in order to prevent numerical error.
When possible, Square Root implementations should be preferred to their
standard counterparts.
References
----------
* Terejanu, G.A. Towards a Decision-Centric Framework for Uncertainty
Propagation and Data Assimilation. 2010. Page 108.
* Van Der Merwe, R. and Wan, E.A. The Square-Root Unscented Kalman Filter for
State and Parameter-Estimation. 2001.
'''
import numpy as np
from numpy import ma
from scipy import linalg
from ..utils import array1d, array2d, check_random_state
from ..standard import _last_dims, _arg_or_default
from ..unscented import AdditiveUnscentedKalmanFilter as AUKF, \
SigmaPoints, Moments
def _reconstruct_covariances(covariance2s):
'''Reconstruct covariance matrices given their cholesky factors'''
if len(covariance2s.shape) == 2:
covariance2s = covariance2s[np.newaxis, :, :]
T = covariance2s.shape[0]
covariances = np.zeros(covariance2s.shape)
for t in range(T):
M = covariance2s[t]
covariances[t] = M.T.dot(M)
return covariances
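# Quick illustrative check (ours, not part of the original module): a Cholesky
# factor R with P = R.T.dot(R) is mapped back to the full covariance.
def _reconstruct_example():
    P = np.array([[2.0, 0.5], [0.5, 1.0]])
    R = linalg.cholesky(P)  # upper triangular by default in scipy
    assert np.allclose(_reconstruct_covariances(R)[0], P)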
def cholupdate(A2, X, weight):
'''Calculate chol(A + w x x')
Parameters
----------
A2 : [n_dim, n_dim] array
A = A2.T.dot(A2) for A positive definite, symmetric
X : [n_dim] or [n_vec, n_dim] array
vector(s) to be used for x. If X has 2 dimensions, then each row will be
added in turn.
weight : float
weight to multiply each x x' by. If negative, will use
sign(weight) * sqrt(abs(weight)) instead of sqrt(weight).
Returns
-------
A2 : [n_dim, n_dim array]
cholesky decomposition of updated matrix
Notes
-----
Code based on the following MATLAB snippet taken from Wikipedia on
August 14, 2012::
function [L] = cholupdate(L,x)
p = length(x);
x = x';
for k=1:p
r = sqrt(L(k,k)^2 + x(k)^2);
c = r / L(k, k);
s = x(k) / L(k, k);
L(k, k) = r;
L(k,k+1:p) = (L(k,k+1:p) + s*x(k+1:p)) / c;
x(k+1:p) = c*x(k+1:p) - s*L(k, k+1:p);
end
end
'''
# make copies
X = X.copy()
A2 = A2.copy()
# standardize input shape
if len(X.shape) == 1:
X = X[np.newaxis, :]
n_vec, n_dim = X.shape
# take sign of weight into account
sign, weight = np.sign(weight), np.sqrt(np.abs(weight))
X = weight * X
for i in range(n_vec):
x = X[i, :]
for k in range(n_dim):
r_squared = A2[k, k] ** 2 + sign * x[k] ** 2
r = 0.0 if r_squared < 0 else np.sqrt(r_squared)
c = r / A2[k, k]
s = x[k] / A2[k, k]
A2[k, k] = r
A2[k, k + 1:] = (A2[k, k + 1:] + sign * s * x[k + 1:]) / c
x[k + 1:] = c * x[k + 1:] - s * A2[k, k + 1:]
return A2
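# Minimal illustrative check (ours): cholupdate reproduces chol(A + w x x')
# for a small symmetric positive definite matrix.
def _cholupdate_example():
    A = np.array([[4.0, 1.0], [1.0, 3.0]])
    R = linalg.cholesky(A)  # upper triangular, A == R.T.dot(R)
    x = np.array([0.5, -0.2])
    R_up = cholupdate(R, x, 1.0)
    assert np.allclose(R_up.T.dot(R_up), A + np.outer(x, x))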
def qr(A):
'''Get square upper triangular matrix of QR decomposition of matrix A'''
N, L = A.shape
if not N >= L:
raise ValueError("Number of rows must be at least the number of columns")
Q, R = linalg.qr(A)
return R[:L, :L]
def points2moments(points, sigma2_noise=None):
'''Calculate estimated mean and covariance of sigma points
Parameters
----------
points : [2 * n_dim_state + 1, n_dim_state] SigmaPoints
SigmaPoints object containing points and weights
sigma2_noise : [n_dim_state, n_dim_state] array
Cholesky factor of the additive noise covariance matrix, if any
Returns
-------
moments : Moments object of size [n_dim_state]
Mean and covariance estimated using points
'''
(points, weights_mu, weights_sigma) = points
mu = points.T.dot(weights_mu)
# make points to perform QR factorization on. each column is one data point
qr_points = [
np.sign(weights_sigma)[np.newaxis, :]
* np.sqrt(np.abs(weights_sigma))[np.newaxis, :]
* (points.T - mu[:, np.newaxis])
]
if sigma2_noise is not None:
qr_points.append(sigma2_noise)
sigma2 = qr(np.hstack(qr_points).T)
#sigma2 = cholupdate(sigma2, points[0] - mu, weights_sigma[0])
return Moments(mu.ravel(), sigma2)
def moments2points(moments, alpha=None, beta=None, kappa=None):
'''Calculate "sigma points" used in Unscented Kalman Filter
Parameters
----------
moments : [n_dim] Moments object
mean and covariance of a multivariate normal
alpha : float
Spread of the sigma points. Typically 1e-3.
beta : float
Used to "incorporate prior knowledge of the distribution of the state".
2 is optimal if the state is normally distributed.
kappa : float
a secondary scaling parameter; this implementation defaults to 3 - n_dim
Returns
-------
points : [2*n_dim+1, n_dim] SigmaPoints
sigma points and associated weights
'''
(mu, sigma2) = moments
n_dim = len(mu)
mu = array2d(mu, dtype=float)
if alpha is None:
alpha = 1.0
if beta is None:
beta = 0.0
if kappa is None:
kappa = 3.0 - n_dim
# just because I saw it in the MATLAB implementation
sigma2 = sigma2.T
# Calculate scaling factor for all off-center points
lamda = (alpha * alpha) * (n_dim + kappa) - n_dim
c = n_dim + lamda
# calculate the sigma points; that is,
# mu
# mu + each column of sigma2 * sqrt(c)
# mu - each column of sigma2 * sqrt(c)
# Each column of points is one of these.
points = np.tile(mu.T, (1, 2 * n_dim + 1))
points[:, 1:(n_dim + 1)] += sigma2 * np.sqrt(c)
points[:, (n_dim + 1):] -= sigma2 * np.sqrt(c)
# Calculate weights
weights_mean = np.ones(2 * n_dim + 1)
weights_mean[0] = lamda / c
weights_mean[1:] = 0.5 / c
weights_cov = np.copy(weights_mean)
weights_cov[0] = lamda / c + (1 - alpha * alpha + beta)
return SigmaPoints(points.T, weights_mean, weights_cov)
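# Illustrative round trip (ours, not part of the original module): for
# positive sigma-point weights (here n_dim = 2 with the default
# kappa = 3 - n_dim), converting moments to sigma points and back recovers
# the mean and covariance exactly.
def _sigma_point_roundtrip_example():
    mu = np.array([1.0, -2.0])
    P = np.array([[2.0, 0.3], [0.3, 1.0]])
    moments = Moments(mu, linalg.cholesky(P))
    recovered = points2moments(moments2points(moments))
    assert np.allclose(recovered.mean, mu)
    assert np.allclose(recovered.covariance.T.dot(recovered.covariance), P)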
def _unscented_transform(points, f=None, points_noise=None, sigma2_noise=None):
'''Apply the Unscented Transform.
Parameters
==========
points : [n_points, n_dim_1] array
points representing state to pass through `f`
f : [n_dim_1, n_dim_3] -> [n_dim_2] function
function to apply pass all points through
points_noise : [n_points, n_dim_3] array
points representing noise to pass through `f`, if any.
sigma2_noise : [n_dim_2, n_dim_2] array
square root of covariance matrix for additive noise
Returns
=======
points_pred : [n_points, n_dim_2] array
points passed through f
mu_pred : [n_dim_2] array
empirical mean
sigma2_pred : [n_dim_2, n_dim_2] array
R s.t. R' R = empirical covariance
'''
n_points, n_dim_state = points.points.shape
(points, weights_mean, weights_covariance) = points
# propagate points through f. Each column is a sample point
if f is not None:
if points_noise is None:
points_pred = [f(points[i]) for i in range(n_points)]
else:
points_pred = [f(points[i], points_noise[i]) for i in range(n_points)]
else:
points_pred = points
# make each row a predicted point
points_pred = np.vstack(points_pred)
points_pred = SigmaPoints(points_pred, weights_mean, weights_covariance)
# calculate approximate mean, covariance
moments_pred = points2moments(
points_pred, sigma2_noise=sigma2_noise
)
return (points_pred, moments_pred)
def _unscented_correct(cross_sigma, moments_pred, obs_moments_pred, z):
'''Correct predicted state estimates with an observation
Parameters
----------
cross_sigma : [n_dim_state, n_dim_obs] array
cross-covariance between the state at time t given all observations
from timesteps [0, t-1] and the observation at time t
moments_pred : [n_dim_state] Moments
mean and covariance of state at time t given observations from
timesteps [0, t-1]
obs_moments_pred : [n_dim_obs] Moments
mean and covariance of observation at time t given observations from
times [0, t-1]
z : [n_dim_obs] array
observation at time t
Returns
-------
moments_filt : [n_dim_state] Moments
mean and covariance of state at time t given observations from time
steps [0, t]
'''
mu_pred, sigma2_pred = moments_pred
obs_mu_pred, obs_sigma2_pred = obs_moments_pred
n_dim_state = len(mu_pred)
n_dim_obs = len(obs_mu_pred)
if not np.any(ma.getmask(z)):
##############################################
# Same as this, but more stable (supposedly) #
##############################################
# K = cross_sigma.dot(
# linalg.pinv(
# obs_sigma2_pred.T.dot(obs_sigma2_pred)
# )
# )
##############################################
# equivalent to this MATLAB code
# K = (cross_sigma / obs_sigma2_pred.T) / obs_sigma2_pred
K = linalg.lstsq(obs_sigma2_pred, cross_sigma.T)[0]
K = linalg.lstsq(obs_sigma2_pred.T, K)[0]
K = K.T
# correct mu, sigma
mu_filt = mu_pred + K.dot(z - obs_mu_pred)
U = K.dot(obs_sigma2_pred)
sigma2_filt = cholupdate(sigma2_pred, U.T, -1.0)
else:
# no corrections to be made
mu_filt = mu_pred
sigma2_filt = sigma2_pred
return Moments(mu_filt, sigma2_filt)
def unscented_filter_predict(transition_function, points_state,
points_transition=None,
sigma2_transition=None):
"""Predict next state distribution
Using the sigma points representing the state at time t given observations
from time steps 0...t, calculate the predicted mean, covariance, and sigma
points for the state at time t+1.
Parameters
----------
transition_function : function
function describing how the state changes between times t and t+1
points_state : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to the state at time step t given
observations from time steps 0...t
points_transition : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to the noise in transitioning from time step
t to t+1, if available. If not, assumes that noise is additive
sigma2_transition : [n_dim_state, n_dim_state] array
Cholesky factor of the covariance corresponding to additive noise in
transitioning from time step t to t+1, if available. If not, assumes
noise is not additive.
Returns
-------
points_pred : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to state at time step t+1 given observations
from time steps 0...t. These points have not been "standardized" by the
unscented transform yet.
moments_pred : [n_dim_state] Moments
mean and covariance corresponding to time step t+1 given observations
from time steps 0...t
"""
assert points_transition is not None or sigma2_transition is not None, \
"Your system is noiseless? really?"
(points_pred, moments_pred) = (
_unscented_transform(
points_state, transition_function,
points_noise=points_transition, sigma2_noise=sigma2_transition
)
)
return (points_pred, moments_pred)
def unscented_filter_correct(observation_function, moments_pred,
points_pred, observation,
points_observation=None,
sigma2_observation=None):
"""Integrate new observation to correct state estimates
Parameters
----------
observation_function : function
function characterizing how the observation at time t+1 is generated
moments_pred : [n_dim_state] Moments
mean and covariance of state at time t+1 given observations from time
steps 0...t
points_pred : [2*n_dim_state+1, n_dim_state] SigmaPoints
sigma points corresponding to moments_pred
observation : [n_dim_state] array
observation at time t+1. If masked, treated as missing.
points_observation : [2*n_dim_state, n_dim_obs] SigmaPoints
sigma points corresponding to predicted observation at time t+1 given
observations from times 0...t, if available. If not, noise is assumed
to be additive.
sigma2_observation : [n_dim_obs, n_dim_obs] array
Cholesky factor of the covariance matrix corresponding to additive noise
in the observation at time t+1, if available. If missing, noise is
assumed to be non-additive.
Returns
-------
moments_filt : [n_dim_state] Moments
mean and covariance of state at time t+1 given observations from time
steps 0...t+1
"""
# Calculate E[z_t | z_{0:t-1}], Var(z_t | z_{0:t-1})
(obs_points_pred, obs_moments_pred) = (
_unscented_transform(
points_pred, observation_function,
points_noise=points_observation, sigma2_noise=sigma2_observation
)
)
# Calculate Cov(x_t, z_t | z_{0:t-1})
sigma_pair = (
((points_pred.points - moments_pred.mean).T)
.dot(np.diag(points_pred.weights_mean))
.dot(obs_points_pred.points - obs_moments_pred.mean)
)
# Calculate E[x_t | z_{0:t}], Var(x_t | z_{0:t})
moments_filt = _unscented_correct(sigma_pair, moments_pred, obs_moments_pred, observation)
return moments_filt
def _additive_unscented_filter(mu_0, sigma_0, f, g, Q, R, Z):
'''Apply the Unscented Kalman Filter with additive noise
Parameters
----------
mu_0 : [n_dim_state] array
mean of initial state distribution
sigma_0 : [n_dim_state, n_dim_state] array
covariance of initial state distribution
f : function or [T-1] array of functions
state transition function(s). Takes in the current state and outputs
the next.
g : function or [T] array of functions
observation function(s). Takes in the current state and outputs the
current observation.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
R : [n_dim_obs, n_dim_obs] array
observation covariance matrix
Z : [T, n_dim_obs] array
Z[t] = observation at time t
Returns
-------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times [0,
t]
sigma2_filt : [T, n_dim_state, n_dim_state] array
sigma2_filt[t] = square root of the covariance of state at time t given
observations from times [0, t]
'''
# extract size of key components
T = Z.shape[0]
n_dim_state = Q.shape[-1]
n_dim_obs = R.shape[-1]
# construct container for results
mu_filt = np.zeros((T, n_dim_state))
sigma2_filt = np.zeros((T, n_dim_state, n_dim_state))
Q2 = linalg.cholesky(Q)
R2 = linalg.cholesky(R)
for t in range(T):
# Calculate sigma points for P(x_{t-1} | z_{0:t-1})
if t == 0:
mu, sigma2 = mu_0, linalg.cholesky(sigma_0)
else:
mu, sigma2 = mu_filt[t - 1], sigma2_filt[t - 1]
points_state = moments2points(Moments(mu, sigma2))
# Calculate E[x_t | z_{0:t-1}], Var(x_t | z_{0:t-1})
if t == 0:
points_pred = points_state
moments_pred = points2moments(points_pred)
else:
transition_function = _last_dims(f, t - 1, ndims=1)[0]
(_, moments_pred) = (
unscented_filter_predict(
transition_function, points_state, sigma2_transition=Q2
)
)
points_pred = moments2points(moments_pred)
# Calculate E[z_t | z_{0:t-1}], Var(z_t | z_{0:t-1})
observation_function = _last_dims(g, t, ndims=1)[0]
mu_filt[t], sigma2_filt[t] = unscented_filter_correct(
observation_function, moments_pred, points_pred,
Z[t], sigma2_observation=R2
)
return (mu_filt, sigma2_filt)
def _additive_unscented_smoother(mu_filt, sigma2_filt, f, Q):
'''Apply the Unscented Kalman Smoother assuming additive noise
Parameters
----------
mu_filt : [T, n_dim_state] array
mu_filt[t] = mean of state at time t given observations from times
[0, t]
sigma2_filt : [T, n_dim_state, n_dim_state] array
sigma2_filt[t] = square root of the covariance of state at time t given
observations from times [0, t]
f : function or [T-1] array of functions
state transition function(s). Takes in the current state and outputs
the next.
Q : [n_dim_state, n_dim_state] array
transition covariance matrix
Returns
-------
mu_smooth : [T, n_dim_state] array
mu_smooth[t] = mean of state at time t given observations from times
[0, T-1]
sigma2_smooth : [T, n_dim_state, n_dim_state] array
sigma2_smooth[t] = square root of the covariance of state at time t
given observations from times [0, T-1]
'''
# extract size of key parts of problem
T, n_dim_state = mu_filt.shape
# instantiate containers for results
mu_smooth = np.zeros(mu_filt.shape)
sigma2_smooth = np.zeros(sigma2_filt.shape)
mu_smooth[-1], sigma2_smooth[-1] = mu_filt[-1], sigma2_filt[-1]
Q2 = linalg.cholesky(Q)
for t in reversed(range(T - 1)):
# get sigma points for state
mu = mu_filt[t]
sigma2 = sigma2_filt[t]
moments_state = Moments(mu, sigma2)
points_state = moments2points(moments_state)
# compute E[x_{t+1} | z_{0:t}], Var(x_{t+1} | z_{0:t})
transition_function = _last_dims(f, t, ndims=1)[0]
(points_pred, moments_pred) = (
_unscented_transform(points_state, transition_function, sigma2_noise=Q2)
)
# Calculate Cov(x_{t+1}, x_t | z_{0:t-1})
sigma_pair = (
(points_pred.points - moments_pred.mean).T
.dot(np.diag(points_pred.weights_covariance))
.dot(points_state.points - moments_state.mean).T
)
# compute smoothed mean, covariance
#############################################
# Same as this, but more stable (supposedly)#
#############################################
# smoother_gain = (
# sigma_pair.dot(linalg.pinv(sigma2_pred.T.dot(sigma2_pred)))
# )
#############################################
smoother_gain = linalg.lstsq(moments_pred.covariance.T, sigma_pair.T)[0]
smoother_gain = linalg.lstsq(moments_pred.covariance, smoother_gain)[0]
smoother_gain = smoother_gain.T
mu_smooth[t] = (
mu_filt[t]
+ smoother_gain
.dot(mu_smooth[t + 1] - moments_pred.mean)
)
U = cholupdate(moments_pred.covariance, sigma2_smooth[t + 1], -1.0)
sigma2_smooth[t] = (
cholupdate(sigma2_filt[t], smoother_gain.dot(U.T).T, -1.0)
)
return (mu_smooth, sigma2_smooth)
class AdditiveUnscentedKalmanFilter(AUKF):
r'''Implements the Unscented Kalman Filter with additive noise.
Observations are assumed to be generated from the following process,
.. math::
x_0 &\sim \text{Normal}(\mu_0, \Sigma_0) \\
x_{t+1} &= f_t(x_t) + \text{Normal}(0, Q) \\
z_{t} &= g_t(x_t) + \text{Normal}(0, R)
While less general than the general-noise Unscented Kalman Filter, the Additive
version is more computationally efficient with complexity :math:`O(Tn^3)`
where :math:`T` is the number of time steps and :math:`n` is the size of
the state space.
Parameters
----------
transition_functions : function or [n_timesteps-1] array of functions
transition_functions[t] is a function of the state at time t and
produces the state at time t+1. Also known as :math:`f_t`.
observation_functions : function or [n_timesteps] array of functions
observation_functions[t] is a function of the state at time t and
produces the observation at time t. Also known as :math:`g_t`.
transition_covariance : [n_dim_state, n_dim_state] array
transition noise covariance matrix. Also known as :math:`Q`.
observation_covariance : [n_dim_obs, n_dim_obs] array
observation noise covariance matrix. Also known as :math:`R`.
initial_state_mean : [n_dim_state] array
mean of initial state distribution. Also known as :math:`\mu_0`.
initial_state_covariance : [n_dim_state, n_dim_state] array
covariance of initial state distribution. Also known as
:math:`\Sigma_0`.
n_dim_state: optional, integer
the dimensionality of the state space. Only meaningful when you do not
specify initial values for `transition_covariance`, or
`initial_state_mean`, `initial_state_covariance`.
n_dim_obs: optional, integer
the dimensionality of the observation space. Only meaningful when you
do not specify initial values for `observation_covariance`.
random_state : optional, int or RandomState
seed for random sample generation
'''
def filter(self, Z):
'''Run Unscented Kalman Filter
Parameters
----------
Z : [n_timesteps, n_dim_state] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
filtered_state_means : [n_timesteps, n_dim_state] array
filtered_state_means[t] = mean of state distribution at time t
given observations from times [0, t]
filtered_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
filtered_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, t]
'''
Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
n_timesteps = Z.shape[0]
# run square root filter
(filtered_state_means, sigma2_filt) = (
_additive_unscented_filter(
initial_state_mean, initial_state_covariance,
transition_functions, observation_functions,
transition_covariance, observation_covariance,
Z
)
)
# reconstruct covariance matrices
filtered_state_covariances = np.zeros(sigma2_filt.shape)
for t in range(n_timesteps):
filtered_state_covariances[t] = sigma2_filt[t].T.dot(sigma2_filt[t])
return (filtered_state_means, filtered_state_covariances)
def filter_update(self,
filtered_state_mean, filtered_state_covariance,
observation=None,
transition_function=None, transition_covariance=None,
observation_function=None, observation_covariance=None):
r"""Update a Kalman Filter state estimate
Perform a one-step update to estimate the state at time :math:`t+1`
give an observation at time :math:`t+1` and the previous estimate for
time :math:`t` given observations from times :math:`[0...t]`. This
method is useful if one wants to track an object with streaming
observations.
Parameters
----------
filtered_state_mean : [n_dim_state] array
mean estimate for state at time t given observations from times
[1...t]
filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t given observations from
times [1...t]
observation : [n_dim_obs] array or None
observation from time t+1. If `observation` is a masked array and
any of `observation`'s components are masked or if `observation` is
None, then `observation` will be treated as a missing observation.
transition_function : optional, function
state transition function from time t to t+1. If unspecified,
`self.transition_functions` will be used.
transition_covariance : optional, [n_dim_state, n_dim_state] array
state transition covariance from time t to t+1. If unspecified,
`self.transition_covariance` will be used.
observation_function : optional, function
observation function at time t+1. If unspecified,
`self.observation_functions` will be used.
observation_covariance : optional, [n_dim_obs, n_dim_obs] array
observation covariance at time t+1. If unspecified,
`self.observation_covariance` will be used.
Returns
-------
next_filtered_state_mean : [n_dim_state] array
mean estimate for state at time t+1 given observations from times
[1...t+1]
next_filtered_state_covariance : [n_dim_state, n_dim_state] array
covariance of estimate for state at time t+1 given observations
from times [1...t+1]
"""
# initialize parameters
(transition_functions, observation_functions,
transition_cov, observation_cov,
_, _) = (
self._initialize_parameters()
)
def default_function(f, arr):
if f is None:
assert len(arr) == 1
f = arr[0]
return f
transition_function = default_function(
transition_function, transition_functions
)
observation_function = default_function(
observation_function, observation_functions
)
transition_covariance = _arg_or_default(
transition_covariance, transition_cov,
2, "transition_covariance"
)
observation_covariance = _arg_or_default(
observation_covariance, observation_cov,
2, "observation_covariance"
)
# Make a masked observation if necessary
if observation is None:
n_dim_obs = observation_covariance.shape[0]
observation = np.ma.array(np.zeros(n_dim_obs))
observation.mask = True
else:
observation = np.ma.asarray(observation)
# preprocess covariance matrices
filtered_state_covariance2 = linalg.cholesky(filtered_state_covariance)
transition_covariance2 = linalg.cholesky(transition_covariance)
observation_covariance2 = linalg.cholesky(observation_covariance)
# make sigma points
moments_state = Moments(filtered_state_mean, filtered_state_covariance2)
points_state = moments2points(moments_state)
# predict
(_, moments_pred) = (
unscented_filter_predict(
transition_function, points_state,
sigma2_transition=transition_covariance2
)
)
points_pred = moments2points(moments_pred)
# correct
(next_filtered_state_mean, next_filtered_state_covariance2) = (
unscented_filter_correct(
observation_function, moments_pred, points_pred,
observation, sigma2_observation=observation_covariance2
)
)
next_filtered_state_covariance = (
_reconstruct_covariances(next_filtered_state_covariance2)
)
return (next_filtered_state_mean, next_filtered_state_covariance)
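# --- Editor's illustrative sketch (not part of the original pykalman source) ---
# filter_update() above is designed for streaming use: the filtered estimate is
# carried forward one observation at a time. The names `ukf`, `mu0`, `P0` and
# `observation_stream` below are hypothetical placeholders.
#
#     >>> mean, cov = mu0, P0
#     >>> for z in observation_stream:
#     ...     mean, cov = ukf.filter_update(mean, cov, observation=z)
#
# Passing observation=None (or a fully masked array) marks a missing measurement,
# in which case the update reduces to the prediction step alone.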
def smooth(self, Z):
'''Run Unscented Kalman Smoother
Parameters
----------
Z : [n_timesteps, n_dim_obs] array
Z[t] = observation at time t. If Z is a masked array and any of
Z[t]'s elements are masked, the observation is assumed missing and
ignored.
Returns
-------
smoothed_state_means : [n_timesteps, n_dim_state] array
smoothed_state_means[t] = mean of state distribution at time t
given observations from times [0, n_timesteps-1]
smoothed_state_covariances : [n_timesteps, n_dim_state, n_dim_state] array
smoothed_state_covariances[t] = covariance of state distribution at
time t given observations from times [0, n_timesteps-1]
'''
Z = self._parse_observations(Z)
(transition_functions, observation_functions,
transition_covariance, observation_covariance,
initial_state_mean, initial_state_covariance) = (
self._initialize_parameters()
)
n_timesteps = Z.shape[0]
# run filter, then smoother
(filtered_state_means, sigma2_filt) = (
_additive_unscented_filter(
initial_state_mean, initial_state_covariance,
transition_functions, observation_functions,
transition_covariance, observation_covariance,
Z
)
)
(smoothed_state_means, sigma2_smooth) = (
_additive_unscented_smoother(
filtered_state_means, sigma2_filt,
transition_functions, transition_covariance
)
)
# reconstruct covariance matrices
smoothed_state_covariances = np.zeros(sigma2_smooth.shape)
for t in range(n_timesteps):
smoothed_state_covariances[t] = (
sigma2_smooth[t].T.dot(sigma2_smooth[t])
)
return (smoothed_state_means, smoothed_state_covariances)
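# --- Editor's illustrative sketch (appended after the original file text) ---
# Typical batch usage of the square-root additive UKF defined above. The class
# name AdditiveUnscentedKalmanFilter is assumed from the pykalman.sqrt layout;
# f, g, Q, R, mu0, P0 and the [n_timesteps, n_dim_obs] observation array Z are
# hypothetical placeholders.
#
#     >>> ukf = AdditiveUnscentedKalmanFilter(
#     ...     transition_functions=f, observation_functions=g,
#     ...     transition_covariance=Q, observation_covariance=R,
#     ...     initial_state_mean=mu0, initial_state_covariance=P0)
#     >>> filtered_means, filtered_covs = ukf.filter(Z)
#     >>> smoothed_means, smoothed_covs = ukf.smooth(Z)
#
# Both methods propagate Cholesky factors (the sigma2_* arrays) internally and
# only reconstruct full covariances as S.T.dot(S) when returning results.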
|
nmayorov/pykalman
|
pykalman/sqrt/unscented.py
|
Python
|
bsd-3-clause
| 30,009
|
[
"Gaussian"
] |
585e8576635c7822c5157def7ad09b8fa8c599cb880e4bc05a6a48cf5b528b8f
|
import copy
import numpy
import sys
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from grbm import GBRBM
from linear_regression import LinearRegression
from mlp import HiddenLayer
from rbm import RBM
class DBN(object):
"""
Deep Belief Network
A deep belief network is obtained by stacking several RBMs on top of each
other. The hidden layer of the RBM at layer `i` becomes the input of the
RBM at layer `i+1`. The first layer RBM gets as input the input of the
network, and the hidden layer of the last RBM represents the output. When
used for supervised prediction, the DBN is treated as an MLP by adding a linear
regression output layer on top.
"""
def __init__(self, numpy_rng, theano_rng=None, n_visible=784,
hidden_layers_sizes=None, n_outs=10, params=None, gaussian_visible=False):
"""This class is made to support a variable number of layers.
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: numpy random number generator used to draw initial
weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `numpy_rng`
:type n_visible: int
:param n_visible: dimension of the input to the DBN
:type hidden_layers_sizes: list of ints
:param hidden_layers_sizes: intermediate layers size, must contain
at least one value
:type n_outs: int
:param n_outs: dimension of the output of the network
:type params: List of numpy array
:param params: free params of the models
:type gaussian_visible: Boolean
:param gaussian_visible: True if the visible units are gaussian
"""
# Params to reconstruct the DBN
self.n_visible = n_visible
self.hidden_layers_sizes = hidden_layers_sizes
self.n_outs = n_outs
self.gaussian_visible = gaussian_visible
self.sigmoid_layers = []
self.rbm_layers = []
self.params = []
self.n_layers = len(self.hidden_layers_sizes)
assert self.n_layers > 0
if not theano_rng:
theano_rng = MRG_RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.input = T.matrix('x')
# The DBN is an MLP, for which all weights of intermediate layers are shared with a different RBM.
# We will first construct the DBN as a deep multilayer perceptron, and when
# constructing each sigmoid layer we also construct an RBM
# that shares weights with that layer.
# During pre-training we
# will train these RBMs (which will lead to changing the weights of the MLP as well)
# During fine-tuning we will finish training the DBN by doing stochastic gradient descent on the MLP.
for i in xrange(self.n_layers):
# construct the sigmoid layer
if i == 0:
input_size = self.n_visible
layer_input = self.input
else:
input_size = self.hidden_layers_sizes[i - 1]
layer_input = self.sigmoid_layers[-1].output
# Set params W and b from params for hidden layer
W_val = None
b_val = None
if params is not None:
W_val = params[i * 2]
b_val = params[i * 2 + 1]
sigmoid_layer = HiddenLayer(
rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=self.hidden_layers_sizes[i],
activation_function=T.nnet.sigmoid,
W=W_val,
b=b_val
)
# add the layer to our list of layers
self.sigmoid_layers.append(sigmoid_layer)
self.params.extend(sigmoid_layer.params)
# Construct an RBM that shared weights with this layer
if i == 0 and gaussian_visible:
# self.__class__.__name__ = "GBRBM-DBN"
rbm_layer = GBRBM(
numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=self.hidden_layers_sizes[i],
W=sigmoid_layer.W,
h_bias=sigmoid_layer.b
)
else:
rbm_layer = RBM(
numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=self.hidden_layers_sizes[i],
W=sigmoid_layer.W,
h_bias=sigmoid_layer.b
)
self.rbm_layers.append(rbm_layer)
# We now need to add the output layer on top of the MLP
W_val = None
b_val = None
if params is not None:
W_val = params[-2]
b_val = params[-1]
self.outputLayer = LinearRegression(
input=self.sigmoid_layers[-1].output,
n_in=self.hidden_layers_sizes[-1],
n_out=self.n_outs,
W=W_val,
b=b_val
)
self.params.extend(self.outputLayer.params)
# Output of the model
self.output = self.outputLayer.output
self.L1 = 0
self.L2 = 0
for p in self.params:
# L1 norm: one regularization option is to enforce the L1 norm to be small
self.L1 += T.sum(abs(p))
# squared L2 norm: one regularization option is to enforce the squared L2 norm to be small
self.L2 += T.sum(p ** 2)
def __getstate__(self):
if 'pydevd' in sys.modules:
print 'Serializing ' + self.__class__.__name__
state = copy.deepcopy(self.__dict__)
del state['input']
del state['output']
del state['rbm_layers']
del state['sigmoid_layers']
del state['outputLayer']
del state['L1']
del state['L2']
for i, val in enumerate(state['params']):
state['params'][i] = val.get_value(borrow=True)
return state
def __setstate__(self, state):
if 'pydevd' in sys.modules:
print 'De-serializing ' + self.__class__.__name__
dbn = DBN(
numpy_rng=numpy.random.RandomState(),
theano_rng=None,
n_visible=state['n_visible'],
hidden_layers_sizes=state['hidden_layers_sizes'],
n_outs=state['n_outs'],
params=state['params'],
gaussian_visible=state["gaussian_visible"]
)
self.__dict__ = dbn.__dict__
def cost(self, y):
"""
Return a cost function of the model
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives the target output for each example
"""
if y.ndim != self.output.ndim:
raise TypeError(
'y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.output.type)
)
return T.mean((self.output - y) ** 2)
# return T.sum((self.output - y) ** 2)
def pre_training_functions(self, datasets, batch_size, k=1):
"""
Generates a list of functions, for performing one step of
gradient descent at a given layer. The function will require
as input the mini batch index, and to train an RBM you just
need to iterate, calling the corresponding function on all
mini batch indexes.
:type datasets: Theano shared variable
:param datasets: Dataset with train, test and valid sets
:type batch_size: int
:param batch_size: size of a mini batch
:type k: int
:param k: number of Gibbs steps to do in CD-k / PCD-k
"""
train_set_x, train_set_y = datasets['train_set']
# index to a [mini]batch
index = T.lscalar('index')
learning_rate = T.scalar('lr')
# number of batches
batch_begin = index * batch_size
batch_end = batch_begin + batch_size
pre_train_fns = []
for rbm in self.rbm_layers:
# get the cost and the updates list
# using CD-k here (persistent=None) for training each RBM.
cost, updates = rbm.get_cost_updates(learning_rate, persistent=None, k=k)
# compile the theano function
fn = theano.function(
inputs=[index, learning_rate],
outputs=cost,
updates=updates,
givens={
self.input: train_set_x[batch_begin:batch_end]
}
)
# append fn to the list of functions
pre_train_fns.append(fn)
return pre_train_fns
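# --- Editor's illustrative sketch (not part of the original file) ---
# The compiled functions returned above are meant to be called greedily, one RBM
# layer at a time, over all minibatch indices. `dbn`, `datasets`, `n_train_batches`,
# `pretraining_epochs` and `pretrain_lr` are hypothetical placeholders.
#
#     >>> pretraining_fns = dbn.pre_training_functions(datasets, batch_size=20, k=1)
#     >>> for layer_fn in pretraining_fns:          # one function per RBM layer
#     ...     for epoch in xrange(pretraining_epochs):
#     ...         costs = [layer_fn(i, pretrain_lr) for i in xrange(n_train_batches)]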
def predict(self, input):
"""
Predict function of the model.
Parameters
----------
input: Matrix of vectors
"""
predict_function = theano.function(
inputs=[self.input],
outputs=self.output)
predicted_values = predict_function(input)
return predicted_values
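# --- Editor's illustrative sketch (appended after the original file text) ---
# Constructing and querying the DBN defined above. The arrays train_x / train_y
# and the layer sizes are hypothetical placeholders.
#
#     >>> rng = numpy.random.RandomState(123)
#     >>> dbn = DBN(numpy_rng=rng, n_visible=train_x.shape[1],
#     ...           hidden_layers_sizes=[500, 500], n_outs=train_y.shape[1],
#     ...           gaussian_visible=True)   # GBRBM first layer for real-valued input
#     >>> predictions = dbn.predict(train_x)
#
# Fine-tuning would minimise dbn.cost(y) (a mean squared error) plus the optional
# dbn.L1 / dbn.L2 penalties with a Theano gradient update, after pre-training the
# rbm_layers with the functions from pre_training_functions().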
|
gdl-civestav-localization/cinvestav_location_fingerprinting
|
models/classification/deep_models/dbn.py
|
Python
|
gpl-3.0
| 9,358
|
[
"Gaussian"
] |
8c8a2af5ed60d2fc3fb4eeb75ebf267a4f51ec15bb30d3fab976fcbac2dc7131
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
The initial version of this module was based on a similar implementation
present in FireWorks (https://pypi.python.org/pypi/FireWorks).
Work done by D. Waroquiers, A. Jain, and M. Kocher.
The main difference wrt the Fireworks implementation is that the QueueAdapter
objects provide a programmatic interface for setting important attributes
such as the number of MPI nodes, the number of OMP threads and the memory requirements.
This programmatic interface is used by the `TaskManager` for optimizing the parameters
of the run before submitting the job (Abinit provides the autoparal option that
allows one to get a list of parallel configurations and their expected efficiency).
"""
from __future__ import print_function, division, unicode_literals
import sys
import os
import abc
import string
import copy
import getpass
import six
import json
import math
from . import qutils as qu
from collections import namedtuple
from subprocess import Popen, PIPE
from atomicfile import AtomicFile
from monty.string import is_string, list_strings
from monty.collections import AttrDict
from monty.functools import lazy_property
from monty.inspect import all_subclasses
from monty.io import FileLock
from monty.json import MSONable
from pymatgen.core.units import Memory
from .utils import Condition
from .launcher import ScriptEditor
from .qjobs import QueueJob
import logging
logger = logging.getLogger(__name__)
__all__ = [
"MpiRunner",
"make_qadapter",
]
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
class SubmitResults(namedtuple("SubmitResult", "qid, out, err, process")):
"""
Named tuple created by the concrete implementation of _submit_to_queue to pass back the results of
submitting the job file to the queue.
qid: queue id of the submission
out: stdout of the submission
err: stderr of the submission
process: process object of the submission
"""
class MpiRunner(object):
"""
This object provides an abstraction for the mpirunner provided
by the different MPI libraries. Its main task is handling the
different syntax and options supported by the different mpirunners.
"""
def __init__(self, name, type=None, options=""):
self.name = name
self.type = type
self.options = options
def string_to_run(self, executable, mpi_procs, stdin=None, stdout=None, stderr=None, exec_args=None):
stdin = "< " + stdin if stdin is not None else ""
stdout = "> " + stdout if stdout is not None else ""
stderr = "2> " + stderr if stderr is not None else ""
if exec_args:
executable = executable + " " + " ".join(list_strings(exec_args))
if self.has_mpirun:
if self.type is None:
# TODO: better treatment of mpirun syntax.
#se.add_line('$MPIRUN -n $MPI_PROCS $EXECUTABLE < $STDIN > $STDOUT 2> $STDERR')
num_opt = "-n " + str(mpi_procs)
cmd = " ".join([self.name, num_opt, executable, stdin, stdout, stderr])
else:
raise NotImplementedError("type %s is not supported!")
else:
#assert mpi_procs == 1
cmd = " ".join([executable, stdin, stdout, stderr])
return cmd
@property
def has_mpirun(self):
"""True if we are running via mpirun, mpiexec ..."""
return self.name is not None
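# --- Editor's illustrative sketch (not part of the original file) ---
# How MpiRunner composes the command line; the executable path is a placeholder.
#
#     >>> runner = MpiRunner("mpirun")
#     >>> runner.string_to_run("/path/to/abinit", 4,
#     ...                      stdin="run.files", stdout="run.log", stderr="run.err")
#     'mpirun -n 4 /path/to/abinit < run.files > run.log 2> run.err'
#
# With MpiRunner(None) the mpirun prefix is omitted and the executable is run directly.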
class OmpEnv(AttrDict):
"""
Dictionary with the OpenMP environment variables
see https://computing.llnl.gov/tutorials/openMP/#EnvironmentVariables
"""
_KEYS = [
"OMP_SCHEDULE",
"OMP_NUM_THREADS",
"OMP_DYNAMIC",
"OMP_PROC_BIND",
"OMP_NESTED",
"OMP_STACKSIZE",
"OMP_WAIT_POLICY",
"OMP_MAX_ACTIVE_LEVELS",
"OMP_THREAD_LIMIT",
"OMP_STACKSIZE",
"OMP_PROC_BIND",
]
@classmethod
def as_ompenv(cls, obj):
"""Convert an object into a OmpEnv"""
if isinstance(obj, cls): return obj
if obj is None: return cls()
return cls(**obj)
def __init__(self, *args, **kwargs):
"""
Constructor method inherited from dictionary:
>>> assert OmpEnv(OMP_NUM_THREADS=1).OMP_NUM_THREADS == "1"
To create an instance from an INI file, use:
OmpEnv.from_file(filename)
"""
super(OmpEnv, self).__init__(*args, **kwargs)
err_msg = ""
for key, value in self.items():
self[key] = str(value)
if key not in self._KEYS:
err_msg += "unknown option %s\n" % key
if err_msg:
raise ValueError(err_msg)
def export_str(self):
"""Return a string with the bash statements needed to setup the OMP env."""
return "\n".join("export %s=%s" % (k, v) for k, v in self.items())
class Hardware(object):
"""
This object collects information on the hardware available in a given queue.
Basic definitions:
- A node refers to the physical box, i.e. cpu sockets with north/south switches connecting memory systems
and extension cards, e.g. disks, nics, and accelerators
- A cpu socket is the connector to these systems and the cpu cores
- A cpu core is an independent computing unit with its own computing pipeline, logical units, and memory controller.
Each cpu core will be able to service a number of cpu threads, each having an independent instruction stream
but sharing the core's memory controller and other logical units.
"""
def __init__(self, **kwargs):
self.num_nodes = int(kwargs.pop("num_nodes"))
self.sockets_per_node = int(kwargs.pop("sockets_per_node"))
self.cores_per_socket = int(kwargs.pop("cores_per_socket"))
# Convert memory to megabytes.
m = str(kwargs.pop("mem_per_node"))
self.mem_per_node = int(Memory.from_string(m).to("Mb"))
if self.mem_per_node <= 0 or self.sockets_per_node <= 0 or self.cores_per_socket <= 0:
raise ValueError("invalid parameters: %s" % kwargs)
if kwargs:
raise ValueError("Found invalid keywords in the partition section:\n %s" % kwargs.keys())
def __str__(self):
"""String representation."""
lines = []
app = lines.append
app(" num_nodes: %d, sockets_per_node: %d, cores_per_socket: %d, mem_per_node %s," %
(self.num_nodes, self.sockets_per_node, self.cores_per_socket, self.mem_per_node))
return "\n".join(lines)
@property
def num_cores(self):
"""Total number of cores available"""
return self.cores_per_socket * self.sockets_per_node * self.num_nodes
@property
def cores_per_node(self):
"""Number of cores per node."""
return self.cores_per_socket * self.sockets_per_node
@property
def mem_per_core(self):
"""Memory available on a single node."""
return self.mem_per_node / self.cores_per_node
def can_use_omp_threads(self, omp_threads):
"""True if omp_threads fit in a node."""
return self.cores_per_node >= omp_threads
def divmod_node(self, mpi_procs, omp_threads):
"""Use divmod to compute (num_nodes, rest_cores)"""
return divmod(mpi_procs * omp_threads, self.cores_per_node)
def as_dict(self):
return {'num_nodes': self.num_nodes,
'sockets_per_node': self.sockets_per_node,
'cores_per_socket': self.cores_per_socket,
'mem_per_node': str(Memory(val=self.mem_per_node, unit='Mb'))}
@classmethod
def from_dict(cls, dd):
return cls(num_nodes=dd['num_nodes'],
sockets_per_node=dd['sockets_per_node'],
cores_per_socket=dd['cores_per_socket'],
mem_per_node=dd['mem_per_node'])
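# --- Editor's illustrative sketch (not part of the original file) ---
# Describing a queue's hardware and checking how a job maps onto it; the numbers
# below are made up.
#
#     >>> hw = Hardware(num_nodes=8, sockets_per_node=2, cores_per_socket=8,
#     ...               mem_per_node="64 Gb")
#     >>> hw.cores_per_node, hw.mem_per_core      # (16, 4096) i.e. 4096 Mb per core
#     >>> hw.divmod_node(mpi_procs=32, omp_threads=1)   # (2, 0): two full nodes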
class _ExcludeNodesFile(object):
"""
This file contains the list of nodes to be excluded.
Nodes are indexed by queue name.
"""
DIRPATH = os.path.join(os.getenv("HOME"), ".abinit", "abipy")
FILEPATH = os.path.join(DIRPATH, "exclude_nodes.json")
def __init__(self):
if not os.path.exists(self.FILEPATH):
if not os.path.exists(self.DIRPATH): os.makedirs(self.DIRPATH)
with FileLock(self.FILEPATH):
with open(self.FILEPATH, "w") as fh:
json.dump({}, fh)
def read_nodes(self, qname):
with open(self.FILEPATH, "w") as fh:
return json.load(fh).get(qname, [])
def add_nodes(self, qname, nodes):
nodes = (nodes,) if not isinstance(nodes, (tuple, list)) else nodes
with FileLock(self.FILEPATH):
with AtomicFile(self.FILEPATH, mode="w+") as fh:
d = json.load(fh)
if qname in d:
d["qname"].extend(nodes)
d["qname"] = list(set(d["qname"]))
else:
d["qname"] = nodes
json.dump(d, fh)
_EXCL_NODES_FILE = _ExcludeNodesFile()
def show_qparams(qtype, stream=sys.stdout):
"""Print to the given stream the template of the :class:`QueueAdapter` of type `qtype`."""
for cls in all_subclasses(QueueAdapter):
if cls.QTYPE == qtype: return stream.write(cls.QTEMPLATE)
raise ValueError("Cannot find class associated to qtype %s" % qtype)
def all_qtypes():
"""List of all qtypes supported."""
return [cls.QTYPE for cls in all_subclasses(QueueAdapter)]
def make_qadapter(**kwargs):
"""
Return the concrete :class:`QueueAdapter` class from a string.
Note that one can register a customized version with:
.. example::
from qadapters import SlurmAdapter
class MyAdapter(SlurmAdapter):
QTYPE = "myslurm"
# Add your customized code here
# Register your class.
SlurmAdapter.register(MyAdapter)
make_qadapter(qtype="myslurm", **kwargs)
.. warning::
MyAdapter should be pickleable, hence one should declare it
at the module level so that pickle can import it at run-time.
"""
# Get all known subclasses of QueueAdapter.
d = {c.QTYPE: c for c in all_subclasses(QueueAdapter)}
# Preventive copy before pop
kwargs = copy.deepcopy(kwargs)
qtype = kwargs["queue"].pop("qtype")
return d[qtype](**kwargs)
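# --- Editor's illustrative sketch (not part of the original file) ---
# A minimal kwargs dictionary for make_qadapter, following the schema documented
# in QueueAdapter.autodoc() below; queue name, hardware numbers and limits are
# placeholders.
#
#     >>> qad = make_qadapter(
#     ...     priority=1,
#     ...     queue={"qtype": "slurm", "qname": "main"},
#     ...     hardware={"num_nodes": 4, "sockets_per_node": 2,
#     ...               "cores_per_socket": 8, "mem_per_node": "32 Gb"},
#     ...     job={"mpi_runner": "mpirun"},
#     ...     limits={"timelimit": "0:30:0", "max_cores": 16},
#     ... )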
class QueueAdapterError(Exception):
"""Base Error class for exceptions raise by QueueAdapter."""
class MaxNumLaunchesError(QueueAdapterError):
"""Raised by `submit_to_queue` if we try to submit more than `max_num_launches` times."""
class QueueAdapter(six.with_metaclass(abc.ABCMeta, MSONable)):
"""
The `QueueAdapter` is responsible for all interactions with a specific queue management system.
This includes handling all details of queue script format as well as queue submission and management.
This is the **abstract** base class defining the methods that must be implemented by the concrete classes.
Concrete classes should extend this class with implementations that work on specific queue systems.
.. note::
A `QueueAdapter` has a handler (:class:`QueueJob`) defined in qjobs.py that allows one
to contact the resource manager to get info about the status of the job.
Each concrete implementation of `QueueAdapter` should have a corresponding `QueueJob`.
"""
Error = QueueAdapterError
MaxNumLaunchesError = MaxNumLaunchesError
@classmethod
def autodoc(cls):
return """
# dictionary with info on the hardware available on this particular queue.
hardware:
num_nodes: # Number of nodes available on this queue. Mandatory
sockets_per_node: # Mandatory.
cores_per_socket: # Mandatory.
# dictionary with the options used to prepare the environment before submitting the job
job:
setup: # List of commands (str) executed before running (default empty)
omp_env: # Dictionary with OpenMP env variables (default empty i.e. no OpenMP)
modules: # List of modules to be imported (default empty)
shell_env: # Dictionary with shell env variables.
mpi_runner: # MPI runner i.e. mpirun, mpiexec, Default is None i.e. no mpirunner
pre_run: # List of commands executed before the run (default: empty)
post_run: # List of commands executed after the run (default: empty)
# dictionary with the name of the queue and optional parameters
# used to build/customize the header of the submission script.
queue:
qname: # Name of the queue (mandatory)
qparams: # Dictionary with values used to generate the header of the job script
# See pymatgen.io.abinitio.qadapters.py for the list of supported values.
# dictionary with the constraints that must be fulfilled in order to run on this queue.
limits:
min_cores: # Minimum number of cores (default 1)
max_cores: # Maximum number of cores (mandatory),
# hard limit to hint_cores, the limit beyond which the scheduler will not accept the job (mandatory)
hint_cores: # the limit used in the first setup of jobs,
# Fix_Critical method may increase this number until max_cores is reached
min_mem_per_proc: # Minimum memory per MPI process in Mb, units can be specified e.g. 1.4 Gb
# (default hardware.mem_per_core)
max_mem_per_proc: # Maximum memory per MPI process in Mb, units can be specified e.g. `1.4Gb`
# (default hardware.mem_per_node)
timelimit: # Initial time-limit
timelimit_hard: # The hard time-limit for this queue.
# Error handlers could try to submit jobs with increased timelimit
# up to timelimit_hard. If not specified, timelimit_hard == timelimit
condition: # MongoDB-like condition (default empty, i.e. not used)
allocation: # String defining the policy used to select the optimal number of CPUs.
# possible values are ["nodes", "force_nodes", "shared"]
# "nodes" means that we should try to allocate entire nodes if possible.
# This is a soft limit, in the sense that the qadapter may use a configuration
# that does not fulfill this requirement. If failing, it will try to use the
# smallest number of nodes compatible with the optimal configuration.
# Use `force_nodes` to enforce entire nodes allocation.
# `shared` mode does not enforce any constraint (default).
max_num_launches: # limit to the number of times a specific task can be restarted (default 10)
"""
def __init__(self, **kwargs):
"""
Args:
qname: Name of the queue.
qparams: Dictionary with the parameters used in the template.
setup: String or list of commands to execute during the initial setup.
modules: String or list of modules to load before running the application.
shell_env: Dictionary with the environment variables to export before running the application.
omp_env: Dictionary with the OpenMP variables.
pre_run: String or list of commands to execute before launching the calculation.
post_run: String or list of commands to execute once the calculation is completed.
mpi_runner: Path to the MPI runner or :class:`MpiRunner` instance. None if not used
max_num_launches: Maximum number of submissions that can be done for a specific task. Defaults to 10
qverbatim:
min_cores, max_cores, hint_cores: Minimum, maximum, and hint limits of number of cores that can be used
min_mem_per_proc: Minimum memory per process in megabytes.
max_mem_per_proc: Maximum memory per process in megabytes.
timelimit: initial time limit in seconds
timelimit_hard: hard time limit for this queue
priority: Priority level, integer number > 0
condition: Condition object (dictionary)
.. note::
priority is a non-negative integer used to order the qadapters. The :class:`TaskManager` will
try to run jobs on the qadapter with the highest priority if possible
"""
# TODO
#task_classes
# Make defensive copies so that we can change the values at runtime.
kwargs = copy.deepcopy(kwargs)
self.priority = int(kwargs.pop("priority"))
self.hw = Hardware(**kwargs.pop("hardware"))
self._parse_queue(kwargs.pop("queue"))
self._parse_limits(kwargs.pop("limits"))
self._parse_job(kwargs.pop("job"))
# List of dictionaries with the parameters used to submit jobs
# The launcher will use this information to increase the resources
self.launches = []
if kwargs:
raise ValueError("Found unknown keywords:\n%s" % list(kwargs.keys()))
self.validate_qparams()
# Initialize some values from the info reported in the partition.
self.set_mpi_procs(self.min_cores)
self.set_mem_per_proc(self.min_mem_per_proc)
self.set_master_mem_overhead(self.master_mem_overhead)
# Final consistency check.
self.validate_qparams()
def as_dict(self):
"""
Provides a simple though not complete dict serialization of the object (OMP missing, not all limits are
kept in the dictionary, ... other things to be checked)
Raise:
`ValueError` if errors.
"""
if self.has_omp:
raise NotImplementedError('as_dict method of QueueAdapter not yet implemented when OpenMP is activated')
return {'@module': self.__class__.__module__,
'@class': self.__class__.__name__,
'priority': self.priority,
'hardware': self.hw.as_dict(),
'queue': {'qtype': self.QTYPE,
'qname': self._qname,
'qnodes': self.qnodes,
'qparams': self._qparams},
'limits': {'timelimit': self._timelimit,
'min_cores': self.min_cores,
'max_cores': self.max_cores,
'min_mem_per_proc': self.min_mem_per_proc,
'max_mem_per_proc': self.max_mem_per_proc,
'master_mem_overhead': self.master_mem_overhead
},
'job': {},
'mpi_procs': self._mpi_procs,
'mem_per_proc': self._mem_per_proc,
'timelimit': self._timelimit,
}
@classmethod
def from_dict(cls, dd):
priority = dd.pop('priority')
hardware = dd.pop('hardware')
queue = dd.pop('queue')
limits = dd.pop('limits')
job = dd.pop('job')
qa = make_qadapter(priority=priority, hardware=hardware, queue=queue, limits=limits, job=job)
qa.set_mpi_procs(dd.pop('mpi_procs'))
qa.set_timelimit(dd.pop('timelimit'))
qa.set_mem_per_proc(dd.pop('mem_per_proc'))
dd.pop('@module', None)
dd.pop('@class', None)
if dd:
raise ValueError("Found unknown keywords:\n%s" % list(dd.keys()))
return qa
def validate_qparams(self):
"""
Check if the keys specified by the user in qparams are supported.
Raise:
`ValueError` if errors.
"""
# No validation for ShellAdapter.
if isinstance(self, ShellAdapter): return
# Parse the template so that we know the list of supported options.
err_msg = ""
for param in self.qparams:
if param not in self.supported_qparams:
err_msg += "Unsupported QUEUE parameter name %s\n" % param
err_msg += "Supported parameters:\n"
for param_sup in self.supported_qparams:
err_msg += " %s \n" % param_sup
if err_msg:
raise ValueError(err_msg)
def _parse_limits(self, d):
# Time limits.
self.set_timelimit(qu.timelimit_parser(d.pop("timelimit")))
tl_hard = d.pop("timelimit_hard", None)
tl_hard = qu.timelimit_parser(tl_hard) if tl_hard is not None else self.timelimit
self.set_timelimit_hard(tl_hard)
# Cores
self.min_cores = int(d.pop("min_cores", 1))
self.max_cores = int(d.pop("max_cores"))
self.hint_cores = int(d.pop("hint_cores", self.max_cores))
if self.min_cores > self.max_cores:
raise ValueError("min_cores %s cannot be greater than max_cores %s" % (self.min_cores, self.max_cores))
# Memory
# FIXME: Needed because autoparal 1 with paral_kgb 1 is not able to estimate memory
self.min_mem_per_proc = qu.any2mb(d.pop("min_mem_per_proc", self.hw.mem_per_core))
self.max_mem_per_proc = qu.any2mb(d.pop("max_mem_per_proc", self.hw.mem_per_node))
self._master_mem_overhead = qu.any2mb(d.pop("master_mem_overhead", 0))
# Misc
self.max_num_launches = int(d.pop("max_num_launches", 10))
self.condition = Condition(d.pop("condition", {}))
self.allocation = d.pop("allocation", "shared")
if self.allocation not in ("nodes", "force_nodes", "shared"):
raise ValueError("Wrong value for `allocation` option")
if d:
raise ValueError("Found unknown keyword(s) in limits section:\n %s" % d.keys())
def _parse_job(self, d):
setup = d.pop("setup", None)
if is_string(setup): setup = [setup]
self.setup = setup[:] if setup is not None else []
omp_env = d.pop("omp_env", None)
self.omp_env = omp_env.copy() if omp_env is not None else {}
modules = d.pop("modules", None)
if is_string(modules): modules = [modules]
self.modules = modules[:] if modules is not None else []
shell_env = d.pop("shell_env", None)
self.shell_env = shell_env.copy() if shell_env is not None else {}
self.mpi_runner = d.pop("mpi_runner", None)
if not isinstance(self.mpi_runner, MpiRunner):
self.mpi_runner = MpiRunner(self.mpi_runner)
pre_run = d.pop("pre_run", None)
if is_string(pre_run): pre_run = [pre_run]
self.pre_run = pre_run[:] if pre_run is not None else []
post_run = d.pop("post_run", None)
if is_string(post_run): post_run = [post_run]
self.post_run = post_run[:] if post_run is not None else []
if d:
raise ValueError("Found unknown keyword(s) in job section:\n %s" % d.keys())
def _parse_queue(self, d):
# Init params
qparams = d.pop("qparams", None)
self._qparams = copy.deepcopy(qparams) if qparams is not None else {}
self.set_qname(d.pop("qname", ""))
self.qnodes = d.pop("qnodes", "standard")
if self.qnodes not in ["standard", "shared", "exclusive"]:
raise ValueError("Nodes must be either in standard, shared or exclusive mode "
"while qnodes parameter was {}".format(self.qnodes))
if d:
raise ValueError("Found unknown keyword(s) in queue section:\n %s" % d.keys())
def __str__(self):
lines = ["%s:%s" % (self.__class__.__name__, self.qname)]
app = lines.append
app("Hardware:\n" + str(self.hw))
#lines.extend(["qparams:\n", str(self.qparams)])
if self.has_omp: app(str(self.omp_env))
return "\n".join(lines)
@property
def qparams(self):
"""Dictionary with the parameters used to construct the header."""
return self._qparams
@lazy_property
def supported_qparams(self):
"""
Dictionary with the supported parameters that can be passed to the
queue manager (obtained by parsing QTEMPLATE).
"""
import re
return re.findall("\$\$\{(\w+)\}", self.QTEMPLATE)
@property
def has_mpi(self):
"""True if we are using MPI"""
return bool(self.mpi_runner)
@property
def has_omp(self):
"""True if we are using OpenMP threads"""
return hasattr(self, "omp_env") and bool(getattr(self, "omp_env"))
@property
def num_cores(self):
"""Total number of cores employed"""
return self.mpi_procs * self.omp_threads
@property
def omp_threads(self):
"""Number of OpenMP threads."""
if self.has_omp:
return self.omp_env["OMP_NUM_THREADS"]
else:
return 1
@property
def pure_mpi(self):
"""True if only MPI is used."""
return self.has_mpi and not self.has_omp
@property
def pure_omp(self):
"""True if only OpenMP is used."""
return self.has_omp and not self.has_mpi
@property
def hybrid_mpi_omp(self):
"""True if we are running in MPI+Openmp mode."""
return self.has_omp and self.has_mpi
@property
def run_info(self):
"""String with info on the run."""
return "MPI: %d, OMP: %d" % (self.mpi_procs, self.omp_threads)
def deepcopy(self):
"""Deep copy of the object."""
return copy.deepcopy(self)
def record_launch(self, queue_id): # retcode):
"""Save submission"""
self.launches.append(
AttrDict(queue_id=queue_id, mpi_procs=self.mpi_procs, omp_threads=self.omp_threads,
mem_per_proc=self.mem_per_proc, timelimit=self.timelimit))
return len(self.launches)
def remove_launch(self, index):
"""Remove launch with the given index."""
self.launches.pop(index)
@property
def num_launches(self):
"""Number of submission tried with this adapter so far."""
return len(self.launches)
@property
def last_launch(self):
"""Return the last launch."""
if len(self.launches) > 0:
return self.launches[-1]
else:
return None
def validate(self):
"""Validate the parameters of the run. Raises self.Error if invalid parameters."""
errors = []
app = errors.append
if not self.hint_cores >= self.mpi_procs * self.omp_threads >= self.min_cores:
app("self.hint_cores >= mpi_procs * omp_threads >= self.min_cores not satisfied")
if self.omp_threads > self.hw.cores_per_node:
app("omp_threads > hw.cores_per_node")
if self.mem_per_proc > self.hw.mem_per_node:
app("mem_mb >= self.hw.mem_per_node")
if not self.max_mem_per_proc >= self.mem_per_proc >= self.min_mem_per_proc:
app("self.max_mem_per_proc >= mem_mb >= self.min_mem_per_proc not satisfied")
if self.priority <= 0:
app("priority must be > 0")
if not (1 <= self.min_cores <= self.hw.num_cores >= self.hint_cores):
app("1 <= min_cores <= hardware num_cores >= hint_cores not satisfied")
if errors:
raise self.Error(str(self) + "\n".join(errors))
def set_omp_threads(self, omp_threads):
"""Set the number of OpenMP threads."""
self.omp_env["OMP_NUM_THREADS"] = omp_threads
@property
def mpi_procs(self):
"""Number of CPUs used for MPI."""
return self._mpi_procs
def set_mpi_procs(self, mpi_procs):
"""Set the number of MPI processes to mpi_procs"""
self._mpi_procs = mpi_procs
@property
def qname(self):
"""The name of the queue."""
return self._qname
def set_qname(self, qname):
"""Set the name of the queue."""
self._qname = qname
# todo this assumes only one wall time. i.e. the one in the manager file is the one always used
# we should use the standard walltime to start with but also allow to increase the walltime
@property
def timelimit(self):
"""Returns the walltime in seconds."""
return self._timelimit
@property
def timelimit_hard(self):
"""Returns the walltime in seconds."""
return self._timelimit_hard
def set_timelimit(self, timelimit):
"""Set the start walltime in seconds, fix method may increase this one until timelimit_hard is reached."""
self._timelimit = timelimit
def set_timelimit_hard(self, timelimit_hard):
"""Set the maximal possible walltime in seconds."""
self._timelimit_hard = timelimit_hard
@property
def mem_per_proc(self):
"""The memory per process in megabytes."""
return self._mem_per_proc
@property
def master_mem_overhead(self):
"""The memory overhead for the master process in megabytes."""
return self._master_mem_overhead
def set_mem_per_proc(self, mem_mb):
"""
Set the memory per process in megabytes. (The old behaviour of clamping mem_mb to min_mem_per_proc has been removed, see the comment below.)
"""
# Hack needed because abinit is still not able to estimate memory.
# COMMENTED by David.
# This is not needed anymore here because the "hack" is performed directly in select_qadapter/_use_qadpos_pconf
# methods of TaskManager. Moreover, this hack should be performed somewhere else (this part should be
# independent of abinit ... and if we want to have less memory than the average memory available per node, we
# have to allow it!)
#if mem_mb <= self.min_mem_per_proc: mem_mb = self.min_mem_per_proc
self._mem_per_proc = int(mem_mb)
def set_master_mem_overhead(self, mem_mb):
"""
Set the memory overhead for the master process in megabytes.
"""
if mem_mb < 0:
raise ValueError("Memory overhead for the master process should be >= 0")
self._master_mem_overhead = int(mem_mb)
@property
def total_mem(self):
"""Total memory required by the job in megabytes."""
return Memory(self.mem_per_proc * self.mpi_procs + self.master_mem_overhead, "Mb")
@abc.abstractmethod
def cancel(self, job_id):
"""
Cancel the job.
Args:
job_id: Job identifier.
Returns:
Exit status.
"""
def can_run_pconf(self, pconf):
"""True if the qadapter in principle is able to run the :class:`ParalConf` pconf"""
if not self.hint_cores >= pconf.num_cores >= self.min_cores: return False
if not self.hw.can_use_omp_threads(self.omp_threads): return False
if pconf.mem_per_proc > self.hw.mem_per_node: return False
if self.allocation == "force_nodes" and pconf.num_cores % self.hw.cores_per_node != 0:
return False
return self.condition(pconf)
def distribute(self, mpi_procs, omp_threads, mem_per_proc):
"""
Returns (num_nodes, mpi_per_node)
Aggressive: When Open MPI thinks that it is in an exactly- or under-subscribed mode
(i.e., the number of running processes is equal to or less than the number of available processors),
MPI processes will automatically run in aggressive mode, meaning that they will never voluntarily give
up the processor to other processes. With some network transports, this means that Open MPI will spin
in tight loops attempting to make message passing progress, effectively causing other processes to not get
any CPU cycles (and therefore never make any progress)
"""
class Distrib(namedtuple("Distrib", "num_nodes mpi_per_node exact")):
pass
#@property
#def mem_per_node
# return self.mpi_per_node * mem_per_proc
#def set_nodes(self, nodes):
hw = self.hw
# TODO: Add check on user-memory
if mem_per_proc <= 0:
logger.warning("mem_per_proc <= 0")
mem_per_proc = hw.mem_per_core
if mem_per_proc > hw.mem_per_node:
raise self.Error(
"mem_per_proc > mem_per_node.\n Cannot distribute mpi_procs %d, omp_threads %d, mem_per_proc %s" %
(mpi_procs, omp_threads, mem_per_proc))
# Try to use all the cores in the node.
num_nodes, rest_cores = hw.divmod_node(mpi_procs, omp_threads)
if num_nodes == 0 and mpi_procs * mem_per_proc <= hw.mem_per_node:
# One node is enough
return Distrib(num_nodes=1, mpi_per_node=mpi_procs, exact=True)
if num_nodes == 0: num_nodes = 2
mpi_per_node = mpi_procs // num_nodes
if mpi_per_node * mem_per_proc <= hw.mem_per_node and rest_cores == 0:
# Commensurate with nodes.
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=True)
#if mode == "block", "cyclic"
# Try first to pack MPI processors in a node as much as possible
mpi_per_node = int(hw.mem_per_node / mem_per_proc)
assert mpi_per_node != 0
num_nodes = (mpi_procs * omp_threads) // mpi_per_node
print("exact --> false", num_nodes, mpi_per_node)
if mpi_per_node * omp_threads <= hw.cores_per_node and mem_per_proc <= hw.mem_per_node:
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=False)
if (mpi_procs * omp_threads) % mpi_per_node != 0:
# Have to reduce the number of MPI procs per node
for mpi_per_node in reversed(range(1, mpi_per_node)):
if mpi_per_node > hw.cores_per_node: continue
num_nodes = (mpi_procs * omp_threads) // mpi_per_node
if (mpi_procs * omp_threads) % mpi_per_node == 0 and mpi_per_node * mem_per_proc <= hw.mem_per_node:
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=False)
else:
raise self.Error("Cannot distribute mpi_procs %d, omp_threads %d, mem_per_proc %s" %
(mpi_procs, omp_threads, mem_per_proc))
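# --- Editor's illustrative sketch (not part of the original file) ---
# distribute() answers "how many nodes, and how many MPI ranks per node?" given
# the per-process memory. `qad` is a hypothetical adapter whose hardware section
# declares 16 cores and 64 Gb per node; the numbers are made up.
#
#     >>> dist = qad.distribute(mpi_procs=32, omp_threads=1, mem_per_proc=2000)
#     >>> dist.num_nodes, dist.mpi_per_node, dist.exact
#     (2, 16, True)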
def optimize_params(self):
"""
This method is called in get_subs_dict. Return a dict with parameters to be added to qparams
Subclasses may provide a specialized version.
"""
logger.debug("optimize_params of baseclass --> no optimization available!!!")
return {}
def get_subs_dict(self):
"""
Return substitution dict for replacements into the template
Subclasses may want to customize this method.
"""
#d = self.qparams.copy()
d = self.qparams
d.update(self.optimize_params())
# clean null values
subs_dict = {k: v for k, v in d.items() if v is not None}
#print("subs_dict:", subs_dict)
return subs_dict
def _make_qheader(self, job_name, qout_path, qerr_path):
"""Return a string with the options that are passed to the resource manager."""
# get substitution dict for replacements into the template
subs_dict = self.get_subs_dict()
# Set job_name and the names for the stderr and stdout of the
# queue manager (note the use of the extensions .qout and .qerr
# so that we can easily locate these files).
subs_dict['job_name'] = job_name.replace('/', '_')
subs_dict['_qout_path'] = qout_path
subs_dict['_qerr_path'] = qerr_path
qtemplate = QScriptTemplate(self.QTEMPLATE)
# might contain unused parameters as leftover $$.
unclean_template = qtemplate.safe_substitute(subs_dict)
# Remove lines with leftover $$.
clean_template = []
for line in unclean_template.split('\n'):
if '$$' not in line:
clean_template.append(line)
return '\n'.join(clean_template)
def get_script_str(self, job_name, launch_dir, executable, qout_path, qerr_path,
stdin=None, stdout=None, stderr=None, exec_args=None):
"""
Returns a (multi-line) String representing the queue script, e.g. PBS script.
Uses the template_file along with internal parameters to create the script.
Args:
job_name: Name of the job.
launch_dir: (str) The directory the job will be launched in.
executable: String with the name of the executable to be executed or list of commands
qout_path: Path of the Queue manager output file.
qerr_path: Path of the Queue manager error file.
exec_args: List of arguments passed to executable (used only if executable is a string, default: empty)
"""
# PbsPro does not accept job_names longer than 15 chars.
if len(job_name) > 14 and isinstance(self, PbsProAdapter):
job_name = job_name[:14]
# Construct the header for the Queue Manager.
qheader = self._make_qheader(job_name, qout_path, qerr_path)
# Add the bash section.
se = ScriptEditor()
if self.setup:
se.add_comment("Setup section")
se.add_lines(self.setup)
se.add_emptyline()
if self.modules:
se.add_comment("Load Modules")
se.add_line("module purge")
se.load_modules(self.modules)
se.add_emptyline()
if self.has_omp:
se.add_comment("OpenMp Environment")
se.declare_vars(self.omp_env)
se.add_emptyline()
if self.shell_env:
se.add_comment("Shell Environment")
se.declare_vars(self.shell_env)
se.add_emptyline()
# Cd to launch_dir
se.add_line("cd " + os.path.abspath(launch_dir))
if self.pre_run:
se.add_comment("Commands before execution")
se.add_lines(self.pre_run)
se.add_emptyline()
# Construct the string to run the executable with MPI and mpi_procs.
if is_string(executable):
line = self.mpi_runner.string_to_run(executable, self.mpi_procs,
stdin=stdin, stdout=stdout, stderr=stderr, exec_args=exec_args)
se.add_line(line)
else:
assert isinstance(executable, (list, tuple))
se.add_lines(executable)
if self.post_run:
se.add_emptyline()
se.add_comment("Commands after execution")
se.add_lines(self.post_run)
return qheader + se.get_script_str() + "\n"
def submit_to_queue(self, script_file):
"""
Public API: wraps the concrete implementation _submit_to_queue
Raises:
`self.MaxNumLaunchesError` if we have already tried to submit the job max_num_launches
`self.Error` if generic error
"""
if not os.path.exists(script_file):
raise self.Error('Cannot find script file located at: {}'.format(script_file))
if self.num_launches == self.max_num_launches:
raise self.MaxNumLaunchesError("num_launches %s == max_num_launches %s" % (self.num_launches, self.max_num_launches))
# Call the concrete implementation.
s = self._submit_to_queue(script_file)
self.record_launch(s.qid)
if s.qid is None:
raise self.Error("Error in job submission with %s. file %s \n" %
(self.__class__.__name__, script_file) +
"The error response reads:\n %s \n " % s.err +
"The out response reads:\n %s \n" % s.out)
# Here we create a concrete instance of QueueJob
return QueueJob.from_qtype_and_id(self.QTYPE, s.qid, self.qname), s.process
@abc.abstractmethod
def _submit_to_queue(self, script_file):
"""
Submits the job to the queue, probably using subprocess or shutil
This method must be provided by the concrete classes and will be called by submit_to_queue
Args:
script_file: (str) name of the script file to use (String)
Returns:
queue_id, process
"""
def get_njobs_in_queue(self, username=None):
"""
Returns the number of jobs in the queue, probably using subprocess or shutil to
call a command like 'qstat'. Returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
if username is None: username = getpass.getuser()
njobs, process = self._get_njobs_in_queue(username=username)
if process is not None and process.returncode != 0:
# there's a problem talking to squeue server?
err_msg = ('Error trying to get the number of jobs in the queue.\n' +
'The error response reads:\n {}'.format(process.stderr.read()))
logger.critical(err_msg)
if not isinstance(self, ShellAdapter):
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs
@abc.abstractmethod
def _get_njobs_in_queue(self, username):
"""
Concrete Subclasses must implement this method. Return (njobs, process)
"""
# Methods to fix problems
def add_exclude_nodes(self, nodes):
return _EXCL_NODES_FILE.add_nodes(self.qname, nodes)
def get_exclude_nodes(self):
return _EXCL_NODES_FILE.read_nodes(self.qname)
@abc.abstractmethod
def exclude_nodes(self, nodes):
"""Method to exclude nodes in the calculation. Return True if nodes have been excluded"""
def more_mem_per_proc(self, factor=1):
"""
Method to increase the amount of memory asked for, by factor.
Return: the new memory if successful. Raises self.Error if the memory cannot be increased further.
"""
base_increase = 2000
old_mem = self.mem_per_proc
new_mem = old_mem + factor*base_increase
if new_mem < self.hw.mem_per_node:
self.set_mem_per_proc(new_mem)
return new_mem
raise self.Error('could not increase mem_per_proc further')
def more_master_mem_overhead(self, mem_increase_mb=1000):
"""
Method to increase the memory overhead asked for the master node, by mem_increase_mb.
Return: the new master memory overhead if successful. Raises self.Error if it cannot be increased further.
"""
old_master_mem_overhead = self.master_mem_overhead
new_master_mem_overhead = old_master_mem_overhead + mem_increase_mb
if new_master_mem_overhead + self.mem_per_proc < self.hw.mem_per_node:
self.set_master_mem_overhead(new_master_mem_overhead)
return new_master_mem_overhead
raise self.Error('could not increase master_mem_overhead further')
def more_cores(self, factor=1):
"""
Method to increase the number of MPI procs.
Return: the new number of processors if successful. Raises self.Error if the number of processors cannot be increased further.
"""
# TODO : find a formula that works for all max_cores
if self.max_cores > 40:
base_increase = 4 * int(self.max_cores / 40)
else:
base_increase = 4
new_cores = self.hint_cores + factor * base_increase
if new_cores < self.max_cores:
self.hint_cores = new_cores
return new_cores
raise self.Error('hint_cores %s reached the limit set by max_cores %s' % (new_cores, self.max_cores))
def more_time(self, factor=1):
"""
Method to increase the wall time
"""
base_increase = int(self.timelimit_hard / 10)
new_time = self.timelimit + base_increase*factor
if new_time < self.timelimit_hard:
self.set_timelimit(new_time)
return new_time
self.priority = -1
raise self.Error("increasing time is not possible, the hard limit has been raised")
####################
# Concrete classes #
####################
class ShellAdapter(QueueAdapter):
"""Simple Adapter used to submit runs through the shell."""
QTYPE = "shell"
QTEMPLATE = """\
#!/bin/bash
$${qverbatim}
"""
def cancel(self, job_id):
return os.system("kill -9 %d" % job_id)
def _submit_to_queue(self, script_file):
# submit the job, return process and pid.
process = Popen(("/bin/bash", script_file), stderr=PIPE)
return SubmitResults(qid=process.pid, out='no out in shell submission', err='no err in shell submission', process=process)
def _get_njobs_in_queue(self, username):
return None, None
def exclude_nodes(self, nodes):
return False
class SlurmAdapter(QueueAdapter):
"""Adapter for SLURM."""
QTYPE = "slurm"
QTEMPLATE = """\
#!/bin/bash
#SBATCH --partition=$${partition}
#SBATCH --job-name=$${job_name}
#SBATCH --nodes=$${nodes}
#SBATCH --total_tasks=$${total_tasks}
#SBATCH --ntasks=$${ntasks}
#SBATCH --ntasks-per-node=$${ntasks_per_node}
#SBATCH --cpus-per-task=$${cpus_per_task}
#####SBATCH --mem=$${mem}
#SBATCH --mem-per-cpu=$${mem_per_cpu}
#SBATCH --hint=$${hint}
#SBATCH --time=$${time}
#SBATCH --exclude=$${exclude_nodes}
#SBATCH --account=$${account}
#SBATCH --mail-user=$${mail_user}
#SBATCH --mail-type=$${mail_type}
#SBATCH --constraint=$${constraint}
#SBATCH --gres=$${gres}
#SBATCH --requeue=$${requeue}
#SBATCH --nodelist=$${nodelist}
#SBATCH --propagate=$${propagate}
#SBATCH --licenses=$${licenses}
#SBATCH --output=$${_qout_path}
#SBATCH --error=$${_qerr_path}
$${qverbatim}
"""
def set_qname(self, qname):
super(SlurmAdapter, self).set_qname(qname)
if qname:
self.qparams["partition"] = qname
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
super(SlurmAdapter, self).set_mpi_procs(mpi_procs)
self.qparams["ntasks"] = mpi_procs
def set_omp_threads(self, omp_threads):
super(SlurmAdapter, self).set_omp_threads(omp_threads)
self.qparams["cpus_per_task"] = omp_threads
def set_mem_per_proc(self, mem_mb):
"""Set the memory per process in megabytes"""
super(SlurmAdapter, self).set_mem_per_proc(mem_mb)
self.qparams["mem_per_cpu"] = self.mem_per_proc
# Remove mem if it's defined.
#self.qparams.pop("mem", None)
def set_timelimit(self, timelimit):
super(SlurmAdapter, self).set_timelimit(timelimit)
self.qparams["time"] = qu.time2slurm(timelimit)
def cancel(self, job_id):
return os.system("scancel %d" % job_id)
def optimize_params(self):
params = {}
if self.allocation == "nodes":
# run on the smallest number of nodes compatible with the configuration
params["nodes"] = max(int(math.ceil(self.mpi_procs / self.hw.cores_per_node)),
int(math.ceil(self.total_mem / self.hw.mem_per_node)))
return params
#dist = self.distribute(self.mpi_procs, self.omp_threads, self.mem_per_proc)
##print(dist)
#if False and dist.exact:
# # Can optimize parameters
# self.qparams["nodes"] = dist.num_nodes
# self.qparams.pop("ntasks", None)
# self.qparams["ntasks_per_node"] = dist.mpi_per_node
# self.qparams["cpus_per_task"] = self.omp_threads
# self.qparams["mem"] = dist.mpi_per_node * self.mem_per_proc
# self.qparams.pop("mem_per_cpu", None)
#else:
# # Delegate to slurm.
# self.qparams["ntasks"] = self.mpi_procs
# self.qparams.pop("nodes", None)
# self.qparams.pop("ntasks_per_node", None)
# self.qparams["cpus_per_task"] = self.omp_threads
# self.qparams["mem_per_cpu"] = self.mem_per_proc
# self.qparams.pop("mem", None)
#return {}
def _submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
process = Popen(['sbatch', script_file], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
# grab the returncode. SLURM returns 0 if the job was successful
queue_id = None
if process.returncode == 0:
try:
# sbatch output should be of the form 'Submitted batch job 2561553' - grab the job id from the fourth token
queue_id = int(out.split()[3])
logger.info('Job submission was successful and queue_id is {}'.format(queue_id))
except:
# probably error parsing job code
logger.critical('Could not parse job id following slurm...')
return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def exclude_nodes(self, nodes):
try:
if 'exclude_nodes' not in self.qparams:
self.qparams.update({'exclude_nodes': 'node' + nodes[0]})
print('excluded node %s' % nodes[0])
for node in nodes[1:]:
self.qparams['exclude_nodes'] += ',node' + node
print('excluded node %s' % node)
return True
except (KeyError, IndexError):
raise self.Error('qadapter failed to exclude nodes')
def _get_njobs_in_queue(self, username):
process = Popen(['squeue', '-o "%u"', '-u', username], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
njobs = None
if process.returncode == 0:
# parse the result. lines should have this form:
# username
# count lines that include the username in it
outs = out.splitlines()
njobs = len([line.split() for line in outs if username in line])
return njobs, process
class PbsProAdapter(QueueAdapter):
"""Adapter for PbsPro"""
QTYPE = "pbspro"
#PBS -l select=$${select}:ncpus=$${ncpus}:vmem=$${vmem}mb:mpiprocs=$${mpiprocs}:ompthreads=$${ompthreads}
#PBS -l select=$${select}:ncpus=1:vmem=$${vmem}mb:mpiprocs=1:ompthreads=$${ompthreads}
####PBS -l select=$${select}:ncpus=$${ncpus}:vmem=$${vmem}mb:mpiprocs=$${mpiprocs}:ompthreads=$${ompthreads}
####PBS -l pvmem=$${pvmem}mb
QTEMPLATE = """\
#!/bin/bash
#PBS -q $${queue}
#PBS -N $${job_name}
#PBS -A $${account}
#PBS -l select=$${select}
#PBS -l pvmem=$${pvmem}mb
#PBS -l walltime=$${walltime}
#PBS -l model=$${model}
#PBS -l place=$${place}
#PBS -W group_list=$${group_list}
#PBS -M $${mail_user}
#PBS -m $${mail_type}
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
$${qverbatim}
"""
def set_qname(self, qname):
super(PbsProAdapter, self).set_qname(qname)
if qname:
self.qparams["queue"] = qname
def set_timelimit(self, timelimit):
super(PbsProAdapter, self).set_timelimit(timelimit)
self.qparams["walltime"] = qu.time2pbspro(timelimit)
def set_mem_per_proc(self, mem_mb):
"""Set the memory per process in megabytes"""
super(PbsProAdapter, self).set_mem_per_proc(mem_mb)
#self.qparams["vmem"] = self.mem_per_proc
self.qparams["pvmem"] = self.mem_per_proc
def cancel(self, job_id):
return os.system("qdel %d" % job_id)
def optimize_params(self):
return {"select": self.get_select()}
def get_select(self, ret_dict=False):
"""
Select is not the most intuitive command. For more info see:
* http://www.cardiff.ac.uk/arcca/services/equipment/User-Guide/pbs.html
* https://portal.ivec.org/docs/Supercomputers/PBS_Pro
"""
hw, mem_per_proc = self.hw, int(self.mem_per_proc)
#dist = self.distribute(self.mpi_procs, self.omp_threads, mem_per_proc)
"""
if self.pure_mpi:
num_nodes, rest_cores = hw.divmod_node(self.mpi_procs, self.omp_threads)
if num_nodes == 0:
logger.info("IN_CORE PURE MPI: %s" % self.run_info)
chunks = 1
ncpus = rest_cores
mpiprocs = rest_cores
vmem = mem_per_proc * ncpus
ompthreads = 1
elif rest_cores == 0:
# Can allocate entire nodes because self.mpi_procs is divisible by cores_per_node.
logger.info("PURE MPI run commensurate with cores_per_node %s" % self.run_info)
chunks = num_nodes
ncpus = hw.cores_per_node
mpiprocs = hw.cores_per_node
vmem = ncpus * mem_per_proc
ompthreads = 1
else:
logger.info("OUT-OF-CORE PURE MPI (not commensurate with cores_per_node): %s" % self.run_info)
chunks = self.mpi_procs
ncpus = 1
mpiprocs = 1
vmem = mem_per_proc
ompthreads = 1
elif self.pure_omp:
# Pure OMP run.
logger.info("PURE OPENMP run: %s" % self.run_info)
assert hw.can_use_omp_threads(self.omp_threads)
chunks = 1
ncpus = self.omp_threads
mpiprocs = 1
vmem = mem_per_proc
ompthreads = self.omp_threads
elif self.hybrid_mpi_omp:
assert hw.can_use_omp_threads(self.omp_threads)
num_nodes, rest_cores = hw.divmod_node(self.mpi_procs, self.omp_threads)
#print(num_nodes, rest_cores)
# TODO: test this
if rest_cores == 0 or num_nodes == 0:
logger.info("HYBRID MPI-OPENMP run, perfectly divisible among nodes: %s" % self.run_info)
chunks = max(num_nodes, 1)
mpiprocs = self.mpi_procs // chunks
chunks = chunks
ncpus = mpiprocs * self.omp_threads
mpiprocs = mpiprocs
vmem = mpiprocs * mem_per_proc
ompthreads = self.omp_threads
else:
logger.info("HYBRID MPI-OPENMP, NOT commensurate with nodes: %s" % self.run_info)
chunks=self.mpi_procs
ncpus=self.omp_threads
mpiprocs=1
vmem= mem_per_proc
ompthreads=self.omp_threads
else:
raise RuntimeError("You should not be here")
"""
if self.qnodes == "standard":
return self._get_select_standard(ret_dict=ret_dict)
else:
return self._get_select_with_master_mem_overhead(ret_dict=ret_dict)
def _get_select_with_master_mem_overhead(self, ret_dict=False):
if self.has_omp:
raise NotImplementedError("select with master mem overhead not yet implemented with has_omp")
if self.qnodes == "exclusive":
return self._get_select_with_master_mem_overhead_exclusive(ret_dict=ret_dict)
elif self.qnodes == "shared":
return self._get_select_with_master_mem_overhead_shared(ret_dict=ret_dict)
else:
raise ValueError("Wrong value of qnodes parameter : {}".format(self.qnodes))
def _get_select_with_master_mem_overhead_shared(self, ret_dict=False):
chunk_master, ncpus_master, vmem_master, mpiprocs_master = 1, 1, self.mem_per_proc+self.master_mem_overhead, 1
if self.mpi_procs > 1:
chunks_slaves, ncpus_slaves, vmem_slaves, mpiprocs_slaves = self.mpi_procs - 1, 1, self.mem_per_proc, 1
select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
mpiprocs_master=mpiprocs_master, vmem_master=int(vmem_master),
chunks_slaves=chunks_slaves, ncpus_slaves=ncpus_slaves,
mpiprocs_slaves=mpiprocs_slaves, vmem_slaves=int(vmem_slaves))
s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:mpiprocs={mpiprocs_master}+" \
"{chunks_slaves}:ncpus={ncpus_slaves}:vmem={vmem_slaves}mb:" \
"mpiprocs={mpiprocs_slaves}".format(**select_params)
tot_ncpus = chunk_master*ncpus_master + chunks_slaves*ncpus_slaves
if tot_ncpus != self.mpi_procs:
raise ValueError('Total number of cpus is different from mpi_procs ...')
else:
select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
mpiprocs_master=mpiprocs_master, vmem_master=int(vmem_master))
s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:" \
"mpiprocs={mpiprocs_master}".format(**select_params)
if ret_dict:
return s, select_params
return s
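# --- Editor's illustrative example (not part of the original file) ---
# Shape of the select string produced above in "shared" mode for 4 MPI processes,
# 2000 Mb per process and a 1000 Mb master overhead (placeholder values): one
# chunk for the master carrying the extra memory, plus one chunk per slave:
#
#     "1:ncpus=1:vmem=3000mb:mpiprocs=1+3:ncpus=1:vmem=2000mb:mpiprocs=1"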
def _get_select_with_master_mem_overhead_exclusive(self, ret_dict=False):
max_ncpus_master = min(self.hw.cores_per_node,
int((self.hw.mem_per_node-self.mem_per_proc-self.master_mem_overhead)
/ self.mem_per_proc) + 1)
if max_ncpus_master >= self.mpi_procs:
chunk, ncpus, vmem, mpiprocs = 1, self.mpi_procs, self.hw.mem_per_node, self.mpi_procs
select_params = AttrDict(chunks=chunk, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(vmem))
s = "{chunk}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}".format(**select_params)
tot_ncpus = chunk*ncpus
else:
ncpus_left = self.mpi_procs-max_ncpus_master
max_ncpus_per_slave_node = min(self.hw.cores_per_node, int(self.hw.mem_per_node/self.mem_per_proc))
nslaves_float = float(ncpus_left)/float(max_ncpus_per_slave_node)
ncpus_per_slave = max_ncpus_per_slave_node
mpiprocs_slaves = max_ncpus_per_slave_node
chunk_master = 1
vmem_slaves = self.hw.mem_per_node
explicit_last_slave = False
chunk_last_slave, ncpus_last_slave, vmem_last_slave, mpiprocs_last_slave = None, None, None, None
if nslaves_float > int(nslaves_float):
chunks_slaves = int(nslaves_float) + 1
pot_ncpus_all_slaves = chunks_slaves*ncpus_per_slave
if pot_ncpus_all_slaves >= self.mpi_procs-1:
explicit_last_slave = True
chunks_slaves = chunks_slaves-1
chunk_last_slave = 1
ncpus_master = 1
ncpus_last_slave = self.mpi_procs - 1 - chunks_slaves*ncpus_per_slave
vmem_last_slave = self.hw.mem_per_node
mpiprocs_last_slave = ncpus_last_slave
else:
ncpus_master = self.mpi_procs-pot_ncpus_all_slaves
if ncpus_master > max_ncpus_master:
raise ValueError('ncpus for the master node exceeds the maximum ncpus for the master ... this '
'should not happen ...')
if ncpus_master < 1:
raise ValueError('ncpus for the master node is 0 ... this should not happen ...')
elif nslaves_float == int(nslaves_float):
chunks_slaves = int(nslaves_float)
ncpus_master = max_ncpus_master
else:
raise ValueError('nslaves_float < int(nslaves_float) ...')
vmem_master, mpiprocs_master = self.hw.mem_per_node, ncpus_master
if explicit_last_slave:
select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
mpiprocs_master=mpiprocs_master, vmem_master=int(vmem_master),
chunks_slaves=chunks_slaves, ncpus_per_slave=ncpus_per_slave,
mpiprocs_slaves=mpiprocs_slaves, vmem_slaves=int(vmem_slaves),
chunk_last_slave=chunk_last_slave, ncpus_last_slave=ncpus_last_slave,
vmem_last_slave=int(vmem_last_slave), mpiprocs_last_slave=mpiprocs_last_slave)
s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:mpiprocs={mpiprocs_master}+" \
"{chunks_slaves}:ncpus={ncpus_per_slave}:vmem={vmem_slaves}mb:mpiprocs={mpiprocs_slaves}+" \
"{chunk_last_slave}:ncpus={ncpus_last_slave}:vmem={vmem_last_slave}mb:" \
"mpiprocs={mpiprocs_last_slave}".format(**select_params)
tot_ncpus = chunk_master*ncpus_master+chunks_slaves*ncpus_per_slave+chunk_last_slave*ncpus_last_slave
else:
select_params = AttrDict(chunk_master=chunk_master, ncpus_master=ncpus_master,
mpiprocs_master=mpiprocs_master, vmem_master=int(vmem_master),
chunks_slaves=chunks_slaves, ncpus_per_slave=ncpus_per_slave,
mpiprocs_slaves=mpiprocs_slaves, vmem_slaves=int(vmem_slaves))
s = "{chunk_master}:ncpus={ncpus_master}:vmem={vmem_master}mb:mpiprocs={mpiprocs_master}+" \
"{chunks_slaves}:ncpus={ncpus_per_slave}:vmem={vmem_slaves}mb:" \
"mpiprocs={mpiprocs_slaves}".format(**select_params)
tot_ncpus = chunk_master*ncpus_master + chunks_slaves*ncpus_per_slave
if tot_ncpus != self.mpi_procs:
raise ValueError('Total number of cpus is different from mpi_procs ...')
if ret_dict:
return s, select_params
return s
def _get_select_standard(self, ret_dict=False):
if not self.has_omp:
chunks, ncpus, vmem, mpiprocs = self.mpi_procs, 1, self.mem_per_proc, 1
select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(vmem))
s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}".format(**select_params)
else:
chunks, ncpus, vmem, mpiprocs, ompthreads = self.mpi_procs, self.omp_threads, self.mem_per_proc, 1, self.omp_threads
select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(vmem), ompthreads=ompthreads)
s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}:ompthreads={ompthreads}".format(**select_params)
if ret_dict:
return s, select_params
return s
def _submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
# grab the return code. PBS returns 0 if the job was successful
queue_id = None
if process.returncode == 0:
try:
# output should be of the form '2561553.sdb' or '352353.jessup' - just grab the first part for the job id
queue_id = int(out.split('.')[0])
except:
# probably error parsing job code
logger.critical("Could not parse job id following qsub...")
return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def _get_njobs_in_queue(self, username):
process = Popen(['qstat', '-a', '-u', username], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
njobs = None
if process.returncode == 0:
# parse the result
# lines should have this form
# '1339044.sdb username queuename 2012-02-29-16-43 20460 -- -- -- 00:20 C 00:09'
# count lines that include the username in it
# TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'.
outs = out.split('\n')
njobs = len([line.split() for line in outs if username in line])
return njobs, process
def exclude_nodes(self, nodes):
"""No meaning for Shell"""
return False
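# Illustrative sketch (not part of the adapter API; the helper name is
# hypothetical): how the "select" strings built by the methods above expand
# for PBSPro.
def _demo_pbs_select_string():
    # e.g. 4 pure-MPI processes with 2000 MB each -> one chunk per process,
    # submitted as: #PBS -l select=4:ncpus=1:vmem=2000mb:mpiprocs=1
    select_params = dict(chunks=4, ncpus=1, vmem=2000, mpiprocs=1)
    return "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}".format(**select_params)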
class TorqueAdapter(PbsProAdapter):
"""Adapter for Torque."""
QTYPE = "torque"
QTEMPLATE = """\
#!/bin/bash
#PBS -q $${queue}
#PBS -N $${job_name}
#PBS -A $${account}
#PBS -l pmem=$${pmem}mb
####PBS -l mppwidth=$${mppwidth}
#PBS -l nodes=$${nodes}:ppn=$${ppn}
#PBS -l walltime=$${walltime}
#PBS -l model=$${model}
#PBS -l place=$${place}
#PBS -W group_list=$${group_list}
#PBS -M $${mail_user}
#PBS -m $${mail_type}
# Submission environment
#PBS -V
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
$${qverbatim}
"""
def set_mem_per_proc(self, mem_mb):
"""Set the memory per process in megabytes"""
QueueAdapter.set_mem_per_proc(self, mem_mb)
self.qparams["pmem"] = self.mem_per_proc
#self.qparams["mem"] = self.mem_per_proc
#@property
#def mpi_procs(self):
# """Number of MPI processes."""
# return self.qparams.get("nodes", 1) * self.qparams.get("ppn", 1)
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
QueueAdapter.set_mpi_procs(self, mpi_procs)
self.qparams["nodes"] = 1
self.qparams["ppn"] = mpi_procs
def exclude_nodes(self, nodes):
raise self.Error('qadapter failed to exclude nodes, not implemented yet in torque')
class SGEAdapter(QueueAdapter):
"""
Adapter for Sun Grid Engine (SGE) task submission software.
See also:
* https://www.wiki.ed.ac.uk/display/EaStCHEMresearchwiki/How+to+write+a+SGE+job+submission+script
* http://www.uibk.ac.at/zid/systeme/hpc-systeme/common/tutorials/sge-howto.html
"""
QTYPE = "sge"
QTEMPLATE = """\
#!/bin/bash
#$ -account_name $${account_name}
#$ -N $${job_name}
#$ -q $${queue_name}
#$ -pe $${parallel_environment} $${ncpus}
#$ -l h_rt=$${walltime}
# request a per slot memory limit of size bytes.
##$ -l h_vmem=$${mem_per_slot}
##$ -l mf=$${mem_per_slot}
###$ -j no
#$ -M $${mail_user}
#$ -m $${mail_type}
# Submission environment
##$ -S /bin/bash
###$ -cwd # Change to current working directory
###$ -V # Export environment variables into script
#$ -e $${_qerr_path}
#$ -o $${_qout_path}
$${qverbatim}
"""
def set_qname(self, qname):
super(SGEAdapter, self).set_qname(qname)
if qname:
self.qparams["queue_name"] = qname
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
super(SGEAdapter, self).set_mpi_procs(mpi_procs)
self.qparams["ncpus"] = mpi_procs
def set_omp_threads(self, omp_threads):
super(SGEAdapter, self).set_omp_threads(omp_threads)
logger.warning("Cannot use omp_threads with SGE")
def set_mem_per_proc(self, mem_mb):
"""Set the memory per process in megabytes"""
super(SGEAdapter, self).set_mem_per_proc(mem_mb)
self.qparams["mem_per_slot"] = str(int(self.mem_per_proc)) + "M"
def set_timelimit(self, timelimit):
super(SGEAdapter, self).set_timelimit(timelimit)
# Same convention as pbspro e.g. [hours:minutes:]seconds
self.qparams["walltime"] = qu.time2pbspro(timelimit)
def cancel(self, job_id):
return os.system("qdel %d" % job_id)
def _submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
# grab the returncode. SGE returns 0 if the job was successful
queue_id = None
if process.returncode == 0:
try:
# output should be of the form
# Your job 1659048 ("NAME_OF_JOB") has been submitted
queue_id = int(out.split(' ')[2])
except:
# probably error parsing job code
logger.critical("Could not parse job id following qsub...")
return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def exclude_nodes(self, nodes):
"""Method to exclude nodes in the calculation"""
raise self.Error('qadapter failed to exclude nodes, not implemented yet in sge')
def _get_njobs_in_queue(self, username):
process = Popen(['qstat', '-u', username], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
njobs = None
if process.returncode == 0:
# parse the result
# lines should contain username
# count lines that include the username in it
# TODO: only count running or queued jobs; i.e. *don't* count jobs that are 'C' (see the sketch below).
outs = out.splitlines()
njobs = len([line.split() for line in outs if username in line])
return njobs, process
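# Hedged sketch for the TODO above (helper name is hypothetical): count only
# running/queued jobs and skip completed ones (state 'C') when parsing qstat
# output; the column layout is assumed from the sample line shown earlier.
def _demo_count_active_jobs(qstat_output, username):
    njobs = 0
    for line in qstat_output.splitlines():
        fields = line.split()
        # assumed layout: the job state is the second-to-last field
        if username in fields and len(fields) >= 2 and fields[-2] != 'C':
            njobs += 1
    return njobs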
class MOABAdapter(QueueAdapter):
"""Adapter for MOAB. See https://computing.llnl.gov/tutorials/moab/"""
QTYPE = "moab"
QTEMPLATE = """\
#!/bin/bash
#MSUB -a $${eligible_date}
#MSUB -A $${account}
#MSUB -c $${checkpoint_interval}
#MSUB -l feature=$${feature}
#MSUB -l gres=$${gres}
#MSUB -l nodes=$${nodes}
#MSUB -l partition=$${partition}
#MSUB -l procs=$${procs}
#MSUB -l ttc=$${ttc}
#MSUB -l walltime=$${walltime}
#MSUB -l $${resources}
#MSUB -p $${priority}
#MSUB -q $${queue}
#MSUB -S $${shell}
#MSUB -N $${job_name}
#MSUB -v $${variable_list}
#MSUB -o $${_qout_path}
#MSUB -e $${_qerr_path}
$${qverbatim}
"""
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
super(MOABAdapter, self).set_mpi_procs(mpi_procs)
self.qparams["procs"] = mpi_procs
def set_timelimit(self, timelimit):
super(MOABAdapter, self).set_timelimit(timelimit)
self.qparams["walltime"] = qu.time2slurm(timelimit)
def set_mem_per_proc(self, mem_mb):
super(MOABAdapter, self).set_mem_per_proc(mem_mb)
#TODO
#raise NotImplementedError("set_mem_per_cpu")
def exclude_nodes(self, nodes):
raise self.Error('qadapter failed to exclude nodes, not implemented yet in moab')
def cancel(self, job_id):
return os.system("canceljob %d" % job_id)
def _submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
process = Popen(['msub', script_file], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
queue_id = None
if process.returncode == 0:
# grab the returncode. MOAB returns 0 if the job was successful
try:
# output should be the queue_id
queue_id = int(out.split()[0])
except:
# probably error parsing job code
logger.critical('Could not parse job id following msub...')
return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def _get_njobs_in_queue(self, username):
process = Popen(['showq', '-s', '-u', username], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
njobs = None
if process.returncode == 0:
# parse the result
# lines should have this form:
##
## active jobs: N eligible jobs: M blocked jobs: P
##
## Total job: 1
##
# Split the output string and return the last element.
out = out.splitlines()[-1]
njobs = int(out.split()[-1])
return njobs, process
class QScriptTemplate(string.Template):
delimiter = '$$'
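# Illustrative sketch: with the delimiter set to '$$', QScriptTemplate fills
# the $${name} placeholders used in the QTEMPLATE headers above, and
# safe_substitute() leaves unset placeholders untouched for later cleanup.
def _demo_qscript_template():
    t = QScriptTemplate("#PBS -q $${queue}\n#PBS -N $${job_name}\n")
    # -> '#PBS -q debug\n#PBS -N $${job_name}\n'
    return t.safe_substitute(queue="debug")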
|
migueldiascosta/pymatgen
|
pymatgen/io/abinit/qadapters.py
|
Python
|
mit
| 71,607
|
[
"ABINIT",
"pymatgen"
] |
f4ed60c481938f80dd96674c1c1e31961dcca645ab340997aec39cf157707f10
|
import os
import time
import sys
import poplib  # needed by FunctionalTest.wait_for_email_pop3 below
from unittest import skip
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import WebDriverException
from django.test import LiveServerTestCase
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.core import mail
from django.conf import settings
from .server_tools import reset_database
from .server_tools import create_session_on_server
from .management.commands.create_session import create_pre_authenticated_session
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))
GECKODRIVER_BIN = os.path.join(PARENT_DIR, 'bin')
os.environ["PATH"] += ":" + GECKODRIVER_BIN
class Browser(webdriver.Firefox):
firefox_path = "/usr/local/firefox/firefox"
def __init__(self, wait_time=1):
webdriver.Firefox.__init__(self, firefox_binary=FirefoxBinary(firefox_path=self.firefox_path))
self.wait_time = wait_time
def wait_page(self):
time.sleep(self.wait_time)
def add_session(self,email,session_key,final_url):
## to set a cookie we need to first visit the domain.
## 404 pages load the quickest!
self.get(final_url+"/404-not-found")
self.wait_page()
self.add_cookie(dict(
name=settings.SESSION_COOKIE_NAME,
value=session_key,
path='/',
))
self.get(final_url)
self.wait_page()
class FunctionalTest(StaticLiveServerTestCase):
wait_time=1
def build_browser(self):
browser = Browser(wait_time=self.wait_time)
return browser
def restart_browser(self):
self.browser.quit()
self.browser = Browser(wait_time=self.wait_time)
@classmethod
def setUpClass(cls):
cls.emaildir=None
cls.liveserver=False
for arg in sys.argv:
if 'liveserver' in arg:
cls.server_host = arg.split('=')[1]
cls.server_url = 'http://' + cls.server_host
cls.liveserver=True
cls.emaildir = "/srv/test.costruttoridimondi.org/var/mail"
if cls.liveserver:
return
super().setUpClass()
cls.server_url = cls.live_server_url
cls.liveserver=False
cls.emaildir=None
@classmethod
def tearDownClass(cls):
if cls.liveserver: return
#print(type(cls.live_server_url))
# if cls.server_url == cls.live_server_url:
super().tearDownClass()
def setUp(self):
if self.liveserver:
reset_database(self.server_host)
self.browser = Browser(wait_time=self.wait_time)
def tearDown(self):
self.browser.quit()
def wait_for(self, function_with_assertion, timeout=-1):
if timeout<=0: timeout=self.wait_time
start_time = time.time()
while time.time() - start_time < timeout:
try:
return function_with_assertion()
except (AssertionError, WebDriverException):
time.sleep(0.1)
# one more try, which will raise any errors if they are outstanding
return function_with_assertion()
def wait_browser(self):
time.sleep(self.wait_time)
def wait_for_email(self, test_email, subject):
if not self.liveserver:
email = mail.outbox[0]
self.assertIn(test_email, email.to)
self.assertEqual(email.subject, subject)
return email.body
if self.emaildir:
return self.wait_for_email_onfile(test_email,subject)
return self.wait_for_email_pop3(test_email,subject)
def wait_for_email_onfile(self, test_email, subject):
subject_line = 'Subject: {}'.format(subject)
email_list=os.listdir(self.emaildir)
email_list.sort()
if not email_list: return None
email_file=os.path.join(self.emaildir,email_list[-1])
fd=open(email_file,"r")
body=fd.read()
fd.close()
return body
def wait_for_email_pop3(self, test_email, subject):
subject_line = 'Subject: {}'.format(subject)
email_id = None
start = time.time()
inbox = poplib.POP3_SSL('pop.mail.yahoo.com')
try:
inbox.user(test_email)
inbox.pass_(os.environ['YAHOO_PASSWORD'])
while time.time() - start < 60:
count, _ = inbox.stat()
for i in reversed(range(max(1, count - 10), count + 1)):
print('getting msg', i)
_, lines, __ = inbox.retr(i)
lines = [l.decode('utf8') for l in lines]
print(lines)
if subject_line in lines:
email_id = i
body = '\n'.join(lines)
return body
time.sleep(5)
finally:
if email_id:
inbox.dele(email_id)
inbox.quit()
def get_session_key(self,email):
if self.liveserver:
session_key = create_session_on_server(self.server_host, email)
else:
session_key = create_pre_authenticated_session(email)
return session_key
def create_session(self,email):
session_key=self.get_session_key(email)
self.browser.add_session(email,session_key,self.server_url)
def create_pre_authenticated_session(self, browser, email, final_url=None):
if final_url==None: final_url=self.server_url
session_key=self.get_session_key(email)
browser.add_session(email,session_key,final_url)
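# Usage sketch (hypothetical test body): wait_for retries a zero-argument
# assertion callable until it passes or the timeout expires, absorbing the
# AssertionError/WebDriverException raised while the page is still loading:
#
#     self.browser.get(self.server_url)
#     self.wait_for(lambda: self.assertIn('Sign in', self.browser.page_source),
#                   timeout=5)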
class MultiuserFunctionalTest(FunctionalTest):
def create_user_browser_with_session(self,email,size=None,position=None):
user_browser = Browser(wait_time=self.wait_time)
if email in self.browsers.keys():
self.browsers[email].quit()
if size:
w,h=size
user_browser.set_window_size(w,h)
if position:
x,y=position
user_browser.set_window_position(x,y)
session_key=self.get_session_key(email)
user_browser.add_session(email,session_key,self.server_url)
self.browsers[email]=user_browser
return user_browser
def set_browser(self,email,size=None,position=None):
if email in self.browsers.keys():
self.browser=self.browsers[email]
if size:
w,h=size
self.browser.set_window_size(w,h)
if position:
x,y=position
self.browser.set_window_position(x,y)
return
kwargs={}
if size: kwargs["size"]=size
if position: kwargs["position"]=position
self.browser=self.create_user_browser_with_session(email,**kwargs)
def setUp(self):
if self.liveserver:
reset_database(self.server_host)
self.browsers = {}
self.browser = None
def tearDown(self):
for browser in self.browsers.values():
try:
browser.quit()
except:
pass
|
chiara-paci/costruttoridimondi
|
costruttoridimondi/functional_tests/base.py
|
Python
|
gpl-3.0
| 7,259
|
[
"VisIt"
] |
5588b0e71ead56898b480bd3e2d6d9aca1235bdb7ad2fbcfe4aaa8fc1ef0d186
|
__author__ = 'noe'
import numpy as np
from bhmm.hmm.generic_hmm import HMM
from bhmm.hmm.generic_sampled_hmm import SampledHMM
from bhmm.output_models.discrete import DiscreteOutputModel
from bhmm.util import config
from bhmm.util.statistics import confidence_interval_arr
class DiscreteHMM(HMM, DiscreteOutputModel):
r""" Convenience access to an HMM with a Gaussian output model.
"""
def __init__(self, hmm):
# superclass constructors
if not isinstance(hmm.output_model, DiscreteOutputModel):
raise TypeError('Given hmm is not a discrete HMM, but has an output model of type: '+
str(type(hmm.output_model)))
DiscreteOutputModel.__init__(self, hmm.output_model.output_probabilities)
HMM.__init__(self, hmm.transition_matrix, self, lag=hmm.lag, Pi=hmm.initial_distribution,
stationary=hmm.is_stationary, reversible=hmm.is_reversible)
class SampledDiscreteHMM(DiscreteHMM, SampledHMM):
""" Sampled Discrete HMM with a representative single point estimate and error estimates
Parameters
----------
estimated_hmm : :class:`HMM <generic_hmm.HMM>`
Representative HMM estimate, e.g. a maximum likelihood estimate or mean HMM.
sampled_hmms : list of :class:`HMM <generic_hmm.HMM>`
Sampled HMMs
conf : float, optional, default = 0.95
confidence interval, e.g. 0.68 for 1 sigma or 0.95 for 2 sigma.
"""
def __init__(self, estimated_hmm, sampled_hmms, conf=0.95):
# call DiscreteHMM superclass constructor with estimated_hmm
DiscreteHMM.__init__(self, estimated_hmm)
# call SampledHMM superclass constructor
SampledHMM.__init__(self, estimated_hmm, sampled_hmms, conf=conf)
@property
def output_probabilities_samples(self):
r""" Samples of the output probability matrix """
res = np.empty((self.nsamples, self.nstates, self.dimension), dtype=config.dtype)
for i in range(self.nsamples):
res[i,:,:] = self._sampled_hmms[i].means
return res
@property
def output_probabilities_mean(self):
r""" The mean of the output probability matrix """
return np.mean(self.output_probabilities_samples, axis=0)
@property
def output_probabilities_std(self):
r""" The standard deviation of the output probability matrix """
return np.std(self.output_probabilities_samples, axis=0)
@property
def output_probabilities_conf(self):
r""" The standard deviation of the output probability matrix """
return confidence_interval_arr(self.output_probabilities_samples, conf=self._conf)
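# Hedged sketch (helper name is hypothetical): the mean/std/confidence
# properties above reduce a stack of sampled matrices along the sample axis.
# np.percentile stands in for bhmm's confidence_interval_arr here, since the
# exact return convention of that utility is not shown in this file.
def _demo_sample_statistics(samples, conf=0.95):
    # samples: array of shape (nsamples, nstates, nsymbols)
    mean = np.mean(samples, axis=0)
    std = np.std(samples, axis=0)
    alpha = (1.0 - conf) / 2.0
    lower = np.percentile(samples, 100.0 * alpha, axis=0)
    upper = np.percentile(samples, 100.0 * (1.0 - alpha), axis=0)
    return mean, std, (lower, upper)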
|
marscher/bhmm
|
bhmm/hmm/discrete_hmm.py
|
Python
|
lgpl-3.0
| 2,678
|
[
"Gaussian"
] |
78b584b149572a86e5483d08451b03472863b4c0dd0cd9002bb1c1f1c1745f7c
|
"""
Created on April 25 2020
@author: Pedram Tavadze
"""
import os
import numpy as np
from .xml_output import parse_vasprun
from .incar import VaspInput
from ..codes import CodeOutput
from ...core import Structure
from ...visual import DensityOfStates
from ...crystal.kpoints import KPoints
class VaspXML(CodeOutput):
def __init__(self, filename='vasprun.xml'):
CodeOutput.__init__(self)
if not os.path.isfile(filename):
raise ValueError('File not found ' + filename)
else:
self.filename = filename
self.spins_dict = {'spin 1': 'Spin-up', 'spin 2': 'Spin-down'}
# self.positions = None
# self.stress = None
#self.array_sizes = {}
self.data = self.read()
if self.has_diverged:
return
self.bands = self._get_bands()
self.bands_projected = self._get_bands_projected()
def read(self):
return parse_vasprun(self.filename)
def _get_dos_total(self):
spins = list(self.data['general']['dos']
['total']['array']['data'].keys())
energies = np.array(
self.data['general']['dos']['total']['array']['data'][spins[0]])[:, 0]
dos_total = {'energies': energies}
for ispin in spins:
dos_total[self.spins_dict[ispin]] = np.array(
self.data['general']['dos']['total']['array']['data'][ispin])[:, 1]
return dos_total, list(dos_total.keys())
def _get_dos_projected(self, atoms=[]):
if len(atoms) == 0:
atoms = np.arange(self.initial_structure.natom)
if 'partial' in self.data['general']['dos']:
dos_projected = {}
# using this name as vasprun.xml uses ion #
ion_list = ["ion %s" % str(x + 1) for x in atoms]
for i in range(len(ion_list)):
iatom = ion_list[i]
name = self.initial_structure.symbols[atoms[i]] + str(atoms[i])
spins = list(
self.data['general']['dos']['partial']['array']['data'][iatom].keys())
energies = np.array(
self.data['general']['dos']['partial']['array']['data'][iatom][spins[0]][spins[0]])[:, 0]
dos_projected[name] = {'energies': energies}
for ispin in spins:
dos_projected[name][self.spins_dict[ispin]] = np.array(
self.data['general']['dos']['partial']['array']['data'][iatom][ispin][ispin])[:, 1:]
return dos_projected, self.data['general']['dos']['partial']['array']['info']
else:
print("This calculation does not include partial density of states")
return None, None
def _get_bands(self):
spins = list(self.data["general"]["eigenvalues"]
["array"]["data"].keys())
kpoints_list = list(
self.data["general"]["eigenvalues"]["array"]["data"]["spin 1"].keys())
eigen_values = {}
nbands = len(
self.data["general"]["eigenvalues"]["array"]["data"][spins[0]][
kpoints_list[0]
][kpoints_list[0]]
)
nkpoints = len(kpoints_list)
for ispin in spins:
eigen_values[ispin] = {}
eigen_values[ispin]["eigen_values"] = np.zeros(
shape=(nbands, nkpoints))
eigen_values[ispin]["occupancies"] = np.zeros(
shape=(nbands, nkpoints))
for ikpoint, kpt in enumerate(kpoints_list):
temp = np.array(
self.data["general"]["eigenvalues"]["array"]["data"][ispin][kpt][kpt])
eigen_values[ispin]["eigen_values"][:, ikpoint] = (
temp[:, 0] - self.fermi
)
eigen_values[ispin]["occupancies"][:, ikpoint] = temp[:, 1]
return eigen_values
def _get_bands_projected(self):
# projected[iatom][ikpoint][iband][iprincipal][iorbital][ispin]
labels = self.data["general"]["projected"]["array"]["info"]
spins = list(self.data["general"]["projected"]["array"]["data"].keys())
kpoints_list = list(
self.data["general"]["projected"]["array"]["data"][spins[0]].keys()
)
bands_list = list(
self.data["general"]["projected"]["array"]["data"][spins[0]][
kpoints_list[0]
][kpoints_list[0]].keys()
)
bands_projected = {"labels": labels}
nspins = len(spins)
nkpoints = len(kpoints_list)
nbands = len(bands_list)
norbitals = len(labels)
natoms = self.initial_structure.natom
bands_projected["projection"] = np.zeros(
shape=(nspins, nkpoints, nbands, natoms, norbitals)
)
for ispin, spn in enumerate(spins):
for ikpoint, kpt in enumerate(kpoints_list):
for iband, bnd in enumerate(bands_list):
bands_projected["projection"][
ispin, ikpoint, iband, :, :
] = np.array(
self.data["general"]["projected"]["array"]["data"][spn][kpt][
kpt
][bnd][bnd]
)
# # ispin, ikpoint, iband, iatom, iorbital
# bands_projected["projection"] = np.swapaxes(
# bands_projected["projection"], 0, 3)
# # iatom, ikpoint, iband, ispin, iorbital
# bands_projected["projection"] = np.swapaxes(
# bands_projected["projection"], 3, 4)
# # iatom, ikpoint, iband, iorbital, ispin
# bands_projected["projection"] = bands_projected["projection"].reshape(
# natoms, nkpoints, nbands, norbitals, nspins
# )
return bands_projected
@property
def dos_to_dict(self):
"""
Returns the complete density of states (total and projected) as a python dictionary
"""
return {'total': self._get_dos_total(),
'projected': self._get_dos_projected()}
@property
def dos_total(self):
"""
Returns the total density of states as a pychemia.visual.DensityOfStates object
"""
dos_total, labels = self._get_dos_total()
dos_total['energies'] -= self.fermi
return DensityOfStates(
np.array(
[
dos_total[x] for x in dos_total]).T,
title='Total Density Of States',
labels=[
x.capitalize() for x in labels])
@property
def dos_projected(self):
"""
Returns a list of projected density of states as pychemia.visual.DensityOfStates objects,
one element per atom
"""
ret = []
atoms = np.arange(self.initial_structure.natom, dtype=int)
dos_projected, info = self._get_dos_projected(atoms=atoms)
if dos_projected is None:
return None
ndos = len(dos_projected[list(dos_projected.keys())[0]]['energies'])
norbital = len(info) - 1
nspin = len(dos_projected[list(dos_projected.keys())[0]].keys()) - 1
info[0] = info[0].capitalize()
labels = []
labels.append(info[0])
if nspin > 1:
for il in info[1:]:
labels.append(il + '-Up')
for il in info[1:]:
labels.append(il + '-Down')
else:
labels = info
for iatom in dos_projected:
table = np.zeros(shape=(ndos, norbital * nspin + 1))
table[:, 0] = dos_projected[iatom]['energies'] - self.fermi
start = 1
for key in dos_projected[iatom]:
if key == 'energies':
continue
end = start + norbital
table[:, start:end] = dos_projected[iatom][key]
start = end
temp_dos = DensityOfStates(
table, title='Projected Density Of States %s' %
iatom, labels=labels)
ret.append(temp_dos)
return ret
def dos_parametric(self, atoms=None, orbitals=None, spin=None, title=None):
"""
This function sums over the given lists of atoms and orbitals.
For example, dos_parametric(atoms=[0,1,2], orbitals=[1,2,3], spin=[0,1])
sums all the projections of atoms 0,1,2 over orbitals 1,2,3 (py,pz,px)
and returns them separately for the 2 spins as a DensityOfStates object from pychemia.visual.DensityOfStates
:param atoms: list of atom indices to be summed over, counted from zero in the same
order as POSCAR
:param orbitals: list of orbitals to be summed over
| s || py || pz || px || dxy || dyz || dz2 || dxz ||x2-y2||
| 0 || 1 || 2 || 3 || 4 || 5 || 6 || 7 || 8 ||
:param spin: which spins to include, counted from 0.
There is no sum over spins
"""
projected = self.dos_projected
if atoms is None:
atoms = np.arange(self.initial_structure.natom, dtype=int)
if spin is None:
spin = [0, 1]
if orbitals is None:
orbitals = np.arange(
(len(projected[0].labels) - 1) // 2, dtype=int)
if title is None:
title = 'Sum'
orbitals = np.array(orbitals)
if len(spin) == 2:
labels = ['Energy', 'Spin-Up', 'Spin-Down']
new_orbitals = []
for ispin in spin:
new_orbitals.append(
list(orbitals + ispin * (len(projected[0].labels) - 1) // 2))
orbitals = new_orbitals
else:
if spin[0] == 0:
labels = ['Energy', 'Spin-Up']
elif spin[0] == 1:
labels = ['Energy', 'Spin-Down']
ret = np.zeros(shape=(len(projected[0].energies), len(spin) + 1))
ret[:, 0] = projected[0].energies
for iatom in atoms:
if len(spin) == 2:
ret[:,
1:] += self.dos_projected[iatom].values[:,
orbitals].sum(axis=2)
elif len(spin) == 1:
ret[:,
1] += self.dos_projected[iatom].values[:,
orbitals].sum(axis=1)
return DensityOfStates(table=ret, title=title, labels=labels)
@property
def kpoints(self):
"""
Returns the kpoints used in the calculation in the form of a pychemia.core.KPoints object
"""
if self.data['kpoints_info']['mode'] == 'listgenerated':
kpoints = KPoints(
kmode='path',
kvertices=self.data['kpoints_info']['kpoint_vertices'])
else:
kpoints = KPoints(kmode=self.data['kpoints_info']['mode'].lower(),
grid=self.data['kpoints_info']['kgrid'],
shifts=self.data['kpoints_info']['user_shift'])
return kpoints
@property
def kpoints_list(self):
"""
Returns the list of kpoints and weights used in the calculation in the form of a pychemia.core.KPoints object
"""
return KPoints(
kmode='reduced',
kpoints_list=self.data['kpoints']['kpoints_list'],
weights=self.data['kpoints']['k_weights'])
@property
def incar(self):
"""
Returns the incar parameters used in the calculation as pychemia.code.vasp.VaspIncar object
"""
return VaspInput(variables=self.data['incar'])
@property
def final_data(self):
"""
Returns the final free energy, energy_wo_entropy and energy_sigma>0 as a python dictionary
"""
return {'energy': {'free_energy': self.iteration_data[-1]['energy']['e_fr_energy'],
'energy_without_entropy': self.iteration_data[-1]['energy']['e_wo_entrp'],
'energy(sigma->0)': self.iteration_data[-1]['energy']['e_0_energy']}}
@property
def vasp_parameters(self):
"""
Returns all of the parameters vasp has used in this calculation
"""
return self.data['vasp_params']
@property
def potcar_info(self):
"""
Returns the information about the pseudopotentials (POTCAR) used in this calculation
"""
return self.data['atom_info']['atom_types']
@property
def fermi(self):
"""
Returns the fermi energy
"""
return self.data['general']['dos']['efermi']
@property
def species(self):
"""
Returns the species in POSCAR
"""
return self.initial_structure.species
def _correct_symbol(self, sym):
if sym == 'r' and any(['Zr' in self.potcar_info[x]['pseudopotential'] for x in self.potcar_info]):
return "Zr"
else:
return sym
@property
def symbols(self):
ret = [self._correct_symbol(x.strip()) for x in self.data['atom_info']['symbols']]
return ret
@property
def structures(self):
"""
Returns a list of pychemia.core.Structure representing all the ionic step structures
"""
structures = []
for ist in self.data['structures']:
structures.append(
Structure(
symbols=self.symbols,
reduced=ist['reduced'],
cell=ist['cell']))
return structures
@property
def forces(self):
"""
Returns all the forces in ionic steps
"""
return self.data['forces']
@property
def initial_structure(self):
"""
Returns the initial Structure as a pychemia structure
"""
return self.structures[0]
@property
def final_structure(self):
"""
Returns the final Structure as a pychemia structure
"""
return self.structures[-1]
@property
def iteration_data(self):
"""
Returns a list of information in each electronic and ionic step of calculation
"""
return self.data['calculation']
@property
def energies(self):
"""
Returns a list of energies for each electronic and ionic step as [ionic step, electronic step, energy]
"""
scf_step = 0
ion_step = 0
double_counter = 1
energies = []
for calc in self.data['calculation']:
if 'ewald' in calc['energy']:
if double_counter == 0:
double_counter += 1
scf_step += 1
elif double_counter == 1:
double_counter = 0
ion_step += 1
scf_step = 1
else:
scf_step += 1
energies.append([ion_step, scf_step, calc['energy']['e_0_energy']])
return energies
@property
def last_energy(self):
"""
Returns the last calculated energy of the system
"""
return self.energies[-1][-1]
@property
def energy(self):
"""
Returns the last calculated energy of the system
"""
return self.last_energy
@property
def convergence_electronic(self):
"""
Returns a boolean representing whether the last electronic self-consistent
calculation converged
"""
ediff = self.vasp_parameters['electronic']['EDIFF']
last_dE = abs(self.energies[-1][-1] - self.energies[-2][-1])
if last_dE < ediff:
return True
else:
return False
@property
def convergence_ionic(self):
"""
Returns a boolean representing whether the ionic part of the
calculation converged
"""
energies = np.array(self.energies)
nsteps = len(np.unique(np.array(self.energies)[:, 0]))
if nsteps == 1:
print('This calculation does not have ionic steps')
return True
else:
ediffg = self.vasp_parameters['ionic']['EDIFFG']
if ediffg < 0:
last_forces_abs = np.abs(np.array(self.forces[-1]))
return not(np.any(last_forces_abs > abs(ediffg)))
else:
last_ionic_energy = energies[(
energies[:, 0] == nsteps)][-1][-1]
penultimate_ionic_energy = energies[(
energies[:, 0] == (nsteps - 1))][-1][-1]
last_dE = abs(last_ionic_energy - penultimate_ionic_energy)
if last_dE < ediffg:
return True
return False
@property
def convergence(self):
"""
Returns a boolean representing whether the electronic self-consistent
and ionic calculations converged
"""
return (self.convergence_electronic and self.convergence_ionic)
@property
def is_finished(self):
"""
Always returns True. This needs fixing: we currently rely on the fact that
an unfinished calculation would raise errors in the xml parser
"""
# if vasprun.xml is read the calculation is finished
return True
@property
def valance_band_maximum(self):
ret = []
for ispin in self.bands:
eigenvalues = self.bands[ispin]['eigen_values']
occ = np.round(self.bands[ispin]['occupancies'])
ret.append(eigenvalues[occ == 1].max())
ret.append(max(ret))
return ret
@property
def conduction_band_minimum(self):
ret = []
for ispin in self.bands:
eigenvalues = self.bands[ispin]['eigen_values']
occ = np.round(self.bands[ispin]['occupancies'])
ret.append(eigenvalues[occ == 0].min())
ret.append(min(ret))
return ret
@property
def has_diverged(self):
for key in self.final_data['energy']:
if self.final_data['energy'][key] is None:
return True
return False
@property
def band_gap(self):
ret = {}
vbm = self.valance_band_maximum
cbm = self.conduction_band_minimum
for ispin, spin in enumerate(self.bands):
eigenvalues = self.bands[spin]['eigen_values']
iband_vbm, ikpoint_vbm = np.where(eigenvalues == vbm[ispin])
iband_cbm, ikpoint_cbm = np.where(eigenvalues == cbm[ispin])
kpoint_vbm = self.kpoints_list.kpoints_list[ikpoint_vbm[0]]
kpoint_cbm = self.kpoints_list.kpoints_list[ikpoint_cbm[0]]
gap = float(cbm[ispin] - vbm[ispin])
if iband_cbm[0] == iband_vbm[0] or gap < 1e-5:
gap = 0.00
ret[spin] = {'gap': gap,
'direct': bool(ikpoint_vbm[0] == ikpoint_cbm[0]),
'band': (int(iband_vbm[0]), int(iband_cbm[0])),
'kpoint': (kpoint_vbm, kpoint_cbm),
'ikpoint': (int(ikpoint_vbm[0]), int(ikpoint_cbm[0]))}
ret['total'] = {'gap': float(cbm[-1] - vbm[-1])}
return ret
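# Usage sketch (the file name is the usual VASP default, the helper name is
# hypothetical): reading the gap out of the dictionary returned by band_gap above.
def _demo_band_gap(filename='vasprun.xml'):
    vxml = VaspXML(filename=filename)
    gaps = vxml.band_gap
    # per-spin entries carry gap/direct/band/kpoint; 'total' holds the overall gap
    direct = {spin: gaps[spin]['direct'] for spin in gaps if spin != 'total'}
    return gaps['total']['gap'], direct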
|
MaterialsDiscovery/PyChemia
|
pychemia/code/vasp/vaspxml.py
|
Python
|
mit
| 19,297
|
[
"CRYSTAL",
"VASP"
] |
8dc7c9e9f763d1f5b1ff74c9e73d5d92c1bc34729f0e6347ab144f3798f41921
|
import numpy as np
import itertools
from sklearn import mixture, metrics
from sklearn.cluster import DBSCAN
from scipy import linalg
from scipy.spatial import distance
import pylab as pl
import matplotlib as mpl
# ToDo: look for better initial clustering, test with real data
#Generating random sample, in two clusters
n_samples = 30
np.random.seed(0)
C = np.array([[0., 0.91], [1, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C), np.random.randn(n_samples, 2) + np.array([-6, 3])]
#n_samples = 300
#X = np.zeros((2))
#for i in range(n_samples):
#newrow = np.array([i+np.random.random()*20,np.sin(i*np.pi/90)+np.random.random()/2])
#X = np.vstack([X,newrow])
# Normalizing
print "1: ", X
X /= np.max(np.abs(X), axis=0)
print "2: ", X
# Checking First-Last point
poX, poY = zip(*X)
poMinimX=np.infty
poMaximX=-np.infty
for i in range(len(poX)):
if poX[i] < poMinimX:
poMinimX=poX[i]
yOfMinimX=poY[i]
if poX[i] > poMaximX:
poMaximX=poX[i]
yOfMaximX=poY[i]
print 'First point (x = time): ', poMinimX, yOfMinimX
print 'Last point (x = time): ', poMaximX, yOfMaximX
difX = poMaximX-poMinimX
difY = yOfMaximX-yOfMinimX
redTraj= []
redTraj.append([difX, difY])
print 'Diff First-Last: ', redTraj
# Compute similarities
D = distance.squareform(distance.pdist(X))
S = 1 - (D / np.max(D))
# Compute DBSCAN
db = DBSCAN(eps=0.001, min_samples=10, metric='cosine').fit(S)
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print 'Estimated number of clusters: %d' % n_clusters_
# Initializing parameters
lowest_bic = np.infty
bic = []
# choose number of component to test
componentToTest=2*(n_clusters_ + 1)
print "Maximum components tested: ", componentToTest
n_components_range = range(1, componentToTest+1)
# this is a loop to test every component, choosing the lowest BIC at the end
for n_components in n_components_range:
# Fit a mixture of gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type='full')
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
# array of BIC for the graphic table column
bic = np.array(bic)
# one tested all components, here we choose the best
clf = best_gmm
print "Best result: ", clf
print 'Weights: ', np.round(clf.weights_,2)
print 'Means: ', np.round(clf.means_,2)
print 'Covars: ', np.round(clf.covars_,2)
# From here, just graphics
# Plot the BIC scores
bars = []
spl = pl.subplot(3, 1, 1)
xpos = np.array(n_components_range) - 0.1
bars.append(pl.bar(xpos, bic[0:len(n_components_range)], width=.2, color='b'))
pl.yticks(())
pl.xticks(n_components_range)
pl.title('BIC Score after Hierarchical Clustering')
spl.set_xlabel('Number of components')
# Plot the winner
splot = pl.subplot(3, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar) in enumerate(zip(clf.means_, clf.covars_)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
pl.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color='black')
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color='red')
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
pl.xticks(())
pl.yticks(())
pl.subplots_adjust(hspace=.55, bottom=.02)
pl.title('GMM-BIC. Gaussians: ' + str(len(clf.means_)))
# interpolation
from scipy.interpolate import Rbf, InterpolatedUnivariateSpline
meansX, meansY = zip(*clf.means_)
if len(meansX) > 1:
minimX=np.infty
maximX=-np.infty
for i in range(len(meansX)):
if meansX[i] < minimX:
minimX=meansX[i]
if meansX[i] > maximX:
maximX=meansX[i]
#print minimX, maximX
xi = np.linspace(minimX, maximX, 10*len(meansX))
testrbf = Rbf(meansX, meansY, function='gaussian')
yi = testrbf(xi)
pl.subplot(3, 1, 3)
pl.plot(xi, yi, 'g')
pl.scatter(meansX, meansY,8, color='red')
pl.xticks(())
pl.yticks(())
pl.title('Interpolation using RBF')
pl.subplots_adjust(hspace=.55, bottom=.02)
pl.show()
else:
pl.show()
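# Note (illustrative): mixture.GMM used above was removed in later
# scikit-learn releases; the modern equivalent of the BIC loop, assuming
# scikit-learn >= 0.18, would be:
def _modern_gmm_bic(data, n_components):
    from sklearn.mixture import GaussianMixture
    gm = GaussianMixture(n_components=n_components, covariance_type='full')
    gm.fit(data)
    return gm.bic(data)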
|
smorante/continuous-goal-directed-actions
|
simulated-CGDA/generalization/generalization_old_visual.py
|
Python
|
mit
| 4,306
|
[
"Gaussian"
] |
6478fd21c2b9c777347611cd924988707a1f686e9515f60baebe693d4b05c47d
|
#!/usr/bin/python
# -------------------------------------------------------------------
# Import statements
# -------------------------------------------------------------------
import sys
import os
import math
import re
from decimal import *
from operator import *
from astropy.io import fits
from sqlalchemy.orm import relationship, deferred
from sqlalchemy.schema import Column
from sqlalchemy.engine import reflection
from sqlalchemy.dialects.postgresql import *
from sqlalchemy.types import Float, Integer, String
from sqlalchemy.orm.session import Session
from sqlalchemy import select, func # for aggregate, other functions
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
from sqlalchemy.sql import column
from sqlalchemy_utils import Timestamp
from marvin.db.ArrayUtils import ARRAY_D
from marvin.core.caching_query import RelationshipCache
import numpy as np
try:
from sdss_access.path import Path
except ImportError as e:
Path = None
from marvin.db.database import db
import marvin.db.models.SampleModelClasses as sampledb
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
# ========================
# Define database classes
# ========================
Base = db.Base
class ArrayOps(object):
''' this class adds array functionality '''
__tablename__ = 'arrayops'
__table_args__ = {'extend_existing': True}
@property
def cols(self):
return list(self.__table__.columns._data.keys())
@property
def collist(self):
return ['wavelength', 'flux', 'ivar', 'mask', 'xpos', 'ypos', 'specres']
def getTableName(self):
return self.__table__.name
def matchIndex(self, name=None):
# Get index of correct column
incols = [x for x in self.cols if x in self.collist]
if not any(incols):
return None
elif len(incols) == 1:
idx = self.cols.index(incols[0])
else:
if not name:
print('Multiple columns found. Column name must be specified!')
return None
elif name in self.collist:
idx = self.cols.index(name)
else:
return None
return idx
def filter(self, start, end, name=None):
# Check input types or map string operators
startnum = type(start) == int or type(start) == float
endnum = type(end) == int or type(end) == float
opdict = {'=': eq, '<': lt, '<=': le, '>': gt, '>=': ge, '!=': ne}
if start in opdict.keys() or end in opdict.keys():
opind = list(opdict.keys()).index(start) if start in opdict.keys() else list(opdict.keys()).index(end)
if start in opdict.keys():
start = opdict[list(opdict.keys())[opind]]
if end in opdict.keys():
end = opdict[list(opdict.keys())[opind]]
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
if startnum and endnum:
arr = [x for x in data if x >= start and x <= end]
elif not startnum and endnum:
arr = [x for x in data if start(x, end)]
elif startnum and not endnum:
arr = [x for x in data if end(x, start)]
elif startnum == eq or endnum == eq:
arr = [x for x in data if start(x, end)] if start == eq else [x for x in data if end(x, start)]
return arr
else:
return None
def equal(self, num, name=None):
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
arr = [x for x in data if x == num]
return arr
else:
return None
def less(self, num, name=None):
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
arr = [x for x in data if x <= num]
return arr
else:
return None
def greater(self, num, name=None):
# Get matching index
self.idx = self.matchIndex(name=name)
if not self.idx:
return None
# Perform calculation
try:
data = self.__getattribute__(self.cols[self.idx])
except:
data = None
if data:
arr = [x for x in data if x >= num]
return arr
else:
return None
def getIndices(self, arr):
if self.idx:
indices = [self.__getattribute__(self.cols[self.idx]).index(a) for a in arr]
else:
return None
return indices
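# Illustrative sketch of the operator-mapping idea used in ArrayOps.filter:
# comparison operators given as strings map to functions from the operator
# module, so bounds can be numbers or operator strings (helper is hypothetical).
def _demo_operator_filter(data, op_string, bound):
    opdict = {'=': eq, '<': lt, '<=': le, '>': gt, '>=': ge, '!=': ne}
    op = opdict[op_string]
    return [x for x in data if op(x, bound)]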
class Cube(Base, ArrayOps):
__tablename__ = 'cube'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb', 'extend_existing': True}
specres = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
specresd = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
prespecres = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
prespecresd = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
def __repr__(self):
return '<Cube (pk={0}, plate={1}, ifudesign={2}, tag={3})>'.format(self.pk, self.plate, self.ifu.name, self.pipelineInfo.version.version)
@property
def header(self):
'''Returns an astropy header'''
session = Session.object_session(self)
data = session.query(FitsHeaderKeyword.label, FitsHeaderValue.value,
FitsHeaderValue.comment).join(FitsHeaderValue).filter(
FitsHeaderValue.cube == self).all()
hdr = fits.Header(data)
return hdr
@property
def name(self):
return 'manga-{0}-{1}-LOGCUBE.fits.gz'.format(self.plate, self.ifu.name)
@property
def default_mapsname(self):
return 'mangadap-{0}-{1}-default.fits.gz'.format(self.plate, self.ifu.name)
def getPath(self):
sasurl = os.getenv('SAS_URL')
if sasurl:
sasredux = os.path.join(sasurl, 'sas/mangawork/manga/spectro/redux')
path = sasredux
else:
redux = os.getenv('MANGA_SPECTRO_REDUX')
path = redux
version = self.pipelineInfo.version.version
cubepath = os.path.join(path, version, str(self.plate), 'stack')
return cubepath
@property
def location(self):
name = self.name
path = self.getPath()
loc = os.path.join(path, name)
return loc
@property
def image(self):
ifu = '{0}.png'.format(self.ifu.name)
path = self.getPath()
imageloc = os.path.join(path, 'images', ifu)
return imageloc
def header_to_dict(self):
'''Returns a simple python dictionary header'''
values = self.headervals
hdrdict = {str(val.keyword.label): val.value for val in values}
return hdrdict
@property
def plateclass(self):
'''Returns a plate class'''
plate = Plate(self)
return plate
def testhead(self, key):
''' Test existence of header keyword'''
try:
if self.header_to_dict()[key]:
return True
except:
return False
def getFlags(self, bits, name):
session = Session.object_session(self)
# if bits is not a digit, return the string 'NULL'
if not str(bits).isdigit():
return 'NULL'
else:
bits = int(bits)
# Convert the integer value to list of bits
bitlist = [int(i) for i in '{0:08b}'.format(bits)]
bitlist.reverse()
indices = [i for i, bit in enumerate(bitlist) if bit]
labels = []
for i in indices:
maskbit = session.query(MaskBit).filter_by(flag=name, bit=i).one()
labels.append(maskbit.label)
return labels
def getQualFlags(self, stage='3d'):
''' get quality flags '''
name = 'MANGA_DRP2QUAL' if stage == '2d' else 'MANGA_DRP3QUAL'
col = 'DRP2QUAL' if stage == '2d' else 'DRP3QUAL'
try:
bits = self.header_to_dict()[col]
except:
bits = None
if bits:
labels = self.getFlags(bits, name)
return labels
else:
return None
def getTargFlags(self, type=1):
''' get target flags '''
name = 'MANGA_TARGET1' if type == 1 else 'MANGA_TARGET2' if type == 2 else 'MANGA_TARGET3'
hdr = self.header_to_dict()
istarg = 'MNGTARG1' in hdr.keys()
if istarg:
col = 'MNGTARG1' if type == 1 else 'MNGTARG2' if type == 2 else 'MNGTARG3'
else:
col = 'MNGTRG1' if type == 1 else 'MNGTRG2' if type == 2 else 'MNGTRG3'
try:
bits = hdr[col]
except:
bits = None
if bits:
labels = self.getFlags(bits, name)
return labels
else:
return None
def get3DCube(self, extension='flux'):
"""Returns a 3D array of ``extension`` from the cube spaxels.
For example, ``cube.get3DCube('flux')`` will return the original
flux cube with the same ordering as the FITS data cube.
Note that this method seems to be really slow retrieving arrays (this
is especially serious for large IFUs).
"""
session = Session.object_session(self)
spaxels = session.query(getattr(Spaxel, extension)).filter(
Spaxel.cube_pk == self.pk).order_by(Spaxel.x, Spaxel.y).all()
# Assumes cubes are always square (!)
nx = ny = int(np.sqrt(len(spaxels)))
nwave = len(spaxels[0][0])
spArray = np.array(spaxels)
return spArray.transpose().reshape((nwave, ny, nx)).transpose(0, 2, 1)
@hybrid_property
def plateifu(self):
'''Returns the plate-ifu identifier'''
return '{0}-{1}'.format(self.plate, self.ifu.name)
@plateifu.expression
def plateifu(cls):
return func.concat(Cube.plate, '-', IFUDesign.name)
@hybrid_property
def restwave(self):
if self.target:
redshift = self.target.NSA_objects[0].z
wave = np.array(self.wavelength.wavelength)
restwave = wave / (1 + redshift)
return restwave
else:
return None
@restwave.expression
def restwave(cls):
restw = (func.rest_wavelength(sampledb.NSA.z))
return restw
def has_modelspaxels(self, name=None):
if not name:
name = '(SPX|HYB)'
has_ms = False
model_cubes = [f.modelcube for f in self.dapfiles if re.search('LOGCUBE-{0}'.format(name), f.filename)]
if model_cubes:
mc = sum(model_cubes, [])
if mc:
from marvin.db.models.DapModelClasses import ModelSpaxel
session = Session.object_session(mc[0])
ms = session.query(ModelSpaxel).filter_by(modelcube_pk=mc[0].pk).first()
has_ms = True if ms else False
return has_ms
def has_spaxels(self):
if len(self.spaxels) > 0:
return True
else:
return False
def has_fibers(self):
if len(self.fibers) > 0:
return True
else:
return False
class Wavelength(Base, ArrayOps):
__tablename__ = 'wavelength'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb', 'extend_existing': True}
wavelength = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
def __repr__(self):
return '<Wavelength (pk={0})>'.format(self.pk)
class Spaxel(Base, ArrayOps):
__tablename__ = 'spaxel'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb', 'extend_existing': True}
flux = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
ivar = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
mask = deferred(Column(ARRAY_D(Integer, zero_indexes=True)))
disp = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
predisp = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
def __repr__(self):
return '<Spaxel (pk={0}, x={1}, y={2})'.format(self.pk, self.x, self.y)
@hybrid_method
def sum(self, name=None):
total = sum(self.flux)
return total
@sum.expression
def sum(cls):
# return select(func.sum(func.unnest(cls.flux))).select_from(func.unnest(cls.flux)).label('totalflux')
return select([func.sum(column('totalflux'))]).select_from(func.unnest(cls.flux).alias('totalflux'))
class RssFiber(Base, ArrayOps):
__tablename__ = 'rssfiber'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb', 'extend_existing': True}
flux = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
ivar = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
mask = deferred(Column(ARRAY_D(Integer, zero_indexes=True)))
xpos = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
ypos = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
disp = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
predisp = deferred(Column(ARRAY_D(Float, zero_indexes=True)))
def __repr__(self):
return '<RssFiber (pk={0})>'.format(self.pk)
class PipelineInfo(Base):
__tablename__ = 'pipeline_info'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Pipeline_Info (pk={0})>'.format(self.pk)
class PipelineVersion(Base):
__tablename__ = 'pipeline_version'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Pipeline_Version (pk={0}, version={1})>'.format(self.pk, self.version)
class PipelineStage(Base):
__tablename__ = 'pipeline_stage'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Pipeline_Stage (pk={0}, label={1})>'.format(self.pk, self.label)
class PipelineCompletionStatus(Base):
__tablename__ = 'pipeline_completion_status'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Pipeline_Completion_Status (pk={0}, label={1})>'.format(self.pk, self.label)
class PipelineName(Base):
__tablename__ = 'pipeline_name'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Pipeline_Name (pk={0}, label={1})>'.format(self.pk, self.label)
class FitsHeaderValue(Base):
__tablename__ = 'fits_header_value'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Fits_Header_Value (pk={0})'.format(self.pk)
class FitsHeaderKeyword(Base):
__tablename__ = 'fits_header_keyword'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Fits_Header_Keyword (pk={0}, label={1})'.format(self.pk, self.label)
class IFUDesign(Base):
__tablename__ = 'ifudesign'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<IFU_Design (pk={0}, name={1})'.format(self.pk, self.name)
@property
def ifuname(self):
return self.name
@property
def designid(self):
return self.name
@property
def ifutype(self):
return self.name[:-2]
class IFUToBlock(Base):
__tablename__ = 'ifu_to_block'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<IFU_to_Block (pk={0})'.format(self.pk)
class SlitBlock(Base):
__tablename__ = 'slitblock'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<SlitBlock (pk={0})'.format(self.pk)
class Cart(Base):
__tablename__ = 'cart'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Cart (pk={0}, id={1})'.format(self.pk, self.id)
class Fibers(Base):
__tablename__ = 'fibers'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Fibers (pk={0}, fiberid={1}, fnum={2})'.format(self.pk, self.fiberid, self.fnum)
class FiberType(Base):
__tablename__ = 'fiber_type'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Fiber_Type (pk={0},label={1})'.format(self.pk, self.label)
class TargetType(Base):
__tablename__ = 'target_type'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Target_Type (pk={0},label={1})'.format(self.pk, self.label)
class Sample(Base, ArrayOps):
__tablename__ = 'sample'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<Sample (pk={0},cube={1})'.format(self.pk, self.cube)
@hybrid_property
def nsa_logmstar(self):
try:
return math.log10(self.nsa_mstar)
except ValueError:
return -9999.0
except TypeError:
return None
@nsa_logmstar.expression
def nsa_logmstar(cls):
return func.log(cls.nsa_mstar)
@hybrid_property
def nsa_logmstar_el(self):
try:
return math.log10(self.nsa_mstar_el)
except ValueError as e:
return -9999.0
except TypeError as e:
return None
@nsa_logmstar_el.expression
def nsa_logmstar_el(cls):
return func.log(cls.nsa_mstar_el)
class CartToCube(Base):
__tablename__ = 'cart_to_cube'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<CartToCube (pk={0},cube={1}, cart={2})'.format(self.pk, self.cube, self.cart)
class Wcs(Base, ArrayOps):
__tablename__ = 'wcs'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<WCS (pk={0},cube={1})'.format(self.pk, self.cube)
def makeHeader(self):
wcscols = self.cols[2:]
newhdr = fits.Header()
for c in wcscols:
newhdr[c] = float(self.__getattribute__(c)) if type(self.__getattribute__(c)) == Decimal else self.__getattribute__(c)
return newhdr
class ObsInfo(Base):
__tablename__ = 'obsinfo'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<ObsInfo (pk={0},cube={1})'.format(self.pk, self.cube)
class CubeShape(Base):
__tablename__ = 'cube_shape'
__table_args__ = {'autoload': True, 'schema': 'mangadatadb'}
def __repr__(self):
return '<CubeShape (pk={0},cubes={1},size={2},totalrows={3})'.format(self.pk, len(self.cubes), self.size, self.total)
@property
def shape(self):
return (self.size, self.size)
def makeIndiceArray(self):
''' Return the indices array as a numpy array '''
return np.array(self.indices)
def getXY(self, index=None):
''' Get the x,y elements from a single digit index '''
if index is not None:
if index > self.total:
return None, None
else:
i = int(index / self.size)
j = int(index - i * self.size)
else:
arrind = self.makeIndiceArray()
i = np.array(arrind / self.size, dtype=int)
j = np.array(self.makeIndiceArray() - i * self.size, dtype=int)
return i, j
@hybrid_property
def x(self):
'''Returns the x indices corresponding to the flat spaxel indices'''
x = self.getXY()[0]
return x
@x.expression
def x(cls):
#arrind = func.unnest(cls.indices).label('arrind')
#x = func.array_agg(arrind / cls.size).label('x')
s = db.Session()
arrind = (func.unnest(cls.indices) / cls.size).label('xarrind')
#x = s.query(arrind).select_from(cls).subquery('xarr')
#xagg = s.query(func.array_agg(x.c.xarrind))
return arrind
@hybrid_property
def y(self):
'''Returns the y indices corresponding to the flat spaxel indices'''
y = self.getXY()[1]
return y
@y.expression
def y(cls):
#arrind = func.unnest(cls.indices).label('arrind')
#x = arrind / cls.size
#y = func.array_agg(arrind - x*cls.size).label('y')
#return y
s = db.Session()
arrunnest = func.unnest(cls.indices)
xarr = (func.unnest(cls.indices) / cls.size).label('xarrind')
arrind = (arrunnest - xarr*cls.size).label('yarrind')
#n.arrind-(n.arrind/n.size)*n.size
y = s.query(arrind).select_from(cls).subquery('yarr')
yagg = s.query(func.array_agg(y.c.yarrind))
return yagg.as_scalar()
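# Illustrative sketch of the index arithmetic used by CubeShape.getXY and the
# x/y expressions above: a flat spaxel index maps to (x, y) on a size x size
# grid via integer division and remainder (helper name is hypothetical).
def _demo_flat_index_to_xy(index, size):
    x = index // size
    y = index - x * size  # equivalent to index % size
    return x, y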
class Plate(object):
''' new plate class '''
__tablename__ = 'myplate'
def __init__(self, cube=None, id=None):
self.id = cube.plate if cube else id if id else None
self.cube = cube if cube else None
self.drpver = None
if self.cube:
self._hdr = self.cube.header_to_dict()
self.type = self.getPlateType()
self.platetype = self._hdr.get('PLATETYP', None)
self.surveymode = self._hdr.get('SRVYMODE', None)
self.dateobs = self._hdr.get('DATE-OBS', None)
self.ra = self._hdr.get('CENRA', None)
self.dec = self._hdr.get('CENDEC', None)
self.designid = self._hdr.get('DESIGNID', None)
self.cartid = self._hdr.get('CARTID', None)
self.drpver = self.cube.pipelineInfo.version.version
self.isbright = 'APOGEE' in self.surveymode
self.dir3d = 'mastar' if self.isbright else 'stack'
# cast a few
self.ra = float(self.ra) if self.ra else None
self.dec = float(self.dec) if self.dec else None
self.id = int(self.id) if self.id else None
self.designid = int(self.designid) if self.designid else None
def __repr__(self):
return self.__str__()
def __str__(self):
return ('Plate (id={0}, ra={1}, dec={2}, '
' designid={3})'.format(self.id, self.ra, self.dec, self.designid))
def getPlateType(self):
''' Get the type of MaNGA plate '''
hdr = self.cube.header
# try galaxy
mngtrg = self._hdr.get('MNGTRG1', None)
pltype = 'Galaxy' if mngtrg else None
# try stellar
if not pltype:
mngtrg = self._hdr.get('MNGTRG2', None)
pltype = 'Stellar' if mngtrg else None
# try ancillary
if not pltype:
mngtrg = self._hdr.get('MNGTRG3', None)
pltype = 'Ancillary' if mngtrg else None
return pltype
@property
def cubes(self):
''' Get all cubes on this plate '''
session = db.Session()
if self.drpver:
cubes = session.query(Cube).join(PipelineInfo, PipelineVersion).\
filter(Cube.plate == self.id, PipelineVersion.version == self.drpver).all()
else:
cubes = session.query(Cube).filter(Cube.plate == self.id).all()
return cubes
# ================
# manga Aux DB classes
# ================
class CubeHeader(Base):
__tablename__ = 'cube_header'
__table_args__ = {'autoload': True, 'schema': 'mangaauxdb'}
def __repr__(self):
return '<CubeHeader (pk={0},cube={1})'.format(self.pk, self.cube)
class MaskLabels(Base):
__tablename__ = 'maskbit_labels'
__table_args__ = {'autoload': True, 'schema': 'mangaauxdb'}
def __repr__(self):
return '<MaskLabels (pk={0},bit={1})'.format(self.pk, self.maskbit)
class MaskBit(Base):
__tablename__ = 'maskbit'
__table_args__ = {'autoload': True, 'schema': 'mangaauxdb'}
def __repr__(self):
return '<MaskBit (pk={0},flag={1}, bit={2}, label={3})'.format(self.pk, self.flag, self.bit, self.label)
# ================
# Query Meta classes
# ================
class QueryMeta(Base, Timestamp):
__tablename__ = 'query'
__table_args__ = {'autoload': True, 'schema': 'history'}
def __repr__(self):
return '<QueryMeta (pk={0}, filter={1}), count={2}>'.format(self.pk, self.searchfilter, self.count)
class User(Base, UserMixin, Timestamp):
__tablename__ = 'user'
__table_args__ = {'autoload': True, 'schema': 'history'}
def __repr__(self):
return '<User (pk={0}, username={1})'.format(self.pk, self.username)
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def get_id(self):
return (self.pk)
def update_stats(self, request=None):
remote_addr = request.remote_addr or None
self.login_count += 1
old_current_ip, new_current_ip = self.current_ip, remote_addr
self.last_ip = old_current_ip
self.current_ip = new_current_ip
# Define relationships
# ========================
Cube.pipelineInfo = relationship(PipelineInfo, backref="cubes")
Cube.wavelength = relationship(Wavelength, backref="cube")
Cube.ifu = relationship(IFUDesign, backref="cubes")
Cube.carts = relationship(Cart, secondary=CartToCube.__table__, backref="cubes")
Cube.wcs = relationship(Wcs, backref='cube', uselist=False)
Cube.shape = relationship(CubeShape, backref='cubes', uselist=False)
Cube.obsinfo = relationship(ObsInfo, backref='cube', uselist=False)
# from SampleDB
Cube.target = relationship(sampledb.MangaTarget, backref='cubes')
Sample.cube = relationship(Cube, backref="sample", uselist=False)
FitsHeaderValue.cube = relationship(Cube, backref="headervals")
FitsHeaderValue.keyword = relationship(FitsHeaderKeyword, backref="value")
IFUDesign.blocks = relationship(SlitBlock, secondary=IFUToBlock.__table__, backref='ifus')
Fibers.ifu = relationship(IFUDesign, backref="fibers")
Fibers.fibertype = relationship(FiberType, backref="fibers")
Fibers.targettype = relationship(TargetType, backref="fibers")
insp = reflection.Inspector.from_engine(db.engine)
fks = insp.get_foreign_keys(Spaxel.__table__.name, schema='mangadatadb')
if fks:
Spaxel.cube = relationship(Cube, backref='spaxels')
fks = insp.get_foreign_keys(RssFiber.__table__.name, schema='mangadatadb')
if fks:
RssFiber.cube = relationship(Cube, backref='rssfibers')
RssFiber.fiber = relationship(Fibers, backref='rssfibers')
PipelineInfo.name = relationship(PipelineName, backref="pipeinfo")
PipelineInfo.stage = relationship(PipelineStage, backref="pipeinfo")
PipelineInfo.version = relationship(PipelineVersion, backref="pipeinfo")
PipelineInfo.completionStatus = relationship(PipelineCompletionStatus, backref="pipeinfo")
# from AuxDB
CubeHeader.cube = relationship(Cube, backref='hdr')
# ---------------------------------------------------------
# Test that all relationships/mappings are self-consistent.
# ---------------------------------------------------------
from sqlalchemy.orm import configure_mappers
try:
configure_mappers()
except RuntimeError as error:
print("""
mangadb.DataModelClasses:
An error occurred when verifying the relationships between the database tables.
Most likely this is an error in the definition of the SQLAlchemy relationships -
see the error message below for details.
""")
print("Error type: %s" % sys.exc_info()[0])
print("Error value: %s" % sys.exc_info()[1])
print("Error trace: %s" % sys.exc_info()[2])
sys.exit(1)
data_cache = RelationshipCache(Cube.target).\
and_(RelationshipCache(Cube.ifu)).\
and_(RelationshipCache(Cube.spaxels)).\
and_(RelationshipCache(Cube.wavelength)).\
and_(RelationshipCache(IFUDesign.fibers)).\
and_(RelationshipCache(Cube.rssfibers))
| albireox/marvin | python/marvin/db/models/DataModelClasses.py | Python | bsd-3-clause | 28,307 | ["Galaxy"] | 8299ee7060e02ac9a9983ddd459d789e3a84590303de31006e4e3741d5d65e14 |
# Copyright (c) 2010 Howard Hughes Medical Institute.
# All rights reserved.
# Use is subject to Janelia Farm Research Campus Software Copyright 1.1 license terms.
# http://license.janelia.org/license/jfrc_copyright_1_1.html
import wx
from library_item import LibraryItem
from network.neuron import Neuron
class NeuronClass(LibraryItem):
@classmethod
def displayName(cls):
return gettext('Neuron Class')
@classmethod
def listProperty(cls):
return 'neuronClasses'
@classmethod
def lookupProperty(cls):
return 'neuronClass'
@classmethod
def bitmap(cls):
image = Neuron.image()
if image == None:
return None
else:
return wx.BitmapFromImage(image)
def __init__(self, parentClass = None, *args, **keywordArgs):
""" """
# Pull out the keyword arguments specific to this class before we call super.
# We need to do this so we can know if the caller specified an argument or not.
# For example, the caller might specify a parent class and one attribute to override. We need to know which attributes _not_ to set.
localAttrNames = ['activation', 'functions', 'neurotransmitters', 'polarity']
localKeywordArgs = {}
for attrName in localAttrNames:
if attrName in keywordArgs:
localKeywordArgs[attrName] = keywordArgs[attrName]
del keywordArgs[attrName]
LibraryItem.__init__(self, *args, **keywordArgs)
# Neuron classes are arranged in a hierarchy.
self.parentClass = parentClass
self.subClasses = []
if self.parentClass:
self.parentClass.subClasses.append(self)
for attrName in localAttrNames:
if attrName == 'functions':
attrValue = set([])
elif attrName == 'neurotransmitters':
attrValue = []
else:
attrValue = None
if attrName in localKeywordArgs:
# The user has explicitly set the attribute.
if attrName == 'functions':
attrValue = set(localKeywordArgs[attrName])
else:
attrValue = localKeywordArgs[attrName]
elif self.parentClass:
attrValue = getattr(self.parentClass, attrName) # Inherit the value from the parent class
setattr(self, attrName, attrValue)
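# A minimal usage sketch (hypothetical names; any LibraryItem arguments are
# omitted for brevity) of the attribute inheritance implemented above: an
# attribute that is not passed explicitly is copied from the parent class,
# while an explicitly passed value overrides the inherited one.
# >>> motor = NeuronClass(polarity = 'multipolar')
# >>> alpha = NeuronClass(parentClass = motor, functions = ['locomotion'])
# >>> alpha.polarity     # inherited from motor -> 'multipolar'
# >>> alpha.functions    # explicitly set -> set(['locomotion'])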
| JaneliaSciComp/Neuroptikon | Source/library/neuron_class.py | Python | bsd-3-clause | 2,528 | ["NEURON"] | f27c8d7df543bd4b5a8027757ef86821ed253f990bb2926887007996ec23e9d0 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2010 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
This module provides a progress dialog for displaying the status of
long running operations.
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import time
from gramps.gen.ggettext import gettext as _
import logging
log = logging.getLogger("gen.progressdialog")
#-------------------------------------------------------------------------
#
# GTK modules
#
#-------------------------------------------------------------------------
from gi.repository import GObject
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.utils.callback import Callback
#-------------------------------------------------------------------------
#
# LongOpStatus
#
#-------------------------------------------------------------------------
class LongOpStatus(Callback):
"""LongOpStatus provides a way of communicating the status of a long
running operation. The intended use is that when a long running operation
is about to start it should create an instance of this class and emit
it so that any listeners can pick it up and use it to record the status
of the operation.
Signals
=======
op-heartbeat - emitted every 'interval' calls to heartbeat.
op-end - emitted once when the operation completes.
Example usage:
class MyClass(Callback):
__signals__ = {
'op-start' : object
}
def long(self):
status = LongOpStatus("doing long job", 100, 10)
for i in xrange(0,99):
time.sleep(0.1)
status.heartbeat()
status.end()
class MyListener(object):
def __init__(self):
self._op = MyClass()
self._op.connect('op-start', self.start)
self._current_op = None
def start(self,long_op):
self._current_op = long_op
self._current_op.connect('op-heartbeat', self.heartbeat)
self._current_op.connect('op-end', self.stop)
def heartbeat(self):
# update status display
def stop(self):
# close the status display
self._current_op = None
"""
__signals__ = {
'op-heartbeat' : None,
'op-end' : None
}
def __init__(self, msg="",
total_steps=None,
interval=1,
can_cancel=False):
"""
@param msg: A message to indicate the purpose of the operation.
@type msg: string
@param total_steps: The total number of steps that the operation
will perform.
@type total_steps: int
@param interval: The number of iterations between emissions.
@type interval: int
@param can_cancel: Set to True if the operation can be cancelled.
If this is set the operation that creates the status object should
check the 'should_cancel' method regularly so that it can cancel
the operation.
@type can_cancel: bool
"""
Callback.__init__(self)
self._msg = msg
self._total_steps = total_steps
# don't allow intervals less than 1
self._interval = max(interval, 1)
self._can_cancel = can_cancel
self._cancel = False
self._count = 0
self._countdown = interval
self._secs_left = 0
self._start = time.time()
self._running = True
def __del__(self):
if self._running:
self.emit('op-end')
def heartbeat(self):
"""This should be called for each step in the operation. It will
emit an 'op-heartbeat' every 'interval' steps. It recalculates the
'estimated_secs_to_complete' from the time taken for previous
steps.
"""
self._countdown -= 1
if self._countdown <= 0:
elapsed = time.time() - self._start
self._secs_left = \
( elapsed / self._interval ) \
* (self._total_steps - self._count)
self._count += self._interval
self._countdown = self._interval
self._start = time.time()
self.emit('op-heartbeat')
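# A worked numeric example (illustrative numbers only) of the estimate computed
# above: with total_steps = 100 and interval = 10, if the first 10 heartbeats
# take 2 seconds in total, then elapsed / interval = 0.2 s per step and
# _secs_left = 0.2 * (100 - 0) = 20 seconds at the first 'op-heartbeat'.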
def step(self):
"""Convenience function so LongOpStatus can be used as a ProgressBar
if set up correctly"""
self.heartbeat()
def estimated_secs_to_complete(self):
"""Return the number of seconds estimated left before operation
completes. This will change as 'heartbeat' is called.
@return: estimated seconds to complete.
@rtype: int
"""
return self._secs_left
def was_cancelled(self):
"""
Has this process been cancelled?
"""
return self._cancel
def cancel(self):
"""Inform the operation that it should complete.
"""
self._cancel = True
self.end()
def end(self):
"""End the operation. Causes the 'op-end' signal to be emitted.
"""
self.emit('op-end')
self._running = False
def should_cancel(self):
"""Return true of the user has asked for the operation to be cancelled.
@return: True of the operation should be cancelled.
@rtype: bool
"""
return self._cancel
def can_cancel(self):
"""@return: True if the operation can be cancelled.
@rtype: bool
"""
return self._can_cancel
def get_msg(self):
"""@return: The current status description messages.
@rtype: string
"""
return self._msg
def set_msg(self, msg):
"""Set the current description message.
@param msg: The description message.
@type msg: string
"""
self._msg = msg
def get_total_steps(self):
"""Get to total number of steps. NOTE: this is not the
number of times that the 'op-heartbeat' message will be
emited. 'op-heartbeat' is emited get_total_steps/interval
times.
@return: total number of steps.
@rtype: int
"""
return self._total_steps
def get_interval(self):
"""Get the interval between 'op-hearbeat' signals.
@return: the interval between 'op-hearbeat' signals.
@rtype: int
"""
return self._interval
#-------------------------------------------------------------------------
#
# _StatusObjectFacade
#
#-------------------------------------------------------------------------
class _StatusObjectFacade(object):
"""This provides a simple structure for recording the information
needed about a status object."""
def __init__(self, status_obj, heartbeat_cb_id=None, end_cb_id=None):
"""
@param status_obj:
@type status_obj: L{LongOpStatus}
@param heartbeat_cb_id: (default: None)
@type heartbeat_cb_id: int
@param end_cb_id: (default: None)
@type end_cb_id: int
"""
self.status_obj = status_obj
self.heartbeat_cb_id = heartbeat_cb_id
self.end_cb_id = end_cb_id
self.pbar_idx = None
self.active = False
#-------------------------------------------------------------------------
#
# ProgressMonitor
#
#-------------------------------------------------------------------------
class ProgressMonitor(object):
"""A dialog for displaying the status of long running operations.
It will work with L{LongOpStatus} objects to track the
progress of long running operations. If the operation is going to
take longer than I{popup_time} it will pop up a dialog with a
progress bar so that the user gets some feedback about what is
happening.
"""
__default_popup_time = 5 # seconds
def __init__(self, dialog_class, dialog_class_params=(),
title=_("Progress Information"),
popup_time = None):
"""
@param dialog_class: A class used to display the progress dialog.
@type dialog_class: GtkProgressDialog or the same interface.
@param dialog_class_params: A tuple that will be used as the initial
arguments to the dialog_class, this might be used for passing in
a parent window handle.
@type dialog_class_params: tuple
@param title: The title of the progress dialog
@type title: string
@param popup_time: number of seconds to wait before popup.
@type popup_time: int
"""
self._dialog_class = dialog_class
self._dialog_class_params = dialog_class_params
self._title = title
self._popup_time = popup_time
if self._popup_time is None:
self._popup_time = self.__class__.__default_popup_time
self._status_stack = [] # list of current status objects
self._dlg = None
def _get_dlg(self):
if self._dlg is None:
self._dlg = self._dialog_class(self._dialog_class_params,
self._title)
#self._dlg.show()
return self._dlg
def add_op(self, op_status):
"""Add a new status object to the progress dialog.
@param op_status: the status object.
@type op_status: L{LongOpStatus}
"""
log.debug("adding op to Progress Monitor")
facade = _StatusObjectFacade(op_status)
self._status_stack.append(facade)
idx = len(self._status_stack)-1
# wrap up the op_status object idx into the callback calls
def heartbeat_cb():
self._heartbeat(idx)
def end_cb():
self._end(idx)
facade.heartbeat_cb_id = op_status.connect('op-heartbeat',
heartbeat_cb)
facade.end_cb_id = op_status.connect('op-end', end_cb)
def _heartbeat(self, idx):
# check the estimated time to complete to see if we need
# to pop up a progress dialog.
log.debug("heartbeat in ProgressMonitor")
if idx >= len(self._status_stack):
# this item has been cancelled
return
facade = self._status_stack[idx]
if facade.status_obj.estimated_secs_to_complete() > self._popup_time:
facade.active = True
if facade.active:
dlg = self._get_dlg()
if facade.pbar_idx is None:
facade.pbar_idx = dlg.add(facade.status_obj)
dlg.show()
dlg.step(facade.pbar_idx)
def _end(self, idx):
# hide any progress dialog
# remove the status object from the stack
log.debug("received end in ProgressMonitor")
if idx >= len(self._status_stack):
# this item has been cancelled
return
while idx < len(self._status_stack) - 1:
self._end(len(self._status_stack) - 1)
facade = self._status_stack[idx]
if facade.active:
dlg = self._get_dlg()
if len(self._status_stack) == 1:
dlg.hide()
dlg.remove(facade.pbar_idx)
facade.status_obj.disconnect(facade.heartbeat_cb_id)
facade.status_obj.disconnect(facade.end_cb_id)
del self._status_stack[idx]
if len(self._status_stack) == 0 and self._dlg:
self._dlg.close()
#-------------------------------------------------------------------------
#
# _GtkProgressBar
#
#-------------------------------------------------------------------------
class _GtkProgressBar(Gtk.VBox):
"""This widget displays the progress bar and labels for a progress
indicator. It provides an interface to updating the progress bar.
"""
def __init__(self, long_op_status):
""":param long_op_status: the status of the operation.
:type long_op_status: L{gen.utils.LongOpStatus}
"""
GObject.GObject.__init__(self)
msg = long_op_status.get_msg()
self._old_val = -1
self._lbl = Gtk.Label(label=msg)
self._lbl.set_use_markup(True)
#self.set_border_width(24)
self._pbar = Gtk.ProgressBar()
self._hbox = Gtk.HBox()
# Only display the cancel button if the operation
# can be cancelled.
if long_op_status.can_cancel():
self._cancel = Gtk.Button(stock=Gtk.STOCK_CANCEL)
self._cancel.connect("clicked",
lambda x: long_op_status.cancel())
self._cancel.show()
self._hbox.pack_end(self._cancel, expand=False, fill=True, padding=0)
self._hbox.pack_start(self._pbar, True, True, 0)
self.pack_start(self._lbl, expand=False, fill=False)
self.pack_start(self._hbox, expand=False, fill=False)
self._pbar_max = (long_op_status.get_total_steps()/
long_op_status.get_interval())
self._pbar_index = 0.0
self._pbar.set_fraction(((100/float(long_op_status.get_total_steps())*
float(long_op_status.get_interval())))/
100.0)
if msg != '':
self._lbl.show()
self._pbar.show()
self._hbox.show()
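# A short worked example (illustrative numbers only) of the initial fraction
# set above: with get_total_steps() = 100 and get_interval() = 10, the
# expression reduces to (100 / 100.0 * 10.0) / 100.0 = 0.1, i.e.
# interval / total_steps, and _pbar_max = 100 / 10 = 10 steps to reach 100%.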
def step(self):
"""Move the progress bar on a step.
"""
self._pbar_index = self._pbar_index + 1.0
if self._pbar_index > self._pbar_max:
self._pbar_index = self._pbar_max
try:
val = int(100*self._pbar_index/self._pbar_max)
except ZeroDivisionError:
val = 0
if val != self._old_val:
self._pbar.set_text("%d%%" % val)
self._pbar.set_fraction(val/100.0)
self._old_val = val
#-------------------------------------------------------------------------
#
# GtkProgressDialog
#
#-------------------------------------------------------------------------
class GtkProgressDialog(Gtk.Dialog):
"""A gtk window to display the status of a long running
process."""
def __init__(self, window_params, title):
""":param title: The title to display on the top of the window.
:type title: string
"""
GObject.GObject.__init__(self, *window_params)
self.connect('delete_event', self._warn)
self.set_title(title)
#self.set_resize_mode(Gtk.RESIZE_IMMEDIATE)
#self.show()
self._progress_bars = []
def add(self, long_op_status):
"""Add a new status object to the progress dialog.
:param long_op_status: the status object.
:type long_op_status: L{gen.utils.LongOpStatus}
:returns: a key that can be used as the L{pbar_idx}
to the other methods.
:rtype: int
"""
pbar = _GtkProgressBar(long_op_status)
self.vbox.pack_start(pbar, expand=False, fill=False)
pbar.show()
# this seems to cause an infinite loop:
#self.resize_children()
self._progress_bars.append(pbar)
# This is a bad idea; could cause deletes while adding:
#self._process_events()
return len(self._progress_bars)-1
def remove(self, pbar_idx):
"""Remove the specified status object from the progress dialog.
:param pbar_idx: the index as returned from L{add}
:type pbar_idx: int
"""
if pbar_idx is not None:
pbar = self._progress_bars[pbar_idx]
self.vbox.remove(pbar)
del self._progress_bars[pbar_idx]
def step(self, pbar_idx):
"""Click the progress bar over to the next value. Be paranoid
and ensure that it doesn't go over 100%.
:param pbar_idx: the index as returned from L{add}
:type pbar_idx: int
"""
if pbar_idx < len(self._progress_bars):
self._progress_bars[pbar_idx].step()
self._process_events()
def _process_events(self):
while Gtk.events_pending():
Gtk.main_iteration()
def show(self):
"""Show the dialog and process any events.
"""
Gtk.Dialog.show(self)
self._process_events()
def hide(self):
"""Hide the dialog and process any events.
"""
Gtk.Dialog.hide(self)
self._process_events()
def _warn(self, x, y):
return True
def close(self):
self.destroy()
if __name__ == '__main__':
def test(a, b):
d = ProgressMonitor(GtkProgressDialog)
s = LongOpStatus("Doing very long operation", 100, 10, can_cancel=True)
d.add_op(s)
for i in xrange(0, 99):
if s.should_cancel():
break
time.sleep(0.1)
if i == 30:
t = LongOpStatus("doing a shorter one", 100, 10,
can_cancel=True)
d.add_op(t)
for j in xrange(0, 99):
if s.should_cancel():
t.cancel()
break
if t.should_cancel():
break
time.sleep(0.1)
t.heartbeat()
if not t.was_cancelled():
t.end()
if i == 60:
t = LongOpStatus("doing another shorter one", 100, 10)
d.add_op(t)
for j in xrange(0, 99):
if s.should_cancel():
t.cancel()
break
time.sleep(0.1)
t.heartbeat()
t.end()
s.heartbeat()
if not s.was_cancelled():
s.end()
w = Gtk.Window(Gtk.WindowType.TOPLEVEL)
w.connect('destroy', Gtk.main_quit)
button = Gtk.Button("Test")
button.connect("clicked", test, None)
w.add(button)
button.show()
w.show()
Gtk.main()
print 'done'
| arunkgupta/gramps | gramps/gui/widgets/progressdialog.py | Python | gpl-2.0 | 19,477 | ["Brian"] | 5f2f0c5fa105f4a29ba87c23b7bf89f851671462649d59e59d55ed0196dfbbf8 |
#!/usr/bin/env python2
# Make.py tool for managing packages and their auxiliary libs,
# auto-editing machine Makefiles, and building LAMMPS
# Syntax: Make.py -h (for help)
# Notes: needs python 2.7 (not Python 3)
import sys,os,commands,re,copy
# switch abbrevs
# switch classes = created class for each switch
# lib classes = auxiliary package libs
# build classes = build options with defaults
# make classes = makefile options with no defaults
# setargs = makefile settings
# actionargs = allowed actions (also lib-dir and machine)
abbrevs = "adhjmoprsv"
switchclasses = ("actions","dir","help","jmake","makefile",
"output","packages","redo","settings","verbose")
libclasses = ("atc","awpmd","colvars","cuda","gpu",
"meam","poems","qmmm","reax","voronoi")
buildclasses = ("intel","kokkos")
makeclasses = ("cc","mpi","fft","jpg","png")
setargs = ("gzip","#gzip","ffmpeg","#ffmpeg","smallbig","bigbig","smallsmall")
actionargs = ("lib-all","file","clean","exe")
# ----------------------------------------------------------------
# functions
# ----------------------------------------------------------------
# if flag = 1, print str and exit
# if flag = 0, print str as warning and do not exit
def error(str,flag=1):
if flag:
print "ERROR:",str
sys.exit()
else:
print "WARNING:",str
# store command-line args as sw = dict of key/value
# key = switch word, value = list of following args
# order = list of switches in order specified
# enforce no switch more than once
def parse_args(args):
narg = len(args)
sw = {}
order = []
iarg = 0
while iarg < narg:
if args[iarg][0] != '-': error("Arg %s is not a switch" % args[iarg])
switch = args[iarg][1:]
if switch in sw: error("Duplicate switch %s" % args[iarg])
order.append(switch)
first = iarg+1
last = first
while last < narg and args[last][0] != '-': last += 1
sw[switch] = args[first:last]
iarg = last
return sw,order
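# A small illustrative example (hypothetical command line) of the parsing above:
# >>> sw, order = parse_args(["-d", "/home/user/lammps", "-a", "file", "exe", "-v"])
# >>> sw       # {'d': ['/home/user/lammps'], 'a': ['file', 'exe'], 'v': []}
# >>> order    # ['d', 'a', 'v']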
# convert info in switches dict back to a string, in switch_order
def switch2str(switches,switch_order):
txt = ""
for switch in switch_order:
if txt: txt += ' '
txt += "-%s" % switch
txt += ' ' + ' '.join(switches[switch])
return txt
# check if compiler works with ccflags on dummy one-line tmpauto.cpp file
# return 1 if successful, else 0
# warn = 1 = print warning if not successful, warn = 0 = no warning
# NOTE: unrecognized -override-limits can leave verride-limits file
def compile_check(compiler,ccflags,warn):
open("tmpauto.cpp",'w').write("int main(int, char **) {}")
str = "%s %s -c tmpauto.cpp" % (compiler,ccflags)
txt = commands.getoutput(str)
flag = 1
if txt or not os.path.isfile("tmpauto.o"):
flag = 0
if warn:
print str
if txt: print txt
else: print "compile produced no output"
os.remove("tmpauto.cpp")
if os.path.isfile("tmpauto.o"): os.remove("tmpauto.o")
return flag
# check if linker works with linkflags on tmpauto.o file
# return 1 if successful, else 0
# warn = 1 = print warning if not successful, warn = 0 = no warning
def link_check(linker,linkflags,warn):
open("tmpauto.cpp",'w').write("int main(int, char **) {}")
str = "%s %s -o tmpauto tmpauto.cpp" % (linker,linkflags)
txt = commands.getoutput(str)
flag = 1
if txt or not os.path.isfile("tmpauto"):
flag = 0
if warn:
print str
if txt: print txt
else: print "link produced no output"
os.remove("tmpauto.cpp")
if os.path.isfile("tmpauto"): os.remove("tmpauto")
return flag
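# A minimal sketch (hypothetical compiler and flag) of how these probes are used
# by the file action below: try a flag against the dummy file first, and only
# add it to Makefile.auto if the probe succeeds.
# >>> if compile_check("g++", "-fopenmp", 0):
# ...     pass  # safe to add "-fopenmp" to CCFLAGS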
# ----------------------------------------------------------------
# switch classes, one per single-letter switch
# ----------------------------------------------------------------
# actions
class Actions:
def __init__(self,list):
self.inlist = list[:]
def help(self):
return """
-a action1 action2 ...
possible actions = lib-all, lib-dir, file, clean, exe or machine
machine is a Makefile.machine suffix
actions can be specified in any order
each action can appear only once
lib-dir can appear multiple times for different dirs
some actions depend on installed packages
installed packages = currently installed + result of -p switch
actions are invoked in this order, independent of specified order
(1) lib-all or lib-dir = build auxiliary libraries
lib-all builds all auxiliary libs needed by installed packages
lib-dir builds a specific lib whether package installed or not
dir is any dir in lib directory (atc, cuda, meam, etc) except linalg
(2) file = create src/MAKE/MINE/Makefile.auto
use -m switch for Makefile.machine to start from,
else use existing Makefile.auto
adds settings needed for installed accelerator packages
existing Makefile.auto is NOT changed unless "file" action is specified
(3) clean = invoke "make clean-auto" to ensure a full build
useful if compiler flags have changed
(4) exe or machine = build LAMMPS
machine can be any existing Makefile.machine suffix
machine is converted to "exe" action, as well as:
"-m machine" is added if -m switch is not specified
"-o machine" is added if -o switch is not specified
if either "-m" or "-o" are specified, they are not overridden
does not invoke any lib builds, since libs could be previously built
exe always builds using src/MAKE/MINE/Makefile.auto
if file action also specified, it creates Makefile.auto
else if -m switch specified,
existing Makefile.machine is copied to create Makefile.auto
else Makefile.auto must already exist and is not changed
produces src/lmp_auto, or error message if unsuccessful
use -o switch to copy src/lmp_auto to new filename
"""
def check(self):
if not self.inlist: error("-a args are invalid")
alist = []
machine = 0
nlib = 0
for one in self.inlist:
if one in alist: error("An action is duplicated")
if one.startswith("lib-"):
lib = one[4:]
if lib != "all" and lib not in libclasses: error("Actions are invalid")
alist.insert(nlib,one)
nlib += 1
elif one == "file":
if nlib == 0: alist.insert(0,"file")
else: alist.insert(1,"file")
elif one == "clean":
if nlib == 0: alist.insert(0,"clean")
elif "file" not in alist: alist.insert(1,"clean")
else: alist.insert(2,"clean")
elif one == "exe":
if machine == 0: alist.append("exe")
else: error("Actions are invalid")
machine = 1
# one action can be unknown in case it is a machine (checked in setup)
elif machine == 0:
alist.append(one)
machine = 1
else: error("Actions are invalid")
self.alist = alist
# dedup list of actions concatenated from two lists
# current self.inlist = specified -a switch + redo command -a switch
# specified exe/machine action replaces redo exe/machine action
# operates on and replaces self.inlist
def dedup(self):
alist = []
exemachine = 0
for one in self.inlist:
if one == "exe" or (one not in actionargs and not one.startswith("lib-")):
if exemachine: continue
exemachine = 1
if one not in alist: alist.append(one)
self.inlist = alist
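# An illustrative example (hypothetical args) of the dedup above: if the user
# specifies "-a file serial" and the redo command supplies "-a file exe", the
# concatenated list ["file", "serial", "file", "exe"] collapses to
# ["file", "serial"]; the specified machine action wins over the redo "exe".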
# if last action is unknown, assume machine and convert to exe
# only done if action is a suffix for an existing Makefile.machine
# return machine if conversion done, else None
def setup(self):
machine = self.alist[-1]
if machine in actionargs or machine.startswith("lib-"): return None
make = MakeReader(machine,2)
self.alist[-1] = "exe"
return machine
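# An illustrative example (hypothetical machine name) of the conversion above:
# with "-a file serial" and an existing src/MAKE/Makefile.serial, check() builds
# alist = ["file", "serial"], and setup() rewrites it to ["file", "exe"] and
# returns "serial" so later switches can default to that machine.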
# build one or more auxiliary package libraries
def lib(self,suffix):
if suffix != "all":
print "building",suffix,"library ..."
str = "%s.build()" % suffix
exec(str)
else:
final = packages.final
for one in packages.lib:
if final[one]:
if "user" in one: pkg = one[5:]
else: pkg = one
print "building",pkg,"library ..."
str = "%s.build()" % pkg
exec(str)
# read Makefile.machine
# if caller = "file", edit via switches
# if caller = "exe", just read
# write out new Makefile.auto
def file(self,caller):
# if caller = "file", create from mpi or read from makefile.machine or auto
# if caller = "exe" and "file" action already invoked, read from auto
# if caller = "exe" and no "file" action, read from makefile.machine or auto
if caller == "file":
if makefile and makefile.machine == "none":
if cc and mpi: machine = "mpi"
else: error("Cannot create makefile unless -cc and -mpi are used")
elif makefile: machine = makefile.machine
else: machine = "auto"
elif caller == "exe" and "file" in self.alist:
machine = "auto"
elif caller == "exe" and "file" not in self.alist:
if makefile and makefile.machine == "none":
error("Cannot build with makefile = none")
elif makefile: machine = makefile.machine
else: machine = "auto"
make = MakeReader(machine,1)
# change makefile settings to user specifications
precompiler = ""
if caller == "file":
# add compiler/linker and default CCFLAGS,LINKFLAGS
# if cc.wrap, add wrapper setting for mpi = ompi/mpich
# precompiler = env variable setting for OpenMPI wrapper compiler
if cc:
make.setvar("CC",cc.compiler)
make.setvar("LINK",cc.compiler)
if cc.wrap:
if cc.wrap == "nvcc":
wrapper = os.path.abspath("../lib/kokkos/config/nvcc_wrapper")
else: wrapper = cc.wrap
abbrev = cc.abbrev
if abbrev == "mpi":
txt = commands.getoutput("mpicxx -show")
if "-lmpich" in txt:
make.addvar("CC","-cxx=%s" % wrapper)
make.addvar("LINK","-cxx=%s" % wrapper)
elif "-lmpi" in txt:
make.addvar("OMPI_CXX",wrapper,"cc")
precompiler = "env OMPI_CXX=%s " % wrapper
else: error("Could not add MPI wrapper compiler, " +
"did not recognize OpenMPI or MPICH")
make.setvar("CCFLAGS","-g")
make.addvar("CCFLAGS","-O3")
make.setvar("LINKFLAGS","-g")
make.addvar("LINKFLAGS","-O")
# add MPI settings
if mpi:
make.delvar("MPI_INC","*")
make.delvar("MPI_PATH","*")
make.delvar("MPI_LIB","*")
if mpi.style == "mpi":
make.addvar("MPI_INC","-DMPICH_SKIP_MPICXX")
make.addvar("MPI_INC","-DOMPI_SKIP_MPICXX=1")
elif mpi.style == "mpich":
make.addvar("MPI_INC","-DMPICH_SKIP_MPICXX")
make.addvar("MPI_INC","-DOMPI_SKIP_MPICXX=1")
if mpi.dir: make.addvar("MPI_INC","-I%s/include" % mpi.dir)
if mpi.dir: make.addvar("MPI_PATH","-L%s/lib" % mpi.dir)
make.addvar("MPI_LIB","-lmpich")
make.addvar("MPI_LIB","-lmpl")
make.addvar("MPI_LIB","-lpthread")
elif mpi.style == "ompi":
make.addvar("MPI_INC","-DMPICH_SKIP_MPICXX")
make.addvar("MPI_INC","-DOMPI_SKIP_MPICXX=1")
if mpi.dir: make.addvar("MPI_INC","-I%s/include" % mpi.dir)
if mpi.dir: make.addvar("MPI_PATH","-L%s/lib" % mpi.dir)
make.addvar("MPI_LIB","-lmpi")
make.addvar("MPI_LIB","-lmpi_cxx")
elif mpi.style == "serial":
make.addvar("MPI_INC","-I../STUBS")
make.addvar("MPI_PATH","-L../STUBS")
make.addvar("MPI_LIB","-lmpi_stubs")
# add accelerator package CCFLAGS and LINKFLAGS and variables
compiler = precompiler + ' '.join(make.getvar("CC"))
linker = precompiler + ' '.join(make.getvar("LINK"))
final = packages.final
if final["opt"]:
if compile_check(compiler,"-restrict",0):
make.addvar("CCFLAGS","-restrict")
if final["user-omp"]:
if compile_check(compiler,"-restrict",0):
make.addvar("CCFLAGS","-restrict")
if compile_check(compiler,"-fopenmp",1):
make.addvar("CCFLAGS","-fopenmp")
make.addvar("LINKFLAGS","-fopenmp")
if final["user-intel"]:
if intel.mode == "cpu":
if compile_check(compiler,"-fopenmp",1):
make.addvar("CCFLAGS","-fopenmp")
make.addvar("LINKFLAGS","-fopenmp")
make.addvar("CCFLAGS","-DLAMMPS_MEMALIGN=64")
if compile_check(compiler,"-restrict",1):
make.addvar("CCFLAGS","-restrict")
if compile_check(compiler,"-xHost",1):
make.addvar("CCFLAGS","-xHost")
make.addvar("LINKFLAGS","-xHost")
if compile_check(compiler,"-fno-alias",1):
make.addvar("CCFLAGS","-fno-alias")
if compile_check(compiler,"-ansi-alias",1):
make.addvar("CCFLAGS","-ansi-alias")
if compile_check(compiler,"-override-limits",1):
make.addvar("CCFLAGS","-override-limits")
make.delvar("CCFLAGS","-DLMP_INTEL_OFFLOAD")
make.delvar("LINKFLAGS","-offload")
elif intel.mode == "phi":
if compile_check(compiler,"-fopenmp",1):
make.addvar("CCFLAGS","-fopenmp")
make.addvar("LINKFLAGS","-fopenmp")
make.addvar("CCFLAGS","-DLAMMPS_MEMALIGN=64")
if compile_check(compiler,"-restrict",1):
make.addvar("CCFLAGS","-restrict")
if compile_check(compiler,"-xHost",1):
make.addvar("CCFLAGS","-xHost")
make.addvar("CCFLAGS","-DLMP_INTEL_OFFLOAD")
if compile_check(compiler,"-fno-alias",1):
make.addvar("CCFLAGS","-fno-alias")
if compile_check(compiler,"-ansi-alias",1):
make.addvar("CCFLAGS","-ansi-alias")
if compile_check(compiler,"-override-limits",1):
make.addvar("CCFLAGS","-override-limits")
if compile_check(compiler,'-offload-option,mic,compiler,' +
'"-fp-model fast=2 -mGLOB_default_function_attrs=' +
'\\"gather_scatter_loop_unroll=4\\""',1):
make.addvar("CCFLAGS",'-offload-option,mic,compiler,' +
'"-fp-model fast=2 -mGLOB_default_function_attrs=' +
'\\"gather_scatter_loop_unroll=4\\""')
if link_check(linker,"-offload",1):
make.addvar("LINKFLAGS","-offload")
if final["kokkos"]:
if kokkos.mode == "omp":
make.delvar("KOKKOS_DEVICES","*")
make.delvar("KOKKOS_ARCH","*")
make.addvar("KOKKOS_DEVICES","OpenMP","lmp")
elif kokkos.mode == "cuda":
make.delvar("KOKKOS_DEVICES","*")
make.delvar("KOKKOS_ARCH","*")
make.addvar("KOKKOS_DEVICES","Cuda, OpenMP","lmp")
if kokkos.arch[0] == "3":
make.addvar("KOKKOS_ARCH","Kepler" + kokkos.arch,"lmp")
elif kokkos.arch[0] == "2":
make.addvar("KOKKOS_ARCH","Fermi" + kokkos.arch,"lmp")
elif kokkos.mode == "phi":
make.delvar("KOKKOS_DEVICES","*")
make.delvar("KOKKOS_ARCH","*")
make.addvar("KOKKOS_DEVICES","OpenMP","lmp")
make.addvar("KOKKOS_ARCH","KNC","lmp")
# add LMP settings
if settings:
list = settings.inlist
for one in list:
if one == "gzip": make.addvar("LMP_INC","-DLAMMPS_GZIP")
elif one == "#gzip": make.delvar("LMP_INC","-DLAMMPS_GZIP")
elif one == "ffmpeg": make.addvar("LMP_INC","-DLAMMPS_FFMPEG")
elif one == "#ffmpeg": make.delvar("LMP_INC","-DLAMMPS_FFMPEG")
elif one == "smallbig":
make.delvar("LMP_INC","-DLAMMPS_BIGBIG")
make.delvar("LMP_INC","-DLAMMPS_SMALLSMALL")
elif one == "bigbig":
make.delvar("LMP_INC","-DLAMMPS_SMALLBIG")
make.delvar("LMP_INC","-DLAMMPS_SMALLSMALL")
make.addvar("LMP_INC","-DLAMMPS_BIGBIG")
elif one == "smallsmall":
make.delvar("LMP_INC","-DLAMMPS_SMALLBIG")
make.delvar("LMP_INC","-DLAMMPS_BIGBIG")
make.addvar("LMP_INC","-DLAMMPS_SMALLSMALL")
# add FFT, JPG, PNG settings
if fft:
make.delvar("FFT_INC","*")
make.delvar("FFT_PATH","*")
make.delvar("FFT_LIB","*")
if fft.mode == "none": make.addvar("FFT_INC","-DFFT_NONE")
else:
make.addvar("FFT_INC","-DFFT_%s" % fft.mode.upper())
make.addvar("FFT_LIB",fft.lib)
if fft.dir:
make.addvar("FFT_INC","-I%s/include" % fft.dir)
make.addvar("FFT_PATH","-L%s/lib" % fft.dir)
else:
if fft.incdir: make.addvar("FFT_INC","-I%s" % fft.incdir)
if fft.libdir: make.addvar("FFT_PATH","-L%s" % fft.libdir)
if jpg:
if jpg.on == 0:
make.delvar("LMP_INC","-DLAMMPS_JPEG")
make.delvar("JPG_LIB","-ljpeg")
else:
make.addvar("LMP_INC","-DLAMMPS_JPEG")
make.addvar("JPG_LIB","-ljpeg")
if jpg.dir:
make.addvar("JPG_INC","-I%s/include" % jpg.dir)
make.addvar("JPG_PATH","-L%s/lib" % jpg.dir)
else:
if jpg.incdir: make.addvar("JPG_INC","-I%s" % jpg.incdir)
if jpg.libdir: make.addvar("JPG_PATH","-L%s" % jpg.libdir)
if png:
if png.on == 0:
make.delvar("LMP_INC","-DLAMMPS_PNG")
make.delvar("JPG_LIB","-lpng")
else:
make.addvar("LMP_INC","-DLAMMPS_PNG")
make.addvar("JPG_LIB","-lpng")
if png.dir:
make.addvar("JPG_INC","-I%s/include" % png.dir)
make.addvar("JPG_PATH","-L%s/lib" % png.dir)
else:
if png.incdir: make.addvar("JPG_INC","-I%s" % png.incdir)
if png.libdir: make.addvar("JPG_PATH","-L%s" % png.libdir)
# set self.stubs if Makefile.auto uses STUBS lib in MPI settings
if "-lmpi_stubs" in make.getvar("MPI_LIB"): self.stubs = 1
else: self.stubs = 0
# write out Makefile.auto
# unless caller = "exe" and "file" action already invoked
if caller == "file" or "file" not in self.alist:
make.write("%s/MAKE/MINE/Makefile.auto" % dir.src,1)
print "Created src/MAKE/MINE/Makefile.auto"
# test full compile and link
# unless caller = "file" and "exe" action will be invoked later
if caller == "file" and "exe" in self.alist: return
compiler = precompiler + ' '.join(make.getvar("CC"))
ccflags = ' '.join(make.getvar("CCFLAGS"))
linker = precompiler + ' '.join(make.getvar("LINK"))
linkflags = ' '.join(make.getvar("LINKFLAGS"))
if not compile_check(compiler,ccflags,1):
error("Test of compilation failed")
if not link_check(linker,linkflags,1): error("Test of link failed")
# invoke "make clean-auto" to force clean before build
def clean(self):
str = "cd %s; make clean-auto" % dir.src
commands.getoutput(str)
if verbose: print "Performed make clean-auto"
# build LAMMPS using Makefile.auto and -j setting
# invoke self.file() first, to test makefile compile/link
# delete existing lmp_auto, so can detect if build fails
# build STUBS lib (if unbuilt) if Makefile.auto MPI settings need it
def exe(self):
self.file("exe")
commands.getoutput("cd %s; rm -f lmp_auto" % dir.src)
if self.stubs and not os.path.isfile("%s/STUBS/libmpi_stubs.a" % dir.src):
print "building serial STUBS library ..."
str = "cd %s/STUBS; make clean; make" % dir.src
txt = commands.getoutput(str)
if not os.path.isfile("%s/STUBS/libmpi_stubs.a" % dir.src):
print txt
error('Unsuccessful "make stubs"')
print "Created src/STUBS/libmpi_stubs.a"
if jmake: str = "cd %s; make -j %d auto" % (dir.src,jmake.n)
else: str = "cd %s; make auto" % dir.src
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/lmp_auto" % dir.src):
if not verbose: print txt
error('Unsuccessful "make auto"')
elif not output: print "Created src/lmp_auto"
# dir switch
class Dir:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
def help(self):
return """
-d dir
dir = LAMMPS home dir
if -d not specified, working dir must be lammps/src
"""
def check(self):
if self.inlist != None and len(self.inlist) != 1:
error("-d args are invalid")
# if inlist = None, check that cwd = lammps/src
# store cwd and lammps dir
# derive src,make,lib dirs from lammps dir
# check that they all exist
def setup(self):
self.cwd = os.getcwd()
if self.inlist == None: self.lammps = ".."
else: self.lammps = self.inlist[0]
self.lammps = os.path.realpath(self.lammps)
self.src = self.lammps + "/src"
self.make = self.lammps + "/src/MAKE"
self.lib = self.lammps + "/lib"
if not os.path.isdir(self.lammps): error("LAMMPS home dir is invalid")
if not os.path.isdir(self.src): error("LAMMPS src dir is invalid")
if not os.path.isdir(self.lib): error("LAMMPS lib dir is invalid")
# help switch
class Help:
def __init__(self,list): pass
def help(self):
return """
Syntax: Make.py switch args ...
switches can be listed in any order
help switch:
-h prints help and syntax for all other specified switches
switch for actions:
-a lib-all, lib-dir, clean, file, exe or machine
list one or more actions, in any order
machine is a Makefile.machine suffix
one-letter switches:
-d (dir), -j (jmake), -m (makefile), -o (output),
-p (packages), -r (redo), -s (settings), -v (verbose)
switches for libs:
-atc, -awpmd, -colvars, -cuda
-gpu, -meam, -poems, -qmmm, -reax, -voronoi
switches for build and makefile options:
-intel, -kokkos, -cc, -mpi, -fft, -jpg, -png
"""
# jmake switch
class Jmake:
def __init__(self,list):
self.inlist = list[:]
def help(self):
return """
-j N
use N procs for performing parallel make commands
used when building a lib or LAMMPS itself
if -j not specified, serial make commands run on single core
"""
def check(self):
if len(self.inlist) != 1: error("-j args are invalid")
if not self.inlist[0].isdigit(): error("-j args are invalid")
n = int(self.inlist[0])
if n <= 0: error("-j args are invalid")
self.n = n
# makefile switch
class Makefile:
def __init__(self,list):
self.inlist = list[:]
def help(self):
return """
-m machine
use Makefile.machine under src/MAKE as starting point to create Makefile.auto
if machine = "none", file action will create Makefile.auto from scratch
must use -cc and -mpi switches to specify compiler and MPI
if -m not specified, file/exe actions alter existing Makefile.auto
"""
def check(self):
if len(self.inlist) != 1: error("-m args are invalid")
self.machine = self.inlist[0]
# output switch
class Output:
def __init__(self,list):
self.inlist = list[:]
def help(self):
return """
-o machine
copy final src/lmp_auto to lmp_machine in working dir
if -o not specified, exe action only produces src/lmp_auto
"""
def check(self):
if len(self.inlist) != 1: error("-o args are invalid")
self.machine = self.inlist[0]
# packages switch
class Packages:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
def help(self):
return """
-p = package1 package2 ...
list of packages to install or uninstall in order specified
operates on set of packages currently installed
valid package names:
any LAMMPS standard or user package (type "make package" to see list)
prefix by yes/no to install/uninstall (see abbrevs)
yes-molecule, yes-user-atc, no-molecule, no-user-atc
can use LAMMPS categories (type "make package" to see list)
all = all standard and user packages (also none = no-all)
std (or standard) = all standard packages
user = all user packages
lib = all standard and user packages with auxiliary libs
can abbreviate package names and yes/no
omp = user-omp = yes-user-omp
^omp = ^user-omp = no-user-omp
user = yes-user, ^user = no-user
all = yes-all, ^all = none = no-all
when action performed, list is processed in order,
as if typed "make yes/no" for each
if "orig" or "original" is last package in list,
set of installed packages will be restored to original (current) list
after "build" action is performed
if -p not specified, currently installed packages are not changed
"""
def check(self):
if self.inlist != None and not self.inlist: error("-p args are invalid")
def setup(self):
# extract package lists from src/Makefile
# remove names from lib for which there are no Make.py lib classes
# most don't actually have libs, so nothing to control from Make.py
make = MakeReader("%s/Makefile" % dir.src)
std = make.getvar("PACKAGE")
user = make.getvar("PACKUSER")
lib = make.getvar("PACKLIB")
lib.remove("kim")
lib.remove("kokkos")
lib.remove("user-molfile")
lib.remove("python")
lib.remove("user-quip")
all = std + user
# plist = command line args expanded to yes-package or no-package
plist = []
if self.inlist:
for one in self.inlist:
if one in std:
plist.append("yes-%s" % one)
elif one in user:
plist.append("yes-%s" % one)
elif "user-"+one in user:
plist.append("yes-user-%s" % one)
elif one == "std" or one == "standard" or one == "user" or \
one == "lib" or one == "all": plist.append("yes-%s" % one)
elif one.startswith("yes-"):
if one[4:] in std: plist.append("yes-%s" % one[4:])
elif one[4:] in user: plist.append("yes-%s" % one[4:])
elif "user-"+one[4:] in user: plist.append("yes-user-%s" % one[4:])
elif one == "yes-std" or one == "yes-standard" or \
one == "yes-user" or one == "yes-lib" or one == "yes-all":
plist.append("yes-%s" % one[4:])
else: error("Invalid package name %s" % one)
elif one.startswith("no-"):
if one[3:] in std: plist.append("no-%s" % one[3:])
elif one[3:] in user: plist.append("no-%s" % one[3:])
elif "user-"+one[3:] in user: plist.append("no-user-%s" % one[3:])
elif one == "no-std" or one == "no-standard" or one == "no-user" or \
one == "no-lib" or one == "no-all":
plist.append("no-%s" % one[3:])
else: error("Invalid package name %s" % one)
elif one.startswith('^'):
if one[1:] in std: plist.append("no-%s" % one[1:])
elif one[1:] in user: plist.append("no-%s" % one[1:])
elif "user-"+one[1:] in user: plist.append("no-user-%s" % one[1:])
elif one == "^std" or one == "^standard" or one == "^user" or \
one == "^lib" or one == "^all": plist.append("no-%s" % one[1:])
else: error("Invalid package name %s" % one)
elif one == "none": plist.append("no-all")
elif one == "orig": plist.append(one)
else: error("Invalid package name %s" % one)
if "orig" in plist and plist.index("orig") != len(plist)-1:
error('-p orig arg must be last')
if plist.count("orig") > 1: error('-p orig arg must be last')
# original = dict of all packages
# key = package name, value = 1 if currently installed, else 0
original = {}
str = "cd %s; make ps" % dir.src
output = commands.getoutput(str).split('\n')
pattern = "Installed\s+(\w+): package (\S+)"
for line in output:
m = re.search(pattern,line)
if not m: continue
pkg = m.group(2).lower()
if pkg not in all: error('Package list does not match "make ps" results')
if m.group(1) == "NO": original[pkg] = 0
elif m.group(1) == "YES": original[pkg] = 1
# final = dict of all packages after plist applied to original
# key = package name, value = 1 if installed, else 0
final = copy.deepcopy(original)
for i,one in enumerate(plist):
if "yes" in one:
pkg = one[4:]
yes = 1
else:
pkg = one[3:]
yes = 0
if pkg in all:
final[pkg] = yes
elif pkg == "std":
for pkg in std: final[pkg] = yes
elif pkg == "user":
for pkg in user: final[pkg] = yes
elif pkg == "lib":
for pkg in lib: final[pkg] = yes
elif pkg == "all":
for pkg in all: final[pkg] = yes
self.std = std
self.user = user
self.lib = lib
self.all = all
self.plist = plist
self.original = original
self.final = final
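# A worked example (hypothetical -p arguments) of the expansion above: the
# command-line list ["omp", "^gpu", "orig"] becomes
#   plist = ["yes-user-omp", "no-gpu", "orig"]
# and the trailing "orig" makes uninstall() restore the originally installed
# package set after the build actions complete.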
# install packages in plist
def install(self):
if self.plist: print "Installing packages ..."
for one in self.plist:
if one == "orig": continue
commands.getoutput("cd %s; make %s" % (dir.src,one))
if self.plist and verbose:
txt = commands.getoutput("cd %s; make ps" % dir.src)
print "Package status after installation:"
print txt
# restore packages to original list if requested
# order of re-install should not matter b/c of Depend.sh
def uninstall(self):
if not self.plist or self.plist[-1] != "orig": return
print "Restoring packages to original state ..."
commands.getoutput("cd %s; make no-all" % dir.src)
for one in self.all:
if self.original[one]:
commands.getoutput("cd %s; make yes-%s" % (dir.src,one))
if verbose:
txt = commands.getoutput("cd %s; make ps" % dir.src)
print "Restored package status:"
print txt
# redo switch
class Redo:
def __init__(self,list):
self.inlist = list[:]
def help(self):
return """
-r file label1 label2 ...
all args are optional
invoke Make.py commands from a file
other specified switches are merged with file commands (see below)
redo file format:
blank lines and lines starting with "#" are skipped
other lines are treated as commands
each command is a list of Make.py args, as if typed at command-line
commands can have leading label, followed by ":"
commands cannot contain a "-r" switch
if no args, execute previous command, which is stored in src/Make.py.last
if one arg, execute all commands from specified file
unlabeled or labeled commands are all executed
if multiple args, execute only matching labeled commands from file
if other switches are specified,
if file command does not have the switch, it is added
if file command has the switch, the specified switch replaces it
except if -a (action) switch is both specified and in the file command,
two sets of actions are merged and duplicates removed
if both switches have "exe or machine" action,
the specified exe/machine overrides the file exe/machine
"""
def check(self):
if len(self.inlist) == 0:
self.dir = 1
self.file = "Make.py.last"
self.labels = []
else:
self.dir = 0
self.file = self.inlist[0]
self.labels = self.inlist[1:]
# read redo file
# self.commands = list of commands to execute
def setup(self):
file = self.file
if not os.path.isfile(file): error("Redo file %s does not exist" % file)
lines = open(file,'r').readlines()
cmdlines = []
for line in lines:
line = line.strip()
if not line or line[0] == '#' : continue
cmdlines.append(line)
# if no labels, add all file commands to command list
# if labels, make a dict with key = label, value = command
# and discard unlabeled commands
dict = {}
commands = []
for line in cmdlines:
words = line.split()
if "-r" in words: error("Redo command cannot contain -r switch")
if words[0][-1] == ':': label = words[0][:-1]
else: label = None
if not self.labels:
if label: commands.append(' '.join(words[1:]))
else: commands.append(line)
else:
if not label: continue
dict[label] = ' '.join(words[1:])
# extract labeled commands from dict and add to command list
for label in self.labels:
if label not in dict: error("Redo label not in redo file")
commands.append(dict[label])
self.commands = commands
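# An illustrative redo file (hypothetical contents) for the parsing above:
#   # nightly rebuilds
#   cpu: -p omp -a file exe -m mpi
#   phi: -p intel -a file exe -m mpi
# "Make.py -r build.redo" runs both commands with their labels stripped, while
# "Make.py -r build.redo cpu" runs only the command labeled "cpu".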
# settings switch
class Settings:
def __init__(self,list):
self.inlist = list[:]
def help(self):
return """
-s set1 set2 ...
possible settings = gzip #gzip ffmpeg #ffmpeg smallbig bigbig smallsmall
add each setting as LAMMPS setting to created Makefile.auto
if -s not specified, no settings are changed in Makefile.auto
"""
def check(self):
if not self.inlist: error("-s args are invalid")
for one in self.inlist:
if one not in setargs: error("-s args are invalid")
# verbose switch
class Verbose:
def __init__(self,list):
self.inlist = list[:]
def help(self):
return """
-v (no arguments)
produce verbose output as Make.py executes
if -v not specified, minimal output is produced
"""
def check(self):
if len(self.inlist): error("-v args are invalid")
# ----------------------------------------------------------------
# lib classes, one per LAMMPS auxiliary lib
# ----------------------------------------------------------------
# ATC lib
class ATC:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "g++"
self.lammpsflag = 0
def help(self):
return """
-atc make=suffix lammps=suffix2
all args are optional and can be in any order
make = use Makefile.suffix (def = g++)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-atc args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-atc args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
else: error("-atc args are invalid")
def build(self):
libdir = dir.lib + "/atc"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
if jmake: str = "cd %s; make -j %d -f Makefile.auto" % (libdir,jmake.n)
else: str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libatc.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/atc library")
else: print "Created lib/atc library"
# AWPMD lib
class AWPMD:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "mpicc"
self.lammpsflag = 0
def help(self):
return """
-awpmd make=suffix lammps=suffix2
all args are optional and can be in any order
make = use Makefile.suffix (def = mpicc)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-awpmd args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-awpmd args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
else: error("-awpmd args are invalid")
def build(self):
libdir = dir.lib + "/awpmd"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
if jmake: str = "cd %s; make -j %d -f Makefile.auto" % (libdir,jmake.n)
else: str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libawpmd.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/awpmd library")
else: print "Created lib/awpmd library"
# COLVARS lib
class COLVARS:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "g++"
self.lammpsflag = 0
def help(self):
return """
-colvars make=suffix lammps=suffix2
all args are optional and can be in any order
make = use Makefile.suffix (def = g++)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-colvars args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-colvars args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
else: error("-colvars args are invalid")
def build(self):
libdir = dir.lib + "/colvars"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
if jmake: str = "cd %s; make -j %d -f Makefile.auto" % (libdir,jmake.n)
else: str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libcolvars.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/colvars library")
else: print "Created lib/colvars library"
# CUDA lib
class CUDA:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.mode = "double"
self.arch = "31"
def help(self):
return """
-cuda mode=double arch=31
all args are optional and can be in any order
mode = double or mixed or single (def = double)
arch = M (def = 31)
M = 31 for Kepler
M = 20 for CC2.0 (GF100/110, e.g. C2050,GTX580,GTX470)
M = 21 for CC2.1 (GF104/114, e.g. GTX560, GTX460, GTX450)
M = 13 for CC1.3 (GF200, e.g. C1060, GTX285)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-cuda args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-cuda args are invalid")
if words[0] == "mode": self.mode = words[1]
elif words[0] == "arch": self.arch = words[1]
else: error("-cuda args are invalid")
if self.mode != "double" and self.mode != "mixed" and \
self.mode != "single":
error("-cuda args are invalid")
if not self.arch.isdigit(): error("-cuda args are invalid")
def build(self):
libdir = dir.lib + "/cuda"
commands.getoutput("cd %s; make clean" % libdir)
if self.mode == "double": n = 2
elif self.mode == "mixed": n = 3
elif self.mode == "single": n = 1
if jmake: str = "cd %s; make -j %d precision=%d arch=%s" % \
(libdir,jmake.n,n,self.arch)
else: str = "cd %s; make precision=%d arch=%s" % \
(libdir,n,self.arch)
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/liblammpscuda.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/cuda library")
else: print "Created lib/cuda library"
# GPU lib
class GPU:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "linux.double"
self.lammpsflag = self.modeflag = self.archflag = 0
def help(self):
return """
-gpu make=suffix lammps=suffix2 mode=double arch=N
all args are optional and can be in any order
make = use Makefile.suffix (def = linux.double)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
mode = double or mixed or single (def = CUDA_PREC in makefile)
arch = 31 (Kepler) or 21 (Fermi) (def = CUDA_ARCH in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-gpu args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-gpu args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
elif words[0] == "mode":
self.mode = words[1]
self.modeflag = 1
elif words[0] == "arch":
self.arch = words[1]
self.archflag = 1
else: error("-gpu args are invalid")
if self.modeflag and (self.mode != "double" and
self.mode != "mixed" and
self.mode != "single"):
error("-gpu args are invalid")
if self.archflag and not self.arch.isdigit():
error("-gpu args are invalid")
def build(self):
libdir = dir.lib + "/gpu"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.modeflag:
if self.mode == "double":
make.setvar("CUDA_PRECISION","-D_DOUBLE_DOUBLE")
elif self.mode == "mixed":
make.setvar("CUDA_PRECISION","-D_SINGLE_DOUBLE")
elif self.mode == "single":
make.setvar("CUDA_PRECISION","-D_SINGLE_SINGLE")
if self.archflag:
make.setvar("CUDA_ARCH","-arch=sm_%s" % self.arch)
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
if jmake: str = "cd %s; make -j %d -f Makefile.auto" % (libdir,jmake.n)
else: str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libgpu.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/gpu library")
else: print "Created lib/gpu library"
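# Illustrative, never-called sketch of how the GPU class parses a "-gpu"
# switch; the suffix "linux.mixed" is a hypothetical Makefile name. The same
# make=/lammps= keyword pattern is shared by the other lib classes
# (AWPMD, COLVARS, MEAM, POEMS, QMMM, REAX).
def _example_gpu_switch():
  gpu = GPU(["make=linux.mixed","mode=mixed","arch=21"])
  gpu.check()
  # after check(): gpu.make == "linux.mixed", gpu.mode == "mixed",
  # gpu.arch == "21", gpu.modeflag == gpu.archflag == 1
  return gpu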
# MEAM lib
class MEAM:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "gfortran"
self.lammpsflag = 0
def help(self):
return """
-meam make=suffix lammps=suffix2
all args are optional and can be in any order
make = use Makefile.suffix (def = gfortran)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-meam args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-meam args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
else: error("-meam args are invalid")
def build(self):
libdir = dir.lib + "/meam"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
# do not use -j for MEAM build, parallel build does not work
str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libmeam.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/meam library")
else: print "Created lib/meam library"
# POEMS lib
class POEMS:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "g++"
self.lammpsflag = 0
def help(self):
return """
-poems make=suffix lammps=suffix2
all args are optional and can be in any order
make = use Makefile.suffix (def = g++)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-poems args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-poems args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
else: error("-poems args are invalid")
def build(self):
libdir = dir.lib + "/poems"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
if jmake: str = "cd %s; make -j %d -f Makefile.auto" % (libdir,jmake.n)
else: str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libpoems.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/poems library")
else: print "Created lib/poems library"
# QMMM lib
class QMMM:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "gfortran"
self.lammpsflag = 0
def help(self):
return """
-qmmm make=suffix lammps=suffix2
all args are optional and can be in any order
make = use Makefile.suffix (def = gfortran)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-qmmm args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-qmmm args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
else: error("-qmmm args are invalid")
def build(self):
libdir = dir.lib + "/qmmm"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
if jmake: str = "cd %s; make -j %d -f Makefile.auto" % (libdir,jmake.n)
else: str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libqmmm.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/qmmm library")
else: print "Created lib/qmmm library"
# REAX lib
class REAX:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.make = "gfortran"
self.lammpsflag = 0
def help(self):
return """
-reax make=suffix lammps=suffix2
all args are optional and can be in any order
make = use Makefile.suffix (def = gfortran)
lammps = use Makefile.lammps.suffix2 (def = EXTRAMAKE in makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-reax args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-reax args are invalid")
if words[0] == "make": self.make = words[1]
elif words[0] == "lammps":
self.lammps = words[1]
self.lammpsflag = 1
else: error("-reax args are invalid")
def build(self):
libdir = dir.lib + "/reax"
make = MakeReader("%s/Makefile.%s" % (libdir,self.make))
if self.lammpsflag:
make.setvar("EXTRAMAKE","Makefile.lammps.%s" % self.lammps)
make.write("%s/Makefile.auto" % libdir)
commands.getoutput("cd %s; make -f Makefile.auto clean" % libdir)
if jmake: str = "cd %s; make -j %d -f Makefile.auto" % (libdir,jmake.n)
else: str = "cd %s; make -f Makefile.auto" % libdir
txt = commands.getoutput(str)
if verbose: print txt
if not os.path.isfile("%s/libreax.a" % libdir) or \
not os.path.isfile("%s/Makefile.lammps" % libdir):
if not verbose: print txt
error("Unsuccessful build of lib/reax library")
else: print "Created lib/reax library"
# VORONOI lib
class VORONOI:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.install = ""
def help(self):
return """
-voronoi install="-d dir -v version -g -b -i installdir -l incdir libdir"
arg is optional, only needed if you want to run the install.py script
install = args to use with lib/voronoi/install.py script
must enclose in quotes since install.py args have switches
install.py can download, build, install, setup links to the Voro++ library
see lib/voronoi/README for details on Voro++ and using install.py
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-voronoi args are invalid")
for one in self.inlist:
words = one.split('=')
if len(words) != 2: error("-voronoi args are invalid")
if words[0] == "install": self.install = words[1]
else: error("-voronoi args are invalid")
def build(self):
if not self.install: return
libdir = dir.lib + "/voronoi"
cmd = "cd %s; python install.py %s" % (libdir,self.install)
txt = commands.getoutput(cmd)
if verbose: print txt
print "Created lib/voronoi library"
# ----------------------------------------------------------------
# build classes for intel, kokkos build options
# ----------------------------------------------------------------
# Intel class
class Intel:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.mode = "cpu"
def help(self):
return """
-intel mode
mode = cpu or phi (def = cpu)
build Intel package for CPU or Xeon Phi
"""
def check(self):
if self.inlist == None: return
if len(self.inlist) != 1: error("-intel args are invalid")
self.mode = self.inlist[0]
if self.mode != "cpu" and self.mode != "phi":
error("-intel args are invalid")
# Kokkos class
class Kokkos:
def __init__(self,list):
if list == None: self.inlist = None
else: self.inlist = list[:]
self.mode = ""
self.archflag = 0
def help(self):
return """
-kokkos mode arch=N
mode is not optional, arch is optional
mode = omp or cuda or phi (def = KOKKOS_DEVICES setting in Makefile )
build Kokkos package for omp or cuda or phi
set KOKKOS_DEVICES to "OpenMP" (omp, phi) or "Cuda, OpenMP" (cuda)
arch = 31 (Kepler) or 21 (Fermi) (def = -arch setting in Makefile)
"""
def check(self):
if self.inlist != None and len(self.inlist) == 0:
error("-kokkos args are invalid")
if self.inlist == None: return
if len(self.inlist) < 1: error("-kokkos args are invalid")
self.mode = self.inlist[0]
if self.mode != "omp" and self.mode != "cuda" and self.mode != "phi":
error("-kokkos args are invalid")
for one in self.inlist[1:]:
words = one.split('=')
if len(words) != 2: error("-kokkos args are invalid")
if words[0] == "arch":
self.arch = words[1]
self.archflag = 1
else: error("-kokkos args are invalid")
# ----------------------------------------------------------------
# makefile classes for CC, MPI, JPG, PNG, FFT settings
# ----------------------------------------------------------------
# Cc class
class Cc:
def __init__(self,list):
self.inlist = list[:]
self.compiler = self.abbrev = ""
self.wrap = ""
def help(self):
return """
-cc compiler wrap=wcompiler
change CC setting in makefile
compiler is required, all other args are optional
compiler = any string with g++ or icc or icpc
or mpi (or mpicxx, mpiCC, mpiicpc, etc)
can be compiler name or full path to compiler
mpi by itself is changed to mpicxx
wcompiler = compiler for mpi wrapper to use
use nvcc for building for Kokkos/cuda with provided nvcc_wrapper
"""
def check(self):
if len(self.inlist) < 1: error("-cc args are invalid")
self.compiler = self.inlist[0]
if self.compiler == "mpi":
self.compiler = "mpicxx"
self.abbrev = "mpi"
elif self.compiler.startswith("mpi"):
self.abbrev = "mpi"
elif self.compiler == "g++" or self.compiler == "icc" or \
self.compiler == "icpc":
self.abbrev = self.compiler
elif "mpi" in self.compiler: self.abbrev = "mpi"
elif "g++" in self.compiler: self.abbrev = "g++"
elif "icc" in self.compiler: self.abbrev = "icc"
elif "icpc" in self.compiler: self.abbrev = "icpc"
else: error("-cc args are invalid")
for one in self.inlist[1:]:
words = one.split('=')
if len(words) != 2: error("-cc args are invalid")
if words[0] == "wrap":
if self.abbrev != "mpi": error("-cc compiler is not a wrapper")
self.wrap = words[1]
else: error("-cc args are invalid")
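# Illustrative, never-called sketch of how the Cc class maps a "-cc" switch
# onto compiler settings; "nvcc" here is just an example wrapper compiler.
def _example_cc_switch():
  cc = Cc(["mpi","wrap=nvcc"])
  cc.check()
  # "mpi" alone is expanded to "mpicxx"; wrap= is only accepted for MPI wrappers
  # cc.compiler == "mpicxx", cc.abbrev == "mpi", cc.wrap == "nvcc"
  return cc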
# Mpi class
class Mpi:
def __init__(self,list):
self.inlist = list[:]
self.style = self.dir = ""
def help(self):
return """
-mpi style dir=path
change MPI settings in makefile
style is required, all other args are optional
style = mpi or mpich or ompi or serial
mpi = no MPI settings (assume compiler is MPI wrapper)
mpich = use explicit settings for MPICH
ompi = use explicit settings for OpenMPI
serial = use settings for src/STUBS library
dir = path for MPICH or OpenMPI directory
add -I and -L settings for include and lib sub-dirs
"""
def check(self):
if len(self.inlist) < 1: error("-mpi args are invalid")
self.style = self.inlist[0]
if self.style != "mpi" and self.style != "mpich" and \
self.style != "ompi" and self.style != "serial":
error("-mpi args are invalid")
for one in self.inlist[1:]:
words = one.split('=')
if len(words) != 2: error("-mpi args are invalid")
if words[0] == "dir": self.dir = words[1]
else: error("-mpi args are invalid")
# Fft class
class Fft:
def __init__(self,list):
self.inlist = list[:]
self.dir = self.incdir = self.libdir = ""
def help(self):
return """
-fft mode lib=libname dir=homedir idir=incdir ldir=libdir
change FFT settings in makefile
mode is required, all other args are optional
removes all current FFT variable settings
mode = none or fftw or fftw3 or ...
adds -DFFT_MODE setting
lib = name of FFT library to link with (def is libname = mode)
adds -llibname setting, e.g. -lfftw3
dir = home dir for include and library files (def = none)
adds -Idir/include and -Ldir/lib settings
if set, overrides idir and ldir args
idir = dir for include file (def = none)
adds -Iidir setting
ldir = dir for library file (def = none)
adds -Lldir setting
"""
def check(self):
if not len(self.inlist): error("-fft args are invalid")
self.mode = self.inlist[0]
self.lib = "-l%s" % self.mode
for one in self.inlist[1:]:
words = one.split('=')
if len(words) != 2: error("-fft args are invalid")
if words[0] == "lib": self.lib = "-l%s" % words[1]
elif words[0] == "dir": self.dir = words[1]
elif words[0] == "idir": self.incdir = words[1]
elif words[0] == "ldir": self.libdir = words[1]
else: error("-fft args are invalid")
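# Illustrative, never-called sketch of how the Fft class turns a "-fft" switch
# into link settings; with no lib= arg the library name defaults to the mode,
# so mode fftw3 yields -lfftw3. The /usr/local path is hypothetical.
def _example_fft_switch():
  fft = Fft(["fftw3","dir=/usr/local"])
  fft.check()
  # fft.mode == "fftw3", fft.lib == "-lfftw3", fft.dir == "/usr/local"
  return fft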
# Jpg class
class Jpg:
def __init__(self,list):
self.inlist = list[:]
self.on = 1
self.dir = self.incdir = self.libdir = ""
def help(self):
return """
-jpg flag dir=homedir idir=incdir ldir=libdir
change JPG settings in makefile
all args are optional, flag must come first if specified
flag = yes or no (def = yes)
include or exclude JPEG support
adds/removes -DLAMMPS_JPEG and -ljpeg settings
dir = home dir for include and library files (def = none)
adds -Idir/include and -Ldir/lib settings
if set, overrides idir and ldir args
idir = dir for include file (def = none)
adds -Iidir setting
ldir = dir for library file (def = none)
adds -Lldir setting
"""
def check(self):
for i,one in enumerate(self.inlist):
if one == "no" and i == 0: self.on = 0
elif one == "yes" and i == 0: self.on = 1
else:
words = one.split('=')
if len(words) != 2: error("-jpeg args are invalid")
if words[0] == "dir": self.dir = words[1]
elif words[0] == "idir": self.incdir = words[1]
elif words[0] == "ldir": self.libdir = words[1]
else: error("-jpeg args are invalid")
# Png class
class Png:
def __init__(self,list):
self.inlist = list[:]
self.on = 1
self.dir = self.incdir = self.libdir = ""
def help(self):
return """
-png flag dir=homedir idir=incdir ldir=libdir
change PNG settings in makefile
all args are optional, flag must come first if specified
flag = yes or no (def = yes)
include or exclude PNG support
adds/removes -DLAMMPS_PNG and -lpng settings
dir = home dir for include and library files (def = none)
adds -Idir/include and -Ldir/lib settings
if set, overrides idir and ldir args
idir = dir for include file (def = none)
adds -Iidir setting
ldir = dir for library file (def = none)
adds -Lldir setting
"""
def check(self):
for i,one in enumerate(self.inlist):
if one == "no" and i == 0: self.on = 0
elif one == "yes" and i == 0: self.on = 1
else:
words = one.split('=')
if len(words) != 2: error("-png args are invalid")
if words[0] == "dir": self.dir = words[1]
elif words[0] == "idir": self.incdir = words[1]
elif words[0] == "ldir": self.libdir = words[1]
else: error("-png args are invalid")
# ----------------------------------------------------------------
# auxiliary classes
# ----------------------------------------------------------------
# read, tweak, and write a Makefile
class MakeReader:
# read a makefile
# flag = 0 if file is full path name
# flag = 1,2 if file is suffix for any Makefile.machine under src/MAKE
# look for this file in same order that src/Makefile does
# if flag = 1, read the file
# if flag = 2, just check if file exists
def __init__(self,file,flag=0):
if flag == 0:
if not os.path.isfile(file): error("Makefile %s does not exist" % file)
lines = open(file,'r').readlines()
else:
mfile = "%s/MAKE/MINE/Makefile.%s" % (dir.src,file)
if not os.path.isfile(mfile):
mfile = "%s/MAKE/Makefile.%s" % (dir.src,file)
if not os.path.isfile(mfile):
mfile = "%s/MAKE/OPTIONS/Makefile.%s" % (dir.src,file)
if not os.path.isfile(mfile):
mfile = "%s/MAKE/MACHINES/Makefile.%s" % (dir.src,file)
if not os.path.isfile(mfile):
error("Makefile.%s does not exist" % file)
if flag == 1: lines = open(mfile,'r').readlines()
else: return
# scan lines of makefile
# if not a variable line, just copy to newlines
# if a variable line, concatenate any continuation lines
# convert variable to var dict entry: key = name, value = list of words
# discard any portion of value string with a comment char
# varinfo = list of variable info: (name, name with whitespace for print)
# add index into varinfo to newlines
# ccindex = index of "CC =" line, to add OMPI var before it
# lmpindex = index of "LAMMPS-specific settings" line to add KOKKOS vars before it
var = {}
varinfo = []
newlines = []
pattern = "(\S+\s+=\s+)(.*)"
conditional = 0
multiline = 0
self.ccindex = self.lmpindex = 0
for line in lines:
line = line[:-1]
if "CC =" in line: self.ccindex = len(newlines)
if "LAMMPS-specific settings" in line: self.lmpindex = len(newlines)
if "ifeq" in line:
conditional = 1
continue
if conditional:
if "endif" in line:
conditional = 0
continue
if multiline:
if '#' in line: line = line[:line.find('#')]
morevalues = line.split()
values = values[:-1] + morevalues
if values[-1] != '\\':
var[name] = values
multiline = 0
newlines.append(str(len(varinfo)))
varinfo.append((name,namewhite))
continue
varflag = 1
if len(line.strip()) == 0: varflag = 0
elif line.lstrip()[0] == '#': varflag = 0
else:
m = re.match(pattern,line)
if not m: varflag = 0
if varflag:
namewhite = m.group(1)
name = namewhite.split()[0]
if name in var:
error("Makefile variable %s appears more than once" % name)
remainder = m.group(2)
if '#' in remainder: remainder = remainder[:remainder.find('#')]
values = remainder.split()
if values and values[-1] == '\\': multiline = 1
else:
var[name] = values
newlines.append(str(len(varinfo)))
varinfo.append((name,namewhite))
else:
newlines.append(line)
self.var = var
self.varinfo = varinfo
self.lines = newlines
# return list of values associated with var
# return None if var not defined
def getvar(self,var):
if var in self.var: return self.var[var]
else: return None
# set var to single value
# if var not defined, error
def setvar(self,var,value):
if var not in self.var: error("Variable %s not in makefile" % var)
self.var[var] = [value]
# add value to var
# do not add if value already defined by var
# if var not defined,
# create new variable using "where"
# where="cc", line before "CC =" line, use ":="
# where="lmp", 2 lines before "LAMMPS-specific settings" line, use "="
def addvar(self,var,value,where=""):
if var in self.var:
if value not in self.var[var]: self.var[var].append(value)
else:
if not where:
error("Variable %s with value %s is not in makefile" % (var,value))
if where == "cc":
if not self.ccindex: error("No 'CC =' line in makefile to add variable")
index = self.ccindex
varwhite = "%s :=\t\t" % var
elif where == "lmp":
if not self.lmpindex: error("No 'LAMMPS-specific settings' line " +
"in makefile to add variable")
index = self.lmpindex - 2
varwhite = "%s =\t\t" % var
self.var[var] = [value]
self.lines.insert(index,str(len(self.varinfo)))
self.varinfo.append((var,varwhite))
# if value = None, remove entire var
# no need to update lines or varinfo, write() will ignore deleted vars
# else remove value from var
# value can have trailing '*' to remove wildcard match
# if var or value not defined, ignore it
def delvar(self,var,value=None):
#if var == "KOKKOS_DEVICES":
# print self.var,value
if var not in self.var: return
if not value:
del self.var[var]
#print "AGAIN",self.var
elif value and value[-1] != '*':
if value not in self.var[var]: return
self.var[var].remove(value)
else:
value = value[:-1]
values = self.var[var]
dellist = []
for i,one in enumerate(values):
if one.startswith(value): dellist.append(i)
while dellist: values.pop(dellist.pop())
self.var[var] = values
# write stored makefile lines to file, using vars that may have been updated
# do not write var if not in dict, since has been deleted
# wrap var values into multiple lines if needed
# flag = 1 if this is Makefile.auto, change 1st line to use "auto"
def write(self,file,flag=0):
fp = open(file,'w')
for i,line in enumerate(self.lines):
if not line.isdigit():
if flag and i == 0:
line = "# auto = makefile auto-generated by Make.py"
print >>fp,line
else:
index = int(line)
name = self.varinfo[index][0]
txt = self.varinfo[index][1]
if name not in self.var: continue
values = self.var[name]
print >>fp,"%s%s" % (txt,' '.join(values))
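# Illustrative, never-called sketch of a MakeReader round trip; the file name
# Makefile.g++ and the variables CCFLAGS/EXTRAMAKE are assumed to exist in it.
def _example_makereader():
  make = MakeReader("Makefile.g++")                 # flag=0: arg is a path
  make.setvar("EXTRAMAKE","Makefile.lammps.empty")  # replace a single value
  make.addvar("CCFLAGS","-O3")                      # append to an existing var
  make.write("Makefile.auto",1)                     # flag=1: rewrite 1st line
  return make.getvar("CCFLAGS")                     # list of values, or None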
# ----------------------------------------------------------------
# main program
# ----------------------------------------------------------------
# parse command-line args
# switches dict: key = switch letter, value = list of args
# switch_order = list of switches in order
# will possibly be merged with redo file args below
cmd_switches,cmd_switch_order = parse_args(sys.argv[1:])
if "v" in cmd_switches:
print "Command-line parsing:"
for switch in cmd_switch_order:
print " %s: %s" % (switch,' '.join(cmd_switches[switch]))
# check for redo switch, process redo file
# redolist = list of commands to execute
redoflag = 0
redolist = []
if 'r' in cmd_switches and 'h' not in cmd_switches:
redoflag = 1
redo = Redo(cmd_switches['r'])
redo.check()
redo.setup()
redolist = redo.commands
redoindex = 0
del redo
if not redolist: error("No commands to execute from redo file")
# loop over Make.py commands
# if no redo switch, loop once for command-line command
# if redo, loop over one or more commands from redo file
while 1:
# if redo:
# parse next command from redo file
# use command-line switches to add/replace file command switches
# do not add -r, since already processed
# and don't want -r switch to appear in Make.py.last file
# if -a in both: concatenate, de-dup,
# specified exe/machine action replaces file exe/machine action
# print resulting new command
# else just use command-line switches
if redoflag:
if redoindex == len(redolist): break
args = redolist[redoindex].split()
switches,switch_order = parse_args(args)
redoindex += 1
for switch in cmd_switches:
if switch == 'r': continue
if switch == 'a' and switch in switches:
tmp = Actions(cmd_switches[switch] + switches[switch])
tmp.dedup()
switches[switch] = tmp.inlist
continue
if switch not in switches: switch_order.append(switch)
switches[switch] = cmd_switches[switch]
argstr = switch2str(switches,switch_order)
print "Redo command: Make.py",argstr
else:
switches = cmd_switches
switch_order = cmd_switch_order
# initialize all class variables to None
for one in switchclasses: exec("%s = None" % one)
for one in libclasses: exec("%s = None" % one)
for one in buildclasses: exec("%s = None" % one)
for one in makeclasses: exec("%s = None" % one)
# classes = dictionary of created classes
# key = switch, value = class instance
classes = {}
for switch in switches:
if len(switch) == 1 and switch in abbrevs:
i = abbrevs.index(switch)
capitalized = switchclasses[i][0].upper() + switchclasses[i][1:]
txt = '%s = classes["%s"] = %s(switches["%s"])' % \
(switchclasses[i],switch,capitalized,switch)
exec(txt)
elif switch in libclasses:
i = libclasses.index(switch)
txt = '%s = classes["%s"] = %s(switches["%s"])' % \
(libclasses[i],switch,libclasses[i].upper(),switch)
exec(txt)
elif switch in buildclasses:
i = buildclasses.index(switch)
capitalized = buildclasses[i][0].upper() + buildclasses[i][1:]
txt = '%s = classes["%s"] = %s(switches["%s"])' % \
(buildclasses[i],switch,capitalized,switch)
exec(txt)
elif switch in makeclasses:
i = makeclasses.index(switch)
capitalized = makeclasses[i][0].upper() + makeclasses[i][1:]
txt = '%s = classes["%s"] = %s(switches["%s"])' % \
(makeclasses[i],switch,capitalized,switch)
exec(txt)
else: error("Unknown command-line switch -%s" % switch)
# print help messages and exit
if help or (actions and "-h" in actions.inlist) or not switches:
if not help: help = Help(None)
print help.help()
for switch in switch_order:
if switch == "h": continue
print classes[switch].help()[1:]
sys.exit()
# create needed default classes if not specified with switch
# dir and packages plus lib and build classes so defaults are set
if not dir: dir = Dir(None)
if not packages: packages = Packages(None)
for one in libclasses:
txt = "if not %s: %s = %s(None)" % (one,one,one.upper())
exec(txt)
for one in buildclasses:
capitalized = one[0].upper() + one[1:]
txt = "if not %s: %s = %s(None)" % (one,one,capitalized)
exec(txt)
# error check on args for all classes
for switch in classes: classes[switch].check()
# prep for action
# actions.setup() detects if last action = machine
# if yes, induce addition of "-m" and "-o" switches
dir.setup()
packages.setup()
if actions:
machine = actions.setup()
if machine:
switches['a'][-1] = "exe"
if 'm' not in switches:
switches['m'] = [machine]
switch_order.insert(-1,'m')
makefile = classes['m'] = Makefile(switches['m'])
makefile.check()
if 'o' not in switches:
switches['o'] = [machine]
switch_order.insert(-1,'o')
output = classes['o'] = Makefile(switches['o'])
output.check()
# perform actions
packages.install()
if actions:
for action in actions.alist:
print "Action %s ..." % action
if action.startswith("lib-"): actions.lib(action[4:])
elif action == "file": actions.file("file")
elif action == "clean": actions.clean()
elif action == "exe": actions.exe()
packages.uninstall()
# create output file if requested and exe action performed
if output and actions and "exe" in actions.alist:
txt = "cp %s/lmp_auto %s/lmp_%s" % (dir.src,dir.cwd,output.machine)
commands.getoutput(txt)
print "Created lmp_%s in %s" % (output.machine,dir.cwd)
# write current Make.py command to src/Make.py.last
fp = open("%s/Make.py.last" % dir.src,'w')
print >>fp,"# last invoked Make.py command"
print >>fp,switch2str(switches,switch_order)
fp.close()
# if not redoflag, done
if not redoflag: break
|
gurkih/lammps
|
src/Make.py
|
Python
|
gpl-2.0
| 70,853
|
[
"LAMMPS"
] |
b5068e9bbe975040f3ecf8e9165f47ce0fabea52ffaeab67c8809c31e4597c54
|
"""Generate html report from MNE database
"""
# Authors: Alex Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Mainak Jas <mainak@neuro.hut.fi>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import fnmatch
import re
import codecs
import time
from glob import glob
import warnings
import base64
from datetime import datetime as dt
import numpy as np
from . import read_evokeds, read_events, pick_types, read_cov
from .io import Raw, read_info
from .utils import _TempDir, logger, verbose, get_subjects_dir
from .viz import plot_events, plot_trans, plot_cov
from .viz._3d import _plot_mri_contours
from .forward import read_forward_solution
from .epochs import read_epochs
from .minimum_norm import read_inverse_operator
from .parallel import parallel_func, check_n_jobs
from .externals.tempita import HTMLTemplate, Template
from .externals.six import BytesIO
from .externals.six import moves
VALID_EXTENSIONS = ['raw.fif', 'raw.fif.gz', 'sss.fif', 'sss.fif.gz',
'-eve.fif', '-eve.fif.gz', '-cov.fif', '-cov.fif.gz',
'-trans.fif', '-trans.fif.gz', '-fwd.fif', '-fwd.fif.gz',
'-epo.fif', '-epo.fif.gz', '-inv.fif', '-inv.fif.gz',
'-ave.fif', '-ave.fif.gz', 'T1.mgz']
SECTION_ORDER = ['raw', 'events', 'epochs', 'evoked', 'covariance', 'trans',
'mri', 'forward', 'inverse']
###############################################################################
# PLOTTING FUNCTIONS
def _fig_to_img(function=None, fig=None, image_format='png',
scale=None, **kwargs):
"""Wrapper function to plot figure and create a binary image"""
import matplotlib.pyplot as plt
if function is not None:
plt.close('all')
fig = function(**kwargs)
output = BytesIO()
if scale is not None:
_scale_mpl_figure(fig, scale)
fig.savefig(output, format=image_format, bbox_inches='tight',
dpi=fig.get_dpi())
plt.close(fig)
output = output.getvalue()
return (output if image_format == 'svg' else
base64.b64encode(output).decode('ascii'))
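# Illustrative, never-called sketch: embedding an existing matplotlib figure as
# a base64-encoded PNG string via _fig_to_img.
def _example_fig_to_img():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0., 1.], [0., 1.])
    return _fig_to_img(fig=fig, image_format='png')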
def _scale_mpl_figure(fig, scale):
"""Magic scaling helper
Keeps font-size and artist sizes constant
0.5 : current font - 4pt
2.0 : current font + 4pt
XXX it's unclear why this works, but good to go for most cases
"""
fig.set_size_inches(fig.get_size_inches() * scale)
fig.set_dpi(fig.get_dpi() * scale)
import matplotlib as mpl
if scale >= 1:
sfactor = scale ** 2
elif scale < 1:
sfactor = -((1. / scale) ** 2)
for text in fig.findobj(mpl.text.Text):
fs = text.get_fontsize()
new_size = fs + sfactor
if new_size <= 0:
raise ValueError('could not rescale matplotlib fonts, consider '
'increasing "scale"')
text.set_fontsize(new_size)
fig.canvas.draw()
def _figs_to_mrislices(sl, n_jobs, **kwargs):
import matplotlib.pyplot as plt
plt.close('all')
use_jobs = min(n_jobs, max(1, len(sl)))
parallel, p_fun, _ = parallel_func(_plot_mri_contours, use_jobs)
outs = parallel(p_fun(slices=s, **kwargs)
for s in np.array_split(sl, use_jobs))
for o in outs[1:]:
outs[0] += o
return outs[0]
def _iterate_trans_views(function, **kwargs):
"""Auxiliary function to iterate over views in trans fig.
"""
from scipy.misc import imread
import matplotlib.pyplot as plt
import mayavi
fig = function(**kwargs)
assert isinstance(fig, mayavi.core.scene.Scene)
views = [(90, 90), (0, 90), (0, -90)]
fig2, axes = plt.subplots(1, len(views))
for view, ax in zip(views, axes):
mayavi.mlab.view(view[0], view[1])
# XXX: save_bmp / save_png / ...
tempdir = _TempDir()
temp_fname = op.join(tempdir, 'test.png')
if fig.scene is not None:
fig.scene.save_png(temp_fname)
im = imread(temp_fname)
else: # Testing mode
im = np.zeros((2, 2, 3))
ax.imshow(im)
ax.axis('off')
mayavi.mlab.close(fig)
img = _fig_to_img(fig=fig2)
return img
###############################################################################
# TOC FUNCTIONS
def _is_bad_fname(fname):
"""Auxiliary function for identifying bad file naming patterns
and highlighting them in red in the TOC.
"""
if fname.endswith('(whitened)'):
fname = fname[:-11]
if not fname.endswith(tuple(VALID_EXTENSIONS + ['bem', 'custom'])):
return 'red'
else:
return ''
def _get_toc_property(fname):
"""Auxiliary function to assign class names to TOC
list elements to allow toggling with buttons.
"""
if fname.endswith(('-eve.fif', '-eve.fif.gz')):
div_klass = 'events'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
div_klass = 'evoked'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-cov.fif', '-cov.fif.gz')):
div_klass = 'covariance'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('raw.fif', 'raw.fif.gz',
'sss.fif', 'sss.fif.gz')):
div_klass = 'raw'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-trans.fif', '-trans.fif.gz')):
div_klass = 'trans'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
div_klass = 'forward'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
div_klass = 'inverse'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
div_klass = 'epochs'
tooltip = fname
text = op.basename(fname)
elif fname.endswith(('.nii', '.nii.gz', '.mgh', '.mgz')):
div_klass = 'mri'
tooltip = 'MRI'
text = 'MRI'
elif fname.endswith(('bem')):
div_klass = 'mri'
tooltip = 'MRI'
text = 'MRI'
elif fname.endswith('(whitened)'):
div_klass = 'evoked'
tooltip = fname
text = op.basename(fname[:-11]) + '(whitened)'
else:
div_klass = fname.split('-#-')[1]
tooltip = fname.split('-#-')[0]
text = fname.split('-#-')[0]
return div_klass, tooltip, text
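# Illustrative, never-called sketch: how a TOC entry is classified from a file
# name ending; the path below is hypothetical.
def _example_toc_property():
    div_klass, tooltip, text = _get_toc_property('sub-01/sample_audvis-ave.fif')
    # div_klass == 'evoked', tooltip is the full path, text is the basename
    return div_klass, tooltip, text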
def _iterate_files(report, fnames, info, cov, baseline, sfreq, on_error):
"""Auxiliary function to parallel process in batch mode.
"""
htmls, report_fnames, report_sectionlabels = [], [], []
def _update_html(html, report_fname, report_sectionlabel):
"""Update the lists above."""
htmls.append(html)
report_fnames.append(report_fname)
report_sectionlabels.append(report_sectionlabel)
for fname in fnames:
logger.info("Rendering : %s"
% op.join('...' + report.data_path[-20:],
fname))
try:
if fname.endswith(('raw.fif', 'raw.fif.gz',
'sss.fif', 'sss.fif.gz')):
html = report._render_raw(fname)
report_fname = fname
report_sectionlabel = 'raw'
elif fname.endswith(('-fwd.fif', '-fwd.fif.gz')):
html = report._render_forward(fname)
report_fname = fname
report_sectionlabel = 'forward'
elif fname.endswith(('-inv.fif', '-inv.fif.gz')):
html = report._render_inverse(fname)
report_fname = fname
report_sectionlabel = 'inverse'
elif fname.endswith(('-ave.fif', '-ave.fif.gz')):
if cov is not None:
html = report._render_whitened_evoked(fname, cov, baseline)
report_fname = fname + ' (whitened)'
report_sectionlabel = 'evoked'
_update_html(html, report_fname, report_sectionlabel)
html = report._render_evoked(fname, baseline)
report_fname = fname
report_sectionlabel = 'evoked'
elif fname.endswith(('-eve.fif', '-eve.fif.gz')):
html = report._render_eve(fname, sfreq)
report_fname = fname
report_sectionlabel = 'events'
elif fname.endswith(('-epo.fif', '-epo.fif.gz')):
html = report._render_epochs(fname)
report_fname = fname
report_sectionlabel = 'epochs'
elif (fname.endswith(('-cov.fif', '-cov.fif.gz')) and
report.info_fname is not None):
html = report._render_cov(fname, info)
report_fname = fname
report_sectionlabel = 'covariance'
elif (fname.endswith(('-trans.fif', '-trans.fif.gz')) and
report.info_fname is not None and report.subjects_dir
is not None and report.subject is not None):
html = report._render_trans(fname, report.data_path, info,
report.subject,
report.subjects_dir)
report_fname = fname
report_sectionlabel = 'trans'
else:
html = None
report_fname = None
report_sectionlabel = None
except Exception as e:
if on_error == 'warn':
logger.warning('Failed to process file %s:\n"%s"' % (fname, e))
elif on_error == 'raise':
raise
html = None
report_fname = None
report_sectionlabel = None
_update_html(html, report_fname, report_sectionlabel)
return htmls, report_fnames, report_sectionlabels
###############################################################################
# IMAGE FUNCTIONS
def _build_image(data, cmap='gray'):
"""Build an image encoded in base64.
"""
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
figsize = data.shape[::-1]
if figsize[0] == 1:
figsize = tuple(figsize[1:])
data = data[:, :, 0]
fig = Figure(figsize=figsize, dpi=1.0, frameon=False)
FigureCanvas(fig)
cmap = getattr(plt.cm, cmap, plt.cm.gray)
fig.figimage(data, cmap=cmap)
output = BytesIO()
fig.savefig(output, dpi=1.0, format='png')
return base64.b64encode(output.getvalue()).decode('ascii')
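# Illustrative, never-called sketch: encoding a single 2-D slice (e.g. one MRI
# slice) as a base64 PNG with _build_image.
def _example_build_image():
    rng = np.random.RandomState(42)
    data = rng.rand(32, 32)  # one hypothetical grayscale slice
    return _build_image(data, cmap='gray')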
def _iterate_sagittal_slices(array, limits=None):
"""Iterate sagittal slice.
"""
shape = array.shape[0]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, array[ind, :, :]
def _iterate_axial_slices(array, limits=None):
"""Iterate axial slice.
"""
shape = array.shape[1]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, array[:, ind, :]
def _iterate_coronal_slices(array, limits=None):
"""Iterate coronal slice.
"""
shape = array.shape[2]
for ind in range(shape):
if limits and ind not in limits:
continue
yield ind, np.flipud(np.rot90(array[:, :, ind]))
def _iterate_mri_slices(name, ind, global_id, slides_klass, data, cmap,
image_format='png'):
"""Auxiliary function for parallel processing of mri slices.
"""
img_klass = 'slideimg-%s' % name
caption = u'Slice %s %s' % (name, ind)
slice_id = '%s-%s-%s' % (name, global_id, ind)
div_klass = 'span12 %s' % slides_klass
img = _build_image(data, cmap=cmap)
first = True if ind == 0 else False
html = _build_html_image(img, slice_id, div_klass,
img_klass, caption, first)
return ind, html
###############################################################################
# HTML functions
def _build_html_image(img, id, div_klass, img_klass, caption=None, show=True):
"""Build a html image from a slice array.
"""
html = []
add_style = u'' if show else u'style="display: none"'
html.append(u'<li class="%s" id="%s" %s>' % (div_klass, id, add_style))
html.append(u'<div class="thumbnail">')
html.append(u'<img class="%s" alt="" style="width:90%%;" '
'src="data:image/png;base64,%s">'
% (img_klass, img))
html.append(u'</div>')
if caption:
html.append(u'<h4>%s</h4>' % caption)
html.append(u'</li>')
return u'\n'.join(html)
slider_template = HTMLTemplate(u"""
<script>$("#{{slider_id}}").slider({
range: "min",
/*orientation: "vertical",*/
min: {{minvalue}},
max: {{maxvalue}},
step: {{step}},
value: {{startvalue}},
create: function(event, ui) {
$(".{{klass}}").hide();
$("#{{klass}}-{{startvalue}}").show();},
stop: function(event, ui) {
var list_value = $("#{{slider_id}}").slider("value");
$(".{{klass}}").hide();
$("#{{klass}}-"+list_value).show();}
})</script>
""")
def _build_html_slider(slices_range, slides_klass, slider_id):
"""Build an html slider for a given slices range and a slices klass.
"""
startvalue = slices_range[len(slices_range) // 2]
return slider_template.substitute(slider_id=slider_id,
klass=slides_klass,
step=slices_range[1] - slices_range[0],
minvalue=slices_range[0],
maxvalue=slices_range[-1],
startvalue=startvalue)
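# Illustrative, never-called sketch: a slider driving every other index of a
# 100-slice axis; the slides_klass and slider_id strings are hypothetical.
def _example_build_html_slider():
    return _build_html_slider(list(range(0, 100, 2)), 'axial-1', 'select-axial-1')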
###############################################################################
# HTML scan renderer
header_template = Template(u"""
<!DOCTYPE html>
<html lang="fr">
<head>
{{include}}
<script type="text/javascript">
function togglebutton(class_name){
$(class_name).toggle();
if ($(class_name + '-btn').hasClass('active'))
$(class_name + '-btn').removeClass('active');
else
$(class_name + '-btn').addClass('active');
}
/* Scroll down on click to #id so that caption is not hidden
by navbar */
var shiftWindow = function() { scrollBy(0, -60) };
if (location.hash) shiftWindow();
window.addEventListener("hashchange", shiftWindow);
</script>
<style type="text/css">
body {
line-height: 1.5em;
font-family: arial, sans-serif;
}
h1 {
font-size: 30px;
text-align: center;
}
h4 {
text-align: center;
}
@link-color: @brand-primary;
@link-hover-color: darken(@link-color, 15%);
a{
color: @link-color;
&:hover {
color: @link-hover-color;
text-decoration: underline;
}
}
li{
list-style-type:none;
}
#wrapper {
text-align: left;
margin: 5em auto;
width: 700px;
}
#container{
position: relative;
}
#content{
margin-left: 22%;
margin-top: 60px;
width: 75%;
}
#toc {
margin-top: navbar-height;
position: fixed;
width: 20%;
height: 90%;
overflow: auto;
}
#toc li {
overflow: hidden;
padding-bottom: 2px;
margin-left: 20px;
}
#toc span {
float: left;
padding: 0 2px 3px 0;
}
div.footer {
background-color: #C0C0C0;
color: #000000;
padding: 3px 8px 3px 0;
clear: both;
font-size: 0.8em;
text-align: right;
}
</style>
</head>
<body>
<nav class="navbar navbar-inverse navbar-fixed-top" role="navigation">
<div class="container-fluid">
<div class="navbar-header navbar-left">
<ul class="nav nav-pills"><li class="active">
<a class="navbar-btn" data-toggle="collapse"
data-target="#viewnavbar" href="javascript:void(0)">
></a></li></ul>
</div>
<h3 class="navbar-text" style="color:white">{{title}}</h3>
<ul class="nav nav-pills navbar-right" style="margin-top: 7px;"
id="viewnavbar">
{{for section in sections}}
<li class="active {{sectionvars[section]}}-btn">
<a href="javascript:void(0)"
onclick="togglebutton('.{{sectionvars[section]}}')">
{{section if section != 'mri' else 'MRI'}}
</a>
</li>
{{endfor}}
</ul>
</div>
</nav>
""")
footer_template = HTMLTemplate(u"""
</div></body>
<div class="footer">
© Copyright 2012-2013, MNE Developers.
Created on {{date}}.
Powered by <a href="http://martinos.org/mne">MNE</a>.
</div>
</html>
""")
html_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4>
<div class="thumbnail">{{html}}</div>
</li>
""")
image_template = Template(u"""
{{default interactive = False}}
{{default width = 50}}
{{default id = False}}
{{default image_format = 'png'}}
{{default scale = None}}
{{default comment = None}}
<li class="{{div_klass}}" {{if id}}id="{{id}}"{{endif}}
{{if not show}}style="display: none"{{endif}}>
{{if caption}}
<h4>{{caption}}</h4>
{{endif}}
<div class="thumbnail">
{{if not interactive}}
{{if image_format == 'png'}}
{{if scale is not None}}
<img alt="" style="width:{{width}}%;"
src="data:image/png;base64,{{img}}">
{{else}}
<img alt=""
src="data:image/png;base64,{{img}}">
{{endif}}
{{elif image_format == 'svg'}}
<div style="text-align:center;">
{{img}}
</div>
{{endif}}
{{if comment is not None}}
<br><br>
<div style="text-align:center;">
<style>
p.test {word-wrap: break-word;}
</style>
<p class="test">
{{comment}}
</p>
</div>
{{endif}}
{{else}}
<center>{{interactive}}</center>
{{endif}}
</div>
</li>
""")
repr_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4><hr>
{{repr}}
<hr></li>
""")
raw_template = Template(u"""
<li class="{{div_klass}}" id="{{id}}">
<h4>{{caption}}</h4>
<table class="table table-hover">
<tr>
<th>Measurement date</th>
{{if meas_date is not None}}
<td>{{meas_date}}</td>
{{else}}<td>Unknown</td>{{endif}}
</tr>
<tr>
<th>Experimenter</th>
{{if info['experimenter'] is not None}}
<td>{{info['experimenter']}}</td>
{{else}}<td>Unknown</td>{{endif}}
</tr>
<tr>
<th>Digitized points</th>
{{if info['dig'] is not None}}
<td>{{len(info['dig'])}} points</td>
{{else}}
<td>Not available</td>
{{endif}}
</tr>
<tr>
<th>Good channels</th>
<td>{{n_mag}} magnetometer, {{n_grad}} gradiometer,
and {{n_eeg}} EEG channels</td>
</tr>
<tr>
<th>Bad channels</th>
{{if info['bads'] is not None}}
<td>{{', '.join(info['bads'])}}</td>
{{else}}<td>None</td>{{endif}}
</tr>
<tr>
<th>EOG channels</th>
<td>{{eog}}</td>
</tr>
<tr>
<th>ECG channels</th>
<td>{{ecg}}</td>
</tr>
<tr>
<th>Measurement time range</th>
<td>{{u'%0.2f' % tmin}} to {{u'%0.2f' % tmax}} sec.</td>
</tr>
<tr>
<th>Sampling frequency</th>
<td>{{u'%0.2f' % info['sfreq']}} Hz</td>
</tr>
<tr>
<th>Highpass</th>
<td>{{u'%0.2f' % info['highpass']}} Hz</td>
</tr>
<tr>
<th>Lowpass</th>
<td>{{u'%0.2f' % info['lowpass']}} Hz</td>
</tr>
</table>
</li>
""")
toc_list = Template(u"""
<li class="{{div_klass}}">
{{if id}}
<a href="javascript:void(0)" onclick="window.location.hash={{id}};">
{{endif}}
<span title="{{tooltip}}" style="color:{{color}}"> {{text}}</span>
{{if id}}</a>{{endif}}
</li>
""")
def _check_scale(scale):
"""Helper to ensure valid scale value is passed"""
if np.isscalar(scale) and scale <= 0:
raise ValueError('scale must be positive, not %s' % scale)
class Report(object):
"""Object for rendering HTML
Parameters
----------
info_fname : str
Name of the file containing the info dictionary.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
subject : str | None
Subject name.
title : str
Title of the report.
cov_fname : str
Name of the file containing the noise covariance.
baseline : None or tuple of length 2 (default (None, 0))
The time interval to apply baseline correction for evokeds.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
The baseline (a, b) includes both endpoints, i.e. all
timepoints t such that a <= t <= b.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
def __init__(self, info_fname=None, subjects_dir=None,
subject=None, title=None, cov_fname=None, baseline=None,
verbose=None):
self.info_fname = info_fname
self.cov_fname = cov_fname
self.baseline = baseline
self.subjects_dir = get_subjects_dir(subjects_dir, raise_error=False)
self.subject = subject
self.title = title
self.verbose = verbose
self.initial_id = 0
self.html = []
self.fnames = [] # List of file names rendered
self.sections = [] # List of sections
self._sectionlabels = [] # Section labels
self._sectionvars = {} # Section variable names in js
# boolean to specify if sections should be ordered in natural
# order of processing (raw -> events ... -> inverse)
self._sort_sections = False
self._init_render() # Initialize the renderer
def _get_id(self):
"""Get id of plot.
"""
self.initial_id += 1
return self.initial_id
def _validate_input(self, items, captions, section, comments=None):
"""Validate input.
"""
if not isinstance(items, (list, tuple)):
items = [items]
if not isinstance(captions, (list, tuple)):
captions = [captions]
if not isinstance(comments, (list, tuple)):
if comments is None:
comments = [comments] * len(captions)
else:
comments = [comments]
if len(comments) != len(items):
raise ValueError('Comments and report items must have the same '
'length or comments should be None.')
elif len(captions) != len(items):
raise ValueError('Captions and report items must have the same '
'length.')
# Book-keeping of section names
if section not in self.sections:
self.sections.append(section)
self._sectionvars[section] = _clean_varnames(section)
return items, captions, comments
def _add_figs_to_section(self, figs, captions, section='custom',
image_format='png', scale=None, comments=None):
"""Auxiliary method for `add_section` and `add_figs_to_section`.
"""
from scipy.misc import imread
import matplotlib.pyplot as plt
mayavi = None
try:
# on some versions mayavi.core won't be exposed unless ...
from mayavi import mlab # noqa, mlab imported
import mayavi
except: # on some systems importing Mayavi raises SystemExit (!)
warnings.warn('Could not import mayavi. Trying to render '
'`mayavi.core.scene.Scene` figure instances'
' will throw an error.')
figs, captions, comments = self._validate_input(figs, captions,
section, comments)
_check_scale(scale)
for fig, caption, comment in zip(figs, captions, comments):
caption = 'custom plot' if caption == '' else caption
sectionvar = self._sectionvars[section]
global_id = self._get_id()
div_klass = self._sectionvars[section]
img_klass = self._sectionvars[section]
if mayavi is not None and isinstance(fig, mayavi.core.scene.Scene):
tempdir = _TempDir()
temp_fname = op.join(tempdir, 'test')
if fig.scene is not None:
fig.scene.save_png(temp_fname)
img = imread(temp_fname)
else: # Testing mode
img = np.zeros((2, 2, 3))
mayavi.mlab.close(fig)
fig = plt.figure()
plt.imshow(img)
plt.axis('off')
img = _fig_to_img(fig=fig, scale=scale,
image_format=image_format)
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=True,
image_format=image_format,
comment=comment)
self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
self._sectionlabels.append(sectionvar)
self.html.append(html)
def add_figs_to_section(self, figs, captions, section='custom',
scale=None, image_format='png', comments=None):
"""Append custom user-defined figures.
Parameters
----------
figs : list of figures.
Each figure in the list can be an instance of
matplotlib.pyplot.Figure, mayavi.core.scene.Scene,
or np.ndarray (images read in using scipy.imread).
captions : list of str
A list of captions to the figures.
section : str
Name of the section. If section already exists, the figures
will be appended to the end of the section
scale : float | None | callable
Scale the images maintaining the aspect ratio.
If None, no scaling is applied. If float, scale will determine
the relative scaling (might not work for scale <= 1 depending on
font sizes). If function, should take a figure object as input
parameter. Defaults to None.
image_format : {'png', 'svg'}
The image format to be used for the report. Defaults to 'png'.
comments : None | str | list of str
A string of text or a list of strings of text to be appended after
the figure.
"""
return self._add_figs_to_section(figs=figs, captions=captions,
section=section, scale=scale,
image_format=image_format,
comments=comments)
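    # Illustrative usage sketch (comments only; the figure and captions are
    # hypothetical):
    #     import matplotlib.pyplot as plt
    #     fig = plt.figure(); plt.plot([1, 2, 3])
    #     report = Report(title='Example')
    #     report.add_figs_to_section(fig, captions='A custom plot',
    #                                 section='custom')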
def add_images_to_section(self, fnames, captions, scale=None,
section='custom', comments=None):
"""Append custom user-defined images.
Parameters
----------
fnames : str | list of str
A filename or a list of filenames from which images are read.
captions : str | list of str
A caption or a list of captions to the images.
scale : float | None
Scale the images maintaining the aspect ratio.
Defaults to None. If None, no scaling will be applied.
section : str
Name of the section. If section already exists, the images
will be appended to the end of the section.
comments : None | str | list of str
A string of text or a list of strings of text to be appended after
the image.
"""
# Note: using scipy.misc is equivalent because scipy internally
# imports PIL anyway. It's not possible to redirect image output
# to binary string using scipy.misc.
from PIL import Image
fnames, captions, comments = self._validate_input(fnames, captions,
section, comments)
_check_scale(scale)
for fname, caption, comment in zip(fnames, captions, comments):
caption = 'custom plot' if caption == '' else caption
sectionvar = self._sectionvars[section]
global_id = self._get_id()
div_klass = self._sectionvars[section]
img_klass = self._sectionvars[section]
# Convert image to binary string.
im = Image.open(fname)
output = BytesIO()
im.save(output, format='png')
img = base64.b64encode(output.getvalue()).decode('ascii')
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
width=scale,
comment=comment,
show=True)
self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
self._sectionlabels.append(sectionvar)
self.html.append(html)
def add_htmls_to_section(self, htmls, captions, section='custom'):
"""Append htmls to the report.
Parameters
----------
htmls : str | list of str
An html str or a list of html str.
captions : str | list of str
A caption or a list of captions to the htmls.
section : str
Name of the section. If section already exists, the images
will be appended to the end of the section.
Notes
-----
.. versionadded:: 0.9.0
"""
htmls, captions, _ = self._validate_input(htmls, captions, section)
for html, caption in zip(htmls, captions):
caption = 'custom plot' if caption == '' else caption
sectionvar = self._sectionvars[section]
global_id = self._get_id()
div_klass = self._sectionvars[section]
self.fnames.append('%s-#-%s-#-custom' % (caption, sectionvar))
self._sectionlabels.append(sectionvar)
self.html.append(
html_template.substitute(div_klass=div_klass, id=global_id,
caption=caption, html=html))
def add_bem_to_section(self, subject, caption='BEM', section='bem',
decim=2, n_jobs=1, subjects_dir=None):
"""Renders a bem slider html str.
Parameters
----------
subject : str
Subject name.
caption : str
A caption for the bem.
section : str
Name of the section. If section already exists, the bem
will be appended to the end of the section.
decim : int
Use this decimation factor for generating MRI/BEM images
(since it can be time consuming).
n_jobs : int
Number of jobs to run in parallel.
subjects_dir : str | None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
Notes
-----
.. versionadded:: 0.9.0
"""
caption = 'custom plot' if caption == '' else caption
html = self._render_bem(subject=subject, subjects_dir=subjects_dir,
decim=decim, n_jobs=n_jobs, section=section,
caption=caption)
html, caption, _ = self._validate_input(html, caption, section)
sectionvar = self._sectionvars[section]
self.fnames.append('%s-#-%s-#-custom' % (caption[0], sectionvar))
self._sectionlabels.append(sectionvar)
self.html.extend(html)
###########################################################################
# HTML rendering
def _render_one_axis(self, slices_iter, name, global_id, cmap,
n_elements, n_jobs):
"""Render one axis of the array.
"""
global_id = global_id or name
html = []
slices, slices_range = [], []
html.append(u'<div class="col-xs-6 col-md-4">')
slides_klass = '%s-%s' % (name, global_id)
use_jobs = min(n_jobs, max(1, n_elements))
parallel, p_fun, _ = parallel_func(_iterate_mri_slices, use_jobs)
r = parallel(p_fun(name, ind, global_id, slides_klass, data, cmap)
for ind, data in slices_iter)
slices_range, slices = zip(*r)
# Render the slider
slider_id = 'select-%s-%s' % (name, global_id)
html.append(u'<div id="%s"></div>' % slider_id)
html.append(u'<ul class="thumbnails">')
# Render the slices
html.append(u'\n'.join(slices))
html.append(u'</ul>')
html.append(_build_html_slider(slices_range, slides_klass, slider_id))
html.append(u'</div>')
return '\n'.join(html)
###########################################################################
# global rendering functions
@verbose
def _init_render(self, verbose=None):
"""Initialize the renderer.
"""
inc_fnames = ['jquery-1.10.2.min.js', 'jquery-ui.min.js',
'bootstrap.min.js', 'jquery-ui.min.css',
'bootstrap.min.css']
include = list()
for inc_fname in inc_fnames:
logger.info('Embedding : %s' % inc_fname)
f = open(op.join(op.dirname(__file__), 'html', inc_fname),
'r')
if inc_fname.endswith('.js'):
include.append(u'<script type="text/javascript">' +
f.read() + u'</script>')
elif inc_fname.endswith('.css'):
include.append(u'<style type="text/css">' +
f.read() + u'</style>')
f.close()
self.include = ''.join(include)
@verbose
def parse_folder(self, data_path, pattern='*.fif', n_jobs=1, mri_decim=2,
sort_sections=True, on_error='warn', verbose=None):
"""Renders all the files in the folder.
Parameters
----------
data_path : str
Path to the folder containing data whose HTML report will be
created.
pattern : str | list of str
Filename pattern(s) to include in the report.
Example: [\*raw.fif, \*ave.fif] will include Raw as well as Evoked
files.
n_jobs : int
Number of jobs to run in parallel.
mri_decim : int
Use this decimation factor for generating MRI/BEM images
(since it can be time consuming).
sort_sections : bool
If True, sort sections in the order: raw -> events -> epochs
-> evoked -> covariance -> trans -> mri -> forward -> inverse.
on_error : str
What to do if a file cannot be rendered. Can be 'ignore',
'warn' (default), or 'raise'.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
"""
valid_errors = ['ignore', 'warn', 'raise']
if on_error not in valid_errors:
raise ValueError('on_error must be one of %s, not %s'
% (valid_errors, on_error))
self._sort = sort_sections
n_jobs = check_n_jobs(n_jobs)
self.data_path = data_path
if self.title is None:
self.title = 'MNE Report for ...%s' % self.data_path[-20:]
if not isinstance(pattern, (list, tuple)):
pattern = [pattern]
# iterate through the possible patterns
fnames = list()
for p in pattern:
fnames.extend(_recursive_search(self.data_path, p))
if self.info_fname is not None:
info = read_info(self.info_fname)
sfreq = info['sfreq']
else:
warnings.warn('`info_fname` not provided. Cannot render '
'-cov.fif(.gz) and -trans.fif(.gz) files.')
info, sfreq = None, None
cov = None
if self.cov_fname is not None:
cov = read_cov(self.cov_fname)
baseline = self.baseline
# render plots in parallel; check that n_jobs <= # of files
logger.info('Iterating over %s potential files (this may take some '
'time)' % len(fnames))
use_jobs = min(n_jobs, max(1, len(fnames)))
parallel, p_fun, _ = parallel_func(_iterate_files, use_jobs)
r = parallel(p_fun(self, fname, info, cov, baseline, sfreq, on_error)
for fname in np.array_split(fnames, use_jobs))
htmls, report_fnames, report_sectionlabels = zip(*r)
# combine results from n_jobs discarding plots not rendered
self.html = [html for html in sum(htmls, []) if html is not None]
self.fnames = [fname for fname in sum(report_fnames, []) if
fname is not None]
self._sectionlabels = [slabel for slabel in
sum(report_sectionlabels, [])
if slabel is not None]
# find unique section labels
self.sections = sorted(set(self._sectionlabels))
self._sectionvars = dict(zip(self.sections, self.sections))
# render mri
if self.subjects_dir is not None and self.subject is not None:
logger.info('Rendering BEM')
self.html.append(self._render_bem(self.subject, self.subjects_dir,
mri_decim, n_jobs))
self.fnames.append('bem')
self._sectionlabels.append('mri')
else:
warnings.warn('`subjects_dir` and `subject` not provided.'
' Cannot render MRI and -trans.fif(.gz) files.')
def save(self, fname=None, open_browser=True, overwrite=False):
"""Save html report and open it in browser.
Parameters
----------
fname : str
File name of the report.
open_browser : bool
Open html browser after saving if True.
overwrite : bool
If True, overwrite report if it already exists.
"""
if fname is None:
if not hasattr(self, 'data_path'):
self.data_path = op.dirname(__file__)
warnings.warn('`data_path` not provided. Using %s instead'
% self.data_path)
fname = op.realpath(op.join(self.data_path, 'report.html'))
else:
fname = op.realpath(fname)
self._render_toc()
html = footer_template.substitute(date=time.strftime("%B %d, %Y"))
self.html.append(html)
if not overwrite and op.isfile(fname):
msg = ('Report already exists at location %s. '
'Overwrite it (y/[n])? '
% fname)
answer = moves.input(msg)
if answer.lower() == 'y':
overwrite = True
if overwrite or not op.isfile(fname):
logger.info('Saving report to location %s' % fname)
            with codecs.open(fname, 'w', 'utf-8') as fobj:
                fobj.write(_fix_global_ids(u''.join(self.html)))
# remove header, TOC and footer to allow more saves
self.html.pop(0)
self.html.pop(0)
self.html.pop()
if open_browser:
import webbrowser
webbrowser.open_new_tab('file://' + fname)
return fname
@verbose
def _render_toc(self, verbose=None):
"""Render the Table of Contents.
"""
logger.info('Rendering : Table of Contents')
html_toc = u'<div id="container">'
html_toc += u'<div id="toc"><center><h4>CONTENTS</h4></center>'
global_id = 1
# Reorder self.sections to reflect natural ordering
if self._sort_sections:
sections = list(set(self.sections) & set(SECTION_ORDER))
custom = [section for section in self.sections if section
not in SECTION_ORDER]
order = [sections.index(section) for section in SECTION_ORDER if
section in sections]
self.sections = np.array(sections)[order].tolist() + custom
# Sort by section
html, fnames, sectionlabels = [], [], []
for section in self.sections:
logger.info('%s' % section)
for sectionlabel, this_html, fname in (zip(self._sectionlabels,
self.html, self.fnames)):
if self._sectionvars[section] == sectionlabel:
html.append(this_html)
fnames.append(fname)
sectionlabels.append(sectionlabel)
logger.info('\t... %s' % fname[-20:])
color = _is_bad_fname(fname)
div_klass, tooltip, text = _get_toc_property(fname)
# loop through conditions for evoked
if fname.endswith(('-ave.fif', '-ave.fif.gz',
'(whitened)')):
text = os.path.basename(fname)
if fname.endswith('(whitened)'):
fname = fname[:-11]
# XXX: remove redundant read_evokeds
evokeds = read_evokeds(fname, verbose=False)
html_toc += toc_list.substitute(
div_klass=div_klass, id=None, tooltip=fname,
color='#428bca', text=text)
html_toc += u'<li class="evoked"><ul>'
for ev in evokeds:
html_toc += toc_list.substitute(
div_klass=div_klass, id=global_id,
tooltip=fname, color=color, text=ev.comment)
global_id += 1
html_toc += u'</ul></li>'
elif fname.endswith(tuple(VALID_EXTENSIONS +
['bem', 'custom'])):
html_toc += toc_list.substitute(div_klass=div_klass,
id=global_id,
tooltip=tooltip,
color=color,
text=text)
global_id += 1
html_toc += u'\n</ul></div>'
html_toc += u'<div id="content">'
# The sorted html (according to section)
self.html = html
self.fnames = fnames
self._sectionlabels = sectionlabels
html_header = header_template.substitute(title=self.title,
include=self.include,
sections=self.sections,
sectionvars=self._sectionvars)
self.html.insert(0, html_header) # Insert header at position 0
self.html.insert(1, html_toc) # insert TOC
def _render_array(self, array, global_id=None, cmap='gray',
limits=None, n_jobs=1):
"""Render mri without bem contours.
"""
html = []
html.append(u'<div class="row">')
# Axial
limits = limits or {}
axial_limit = limits.get('axial')
axial_slices_gen = _iterate_axial_slices(array, axial_limit)
html.append(
self._render_one_axis(axial_slices_gen, 'axial',
global_id, cmap, array.shape[1], n_jobs))
# Sagittal
sagittal_limit = limits.get('sagittal')
sagittal_slices_gen = _iterate_sagittal_slices(array, sagittal_limit)
html.append(
self._render_one_axis(sagittal_slices_gen, 'sagittal',
global_id, cmap, array.shape[1], n_jobs))
html.append(u'</div>')
html.append(u'<div class="row">')
# Coronal
coronal_limit = limits.get('coronal')
coronal_slices_gen = _iterate_coronal_slices(array, coronal_limit)
html.append(
self._render_one_axis(coronal_slices_gen, 'coronal',
global_id, cmap, array.shape[1], n_jobs))
# Close section
html.append(u'</div>')
return '\n'.join(html)
def _render_one_bem_axis(self, mri_fname, surf_fnames, global_id,
shape, orientation='coronal', decim=2, n_jobs=1):
"""Render one axis of bem contours.
"""
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
n_slices = shape[orientation_axis]
orig_size = np.roll(shape, orientation_axis)[[1, 2]]
name = orientation
html = []
html.append(u'<div class="col-xs-6 col-md-4">')
slides_klass = '%s-%s' % (name, global_id)
sl = np.arange(0, n_slices, decim)
kwargs = dict(mri_fname=mri_fname, surf_fnames=surf_fnames, show=False,
orientation=orientation, img_output=orig_size)
imgs = _figs_to_mrislices(sl, n_jobs, **kwargs)
slices = []
img_klass = 'slideimg-%s' % name
div_klass = 'span12 %s' % slides_klass
for ii, img in enumerate(imgs):
slice_id = '%s-%s-%s' % (name, global_id, sl[ii])
caption = u'Slice %s %s' % (name, sl[ii])
            first = (ii == 0)
slices.append(_build_html_image(img, slice_id, div_klass,
img_klass, caption, first))
# Render the slider
slider_id = 'select-%s-%s' % (name, global_id)
html.append(u'<div id="%s"></div>' % slider_id)
html.append(u'<ul class="thumbnails">')
# Render the slices
html.append(u'\n'.join(slices))
html.append(u'</ul>')
html.append(_build_html_slider(sl, slides_klass, slider_id))
html.append(u'</div>')
return '\n'.join(html)
def _render_image(self, image, cmap='gray', n_jobs=1):
"""Render one slice of mri without bem.
"""
import nibabel as nib
global_id = self._get_id()
if 'mri' not in self.sections:
self.sections.append('mri')
self._sectionvars['mri'] = 'mri'
nim = nib.load(image)
data = nim.get_data()
shape = data.shape
limits = {'sagittal': range(0, shape[0], 2),
'axial': range(0, shape[1], 2),
'coronal': range(0, shape[2], 2)}
name = op.basename(image)
html = u'<li class="mri" id="%d">\n' % global_id
html += u'<h2>%s</h2>\n' % name
html += self._render_array(data, global_id=global_id,
cmap=cmap, limits=limits,
n_jobs=n_jobs)
html += u'</li>\n'
return html
def _render_raw(self, raw_fname):
"""Render raw.
"""
global_id = self._get_id()
div_klass = 'raw'
caption = u'Raw : %s' % raw_fname
raw = Raw(raw_fname)
n_eeg = len(pick_types(raw.info, meg=False, eeg=True))
n_grad = len(pick_types(raw.info, meg='grad'))
n_mag = len(pick_types(raw.info, meg='mag'))
pick_eog = pick_types(raw.info, meg=False, eog=True)
if len(pick_eog) > 0:
eog = ', '.join(np.array(raw.info['ch_names'])[pick_eog])
else:
eog = 'Not available'
pick_ecg = pick_types(raw.info, meg=False, ecg=True)
if len(pick_ecg) > 0:
ecg = ', '.join(np.array(raw.info['ch_names'])[pick_ecg])
else:
ecg = 'Not available'
meas_date = raw.info['meas_date']
if meas_date is not None:
meas_date = dt.fromtimestamp(meas_date[0]).strftime("%B %d, %Y")
tmin = raw.first_samp / raw.info['sfreq']
tmax = raw.last_samp / raw.info['sfreq']
html = raw_template.substitute(div_klass=div_klass,
id=global_id,
caption=caption,
info=raw.info,
meas_date=meas_date,
n_eeg=n_eeg, n_grad=n_grad,
n_mag=n_mag, eog=eog,
ecg=ecg, tmin=tmin, tmax=tmax)
return html
def _render_forward(self, fwd_fname):
"""Render forward.
"""
div_klass = 'forward'
caption = u'Forward: %s' % fwd_fname
fwd = read_forward_solution(fwd_fname)
repr_fwd = re.sub('>', '', re.sub('<', '', repr(fwd)))
global_id = self._get_id()
html = repr_template.substitute(div_klass=div_klass,
id=global_id,
caption=caption,
repr=repr_fwd)
return html
def _render_inverse(self, inv_fname):
"""Render inverse.
"""
div_klass = 'inverse'
caption = u'Inverse: %s' % inv_fname
inv = read_inverse_operator(inv_fname)
repr_inv = re.sub('>', '', re.sub('<', '', repr(inv)))
global_id = self._get_id()
html = repr_template.substitute(div_klass=div_klass,
id=global_id,
caption=caption,
repr=repr_inv)
return html
def _render_evoked(self, evoked_fname, baseline=None, figsize=None):
"""Render evoked.
"""
evokeds = read_evokeds(evoked_fname, baseline=baseline, verbose=False)
html = []
for ev in evokeds:
global_id = self._get_id()
kwargs = dict(show=False)
img = _fig_to_img(ev.plot, **kwargs)
caption = u'Evoked : %s (%s)' % (evoked_fname, ev.comment)
div_klass = 'evoked'
img_klass = 'evoked'
show = True
html.append(image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show))
has_types = []
if len(pick_types(ev.info, meg=False, eeg=True)) > 0:
has_types.append('eeg')
if len(pick_types(ev.info, meg='grad', eeg=False)) > 0:
has_types.append('grad')
if len(pick_types(ev.info, meg='mag', eeg=False)) > 0:
has_types.append('mag')
for ch_type in has_types:
kwargs.update(ch_type=ch_type)
img = _fig_to_img(ev.plot_topomap, **kwargs)
caption = u'Topomap (ch_type = %s)' % ch_type
html.append(image_template.substitute(img=img,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show))
return '\n'.join(html)
def _render_eve(self, eve_fname, sfreq=None):
"""Render events.
"""
global_id = self._get_id()
events = read_events(eve_fname)
kwargs = dict(events=events, sfreq=sfreq, show=False)
img = _fig_to_img(plot_events, **kwargs)
caption = 'Events : ' + eve_fname
div_klass = 'events'
img_klass = 'events'
show = True
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show)
return html
def _render_epochs(self, epo_fname):
"""Render epochs.
"""
global_id = self._get_id()
epochs = read_epochs(epo_fname)
kwargs = dict(subject=self.subject, show=False)
img = _fig_to_img(epochs.plot_drop_log, **kwargs)
caption = 'Epochs : ' + epo_fname
div_klass = 'epochs'
img_klass = 'epochs'
show = True
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show)
return html
def _render_cov(self, cov_fname, info_fname):
"""Render cov.
"""
global_id = self._get_id()
cov = read_cov(cov_fname)
fig, _ = plot_cov(cov, info_fname, show=False)
img = _fig_to_img(fig=fig)
caption = 'Covariance : %s (n_samples: %s)' % (cov_fname, cov.nfree)
div_klass = 'covariance'
img_klass = 'covariance'
show = True
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show)
return html
def _render_whitened_evoked(self, evoked_fname, noise_cov, baseline):
"""Show whitened evoked.
"""
global_id = self._get_id()
evokeds = read_evokeds(evoked_fname, verbose=False)
html = []
for ev in evokeds:
ev = read_evokeds(evoked_fname, ev.comment, baseline=baseline,
verbose=False)
global_id = self._get_id()
kwargs = dict(noise_cov=noise_cov, show=False)
img = _fig_to_img(ev.plot_white, **kwargs)
caption = u'Whitened evoked : %s (%s)' % (evoked_fname, ev.comment)
div_klass = 'evoked'
img_klass = 'evoked'
show = True
html.append(image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
show=show))
return '\n'.join(html)
def _render_trans(self, trans, path, info, subject,
subjects_dir, image_format='png'):
"""Render trans.
"""
kwargs = dict(info=info, trans=trans, subject=subject,
subjects_dir=subjects_dir)
try:
img = _iterate_trans_views(function=plot_trans, **kwargs)
except IOError:
img = _iterate_trans_views(function=plot_trans, source='head',
**kwargs)
if img is not None:
global_id = self._get_id()
caption = 'Trans : ' + trans
div_klass = 'trans'
img_klass = 'trans'
show = True
html = image_template.substitute(img=img, id=global_id,
div_klass=div_klass,
img_klass=img_klass,
caption=caption,
width=75,
show=show)
return html
def _render_bem(self, subject, subjects_dir, decim, n_jobs,
section='mri', caption='BEM'):
"""Render mri+bem.
"""
import nibabel as nib
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
# Get the MRI filename
mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
if not op.isfile(mri_fname):
warnings.warn('MRI file "%s" does not exist' % mri_fname)
# Get the BEM surface filenames
bem_path = op.join(subjects_dir, subject, 'bem')
if not op.isdir(bem_path):
warnings.warn('Subject bem directory "%s" does not exist' %
bem_path)
return self._render_image(mri_fname, cmap='gray', n_jobs=n_jobs)
surf_fnames = []
for surf_name in ['*inner_skull', '*outer_skull', '*outer_skin']:
surf_fname = glob(op.join(bem_path, surf_name + '.surf'))
if len(surf_fname) > 0:
surf_fname = surf_fname[0]
else:
warnings.warn('No surface found for %s.' % surf_name)
return self._render_image(mri_fname, cmap='gray')
surf_fnames.append(surf_fname)
# XXX : find a better way to get max range of slices
nim = nib.load(mri_fname)
data = nim.get_data()
shape = data.shape
del data # free up memory
html = []
global_id = self._get_id()
if section == 'mri' and 'mri' not in self.sections:
self.sections.append('mri')
self._sectionvars['mri'] = 'mri'
name = caption
html += u'<li class="mri" id="%d">\n' % global_id
html += u'<h2>%s</h2>\n' % name
html += u'<div class="row">'
html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
shape, 'axial', decim, n_jobs)
html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
shape, 'sagittal', decim, n_jobs)
html += u'</div><div class="row">'
html += self._render_one_bem_axis(mri_fname, surf_fnames, global_id,
shape, 'coronal', decim, n_jobs)
html += u'</div>'
html += u'</li>\n'
return ''.join(html)
def _clean_varnames(s):
# Remove invalid characters
s = re.sub('[^0-9a-zA-Z_]', '', s)
# add report_ at the beginning so that the javascript class names
# are valid ones
return 'report_' + s
def _recursive_search(path, pattern):
"""Auxiliary function for recursive_search of the directory.
"""
filtered_files = list()
for dirpath, dirnames, files in os.walk(path):
for f in fnmatch.filter(files, pattern):
# only the following file types are supported
# this ensures equitable distribution of jobs
if f.endswith(tuple(VALID_EXTENSIONS)):
filtered_files.append(op.realpath(op.join(dirpath, f)))
return filtered_files
def _fix_global_ids(html):
"""Auxiliary function for fixing the global_ids after reordering in
_render_toc().
"""
html = re.sub('id="\d+"', 'id="###"', html)
global_id = 1
while len(re.findall('id="###"', html)) > 0:
html = re.sub('id="###"', 'id="%s"' % global_id, html, count=1)
global_id += 1
return html
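# A hedged illustration of _fix_global_ids (not part of the original module):
# it renumbers the id attributes sequentially in document order, so
#     _fix_global_ids(u'<li id="7"></li><li id="3"></li>')
# would return u'<li id="1"></li><li id="2"></li>'.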
|
aestrivex/mne-python
|
mne/report.py
|
Python
|
bsd-3-clause
| 60,627
|
[
"Mayavi"
] |
09959a20e9c63feb5c8e53088f74b161b29e3278ca2edddf8edc214956ba68b3
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""A parser for Relay's text format."""
from __future__ import absolute_import
import sys
from ast import literal_eval
from typing import Any, Deque, Dict, List, Optional, TypeVar, Tuple, Union
from collections import deque
import tvm
from . import module
from .base import Span, SourceName
from . import adt
from . import expr
from . import ty
from . import op
PYTHON_VERSION = sys.version_info.major
try:
from .grammar.py3.RelayVisitor import RelayVisitor
from .grammar.py3.RelayParser import RelayParser
from .grammar.py3.RelayLexer import RelayLexer
except ImportError:
raise Exception("Couldn't find ANTLR parser. Try building with USE_ANTLR=ON.")
try:
from antlr4 import InputStream, CommonTokenStream
from antlr4.error.ErrorListener import ErrorListener
except ImportError:
raise Exception("Couldn't find ANTLR runtime." +
"Try running `pip{version} install antlr4-python{version}-runtime`."
.format(version=PYTHON_VERSION))
sys.setrecursionlimit(10000)
class ParseError(Exception):
"""Exception type for parse errors."""
def __init__(self, message: str) -> None:
super(ParseError, self).__init__()
self.message = message
def __repr__(self):
return "ParseError({})".format(self.message)
def __str__(self):
return repr(self)
class OpWrapper:
"""Overload the __call__ for op."""
pass
class ExprOp(OpWrapper):
"""Call an expr. The default, but does not handle attrs well."""
def __init__(self, operator):
self.operator = operator
def __call__(self, args, attrs, type_args):
try:
return expr.Call(self.operator, args, attrs, type_args)
except Exception:
raise Exception("Operator {} is not registered. It's attributes are {}"
.format(self.operator, attrs))
class FuncOp(OpWrapper):
"""Convert the attrs, call the python function with the attrs passed in as keyword arguments.
Tvm should provide this in the future, as this is pretty similar to what op.get is providing.
"""
def __init__(self, operator):
self.operator = operator
def convert(self, v):
if isinstance(v, tuple):
return tuple([self.convert(x) for x in v])
if isinstance(v, expr.Constant):
return v.data.asnumpy().item()
if isinstance(v, str):
return v
raise Exception(v)
def __call__(self, args, attrs, type_args):
if attrs is None:
attrs = {}
x = self.operator(*args, **{k: self.convert(v) for k, v in attrs.items()})
if isinstance(x, expr.TupleWrapper):
x = x.astuple()
return x
BINARY_OPS = {
RelayParser.MUL: op.multiply,
RelayParser.DIV: op.divide,
RelayParser.ADD: op.add,
RelayParser.SUB: op.subtract,
RelayParser.LT: op.less,
RelayParser.GT: op.greater,
RelayParser.LE: op.less_equal,
RelayParser.GE: op.greater_equal,
RelayParser.EQ: op.equal,
RelayParser.NE: op.not_equal,
}
FUNC_OPS = {
"nn.conv2d": op.nn.conv2d,
"nn.batch_norm": op.nn.batch_norm,
"nn.dense": op.nn.dense,
"nn.bias_add": op.nn.bias_add,
"nn.max_pool2d": op.nn.max_pool2d,
"nn.global_max_pool2d": op.nn.global_max_pool2d,
"nn.avg_pool2d": op.nn.avg_pool2d,
"nn.global_avg_pool2d": op.nn.global_avg_pool2d,
"nn.softmax": op.nn.softmax,
"reshape": op.reshape,
"nn.conv2d_transpose": op.nn.conv2d_transpose,
"concatenate": op.concatenate,
"nn.dropout": op.nn.dropout_raw,
"zeros": op.zeros,
"split": op.split,
"cast": op.cast
}
TYPE_PREFIXES = [
"int",
"uint",
"float",
"bool",
]
T = TypeVar("T")
Scope = Deque[Tuple[str, T]]
Scopes = Deque[Scope[T]]
def lookup(scopes: Scopes[T], name: str) -> Optional[T]:
"""Look up `name` in `scopes`."""
for scope in scopes:
for key, val in scope:
if key == name:
return val
return None
def spanify(f):
"""A decorator which attaches span information
to the value returned by calling `f`.
Intended for use with the below AST visiting
methods. The idea is that after we do the work
of constructing the AST we attach Span information.
"""
def _wrapper(*args, **kwargs):
# Assumes 0th arg is self and gets source_name from object.
sn = args[0].source_name
# Assumes 1st arg is an ANTLR parser context.
ctx = args[1]
ast = f(*args, **kwargs)
line, col = ctx.getSourceInterval()
sp = Span(sn, line, col)
if isinstance(ast, tvm.relay.expr.TupleWrapper):
ast = ast.astuple()
ast.set_span(sp)
return ast
return _wrapper
# TODO(@jmp): Use https://stackoverflow.com/q/13889941
# to figure out how to get ANTLR4 to be stricter about reporting syntax errors
class ParseTreeToRelayIR(RelayVisitor):
"""Parse Relay text format into Relay IR."""
def __init__(self, source_name: str) -> None:
self.source_name = source_name
self.module = module.Module({}) # type: module.Module
# Adding an empty scope allows naked lets without pain.
self.var_scopes = deque([deque()]) # type: Scopes[expr.Var]
self.global_vars = {} # type: Scope[expr.GlobalVar]
self.type_var_scopes = deque([deque()]) # type: Scopes[ty.TypeVar]
self.global_type_vars = {} # type: Scope[expr.GlobalVar]
self.graph_expr = [] # type: List[expr.Expr]
super(ParseTreeToRelayIR, self).__init__()
def enter_var_scope(self) -> None:
"""Enter a new Var scope so it can be popped off later."""
self.var_scopes.appendleft(deque())
def exit_var_scope(self) -> Scope[expr.Var]:
"""Pop off the current Var scope and return it."""
return self.var_scopes.popleft()
def mk_var(self, name: str, typ: ty.Type = None):
"""Create a new Var and add it to the Var scope."""
var = expr.Var(name, typ)
self.var_scopes[0].appendleft((name, var))
return var
def mk_global_var(self, name: str) -> expr.GlobalVar:
"""Create a new GlobalVar and add it to the GlobalVar scope."""
if name in self.global_vars:
raise ParseError(f"duplicate global var \"{name}\"")
var = expr.GlobalVar(name)
self.global_vars[name] = var
return var
def enter_type_param_scope(self) -> None:
"""Enter a new TypeVar scope so it can be popped off later."""
self.type_var_scopes.appendleft(deque())
def exit_type_param_scope(self) -> Scope[ty.TypeVar]:
"""Pop off the current TypeVar scope and return it."""
return self.type_var_scopes.popleft()
def mk_typ(self, name: str, kind: ty.Kind) -> ty.TypeVar:
"""Create a new TypeVar and add it to the TypeVar scope."""
typ = ty.TypeVar(name, kind)
self.type_var_scopes[0].appendleft((name, typ))
return typ
def mk_global_typ_var(self, name, kind):
# (str, ty.Kind) -> ty.GlobalTypeVar
"""Create a new TypeVar and add it to the TypeVar scope."""
typ = ty.GlobalTypeVar(name, kind)
self._check_existing_typ_expr(name, typ)
self.global_type_vars[name] = typ
return typ
# TODO: rethink whether we should have type constructors mixed with type vars.
def mk_global_typ_cons(self, name, cons):
self._check_existing_typ_expr(name, cons)
self.global_type_vars[name] = cons
def _check_existing_typ_expr(self, name, new_expr):
if name in self.global_type_vars:
new_typ_name = self._type_expr_name(new_expr)
existing_typ_name = self._type_expr_name(self.global_type_vars[name])
raise ParseError(
f"{new_typ_name} `{name}` conflicts with existing {existing_typ_name}")
def _type_expr_name(self, e):
if isinstance(e, adt.Constructor):
return f"`{e.belong_to.var.name}` ADT constructor"
elif isinstance(e, ty.GlobalTypeVar):
if e.kind == ty.Kind.AdtHandle:
return f"ADT definition"
return "function definition"
def visitProjection(self, ctx):
return expr.TupleGetItem(self.visit(ctx.expr()), self.visit(ctx.NAT()))
def visitTerminal(self, node) -> Union[expr.Expr, int, float]:
"""Visit lexer tokens that aren't ignored or visited by other functions."""
node_type = node.getSymbol().type
node_text = node.getText()
if node_type == RelayLexer.NAT:
return int(node_text)
if node_type == RelayLexer.FLOAT:
return float(node_text[:-1])
if node_type == RelayLexer.BOOL_LIT:
if node_text == "True":
return True
if node_text == "False":
return False
raise ParseError("unrecognized BOOL_LIT: `{}`".format(node_text))
if node_type == RelayLexer.QUOTED_STRING:
return literal_eval(node_text)
raise ParseError(f"unhandled terminal \"{node_text}\" of type `{node_type}`")
def visitGeneralIdent(self, ctx):
name = ctx.getText()
# Look through all type prefixes for a match.
for type_prefix in TYPE_PREFIXES:
if name.startswith(type_prefix):
return ty.scalar_type(name)
# Next, look it up in the local then global type params.
type_param = lookup(self.type_var_scopes, name)
if type_param is None:
type_param = self.global_type_vars.get(name, None)
if type_param is not None:
return type_param
# Check if it's an operator.
op_name = ".".join([name.getText() for name in ctx.CNAME()])
if op_name in FUNC_OPS:
return FuncOp(FUNC_OPS[op_name])
return ExprOp(op.get(op_name))
def visitGlobalVar(self, ctx):
var_name = ctx.CNAME().getText()
global_var = self.global_vars.get(var_name, None)
if global_var is None:
raise ParseError(f"unbound global var `{var_name}`")
return global_var
def visitLocalVar(self, ctx):
var_name = ctx.CNAME().getText()
local_var = lookup(self.var_scopes, var_name)
if local_var is None:
raise ParseError(f"unbound local var `{var_name}`")
return local_var
def visitGraphVar(self, ctx):
return self.graph_expr[int(ctx.NAT().getText())]
def visit_list(self, ctx_list) -> List[Any]:
""""Visit a list of contexts."""
# type: RelayParser.ContextParserRuleContext
assert isinstance(ctx_list, list)
return [self.visit(ctx) for ctx in ctx_list]
def getTypeExpr(self, ctx) -> Optional[ty.Type]:
"""Return a (possibly None) Relay type."""
        # type: Optional[RelayParser.Type_Context]
if ctx is None:
return None
return self.visit(ctx)
def visitProg(self, ctx: RelayParser.ProgContext) -> Union[expr.Expr, module.Module]:
self.meta = None
if ctx.METADATA():
header, data = str(ctx.METADATA()).split("\n", 1)
assert header == "METADATA:"
self.meta = tvm.load_json(data)
if ctx.defn():
self.visit_list(ctx.defn())
return self.module
if ctx.expr():
return self.visit(ctx.expr())
return self.module
# Exprs
def visitOpIdent(self, ctx) -> op.Op:
op_name = ".".join([name.getText() for name in ctx.CNAME()])
if op_name in FUNC_OPS:
return FuncOp(FUNC_OPS[op_name])
return ExprOp(op.get(op_name))
# pass through
def visitParen(self, ctx: RelayParser.ParenContext) -> expr.Expr:
return self.visit(ctx.expr())
# pass through
def visitBody(self, ctx: RelayParser.BodyContext) -> expr.Expr:
return self.visit(ctx.expr())
def visitScalarFloat(self, ctx: RelayParser.ScalarFloatContext) -> expr.Constant:
return expr.const(self.visit(ctx.FLOAT()))
def visitScalarInt(self, ctx: RelayParser.ScalarIntContext) -> expr.Constant:
return expr.const(self.visit(ctx.NAT()))
def visitScalarBool(self, ctx: RelayParser.ScalarBoolContext) -> expr.Constant:
return expr.const(self.visit(ctx.BOOL_LIT()))
def visitNeg(self, ctx: RelayParser.NegContext) -> Union[expr.Constant, expr.Call]:
val = self.visit(ctx.expr())
if isinstance(val, expr.Constant) and val.data.asnumpy().ndim == 0:
# fold Neg in for scalars
return expr.const(-val.data.asnumpy().item())
return op.negative(val)
def visitTuple(self, ctx: RelayParser.TupleContext) -> expr.Tuple:
tup = self.visit_list(ctx.expr())
return expr.Tuple(tup)
def visitLet(self, ctx: RelayParser.LetContext) -> expr.Let:
"""Desugar various sequence constructs to Relay Let nodes."""
if ctx.var() is None:
# anonymous identity
ident = "_"
typ = None
var = self.mk_var(ident, typ)
else:
var = self.visitVar(ctx.var())
self.enter_var_scope()
value = self.visit(ctx.expr(0))
self.exit_var_scope()
body = self.visit(ctx.expr(1))
return expr.Let(var, value, body)
def visitBinOp(self, ctx: RelayParser.BinOpContext) -> expr.Call:
"""Desugar binary operators."""
arg0, arg1 = self.visit_list(ctx.expr())
relay_op = BINARY_OPS.get(ctx.op.type)
if relay_op is None:
raise ParseError("unimplemented binary op.")
return relay_op(arg0, arg1)
@spanify
def visitVar(self, ctx: RelayParser.VarContext) -> expr.Var:
"""Visit a single variable."""
ident = ctx.localVar()
if ident is None:
raise ParseError("only local ids may be used in vars.")
typeExpr = self.getTypeExpr(ctx.typeExpr())
return self.mk_var(ident.getText()[1:], typeExpr)
def visitVarList(self, ctx: RelayParser.VarListContext) -> List[expr.Var]:
return self.visit_list(ctx.var())
# TODO: support a larger class of values than just Relay exprs
def visitAttr(self, ctx: RelayParser.AttrContext) -> Tuple[str, expr.Expr]:
return (ctx.CNAME().getText(), self.visit(ctx.expr()))
def visitArgNoAttr(self, ctx: RelayParser.ArgNoAttrContext):
return (self.visit_list(ctx.varList().var()), None)
def visitAttrSeq(self, ctx: RelayParser.AttrSeqContext) -> Dict[str, expr.Expr]:
return dict(self.visit_list(ctx.attr()))
def visitArgWithAttr(self, ctx: RelayParser.AttrSeqContext) \
-> Tuple[List[expr.Var], Dict[str, expr.Expr]]:
return (self.visit_list(ctx.var()), self.visitAttrSeq(ctx.attrSeq()))
def visitArgList(self, ctx: RelayParser.ArgListContext) \
-> Tuple[Optional[List[expr.Var]], Optional[Dict[str, expr.Expr]]]:
var_list = self.visit(ctx.varList()) if ctx.varList() else None
attr_list = self.visit(ctx.attrList()) if ctx.attrList() else None
return (var_list, attr_list)
def visitMeta(self, ctx: RelayParser.MetaContext):
type_key = str(ctx.CNAME())
index = int(self.visit(ctx.NAT()))
return self.meta[type_key][index]
def mk_func(
self,
ctx: Union[RelayParser.FuncContext, RelayParser.DefnContext]) \
-> expr.Function:
"""Construct a function from either a Func or Defn."""
# Enter var scope early to put params in scope.
self.enter_var_scope()
# Capture type params in params.
self.enter_type_param_scope()
type_params = ctx.typeParamList()
if type_params is not None:
type_params = type_params.generalIdent()
assert type_params
for ty_param in type_params:
name = ty_param.getText()
self.mk_typ(name, ty.Kind.Type)
var_list, attr_list = self.visit(ctx.argList())
if var_list is None:
var_list = []
ret_type = self.getTypeExpr(ctx.typeExpr())
body = self.visit(ctx.body())
# NB(@jroesch): you must stay in the type parameter scope until
# after you exit the body, you can reference the type parameters
# of your parent scopes.
type_params = list(self.exit_type_param_scope())
if type_params:
_, type_params = zip(*type_params)
self.exit_var_scope()
attrs = tvm.make.node("DictAttrs", **attr_list) if attr_list is not None else None
return expr.Function(var_list, body, ret_type, type_params, attrs)
@spanify
def visitFunc(self, ctx: RelayParser.FuncContext) -> expr.Function:
return self.mk_func(ctx)
# TODO: how to set spans for definitions?
# @spanify
def visitFuncDefn(self, ctx: RelayParser.DefnContext) -> None:
ident_name = ctx.globalVar().getText()[1:]
ident = self.mk_global_var(ident_name)
self.module[ident] = self.mk_func(ctx)
def handle_adt_header(
self,
ctx: Union[RelayParser.ExternAdtDefnContext, RelayParser.AdtDefnContext]):
"""Handles parsing of the name and type params of an ADT definition."""
adt_name = ctx.generalIdent().getText()
adt_var = self.mk_global_typ_var(adt_name, ty.Kind.AdtHandle)
# parse type params
type_params = ctx.typeParamList()
if type_params is None:
type_params = []
else:
type_params = [self.mk_typ(type_ident.getText(), ty.Kind.Type)
for type_ident in type_params.generalIdent()]
return adt_var, type_params
def visitExternAdtDefn(self, ctx: RelayParser.ExternAdtDefnContext):
# TODO(weberlo): update this handler once extern is implemented
self.enter_type_param_scope()
adt_var, type_params = self.handle_adt_header(ctx)
# update module being built
self.module[adt_var] = adt.TypeData(adt_var, type_params, [])
self.exit_type_param_scope()
def visitAdtDefn(self, ctx: RelayParser.AdtDefnContext):
self.enter_type_param_scope()
adt_var, type_params = self.handle_adt_header(ctx)
# parse constructors
adt_cons_defns = ctx.adtConsDefnList()
if adt_cons_defns is None:
adt_cons_defns = []
else:
adt_cons_defns = adt_cons_defns.adtConsDefn()
parsed_constructors = []
for cons_defn in adt_cons_defns:
inputs = [self.visit(inp) for inp in cons_defn.typeExpr()]
cons_defn_name = cons_defn.constructorName().getText()
cons_defn = adt.Constructor(cons_defn_name, inputs, adt_var)
self.mk_global_typ_cons(cons_defn_name, cons_defn)
parsed_constructors.append(cons_defn)
# update module being built
self.module[adt_var] = adt.TypeData(adt_var, type_params, parsed_constructors)
self.exit_type_param_scope()
def visitMatch(self, ctx: RelayParser.MatchContext):
match_type = ctx.matchType().getText()
if match_type == "match":
complete_match = True
elif match_type == "match?":
complete_match = False
else:
raise RuntimeError(f"unknown match type {match_type}")
# TODO: Will need some kind of type checking to know which ADT is being
# matched on.
match_data = self.visit(ctx.expr())
match_clauses = ctx.matchClauseList()
if match_clauses is None:
match_clauses = []
else:
match_clauses = match_clauses.matchClause()
parsed_clauses = []
for clause in match_clauses:
constructor_name = clause.constructorName().getText()
constructor = self.global_type_vars[constructor_name]
self.enter_var_scope()
patternList = clause.patternList()
if patternList is None:
patterns = []
else:
patterns = [self.visit(pattern) for pattern in patternList.pattern()]
clause_body = self.visit(clause.expr())
self.exit_var_scope()
# TODO: Do we need to pass `None` if it's a 0-arity cons, or is an empty list fine?
parsed_clauses.append(adt.Clause(
adt.PatternConstructor(
constructor,
patterns
),
clause_body
))
return adt.Match(match_data, parsed_clauses, complete=complete_match)
def visitPattern(self, ctx: RelayParser.PatternContext):
text = ctx.getText()
if text == "_":
return adt.PatternWildcard()
elif text.startswith("%"):
text = ctx.localVar().getText()
typ = ctx.typeExpr()
if typ is not None:
typ = self.visit(typ)
var = self.mk_var(text[1:], typ=typ)
return adt.PatternVar(var)
else:
raise ParseError(f"invalid pattern syntax \"{text}\"")
def visitCallNoAttr(self, ctx: RelayParser.CallNoAttrContext):
return (self.visit_list(ctx.exprList().expr()), None)
def visitCallWithAttr(self, ctx: RelayParser.CallWithAttrContext):
return (self.visit_list(ctx.expr()), self.visit(ctx.attrSeq()))
def call(self, func, args, attrs, type_args):
if isinstance(func, OpWrapper):
return func(args, attrs, type_args)
elif isinstance(func, adt.Constructor):
return func(*args)
return expr.Call(func, args, attrs, type_args)
@spanify
def visitCall(self, ctx: RelayParser.CallContext):
# type: (RelayParser.CallContext) -> expr.Call
func = self.visit(ctx.expr())
args, attrs = self.visit(ctx.callList())
res = self.call(func, args, attrs, [])
return res
@spanify
def visitIfElse(self, ctx: RelayParser.IfElseContext):
# type: (RelayParser.IfElseContext) -> expr.If
"""Construct a Relay If node. Creates a new scope for each branch."""
cond = self.visit(ctx.expr())
self.enter_var_scope()
true_branch = self.visit(ctx.body(0))
self.exit_var_scope()
self.enter_var_scope()
false_branch = self.visit(ctx.body(1))
self.exit_var_scope()
return expr.If(cond, true_branch, false_branch)
@spanify
def visitGraph(self, ctx: RelayParser.GraphContext):
# type: (RelayParser.GraphContext) -> expr.Expr
"""Visit a graph variable assignment."""
graph_nid = int(ctx.graphVar().getText()[1:])
self.enter_var_scope()
value = self.visit(ctx.expr(0))
self.exit_var_scope()
if graph_nid != len(self.graph_expr):
raise ParseError(
"expected new graph variable to be `%{}`,".format(len(self.graph_expr)) + \
"but got `%{}`".format(graph_nid))
self.graph_expr.append(value)
kont = self.visit(ctx.expr(1))
return kont
# Types
# pylint: disable=unused-argument
def visitIncompleteType(self, ctx: RelayParser.IncompleteTypeContext):
        # type: (RelayParser.IncompleteTypeContext) -> None
return None
def visitTypeCallType(self, ctx: RelayParser.TypeCallTypeContext):
func = self.visit(ctx.generalIdent())
args = [self.visit(arg) for arg in ctx.typeParamList().generalIdent()]
return ty.TypeCall(func, args)
def visitParensShape(self, ctx: RelayParser.ParensShapeContext):
# type: (RelayParser.ParensShapeContext) -> int
return self.visit(ctx.shape())
def visitShapeList(self, ctx: RelayParser.ShapeListContext):
# type: (RelayParser.ShapeListContext) -> List[int]
return self.visit_list(ctx.shape())
def visitTensor(self, ctx: RelayParser.TensorContext):
return tuple(self.visit_list(ctx.expr()))
def visitTensorType(self, ctx: RelayParser.TensorTypeContext):
# type: (RelayParser.TensorTypeContext) -> ty.TensorType
"""Create a simple tensor type. No generics."""
shape = self.visit(ctx.shapeList())
dtype = self.visit(ctx.typeExpr())
if not isinstance(dtype, ty.TensorType):
raise ParseError("expected dtype to be a Relay base type.")
dtype = dtype.dtype
return ty.TensorType(shape, dtype)
def visitTupleType(self, ctx: RelayParser.TupleTypeContext):
# type: (RelayParser.TupleTypeContext) -> ty.TupleType
return ty.TupleType(self.visit_list(ctx.typeExpr()))
def visitFuncType(self, ctx: RelayParser.FuncTypeContext):
# type: (RelayParser.FuncTypeContext) -> ty.FuncType
types = self.visit_list(ctx.typeExpr())
arg_types = types[:-1]
ret_type = types[-1]
return ty.FuncType(arg_types, ret_type, [], None)
def make_parser(data):
# type: (str) -> RelayParser
"""Construct a RelayParser a given data stream."""
input_stream = InputStream(data)
lexer = RelayLexer(input_stream)
lexer.addErrorListener(StrictErrorListener(data))
token_stream = CommonTokenStream(lexer)
p = RelayParser(token_stream)
p.addErrorListener(StrictErrorListener(data))
return p
__source_name_counter__ = 0
class StrictErrorListener(ErrorListener):
"""This ErrorListener fail eagerly on all error, and report the program."""
def __init__(self, text):
self.text = text
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
raise Exception("Syntax Error in:\n" + self.text)
def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs):
raise Exception("Ambiguity Error in:\n" + self.text)
def reportAttemptingFullContext(self,
recognizer,
dfa,
startIndex,
stopIndex,
conflictingAlts,
configs):
raise Exception("Attempting Full Context in:\n" + self.text)
def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs):
raise Exception("Context Sensitivity in:\n" + self.text)
def fromtext(data, source_name=None):
# type: (str, str) -> Union[expr.Expr, module.Module]
"""Parse a Relay program."""
if data == "":
raise ParseError("cannot parse the empty string.")
global __source_name_counter__
if source_name is None:
source_name = "source_file{0}".format(__source_name_counter__)
if isinstance(source_name, str):
source_name = SourceName(source_name)
tree = make_parser(data).prog()
return ParseTreeToRelayIR(source_name).visit(tree)
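# A hedged usage sketch (the program text is an assumed minimal example; depending
# on the grammar bundled with this parser it may also require a text-format version
# header such as "v0.0.4" on the first line):
#     prog = fromtext("let %x = 1; %x", source_name="example")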
|
Huyuwei/tvm
|
python/tvm/relay/_parser.py
|
Python
|
apache-2.0
| 27,885
|
[
"VisIt"
] |
87791c45cc65d767b7e4a9154ca486f54117b2f8dce6507bde01e4bfc27e9327
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import warnings
from shutil import which
import requests
from pymatgen.ext.cod import COD
try:
website_is_up = requests.get("https://www.crystallography.net").status_code == 200
except Exception:
website_is_up = False
@unittest.skipIf(not website_is_up, "www.crystallography.net is down.")
class CODTest(unittest.TestCase):
_multiprocess_shared_ = True
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
@unittest.skipIf(not which("mysql"), "No mysql.")
def test_get_cod_ids(self):
ids = COD().get_cod_ids("Li2O")
self.assertTrue(len(ids) > 15)
@unittest.skipIf(not which("mysql"), "No mysql.")
def test_get_structure_by_formula(self):
data = COD().get_structure_by_formula("Li2O")
self.assertTrue(len(data) > 15)
self.assertEqual(data[0]["structure"].composition.reduced_formula, "Li2O")
def test_get_structure_by_id(self):
s = COD().get_structure_by_id(2002926)
self.assertEqual(s.formula, "Be8 H64 N16 F32")
if __name__ == "__main__":
unittest.main()
|
materialsproject/pymatgen
|
pymatgen/ext/tests/test_cod.py
|
Python
|
mit
| 1,234
|
[
"pymatgen"
] |
728aa08073ab99265ba592f1d2bb4aac4e89e9d023b5c0f294735403d7277cf4
|
# -*- coding=utf-8 -*-
"""
MDT, a module for protein structure analysis.
Copyright 1989-2021 Andrej Sali.
MDT is free software: you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License
as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MDT. If not, see <http://www.gnu.org/licenses/>.
"""
__docformat__ = "restructuredtext"
__all__ = ['MDTError', 'FileFormatError', 'TableSection', 'Table', 'Library',
'Feature', 'Bin', 'Source', 'BondClasses', 'TupleClasses',
'HydrogenBondClasses', 'Float', 'Double', 'Int32', 'UnsignedInt32',
'Int16', 'UnsignedInt16', 'Int8', 'UnsignedInt8',
'write_2dsplinelib', 'write_anglelib', 'write_bondlib',
'write_improperlib', 'write_splinelib', 'uniform_bins',
'write_statpot']
try:
from modeller.util.modobject import ModObject
except ImportError:
from modeller.util.modobject import modobject as ModObject
from modeller.util import modlist
# Import _modeller after modeller itself, since the latter modifies the search
# path for the former:
import _modeller
import _mdt
del _modeller
#: Generic MDT exception
MDTError = _mdt.MDTError
#: File format error
FileFormatError = _mdt.FileFormatError
# Get version info
def __get_version_info(version):
try:
return tuple([int(x) for x in version.split('.')])
except ValueError:
return version
__version__ = version = _mdt.mdt_version_get()
version_info = __get_version_info(version)
def _prepare_bond_span(bond_span):
"""Helper function for bond_span_range"""
if bond_span is None:
return (-1, -1)
else:
return bond_span
class _BinType(object):
def __init__(self, bin_type):
self._bin_type = bin_type
#: Single-precision floating-point bin storage (approximate range 0 to +/-1e38)
Float = _BinType(_mdt.MOD_MDTB_FLOAT)
#: Double-precision floating-point bin storage (approximate range
#: 0 to +/-1e308)
Double = _BinType(_mdt.MOD_MDTB_DOUBLE)
#: 32-bit signed integer bin storage (range -1e31 to 1e31)
Int32 = _BinType(_mdt.MOD_MDTB_INT32)
#: 32-bit unsigned integer bin storage (range 0 to 1e32)
UnsignedInt32 = _BinType(_mdt.MOD_MDTB_UINT32)
#: 16-bit signed integer bin storage (range -1e15 to 1e15)
Int16 = _BinType(_mdt.MOD_MDTB_INT16)
#: 16-bit unsigned integer bin storage (range 0 to 1e16)
UnsignedInt16 = _BinType(_mdt.MOD_MDTB_UINT16)
#: 8-bit signed integer bin storage (range -127 to 128)
Int8 = _BinType(_mdt.MOD_MDTB_INT8)
#: 8-bit unsigned integer bin storage (range 0 to 255)
UnsignedInt8 = _BinType(_mdt.MOD_MDTB_UINT8)
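# A hedged usage sketch (the feature objects are assumed to come from mdt.features,
# as in the Table docstring below): the bin storage type is chosen when a Table is
# created, e.g.
#     m = Table(mlib, features=(restyp1, restyp2), bin_type=UnsignedInt16)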
class Library(ModObject):
"""
Library data used in the construction and use of MDTs.
:Parameters:
- `env`: the Modeller environment to use
- `distance_atoms`: the atom types to use for the
:class:`features.ResidueDistance` feature
- `special_atoms`: whether to treat disulfide and termini atoms
specially for atom class features (see :class:`features.AtomType`)
- `hbond_cutoff`: maximum separation between two H-bonded atoms
(see :class:`features.HydrogenBondDonor`)
"""
_modpt = None
_env = None
def __init__(self, env, distance_atoms=('CA', 'CA'), special_atoms=False,
hbond_cutoff=3.5):
self._env = env.copy()
self._modpt = _mdt.mdt_library_new(self._env.libs.modpt, self)
_mdt.mdt_library_hbond_cutoff_set(self._modpt, hbond_cutoff)
_mdt.mdt_library_special_atoms_set(self._modpt, special_atoms)
_mdt.mdt_library_distance_atoms_set(self._modpt, *distance_atoms)
def __del__(self):
_mdt.mdt_library_free(self._modpt)
def __get_atom_classes(self):
return BondClasses(self, 1)
def __get_bond_classes(self):
return BondClasses(self, 2)
def __get_angle_classes(self):
return BondClasses(self, 3)
def __get_dihedral_classes(self):
return BondClasses(self, 4)
def __get_tuple_classes(self):
return TupleClasses(self)
def __get_hbond_classes(self):
return HydrogenBondClasses(self)
atom_classes = property(__get_atom_classes,
doc="Atom classes; see :class:`BondClasses`")
bond_classes = property(__get_bond_classes,
doc="Bond classes; see :class:`BondClasses`")
angle_classes = property(__get_angle_classes,
doc="Angle classes; see :class:`BondClasses`")
dihedral_classes = property(
__get_dihedral_classes,
doc="Dihedral classes; see :class:`BondClasses`")
tuple_classes = property(
__get_tuple_classes,
doc="Atom tuple classes; see :class:`TupleClasses`"
" and :ref:`tuple_features`")
hbond_classes = property(__get_hbond_classes,
doc="Hydrogen bond atom classes; "
"see :class:`HydrogenBondClasses`")
class BondClasses(object):
"""Classifications of atoms/bonds/angles/dihedrals into classes.
These classes are used by
:ref:`atom <atom_features>` and
:ref:`chemical bond <chemical_bond_features>` features.
Usually accessed as :attr:`Library.atom_classes`,
:attr:`Library.bond_classes`, :attr:`Library.angle_classes`, or
:attr:`Library.dihedral_classes`. (There is no
need to create your own BondClasses objects.)"""
def __init__(self, mlib, n_atom):
self._mlib = mlib
self.__n_atom = n_atom
def read(self, filename):
"""Read class information from `filename`.
This is a text file with a simple format. Each line either
denotes the start of a new named class, or names a member of the
last-named class, as a residue name followed by one or more atom
names. For example, an atom class file might start with::
ATMGRP 'AC'
ATOM 'ALA' 'CA'
ATOM 'ALA' 'C'
ATOM '*' 'CB'
Thus, the first atom class is called 'AC' and any CA or C atom in
an ALA residue, or the CB atom in any residue, will be placed in
this class.
Bond class files are similar but use BNDGRP and BOND lines,
each of which names two atoms::
BNDGRP 'ALA:C:+N'
BOND 'ALA' 'C' '+N'
Note that CHARMM-style + or - prefixes can be added to atom names
for all but the first atom on a BOND line, to indicate the atom
must be found in the next or previous residue.
Angle class files use ANGGRP and ANGLE lines; each ANGLE line
names three atoms. Dihedral class files use DIHGRP and DIHEDRAL
lines; each DIHEDRAL line names four atoms.
"""
return _mdt.mdt_atom_classes_read(filename, self._mlib._modpt,
self.__n_atom)
class TupleClasses(BondClasses):
"""Classifications of tuples of atoms into classes.
Usually accessed as :attr:`Library.tuple_classes`.
These classes are used by :ref:`tuple <tuple_features>` or
:ref:`tuple pair <tuple_pair_features>` features."""
def __init__(self, mlib):
BondClasses.__init__(self, mlib, 0)
def read(self, filename):
"""Read atom tuple information from `filename`.
This is a text file with a format similar to that accepted by
:meth:`BondClasses.read`. The file can consist either of sets
of atom triplets (named with TRPGRP lines and containing triples
of atoms named on TRIPLET lines) or sets of atom doublets
using DBLGRP and DOUBLET lines. Each atom but the first in each
doublet or triplet can also be restricted to match only in
certain residue types by naming the residue in parentheses before
the rest of the atom name (and CHARMM-style + or - qualifier).
For example, a suitable atom triplet file looks like::
TRPGRP 't1'
TRIPLET 'ALA' 'CA' '+C' '-C'
TRPGRP 't2'
TRIPLET 'ALA' 'CA' '(CYS)+C' '-C'
The first triplet is named 't1' and will match any set of three
atoms where the first is called CA in an ALA residue, and the
other two atoms are C atoms in the previous and next residue.
The second triplet is similar but will only include triplets where
the next residue is a CYS.
"""
return _mdt.mdt_tuple_read(filename, self._mlib._modpt)
class HydrogenBondClasses(BondClasses):
"""Classifications of atoms into hydrogen bond classes.
Usually accessed as :attr:`Library.hbond_classes`.
These classes are used by the :class:`features.HydrogenBondAcceptor`,
:class:`features.HydrogenBondDonor` and
:class:`features.HydrogenBondSatisfaction` features."""
def __init__(self, mlib):
BondClasses.__init__(self, mlib, 1)
def read(self, filename):
"""Read hydrogen bond atom class information from a file"""
return _mdt.mdt_hbond_read(filename, self._mlib._modpt)
class TableSection(ModObject):
"""A section of a multi-dimensional table. You should not create
TableSection objects directly, but rather by indexing a :class:`Table`
object, as a TableSection is just a 'view' into an existing table.
For example, ::
>>> m = mdt.Table(mlib, features=(residue_type, xray_resolution))
>>> print m[0].entropy()
would create a section (using m[0]) which is a 1D table over the 2nd
feature (X-ray resolution) for the first bin (0) of the first feature
(residue type), and then get the entropy using the
:meth:`TableSection.entropy` method."""
_indices = ()
__mdt = None
_mlib = None
_modpt = None
_basept = None
def __init__(self, mdt, indices):
self.__mdt = mdt # Keep a reference to the MDT
self._modpt = mdt._modpt
self._basept = mdt._basept
self._mlib = mdt._mlib
self._indices = indices
def _get_removed_rank(self):
"""Return the number of dimensions removed from the full MDT in this
section (0 for the full MDT, up to nfeatures - 1)"""
return len(self._indices)
def sum(self):
"""Sum of all points in the table"""
return _mdt.mdt_section_sum(self._basept, self._indices)
def entropy(self):
"""Entropy of all points in the table"""
return _mdt.mdt_section_entropy(self._basept, self._indices)
def mean_stdev(self):
"""Mean and standard deviation of the table"""
return _mdt.mdt_section_meanstdev(self._basept, self._mlib._modpt,
self._indices)
def __check_indices(self, indices):
"""Make sure the indices for an MDT section are reasonable"""
for (feat, indx) in zip(self.features, indices):
istart, iend = feat.offset, len(feat.bins) + feat.offset - 1
if indx < 0:
indx += iend + 1
if not istart <= indx <= iend:
raise IndexError("index (%d) not in range %d<=index<=%d"
% (indx, istart, iend))
def __getitem__(self, indx):
if isinstance(indx, list):
indx = tuple(indx)
elif not isinstance(indx, tuple):
indx = (indx,)
if len(indx) < len(self.features):
self.__check_indices(indx)
return TableSection(self, self._indices + indx)
else:
return _mdt.mdt_get(self._basept, self._indices + indx)
def __setitem__(self, indx, val):
if isinstance(indx, list):
indx = tuple(indx)
elif not isinstance(indx, tuple):
indx = (indx,)
if len(indx) < len(self.features):
raise ValueError("Cannot set sections of MDTs")
else:
_mdt.mdt_set(self._basept, self._indices + indx, val)
def __get_features(self):
return _FeatureList(self)
def __get_offset(self):
return tuple([f.offset for f in self.features])
def __get_shape(self):
return tuple([len(f.bins) for f in self.features])
features = property(__get_features,
doc="Features in this MDT; a list of "
":class:`Feature` objects")
offset = property(__get_offset,
doc="Array offsets; see :attr:`Feature.offset`")
shape = property(__get_shape, doc="Array shape; the number of "
"bins for each feature")
class Table(TableSection):
"""A multi-dimensional table.
:Parameters:
- `mlib`: the MDT `Library` object to use
- `file`: if specified, the filename to read the initial table from
(if the name ends with '.hdf5', :meth:`Table.read_hdf5` is used,
otherwise :meth:`Table.read`)
- `features`: if specified (and `file` is not), a list of feature
types to initialize the table with (using :meth:`Table.make`)
- `bin_type`: type of storage for bin data (see :ref:`binstorage`).
- `shape`: if specified with `features`, the shape of the new table
(see :meth:`Table.make`)
Individual elements from the table can be accessed in standard Python
fashion, e.g. ::
>>> import mdt.features
>>> import modeller
>>> env = modeller.environ()
>>> mlib = mdt.Library(env)
>>> restyp1 = mdt.features.ResidueType(mlib, protein=0)
>>> restyp2 = mdt.features.ResidueType(mlib, protein=1)
>>> gap = mdt.features.GapDistance(mlib, mdt.uniform_bins(10, 0, 1))
>>> m = mdt.Table(mlib, features=(restyp1,restyp2,gap))
>>> print m[0,0,0]
You can also access an element as m[0][0][0], a 1D section as m[0][0],
or a 2D section as m[0]. See :class:`TableSection`.
"""
_modpt = None
_basept = None
_mlib = None
__views = None
def __init__(self, mlib, file=None, features=None, bin_type=Double,
shape=[]):
if not isinstance(bin_type, _BinType):
raise TypeError("bin_type must be a BinType object - "
"e.g. mdt.Float, mdt.Double")
self.__views = []
self._modpt = _mdt.mdt_new(bin_type._bin_type)
self._mlib = mlib
if file:
if file.endswith(".hdf5"):
self.read_hdf5(file)
else:
self.read(file)
elif features:
self.make(features, shape)
def __getstate__(self):
        d = TableSection.__getstate__(self)  # delegate to the base class, not Table itself
d['bin_type'] = _mdt.mod_mdt_bin_type_get(self._basept)
return d
def __setstate__(self, d):
self.__dict__.update(d)
self._modpt = _mdt.mdt_new(d.pop('bin_type'))
def __del__(self):
if self._modpt is not None:
_mdt.mdt_free(self._modpt)
def __iadd__(self, other):
_mdt.mdt_add(self._modpt, other._modpt)
return self
def __add__(self, other):
mdtout = self.copy()
mdtout += other
return mdtout
def read(self, file):
"""Read an MDT from `file`.
ValueError is raised if any views of the table exist
(see :meth:`Table.get_array_view`)."""
self.__check_for_views()
_mdt.mdt_read(self._modpt, self._mlib._modpt, file)
def read_hdf5(self, file):
"""Read an MDT in HDF5 format from `file`.
ValueError is raised if any views of the table exist
(see :meth:`Table.get_array_view`)."""
self.__check_for_views()
_mdt.mdt_read_hdf5(self._modpt, self._mlib._modpt, file)
def __check_for_views(self):
# Remove any dead weakrefs
self.__views = [x for x in self.__views if x() is not None]
if len(self.__views) > 0:
raise ValueError("Cannot modify the table: views of it exist")
def __track_view(self, v):
import weakref
self.__views.append(weakref.ref(v))
def get_array_view(self):
"""Get a NumPy array 'view' of this Table. The array contains all of
the raw data in the MDT table, allowing it to be manipulated with
NumPy functions. The data are not copied; modifications made to
the data by NumPy affect the data in the Table (and vice versa).
Functions that destroy the data in the Table (:meth:`Table.make`,
:meth:`Table.read` and :meth:`Table.read_hdf5`) cannot be called
if any NumPy array views exist, since they would invalidate the
views. The views must first be deleted.
If MDT was not built with NumPy support, a NotImplementedError
exception is raised. If NumPy cannot be loaded, an ImportError
is raised.
:return: a view of this table.
:rtype: NumPy array
"""
v = _mdt.get_numpy(self._modpt, self)
self.__track_view(v)
return v
def copy(self, bin_type=None):
"""
If `bin_type` is specified, it is the storage type to convert the
bin data to (see :ref:`binstorage`).
:return: a copy of this MDT table.
:rtype: :class:`Table`
"""
if bin_type is None:
bin_type = _mdt.mod_mdt_bin_type_get(self._basept)
elif isinstance(bin_type, _BinType):
bin_type = bin_type._bin_type
else:
raise TypeError("bin_type must be a BinType object - "
"e.g. mdt.Float, mdt.Double")
mdtout = Table(self._mlib)
_mdt.mdt_copy(self._modpt, mdtout._modpt, bin_type)
return mdtout
def make(self, features, shape=[]):
"""Clear the table, and set the features. `features` must be a list of
previously created objects from the :mod:`mdt.features` module.
If given, `shape` has the same meaning as in :meth:`Table.reshape`
and causes the table to use only a subset of the feature bins.
ValueError is raised if any views of the table exist
(see :meth:`Table.get_array_view`).
"""
self.__check_for_views()
features = self._features_to_ifeat(features)
_mdt.mdt_make(self._modpt, self._mlib._modpt, features, shape)
def clear(self):
"""Clear the table (set all bins to zero)"""
_mdt.mdt_clear(self._modpt)
def write(self, file, write_preamble=True):
"""Write the table to `file`. If `write_preamble` is False, it will
only write out the contents of the MDT table, without the preamble
including the feature list, bins, etc. This is useful for example
for creating a file to be read by another program, such as
Mathematica."""
_mdt.mdt_write(self._modpt, self._mlib._modpt, file, write_preamble)
def write_hdf5(self, file, gzip=False, chunk_size=1024*1024*10):
"""
Write an MDT in HDF5 format to `file`.
Certain library information (such as the mapping from feature
values to bin indices, and atom or tuple class information)
and information about the last scan is also written to the file.
(This information will be missing or incomplete if
:meth:`add_alignment` hasn't first been called.)
Note that this information is not read back in by :meth:`read_hdf5`;
it is intended primarily for other programs that want to reproduce
the environment in which the MDT was generated as closely as possible.
:Parameters:
- `gzip`: If True, compress the table in the HDF5 file with gzip
using the default compression level; if a number from 0-9, compress
using that gzip compression level (0=no compression, 9=most);
if False (the default) do not compress.
- `chunk_size`: when using gzip, the table must be split up into
chunks (otherwise it is written contiguously). This parameter
can either be a list (the same length as the number of features)
defining the size of each chunk, or it can be the approximate
number of data points in each chunk, in which case the dimensions
of the chunk are chosen automatically.
"""
if gzip is False:
gzip = -1
elif gzip is True:
gzip = 6
if gzip >= 0 and not hasattr(chunk_size, '__len__'):
chunk_size = self._guess_chunk_size(self.shape, chunk_size)
_mdt.mdt_write_hdf5(self._modpt, self._mlib._modpt, file, gzip,
chunk_size)
def _guess_chunk_size(self, shape, chunk_size):
"""Determine a suitable chunk size given the total number of points"""
import math
total_size = 1
for d in shape:
total_size *= d
div = math.pow(float(total_size) / float(chunk_size),
1. / float(len(shape)))
ret = []
for d in shape:
cd = int(float(d) / div)
if cd < 1:
cd = 1
elif cd > d:
cd = d
ret.append(cd)
return ret
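# Illustrative check, not part of the original module: for a hypothetical
# 3-feature table of shape (20, 30, 100) and a target of roughly 10000 points
# per chunk, the heuristic above divides every dimension by
# (20*30*100 / 10000) ** (1/3), about 1.82, and clamps each result to [1, d].
def _example_chunk_dimensions():
    import math
    shape = (20, 30, 100)     # hypothetical feature bin counts
    target_points = 10000     # approximate points per chunk
    total = 1
    for d in shape:
        total *= d
    div = math.pow(float(total) / target_points, 1.0 / len(shape))
    return [max(1, min(d, int(d / div))) for d in shape]   # -> [11, 16, 55]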
def reshape(self, features, offset, shape):
"""
Reorder the MDT features and optionally decrease their ranges.
When an MDT is created, each feature has exactly the bins defined in
the `Library`'s bin file. However, for each feature, you can change
the offset (initial number of bins from the bin file to omit) from the
default 0, and the shape (total number of bins).
All parameters should be lists with the same number of elements as
the MDT has features.
:Parameters:
- `features`: the new ordering of the MDT features.
- `offset`: the new offset (see `offset`).
- `shape`: the new shape (see `shape`). If any element in this list
is 0 or negative, it is added to the MDT's existing shape to get
the new value. Thus, a value of 0 would leave the shape unchanged,
-1 would remove the last (undefined) bin, etc.
:return: the reshaped MDT.
:rtype: :class:`Table`
"""
features = self._features_to_ifeat(features)
mdtout = Table(self._mlib)
_mdt.mdt_reshape(self._modpt, mdtout._modpt, features, offset, shape)
return mdtout
def smooth(self, dimensions, weight):
r"""
Smooth the MDT with a uniform prior. The MDT is treated either as a
histogram (if `dimensions` = 1) or a 2D density (`dimensions` = 2)
of dependent features (the last 1 or 2 features in the table)
and a uniform distribution is added followed by scaling:
p\ :sub:`i` = |w1| / n + |w2| |vi| / S
S = Σ\ :sub:`i`\ :sup:`n` |vi|
|w1| = 1 / ( 1 + S / (`weight` * n))
|w2| = 1 - |w1|
where *v* is the input MDT array, *n* is the number of bins in the
histogram, and *p* is the output MDT array, smoothed and normalized.
`weight` is the number of points per bin in the histogram at which
the relative weights of the input histogram and the uniform prior
are equal.
The sum of the bins in the output MDT array is 1, for each histogram.
Note that the resulting output MDT array is not necessarily a PDF,
because the bin widths are not taken into account during scaling.
That is, the sum of all bin values multiplied by the bin widths is not
1 if the bin widths are not 1.
:return: the smoothed MDT.
:rtype: :class:`Table`
.. |w1| replace:: w\ :sub:`1`
.. |w2| replace:: w\ :sub:`2`
.. |vi| replace:: v\ :sub:`i`
"""
mdtout = Table(self._mlib)
_mdt.mdt_smooth(self._modpt, mdtout._modpt, dimensions, weight)
return mdtout
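# Illustrative re-implementation, not part of the original module, of the 1D
# uniform-prior smoothing formula documented above (p_i = w1/n + w2*v_i/S).
# It assumes a non-empty list of bin values and weight > 0.
def _uniform_prior_smooth_1d(values, weight):
    n = len(values)
    s = float(sum(values))
    w1 = 1.0 / (1.0 + s / (weight * n))
    w2 = 1.0 - w1
    if s > 0:
        return [w1 / n + w2 * v / s for v in values]
    return [1.0 / n] * n      # empty histogram: w1 = 1, uniform output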
def normalize(self, dimensions, dx_dy, to_zero, to_pdf):
"""
Normalize or scale the MDT. It does not really matter what the
contents of the input MDT are; sensible contents include the raw
or normalized frequencies.
:Parameters:
- `dimensions`: specifies whether a 1D or a 2D table is
normalized. More precisely, the input distributions are
*p(x | a, b, c, ...)* if `dimensions` = 1, or
*p(x, y | a, b, c, ...)* if `dimensions` = 2, where y and x are
the second to last and last features in the list of features.
- `dx_dy`: widths of the bins (either one or two numbers,
depending on `dimensions`). If the value of either dx or dy
is -999, the corresponding bin width is extracted from the MDT
data structure (not available for all features).
- `to_zero`: if the histogram is empty, setting this True will set
the bin values to zero, and False will yield a uniform
distribution. It has no effect when the histogram is not empty.
- `to_pdf`: if False, the output is obtained by scaling the input
such that for 1D histograms Σ :sub:`i` p(x :sub:`i`) = 1,
and for 2D histograms Σ :sub:`i,j` p(x :sub:`i,j`) = 1. Note
that `dx_dy` is **not** taken into account during this scaling.
If it is True, the normalization takes into account `dx_dy` so
that the normalized distribution is actually a PDF. That is,
Σ :sub:`i` p(x :sub:`i`) dx = 1 for 1D and
Σ :sub:`i,j` p(x :sub:`i,j`) dx dy = 1 for 2D, where dx and
dy are the widths of the bins.
:return: the normalized MDT.
:rtype: :class:`Table`
"""
mdtout = Table(self._mlib)
_mdt.mdt_normalize(self._modpt, mdtout._modpt, self._mlib._modpt,
dimensions, dx_dy, to_zero, to_pdf)
return mdtout
def integrate(self, features):
"""
Integrate the MDT, and reorder the features. This is useful for
squeezing large MDT arrays into smaller ones, and also for
eliminating unwanted features (such as X-ray resolution) in
preparation for :meth:`Table.write`.
:Parameters:
- `features`: the new features (all must be present in the
original MDT).
:return: the integrated MDT.
:rtype: :class:`Table`
"""
features = self._features_to_ifeat(features)
mdtout = Table(self._mlib)
_mdt.mdt_integrate(self._modpt, mdtout._modpt, features)
return mdtout
def exp_transform(self, offset, expoffset, multiplier, power):
r"""
Apply an exponential transform to the MDT.
Each element in the new MDT, *b*, is obtained from the original
MDT element *a*, using the following relation:
*b = offset + exp(expoffset + multiplier \* a ^ power)*.
:rtype: :class:`Table`
"""
mdtout = self.copy()
_mdt.mdt_exp_transform(mdtout._basept, offset, expoffset, multiplier,
power)
return mdtout
def log_transform(self, offset, multiplier, undefined=0.):
r"""
Apply a log transform to the MDT.
Each element in the new MDT, *b*, is obtained from the original
MDT element *a*, using the following relation:
*b = ln(offset + multiplier \* a)*. Where this would involve the
logarithm of a negative number, *b* is assigned to be `undefined`.
:return: the transformed MDT.
:rtype: :class:`Table`
"""
mdtout = self.copy()
_mdt.mdt_log_transform(mdtout._basept, offset, multiplier, undefined)
return mdtout
def linear_transform(self, offset, multiplier):
r"""
Apply a linear transform to the MDT.
Each element in the new MDT, *b*, is obtained from the original
MDT element *a*, using the following relation:
*b = offset + a \* multiplier*.
:return: the transformed MDT.
:rtype: :class:`Table`
"""
mdtout = self.copy()
_mdt.mdt_linear_transform(mdtout._basept, offset, multiplier)
return mdtout
def inverse_transform(self, offset, multiplier, undefined=0.):
"""
Apply an inverse transform to the MDT.
Each element in the new MDT, *b*, is obtained from the original
MDT element *a*, using the following relation:
*b = offset + multiplier / a*. Where *a* is zero, *b* is
assigned to be `undefined`.
:return: the transformed MDT.
:rtype: :class:`Table`
"""
mdtout = self.copy()
_mdt.mdt_inverse_transform(mdtout._basept, offset, multiplier,
undefined)
return mdtout
def offset_min(self, dimensions):
"""
Offset the MDT by the minimum value, either in each 1D section
(`dimensions` = 1) or in each 2D section (`dimensions` = 2).
:return: the transformed MDT.
:rtype: :class:`Table`
"""
mdtout = self.copy()
_mdt.mdt_offset_min(mdtout._basept, dimensions)
return mdtout
def close(self, dimensions):
"""
Attempt to 'close' the MDT, so that it is useful for creating splines
of periodic features.
If `dimensions` = 1, it makes the two terminal points equal to their
average. If `dimensions` = 2, it applies the averages to both pairs
of edges and then again to all four corner points.
:return: the closed MDT.
:rtype: :class:`Table`
"""
mdtout = self.copy()
_mdt.mdt_close(mdtout._basept, dimensions)
return mdtout
def entropy_full(self):
"""Print full entropy information."""
return _mdt.mdt_entropy_full(self._basept, self._mlib._modpt)
def entropy_hx(self):
r"""
The MDT is integrated to get a 1D histogram, then normalized by
the sum of the bin values. Finally, entropy is calculated as
Σ\ :sub:`i` -p\ :sub:`i` ln p\ :sub:`i`
:return: the entropy of the last dependent variable.
:rtype: float
"""
return _mdt.mdt_entropy_hx(self._basept)
def super_smooth(self, dimensions, prior_weight, entropy_weighing):
"""
Multi-level smoothing. This super-smoothes the raw frequencies in
the MDT using the hierarchical smoothing procedure for 1D histograms
described in Sali and Blundell, JMB 1993. It was also employed in
Sali and Overington, Prot Sci. 1994.
Briefly, the idea is to recursively construct the best possible
prior distribution for smoothing 1D data *p(x | a, b, c, ...)*.
The best prior is a weighted sum (weights optionally based on
entropy) of the best possible estimate of *p(x | a, b, ...)*
integrated over *c* for each *c*. Each one of these can itself be
obtained from a prior and the data, and so on recursively.
The example above is for a single dependent feature (*x*), which is the
case when `dimensions` = 1. *x* should be the last feature in the
table. `dimensions` can be set to other values if you have more
dependent features - for example, `dimensions` = 2 will work with
*p(x, y | a, b, c, ...)* where *x* and *y* are the last two features
in the table.
:Parameters:
- `dimensions`: Number of dependent features.
- `prior_weight`: Weight for the prior distribution.
- `entropy_weighing`: Whether to weight distributions by their
entropies.
:return: the smoothed MDT.
:rtype: :class:`Table`
"""
mdtout = Table(self._mlib)
_mdt.mdt_super_smooth(self._modpt, mdtout._modpt, dimensions,
prior_weight, entropy_weighing)
return mdtout
def write_asgl(self, asglroot, text, dimensions, plot_position,
plots_per_page, plot_density_cutoff=-1., plot_type='HIST2D',
every_x_numbered=1, every_y_numbered=1, x_decimal=1,
y_decimal=1):
"""
Make input files for ASGL.
:Parameters:
- `asglroot`: filename prefix for ASGL TOP script and data files.
- `text`: ASGL command lines that are written for each plot.
- `dimensions`: whether to make 1D or 2D plots.
- `plot_position`: position of the plot on the page, in
ASGL convention.
- `plots_per_page`: number of plots per page.
- `plot_density_cutoff`: the minimal sum of the bin values that
each plot has to have before it is actually written out;
otherwise it is ignored. This helps to avoid wasting paper
on empty plots when the MDT array data are sparse.
- `plot_type`: select 'HIST2D' or 'PLOT2D' when `dimensions` = 2.
- `every_x_numbered`: spacing for labels on the X axis.
- `every_y_numbered`: spacing for labels on the Y axis.
- `x_decimal`: the number of decimal places used to write
X feature values.
- `y_decimal`: the number of decimal places used to write
Y feature values.
"""
return _mdt.mdt_write_asgl(self._basept, self._mlib._modpt, asglroot,
text, dimensions, every_x_numbered,
every_y_numbered, plot_density_cutoff,
plots_per_page, plot_position, plot_type,
x_decimal, y_decimal)
def add_alignment(self, aln, distngh=6.0, surftyp=1, accessibility_type=8,
residue_span_range=(-99999, -2, 2, 99999),
chain_span_range=(-99999, 0, 0, 99999),
bond_span_range=None, disulfide=False,
exclude_bonds=False, exclude_angles=False,
exclude_dihedrals=False, sympairs=False,
symtriples=False, io=None, edat=None):
"""
Add data from a Modeller alignment to this MDT.
This method will first scan through all proteins, pairs of proteins,
or triples of proteins in the alignment (it will scan all triples if
the :class:`mdt.Library` contains features defined on all of
proteins 0, 1 and 2, pairs if the features are defined on two
different proteins, and individual proteins otherwise). Within each
protein, it may then scan through all residues, atoms, etc. if the
features request it (see :ref:`the scan types table <scantypes>`).
:Parameters:
- `aln`: Modeller alignment.
- `distngh`: distance below which residues are considered neighbors.
Used by :class:`features.NeighborhoodDifference`.
- `surftyp`: 1 for PSA contact area, 2 for surface area.
Used by :class:`features.AtomAccessibility`.
- `accessibility_type`: PSA accessibility type (1-10).
Used by :class:`features.AtomAccessibility`.
- `residue_span_range`: sequence separation (inclusive) for
:ref:`residue pair <residue_pair_features>`,
:ref:`atom pair <atom_pair_features>` and
:ref:`tuple pair <tuple_pair_features>` features. For the two
residue indices r1 and r2 in the tuple-tuple and atom-atom cases,
or two alignment position indices in the residue-residue case,
at least one of the following must be true:
*residue_span_range[0] <= (r2 - r1) <= residue_span_range[1]* or
*residue_span_range[2] <= (r2 - r1) <= residue_span_range[3]*
For symmetric residue-residue features, only the single condition
*residue_span_range[2] <= abs(r2 - r1) <= residue_span_range[3]*
must be met. (A small illustrative check of the non-symmetric rule
follows this method.)
For example, the default value of (-99999, -2, 2, 99999) excludes
all pairs within the same residue (for which the sequence
separation is 0) or within adjacent residues (for which the
separation is 1 or -1).
- `chain_span_range`: works like `residue_span_range`, but for the
chain indices. It is used only by the
:ref:`atom pair <atom_pair_features>` and
:ref:`tuple pair <tuple_pair_features>` features. The default value
of (-99999, 0, 0, 99999) allows all interactions. For example,
using (-99999, -1, 1, 99999) instead would exclude all interactions
within the same chain.
- `bond_span_range`: if given, it should be a list of two integers
which specify the minimum and maximum number of bonds that separate
a pair of atoms in the scan. It is used only by the
:ref:`atom pair <atom_pair_features>` and
:ref:`tuple pair <tuple_pair_features>` features. (See
:class:`features.AtomBondSeparation` for more details.) The bond
library (see :attr:`Library.bond_classes`) must be loaded to use
this. For example, using (1, 2) will include only atoms that
are directly chemically bonded or that are both bonded to a third
atom, while (0, 9999) will only exclude pairs of atoms that have
no path of bonds between them (e.g. atoms in different chains or
when at least one of the atoms is not involved in any bonds).
As a special case, if the maximum span is negative, no limit is
enforced. For example, (2, 99999) will include all atoms that have
a path of bonds between them except directly bonded pairs (and
thus exclude pairs in different chains) while (2, -1) will also
include inter-chain interactions.
- `disulfide`: if True, then the `bond_span_range` considers
disulfide bonds (defined as any pair of SG atoms in CYS residues
less than 2.5 angstroms apart) when calculating the bond separation
between atoms. Only disulfide bridges within 3 residues of the
atom pair are considered for computational efficiency.
- `exclude_bonds`: if True, then all pairs of atoms involved in a
chemical bond (see :attr:`Library.bond_classes`) are excluded from
:ref:`atom pair <atom_pair_features>` and
:ref:`tuple pair <tuple_pair_features>` features.
- `exclude_angles`: if True, then the 1-3 pair of atoms from each
angle are excluded (see `exclude_bonds`).
- `exclude_dihedrals`: if True, then the 1-4 pair of atoms from each
dihedral are excluded (see `exclude_bonds`).
- `sympairs`: if True, then protein pair scans are done in a
symmetric fashion - e.g. when scanning an alignment of A, B and
C, the following pairs are scanned: AB, BC, AC. By default a
non-symmetric scan is performed, scanning AB, BC, AC, BA, CB, CA.
- `symtriples`: if True, then protein triple scans are done in a
symmetric fashion - e.g. when scanning an alignment of A, B and
C, the following triples are scanned: ABC, ACB, BAC. By default a
non-symmetric scan is performed, scanning ABC, ACB, BAC, CBA,
BCA, CAB.
"""
if io is None:
io = self._mlib._env.io
if edat is None:
edat = self._mlib._env.edat
_mdt.mdt_add_alignment(self._modpt, self._mlib._modpt, aln.modpt,
distngh, False, surftyp, accessibility_type,
residue_span_range, chain_span_range,
_prepare_bond_span(bond_span_range), disulfide,
exclude_bonds, exclude_angles,
exclude_dihedrals, sympairs, symtriples,
io.modpt, edat.modpt)
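# Illustrative helper, not part of the original module, showing how a span
# range of the form (a, b, c, d) is interpreted for a residue index pair in
# the non-symmetric case: the separation must fall in [a, b] or in [c, d].
def _example_span_allowed(r1, r2, span_range=(-99999, -2, 2, 99999)):
    sep = r2 - r1
    return (span_range[0] <= sep <= span_range[1]
            or span_range[2] <= sep <= span_range[3])
# With the default range, separations of 0 and 1 are rejected, while a
# separation of 2 or more (in either direction) is accepted.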
def add_alignment_witherr(self, aln, distngh=6.0, surftyp=1,
accessibility_type=8,
residue_span_range=(-99999, -2, 2, 99999),
chain_span_range=(-99999, 0, 0, 99999),
bond_span_range=None, disulfide=False,
exclude_bonds=False, exclude_angles=False,
exclude_dihedrals=False,
sympairs=False, symtriples=False, io=None,
edat=None, errorscale=1):
"""
Add data from a Modeller alignment to this MDT. Same as add_alignment,
except that the errors in the data are taken into account.
The parameter errorscale controls how the error is used:
- `0`: the errors are ignored; this function is the same as
add_alignment.
- `>0` : the errors are taken into account by propagating the errors
in each axis of each atom into the calculated distances
or angles. The errors in the position of individual
atoms are first calculated using B-iso, X-ray resolution,
and R-factor, and then divided by this errorscale value.
"""
if io is None:
io = self._mlib._env.io
if edat is None:
edat = self._mlib._env.edat
_mdt.mdt_add_alignment_witherr(self._modpt, self._mlib._modpt,
aln.modpt, distngh, False, surftyp,
accessibility_type, residue_span_range,
chain_span_range,
_prepare_bond_span(bond_span_range),
disulfide,
exclude_bonds,
exclude_angles, exclude_dihedrals,
sympairs, symtriples, io.modpt,
edat.modpt, errorscale)
def open_alignment(self, aln, distngh=6.0, surftyp=1, accessibility_type=8,
sympairs=False, symtriples=False, io=None, edat=None):
"""
Open a Modeller alignment to allow MDT indices to be queried
(see :class:`Source`). Arguments are as for
:meth:`Table.add_alignment`.
:rtype: :class:`Source`
"""
return Source(self, self._mlib, aln, distngh, surftyp,
accessibility_type, sympairs, symtriples, io, edat)
def _features_to_ifeat(self, features):
"""Utility function to map objects from `mdt.features` to
integer feature types"""
ifeat = []
if not isinstance(features, (list, tuple)):
features = (features,)
for feat in features:
if hasattr(feat, '_get_ifeat'):
ifeat.append(feat._get_ifeat(self._mlib))
else:
raise TypeError("features should be objects from mdt.features")
return ifeat
def __get_pdf(self):
return _mdt.mdt_pdf_get(self._modpt)
def __get_n_proteins(self):
return _mdt.mdt_n_proteins_get(self._modpt)
def __get_n_protein_pairs(self):
return _mdt.mdt_n_protein_pairs_get(self._modpt)
def __get_sample_size(self):
return _mdt.mdt_sample_size_get(self._modpt)
def __get_symmetric(self):
return _mdt.mdt_symmetric_get(self._modpt)
def __get_basept(self):
return _mdt.mdt_base_get(self._modpt)
_basept = property(__get_basept)
pdf = property(__get_pdf, doc="Whether this MDT is a PDF")
n_proteins = property(__get_n_proteins, doc="Number of proteins")
n_protein_pairs = property(__get_n_protein_pairs,
doc="Number of protein pairs")
sample_size = property(__get_sample_size, doc="Number of sample points")
symmetric = property(__get_symmetric,
doc="True if a symmetric scan can be performed")
class _FeatureList(modlist.FixList):
"""A list of all features in an MDT."""
def __init__(self, mdt):
self.__mdt = mdt
self.__removed_rank = mdt._get_removed_rank()
modlist.FixList.__init__(self)
def __len__(self):
return _mdt.mod_mdt_nfeat_get(self.__mdt._basept) - self.__removed_rank
def _getfunc(self, indx):
return Feature(self.__mdt, indx + self.__removed_rank)
class Feature(object):
"""A single feature in an MDT. Generally accessed as
:attr:`TableSection.features`."""
def __init__(self, mdt, indx):
self._mdt = mdt
self._indx = indx
def __get_ifeat(self):
return _mdt.mod_mdt_feature_ifeat_get(self._modpt)
def __get_bins(self):
return _BinList(self)
def __get_offset(self):
return _mdt.mod_mdt_feature_istart_get(self._modpt) - 1
def __get_periodic(self):
return _mdt.mdt_feature_periodic_get(self._mdt._mlib._modpt,
self.ifeat)
def __get_modpt(self):
return _mdt.mod_mdt_feature_get(self._mdt._basept, self._indx)
_modpt = property(__get_modpt)
ifeat = property(__get_ifeat, doc="Integer type")
bins = property(__get_bins,
doc="Feature bins; a list of :class:`Bin` objects")
offset = property(__get_offset,
doc="Offset of first bin compared to the MDT library "
"feature (usually 0, but can be changed with "
":meth:`Table.reshape`)")
periodic = property(__get_periodic, doc="Whether feature is periodic")
class _BinList(modlist.FixList):
"""A list of all bins in a feature."""
def __init__(self, feature):
self.__feature = feature
self._mdt = feature._mdt
modlist.FixList.__init__(self)
def __len__(self):
return _mdt.mod_mdt_feature_nbins_get(self.__feature._modpt)
def _getfunc(self, indx):
return Bin(self.__feature, indx)
class Bin(object):
"""A single bin in a feature. Generally accessed as
:attr:`Feature.bins`."""
def __init__(self, feature, indx):
self.__feature = feature
self.__indx = indx
def __get_symb(self):
return _mdt.mod_mdt_bin_symbol_get(self._modpt)
def __get_range(self):
return (_mdt.mod_mdt_bin_rang1_get(self._modpt),
_mdt.mod_mdt_bin_rang2_get(self._modpt))
def __get_modpt(self):
nfeat = self.__feature._indx
mdt = self.__feature._mdt._basept
mlib = self.__feature._mdt._mlib._modpt
return _mdt.mdt_library_bin_get(mdt, mlib, nfeat, self.__indx)
_modpt = property(__get_modpt)
symbol = property(__get_symb, doc="Bin symbol")
range = property(__get_range,
doc="Bin range; usually the minimum and maximum "
"floating-point values for the feature to be "
"placed in this bin.")
class Source(object):
"""A source of data for an MDT (generally a Modeller alignment, opened
with :meth:`Table.open_alignment`)."""
def __init__(self, mdt, mlib, aln, distngh, surftyp, accessibility_type,
sympairs, symtriples, io, edat):
self._mdt = mdt
self._mlib = mlib
self._aln = aln
if io is None:
io = mlib._env.io
if edat is None:
edat = mlib._env.edat
self._edat = edat
self._modpt = _mdt.mdt_alignment_open(mdt._modpt, mlib._modpt,
aln.modpt, distngh, False,
surftyp, accessibility_type,
sympairs, symtriples, io.modpt)
def __del__(self):
if hasattr(self, "_modpt"):
_mdt.mdt_alignment_close(self._modpt)
def sum(self, residue_span_range=(-99999, -2, 2, 99999),
chain_span_range=(-99999, 0, 0, 99999),
bond_span_range=None, disulfide=False,
exclude_bonds=False, exclude_angles=False,
exclude_dihedrals=False):
"""Scan all data points in the source, and return the sum.
See :meth:`Table.add_alignment` for a description of the
`residue_span_range`, `chain_span_range` and `exclude_*`
arguments."""
f = _mdt.mdt_source_sum
return f(self._modpt, self._mdt._modpt, self._mlib._modpt,
residue_span_range, chain_span_range,
_prepare_bond_span(bond_span_range), disulfide,
exclude_bonds, exclude_angles, exclude_dihedrals,
self._edat.modpt)
def index(self, feat, is1, ip1, is2, ir1, ir2, ir1p, ir2p, ia1, ia1p,
ip2, ibnd1, ibnd1p, is3, ir3, ir3p):
"""
Return the bin index (starting at 1) of a single MDT feature.
(Arguments ending in 2 and 3 are used for features involving pairs
or triples of proteins.)
.. warning:: This is a low-level interface, and no bounds checking is
performed on these parameters. Avoid this function if possible.
:Parameters:
- `feat`: MDT feature object from :mod:`mdt.features` module.
- `is1`: index of the sequence within the alignment.
- `ip1`: position within the sequence (i.e. including gaps).
- `ir1`: residue index (i.e. not including alignment gaps).
- `ir1p`: second residue index for residue-residue features.
- `ia1`: atom index.
- `ia1p`: second atom index for atom-atom features.
- `ibnd1`: bond or tuple index.
- `ibnd1p`: second bond/tuple index for bond-bond or tuple-tuple
features.
"""
f = _mdt.mdt_alignment_index
return f(self._modpt, feat._ifeat, is1, ip1, is2, ir1, ir2, ir1p, ir2p,
ia1, ia1p, ip2, ibnd1, ibnd1p, is3, ir3, ir3p,
self._mlib._modpt, self._edat.modpt)
def _pass_cutoffs(mdt, num, bin, density_cutoff, entropy_cutoff):
if density_cutoff is not None:
sum = mdt[num].sum()
if sum < density_cutoff:
print("Restraint %s skipped: density %.4f below cutoff %.4f"
% (bin.symbol, sum, density_cutoff))
return False
if entropy_cutoff is not None:
entropy = mdt[num].entropy()
if entropy > entropy_cutoff:
print("Restraint %s skipped: entropy %.4f above cutoff %.4f"
% (bin.symbol, entropy, entropy_cutoff))
return False
return True
def _write_meanstdevlib(fh, mdt, numat, phystype, feattype, convfunc,
density_cutoff=None, entropy_cutoff=None):
fh.write("# residue atoms mean stdev\n")
fh.write("_params = [\n")
for (num, bin) in enumerate(mdt.features[0].bins):
if _pass_cutoffs(mdt, num, bin, density_cutoff, entropy_cutoff):
symbols = bin.symbol.split(':')
res = symbols[0]
ats = tuple(symbols[1:])
if len(ats) != numat:
example_atoms = ['CA', 'C', 'N', 'O']
raise ValueError("Bin name '%s' should be residue name "
"plus %d atoms, separated by colons, e.g. %s"
% (bin.symbol, numat,
'ALA:' + ':'.join(example_atoms[:numat])))
mean, stdev = mdt[num].mean_stdev()
fh.write(" ( '%s', %s, %.4f, %.4f ),\n"
% (res, str(ats), convfunc(mean), convfunc(stdev)))
fh.write(""" ]
def make_restraints(atmsel, restraints, num_selected):
from modeller import forms, physical, features
for (res, atoms, mean, stdev) in _params:
for a in atmsel.find_atoms(res, atoms, num_selected):
r = forms.gaussian(%s, %s(*a), mean,
stdev)
restraints.add(r)\n""" % (phystype, feattype))
def _noconv(a):
return a
def _degrees_to_radians(a):
pi = 3.14159265358979323846
return a / 180.0 * pi
def write_bondlib(fh, mdt, density_cutoff=None, entropy_cutoff=None):
"""
Write out a Modeller bond library file from an MDT. The input MDT should be
a 2D table (usually of bond type and bond distance). For each bond type,
the 1D MDT section (see :class:`TableSection`) of bond distance is
examined, and its mean and standard deviation used to generate a
Modeller harmonic restraint.
:Parameters:
- `fh`: Python file to write to
- `mdt`: input MDT :class:`Table` object
- `density_cutoff`: if specified, MDT bond distance sections with sums
below this value are not used
- `entropy_cutoff`: if specified, MDT bond distance sections with
entropies above this value are not used
"""
_write_meanstdevlib(fh, mdt, 2, "physical.bond", "features.distance",
_noconv, density_cutoff, entropy_cutoff)
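# Hypothetical usage sketch, not part of the original module: writing a bond
# library from an already-built 2D MDT of bond type against bond distance.
# How `bond_table` is constructed is outside the scope of this excerpt, and
# the output file name and cutoff below are placeholders.
def _example_write_bond_library(bond_table, path='bonds.py'):
    with open(path, 'w') as fh:
        write_bondlib(fh, bond_table, density_cutoff=0.1)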
def write_anglelib(fh, mdt, density_cutoff=None, entropy_cutoff=None):
"""
Write out a Modeller angle library file from an MDT. See
:func:`write_bondlib` for more details. The MDT should be a 2D table,
usually of angle type and bond angle.
"""
_write_meanstdevlib(fh, mdt, 3, "physical.angle", "features.angle",
_degrees_to_radians, density_cutoff, entropy_cutoff)
def write_improperlib(fh, mdt, density_cutoff=None, entropy_cutoff=None):
"""
Write out a Modeller dihedral angle library file from an MDT. See
:func:`write_bondlib` for more details. The MDT should be a 2D table,
usually of dihedral type and bond dihedral angle.
"""
_write_meanstdevlib(fh, mdt, 4, "physical.improper", "features.dihedral",
_degrees_to_radians, density_cutoff, entropy_cutoff)
def _get_splinerange(feat):
periodic = feat.periodic
# histogram bin size in real units:
startrange = feat.bins[0].range
endrange = feat.bins[-1].range
dx = startrange[1] - startrange[0]
# left and right value of the range of the spline:
x1 = startrange[0] + 0.5 * dx
if periodic:
x2 = endrange[1] + 0.5 * dx
else:
x2 = endrange[1] - 0.5 * dx
dx = _degrees_to_radians(dx)
x1 = _degrees_to_radians(x1)
x2 = _degrees_to_radians(x2)
return periodic, dx, x1, x2
def write_statpot(fh, mdt):
"""
Write out a Modeller statistical potential file (as accepted by
group_restraints.append()). The MDT is assumed to be a 3D table of distance
against the types of the two atoms. No special processing
is done, so it is expected that the user has first done any necessary
transformations (e.g. normalization with :meth:`Table.normalize` to
convert raw counts into a PDF, negative log transform with
:meth:`Table.log_transform` and :meth:`Table.linear_transform` to
convert a PDF into a statistical potential).
"""
modality = len(mdt.features[0].bins)
delta = mdt.features[0].bins[0].range[1]
low = mdt.features[0].bins[0].range[0] + delta / 2.
high = mdt.features[0].bins[-1].range[0] + delta / 2.
fh.write("MOD5\n")
fmt = "R" + " %4d" * 7 + " %5s %5s " + " %9.4f" * 6 + " "
for na1, a1 in enumerate(mdt.features[1].bins):
for na2, a2 in enumerate(mdt.features[2].bins[na1:]):
splinevals = ["%9.4f" % mdt[x, na1, na2] for x in range(modality)]
fh.write(fmt % (10, modality, 1, 31, 2, modality + 6, 0, a1.symbol,
a2.symbol, 0., low, high, delta, 0., 0.)
+ " ".join(splinevals) + "\n")
def write_splinelib(fh, mdt, dihtype, density_cutoff=None,
entropy_cutoff=None):
"""
Write out a Modeller 1D spline library file from an MDT.
The MDT should be a 2D table, usually of residue type and a chi dihedral
angle. `dihtype` should identify the dihedral type
(i.e. chi1/chi2/chi3/chi4). The operation is similar to
:func:`write_bondlib`,
but each MDT section is treated as the spline values. No special processing
is done, so it is expected that the user has first done any necessary
transformations (e.g. normalization with :meth:`Table.normalize` to
convert raw counts into a PDF, negative log transform with
:meth:`Table.log_transform` and :meth:`Table.linear_transform` to
convert a PDF into a statistical potential).
"""
(periodic, dx, x1, x2) = _get_splinerange(mdt.features[1])
fh.write("# residue spline values\n")
fh.write("_params = [\n")
for (nx, bin) in enumerate(mdt.features[0].bins):
if _pass_cutoffs(mdt, nx, bin, density_cutoff, entropy_cutoff):
splinevals = ["%.4f" % mdt[nx, ny]
for ny in range(len(mdt.features[1].bins))]
fh.write(" ( '%s', (%s) ),\n"
% (bin.symbol, ', '.join(splinevals)))
fh.write(""" ]
def make_restraints(atmsel, restraints, num_selected):
from modeller import forms, physical, features
for (res, values) in _params:
arr = True
for a in atmsel.find_%s_dihedrals(res, num_selected):
r = forms.spline(physical.%s_dihedral,
features.dihedral(*a), open=%s, low=%.5f,
high=%.5f, delta=%.5f, lowderiv=0,
highderiv=0, values=values, use_array=arr)
arr = restraints.add(r)\n""" % (dihtype, dihtype, not periodic,
x1, x2, dx))
def write_2dsplinelib(fh, mdt, density_cutoff=None, entropy_cutoff=None):
"""
Write out a Modeller 2D spline library file from an MDT.
See :func:`write_splinelib` for more details. The input MDT should be
a 3D table, e.g. of residue type, phi angle, and psi angle.
"""
(yperiodic, dy, y1, y2) = _get_splinerange(mdt.features[1])
(zperiodic, dz, z1, z2) = _get_splinerange(mdt.features[2])
fh.write("# residue spline values\n")
fh.write("_params = [\n")
for (nx, bin) in enumerate(mdt.features[0].bins):
if _pass_cutoffs(mdt, nx, bin, density_cutoff, entropy_cutoff):
splinevals = []
for ny in range(len(mdt.features[1].bins)):
for nz in range(len(mdt.features[2].bins)):
splinevals.append("%.4f" % mdt[nx, ny, nz])
fh.write(" ( '%s', (%s) ),\n"
% (bin.symbol, ', '.join(splinevals)))
fh.write(""" ]
def make_restraints(atmsel, restraints, num_selected):
from modeller import forms, physical, features
for (res, values) in _params:
arr = True
for a in atmsel.find_atoms(res, ('-C', 'N', 'CA', 'C',
'N', 'CA', 'C', '+N'), num_selected):
r = forms.nd_spline(physical.phi_psi_dihedral, values,
dimensions=(%d,%d), use_array=arr)
r.add_dimension(features.dihedral(*a[:4]), open=%s,
low=%.5f, high=%.5f, delta=%.5f,
lowderiv=0., highderiv=0.)
r.add_dimension(features.dihedral(*a[4:]), open=%s,
low=%.5f, high=%.5f, delta=%.5f,
lowderiv=0., highderiv=0.)
arr = restraints.add(r)\n""" % (len(mdt.features[1].bins),
len(mdt.features[2].bins),
not yperiodic, y1, y2, dy,
not zperiodic, z1, z2, dz))
def uniform_bins(num, start, width):
"""Make a list of `num` equally-sized bins, each of which has the given
`width`, and starting at `start`. This is suitable for input to any of
the classes in :mod:`mdt.features` which need a list of bins."""
bins = []
for i in range(num):
st = start + width * i
en = st + width
sym = "%.1f" % st
bins.append((st, en, sym))
return bins
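# Illustrative check, not part of the original module: three bins of width 1.5
# starting at 0.0, as produced by uniform_bins() above.
def _example_uniform_bins():
    return uniform_bins(3, 0.0, 1.5)
    # -> [(0.0, 1.5, '0.0'), (1.5, 3.0, '1.5'), (3.0, 4.5, '3.0')]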
|
salilab/mdt
|
pyext/mdt/__init__.py
|
Python
|
gpl-2.0
| 60,664
|
[
"CHARMM",
"Gaussian"
] |
adc03f7c88f5722c19e5ec54c84b84aec534b0bf2007652a7feddbc165d0381c
|
# -*- coding: utf-8 -*-
#
# testiaf.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
IAF Neuron example
------------------
A DC current is injected into the neuron using a current generator
device. The membrane potential as well as the spiking activity are
recorded by corresponding devices.
It can be observed how the current charges the membrane, a spike
is emitted, the neuron becomes absolutely refractory, and finally
starts to recover.
'''
'''
First, we import all necessary modules for simulation and plotting
'''
import nest
import pylab
'''
Second, the function build_network is defined to build the network and
return the handles of the spike detector and the voltmeter.
'''
def build_network(dt):
nest.ResetKernel()
nest.SetKernelStatus({"local_num_threads": 1, "resolution": dt})
neuron = nest.Create('iaf_psc_alpha')
nest.SetStatus(neuron, "I_e", 376.0)
vm = nest.Create('voltmeter')
nest.SetStatus(vm, "withtime", True)
sd = nest.Create('spike_detector')
nest.Connect(vm, neuron)
nest.Connect(neuron, sd)
return vm, sd
'''
The function build_network takes the resolution as argument.
First the kernel is reset, the number of threads is set to one, and the
resolution is set to the specified value dt. The iaf_psc_alpha neuron is
created and its handle is stored in the variable neuron. The status of
the neuron is changed so that it receives an external current. Next the
voltmeter is created, its handle is stored in vm, and the option
'withtime' is set so that times are returned in the times vector of the
events. Then the spike_detector is created and its handle is stored in
sd.
The voltmeter and the spike detector are then connected to the neuron.
The Connect function takes the handles as input. The voltmeter is
connected to the neuron, and the neuron to the spike detector, because
the neuron sends spikes to the detector and the voltmeter 'observes'
the neuron.
'''
'''
The neuron is simulated for three different resolutions and then
the voltage trace is plotted
'''
for dt in [0.1, 0.5, 1.0]:
print("Running simulation with dt=%.2f" % dt)
vm, sd = build_network(dt)
'''
First, using build_network, the network is built and the handles of
the voltmeter and the spike detector are stored in vm and sd
nest.Simulate(1000.0)
'''
The network is simulated using `Simulate`, which takes the desired
simulation time in milliseconds and advances the network state by
this amount of time. During simulation, the `spike_detector`
counts the spikes of the target neuron and the total number is
read out at the end of the simulation period.
'''
potentials = nest.GetStatus(vm, "events")[0]["V_m"]
times = nest.GetStatus(vm, "events")[0]["times"]
'''
The values of the membrane potential recorded by the voltmeter are
read out and stored in the potentials array, with the corresponding
times in the times array
'''
pylab.plot(times, potentials, label="dt=%.2f" % dt)
print(" Number of spikes: {0}".format(nest.GetStatus(sd, "n_events")[0]))
'''
Using the pylab library the voltage trace is plotted over time
'''
pylab.legend(loc=3)
pylab.xlabel("time (ms)")
pylab.ylabel("V_m (mV)")
'''
Finally the axes are labelled and a legend is generated
'''
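'''
The following lines are not part of the original example: when the script is
run non-interactively, the figure can be saved and/or displayed explicitly.
The output file name is only a suggestion.
'''
# pylab.savefig("iaf_membrane_potential.png")
pylab.show()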
|
tillschumann/nest-simulator
|
pynest/examples/testiaf.py
|
Python
|
gpl-2.0
| 3,991
|
[
"NEURON"
] |
d41d919616f98e560d2a97c130f23ab14dae8bfece943437ac36487a7dcc9cbe
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest. Use `.predict()` for
regression problems.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest. Use `.predict_proba()` when
using for binary classification problems.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need a custom model for binary
classification.
* `Estimator`: Use when you need a custom model for N-class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used. So you can see which
feature columns are fed to each estimator in the below section.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When the number of possible classes is 2, this is binary
classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train():
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### TensorForestEstimator
Supports regression and binary classification.
```python
params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
num_classes=2, num_features=40, num_trees=10, max_nodes=1000)
# Estimator using the default graph builder.
estimator = TensorForestEstimator(params, model_dir=model_dir)
# Or estimator using TrainingLossForest as the graph builder.
estimator = TensorForestEstimator(
params, graph_builder_class=tensor_forest.TrainingLossForest,
model_dir=model_dir)
# Input builders
def input_fn_train(): # returns x, y
...
def input_fn_eval(): # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train(): # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval(): # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
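For illustration only (this example is not part of the original guide; the
single-feature linear model, the `features["x"]` input key, and the plain
gradient-descent optimizer are assumptions), a minimal `model_fn` along these
lines could look like:
```python
import tensorflow as tf

def model_fn(features, targets, mode, params):
  # 1. Configure the model: a single-feature linear model.
  w = tf.get_variable("w", shape=[], initializer=tf.constant_initializer(0.0))
  b = tf.get_variable("b", shape=[], initializer=tf.constant_initializer(0.0))
  predictions = features["x"] * w + b
  loss = None
  train_op = None
  if mode != tf.contrib.learn.ModeKeys.INFER:
    # 2. Define the loss for training/evaluation.
    loss = tf.reduce_mean(tf.square(predictions - targets))
  if mode == tf.contrib.learn.ModeKeys.TRAIN:
    # 3. Define the training operation.
    optimizer = tf.train.GradientDescentOptimizer(params["learning_rate"])
    train_op = optimizer.minimize(
        loss, global_step=tf.contrib.framework.get_global_step())
  # 4. Return predictions, loss and the training op.
  return predictions, loss, train_op
```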
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There are additional estimators under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
|
jjas0nn/solvem
|
tensorflow/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/estimators/__init__.py
|
Python
|
mit
| 11,510
|
[
"Gaussian"
] |
25b4611cefa8af80214c66f58a7b5a636988fd1eae8995c923fdc71c3a17aacd
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2017 GEM Foundation
#
# The OQ-CATK (Lite) is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# OQ-CATK (Lite) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# with this download. If not, see <http://www.gnu.org/licenses/>
#
# Author: Poggi Valerio
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as sig
import OQCatk.Selection as Sel
#-----------------------------------------------------------------------------------------
def GetHypocenter(Db, All=False):
x = Db.Extract('Longitude', All)
y = Db.Extract('Latitude', All)
z = Db.Extract('Depth', All)
return x, y, z
#-----------------------------------------------------------------------------------------
def GetMagnitudePair(Db, Code1, Code2):
Mout = [[],[],[],[]]
for E in Db.Events:
m1 = None
m2 = None
for M in E['Magnitude']:
MC = M['MagCode']
MT = M['MagType']
MS = M['MagSize']
ME = M['MagError']
if (MC == Code1[0] or Code1[0] == '*') and MT == Code1[1]:
m1 = MS
e1 = ME
if (MC == Code2[0] or Code2[0] == '*') and MT == Code2[1]:
m2 = MS
e2 = ME
if m1 and m2:
Mout[0].append(m1)
Mout[1].append(m2)
Mout[2].append(e1)
Mout[3].append(e2)
return Mout
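#-----------------------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module): plotting two
# magnitude types against each other using GetMagnitudePair above. The default
# magnitude codes are examples only.
def PlotMagnitudePair(Db, Code1=('*', 'MW'), Code2=('*', 'ML'), OutFile=[]):
    Mout = GetMagnitudePair(Db, Code1, Code2)
    plt.figure(figsize=(4, 4))
    plt.plot(Mout[0], Mout[1], 'o', markersize=3, color=[0, 0, 0])
    plt.xlabel(Code1[1], fontsize=12, fontweight='bold')
    plt.ylabel(Code2[1], fontsize=12, fontweight='bold')
    plt.tight_layout()
    plt.show(block=False)
    if OutFile:
        plt.savefig(OutFile, bbox_inches='tight', dpi=150)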
#-----------------------------------------------------------------------------------------
def GetKeyHisto(Db, Key, Bins=[], Bmin=[], Bmax=[], Bnum=10, Blog=False,
Norm=True, Plot=True, OutFile=[]):
Data = Db.Extract(Key)
# Remove missing (None) values
Data = [D for D in Data if D is not None]
if not Bins:
if not Bmin:
Bmin = min(Data)
if not Bmax:
Bmax = max(Data)
if Blog:
Bins = np.logspace(np.log10(Bmin), np.log10(Bmax), Bnum)
else:
Bins = np.linspace(Bmin, Bmax, Bnum)
Hist = np.histogram(Data, Bins)[0]
Bmid = np.diff(Bins)/2.+Bins[:-1]
Bdlt = np.diff(Bins)
if Norm:
Hist = Hist.astype('float32') / len(Data)
# Plot time histogram
if Plot:
fig = plt.figure(figsize=(6,3.5))
plt.bar(Bmid, Hist, Bdlt, color=[1,0,0], edgecolor=[1,1,1])
plt.xlabel(Key, fontsize=14, fontweight='bold')
plt.ylabel('Nr. Events', fontsize=14, fontweight='bold')
plt.tight_layout()
plt.show(block=False)
if OutFile:
plt.savefig(OutFile, bbox_inches='tight', dpi=150)
return Hist, Bmid
#-----------------------------------------------------------------------------------------
def AgencyReport(Db, Code, Key=[], LogFile=[], Threshold=0):
if Code in ['Magnitude','Mag','M']:
ItL, ItD = Db.KeyStat('MagCode')
elif Code in ['Location','Loc','L']:
ItL, ItD = Db.KeyStat('LocCode')
else:
print('Error: No valid code')
return
# Report only specific keys
if Key:
ItLs = []
ItDs = {}
if type(Key) != list:
Key = [Key]
for K in Key:
if K in ItL:
ItLs.append(K)
ItDs[K] = ItD[K]
ItL = ItLs
ItD = ItDs
StrLog = ''
for It in ItL:
if ItD[It] >= Threshold:
StrLog += 'Agency: {0} | Occurrence: {1}'.format(It, ItD[It])
if Code in ['Magnitude','Mag','M']:
DbC = Db.Filter('MagCode',It,Owrite=False)
MaL, MaD = DbC.KeyStat('MagType')
StrLog += ' | Types:'
for Ma in MaL:
StrLog += ' {0} ({1})'.format(Ma, MaD[Ma])
StrLog += '\n'
else:
break
if LogFile:
# Open input ascii file
with open(LogFile, 'w') as f:
f.write(StrLog)
f.close()
return
# Warn user if model file does not exist
print('Cannot open file')
else:
print(StrLog)
#-----------------------------------------------------------------------------------------
def KeyTimeHisto(Db, Code, Key=[],
Year0=[], Year1=[], Delta=5,
Threshold=0, OutFile=[]):
if not Year0:
Year0 = min(Db.Extract('Year'))
if not Year1:
Year1 = max(Db.Extract('Year'))
YBins = np.arange(Year0, Year1+Delta, Delta)
ItL, ItD = Db.KeyStat(Code)
# Filter by threshold
ItL = [K for K in ItL if ItD[K] >= Threshold]
ItD = {K:V for (K,V) in ItD.items() if V >= Threshold}
# Filter by key
if Key:
ItL = [K for K in ItL if K in Key]
ItD = {K:ItD[K] for K in ItL}
for N, Agn in enumerate(ItL):
DbA = Db.Filter(Code, Agn, Owrite=0)
YearArray = DbA.Extract('Year')
NewRow = np.histogram(YearArray, YBins)
if N == 0:
Histo = NewRow[0]
else:
Histo = np.vstack([Histo, NewRow[0]])
# Plot time histogram
fig = plt.figure(figsize=(8, 5))
X = YBins
Y = np.arange(0, len(ItL)+1)
Z = np.log(Histo.clip(min=1E-10))
plt.pcolor(X, Y, Z, cmap='Purples',
vmin=0,
vmax=np.max(Z))
plt.xticks(X, map(str,X), rotation='45')
plt.yticks(Y+0.5, ItL, rotation='horizontal')
plt.margins(0)
plt.gca().yaxis.tick_right()
plt.axes().yaxis.grid(True)
plt.gca().xaxis.set_ticks_position('none')
plt.gca().yaxis.set_ticks_position('none')
plt.xlabel('Year', fontsize=14, fontweight='bold')
plt.ylabel('Agency Code', fontsize=14, fontweight='bold')
plt.tight_layout()
plt.show(block=False)
if OutFile:
plt.savefig(OutFile, bbox_inches='tight', dpi=150)
#-----------------------------------------------------------------------------------------
def MagTimeBars(Db, Mag0=[], Mag1=[], MBin=0.5,
Year0=[], Year1=[], Delta=5,
OutFile=[]):
if not Mag0:
Mag0 = min(Db.Extract('MagSize'))
if not Mag1:
Mag1 = max(Db.Extract('MagSize'))
MBins = np.arange(Mag0, Mag1+MBin, MBin)
if not Year0:
Year0 = min(Db.Extract('Year'))
if not Year1:
Year1 = max(Db.Extract('Year'))
YBins = np.arange(Year0, Year1+Delta, Delta)
plt.figure(figsize=(8, 4))
for C,MB in enumerate(MBins):
DbM = Db.Filter('MagSize', MB, Opr='>=', Owrite=0)
YArray = DbM.Extract('Year')
YHist = np.histogram(YArray, YBins)[0]
Cnum = float(len(MBins))
C = (Cnum-C)/Cnum
X = YBins[:-1]
Y = YHist
if any(Y):
plt.bar(X, Y, Delta, color=[C,C,C],
log=True,
label=r'$\geq${0}'.format(MB))
plt.xticks(X, map(str,X), rotation='45')
plt.margins(0)
plt.gca().yaxis.tick_right()
plt.gca().xaxis.set_ticks_position('none')
plt.gca().yaxis.set_ticks_position('none')
plt.xlabel('Years', fontsize=14, fontweight='bold')
plt.ylabel('Nr. Events', fontsize=14, fontweight='bold')
plt.tight_layout()
plt.legend(loc=2)
plt.show(block=False)
if OutFile:
plt.savefig(OutFile, bbox_inches='tight', dpi=150)
#-----------------------------------------------------------------------------------------
def MagTimePlot(Db, Mag0=[], Mag1=[],
Year0=[], Year1=[],
CompTable=[],
OutFile=[]):
if not Mag0:
Mag0 = min(Db.Extract('MagSize'))
if not Mag1:
Mag1 = max(Db.Extract('MagSize'))
if not Year0:
Year0 = min(Db.Extract('Year'))
if not Year1:
Year1 = max(Db.Extract('Year'))
DbS = Sel.MagRangeSelect(Db, Mag0, Mag1, Owrite=0, TopEdge=True)
DbS = Sel.TimeSelect(DbS, Year0, Year1, Owrite=0)
X = DbS.Extract('Year')
Y = DbS.Extract('MagSize')
plt.figure(figsize=(7, 4))
plt.plot(X, Y, 'o',markersize=3,
color=[0,0,0],
markeredgecolor=[0,0,0],
markeredgewidth=1.5)
# Plot completeness
if CompTable:
PlotCompTable(CompTable)
plt.gca().yaxis.grid(color='0.',linestyle='-')
plt.gca().xaxis.grid(color='0.65',linestyle='--')
plt.gca().xaxis.set_ticks_position('none')
plt.gca().yaxis.set_ticks_position('none')
plt.title('Time-Magnitude Distribution', fontsize=14, fontweight='bold')
plt.xlabel('Years', fontsize=14, fontweight='bold')
plt.ylabel('Magnitude', fontsize=14, fontweight='bold')
plt.axis([Year0, Year1, Mag0, Mag1])
# plt.tight_layout()
plt.show(block=False)
if OutFile:
plt.savefig(OutFile, bbox_inches='tight', dpi=150)
#-----------------------------------------------------------------------------------------
def RateDensityPlot(Db, Mag0=[], Mag1=[], MBin=0.25,
Year0=[], Year1=[], Delta=2,
CompTable=[],
Normalise=True,
OutFile=[]):
if not Mag0:
Mag0 = min(Db.Extract('MagSize'))
if not Mag1:
Mag1 = max(Db.Extract('MagSize'))
MBins = np.arange(Mag0, Mag1+MBin, MBin)
if not Year0:
Year0 = min(Db.Extract('Year'))
if not Year1:
Year1 = max(Db.Extract('Year'))
YBins = np.arange(Year0, Year1+Delta, Delta)
Histo = np.zeros((np.size(MBins), np.size(YBins)))
# Catalogue selection (Magnitude-Year)
DbM = Sel.MagRangeSelect(Db, Mag0, Mag1, TopEdge=True)
DbY = Sel.TimeSelect(DbM, Year0, Year1)
M = DbY.Extract('MagSize')
Y = DbY.Extract('Year')
Hist = np.histogram2d(Y, M, bins=(YBins, MBins))[0]
Hist = np.transpose(Hist)
if Normalise:
for I in range(0,len(Hist)):
Max = np.max(Hist[I])
if Max > 0:
Hist[I] = Hist[I]/Max
# Plot
plt.figure(figsize=(7, 4))
plt.pcolormesh(YBins, MBins, Hist, cmap='Greys', vmin=0)
# Plot completeness
if CompTable:
PlotCompTable(CompTable)
plt.gca().xaxis.grid(color='0.65',linestyle='--')
plt.gca().yaxis.grid(color='0.',linestyle='-')
plt.gca().xaxis.set_ticks_position('none')
plt.gca().yaxis.set_ticks_position('none')
plt.title('Occurrence Rate Density', fontsize=14, fontweight='bold')
plt.xlabel('Years', fontsize=12, fontweight='bold')
plt.ylabel('Magnitude', fontsize=12, fontweight='bold')
plt.gca().xaxis.grid(color='0.65',linestyle='-')
plt.gca().yaxis.grid(color='0.65',linestyle='-')
plt.axis([Year0, Year1, Mag0, Mag1])
# plt.tight_layout()
plt.show(block=False)
if OutFile:
plt.savefig(OutFile, bbox_inches = 'tight', dpi = 150)
def PlotCompTable(CompTable):
for CT in CompTable:
X = [CT[2], CT[3], CT[3], CT[2], CT[2]]
Y = [CT[0], CT[0], CT[0]+CT[1], CT[0]+CT[1], CT[0]]
plt.plot(X, Y, 'r--', linewidth=2)
plt.fill(X, Y, color='y',alpha=0.1)
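# Hedged note added for illustration (not part of the original module): from the
# way PlotCompTable indexes its rows, each CompTable entry appears to be
#   [Mag0, DeltaMag, YearStart, YearEnd]
# i.e. the magnitude band [Mag0, Mag0+DeltaMag] is treated as complete between
# YearStart and YearEnd. A hypothetical table could look like:
#   ExampleCompTable = [[4.5, 0.5, 1960., 2010.],
#                       [5.0, 1.0, 1900., 2010.]]
# and be passed as CompTable=ExampleCompTable to MagTimePlot or RateDensityPlot above.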
#-----------------------------------------------------------------------------------------
def DuplicateCheck(Log, Tmax=[], Smax=[],
Tnum=[], Snum=[],
Smooth=[],
OutFile=[]):
"""
"""
dT = [I[4] for I in Log if I[4] > 0]
dS = [I[5] for I in Log if I[5] > 0]
if not Tmax:
Tmax = np.max(dT)
if not Smax:
Smax = np.max(dS)
if not Tnum:
Tnum = 100
if not Snum:
Snum = 100
XBins = np.linspace(0, Tmax, Tnum)
YBins = np.linspace(0, Smax, Snum)
H = np.histogram2d(dT, dS, [YBins, XBins])
def Gaussian(Size,Sigma):
x = np.arange(0, Size[0], 1, float)
y = np.arange(0, Size[1], 1, float)
Gx = np.exp(-(x-Size[0]/2)**2/Sigma[0]**2)
Gy = np.exp(-(y-Size[1]/2)**2/Sigma[1]**2)
return np.outer(Gy,Gx)
if any(Smooth):
# kern = np.ones((Smooth,Smooth))/Smooth
kern = Gaussian((Tnum,Snum),Smooth)
H0 = sig.convolve2d(H[0], kern, mode='same')
else:
H0 = H[0]
# Plot time histogram
fig = plt.figure(figsize=(5, 5))
plt.pcolor(XBins, YBins, H0, cmap='Purples')
plt.xlabel('Time', fontsize=12, fontweight='bold')
plt.ylabel('Distance', fontsize=12, fontweight='bold')
plt.grid('on')
plt.tight_layout()
plt.show(block=False)
if OutFile:
plt.savefig(OutFile, bbox_inches='tight', dpi=150)
|
GEMScienceTools/CatalogueTool-Lite
|
OQCatk/Exploration.py
|
Python
|
agpl-3.0
| 12,041
|
[
"Gaussian"
] |
2ef1b8e16ffa7b1c2014d16d5ab99d688275d5ca8f96a59e57758b8158479e9b
|
""" Failover Transfer
The failover transfer client exposes the following methods:
- transferAndRegisterFile()
- transferAndRegisterFileFailover()
Initially these methods were developed inside workflow modules but
have evolved to a generic 'transfer file with failover' client.
The transferAndRegisterFile() method will correctly set registration
requests in case of failure.
The transferAndRegisterFileFailover() method will attempt to upload
a file to a list of alternative SEs and set appropriate replication
to the original target SE as well as the removal request for the
temporary replica.
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
class FailoverTransfer( object ):
""" .. class:: FailoverTransfer
"""
#############################################################################
def __init__( self, requestObject = None, log = None, defaultChecksumType = 'ADLER32' ):
""" Constructor function, can specify request object to instantiate
FailoverTransfer or a new request object is created.
"""
self.log = log
if not self.log:
self.log = gLogger.getSubLogger( "FailoverTransfer" )
self.request = requestObject
if not self.request:
self.request = Request()
self.request.RequestName = 'noname_request'
self.request.SourceComponent = 'FailoverTransfer'
self.defaultChecksumType = defaultChecksumType
self.registrationProtocols = DMSHelpers().getRegistrationProtocols()
#############################################################################
def transferAndRegisterFile( self,
fileName,
localPath,
lfn,
destinationSEList,
fileMetaDict,
fileCatalog = None,
masterCatalogOnly = False ):
"""Performs the transfer and register operation with failover.
"""
errorList = []
fileGUID = fileMetaDict.get( "GUID", None )
fileChecksum = fileMetaDict.get( "Checksum", None )
for se in destinationSEList:
self.log.info( "Attempting dm.putAndRegister('%s','%s','%s',guid='%s',catalog='%s', checksum = '%s')" % ( lfn,
localPath,
se,
fileGUID,
fileCatalog, fileChecksum ) )
result = DataManager( catalogs = fileCatalog, masterCatalogOnly = masterCatalogOnly ).putAndRegister( lfn, localPath, se, guid = fileGUID,
checksum = fileChecksum )
self.log.verbose( result )
if not result['OK']:
self.log.error( 'dm.putAndRegister failed with message', result['Message'] )
errorList.append( result['Message'] )
continue
if not result['Value']['Failed']:
self.log.info( 'dm.putAndRegister successfully uploaded and registered %s to %s' % ( fileName, se ) )
return S_OK( {'uploadedSE':se, 'lfn':lfn} )
# Now we know something went wrong
self.log.warn( "Didn't manage to do everything, now adding requests for the missing operation" )
errorDict = result['Value']['Failed'][lfn]
if 'register' not in errorDict:
self.log.error( 'dm.putAndRegister failed with unknown error', str( errorDict ) )
errorList.append( 'Unknown error while attempting upload to %s' % se )
continue
# fileDict = errorDict['register']
# Therefore the registration failed but the upload was successful
if not fileCatalog:
fileCatalog = ''
if masterCatalogOnly:
fileCatalog = FileCatalog().getMasterCatalogNames()['Value']
result = self._setRegistrationRequest( lfn, se, fileMetaDict, fileCatalog )
if not result['OK']:
self.log.error( 'Failed to set registration request', 'SE %s and metadata: \n%s' % ( se, fileMetaDict ) )
errorList.append( 'Failed to set registration request for: SE %s and metadata: \n%s' % ( se, fileMetaDict ) )
continue
else:
self.log.info( 'Successfully set registration request for: SE %s and metadata: \n%s' % ( se, fileMetaDict ) )
metadata = {}
metadata['filedict'] = fileMetaDict
metadata['uploadedSE'] = se
metadata['lfn'] = lfn
metadata['registration'] = 'request'
return S_OK( metadata )
self.log.error( 'Failed to upload output data file', 'Encountered %s errors' % len( errorList ) )
return S_ERROR( 'Failed to upload output data file' )
#############################################################################
def transferAndRegisterFileFailover( self,
fileName,
localPath,
lfn,
targetSE,
failoverSEList,
fileMetaDict,
fileCatalog = None,
masterCatalogOnly = False ):
"""Performs the transfer and register operation to failover storage and sets the
necessary replication and removal requests to recover.
"""
failover = self.transferAndRegisterFile( fileName, localPath, lfn, failoverSEList, fileMetaDict, fileCatalog, masterCatalogOnly = masterCatalogOnly )
if not failover['OK']:
self.log.error( 'Could not upload file to failover SEs', failover['Message'] )
return failover
# set removal requests and replication requests
result = self._setFileReplicationRequest( lfn, targetSE, fileMetaDict, sourceSE = failover['Value']['uploadedSE'] )
if not result['OK']:
self.log.error( 'Could not set file replication request', result['Message'] )
return result
lfn = failover['Value']['lfn']
failoverSE = failover['Value']['uploadedSE']
self.log.info( 'Attempting to set replica removal request for LFN %s at failover SE %s' % ( lfn, failoverSE ) )
result = self._setReplicaRemovalRequest( lfn, failoverSE )
if not result['OK']:
self.log.error( 'Could not set removal request', result['Message'] )
return result
return S_OK( {'uploadedSE':failoverSE, 'lfn':lfn} )
def getRequest( self ):
""" get the accumulated request object
"""
return self.request
def commitRequest( self ):
""" Send request to the Request Management Service
"""
if self.request.isEmpty():
return S_OK()
isValid = RequestValidator().validate( self.request )
if not isValid["OK"]:
return S_ERROR( "Failover request is not valid: %s" % isValid["Message"] )
else:
requestClient = ReqClient()
result = requestClient.putRequest( self.request )
return result
#############################################################################
def _setFileReplicationRequest( self, lfn, targetSE, fileMetaDict, sourceSE = '' ):
""" Sets a registration request.
"""
self.log.info( 'Setting ReplicateAndRegister request for %s to %s' % ( lfn, targetSE ) )
transfer = Operation()
transfer.Type = "ReplicateAndRegister"
transfer.TargetSE = targetSE
if sourceSE:
transfer.SourceSE = sourceSE
trFile = File()
trFile.LFN = lfn
cksm = fileMetaDict.get( "Checksum", None )
cksmType = fileMetaDict.get( "ChecksumType", self.defaultChecksumType )
if cksm and cksmType:
trFile.Checksum = cksm
trFile.ChecksumType = cksmType
size = fileMetaDict.get( "Size", 0 )
if size:
trFile.Size = size
guid = fileMetaDict.get( "GUID", "" )
if guid:
trFile.GUID = guid
transfer.addFile( trFile )
self.request.addOperation( transfer )
return S_OK()
#############################################################################
def _setRegistrationRequest( self, lfn, targetSE, fileDict, catalog ):
""" Sets a registration request
:param str lfn: LFN
    :param str targetSE: target storage element
:param list catalog: list (or string) of catalogs to use
:param dict fileDict: file metadata
"""
self.log.info( 'Setting registration request for %s at %s.' % ( lfn, targetSE ) )
if not isinstance( catalog, list ):
catalog = [catalog]
for cat in catalog:
register = Operation()
register.Type = "RegisterFile"
register.Catalog = cat
register.TargetSE = targetSE
regFile = File()
regFile.LFN = lfn
regFile.Checksum = fileDict.get( "Checksum", "" )
regFile.ChecksumType = fileDict.get( "ChecksumType", self.defaultChecksumType )
regFile.Size = fileDict.get( "Size", 0 )
regFile.GUID = fileDict.get( "GUID", "" )
se = StorageElement( targetSE )
pfn = se.getURL( lfn, self.registrationProtocols )
if not pfn["OK"] or lfn not in pfn["Value"]['Successful']:
self.log.error( "Unable to get PFN for LFN", "%s" % pfn.get( 'Message', pfn.get( 'Value', {} ).get( 'Failed', {} ).get( lfn ) ) )
return pfn
regFile.PFN = pfn["Value"]['Successful'][lfn]
register.addFile( regFile )
self.request.addOperation( register )
return S_OK()
#############################################################################
def _setReplicaRemovalRequest( self, lfn, se ):
""" Sets a removal request for a replica.
:param str lfn: LFN
:param se:
"""
if isinstance( se, str ):
se = ",".join( [ se.strip() for se in se.split( "," ) if se.strip() ] )
removeReplica = Operation()
removeReplica.Type = "RemoveReplica"
removeReplica.TargetSE = se
replicaToRemove = File()
replicaToRemove.LFN = lfn
removeReplica.addFile( replicaToRemove )
self.request.addOperation( removeReplica )
return S_OK()
#############################################################################
def _setFileRemovalRequest( self, lfn, se = '', pfn = '' ):
""" Sets a removal request for a file including all replicas.
"""
remove = Operation()
remove.Type = "RemoveFile"
if se:
remove.TargetSE = se
rmFile = File()
rmFile.LFN = lfn
if pfn:
rmFile.PFN = pfn
remove.addFile( rmFile )
self.request.addOperation( remove )
return S_OK()
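# Hedged usage sketch (added for illustration, not part of DIRAC): the call
# sequence below mirrors the module docstring; the file name, LFN and SE names
# are placeholders, not real configuration.
#
#   failover = FailoverTransfer()
#   metaDict = {'GUID': None, 'Checksum': None, 'Size': 0}
#   result = failover.transferAndRegisterFile('out.data', '/tmp/out.data',
#                                             '/some/vo/user/out.data',
#                                             ['SOME-TARGET-SE'], metaDict)
#   if not result['OK']:
#       result = failover.transferAndRegisterFileFailover('out.data', '/tmp/out.data',
#                                                         '/some/vo/user/out.data',
#                                                         'SOME-TARGET-SE',
#                                                         ['SOME-FAILOVER-SE'],
#                                                         metaDict)
#   failover.commitRequest()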
|
Andrew-McNab-UK/DIRAC
|
DataManagementSystem/Client/FailoverTransfer.py
|
Python
|
gpl-3.0
| 11,448
|
[
"DIRAC"
] |
9d08de36d33c11f2ee9158a319194a634a81d575ff06ee3043aacd7b3724629a
|
from datetime import datetime
from django.http import HttpResponse
from hippocampus import HIPPOCAMPUS_COOKIE_NAME
from hippocampus.models import Visit
def log_exit(request):
cookie_id = request.COOKIES.get(HIPPOCAMPUS_COOKIE_NAME)
if cookie_id is not None:
try:
last_visit = Visit.objects.filter(
cookie_id=cookie_id).order_by('-enter')[0]
except IndexError:
pass
else:
last_visit.exit = datetime.now()
last_visit.exit_url = request.GET.get('outbound', '')
last_visit.save()
return HttpResponse('')
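# Hedged usage note (added for illustration, not part of the original app): this
# view would typically be exposed through a URLconf entry along the lines of
#   url(r'^exit/$', log_exit)
# and requested with an "?outbound=<url>" query parameter when a visitor leaves
# the site, so that the exit time and exit URL of the last Visit are recorded.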
|
marinho/hippocampus
|
hippocampus/views.py
|
Python
|
bsd-3-clause
| 619
|
[
"VisIt"
] |
46acff07dd07464456a55f0f3c690b8d88638bb8102e911880b3fe9bf1b88d3a
|
#! /usr/bin/python
"""
Varmint Pest - Attempts to frighten away varmints such as raccoons
Created 7/04/17 by Greg Griffes
"""
import time, sys, os
import logging
from automat import MethodicalMachine
##import RPi.GPIO as GPIO
##import pigpio # http://abyz.co.uk/rpi/pigpio/python.html
##import spidev
##import Adafruit_ADS1x15
##from collections import deque
##import threading
##import datetime
##from picamera import PiCamera
# for graphics
##from webcolors import name_to_rgb
##import pygame
##from pygame.locals import *
# Global variables
##global flow_count
##flow_count = 0
#camera = PiCamera()
# Provides a logging solution for troubleshooting
logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-10s) %(message)s')
# -----------------------------------
# Constants
# -----------------------------------
NIGHT = 0
DAY = 1
# -----------------------------------
# Initialize
# -----------------------------------
# =======================================
# Local classes
# =======================================
class _daylight_sensor(object):
# Create the daylight sensor class
def __init__(self):
self.night = NIGHT
self.day = DAY
def __del__(self):
logging.debug('daylight sensor class closed')
def read(self):
logging.debug("Daylight sensor indicates night time")
return self.night
##########################
class varmintpest_fsm(object):
##########################
"""varmintpest_fsm is a state machine using the Automat MethodicalMachine
There are three states:
S1: Daytime (the initial state)
S2: Nighttime-motion (waiting for motion)
S3: Nighttime-IR (upon IR signature, set off the varmint blast)
State: S1 (Daytime) parameters:
S1I1: Input 1: daylight-yes
S1I2: Input 2: daylight-no
S1T1: Transition 1: upon daylight-yes
S1O1: Output 1: Play bird songs
S1T1ns: Next state: enter daytime
S1T2: Transition 2: upon daylight-no
S1O2: Output 2: message entering nighttime and test for motion
S1T2ns: Next state: enter nighttime-motion
State: S2 (nighttime-motion) parameters:
S2I1: Input 1: daylight-yes
S2I2: Input 2: daylight-no
S2I3: Input 3: motion-detected
S2I4: Input 4: motion-not-detected
S2T1: Transition 1: upon daylight-yes
S2O1: message start of daytime
S2T1ns: enter S1 daytime
S2T2: Transition 2: upon daylight-no
S2T2ns: enter S2 nighttime-motion
S2T3: Transition 3: upon motion-detected
S2O2: message motion detected
S2T3ns: enter S3 nighttime-IR
S2T4: Transition 4: upon motion-not-detected
S2T4ns: enter S2 nighttime-motion
State: S3 (Nighttime-IR) parameters:
S3I1: Input 1: IR-detected
S3I2: Input 2: IR-not-detected
S3T1: Transition 1: upon IR-detected
S3O1: Output 1: Perform Varmint-blast
S3T1ns: Next state: enter nighttime-motion
S3T2: Transition 2: upon IR-not-detected
S3O2: Output 2: log false-motion-detection
S3T2ns: Next state: enter S2 nighttime-motion
"""
# Create the state machine class from Automat
_machine = MethodicalMachine()
#=======================================
# Inputs
#=======================================
@_machine.input()
def daylightYes(self):
"Light sensor indicates day time"
@_machine.input()
def daylightNo(self):
"Light sensor indicates night time"
@_machine.input()
def motionDetected(self):
"Motion - possible varmint"
@_machine.input()
def motionNotDetected(self):
"Nothing found, keep looking"
@_machine.input()
def IRDetected(self):
"IR signature indicates varmint"
@_machine.input()
def IRNotDetected(self):
"Nothing found, keep looking"
#=======================================
# Outputs
#=======================================
@_machine.output()
def _play_bird_sounds(self):
logging.debug('playing bird sounds')
time.sleep(1.0)
@_machine.output()
def _nighttime(self):
logging.debug('entering nighttime-motion state')
time.sleep(1.0)
@_machine.output()
def _messageStartOfDaytime(self):
logging.debug('entering daytime state')
time.sleep(1.0)
@_machine.output()
def _messageMotionDetected(self):
logging.debug('Motion detected! Entering motion-IR state')
time.sleep(1.0)
@_machine.output()
def _performVarmintBlast(self):
logging.debug('IR detected! Performing Varmint Blast!')
time.sleep(1.0)
@_machine.output()
def _messageFalseMotionDetected(self):
logging.debug('Motion detected but no IR signature')
time.sleep(1.0)
#=======================================
# S1 State
#=======================================
@_machine.state(initial=True)
def daytime(self):
"wait for nighttime"
#=======================================
# S2 State - nighttime-motion
#=======================================
@_machine.state()
def nighttimeMotion(self):
"wait for motion or daytime"
#=======================================
# S3 State - nighttime-IR
#=======================================
@_machine.state()
def nighttimeIR(self):
"wait for IR signature"
#=======================================
# S1 Transition logic
#=======================================
daytime.upon(daylightYes, enter=daytime, outputs=[_play_bird_sounds])
daytime.upon(daylightNo, enter=nighttimeMotion, outputs=[_nighttime])
#=======================================
# S2 Transition logic
#=======================================
nighttimeMotion.upon(daylightYes, enter=daytime, outputs=[_messageStartOfDaytime])
nighttimeMotion.upon(daylightNo, enter=nighttimeMotion, outputs=[])
nighttimeMotion.upon(motionDetected, enter=nighttimeIR, outputs=[_messageMotionDetected])
nighttimeMotion.upon(motionNotDetected, enter=nighttimeMotion, outputs=[])
#=======================================
# S3 Transition logic
#=======================================
nighttimeIR.upon(IRDetected, enter=nighttimeMotion, outputs=[_performVarmintBlast])
nighttimeIR.upon(IRNotDetected, enter=nighttimeMotion, outputs=[_messageFalseMotionDetected])
###############################################################
# Main program
###############################################################
if __name__ == '__main__':
# -----------------------------------
# Initialize variables
# -----------------------------------
# -----------------------------------
# Exception handler
# -----------------------------------
try:
logging.debug('debug logging is on')
# -----------------------------------
# Create objects
# -----------------------------------
varmintpest = varmintpest_fsm()
# daylight_sensor = _daylight_sensor()
# -----------------------------------
# Create and start daemons
# -----------------------------------
# -----------------------------------
# Create and start graphics
# -----------------------------------
###############################################################
# Main program
###############################################################
        varmintpest.daylightYes()       # stay in daytime state
        varmintpest.daylightNo()        # move to nighttime-motion state
        varmintpest.motionNotDetected() # stay in nighttime-motion state
        varmintpest.motionNotDetected() # stay in nighttime-motion state
        varmintpest.motionNotDetected() # stay in nighttime-motion state
        varmintpest.motionNotDetected() # stay in nighttime-motion state
        varmintpest.motionDetected()    # move to nighttime-IR state
        varmintpest.IRNotDetected()     # false alarm, back to nighttime-motion state
        varmintpest.motionDetected()    # move to nighttime-IR state
        varmintpest.IRDetected()        # varmint blast, back to nighttime-motion state
        varmintpest.motionNotDetected() # stay in nighttime-motion state
        varmintpest.motionNotDetected() # stay in nighttime-motion state
        varmintpest.motionNotDetected() # stay in nighttime-motion state
        varmintpest.motionNotDetected() # stay in nighttime-motion state
        varmintpest.daylightYes()       # move back to daytime state
time.sleep(3.0)
###########################################################
# END
###########################################################
except KeyboardInterrupt:
logging.debug("Keyboard Interrupt exception!")
exit()
except BaseException as e:
logging.error('General exception!: ' + str(e))
# normal exit
|
griffegg/varmintpest
|
varmintpest.py
|
Python
|
mit
| 8,715
|
[
"BLAST"
] |
8935db6c4c5d76e35b90eec889c94e14e5587940c3ba88b19b30a9ef73055fdd
|
# ----------------------------------------------------------------------------
# Copyright (c) 2015, The Deblur Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import splitext, join, basename, isfile, split
from datetime import datetime
from os import stat
from glob import glob
import logging
import re
import scipy
import numpy as np
import subprocess
import time
import warnings
import io
import os
import skbio
from biom.table import Table
from biom.util import biom_open
from biom import load_table
from deblur.deblurring import deblur
sniff_fasta = skbio.io.io_registry.get_sniffer('fasta')
sniff_fastq = skbio.io.io_registry.get_sniffer('fastq')
def _get_fastq_variant(input_fp):
# http://scikit-bio.org/docs/latest/generated/skbio.io.format.fastq.html#format-parameters
variant = None
variants = ['illumina1.8', 'illumina1.3', 'solexa', 'sanger']
for v in variants:
try:
next(skbio.read(input_fp, format='fastq', variant=v))
except:
continue
else:
variant = v
break
if variant is None:
raise ValueError("Unknown variant, unable to interpret PHRED")
return variant
def sequence_generator(input_fp):
"""Yield (id, sequence) from an input file
Parameters
----------
input_fp : filepath
A filepath, which can be any valid fasta or fastq file within the
limitations of scikit-bio's IO registry.
Notes
-----
The use of this method is a stopgap to replicate the existing `parse_fasta`
functionality while at the same time allowing for fastq support.
Raises
------
skbio.io.FormatIdentificationWarning
If the format of the input file cannot be determined.
Returns
-------
(str, str)
The ID and sequence.
"""
logger = logging.getLogger(__name__)
kw = {}
if sniff_fasta(input_fp)[0]:
format = 'fasta'
elif sniff_fastq(input_fp)[0]:
format = 'fastq'
kw['variant'] = _get_fastq_variant(input_fp)
else:
# usually happens when the fasta file is empty
# so need to return no sequences (and warn)
msg = "input file %s does not appear to be FASTA or FASTQ" % input_fp
logger.warn(msg)
warnings.warn(msg, UserWarning)
return
# some of the test code is using file paths, some is using StringIO.
if isinstance(input_fp, io.TextIOBase):
input_fp.seek(0)
for record in skbio.read(input_fp, format=format, **kw):
yield (record.metadata['id'], str(record))
def trim_seqs(input_seqs, trim_len, left_trim_len):
"""Trim FASTA sequences to specified length.
Parameters
----------
input_seqs : iterable of (str, str)
The list of input sequences in (label, sequence) format
trim_len : int
Sequence trimming length. Specify a value of -1 to disable trimming.
left_trim_len : int
Sequence trimming from the 5' end. A value of 0 will disable this trim.
Returns
-------
Generator of (str, str)
The trimmed sequences in (label, sequence) format
"""
# counters for the number of trimmed and total sequences
logger = logging.getLogger(__name__)
okseqs = 0
totseqs = 0
if trim_len < -1:
raise ValueError("Invalid trim_len: %d" % trim_len)
for label, seq in input_seqs:
totseqs += 1
if trim_len == -1:
okseqs += 1
yield label, seq
elif len(seq) >= trim_len:
okseqs += 1
yield label, seq[left_trim_len:trim_len]
if okseqs < 0.01*totseqs:
logger = logging.getLogger(__name__)
errmsg = 'Vast majority of sequences (%d / %d) are shorter ' \
'than the trim length (%d). ' \
'Are you using the correct -t trim length?' \
% (totseqs-okseqs, totseqs, trim_len)
logger.warn(errmsg)
warnings.warn(errmsg, UserWarning)
else:
logger.debug('trimmed to length %d (%d / %d remaining)'
% (trim_len, okseqs, totseqs))
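# Hedged example (added for illustration, not part of deblur): trim_seqs works on
# any iterable of (label, sequence) tuples, so it can be exercised in memory:
#   >>> list(trim_seqs([('s1', 'ACGTACGT'), ('s2', 'ACG')], trim_len=4, left_trim_len=0))
#   [('s1', 'ACGT')]
# 's2' is dropped because it is shorter than the requested trim length.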
def dereplicate_seqs(seqs_fp,
output_fp,
min_size=2,
use_log=False,
threads=1):
"""Dereplicate FASTA sequences and remove singletons using VSEARCH.
Parameters
----------
seqs_fp : string
filepath to FASTA sequence file
output_fp : string
file path to dereplicated sequences (FASTA format)
min_size : integer, optional
discard sequences with an abundance value smaller
than integer
use_log: boolean, optional
save the vsearch logfile as well (to output_fp.log)
default=False
threads : int, optional
number of threads to use (0 for all available)
"""
logger = logging.getLogger(__name__)
logger.info('dereplicate seqs file %s' % seqs_fp)
log_name = "%s.log" % output_fp
params = ['vsearch', '--derep_fulllength', seqs_fp,
'--output', output_fp, '--sizeout',
'--fasta_width', '0', '--minuniquesize', str(min_size),
'--quiet', '--threads', str(threads)]
if use_log:
params.extend(['--log', log_name])
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('Problem running vsearch dereplication on file %s' %
seqs_fp)
logger.debug('parameters used:\n%s' % params)
logger.debug('stdout: %s' % sout)
logger.debug('stderr: %s' % serr)
return
def build_index_sortmerna(ref_fp, working_dir):
"""Build a SortMeRNA index for all reference databases.
Parameters
----------
ref_fp: tuple
filepaths to FASTA reference databases
working_dir: string
working directory path where to store the indexed database
Returns
-------
all_db: tuple
filepaths to SortMeRNA indexed reference databases
"""
logger = logging.getLogger(__name__)
logger.info('build_index_sortmerna files %s to'
' dir %s' % (ref_fp, working_dir))
all_db = []
for db in ref_fp:
fasta_dir, fasta_filename = split(db)
index_basename = splitext(fasta_filename)[0]
db_output = join(working_dir, index_basename)
logger.debug('processing file %s into location %s' % (db, db_output))
params = ['indexdb_rna', '--ref', '%s,%s' %
(db, db_output), '--tmpdir', working_dir]
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('Problem running indexdb_rna on file %s to dir %s. '
'database not indexed' % (db, db_output))
logger.debug('stdout: %s' % sout)
logger.debug('stderr: %s' % serr)
logger.critical('execution halted')
raise RuntimeError('Cannot index database file %s' % db)
logger.debug('file %s indexed' % db)
all_db.append(db_output)
return all_db
def filter_minreads_samples_from_table(table, minreads=1, inplace=True):
"""Filter samples from biom table that have less than
minreads reads total
    Parameters
----------
table : biom.Table
the biom table to filter
minreads : int (optional)
the minimal number of reads in a sample in order to keep it
inplace : bool (optional)
if True, filter the biom table in place, if false create a new copy
Returns
-------
table : biom.Table
the filtered biom table
"""
logger = logging.getLogger(__name__)
logger.debug('filter_minreads_started. minreads=%d' % minreads)
samp_sum = table.sum(axis='sample')
samp_ids = table.ids(axis='sample')
bad_samples = samp_ids[samp_sum < minreads]
if len(bad_samples) > 0:
logger.warn('removed %d samples with reads per sample<%d'
% (len(bad_samples), minreads))
table = table.filter(bad_samples, axis='sample',
inplace=inplace, invert=True)
else:
logger.debug('all samples contain > %d reads' % minreads)
return table
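# Hedged example (added for illustration, not part of deblur): filtering a small
# in-memory table; sample 'sampB' has only 1 read in total and is removed.
#   >>> t = Table(np.array([[5., 0.], [3., 1.]]), ['seq1', 'seq2'], ['sampA', 'sampB'])
#   >>> list(filter_minreads_samples_from_table(t, minreads=2).ids(axis='sample'))
#   ['sampA']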
def fasta_from_biom(table, fasta_file_name):
'''Save sequences from a biom table to a fasta file
Parameters
----------
table : biom.Table
The biom table containing the sequences
fasta_file_name : str
Name of the fasta output file
'''
logger = logging.getLogger(__name__)
logger.debug('saving biom table sequences to fasta file %s' % fasta_file_name)
with open(fasta_file_name, 'w') as f:
for cseq in table.ids(axis='observation'):
f.write('>%s\n%s\n' % (cseq, cseq))
logger.info('saved biom table sequences to fasta file %s' % fasta_file_name)
def remove_artifacts_from_biom_table(table_filename,
fasta_filename,
ref_fp,
biom_table_dir,
ref_db_fp,
threads=1,
verbose=False,
sim_thresh=None,
coverage_thresh=None):
"""Remove artifacts from a biom table using SortMeRNA
Parameters
----------
    table_filename : str
name of the biom table file
fasta_filename : str
the fasta file containing all the sequences of the biom table
Returns
-------
tmp_files : list of str
The temp files created during the artifact removal step
"""
logger = logging.getLogger(__name__)
logger.info('getting 16s sequences from the biom table')
# remove artifacts from the fasta file. output is in clean_fp fasta file
clean_fp, num_seqs_left, tmp_files = remove_artifacts_seqs(fasta_filename, ref_fp,
working_dir=biom_table_dir,
ref_db_fp=ref_db_fp,
negate=False, threads=threads,
verbose=verbose,
sim_thresh=sim_thresh,
coverage_thresh=coverage_thresh)
if clean_fp is None:
logger.warn("No clean sequences in %s" % fasta_filename)
return tmp_files
logger.debug('removed artifacts from sequences input %s'
' to output %s' % (fasta_filename, clean_fp))
# read the clean fasta file
good_seqs = {s for _, s in sequence_generator(clean_fp)}
logger.debug('loaded %d sequences from cleaned biom table'
' fasta file' % len(good_seqs))
logger.debug('loading biom table %s' % table_filename)
table = load_table(table_filename)
# filter and save the artifact biom table
artifact_table = table.filter(list(good_seqs),
axis='observation', inplace=False,
invert=True)
# remove the samples with 0 reads
filter_minreads_samples_from_table(artifact_table)
output_nomatch_fp = join(biom_table_dir, 'reference-non-hit.biom')
write_biom_table(artifact_table, output_nomatch_fp)
logger.info('wrote artifact only filtered biom table to %s'
% output_nomatch_fp)
# and save the reference-non-hit fasta file
output_nomatch_fasta_fp = join(biom_table_dir, 'reference-non-hit.seqs.fa')
fasta_from_biom(artifact_table, output_nomatch_fasta_fp)
# filter and save the only 16s biom table
table.filter(list(good_seqs), axis='observation')
# remove the samples with 0 reads
filter_minreads_samples_from_table(table)
output_fp = join(biom_table_dir, 'reference-hit.biom')
write_biom_table(table, output_fp)
logger.info('wrote 16s filtered biom table to %s' % output_fp)
# and save the reference-non-hit fasta file
output_match_fasta_fp = join(biom_table_dir, 'reference-hit.seqs.fa')
fasta_from_biom(table, output_match_fasta_fp)
# we also don't need the cleaned fasta file
tmp_files.append(clean_fp)
return tmp_files
def remove_artifacts_seqs(seqs_fp,
ref_fp,
working_dir,
ref_db_fp,
negate=False,
threads=1,
verbose=False,
sim_thresh=None,
coverage_thresh=None):
"""Remove artifacts from FASTA file using SortMeRNA.
Parameters
----------
seqs_fp: string
file path to FASTA input sequence file
ref_fp: tuple
file path(s) to FASTA database file
working_dir: string
working directory path
ref_db_fp: tuple
file path(s) to indexed FASTA database
negate: boolean, optional
if True, discard all input sequences aligning
to reference database
threads: integer, optional
number of threads to use for SortMeRNA
verbose: boolean, optional
If true, output SortMeRNA errors
sim_thresh: float, optional
The minimal similarity threshold (between 0 and 1)
for keeping the sequence
if None, the default values used are 0.65 for negate=False,
0.95 for negate=True
coverage_thresh: float, optional
The minimal coverage threshold (between 0 and 1)
for alignments for keeping the sequence
if None, the default values used are 0.5 for negate=False,
0.95 for negate=True
Returns
-------
output_fp : str
Name of the artifact removed fasta file
okseqs : int
The number of sequences left after artifact removal
tmp_files : list of str
Names of the tmp files created
"""
logger = logging.getLogger(__name__)
logger.info('remove_artifacts_seqs file %s' % seqs_fp)
if stat(seqs_fp).st_size == 0:
logger.warn('file %s has size 0, continuing' % seqs_fp)
return None, 0, []
if coverage_thresh is None:
if negate:
coverage_thresh = 0.95 * 100
else:
coverage_thresh = 0.5 * 100
if sim_thresh is None:
if negate:
sim_thresh = 0.95 * 100
else:
sim_thresh = 0.65 * 100
# the minimal average bitscore per nucleotide
bitscore_thresh = 0.65
output_fp = join(working_dir,
"%s.no_artifacts" % basename(seqs_fp))
blast_output = join(working_dir,
'%s.sortmerna' % basename(seqs_fp))
aligned_seq_ids = set()
for i, db in enumerate(ref_fp):
logger.debug('running on ref_fp %s working dir %s refdb_fp %s seqs %s'
% (db, working_dir, ref_db_fp[i], seqs_fp))
# run SortMeRNA
# we use -e 100 to remove E-value based filtering by sortmerna
# since we use bitscore/identity/coverage filtering instead
params = ['sortmerna', '--reads', seqs_fp, '--ref', '%s,%s' %
(db, ref_db_fp[i]),
'--aligned', blast_output, '--blast', '3', '--best', '1',
'--print_all_reads', '-v', '-e', '100']
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('sortmerna error on file %s' % seqs_fp)
logger.error('stdout : %s' % sout)
logger.error('stderr : %s' % serr)
return output_fp, 0, []
blast_output_filename = '%s.blast' % blast_output
with open(blast_output_filename, 'r') as bfl:
for line in bfl:
line = line.strip().split('\t')
# if * means no match
if line[1] == '*':
continue
# check if % identity[2] and coverage[13] are large enough
if (float(line[2]) >= sim_thresh) and \
(float(line[13]) >= coverage_thresh) and \
(float(line[11]) >= bitscore_thresh * len(line[0])):
aligned_seq_ids.add(line[0])
if negate:
def op(x): return x not in aligned_seq_ids
else:
def op(x): return x in aligned_seq_ids
# if negate = False, only output sequences
# matching to at least one of the databases
totalseqs = 0
okseqs = 0
badseqs = 0
with open(output_fp, 'w') as out_f:
for label, seq in sequence_generator(seqs_fp):
totalseqs += 1
label = label.split()[0]
if op(label):
out_f.write(">%s\n%s\n" % (label, seq))
okseqs += 1
else:
badseqs += 1
logger.info('total sequences %d, passing sequences %d, '
'failing sequences %d' % (totalseqs, okseqs, badseqs))
return output_fp, okseqs, [blast_output_filename]
def multiple_sequence_alignment(seqs_fp, threads=1):
"""Perform multiple sequence alignment on FASTA file using MAFFT.
Parameters
----------
seqs_fp: string
filepath to FASTA file for multiple sequence alignment
threads: integer, optional
number of threads to use. 0 to use all threads
Returns
-------
msa_fp : str
name of output alignment file or None if error encountered
"""
logger = logging.getLogger(__name__)
logger.info('multiple_sequence_alignment seqs file %s' % seqs_fp)
# for mafft we use -1 to denote all threads and not 0
if threads == 0:
threads = -1
if stat(seqs_fp).st_size == 0:
logger.warning('msa failed. file %s has no reads' % seqs_fp)
return None
msa_fp = seqs_fp + '.msa'
params = ['mafft', '--quiet', '--preservecase', '--parttree', '--auto',
'--thread', str(threads), seqs_fp]
sout, serr, res = _system_call(params, stdoutfilename=msa_fp)
if not res == 0:
logger.info('msa failed for file %s (maybe only 1 read?)' % seqs_fp)
logger.debug('stderr : %s' % serr)
return None
return msa_fp
def remove_chimeras_denovo_from_seqs(seqs_fp, working_dir, threads=1):
"""Remove chimeras de novo using UCHIME (VSEARCH implementation).
Parameters
----------
seqs_fp: string
file path to FASTA input sequence file
output_fp: string
file path to store chimera-free results
threads : int
number of threads (0 for all cores)
Returns
-------
output_fp
the chimera removed fasta file name
"""
logger = logging.getLogger(__name__)
logger.info('remove_chimeras_denovo_from_seqs seqs file %s'
'to working dir %s' % (seqs_fp, working_dir))
output_fp = join(
working_dir, "%s.no_chimeras" % basename(seqs_fp))
# we use the parameters dn=0.000001, xn=1000, minh=10000000
# so 1 mismatch in the A/B region will cancel it being labeled as chimera
# and ~3 unique reads in each region will make it a chimera if
# no mismatches
params = ['vsearch', '--uchime_denovo', seqs_fp,
'--nonchimeras', output_fp,
'-dn', '0.000001', '-xn', '1000',
'-minh', '10000000', '--mindiffs', '5',
'--fasta_width', '0', '--threads', str(threads)]
sout, serr, res = _system_call(params)
if not res == 0:
logger.error('problem with chimera removal for file %s' % seqs_fp)
logger.debug('stdout : %s' % sout)
logger.debug('stderr : %s' % serr)
return output_fp
def sample_id_from_read_id(readid):
"""Get SampleID from the split_libraries_fastq.py output
fasta file read header
Parameters
----------
readid : str
the fasta file read name
Returns
-------
sampleid : str
the sample id
"""
# get the sampleid_readid field
sampleread = readid.split(' ')[0]
# get the sampleid field
sampleid = sampleread.rsplit('_', 1)[0]
return sampleid
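# Hedged example (added for illustration, not part of deblur): for a typical
# post split_libraries_fastq.py read header, the sample id is everything before
# the last underscore of the first whitespace-separated field:
#   >>> sample_id_from_read_id('sampleA_123 orig_bc=ACGT new_bc=ACGT')
#   'sampleA'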
def split_sequence_file_on_sample_ids_to_files(seqs,
outdir):
"""Split FASTA file on sample IDs.
Parameters
----------
seqs: file handler
file handler to demultiplexed FASTA file
outdir: string
dirpath to output split FASTA files
"""
logger = logging.getLogger(__name__)
logger.info('split_sequence_file_on_sample_ids_to_files'
' for file %s into dir %s' % (seqs, outdir))
outputs = {}
for bits in sequence_generator(seqs):
sample = sample_id_from_read_id(bits[0])
if sample not in outputs:
outputs[sample] = open(join(outdir, sample + '.fasta'), 'w')
outputs[sample].write(">%s\n%s\n" % (bits[0], bits[1]))
for sample in outputs:
outputs[sample].close()
logger.info('split to %d files' % len(outputs))
def write_biom_table(table, biom_fp):
"""Write BIOM table to file.
Parameters
----------
table: biom.table
an instance of a BIOM table
biom_fp: string
filepath to output BIOM table
"""
logger = logging.getLogger(__name__)
logger.debug('write_biom_table to file %s' % biom_fp)
with biom_open(biom_fp, 'w') as f:
table.to_hdf5(h5grp=f, generated_by="deblur")
logger.debug('wrote to BIOM file %s' % biom_fp)
def get_files_for_table(input_dir,
file_end='.trim.derep.no_artifacts'
'.msa.deblur.no_chimeras'):
"""Get a list of files to add to the output table
Parameters:
-----------
input_dir : string
name of the directory containing the deblurred fasta files
file_end : string
the ending of all the fasta files to be added to the table
        (default '.trim.derep.no_artifacts.msa.deblur.no_chimeras')
Returns
-------
names : list of tuples of (string,string)
list of tuples of:
name of fasta files to be added to the biom table
sampleid (file names without the file_end and path)
"""
logger = logging.getLogger(__name__)
logger.debug('get_files_for_table input dir %s, '
'file-ending %s' % (input_dir, file_end))
names = []
for cfile in glob(join(input_dir, "*%s" % file_end)):
if not isfile(cfile):
continue
sample_id = basename(cfile)[:-len(file_end)]
sample_id = os.path.splitext(sample_id)[0]
names.append((cfile, sample_id))
logger.debug('found %d files' % len(names))
return names
def create_otu_table(output_fp, deblurred_list,
outputfasta_fp=None, minreads=0):
"""Create a biom table out of all files in a directory
Parameters
----------
output_fp : string
filepath to output BIOM table
deblurred_list : list of (str, str)
list of file names (including path), sampleid of all deblurred
fasta files to add to the table
outputfasta_fp : str, optional
name of output fasta file (of all sequences in the table) or None
to not write
minreads : int, optional
minimal number of reads per bacterial sequence in order to write
it to the biom table and fasta file or 0 to write all
"""
logger = logging.getLogger(__name__)
logger.info('create_otu_table for %d samples, '
'into output table %s' % (len(deblurred_list), output_fp))
# the regexp for finding the number of reads of a sequence
    sizeregexp = re.compile(r'(?<=size=)\w+')
seqdict = {}
seqlist = []
sampset = set()
samplist = []
# arbitrary size for the sparse results matrix so we won't run out of space
obs = scipy.sparse.dok_matrix((1E9, len(deblurred_list)), dtype=np.double)
# load the sequences from all samples into a sprase matrix
sneaking_extensions = {'fasta', 'fastq', 'fna', 'fq', 'fa'}
for (cfilename, csampleid) in deblurred_list:
if csampleid.rsplit('.', 1)[-1] in sneaking_extensions:
csampleid = csampleid.rsplit('.', 1)[0]
# test if sample has already been processed
if csampleid in sampset:
            warnings.warn('sample %s already in table!' % csampleid, UserWarning)
logger.error('sample %s already in table!' % csampleid)
continue
sampset.add(csampleid)
samplist.append(csampleid)
csampidx = len(sampset)-1
# read the fasta file and add to the matrix
for chead, cseq in sequence_generator(cfilename):
if cseq not in seqdict:
seqdict[cseq] = len(seqlist)
seqlist.append(cseq)
cseqidx = seqdict[cseq]
cfreq = float(sizeregexp.search(chead).group(0))
try:
obs[cseqidx, csampidx] = cfreq
except IndexError:
# exception means we ran out of space - add more OTUs
shape = obs.shape
obs.resize((shape[0]*2, shape[1]))
obs[cseqidx, csampidx] = cfreq
logger.info('for output biom table loaded %d samples, %d unique sequences'
% (len(samplist), len(seqlist)))
# and now make the sparse matrix the real size
obs.resize((len(seqlist), len(samplist)))
# do the minimal reads per otu filtering
if minreads > 0:
readsperotu = obs.sum(axis=1)
keep = np.where(readsperotu >= minreads)[0]
logger.info('keeping %d (out of %d sequences) with >=%d reads' %
(len(keep), len(seqlist), minreads))
obs = obs[keep, :]
seqlist = list(np.array(seqlist)[keep])
logger.debug('filtering completed')
# convert the matrix to a biom table
table = Table(obs.tocsr(), seqlist, samplist,
observation_metadata=None,
sample_metadata=None, table_id=None,
generated_by="deblur",
create_date=datetime.now().isoformat())
logger.debug('converted to biom table')
# remove samples with 0 reads
filter_minreads_samples_from_table(table)
# save the merged otu table
write_biom_table(table, output_fp)
logger.info('saved to biom file %s' % output_fp)
# and save the fasta file
if outputfasta_fp is not None:
logger.debug('saving fasta file')
with open(outputfasta_fp, 'w') as f:
for cseq in seqlist:
f.write('>%s\n%s\n' % (cseq, cseq))
logger.info('saved sequence fasta file to %s' % outputfasta_fp)
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
indel_prob, indel_max, trim_length, left_trim_length,
min_size, ref_fp, ref_db_fp, threads_per_sample=1,
sim_thresh=None, coverage_thresh=None):
"""Launch full deblur workflow for a single post split-libraries fasta file
Parameters
----------
seqs_fp: string
a post split library fasta file for debluring
working_dir: string
working directory path
mean_error: float
mean error for original sequence estimate
error_dist: list
list of error probabilities for each hamming distance
indel_prob: float
insertion/deletion (indel) probability
indel_max: integer
maximal indel number
trim_length: integer
sequence trim length
left_trim_length: integer
trim the first n reads
min_size: integer
        minimal abundance of a unique sequence (sequences below this limit are discarded)
ref_fp: tuple
filepath(s) to FASTA reference database for artifact removal
ref_db_fp: tuple
filepath(s) to SortMeRNA indexed database for artifact removal
threads_per_sample: integer, optional
number of threads to use for SortMeRNA/mafft/vsearch
(0 for max available)
sim_thresh: float, optional
the minimal similarity for a sequence to the database.
if None, take the defaults (0.65 for negate=False,
0.95 for negate=True)
coverage_thresh: float, optional
the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.5 for negate=False, 0.95 for negate=True)
Return
------
    output_no_chimeras_fp : string
        filepath to fasta file with no chimeras, or None if an error was encountered
"""
logger = logging.getLogger(__name__)
logger.info('--------------------------------------------------------')
logger.info('launch_workflow for file %s' % seqs_fp)
# Step 1: Trim sequences to specified length
output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
with open(output_trim_fp, 'w') as out_f:
for label, seq in trim_seqs(
input_seqs=sequence_generator(seqs_fp),
trim_len=trim_length,
left_trim_len=left_trim_length):
out_f.write(">%s\n%s\n" % (label, seq))
# Step 2: Dereplicate sequences
output_derep_fp = join(working_dir,
"%s.derep" % basename(output_trim_fp))
dereplicate_seqs(seqs_fp=output_trim_fp,
output_fp=output_derep_fp,
min_size=min_size, threads=threads_per_sample)
# Step 3: Remove artifacts
output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(seqs_fp=output_derep_fp,
ref_fp=ref_fp,
working_dir=working_dir,
ref_db_fp=ref_db_fp,
negate=True,
threads=threads_per_sample,
sim_thresh=sim_thresh)
if not output_artif_fp:
warnings.warn('Problem removing artifacts from file %s' %
seqs_fp, UserWarning)
logger.warning('remove artifacts failed, aborting')
return None
# Step 4: Multiple sequence alignment
if num_seqs_left > 1:
output_msa_fp = join(working_dir,
"%s.msa" % basename(output_artif_fp))
alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
threads=threads_per_sample)
if not alignment:
warnings.warn('Problem performing multiple sequence alignment '
'on file %s' % seqs_fp, UserWarning)
logger.warning('msa failed. aborting')
return None
elif num_seqs_left == 1:
# only one sequence after remove artifacts (but could be many reads)
# no need to run MSA - just use the pre-msa file as input for next step
output_msa_fp = output_artif_fp
else:
err_msg = ('No sequences left after artifact removal in '
'file %s' % seqs_fp)
warnings.warn(err_msg, UserWarning)
logger.warning(err_msg)
return None
# Step 5: Launch deblur
output_deblur_fp = join(working_dir,
"%s.deblur" % basename(output_msa_fp))
with open(output_deblur_fp, 'w') as f:
seqs = deblur(sequence_generator(output_msa_fp), mean_error,
error_dist, indel_prob, indel_max)
if seqs is None:
warnings.warn('multiple sequence alignment file %s contains '
'no sequences' % output_msa_fp, UserWarning)
logger.warn('no sequences returned from deblur for file %s' %
output_msa_fp)
return None
for s in seqs:
# remove '-' from aligned sequences
s.sequence = s.sequence.replace('-', '')
f.write(s.to_fasta())
# Step 6: Chimera removal
output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
output_deblur_fp, working_dir, threads=threads_per_sample)
logger.info('finished processing file')
return output_no_chimeras_fp
def start_log(level=logging.DEBUG, filename=None):
"""start the logger for the run
Parameters
----------
level : int, optional
logging.DEBUG, logging.INFO etc. for the log level (between 0-50).
filename : str, optional
name of the filename to save the log to or
None (default) to use deblur.log.TIMESTAMP
"""
if filename is None:
tstr = time.ctime()
tstr = tstr.replace(' ', '.')
tstr = tstr.replace(':', '.')
filename = 'deblur.log.%s' % tstr
logging.basicConfig(filename=filename, level=level,
format='%(levelname)s(%(thread)d)'
'%(asctime)s:%(message)s')
logger = logging.getLogger(__name__)
logger.info('*************************')
logger.info('deblurring started')
def _system_call(cmd, stdoutfilename=None):
"""Execute the command `cmd`
Parameters
----------
cmd : str
The string containing the command to be run.
stdoutfilename : str
Name of the file to save stdout to or None
(default) to not save to file
Returns
-------
tuple of (str, str, int)
        The standard output, standard error and exit status of the
executed command
Notes
-----
This function is ported and modified from QIIME
(http://www.qiime.org), previously named
qiime_system_call. QIIME is a GPL project, but we obtained permission from
the authors of this function to port it to Qiita and keep it under BSD
license.
"""
logger = logging.getLogger(__name__)
logger.debug('system call: %s' % cmd)
if stdoutfilename:
with open(stdoutfilename, 'w') as f:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=f,
stderr=subprocess.PIPE)
else:
proc = subprocess.Popen(cmd, universal_newlines=True,
shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Communicate pulls all stdout/stderr from the PIPEs
# This call blocks until the command is done
stdout, stderr = proc.communicate()
return_value = proc.returncode
return stdout, stderr, return_value
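# Hedged example (added for illustration, not part of deblur): _system_call takes
# the command as an argument list (shell=False), e.g.
#   >>> out, err, rc = _system_call(['echo', 'hello'])
#   >>> rc
#   0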
|
josenavas/deblur
|
deblur/workflow.py
|
Python
|
bsd-3-clause
| 34,775
|
[
"BLAST",
"scikit-bio"
] |
045d654c42552dfc6fedfcb207f1ce1b431ddc4aef4043791a2fdb5c092c31df
|
#!/usr/bin/env python
""" MultiQC module to parse output from Samblaster """
from __future__ import print_function
import os
from collections import OrderedDict
import logging
import re
from multiqc.plots import bargraph
from multiqc.modules.base_module import BaseMultiqcModule
# Initialise the logger
log = logging.getLogger(__name__)
class MultiqcModule(BaseMultiqcModule):
"""Samblaster"""
def __init__(self):
# Initialise the parent object
super(MultiqcModule, self).__init__(
name="Samblaster",
anchor="samblaster",
href="https://github.com/GregoryFaust/samblaster",
info="is a tool to mark duplicates and extract discordant and split reads from sam files.",
doi="10.1093/bioinformatics/btu314",
)
self.samblaster_data = dict()
for f in self.find_log_files("samblaster", filehandles=True):
self.parse_samblaster(f)
# Filter to strip out ignored sample names
self.samblaster_data = self.ignore_samples(self.samblaster_data)
if len(self.samblaster_data) == 0:
raise UserWarning
headers = OrderedDict()
headers["pct_dups"] = {
"title": "% Dups",
"description": "Percent Duplication",
"max": 100,
"min": 0,
"suffix": "%",
"scale": "OrRd",
}
self.general_stats_addcols(self.samblaster_data, headers)
# Write parsed report data to a file
self.write_data_file(self.samblaster_data, "multiqc_samblaster")
log.info("Found {} reports".format(len(self.samblaster_data)))
self.add_barplot()
def add_barplot(self):
"""Generate the Samblaster bar plot."""
cats = OrderedDict()
cats["n_nondups"] = {"name": "Non-duplicates"}
cats["n_dups"] = {"name": "Duplicates"}
pconfig = {
"id": "samblaster_duplicates",
"title": "Samblaster: Number of duplicate reads",
"ylab": "Number of reads",
}
self.add_section(plot=bargraph.plot(self.samblaster_data, cats, pconfig))
def parse_samblaster(self, f):
"""Go through log file looking for samblaster output.
        If the input was read from stdin, grab the sample name from the RG tag
        of the preceding bwa command."""
# Should capture the following:
# samblaster: Marked 1134898 of 43791982 (2.592%) total read ids as duplicates using 753336k \
# memory in 1M1S(60.884S) CPU seconds and 3M53S(233S) wall time.
dups_regex = (
r"samblaster: (Removed|Marked)\s+(\d+)\s+of\s+(\d+) \((\d+.\d+)%\)\s*(total)?\s*read ids as duplicates"
)
input_file_regex = "samblaster: Opening (\S+) for read."
rgtag_name_regex = "\\\\tID:(\S*?)\\\\t"
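        # Hedged illustration (added comment, not in the original module): applied to
        # the sample line quoted above, dups_regex captures the groups
        #   ('Marked', '1134898', '43791982', '2.592', 'total')
        # which feed n_dups, n_tot and pct_dups below.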
data = {}
s_name = None
fh = f["f"]
for l in fh:
# try to find name from RG-tag. If bwa mem is used upstream samblaster with pipes, then the bwa mem command
# including the read group will be written in the log
match = re.search(rgtag_name_regex, l)
if match:
s_name = self.clean_s_name(match.group(1), f)
# try to find name from the input file name, if used
match = re.search(input_file_regex, l)
if match:
basefn = os.path.basename(match.group(1))
fname, ext = os.path.splitext(basefn)
# if it's stdin, then try bwa RG-tag instead
if fname != "stdin":
s_name = self.clean_s_name(fname, f)
match = re.search(dups_regex, l)
if match:
data["n_dups"] = int(match.group(2))
data["n_tot"] = int(match.group(3))
data["n_nondups"] = data["n_tot"] - data["n_dups"]
data["pct_dups"] = float(match.group(4))
if s_name is None:
s_name = f["s_name"]
if len(data) > 0:
if s_name in self.samblaster_data:
log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f["fn"], s_name))
self.add_data_source(f, s_name)
self.samblaster_data[s_name] = data
|
ewels/MultiQC
|
multiqc/modules/samblaster/samblaster.py
|
Python
|
gpl-3.0
| 4,262
|
[
"BWA"
] |
c80c5029a0628c67bdc8c60692e6eeef65ecda840695bfb81d8cb5ef72df6f75
|
# (C) 2013, Markus Wildi, markus.wildi@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
import unittest
import subprocess
import os
import sys
import pwd
import grp
import signal
import logging
import time
import glob
from rts2.json import JSONProxy
from rts2saf.config import Configuration
from rts2saf.environ import Environment
from rts2saf.createdevices import CreateFilters, CreateFilterWheels, CreateFocuser, CreateCCD
from rts2saf.checkdevices import CheckDevices
from rts2saf.focus import Focus
## ToDo ugly
if not os.path.isdir('/tmp/rts2saf_log'):
os.mkdir('/tmp/rts2saf_log')
logging.basicConfig(filename='/tmp/rts2saf_log/unittest.log', level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger()
class Args(object):
def __init__(self):
pass
class RTS2Environment(unittest.TestCase):
def tearDown(self):
processes=['rts2-centrald','rts2-executor', 'rts2-httpd','rts2-focusd-dummy','rts2-filterd-dummy', 'rts2-camd-dummy']
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
# wildi 7432 0.0 0.1 24692 5192 pts/1 S 17:34 0:01 /usr/local/bin/rts2-centrald
itms= line.split()
exe= itms[10].split('/')[-1]
if self.uid in itms[0] and exe in processes:
pid = int(itms[1])
os.kill(pid, signal.SIGTERM)
        # remove the lock files
for fn in glob.glob(self.lockPrefix):
os.unlink (fn)
def setUp(self):
# by name
self.uid=pwd.getpwuid(os.getuid())[0]
self.gid= grp.getgrgid(os.getgid())[0]
# lock prefix
self.lockPrefix = '/tmp/rts2_{}'.format(self.uid)
# sometimes they are present
self.tearDown()
# set up rts2saf
# read configuration
self.rt = Configuration(logger=logger)
self.ev=Environment(debug=False, rt=self.rt,logger=logger)
self.fileName='./rts2saf-bootes-2-autonomous.cfg'
self.success=self.rt.readConfiguration(fileName=self.fileName)
# set up RTS2
# rts2-centrald
cmd=[ '/usr/local/bin/rts2-centrald',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--local-port', '1617',
'--logfile', '/tmp/rts2saf_log/rts2-debug',
'--lock-prefix', self.lockPrefix,
'--config', './rts2-unittest.ini'
]
self.p_centrald= subprocess.Popen(cmd)
# rts2-executor
cmd=[ '/usr/local/bin/rts2-executor',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--config', './rts2-unittest.ini',
'--server', '127.0.0.1:1617',
'--noauth'
]
self.p_exec= subprocess.Popen(cmd)
# rts2-httpd
cmd=[ '/usr/local/bin/rts2-httpd',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--config', './rts2-unittest.ini',
'--server', '127.0.0.1:1617',
'-p', '9999',
'--noauth'
]
self.p_httpd= subprocess.Popen(cmd)
# rts2-focusd-dummy
focName=self.rt.cfg['FOCUSER_NAME']
cmd=[ '/usr/local/bin/rts2-focusd-dummy',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--server', '127.0.0.1:1617',
'-d', focName,
'--modefile', './f0.modefile'
]
self.p_focusd_dummy= subprocess.Popen(cmd)
# rts2-filterd-dummy
ftwns=list()
for ftwn in self.rt.cfg['inuse']:
ftwns.append(ftwn)
cmd=[ '/usr/local/bin/rts2-filterd-dummy',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--server', '127.0.0.1:1617',
'-d', ftwn
]
ftnames=str()
for ftn in self.rt.cfg['FILTER WHEEL DEFINITIONS'][ftwn]:
ftnames += '{}:'.format(ftn)
if len(ftnames)>0:
cmd.append('-F')
cmd.append(ftnames)
self.p_filterd_dummy= subprocess.Popen(cmd)
# rts2-camd-dummy
name=self.rt.cfg['CCD_NAME']
# '--wheeldev', 'COLWSLT', '--filter-offsets', '1:2:3:4:5:6:7:8'
cmd=[ '/usr/local/bin/rts2-camd-dummy',
'--run-as', '{}.{}'.format(self.uid,self.gid),
'--lock-prefix', self.lockPrefix,
'--server', '127.0.0.1:1617',
'-d', name,
'--focdev', focName
]
for nm in ftwns:
cmd.append('--wheeldev')
cmd.append(nm)
if nm in self.rt.cfg['inuse'][0]:
cmd.append('--filter-offsets')
cmd.append('1:2:3:4:5:6:7:8')
self.p_camd_dummy= subprocess.Popen(cmd)
#
time.sleep(20)
def setupDevices(self, blind=False):
# setup rts2saf
# fake arguments
self.args=Args()
self.args.sxDebug=False
self.args.blind=blind
self.args.verbose=False
self.args.check=True
self.args.fetchOffsets=True
self.args.exposure= 1.887
self.args.catalogAnalysis=False
self.args.Ds9Display=False
self.args.FitDisplay=False
self.args.flux=True
self.args.dryFitsFiles='../samples_bootes2'
# JSON
self.proxy=JSONProxy(url=self.rt.cfg['URL'],username=self.rt.cfg['USERNAME'],password=self.rt.cfg['PASSWORD'])
# create Focuser
self.foc= CreateFocuser(debug=False, proxy=self.proxy, check=self.args.check, rt=self.rt, logger=logger).create()
# create filters
fts=CreateFilters(debug=False, proxy=self.proxy, check=self.args.check, rt=self.rt, logger=logger).create()
# create filter wheels
ftwc= CreateFilterWheels(debug=False, proxy=self.proxy, check=self.args.check, rt=self.rt, logger=logger, filters=fts, foc=self.foc)
ftws=ftwc.create()
if not ftwc.checkBounds():
logger.error('setupDevice: filter focus ranges out of bounds, exiting')
sys.exit(1)
# create ccd
ccd= CreateCCD(debug=False, proxy=self.proxy, check=self.args.check, rt=self.rt, logger=logger, ftws=ftws, fetchOffsets=self.args.fetchOffsets).create()
cdv= CheckDevices(debug=False, proxy=self.proxy, blind=self.args.blind, verbose=self.args.verbose, ccd=ccd, ftws=ftws, foc=self.foc, logger=logger)
cdv.summaryDevices()
cdv.printProperties()
cdv.deviceWriteAccess()
dryFitsFiles=None
if self.args.dryFitsFiles:
dryFitsFiles=glob.glob('{0}/{1}'.format(self.args.dryFitsFiles, self.rt.cfg['FILE_GLOB']))
if len(dryFitsFiles)==0:
                logger.error('setupDevice: no FITS files found in: {}'.format(self.args.dryFitsFiles))
                logger.info('setupDevice: download a sample with: wget http://azug.minpet.unibas.ch/~wildi/rts2saf-test-focus-2013-09-14.tgz')
sys.exit(1)
        # OK, everything is there
self.rts2safFoc= Focus(debug=False, proxy=self.proxy, args=self.args, dryFitsFiles=dryFitsFiles, ccd=ccd, foc=self.foc, ftws=ftws, rt=self.rt, ev=self.ev, logger=logger)
|
zguangyu/rts2
|
scripts/rts2saf/unittest/rts2_environment.py
|
Python
|
gpl-2.0
| 8,243
|
[
"VisIt"
] |
a5fc5dc66117115930397593e3d49b877b64f71d9e2b42295fc22017e1e4fee1
|
#!/usr/bin/env python
"""
killMS, a package for calibration in radio interferometry.
Copyright (C) 2013-2017 Cyril Tasse, l'Observatoire de Paris,
SKA South Africa, Rhodes University
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import optparse
import pickle
import numpy as np
#import pylab
import os
from DDFacet.Other import logger
from DDFacet.Other import ModColor
log=logger.getLogger("ClassInterpol")
from DDFacet.Other.AsyncProcessPool import APP
from DDFacet.Other import Multiprocessing
#from DDFacet.Array import shared_dict
from killMS.Array import NpShared
IdSharedMem=str(int(os.getpid()))+"."
from DDFacet.Other import AsyncProcessPool
from killMS.Other import ClassFitTEC
from killMS.Other import ClassFitAmp
from killMS.Other import ClassClip
import scipy.ndimage.filters
from pyrap.tables import table
# # ##############################
# # Catch numpy warning
# np.seterr(all='raise')
# import warnings
# #with warnings.catch_warnings():
# # warnings.filterwarnings('error')
# warnings.catch_warnings()
# warnings.filterwarnings('error')
# # ##############################
from killMS.Other.ClassTimeIt import ClassTimeIt
#from killMS.Other.least_squares import least_squares
from scipy.optimize import least_squares
import copy
SaveName="last_InterPol.obj"
def read_options():
desc="""Questions and suggestions: cyril.tasse@obspm.fr"""
global options
    opt = optparse.OptionParser(usage='Usage: %prog --SolsFileIn=<sols.npz> --SolsFileOut=<sols.smoothed.npz> <options>',version='%prog version 1.0',description=desc)
group = optparse.OptionGroup(opt, "* Data-related options", "Won't work if not specified.")
group.add_option('--SolsFileIn',help='SolfileIn [no default]',default=None)
group.add_option('--SolsFileOut',help='SolfileOut [no default]',default=None)
group.add_option('--InterpMode',help='Interpolation mode TEC and/or Amp [default is %default]',type="str",default="TEC,Amp")
    group.add_option('--CrossMode',help='Use cross gains mode for TEC [default is %default]',type=int,default=1)
group.add_option('--RemoveAmpBias',help='Remove amplitude bias (along time) before smoothing [default is %default]',type=int,default=0)
group.add_option('--RemoveMedianAmp',help='Remove median amplitude (along freq) after fitting [default is %default]',type=int,default=1)
group.add_option('--Amp-SmoothType',help='Interpolation Type for the amplitude [default is %default]',type="str",default="Gauss")
group.add_option('--Amp-PolyOrder',help='Order of the polynomial to do the amplitude',type="int",default=3)
group.add_option('--Amp-GaussKernel',help='',type="str",default="1,3")
group.add_option('--NCPU',help='Number of CPU to use',type="int",default=0)
opt.add_option_group(group)
options, arguments = opt.parse_args()
exec("options.Amp_GaussKernel=%s"%options.Amp_GaussKernel)
f = open(SaveName,"wb")
pickle.dump(options,f)
def TECToPhase(TEC,freq):
K=8.4479745e9
phase=K*TEC*(1./freq)
return phase
def TECToZ(TEC,ConstPhase,freq):
return np.exp(1j*(TECToPhase(TEC,freq)+ConstPhase))
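# Worked example (illustrative numbers, added for clarity): a differential TEC
# of 0.01 TECU observed at 150 MHz corresponds to a phase of
#   phase = 8.4479745e9 * 0.01 / 150e6 ~ 0.56 rad,
# so TECToZ(0.01, 0.0, 150e6) ~ exp(0.56j).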
class ClassInterpol():
def __init__(self,InSolsName,OutSolsName,
InterpMode="TEC",PolMode="Scalar",Amp_PolyOrder=3,NCPU=0,
Amp_GaussKernel=(0,5), Amp_SmoothType="Poly",
CrossMode=1,
RemoveAmpBias=0,
RemoveMedianAmp=True):
if type(InterpMode)==str:
InterpMode=InterpMode.split(",")#[InterpMode]
self.InSolsName=InSolsName
self.OutSolsName=OutSolsName
self.RemoveMedianAmp=RemoveMedianAmp
log.print("Loading %s"%self.InSolsName)
self.DicoFile=dict(np.load(self.InSolsName,allow_pickle=True))
self.Sols=self.DicoFile["Sols"].view(np.recarray)
if "MaskedSols" in self.DicoFile.keys():
MaskFreq=np.logical_not(np.all(np.all(np.all(self.DicoFile["MaskedSols"][...,0,0],axis=0),axis=1),axis=1))
nt,_,na,nd,_,_=self.Sols.G.shape
self.DicoFile["FreqDomains"]=self.DicoFile["FreqDomains"][MaskFreq]
NFreqsOut=np.count_nonzero(MaskFreq)
log.print("There are %i non-zero freq channels"%NFreqsOut)
SolsOut=np.zeros((nt,),dtype=[("t0",np.float64),("t1",np.float64),
("G",np.complex64,(NFreqsOut,na,nd,2,2)),
("Stats",np.float32,(NFreqsOut,na,4))])
SolsOut=SolsOut.view(np.recarray)
SolsOut.G=self.Sols.G[:,MaskFreq,...]
SolsOut.t0=self.Sols.t0
SolsOut.t1=self.Sols.t1
self.Sols=self.DicoFile["Sols"]=SolsOut
del(self.DicoFile["MaskedSols"])
#self.Sols=self.Sols[0:10].copy()
self.CrossMode=CrossMode
self.CentralFreqs=np.mean(self.DicoFile["FreqDomains"],axis=1)
self.incrCross=11
iii=0
NTEC=101
NConstPhase=51
TECGridAmp=0.1
TECGrid,CPhase=np.mgrid[-TECGridAmp:TECGridAmp:NTEC*1j,-np.pi:np.pi:NConstPhase*1j]
Z=TECToZ(TECGrid.reshape((-1,1)),CPhase.reshape((-1,1)),self.CentralFreqs.reshape((1,-1)))
self.Z=Z
self.TECGrid,self.CPhase=TECGrid,CPhase
self.InterpMode=InterpMode
self.Amp_PolyOrder=Amp_PolyOrder
self.RemoveAmpBias=RemoveAmpBias
if self.RemoveAmpBias:
self.CalcFreqAmpSystematics()
self.Sols.G/=self.G0
self.GOut=NpShared.ToShared("%sGOut"%IdSharedMem,self.Sols.G.copy())
self.PolMode=PolMode
self.Amp_GaussKernel=Amp_GaussKernel
if len(self.Amp_GaussKernel)!=2:
raise ValueError("GaussKernel should be of size 2")
self.Amp_SmoothType=Amp_SmoothType
if "TEC" in self.InterpMode:
log.print( " Smooth phases using a TEC model")
if self.CrossMode:
log.print(ModColor.Str("Using CrossMode"))
if "Amp" in self.InterpMode:
if Amp_SmoothType=="Poly":
log.print( " Smooth amplitudes using polynomial model of order %i"%self.Amp_PolyOrder)
if Amp_SmoothType=="Gauss":
log.print( " Smooth amplitudes using Gaussian kernel of %s (Time/Freq) bins"%str(Amp_GaussKernel))
if self.RemoveAmpBias:
self.GOut*=self.G0
APP.registerJobHandlers(self)
AsyncProcessPool.init(ncpu=NCPU,affinity=0)
def TECInterPol(self):
Sols0=self.Sols
nt,nch,na,nd,_,_=Sols0.G.shape
for iAnt in range(na):
for iDir in range(nd):
for it in range(nt):
self.FitThisTEC(it,iAnt,iDir)
def CalcFreqAmpSystematics(self):
log.print( " Calculating amplitude systematics...")
Sols0=self.Sols
nt,nch,na,nd,_,_=Sols0.G.shape
self.G0=np.zeros((1,nch,na,nd,1,1),np.float32)
for iAnt in range(na):
for iDir in range(nd):
G=Sols0.G[:,:,iAnt,iDir,0,0]
G0=np.mean(np.abs(G),axis=0)
self.G0[0,:,iAnt,iDir,:,:]=G0.reshape((nch,1,1))
def InterpolParallel(self):
Sols0=self.Sols
nt,nch,na,nd,_,_=Sols0.G.shape
log.print(" #Times: %i"%nt)
log.print(" #Channels: %i"%nch)
log.print(" #Antennas: %i"%na)
log.print(" #Directions: %i"%nd)
# APP.terminate()
# APP.shutdown()
# Multiprocessing.cleanupShm()
APP.startWorkers()
iJob=0
# for iAnt in [49]:#range(na):
# for iDir in [0]:#range(nd):
if "TEC" in self.InterpMode:
#APP.runJob("FitThisTEC_%d"%iJob, self.FitThisTEC, args=(208,)); iJob+=1
self.TECArray=NpShared.ToShared("%sTECArray"%IdSharedMem,np.zeros((nt,nd,na),np.float32))
self.CPhaseArray=NpShared.ToShared("%sCPhaseArray"%IdSharedMem,np.zeros((nt,nd,na),np.float32))
for it in range(nt):
# for iDir in range(nd):
APP.runJob("FitThisTEC_%d"%iJob, self.FitThisTEC, args=(it,))#,serial=True)
iJob+=1
workers_res=APP.awaitJobResults("FitThisTEC*", progress="Fit TEC")
if "Amp" in self.InterpMode:
for iAnt in range(na):
for iDir in range(nd):
APP.runJob("FitThisAmp_%d"%iJob, self.FitThisAmp, args=(iAnt,iDir))#,serial=True)
iJob+=1
workers_res=APP.awaitJobResults("FitThisAmp*", progress="Smooth Amp")
if "PolyAmp" in self.InterpMode:
for iDir in range(nd):
APP.runJob("FitThisPolyAmp_%d"%iJob, self.FitThisPolyAmp, args=(iDir,))
iJob+=1
workers_res=APP.awaitJobResults("FitThisPolyAmp*", progress="Smooth Amp")
if "Clip" in self.InterpMode:
for iDir in range(nd):
APP.runJob("ClipThisDir_%d"%iJob, self.ClipThisDir, args=(iDir,),serial=True)
iJob+=1
workers_res=APP.awaitJobResults("ClipThisDir*", progress="Clip Amp")
#APP.terminate()
APP.shutdown()
Multiprocessing.cleanupShm()
# ###########################
# import pylab
# op0=np.abs
# op1=np.angle
# #for iDir in range(nd):
# for iAnt in range(40,na):
# pylab.clf()
# A=op0(self.Sols.G[:,:,iAnt,iDir,0,0])
# v0,v1=0,A.max()
# pylab.subplot(2,3,1)
# pylab.imshow(op0(self.Sols.G[:,:,iAnt,iDir,0,0]),interpolation="nearest",aspect="auto",vmin=v0,vmax=v1)
# pylab.title("Raw Solution (Amp)")
# pylab.xlabel("Freq bin")
# pylab.ylabel("Time bin")
# pylab.subplot(2,3,2)
# pylab.imshow(op0(self.GOut[:,:,iAnt,iDir,0,0]),interpolation="nearest",aspect="auto",vmin=v0,vmax=v1)
# pylab.title("Smoothed Solution (Amp)")
# pylab.xlabel("Freq bin")
# pylab.ylabel("Time bin")
# pylab.subplot(2,3,3)
# pylab.imshow(op0(self.Sols.G[:,:,iAnt,iDir,0,0])-op0(self.GOut[:,:,iAnt,iDir,0,0]),interpolation="nearest",
# aspect="auto",vmin=v0,vmax=v1)
# pylab.xlabel("Freq bin")
# pylab.ylabel("Time bin")
# pylab.title("Residual (Amp)")
# #pylab.colorbar()
# A=op1(self.Sols.G[:,:,iAnt,iDir,0,0])
# v0,v1=A.min(),A.max()
# pylab.subplot(2,3,4)
# pylab.imshow(op1(self.Sols.G[:,:,iAnt,iDir,0,0]),interpolation="nearest",aspect="auto",vmin=v0,vmax=v1)
# pylab.title("Raw Solution (Phase)")
# pylab.xlabel("Freq bin")
# pylab.ylabel("Time bin")
# pylab.subplot(2,3,5)
# pylab.imshow(op1(self.GOut[:,:,iAnt,iDir,0,0]),interpolation="nearest",aspect="auto",vmin=v0,vmax=v1)
# pylab.title("Smoothed Solution (Phase)")
# pylab.xlabel("Freq bin")
# pylab.ylabel("Time bin")
# pylab.subplot(2,3,6)
# pylab.imshow(op1(self.Sols.G[:,:,iAnt,iDir,0,0])-op1(self.GOut[:,:,iAnt,iDir,0,0]),
# interpolation="nearest",aspect="auto",vmin=v0,vmax=v1)
# pylab.title("Residual (Phase)")
# pylab.xlabel("Freq bin")
# pylab.ylabel("Time bin")
# pylab.suptitle("(iAnt, iDir) = (%i, %i)"%(iAnt,iDir))
# pylab.tight_layout()
# pylab.draw()
# pylab.show()#False)
# pylab.pause(0.1)
# #stop
def FitThisTEC(self,it):
nt,nch,na,nd,_,_=self.Sols.G.shape
TECArray=NpShared.GiveArray("%sTECArray"%IdSharedMem)
CPhaseArray=NpShared.GiveArray("%sCPhaseArray"%IdSharedMem)
for iDir in range(nd):
# for it in range(nt):
Est=None
if it>0:
E_TEC=TECArray[it-1,iDir,:]
E_CPhase=CPhaseArray[it-1,iDir,:]
Est=(E_TEC,E_CPhase)
gz,TEC,CPhase=self.FitThisTECTime(it,iDir,Est=Est)
GOut=NpShared.GiveArray("%sGOut"%IdSharedMem)
GOut[it,:,:,iDir,0,0]=gz
GOut[it,:,:,iDir,1,1]=gz
TECArray[it,iDir,:]=TEC
CPhaseArray[it,iDir,:]=CPhase
def FitThisTECTime(self,it,iDir,Est=None):
GOut=NpShared.GiveArray("%sGOut"%IdSharedMem)
nt,nch,na,nd,_,_=self.Sols.G.shape
T=ClassTimeIt("CrossFit")
T.disable()
Mode=["TEC","CPhase"]
Mode=["TEC"]
TEC0CPhase0=np.zeros((len(Mode),na),np.float32)
for iAnt in range(na):
_,t0,c0=self.EstimateThisTECTime(it,iAnt,iDir)
TEC0CPhase0[0,iAnt]=t0
if "CPhase" in Mode:
TEC0CPhase0[1,iAnt]=c0
T.timeit("init")
# ######################################
# Changing method
#print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
#print it,iDir
TECMachine=ClassFitTEC.ClassFitTEC(self.Sols.G[it,:,:,iDir,0,0],self.CentralFreqs,
Tol=5.e-2,
Mode=Mode)
TECMachine.setX0(TEC0CPhase0.ravel())
X=TECMachine.doFit()
if "CPhase" in Mode:
TEC,CPhase=X.reshape((len(Mode),na))
else:
TEC,=X.reshape((len(Mode),na))
CPhase=np.zeros((1,na),np.float32)
TEC-=TEC[0]
CPhase-=CPhase[0]
GThis=np.abs(GOut[it,:,:,iDir,0,0]).T*TECToZ(TEC.reshape((-1,1)),CPhase.reshape((-1,1)),self.CentralFreqs.reshape((1,-1)))
T.timeit("done %i %i %i"%(it,iDir,TECMachine.Current_iIter))
return GThis.T,TEC,CPhase
# ######################################
G=GOut[it,:,:,iDir,0,0].T.copy()
if self.CrossMode:
A0,A1=np.mgrid[0:na,0:na]
gg_meas=G[A0.ravel(),:]*G[A1.ravel(),:].conj()
gg_meas_reim=np.array([gg_meas.real,gg_meas.imag]).ravel()[::self.incrCross]
else:
self.incrCross=1
A0,A1=np.mgrid[0:na],None
gg_meas=G[A0.ravel(),:]
gg_meas_reim=np.array([gg_meas.real,gg_meas.imag]).ravel()[::self.incrCross]
# for ibl in range(gg_meas.shape[0])[::-1]:
# import pylab
# pylab.clf()
# pylab.subplot(2,1,1)
# pylab.scatter(self.CentralFreqs,np.abs(gg_meas[ibl]))
# pylab.ylim(0,5)
# pylab.subplot(2,1,2)
# pylab.scatter(self.CentralFreqs,np.angle(gg_meas[ibl]))
# pylab.ylim(-np.pi,np.pi)
# pylab.draw()
# pylab.show(False)
# pylab.pause(0.1)
iIter=np.array([0])
tIter=np.array([0],np.float64)
def _f_resid(TecConst,A0,A1,ggmeas,iIter,tIter):
T2=ClassTimeIt("resid")
T2.disable()
TEC,CPhase=TecConst.reshape((2,na))
GThis=TECToZ(TEC.reshape((-1,1)),CPhase.reshape((-1,1)),self.CentralFreqs.reshape((1,-1)))
#T2.timeit("1")
if self.CrossMode:
gg_pred=GThis[A0.ravel(),:]*GThis[A1.ravel(),:].conj()
else:
gg_pred=GThis[A0.ravel(),:]
#T2.timeit("2")
gg_pred_reim=np.array([gg_pred.real,gg_pred.imag]).ravel()[::self.incrCross]
#T2.timeit("3")
r=(ggmeas-gg_pred_reim).ravel()
#print r.shape
#T2.timeit("4")
#return np.angle((ggmeas-gg_pred).ravel())
#print np.mean(np.abs(r))
iIter+=1
#tIter+=T2.timeit("all")
#print iIter[0]
return r
#print _f_resid(TEC0CPhase0,A0,A1,ggmeas)
Sol=least_squares(_f_resid,
TEC0CPhase0.ravel(),
#method="trf",
method="lm",
args=(A0,A1,gg_meas_reim,iIter,tIter),
ftol=1e-2,gtol=1e-2,xtol=1e-2)#,ftol=1,gtol=1,xtol=1,max_nfev=1)
#Sol=leastsq(_f_resid, TEC0CPhase0.ravel(), args=(A0,A1,gg_meas_reim,iIter),ftol=1e-2,gtol=1e-2,xtol=1e-2)
#T.timeit("Done %3i %3i %5i"%(it,iDir,iIter[0]))
#print "total time f=%f"%tIter[0]
TEC,CPhase=Sol.x.reshape((2,na))
TEC-=TEC[0]
CPhase-=CPhase[0]
GThis=np.abs(GOut[it,:,:,iDir,0,0]).T*TECToZ(TEC.reshape((-1,1)),CPhase.reshape((-1,1)),self.CentralFreqs.reshape((1,-1)))
T.timeit("done")
return GThis.T,TEC,CPhase
# # ###########################
# TEC0,CPhase0=TEC0CPhase0
# GThis0=TECToZ(TEC0.reshape((-1,1)),CPhase0.reshape((-1,1)),self.CentralFreqs.reshape((1,-1)))
# for iAnt in range(na):
# print "!!!!!!!!!!!!!!",iAnt,iDir
# ga=GOut[it,:,iAnt,iDir,0,0]
# ge=GThis[iAnt,:]
# ge0=GThis0[iAnt,:]
# #if iAnt==0: continue
# #f=np.linspace(self.CentralFreqs.min(),self.CentralFreqs.max(),100)
# #ztec=TECToZ(TECGrid.ravel()[iTec],CPhase.ravel()[iTec],f)
# import pylab
# pylab.clf()
# pylab.subplot(1,2,1)
# pylab.scatter(self.CentralFreqs,np.abs(ga),color="black")
# pylab.plot(self.CentralFreqs,np.abs(ge),ls=":",color="black")
# pylab.subplot(1,2,2)
# pylab.scatter(self.CentralFreqs,np.angle(ga),color="black")
# pylab.plot(self.CentralFreqs,np.angle(ge),ls=":",color="black")
# pylab.plot(self.CentralFreqs,np.angle(ge0),ls=":",color="red")
# #pylab.plot(f,np.angle(ztec),ls=":",color="black")
# pylab.ylim(-np.pi,np.pi)
# pylab.draw()
# pylab.show(False)
# pylab.pause(0.1)
# # ###############################
def FitThisPolyAmp(self,iDir):
nt,nch,na,nd,_,_=self.Sols.G.shape
GOut=NpShared.GiveArray("%sGOut"%IdSharedMem)
g=GOut[:,:,:,iDir,0,0]
AmpMachine=ClassFitAmp.ClassFitAmp(self.Sols.G[:,:,:,iDir,0,0],self.CentralFreqs,RemoveMedianAmp=self.RemoveMedianAmp)
gf=AmpMachine.doSmooth()
#print "Done %i"%iDir
gf=gf*g/np.abs(g)
GOut[:,:,:,iDir,0,0]=gf[:,:,:]
GOut[:,:,:,iDir,1,1]=gf[:,:,:]
def ClipThisDir(self,iDir):
nt,nch,na,nd,_,_=self.Sols.G.shape
GOut=NpShared.GiveArray("%sGOut"%IdSharedMem)
# g=GOut[:,:,:,iDir,0,0]
AmpMachine=ClassClip.ClassClip(self.Sols.G[:,:,:,iDir,0,0],self.CentralFreqs,RemoveMedianAmp=self.RemoveMedianAmp)
gf=AmpMachine.doClip()
GOut[:,:,:,iDir,0,0]=gf[:,:,:]
AmpMachine=ClassClip.ClassClip(self.Sols.G[:,:,:,iDir,1,1],self.CentralFreqs,RemoveMedianAmp=self.RemoveMedianAmp)
gf=AmpMachine.doClip()
GOut[:,:,:,iDir,1,1]=gf[:,:,:]
def FitThisAmp(self,iAnt,iDir):
nt,nch,na,nd,_,_=self.Sols.G.shape
# if "TEC" in self.InterpMode:
# for it in range(nt):
# gz,t0,c0=self.FitThisTECTime(it,iAnt,iDir)
# GOut=NpShared.GiveArray("%sGOut"%IdSharedMem)
# GOut[it,:,iAnt,iDir,0,0]=gz
# GOut[it,:,iAnt,iDir,1,1]=gz
if self.Amp_SmoothType=="Poly":
for it in range(nt):
self.FitThisAmpTimePoly(it,iAnt,iDir)
elif self.Amp_SmoothType=="Gauss":
self.GaussSmoothAmp(iAnt,iDir)
def EstimateThisTECTime(self,it,iAnt,iDir):
GOut=NpShared.GiveArray("%sGOut"%IdSharedMem)
g=GOut[it,:,iAnt,iDir,0,0]
g0=g/np.abs(g)
W=np.ones(g0.shape,np.float32)
W[g==1.]=0
Z=self.Z
for iTry in range(5):
R=(g0.reshape((1,-1))-Z)*W.reshape((1,-1))
Chi2=np.sum(np.abs(R)**2,axis=1)
iTec=np.argmin(Chi2)
rBest=R[iTec]
if np.max(np.abs(rBest))==0: break
Sig=np.sum(np.abs(rBest*W))/np.sum(W)
ind=np.where(np.abs(rBest*W)>5.*Sig)[0]
if ind.size==0: break
W[ind]=0
# gz=TECToZ(TECGrid.ravel()[iTec],CPhase.ravel()[iTec],self.CentralFreqs)
# import pylab
# pylab.clf()
# pylab.subplot(2,1,1)
# pylab.scatter(self.CentralFreqs,rBest)
# pylab.scatter(self.CentralFreqs[ind],rBest[ind],color="red")
# pylab.subplot(2,1,2)
# pylab.scatter(self.CentralFreqs,rBest)
# pylab.scatter(self.CentralFreqs[ind],rBest[ind],color="red")
# pylab.draw()
# pylab.show()
# # ###########################
# print iAnt,iDir
# if iAnt==0: return
# f=np.linspace(self.CentralFreqs.min(),self.CentralFreqs.max(),100)
# ztec=TECToZ(TECGrid.ravel()[iTec],CPhase.ravel()[iTec],f)
# import pylab
# pylab.clf()
# pylab.subplot(1,2,1)
# pylab.scatter(self.CentralFreqs,np.abs(g),color="black")
# pylab.plot(self.CentralFreqs,np.abs(gz),ls=":",color="black")
# pylab.plot(self.CentralFreqs,np.abs(gz)-np.abs(g),ls=":",color="red")
# pylab.subplot(1,2,2)
# pylab.scatter(self.CentralFreqs,np.angle(g),color="black")
# pylab.plot(self.CentralFreqs,np.angle(gz),ls=":",color="black")
# pylab.plot(self.CentralFreqs,np.angle(gz)-np.angle(g),ls=":",color="red")
# #pylab.plot(f,np.angle(ztec),ls=":",color="black")
# pylab.ylim(-np.pi,np.pi)
# pylab.draw()
# pylab.show(False)
# pylab.pause(0.1)
# # ###############################
t0=self.TECGrid.ravel()[iTec]
c0=self.CPhase.ravel()[iTec]
gz=np.abs(g)*TECToZ(t0,c0,self.CentralFreqs)
return gz,t0,c0
def FitThisAmpTimePoly(self,it,iAnt,iDir):
GOut=NpShared.GiveArray("%sGOut"%IdSharedMem)
g=GOut[it,:,iAnt,iDir,0,0]
g0=np.abs(g)
W=np.ones(g0.shape,np.float32)
W[g0==1.]=0
if np.count_nonzero(W)<self.Amp_PolyOrder*3: return
for iTry in range(5):
if np.max(W)==0: return
z = np.polyfit(self.CentralFreqs, g0, self.Amp_PolyOrder,w=W)
p = np.poly1d(z)
gz=p(self.CentralFreqs)*g/np.abs(g)
rBest=(g0-gz)
if np.max(np.abs(rBest))==0: break
Sig=np.sum(np.abs(rBest*W))/np.sum(W)
ind=np.where(np.abs(rBest*W)>5.*Sig)[0]
if ind.size==0: break
W[ind]=0
GOut[it,:,iAnt,iDir,0,0]=gz
GOut[it,:,iAnt,iDir,1,1]=gz
def GaussSmoothAmp(self,iAnt,iDir):
#print iAnt,iDir
GOut=NpShared.GiveArray("%sGOut"%IdSharedMem)
g=GOut[:,:,iAnt,iDir,0,0]
g0=np.abs(g)
sg0=scipy.ndimage.filters.gaussian_filter(g0,self.Amp_GaussKernel)
gz=sg0*g/np.abs(g)
#print iAnt,iDir,GOut.shape,gz.shape
GOut[:,:,iAnt,iDir,0,0]=gz[:,:]
GOut[:,:,iAnt,iDir,1,1]=gz[:,:]
#print np.max(GOut[:,:,iAnt,iDir,0,0]-gz[:,:])
# def smoothGPR(self):
# nt,nch,na,nd,_,_=self.GOut.shape
def SpacialSmoothTEC(self):
log.print("Do the spacial smoothing...")
t=table("/data/tasse/P025+41/L593429_SB132_uv.pre-cal_12A2A9C48t_148MHz.pre-cal.ms/ANTENNA")
X,Y,Z=t.getcol("POSITION").T
dx=X.reshape((-1,1))-X.reshape((1,-1))
dy=Y.reshape((-1,1))-Y.reshape((1,-1))
dz=Z.reshape((-1,1))-Z.reshape((1,-1))
D=np.sqrt(dx**2+dy**2+dz**2)
D0=500.
WW=np.exp(-D**2/(2.*D0**2))
WWsum=np.sum(WW,axis=0)
nt,nch,na,nd,_,_=self.GOut.shape
nt,nd,na = self.TECArray.shape
for it in range(nt):
for iDir in range(nd):
TEC=Tec=self.TECArray[it,iDir]
TMean=np.dot(WW,Tec.reshape((-1,1))).ravel()
TMean/=WWsum.ravel()
# import pylab
# pylab.clf()
# pylab.plot(TEC.ravel())
# pylab.plot(TMean.ravel())
# pylab.draw()
# pylab.show(False)
# pylab.pause(0.5)
# stop
self.TECArray[it,iDir,:]=TMean[:]
CPhase=self.CPhaseArray[it,iDir]
CPMean=np.dot(WW,CPhase.reshape((-1,1))).ravel()
CPMean/=WWsum.ravel()
self.CPhaseArray[it,iDir,:]=CPMean[:]
z=np.abs(self.GOut[it,:,:,iDir,0,0]).T*TECToZ(TMean.reshape((-1,1)),
CPMean.reshape((-1,1)),
self.CentralFreqs.reshape((1,-1)))
self.GOut[it,:,:,iDir,0,0]=z.T
self.GOut[it,:,:,iDir,1,1]=z.T
def Save(self):
OutFile=self.OutSolsName
if not ".npz" in OutFile: OutFile+=".npz"
#self.SpacialSmoothTEC()
if "TEC" in self.InterpMode:
# OutFileTEC="%s.TEC_CPhase.npz"%OutFile
# log.print(" Saving TEC/CPhase solution file as: %s"%OutFileTEC)
# np.savez(OutFileTEC,
# TEC=self.TECArray,
# CPhase=self.CPhaseArray)
self.DicoFile["SolsTEC"]=self.TECArray
self.DicoFile["SolsCPhase"]=self.CPhaseArray
log.print(" Saving interpolated solution file as: %s"%OutFile)
self.DicoFile["SmoothMode"]=self.InterpMode
self.DicoFile["SolsOrig"]=copy.deepcopy(self.DicoFile["Sols"])
self.DicoFile["SolsOrig"]["G"][:]=self.DicoFile["Sols"]["G"][:]
self.DicoFile["Sols"]["G"][:]=self.GOut[:]
np.savez(OutFile,**(self.DicoFile))
# import PlotSolsIm
# G=self.DicoFile["Sols"]["G"].view(np.recarray)
# iAnt,iDir=10,0
# import pylab
# pylab.clf()
# A=self.GOut[:,:,iAnt,iDir,0,0]
# B=G[:,:,iAnt,iDir,0,0]
# Gs=np.load(OutFile)["Sols"]["G"].view(np.recarray)
# C=Gs[:,:,iAnt,iDir,0,0]
# pylab.subplot(1,3,1)
# pylab.imshow(np.abs(A).T,interpolation="nearest",aspect="auto")
# pylab.subplot(1,3,2)
# pylab.imshow(np.abs(B).T,interpolation="nearest",aspect="auto")
# pylab.subplot(1,3,3)
# pylab.imshow(np.abs(C).T,interpolation="nearest",aspect="auto")
# pylab.draw()
# pylab.show()
# PlotSolsIm.Plot([self.DicoFile["Sols"].view(np.recarray)])
NpShared.DelAll("%s"%IdSharedMem)
# ############################################
def test():
FileName="L401839.killms_f_ap_deep.merged.npz"
OutFile="TestMerge.Interp.npz"
CI=ClassInterpol(FileName,OutFile)
CI.InterpolParallel()
return CI.Save()
def main(options=None):
    if options is None:
f = open(SaveName,'rb')
options = pickle.load(f)
#FileName="killMS.KAFCA.sols.npz"
if options.SolsFileIn is None or options.SolsFileOut is None:
raise RuntimeError("You have to specify In/Out solution file names")
CI=ClassInterpol(options.SolsFileIn,
options.SolsFileOut,
InterpMode=options.InterpMode,
Amp_PolyOrder=options.Amp_PolyOrder,
Amp_GaussKernel=options.Amp_GaussKernel,
Amp_SmoothType=options.Amp_SmoothType,
NCPU=options.NCPU,CrossMode=options.CrossMode,RemoveMedianAmp=options.RemoveMedianAmp)
CI.InterpolParallel()
CI.Save()
if __name__=="__main__":
read_options()
f = open(SaveName,'rb')
options = pickle.load(f)
main(options=options)
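# Example invocation (illustrative; the file names below are placeholders):
#   python SmoothSols.py --SolsFileIn=killMS.KAFCA.sols.npz \
#                        --SolsFileOut=killMS.KAFCA.sols.smoothed.npz \
#                        --InterpMode=TEC,Amp --NCPU=4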
|
saopicc/killMS
|
killMS/SmoothSols.py
|
Python
|
gpl-2.0
| 28,755
|
[
"Gaussian"
] |
ef81d54216ced000dc9dad51ac4cf4278067c2b2d2c3c2afad00ca1175af1fc7
|
#lab04_hog.py
from scipy.ndimage import filters
from PIL import Image
from pylab import *
def Gausian_response(img,sigma=1):
""" Compute Gaussian response function
for each pixel in a graylevel image. """
# Gausian response
img_sigma = zeros(img.shape)
filters.gaussian_filter(img, (sigma,sigma), (0,0), img_sigma)
return img_sigma
def getKeypoints(img):
sigma1=4
keypoints=ones(img.shape, dtype=bool)
img_sigma1=Gausian_response(img,sigma1)
    for i in range(8):
        # difference-of-Gaussians response between two consecutive scales
        img_sigma1=Gausian_response(img,sigma1)
        sigma2=sigma1*1.414
        img_sigma2=Gausian_response(img,sigma2)
        keypoints[keypoints]=(img_sigma2[keypoints]-img_sigma1[keypoints])>14
        # advance to the next scale (factor ~sqrt(2)) so each pass tests a new DoG level
        sigma1=sigma2
y,x = nonzero(keypoints)
return x,y
def getHog(img, posx,posy):
    '''Compute an 8-bin histogram-of-gradients descriptor for the 16x16 patch
    centred at (posx, posy). Note that this function may access out-of-bounds
    pixels near the image border.'''
sigma=4
posx=int(posx)
posy=int(posy)
img8=img[posx-8:posx+8,posy-8:posy+8]
imgx = zeros(img8.shape)
filters.gaussian_filter(img8, (sigma,sigma), (0,1), imgx)
imgy = zeros(img8.shape)
filters.gaussian_filter(img8, (sigma,sigma), (1,0), imgy)
theta=arctan2(imgy,imgx)
t=4*(theta.flatten()/pi+1)
t=t.astype(int)
#generate histogram
desc = zeros(8)
desc=bincount(t,None,8)
#shift the max to the left
n=desc.argmax()
desc=concatenate((desc[n:],desc[:n]),axis=0)
desc=desc/linalg.norm(desc)*0.99999
return desc
subplot(1,2,1)
img = array(Image.open('img/remote1.jpg').convert('L'))
set_cmap('gray')
imshow(img)
x,y=getKeypoints(img)
plot(x,y,'rx')
title('remote1')
subplot(1,2,2)
img = array(Image.open('img/remote2.jpg').convert('L'))
set_cmap('gray')
imshow(img)
x,y=getKeypoints(img)
plot(x,y,'rx')
title('remote2')
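# --- Illustrative sketch (not part of the original lab) ----------------------
# One simple way to compare the two images is nearest-neighbour matching of the
# HOG descriptors computed by getHog(). The helper below is hypothetical: its
# name, the (x, y) point format and the max_dist threshold are assumptions.
def match_descriptors(img1, pts1, img2, pts2, max_dist=0.5):
    """For each keypoint in pts1, find the closest descriptor in pts2 (by
    Euclidean distance) and keep the pair if it is closer than max_dist."""
    import numpy as np
    d1 = np.array([getHog(img1, px, py) for px, py in pts1])
    d2 = np.array([getHog(img2, px, py) for px, py in pts2])
    matches = []
    for i, d in enumerate(d1):
        dists = np.linalg.norm(d2 - d, axis=1)
        j = int(np.argmin(dists))
        if dists[j] < max_dist:
            matches.append((i, j))
    return matches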
|
wasit7/cs634
|
2016/lab4_matching.py
|
Python
|
bsd-2-clause
| 1,841
|
[
"Gaussian"
] |
b42448491a77c7cc6180982ecce328b00921d2b024d27ccfaece123d765083d3
|
r"""
==================================
Constants (:mod:`scipy.constants`)
==================================
.. currentmodule:: scipy.constants
Physical and mathematical constants and units.
Mathematical constants
======================
================ =================================================================
``pi`` Pi
``golden`` Golden ratio
``golden_ratio`` Golden ratio
================ =================================================================
Physical constants
==================
=========================== =================================================================
``c`` speed of light in vacuum
``speed_of_light`` speed of light in vacuum
``mu_0`` the magnetic constant :math:`\mu_0`
``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0`
``h`` the Planck constant :math:`h`
``Planck`` the Planck constant :math:`h`
``hbar`` :math:`\hbar = h/(2\pi)`
``G`` Newtonian constant of gravitation
``gravitational_constant`` Newtonian constant of gravitation
``g`` standard acceleration of gravity
``e`` elementary charge
``elementary_charge`` elementary charge
``R`` molar gas constant
``gas_constant`` molar gas constant
``alpha`` fine-structure constant
``fine_structure`` fine-structure constant
``N_A`` Avogadro constant
``Avogadro`` Avogadro constant
``k`` Boltzmann constant
``Boltzmann`` Boltzmann constant
``sigma`` Stefan-Boltzmann constant :math:`\sigma`
``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma`
``Wien`` Wien displacement law constant
``Rydberg`` Rydberg constant
``m_e`` electron mass
``electron_mass`` electron mass
``m_p`` proton mass
``proton_mass`` proton mass
``m_n`` neutron mass
``neutron_mass`` neutron mass
=========================== =================================================================
Constants database
------------------
In addition to the above variables, :mod:`scipy.constants` also contains the
2014 CODATA recommended values [CODATA2014]_ database containing more physical
constants.
.. autosummary::
:toctree: generated/
value -- Value in physical_constants indexed by key
unit -- Unit in physical_constants indexed by key
precision -- Relative precision in physical_constants indexed by key
find -- Return list of physical_constant keys with a given string
ConstantWarning -- Constant sought not in newest CODATA data set
.. data:: physical_constants
Dictionary of physical constants, of the format
``physical_constants[name] = (value, unit, uncertainty)``.
Available constants:
====================================================================== ====
%(constant_names)s
====================================================================== ====
Units
=====
SI prefixes
-----------
============ =================================================================
``yotta`` :math:`10^{24}`
``zetta`` :math:`10^{21}`
``exa`` :math:`10^{18}`
``peta`` :math:`10^{15}`
``tera`` :math:`10^{12}`
``giga`` :math:`10^{9}`
``mega`` :math:`10^{6}`
``kilo`` :math:`10^{3}`
``hecto`` :math:`10^{2}`
``deka`` :math:`10^{1}`
``deci`` :math:`10^{-1}`
``centi`` :math:`10^{-2}`
``milli`` :math:`10^{-3}`
``micro`` :math:`10^{-6}`
``nano`` :math:`10^{-9}`
``pico`` :math:`10^{-12}`
``femto`` :math:`10^{-15}`
``atto`` :math:`10^{-18}`
``zepto`` :math:`10^{-21}`
============ =================================================================
Binary prefixes
---------------
============ =================================================================
``kibi`` :math:`2^{10}`
``mebi`` :math:`2^{20}`
``gibi`` :math:`2^{30}`
``tebi`` :math:`2^{40}`
``pebi`` :math:`2^{50}`
``exbi`` :math:`2^{60}`
``zebi`` :math:`2^{70}`
``yobi`` :math:`2^{80}`
============ =================================================================
Mass
----
================= ============================================================
``gram`` :math:`10^{-3}` kg
``metric_ton`` :math:`10^{3}` kg
``grain`` one grain in kg
``lb`` one pound (avoirdupous) in kg
``pound`` one pound (avoirdupous) in kg
``blob`` one inch version of a slug in kg (added in 1.0.0)
``slinch`` one inch version of a slug in kg (added in 1.0.0)
``slug`` one slug in kg (added in 1.0.0)
``oz`` one ounce in kg
``ounce`` one ounce in kg
``stone`` one stone in kg
``grain`` one grain in kg
``long_ton`` one long ton in kg
``short_ton`` one short ton in kg
``troy_ounce`` one Troy ounce in kg
``troy_pound`` one Troy pound in kg
``carat`` one carat in kg
``m_u`` atomic mass constant (in kg)
``u`` atomic mass constant (in kg)
``atomic_mass`` atomic mass constant (in kg)
================= ============================================================
Angle
-----
================= ============================================================
``degree`` degree in radians
``arcmin`` arc minute in radians
``arcminute`` arc minute in radians
``arcsec`` arc second in radians
``arcsecond`` arc second in radians
================= ============================================================
Time
----
================= ============================================================
``minute`` one minute in seconds
``hour`` one hour in seconds
``day`` one day in seconds
``week`` one week in seconds
``year`` one year (365 days) in seconds
``Julian_year`` one Julian year (365.25 days) in seconds
================= ============================================================
Length
------
===================== ============================================================
``inch`` one inch in meters
``foot`` one foot in meters
``yard`` one yard in meters
``mile`` one mile in meters
``mil`` one mil in meters
``pt`` one point in meters
``point`` one point in meters
``survey_foot`` one survey foot in meters
``survey_mile`` one survey mile in meters
``nautical_mile`` one nautical mile in meters
``fermi`` one Fermi in meters
``angstrom`` one Angstrom in meters
``micron`` one micron in meters
``au`` one astronomical unit in meters
``astronomical_unit`` one astronomical unit in meters
``light_year`` one light year in meters
``parsec`` one parsec in meters
===================== ============================================================
Pressure
--------
================= ============================================================
``atm`` standard atmosphere in pascals
``atmosphere`` standard atmosphere in pascals
``bar`` one bar in pascals
``torr`` one torr (mmHg) in pascals
``mmHg`` one torr (mmHg) in pascals
``psi`` one psi in pascals
================= ============================================================
Area
----
================= ============================================================
``hectare`` one hectare in square meters
``acre`` one acre in square meters
================= ============================================================
Volume
------
=================== ========================================================
``liter`` one liter in cubic meters
``litre`` one liter in cubic meters
``gallon`` one gallon (US) in cubic meters
``gallon_US`` one gallon (US) in cubic meters
``gallon_imp`` one gallon (UK) in cubic meters
``fluid_ounce`` one fluid ounce (US) in cubic meters
``fluid_ounce_US`` one fluid ounce (US) in cubic meters
``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters
``bbl`` one barrel in cubic meters
``barrel`` one barrel in cubic meters
=================== ========================================================
Speed
-----
================== ==========================================================
``kmh`` kilometers per hour in meters per second
``mph`` miles per hour in meters per second
``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second
``speed_of_sound`` one Mach (approx., at 15 C, 1 atm) in meters per second
``knot`` one knot in meters per second
================== ==========================================================
Temperature
-----------
===================== =======================================================
``zero_Celsius`` zero of Celsius scale in Kelvin
``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins
===================== =======================================================
.. autosummary::
:toctree: generated/
convert_temperature
Energy
------
==================== =======================================================
``eV`` one electron volt in Joules
``electron_volt`` one electron volt in Joules
``calorie`` one calorie (thermochemical) in Joules
``calorie_th`` one calorie (thermochemical) in Joules
``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules
``erg`` one erg in Joules
``Btu`` one British thermal unit (International Steam Table) in Joules
``Btu_IT`` one British thermal unit (International Steam Table) in Joules
``Btu_th`` one British thermal unit (thermochemical) in Joules
``ton_TNT`` one ton of TNT in Joules
==================== =======================================================
Power
-----
==================== =======================================================
``hp`` one horsepower in watts
``horsepower`` one horsepower in watts
==================== =======================================================
Force
-----
==================== =======================================================
``dyn`` one dyne in newtons
``dyne`` one dyne in newtons
``lbf`` one pound force in newtons
``pound_force`` one pound force in newtons
``kgf`` one kilogram force in newtons
``kilogram_force`` one kilogram force in newtons
==================== =======================================================
Optics
------
.. autosummary::
:toctree: generated/
lambda2nu
nu2lambda
References
==========
.. [CODATA2014] CODATA Recommended Values of the Fundamental
Physical Constants 2014.
https://physics.nist.gov/cuu/Constants/
"""
from __future__ import division, print_function, absolute_import
# Modules contributed by BasSw (wegwerp@gmail.com)
from .codata import *
from .constants import *
from .codata import _obsolete_constants
_constant_names = [(_k.lower(), _k, _v)
for _k, _v in physical_constants.items()
if _k not in _obsolete_constants]
_constant_names = "\n".join(["``%s``%s %s %s" % (_x[1], " "*(66-len(_x[1])),
_x[2][0], _x[2][1])
for _x in sorted(_constant_names)])
if __doc__ is not None:
__doc__ = __doc__ % dict(constant_names=_constant_names)
del _constant_names
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
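# --- Usage sketch (illustrative, added for clarity; not part of the module) --
# The CODATA database documented above can be queried as follows:
#
#   from scipy import constants
#   constants.c                                    # speed of light in m/s
#   constants.value('Avogadro constant')           # numerical value
#   constants.unit('Avogadro constant')            # 'mol^-1'
#   constants.precision('electron mass')           # relative uncertainty
#   constants.find('Boltzmann')                    # matching dictionary keys
#   constants.physical_constants['electron mass']  # (value, unit, uncertainty)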
|
lhilt/scipy
|
scipy/constants/__init__.py
|
Python
|
bsd-3-clause
| 12,194
|
[
"Avogadro"
] |
652aec33804958c190973849e3f361b813ec785b1b55bc0d61169dfd1b55a83a
|
from datetime import datetime
from pytz import timezone
WEDDING_DATE = datetime(2015, 04, 10, 14, 0, tzinfo=timezone('US/Central'))
ENGAGEMENT_DATE = 'Sunday, May 11, 2014'
ABOUT_TEXT = {
'about_groom': 'Kyle grew up in Wisconsin until he was 19 years of age. He then moved to San Francisco, California to ' +
'attend the Academy of Art University where he studied 3D character animation. After graduation he ' +
'landed a job in Austin, TX at a studio that developed Playstation 3 games.',
'about_bride': 'Emily was born in Canada but is a Texan at heart. She grew up in Dallas and moved to Austin to attend the ' +
                   'University of Texas. After a short time as a high school algebra teacher in Austin, she returned to the ' +
'University as a software developer.',
'engagement_story': 'It was a warm late spring Texas day with a slight breeze. Kyle and Emily decide to spend the afternoon at the '+
'Driftwood Winery where they are members. Kyle packed a picnic bag with the usuals but unknown to Emily there '+
'was also a hidden treasure. After talking most of the afternoon away, Kyle began to tell Emily about how amazing '+
                        'the last 2 years of his life have been with her. He then got down on one knee and asked if she would like to marry him. '+
'She said YES!',
    'how_we_met': 'With 3 days left on Kyle\'s match.com account, and only 3 days into the start of Emily\'s match.com account ' +
'(Emily always was the lucky one), little did they know, this would be the start of an amazing adventure. ' +
'After a few messages back and forth, Kyle cut to the chase and asked Emily to meet up at the local coffee shop. ' +
'It was a beautiful spring afternoon in downtown Austin. They talked for hours before realizing they hadn\'t ordered ' +
'their coffee or tea yet. The rest, as they say, is history!'
}
DETAILS_TEXT = {
'ceremony': 'Our ceremony will be outdoors at the Winfield Inn at 6pm. In case of inclement weather, we will be married in a covered pavilion at the venue.',
'reception': 'Cocktail hour, dinner and dancing will follow at the Winfield Inn.',
'dress_code': 'Dressy Casual',
'driving_directions_text': 'We invite you to join us at the Winfield Inn at 900 Scott Street in Kyle, Texas. It\'s about 20 miles south of downtown Austin. ' +
'Expect some traffic on the way to the venue since it will be Friday evening. Plenty of parking will be available at the venue.',
'map_url': 'https://www.google.com/maps/embed/v1/place?q=The%20Winfield%20Inn%2C%20Scott%20Street%2C%20Kyle%2C%20TX%2C%20United%20States&key=AIzaSyD51KGRYE17Jnmouarr0-VubPV8Q_KXUpY'
}
HOTEL_TEXT = {
'accomodations_text': '<p>We will be sharing our wedding weekend with many exciting events in Austin. ' +
'Unfortunately, this means that we are unable to reserve a block at any of the hotels in town.' +
'</p>' +
'<p>' +
'We suggest taking advantage of the usual travel sites (expedia.com, hotels.com, etc) as well ' +
'as trying out <a href="https://www.airbnb.com/">airbnb</a> and <a href="http://www.homeaway.com/">HomeAway</a>.' +
'</p>',
'hotel_list': '',
'activity_list': ''
}
|
rockmans/KyleAndEmily
|
kyleandemily/site_text.py
|
Python
|
mit
| 3,563
|
[
"exciting"
] |
06d249e887ab91520612ec8aa78385d7cc7d2498d8f41bad50cad713d745439f
|
from _collections import OrderedDict
import numpy as np
import theano
import theano.tensor as TT
from . import neuron
from .learned_termination import LearnedTermination
class hPESTermination(LearnedTermination):
#theta_tau = 0.02
# The scaling factor is to increase
# the influence of unsupervised learning
scaling_factor = 10.
theta_tau = .020 # from nengo
supervision_ratio = 0.5 # from nengo
learning_rate = 5e-7 # from nengo
    def __init__(self, *args, **kwargs):
        """Accept an optional ``supervision_ratio`` keyword argument and pass
        the remaining arguments on to LearnedTermination."""
        if 'supervision_ratio' in kwargs:
            self.supervision_ratio = kwargs['supervision_ratio']
            del kwargs['supervision_ratio']
        super(hPESTermination, self).__init__(*args, **kwargs)
# get the theano instantaneous spike raster
# of the pre- and post-synaptic neurons
self.pre_spikes = self.pre.neurons.output
self.post_spikes = self.post.neurons.output
# get the decoded error signal
self.error_value = self.error.decoded_output
# get gains (alphas) for post neurons
self.encoders = self.post.encoders.astype('float32')
self.gains = np.sqrt(
(self.post.encoders ** 2).sum(axis=-1)).astype('float32')
self.initial_theta = np.float32(np.random.uniform(low=5e-5, high=15e-5,
size=(self.post.array_size, self.post.neurons_num)))
# Assumption: high gain -> high theta
self.initial_theta *= self.gains
self.theta = theano.shared(self.initial_theta, name='hPES.theta')
self.pre_filtered = theano.shared(
self.pre_spikes.get_value(), name='hPES.pre_filtered')
self.post_filtered = theano.shared(
self.post_spikes.get_value(), name='hPES.post_filtered')
def reset(self):
"""
"""
super(hPESTermination, self).reset()
self.theta.set_value(self.initial_theta)
    def learn(self):
        """Return the updated connection weight matrix, combining a supervised
        (error-driven) term and an unsupervised (BCM-like) term."""
# get the error as represented by the post neurons
# should be a vector, (post_neurons x error.dimensions)
encoded_error = TT.sum(self.encoders * TT.reshape( self.error_value,
(self.post.array_size, 1, self.post.dimensions)) , axis=-1)
supervised_rate = self.learning_rate
#TODO: more efficient rewrite with theano batch command?
# this will be a matrix, same size as the connection weight matrix
delta_supervised = [
encoded_error[i % self.post.array_size][:,None] *
supervised_rate * self.pre_filtered[self.pre_index(i)][None,:]
for i in range(self.post.array_size * self.pre.array_size) ]
unsupervised_rate = TT.cast(
self.learning_rate * self.scaling_factor, dtype='float32')
#TODO: more efficient rewrite with theano batch command?
# this will be a matrix, same size as the connection weight matrix
delta_unsupervised = [
(
self.post_filtered[i % self.post.array_size] *
(
self.post_filtered[i % self.post.array_size] -
self.theta[i % self.post.array_size]
) *
self.gains[i % self.post.array_size]
)[:,None]
* unsupervised_rate * self.pre_filtered[self.pre_index(i)][None,:]
for i in range(self.post.array_size * self.pre.array_size) ]
new_wm = (self.weight_matrix
+ TT.cast(self.supervision_ratio, 'float32') * delta_supervised
+ TT.cast(1. - self.supervision_ratio, 'float32')
* delta_unsupervised)
return new_wm
def pre_index(self, i):
"""This method calculates the index of the pre-synaptic ensemble
that should be accessed given a current index value i
int(np.ceil((i + 1) / float(self.post.array_size)) - 1)
generates 0 post.array_size times, then 1 post.array_size times,
then 2 post.array_size times, etc so with
pre.array_size = post.array_size = 2 we're connecting it up in order
[pre[0]-post[0], pre[0]-post[1], pre[1]-post[0], pre[1]-post[1]]
:param int i: the current index value,
value from 0 to post.array_size * pre.array_size
:returns: the desired pre-synaptic ensemble index
"""
return int(np.ceil((i + 1) / float(self.post.array_size)) - 1)
    def update(self, dt):
        """Return the Theano updates: the filtered pre/post activity traces,
        the sliding threshold theta, and the learned weight matrix."""
# update filtered inputs
alpha = TT.cast(dt / self.pstc, dtype='float32')
new_pre = self.pre_filtered + alpha * (
self.pre_spikes - self.pre_filtered)
new_post = self.post_filtered + alpha * (
self.post_spikes - self.post_filtered)
# update theta
alpha = TT.cast(dt / self.theta_tau, dtype='float32')
new_theta = self.theta + alpha * (new_post - self.theta)
return OrderedDict({
self.weight_matrix: self.learn(),
self.pre_filtered: new_pre,
self.post_filtered: new_post,
self.theta: new_theta,
})
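# Summary of the rule implemented in learn() above (added for clarity): for a
# connection from pre neuron j to post neuron i, the weight change is
#   dw_ij = S * r * e_i * a_j
#         + (1 - S) * r * k * a_i * (a_i - theta_i) * gain_i * a_j
# where S is supervision_ratio, r the learning_rate, k the scaling_factor,
# e_i the error encoded by the post neuron, a_j / a_i the filtered pre / post
# activities (pre_filtered / post_filtered), theta_i the sliding threshold and
# gain_i the post neuron gain (self.gains).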
|
ctn-waterloo/nengo_theano
|
nengo_theano/hPES_termination.py
|
Python
|
mit
| 5,171
|
[
"NEURON"
] |
6fe77d2949303594677a1627786a934ab0a8e59269fd3aa85fc6c2bdc635162a
|
# coding=utf-8
from datetime import datetime
from time import sleep
import plotly.graph_objs as go
import plotly.plotly as py
import ms5637
__author__ = 'Moe'
__copyright__ = 'Copyright 2017 Moe'
__license__ = 'MIT'
__version__ = '0.0.2'
# Bari sensor of MS5637
sensor = ms5637.Chip()
YUPPER = 40
YLOWER = -40
ZERO_SAMPLE = 100
SETSIZE_MINI = 1000
SETSIZE_MAXI = 10000
OVERWRITE = True # overwrites datafile
# OVERWRITE = False # extends datafile
credentials = py.get_credentials()
username = credentials['username']
api_key = credentials['api_key']
stream_token = credentials['stream_ids']
py.sign_in(username, api_key)
stream_level_maxi = {'token': stream_token[0], 'maxpoints': SETSIZE_MAXI}
stream_level_mini = {'token': stream_token[1], 'maxpoints': SETSIZE_MINI}
trace_maxi = {
'stream': stream_level_maxi,
"x": [],
"y": [],
"hoverinfo": "y+x",
"mode": "lines",
"name": "About 100X that ",
"type": "scatter",
"xaxis": "x",
"yaxis": "y",
"line": {"shape": "spline", "width": 1}
}
trace_mini = {
'stream': stream_level_mini,
"x": [],
"y": [],
"hoverinfo": "y",
"mode": "lines",
"name": "Last 2 Minutes, or so",
"type": "scatter",
"xaxis": "x2",
"yaxis": "y2",
"line": {"shape": "spline", "width": 2}
}
data = go.Data([trace_maxi, trace_mini])
# data = go.Data([trace_maxi])
layout = {
"autosize": True,
##"hovermode": "closest",
"legend": {"x": 0.44,
"y": 1.03,
"orientation": "v",
"bgcolor": "rgba(209, 205, 205, 0.01)",
"traceorder": "reversed"
},
## "margin": {"r": 50,
## "t": 80,
## "b": 80,
## "l": 80,
## "pad": 30},
"paper_bgcolor": "rgba(209, 205, 205, 0.4)",
"plot_bgcolor": "rgba(249, 245, 245. 0,3)",
"showlegend": True,
"title": "Parramatta River Levels @ Drummoyne Wharf",
"xaxis": {"autorange": True,
"domain": [0, 1],
"showgrid": True,
"showticklabels": True,
"tickfont": {"color": "rgb(14, 10, 19)", "size": 10},
# "side": "top",
"type": "date",
"zeroline": False},
"xaxis2": {"autorange": True,
"anchor": "y2",
"domain": [0, 1],
# "title": "Local Time, Sydney AEDT",
# "side": "top",
"tickfont": {"color": "rgb(48, 13, 89)", "size": 10},
"type": "date"},
"yaxis": {"anchor": "x",
"autorange": True,
"domain": [0.0, 0.5],
"range": [YLOWER, YUPPER],
"showgrid": True,
"showticklabels": True,
"side": "left",
"ticks": "inside",
"title": "Approx CM",
"titlefont": {"size": 10},
"type": "spline",
"zeroline": True},
"yaxis2": {"anchor": "x2",
"autorange": False,
"domain": [0.55, 1.0],
"range": [YLOWER, YUPPER],
"showgrid": True,
"showticklabels": True,
"side": "left",
"ticks": "inside",
"title": "Approx CM",
"titlefont": {"size": 10},
"type": "spline",
"zeroline": True},
}
figure = go.Figure(data=data, layout=layout)
if OVERWRITE is True:
py.plot(figure, filename='BariFFT', auto_open=False) # Overwrites the error you just introduced
else:
py.plot(figure, filename='BariFFT', auto_open=False, fileopt='extend') # Appends trace spools
stream_maxi_data = py.Stream(stream_id=stream_token[0])
stream_mini_data = py.Stream(stream_id=stream_token[1])
stream_maxi_data.open()
stream_mini_data.open()
rightnow = 0
sum_of_current_list = 0.0
current_readings = []
count = 0
count2 = 0
while True:
now = datetime.now()
rightnow = now.strftime('%Y-%m-%d %H:%M:%S.%f')
try:
pressure, temperatue = sensor.get_data()
except OSError:
sensor.__init__()
pressure, temperatue = sensor.get_data()
    # Keep a sliding window of the most recent ZERO_SAMPLE readings and use its
    # mean as a floating zero, so slow barometric drift is removed and only the
    # short-term level changes are streamed.
    if len(current_readings) >= ZERO_SAMPLE:
        sum_of_current_list -= current_readings.pop(0)  # drop the oldest reading
    sum_of_current_list += pressure
    current_readings.append(pressure)
    floating_zero = sum_of_current_list / len(current_readings)  # floating zero
    pressure -= floating_zero
    pressure /= 50
count += 1
stream_mini_data.write({"x": rightnow, "y": pressure})
if count >= 9:
count = 0
count2 += 1
stream_maxi_data.write({"x": rightnow, "y": pressure})
now_counter = int(now.strftime('%H%M'))
# print('*')
if now_counter % 100 == 0 and count2 >= 30: # Every hour
sensor.reset() # recalibrate for ambient temperature/pressure change DDS, does't do shit. Twice an hour, in the same minute
# print('Get knocked down and get back up again')
count2 = 0
# SMOKO
sleep(.1)
stream_maxi_data.close()
stream_mini_data.close()
## End
|
wadda/Bari
|
bari_plotter.py
|
Python
|
mit
| 5,105
|
[
"MOE"
] |
349e6fbe75a8f6ca9d5b9d72d4d213dd35ed9483f23b25100c7f164a26319f79
|
# Copyright (C) 2016
# Jakub Krajniak (jkrajniak at gmail.com)
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************************
espressopp.interaction.Tabulated
********************************
.. function:: espressopp.interaction.Tabulated(itype, filename, cutoff)
:param itype:
:param filename:
:param cutoff: (default: infinity)
:type itype:
:type filename:
:type cutoff:
.. function:: espressopp.interaction.VerletListAdressTabulated(vl, fixedtupleList)
:param vl:
:param fixedtupleList:
:type vl:
:type fixedtupleList:
.. function:: espressopp.interaction.VerletListAdressTabulated.setPotentialAT(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListAdressTabulated.setPotentialCG(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListHadressTabulated(vl, fixedtupleList)
:param vl:
:param fixedtupleList:
:type vl:
:type fixedtupleList:
.. function:: espressopp.interaction.VerletListHadressTabulated.setPotentialAT(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListHadressTabulated.setPotentialCG(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListTabulated(vl)
:param vl:
:type vl:
.. function:: espressopp.interaction.VerletListTabulated.getPotential(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.VerletListTabulated.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.CellListTabulated(stor)
:param stor:
:type stor:
.. function:: espressopp.interaction.CellListTabulated.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.FixedPairListTabulated(system, vl, potential)
:param system:
:param vl:
:param potential:
:type system:
:type vl:
:type potential:
.. function:: espressopp.interaction.FixedPairListTabulated.setPotential(potential)
:param potential:
:type potential:
.. function:: espressopp.interaction.FixedPairListTypesTabulated(system, ftl)
:param system: The Espresso++ system object.
:type system: espressopp.System
:param ftl: The FixedPair list.
:type ftl: espressopp.FixedPairList
.. function:: espressopp.interaction.FixedPairListTypesTabulated.setPotential(type1, type2, potential)
    Defines the pair potential for interactions between particles of types type1 and type2.
:param type1: Type of particle 1.
:type type1: int
:param type2: Type of particle 2.
:type type2: int
:param potential: The potential to set up.
:type potential: espressopp.interaction.Potential
"""
# -*- coding: iso-8859-1 -*-
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_Tabulated, \
interaction_VerletListTabulated, \
interaction_VerletListAdressTabulated, \
interaction_VerletListHadressTabulated, \
interaction_CellListTabulated, \
interaction_FixedPairListTabulated, \
interaction_FixedPairListTypesTabulated
class TabulatedLocal(PotentialLocal, interaction_Tabulated):
def __init__(self, itype, filename, cutoff=infinity):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_Tabulated, itype, filename, cutoff)
class VerletListAdressTabulatedLocal(InteractionLocal, interaction_VerletListAdressTabulated):
def __init__(self, vl, fixedtupleList):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListAdressTabulated, vl, fixedtupleList)
def setPotentialAT(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setPotentialCG(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListHadressTabulatedLocal(InteractionLocal, interaction_VerletListHadressTabulated):
def __init__(self, vl, fixedtupleList):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListHadressTabulated, vl, fixedtupleList)
def setPotentialAT(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setPotentialCG(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListTabulatedLocal(InteractionLocal, interaction_VerletListTabulated):
def __init__(self, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListTabulated, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
class CellListTabulatedLocal(InteractionLocal, interaction_CellListTabulated):
def __init__(self, stor):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_CellListTabulated, stor)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
class FixedPairListTabulatedLocal(InteractionLocal, interaction_FixedPairListTabulated):
def __init__(self, system, vl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedPairListTabulated, system, vl, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
class FixedPairListTypesTabulatedLocal(InteractionLocal, interaction_FixedPairListTypesTabulated):
def __init__(self, system, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedPairListTypesTabulated, system, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
def setFixedPairList(self, fixedpairlist):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setFixedPairList(self, fixedpairlist)
def getFixedPairList(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getFixedPairList(self)
if pmi.isController:
class Tabulated(Potential):
'The Tabulated potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.TabulatedLocal',
pmiproperty = ['itype', 'filename', 'cutoff']
)
class VerletListAdressTabulated(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListAdressTabulatedLocal',
pmicall = ['setPotentialAT', 'setPotentialCG']
)
class VerletListHadressTabulated(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListHadressTabulatedLocal',
pmicall = ['setPotentialAT', 'setPotentialCG']
)
class VerletListTabulated(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListTabulatedLocal',
pmicall = ['setPotential','getPotential']
)
class CellListTabulated(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.CellListTabulatedLocal',
pmicall = ['setPotential']
)
class FixedPairListTabulated(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedPairListTabulatedLocal',
pmicall = ['setPotential', 'setFixedPairList', 'getFixedPairList']
)
class FixedPairListTypesTabulated(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedPairListTypesTabulatedLocal',
pmicall = ['setPotential','getPotential','setFixedPairList','getFixedPairList']
)
| fedepad/espressopp | src/interaction/Tabulated.py | Python | gpl-3.0 | 11,704 | ["ESPResSo"] | afc77e7c109c84e4852d960d2c7e599544529ff933a4e598bd95c9a0703723f1 |
from functools import partial
from gym.spaces import Box, Dict, Tuple
import numpy as np
from scipy.stats import beta, norm
import tree # pip install dm_tree
import unittest
from ray.rllib.models.jax.jax_action_dist import JAXCategorical
from ray.rllib.models.tf.tf_action_dist import (
Beta,
Categorical,
DiagGaussian,
GumbelSoftmax,
MultiActionDistribution,
MultiCategorical,
SquashedGaussian,
)
from ray.rllib.models.torch.torch_action_dist import (
TorchBeta,
TorchCategorical,
TorchDiagGaussian,
TorchMultiActionDistribution,
TorchMultiCategorical,
TorchSquashedGaussian,
)
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.numpy import (
MIN_LOG_NN_OUTPUT,
MAX_LOG_NN_OUTPUT,
softmax,
SMALL_NUMBER,
LARGE_INTEGER,
)
from ray.rllib.utils.test_utils import check, framework_iterator
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
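# Illustrative helper (not part of the original suite): the categorical tests
# below lean on the identity H(p) = -sum_i p_i * log(p_i) with p = softmax(logits).
# A minimal sketch of that computation, assuming only numpy and the softmax
# utility imported above:
def _softmax_entropy_sketch():
    logits = np.array([[0.5, -1.0, 2.0]], dtype=np.float32)
    probs = softmax(logits)
    # Per-row entropy of the induced categorical distribution.
    return -np.sum(probs * np.log(probs), -1)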
class TestDistributions(unittest.TestCase):
"""Tests ActionDistribution classes."""
@classmethod
def setUpClass(cls) -> None:
# Set seeds for deterministic tests (make sure we don't fail
# because of "bad" sampling).
np.random.seed(42 + 1)
torch.manual_seed(42 + 1)
def _stability_test(
self,
distribution_cls,
network_output_shape,
fw,
sess=None,
bounds=None,
extra_kwargs=None,
):
extreme_values = [
0.0,
float(LARGE_INTEGER),
-float(LARGE_INTEGER),
1.1e-34,
1.1e34,
-1.1e-34,
-1.1e34,
SMALL_NUMBER,
-SMALL_NUMBER,
]
inputs = np.zeros(shape=network_output_shape, dtype=np.float32)
        for batch_item in range(network_output_shape[0]):
            for num in range(len(inputs[batch_item])):
                if num < len(inputs[batch_item]) // 2:
                    inputs[batch_item][num] = np.random.choice(extreme_values)
                else:
                    # For Gaussians, the second half of the vector holds the
                    # log standard deviations, and should therefore be the
                    # log of a positive number >= 1.
                    inputs[batch_item][num] = np.log(
                        max(1.0, np.random.choice(extreme_values))
                    )
dist = distribution_cls(inputs, {}, **(extra_kwargs or {}))
for _ in range(100):
sample = dist.sample()
if fw == "jax":
sample_check = sample
elif fw != "tf":
sample_check = sample.numpy()
else:
sample_check = sess.run(sample)
assert not np.any(np.isnan(sample_check))
assert np.all(np.isfinite(sample_check))
if bounds:
assert np.min(sample_check) >= bounds[0]
assert np.max(sample_check) <= bounds[1]
# Make sure bounds make sense and are actually also being
# sampled.
if isinstance(bounds[0], int):
assert isinstance(bounds[1], int)
assert bounds[0] in sample_check
assert bounds[1] in sample_check
logp = dist.logp(sample)
if fw == "jax":
logp_check = logp
elif fw != "tf":
logp_check = logp.numpy()
else:
logp_check = sess.run(logp)
assert not np.any(np.isnan(logp_check))
assert np.all(np.isfinite(logp_check))
def test_categorical(self):
batch_size = 10000
num_categories = 4
# Create categorical distribution with n categories.
inputs_space = Box(
-1.0, 2.0, shape=(batch_size, num_categories), dtype=np.float32
)
inputs_space.seed(42)
values_space = Box(0, num_categories - 1, shape=(batch_size,), dtype=np.int32)
values_space.seed(42)
inputs = inputs_space.sample()
for fw, sess in framework_iterator(session=True):
# Create the correct distribution object.
cls = (
JAXCategorical
if fw == "jax"
else Categorical
if fw != "torch"
else TorchCategorical
)
categorical = cls(inputs, {})
# Do a stability test using extreme NN outputs to see whether
# sampling and logp'ing result in NaN or +/-inf values.
self._stability_test(
cls,
inputs_space.shape,
fw=fw,
sess=sess,
bounds=(0, num_categories - 1),
)
            # Whole batch (size=batch_size), deterministic sampling.
expected = np.transpose(np.argmax(inputs, axis=-1))
# Sample, expect always max value
# (max likelihood for deterministic draw).
out = categorical.deterministic_sample()
check(out, expected)
            # Whole batch, non-deterministic -> expect roughly the mean.
out = categorical.sample()
check(
np.mean(out)
if fw == "jax"
else tf.reduce_mean(out)
if fw != "torch"
else torch.mean(out.float()),
1.0,
decimals=0,
)
# Test log-likelihood outputs.
probs = softmax(inputs)
values = values_space.sample()
out = categorical.logp(values if fw != "torch" else torch.Tensor(values))
expected = []
for i in range(batch_size):
expected.append(np.sum(np.log(np.array(probs[i][values[i]]))))
check(out, expected, decimals=4)
# Test entropy outputs.
out = categorical.entropy()
expected_entropy = -np.sum(probs * np.log(probs), -1)
check(out, expected_entropy)
def test_multi_categorical(self):
batch_size = 100
num_categories = 3
num_sub_distributions = 5
# Create 5 categorical distributions of 3 categories each.
inputs_space = Box(
-1.0, 2.0, shape=(batch_size, num_sub_distributions * num_categories)
)
inputs_space.seed(42)
values_space = Box(
0,
num_categories - 1,
shape=(num_sub_distributions, batch_size),
dtype=np.int32,
)
values_space.seed(42)
inputs = inputs_space.sample()
input_lengths = [num_categories] * num_sub_distributions
inputs_split = np.split(inputs, num_sub_distributions, axis=1)
for fw, sess in framework_iterator(session=True):
# Create the correct distribution object.
cls = MultiCategorical if fw != "torch" else TorchMultiCategorical
multi_categorical = cls(inputs, None, input_lengths)
# Do a stability test using extreme NN outputs to see whether
# sampling and logp'ing result in NaN or +/-inf values.
self._stability_test(
cls,
inputs_space.shape,
fw=fw,
sess=sess,
bounds=(0, num_categories - 1),
extra_kwargs={"input_lens": input_lengths},
)
            # Whole batch (size=batch_size), deterministic sampling.
expected = np.transpose(np.argmax(inputs_split, axis=-1))
# Sample, expect always max value
# (max likelihood for deterministic draw).
out = multi_categorical.deterministic_sample()
check(out, expected)
            # Whole batch, non-deterministic -> expect roughly the mean.
out = multi_categorical.sample()
check(
tf.reduce_mean(out) if fw != "torch" else torch.mean(out.float()),
1.0,
decimals=0,
)
# Test log-likelihood outputs.
probs = softmax(inputs_split)
values = values_space.sample()
out = multi_categorical.logp(
values
if fw != "torch"
else [torch.Tensor(values[i]) for i in range(num_sub_distributions)]
            )
expected = []
for i in range(batch_size):
expected.append(
np.sum(
np.log(
np.array(
[
probs[j][i][values[j][i]]
for j in range(num_sub_distributions)
]
)
)
)
)
check(out, expected, decimals=4)
# Test entropy outputs.
out = multi_categorical.entropy()
expected_entropy = -np.sum(np.sum(probs * np.log(probs), 0), -1)
check(out, expected_entropy)
def test_squashed_gaussian(self):
"""Tests the SquashedGaussian ActionDistribution for all frameworks."""
input_space = Box(-2.0, 2.0, shape=(2000, 10))
input_space.seed(42)
low, high = -2.0, 1.0
for fw, sess in framework_iterator(session=True):
cls = SquashedGaussian if fw != "torch" else TorchSquashedGaussian
# Do a stability test using extreme NN outputs to see whether
# sampling and logp'ing result in NaN or +/-inf values.
self._stability_test(
cls, input_space.shape, fw=fw, sess=sess, bounds=(low, high)
)
# Batch of size=n and deterministic.
inputs = input_space.sample()
means, _ = np.split(inputs, 2, axis=-1)
squashed_distribution = cls(inputs, {}, low=low, high=high)
expected = ((np.tanh(means) + 1.0) / 2.0) * (high - low) + low
# Sample n times, expect always mean value (deterministic draw).
out = squashed_distribution.deterministic_sample()
check(out, expected)
# Batch of size=n and non-deterministic -> expect roughly the mean.
inputs = input_space.sample()
means, log_stds = np.split(inputs, 2, axis=-1)
squashed_distribution = cls(inputs, {}, low=low, high=high)
expected = ((np.tanh(means) + 1.0) / 2.0) * (high - low) + low
values = squashed_distribution.sample()
if sess:
values = sess.run(values)
else:
values = values.numpy()
self.assertTrue(np.max(values) <= high)
self.assertTrue(np.min(values) >= low)
check(np.mean(values), expected.mean(), decimals=1)
# Test log-likelihood outputs.
sampled_action_logp = squashed_distribution.logp(
values if fw != "torch" else torch.Tensor(values)
)
if sess:
sampled_action_logp = sess.run(sampled_action_logp)
else:
sampled_action_logp = sampled_action_logp.numpy()
# Convert to parameters for distr.
stds = np.exp(np.clip(log_stds, MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT))
# Unsquash values, then get log-llh from regular gaussian.
normed_values = (values - low) / (high - low) * 2.0 - 1.0
            safe_normed_values = np.clip(
                normed_values, -1.0 + SMALL_NUMBER, 1.0 - SMALL_NUMBER
            )
            unsquashed_values = np.arctanh(safe_normed_values)
log_prob_unsquashed = np.sum(
np.log(norm.pdf(unsquashed_values, means, stds)), -1
)
log_prob = log_prob_unsquashed - np.sum(
np.log(1 - np.tanh(unsquashed_values) ** 2), axis=-1
)
check(np.sum(sampled_action_logp), np.sum(log_prob), rtol=0.05)
# NN output.
means = np.array(
[[0.1, 0.2, 0.3, 0.4, 50.0], [-0.1, -0.2, -0.3, -0.4, -1.0]]
)
log_stds = np.array(
[[0.8, -0.2, 0.3, -1.0, 2.0], [0.7, -0.3, 0.4, -0.9, 2.0]]
)
squashed_distribution = cls(
inputs=np.concatenate([means, log_stds], axis=-1),
model={},
low=low,
high=high,
)
# Convert to parameters for distr.
stds = np.exp(log_stds)
# Values to get log-likelihoods for.
values = np.array(
[[0.9, 0.2, 0.4, -0.1, -1.05], [-0.9, -0.2, 0.4, -0.1, -1.05]]
)
# Unsquash values, then get log-llh from regular gaussian.
unsquashed_values = np.arctanh((values - low) / (high - low) * 2.0 - 1.0)
log_prob_unsquashed = np.sum(
np.log(norm.pdf(unsquashed_values, means, stds)), -1
)
log_prob = log_prob_unsquashed - np.sum(
np.log(1 - np.tanh(unsquashed_values) ** 2), axis=-1
)
outs = squashed_distribution.logp(
values if fw != "torch" else torch.Tensor(values)
)
if sess:
outs = sess.run(outs)
check(outs, log_prob, decimals=4)
def test_diag_gaussian(self):
"""Tests the DiagGaussian ActionDistribution for all frameworks."""
input_space = Box(-2.0, 1.0, shape=(2000, 10))
input_space.seed(42)
for fw, sess in framework_iterator(session=True):
cls = DiagGaussian if fw != "torch" else TorchDiagGaussian
# Do a stability test using extreme NN outputs to see whether
# sampling and logp'ing result in NaN or +/-inf values.
self._stability_test(cls, input_space.shape, fw=fw, sess=sess)
# Batch of size=n and deterministic.
inputs = input_space.sample()
means, _ = np.split(inputs, 2, axis=-1)
diag_distribution = cls(inputs, {})
expected = means
# Sample n times, expect always mean value (deterministic draw).
out = diag_distribution.deterministic_sample()
check(out, expected)
# Batch of size=n and non-deterministic -> expect roughly the mean.
inputs = input_space.sample()
means, log_stds = np.split(inputs, 2, axis=-1)
diag_distribution = cls(inputs, {})
expected = means
values = diag_distribution.sample()
if sess:
values = sess.run(values)
else:
values = values.numpy()
check(np.mean(values), expected.mean(), decimals=1)
# Test log-likelihood outputs.
sampled_action_logp = diag_distribution.logp(
values if fw != "torch" else torch.Tensor(values)
)
if sess:
sampled_action_logp = sess.run(sampled_action_logp)
else:
sampled_action_logp = sampled_action_logp.numpy()
# NN output.
means = np.array(
[[0.1, 0.2, 0.3, 0.4, 50.0], [-0.1, -0.2, -0.3, -0.4, -1.0]],
dtype=np.float32,
)
log_stds = np.array(
[[0.8, -0.2, 0.3, -1.0, 2.0], [0.7, -0.3, 0.4, -0.9, 2.0]],
dtype=np.float32,
)
diag_distribution = cls(
inputs=np.concatenate([means, log_stds], axis=-1), model={}
)
# Convert to parameters for distr.
stds = np.exp(log_stds)
# Values to get log-likelihoods for.
values = np.array(
[[0.9, 0.2, 0.4, -0.1, -1.05], [-0.9, -0.2, 0.4, -0.1, -1.05]]
)
# get log-llh from regular gaussian.
log_prob = np.sum(np.log(norm.pdf(values, means, stds)), -1)
outs = diag_distribution.logp(
values if fw != "torch" else torch.Tensor(values)
)
if sess:
outs = sess.run(outs)
check(outs, log_prob, decimals=4)
def test_beta(self):
input_space = Box(-2.0, 1.0, shape=(2000, 10))
input_space.seed(42)
low, high = -1.0, 2.0
plain_beta_value_space = Box(0.0, 1.0, shape=(2000, 5))
plain_beta_value_space.seed(42)
for fw, sess in framework_iterator(session=True):
cls = TorchBeta if fw == "torch" else Beta
inputs = input_space.sample()
beta_distribution = cls(inputs, {}, low=low, high=high)
inputs = beta_distribution.inputs
if sess:
inputs = sess.run(inputs)
else:
inputs = inputs.numpy()
alpha, beta_ = np.split(inputs, 2, axis=-1)
# Mean for a Beta distribution: 1 / [1 + (beta/alpha)]
expected = (1.0 / (1.0 + beta_ / alpha)) * (high - low) + low
# Sample n times, expect always mean value (deterministic draw).
out = beta_distribution.deterministic_sample()
check(out, expected, rtol=0.01)
# Batch of size=n and non-deterministic -> expect roughly the mean.
values = beta_distribution.sample()
if sess:
values = sess.run(values)
else:
values = values.numpy()
self.assertTrue(np.max(values) <= high)
self.assertTrue(np.min(values) >= low)
check(np.mean(values), expected.mean(), decimals=1)
# Test log-likelihood outputs (against scipy).
inputs = input_space.sample()
beta_distribution = cls(inputs, {}, low=low, high=high)
inputs = beta_distribution.inputs
if sess:
inputs = sess.run(inputs)
else:
inputs = inputs.numpy()
alpha, beta_ = np.split(inputs, 2, axis=-1)
values = plain_beta_value_space.sample()
values_scaled = values * (high - low) + low
if fw == "torch":
values_scaled = torch.Tensor(values_scaled)
print(values_scaled)
out = beta_distribution.logp(values_scaled)
check(out, np.sum(np.log(beta.pdf(values, alpha, beta_)), -1), rtol=0.01)
# TODO(sven): Test entropy outputs (against scipy).
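    # Illustrative helper (hypothetical, never called by the tests): the Beta
    # mean identity used above, alpha / (alpha + beta) == 1 / (1 + beta/alpha),
    # checked against scipy's beta distribution.
    def _beta_mean_sketch(self):
        alpha_, beta_param = 2.0, 5.0
        return np.isclose(
            beta.mean(alpha_, beta_param), 1.0 / (1.0 + beta_param / alpha_)
        )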
def test_gumbel_softmax(self):
"""Tests the GumbelSoftmax ActionDistribution (tf + eager only)."""
for fw, sess in framework_iterator(
frameworks=("tf2", "tf", "tfe"), session=True
):
batch_size = 1000
num_categories = 5
input_space = Box(-1.0, 1.0, shape=(batch_size, num_categories))
input_space.seed(42)
# Batch of size=n and deterministic.
inputs = input_space.sample()
gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0)
expected = softmax(inputs)
# Sample n times, expect always mean value (deterministic draw).
out = gumbel_softmax.deterministic_sample()
check(out, expected)
# Batch of size=n and non-deterministic -> expect roughly that
# the max-likelihood (argmax) ints are output (most of the time).
inputs = input_space.sample()
gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0)
expected_mean = np.mean(np.argmax(inputs, -1)).astype(np.float32)
outs = gumbel_softmax.sample()
if sess:
outs = sess.run(outs)
check(np.mean(np.argmax(outs, -1)), expected_mean, rtol=0.08)
def test_multi_action_distribution(self):
"""Tests the MultiActionDistribution (across all frameworks)."""
batch_size = 1000
input_space = Tuple(
[
Box(-10.0, 10.0, shape=(batch_size, 4)),
Box(
-2.0,
2.0,
shape=(
batch_size,
6,
),
),
Dict({"a": Box(-1.0, 1.0, shape=(batch_size, 4))}),
]
)
input_space.seed(42)
std_space = Box(
-0.05,
0.05,
shape=(
batch_size,
3,
),
)
std_space.seed(42)
low, high = -1.0, 1.0
value_space = Tuple(
[
Box(0, 3, shape=(batch_size,), dtype=np.int32),
Box(-2.0, 2.0, shape=(batch_size, 3), dtype=np.float32),
Dict({"a": Box(0.0, 1.0, shape=(batch_size, 2), dtype=np.float32)}),
]
)
value_space.seed(42)
for fw, sess in framework_iterator(session=True):
if fw == "torch":
cls = TorchMultiActionDistribution
child_distr_cls = [
TorchCategorical,
TorchDiagGaussian,
partial(TorchBeta, low=low, high=high),
]
else:
cls = MultiActionDistribution
child_distr_cls = [
Categorical,
DiagGaussian,
partial(Beta, low=low, high=high),
]
inputs = list(input_space.sample())
distr = cls(
np.concatenate([inputs[0], inputs[1], inputs[2]["a"]], axis=1),
model={},
action_space=value_space,
child_distributions=child_distr_cls,
input_lens=[4, 6, 4],
)
# Adjust inputs for the Beta distr just as Beta itself does.
inputs[2]["a"] = np.clip(
inputs[2]["a"], np.log(SMALL_NUMBER), -np.log(SMALL_NUMBER)
)
inputs[2]["a"] = np.log(np.exp(inputs[2]["a"]) + 1.0) + 1.0
# Sample deterministically.
expected_det = [
np.argmax(inputs[0], axis=-1),
inputs[1][:, :3], # [:3]=Mean values.
# Mean for a Beta distribution:
# 1 / [1 + (beta/alpha)] * range + low
(1.0 / (1.0 + inputs[2]["a"][:, 2:] / inputs[2]["a"][:, 0:2]))
* (high - low)
+ low,
]
out = distr.deterministic_sample()
if sess:
out = sess.run(out)
check(out[0], expected_det[0])
check(out[1], expected_det[1])
check(out[2]["a"], expected_det[2])
# Stochastic sampling -> expect roughly the mean.
inputs = list(input_space.sample())
# Fix categorical inputs (not needed for distribution itself, but
# for our expectation calculations).
inputs[0] = softmax(inputs[0], -1)
# Fix std inputs (shouldn't be too large for this test).
inputs[1][:, 3:] = std_space.sample()
# Adjust inputs for the Beta distr just as Beta itself does.
inputs[2]["a"] = np.clip(
inputs[2]["a"], np.log(SMALL_NUMBER), -np.log(SMALL_NUMBER)
)
inputs[2]["a"] = np.log(np.exp(inputs[2]["a"]) + 1.0) + 1.0
distr = cls(
np.concatenate([inputs[0], inputs[1], inputs[2]["a"]], axis=1),
model={},
action_space=value_space,
child_distributions=child_distr_cls,
input_lens=[4, 6, 4],
)
expected_mean = [
np.mean(np.sum(inputs[0] * np.array([0, 1, 2, 3]), -1)),
inputs[1][:, :3], # [:3]=Mean values.
# Mean for a Beta distribution:
# 1 / [1 + (beta/alpha)] * range + low
(1.0 / (1.0 + inputs[2]["a"][:, 2:] / inputs[2]["a"][:, :2]))
* (high - low)
+ low,
]
out = distr.sample()
if sess:
out = sess.run(out)
out = list(out)
if fw == "torch":
out[0] = out[0].numpy()
out[1] = out[1].numpy()
out[2]["a"] = out[2]["a"].numpy()
check(np.mean(out[0]), expected_mean[0], decimals=1)
check(np.mean(out[1], 0), np.mean(expected_mean[1], 0), decimals=1)
check(np.mean(out[2]["a"], 0), np.mean(expected_mean[2], 0), decimals=1)
# Test log-likelihood outputs.
# Make sure beta-values are within 0.0 and 1.0 for the numpy
# calculation (which doesn't have scaling).
inputs = list(input_space.sample())
# Adjust inputs for the Beta distr just as Beta itself does.
inputs[2]["a"] = np.clip(
inputs[2]["a"], np.log(SMALL_NUMBER), -np.log(SMALL_NUMBER)
)
inputs[2]["a"] = np.log(np.exp(inputs[2]["a"]) + 1.0) + 1.0
distr = cls(
np.concatenate([inputs[0], inputs[1], inputs[2]["a"]], axis=1),
model={},
action_space=value_space,
child_distributions=child_distr_cls,
input_lens=[4, 6, 4],
)
inputs[0] = softmax(inputs[0], -1)
values = list(value_space.sample())
log_prob_beta = np.log(
beta.pdf(values[2]["a"], inputs[2]["a"][:, :2], inputs[2]["a"][:, 2:])
)
# Now do the up-scaling for [2] (beta values) to be between
# low/high.
values[2]["a"] = values[2]["a"] * (high - low) + low
inputs[1][:, 3:] = np.exp(inputs[1][:, 3:])
expected_log_llh = np.sum(
np.concatenate(
[
np.expand_dims(
np.log([i[values[0][j]] for j, i in enumerate(inputs[0])]),
-1,
),
np.log(norm.pdf(values[1], inputs[1][:, :3], inputs[1][:, 3:])),
log_prob_beta,
],
-1,
),
-1,
)
values[0] = np.expand_dims(values[0], -1)
if fw == "torch":
values = tree.map_structure(lambda s: torch.Tensor(s), values)
# Test all flattened input.
concat = np.concatenate(tree.flatten(values), -1).astype(np.float32)
out = distr.logp(concat)
if sess:
out = sess.run(out)
check(out, expected_log_llh, atol=15)
# Test structured input.
out = distr.logp(values)
if sess:
out = sess.run(out)
check(out, expected_log_llh, atol=15)
# Test flattened input.
out = distr.logp(tree.flatten(values))
if sess:
out = sess.run(out)
check(out, expected_log_llh, atol=15)
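# Numerical footnote (illustrative, not part of the original suite): the
# squashed-Gaussian log-probs above subtract sum(log(1 - tanh(u)^2)), i.e. the
# log-Jacobian of the tanh squashing, because d tanh(u)/du = 1 - tanh(u)^2.
# A minimal sketch verifying that derivative numerically, assuming only numpy:
def _tanh_jacobian_sketch():
    u = np.linspace(-2.0, 2.0, 5)
    eps = 1e-6
    numeric = (np.tanh(u + eps) - np.tanh(u - eps)) / (2.0 * eps)
    analytic = 1.0 - np.tanh(u) ** 2
    return np.allclose(numeric, analytic, atol=1e-6)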
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| ray-project/ray | rllib/models/tests/test_distributions.py | Python | apache-2.0 | 27,352 | ["Gaussian"] | c508df630cb576c3b5bb008d38186e3125ceba734a6ff926449ce99b0ddc2194 |
from dateutil.relativedelta import relativedelta
from edc_constants.constants import FAILED_ELIGIBILITY, OFF_STUDY, SCHEDULED, SCREENED
from edc_registration.models import RegisteredSubject
from edc_identifier.models import SubjectIdentifier
from edc_meta_data.models import RequisitionMetaData
from edc_appointment.models import Appointment
from td_maternal.models import MaternalVisit
from td_maternal.tests import BaseTestCase
from td_maternal.tests.factories import (MaternalUltraSoundIniFactory, MaternalEligibilityFactory,
MaternalConsentFactory, AntenatalEnrollmentFactory,
AntenatalVisitMembershipFactory, MaternalLabourDelFactory)
from .factories import InfantBirthFactory
class TestInfantBirthMembership(BaseTestCase):
def setUp(self):
super(TestInfantBirthMembership, self).setUp()
self.maternal_eligibility = MaternalEligibilityFactory()
self.maternal_consent = MaternalConsentFactory(
maternal_eligibility=self.maternal_eligibility)
self.registered_subject = self.maternal_eligibility.registered_subject
# maternal visit created here.
self.antenatal_enrollment = AntenatalEnrollmentFactory(registered_subject=self.registered_subject)
self.maternal_visit = MaternalVisit.objects.get(
appointment__registered_subject=self.registered_subject,
reason=SCHEDULED,
appointment__visit_definition__code='1000M')
self.maternal_ultrasound = MaternalUltraSoundIniFactory(maternal_visit=self.maternal_visit,
number_of_gestations=1)
self.maternal_visits_membership = AntenatalVisitMembershipFactory(registered_subject=self.registered_subject)
self.maternal_labour_del = MaternalLabourDelFactory(registered_subject=self.registered_subject,
live_infants_to_register=1)
def test_create_appointments(self):
infant_birth = InfantBirthFactory(
maternal_labour_del=self.maternal_labour_del,
registered_subject=RegisteredSubject.objects.get(
relative_identifier=self.maternal_consent.subject_identifier))
self.assertEqual(Appointment.objects.filter(
registered_subject=RegisteredSubject.objects.get(
relative_identifier=self.maternal_consent.subject_identifier)).count(), 6)
| botswana-harvard/tshilo-dikotla | td_infant/tests/test_infant_birth_membership.py | Python | gpl-2.0 | 2,538 | ["VisIt"] | 4b81128cf85a672ce557be2e0e1feb4b362e9f13953b6efe1bd8b11590f605df |
#!/usr/bin/python3
#
# Copyright (c) 2012 Mikkel Schubert <MikkelSch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import collections
import glob
import itertools
import logging
import os
import re
import string
from typing import Any, Dict, Iterable, Optional, Tuple
import paleomix.common.sequences as sequences
from paleomix.common.bamfiles import BAM_PLATFORMS
from paleomix.common.fileutils import get_files_glob
from paleomix.common.formats.fasta import FASTA
from paleomix.common.makefile import (
REQUIRED_VALUE,
And,
DeprecatedOption,
IsAny,
IsBoolean,
IsFloat,
IsInt,
IsListOf,
IsNone,
IsStr,
IsUnsignedInt,
MakefileError,
Not,
Or,
StringIn,
StringStartsWith,
ValueIn,
ValuesIntersect,
ValuesSubsetOf,
WithoutDefaults,
process_makefile,
read_makefile,
)
from paleomix.common.utilities import fill_dict
_READ_TYPES = set(("Single", "Singleton", "Collapsed", "CollapsedTruncated", "Paired"))
# The maximum reference sequence length supported by the BAI index format:
# https://samtools.github.io/hts-specs/SAMv1.pdf
_BAM_MAX_SEQUENCE_LENGTH = 2 ** 29 - 1
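# Illustrative sanity check (not required by the pipeline): the BAI cap works
# out to 536,870,911 bp, i.e. just under 512 Mbp; longer sequences trigger the
# switch to CSI indexes performed in _validate_prefixes below.
assert _BAM_MAX_SEQUENCE_LENGTH == 536_870_911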
def read_makefiles(filenames, pipeline_variant="bam"):
logger = logging.getLogger(__name__)
makefiles = []
for filename in filenames:
logger.info("Reading makefile %r", filename)
data = read_makefile(filename, MAKEFILE_SPECIFICATION)
options = data.pop("Options")
genomes = data.pop("Genomes")
makefiles.append(
{
"Filename": filename,
"Options": options,
"Genomes": _postprocess_genomes(genomes),
"Samples": _postprocess_samples(data, options),
}
)
return finalize_makefiles(makefiles, pipeline_variant)
def _alphanum_check(whitelist, min_len=1):
description = "characters a-z, A-Z, 0-9%s allowed"
description %= (", and %r" % whitelist,) if whitelist else ""
whitelist += string.ascii_letters + string.digits
return And(
IsStr(min_len=min_len), ValuesSubsetOf(whitelist, description=description)
)
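# Example (illustrative only; this value is not used by the pipeline): a
# validator built this way accepts names such as "sample_1" but rejects
# "sample/1", because '/' is outside the whitelist of letters, digits, and
# the extra characters passed in.
_EXAMPLE_NAME_VALIDATOR = _alphanum_check(whitelist="._-")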
# Valid names for genomes
_VALID_GENOME_NAME = And(
_alphanum_check(whitelist="._-*"),
Not(StringIn(["Options"])),
)
# Valid paths for genomes; avoids some problems with e.g. Bowtie2
_VALID_GENOME_PATH = And(
IsStr(), Not(ValuesIntersect('\\:?"<>|() \t\n\v\f\r')), default=REQUIRED_VALUE
)
# Valid strings for samples / libraries / lanes
_VALID_FILENAME = _alphanum_check(whitelist="._-", min_len=2)
_VALIDATION_OPTIONS = {
# Sequencing platform, used to tag read-groups.
"Platform": StringIn(BAM_PLATFORMS, default="ILLUMINA"),
# Offset for quality scores in FASTQ files.
"QualityOffset": ValueIn((33, 64, "Solexa"), default=33),
# Split a lane into multiple entries, one for each (pair of) file(s)
"AdapterRemoval": {
"--adapter1": IsStr,
"--adapter2": IsStr,
"--adapter-list": IsStr,
"--maxns": IsUnsignedInt,
"--minquality": IsUnsignedInt,
"--trimns": IsNone,
"--trimqualities": IsNone,
"--collapse": IsNone,
"--mm": Or(IsFloat, IsUnsignedInt),
"--minlength": IsUnsignedInt,
"--maxlength": IsUnsignedInt,
"--minalignmentlength": IsUnsignedInt,
"--minadapteroverlap": IsUnsignedInt,
"--shift": IsUnsignedInt,
"--qualitymax": IsUnsignedInt,
"--mate-separator": IsStr,
"--trimwindows": Or(IsInt, IsFloat),
"--preserve5p": IsNone,
"--collapse-deterministic": IsNone,
"--collapse-conservatively": IsNone,
},
    # Which aligner/mapper to use (BWA/Bowtie2)
"Aligners": {
"Program": ValueIn(("BWA", "Bowtie2"), default="BWA"),
"BWA": {
# Mapping algorithm; availability depends on BWA version
"Algorithm": StringIn(
("backtrack", "mem", "mem2", "bwasw"),
default="mem",
),
            # Minimum mapping quality (Phred) of reads to retain
"MinQuality": IsUnsignedInt(default=0),
# Remove unmapped reads or not
"FilterUnmappedReads": IsBoolean(default=True),
# Use seed region during mapping
# Verbose name for command-line option "-l 65535"
"UseSeed": IsBoolean(default=True),
# Any number of user specific options
StringStartsWith("-"): Or(
IsListOf(IsStr, IsInt, IsFloat), Or(IsStr, IsInt, IsFloat, IsNone)
),
},
"Bowtie2": {
            # Minimum mapping quality (Phred) of reads to retain
"MinQuality": IsUnsignedInt(default=0),
# Remove unmapped reads or not
"FilterUnmappedReads": IsBoolean(default=True),
# Any number of user specific options
StringStartsWith("-"): Or(
IsListOf(IsStr, IsInt, IsFloat), Or(IsStr, IsInt, IsFloat, IsNone)
),
},
},
"mapDamage": {
# Tabulation options
"--downsample": Or(IsUnsignedInt, IsFloat),
"--length": IsUnsignedInt,
"--around": IsUnsignedInt,
"--min-basequal": IsUnsignedInt,
# Plotting options
"--ymax": IsFloat,
"--readplot": IsUnsignedInt,
"--refplot": IsUnsignedInt,
# Model options
"--rand": IsUnsignedInt,
"--burn": IsUnsignedInt,
"--adjust": IsUnsignedInt,
"--iter": IsUnsignedInt,
"--forward": IsNone,
"--reverse": IsNone,
"--var-disp": IsNone,
"--jukes-cantor": IsNone,
"--diff-hangs": IsNone,
"--fix-nicks": IsNone,
"--use-raw-nick-freq": IsNone,
"--single-stranded": IsNone,
"--seq-length": IsUnsignedInt,
},
# Exclude READ_TYPES from alignment/analysis
"ExcludeReads": {
"Single": IsBoolean(default=False),
"Collapsed": IsBoolean(default=False),
"CollapsedTruncated": IsBoolean(default=False),
"Paired": IsBoolean(default=False),
"Singleton": IsBoolean(default=False),
},
# Features of pipeline
"Features": {
"mapDamage": StringIn(("rescale", "model", "plot", True, False), default=False),
"PCRDuplicates": StringIn((True, False, "mark", "filter"), default="filter"),
# TODO: Statistics to be combined into new report (HTML + JSON?)
"Coverage": DeprecatedOption(IsBoolean(default=True)),
"Depths": DeprecatedOption(IsBoolean(default=True)),
"Summary": DeprecatedOption(IsBoolean(default=True)),
},
}
# validation of a complex lane, containing trimmed reads and/or options
_VALIDATE_LANE = {
"Single": IsStr,
"Collapsed": IsStr,
"CollapsedTruncated": IsStr,
"Paired": IsStr,
"Singleton": IsStr,
"Untrimmed": IsStr,
"Options": WithoutDefaults(_VALIDATION_OPTIONS),
}
MAKEFILE_SPECIFICATION = {
"Options": _VALIDATION_OPTIONS,
"Genomes": {
_VALID_GENOME_NAME: {
"Path": _VALID_GENOME_PATH,
},
},
_VALID_FILENAME: { # Group
_VALID_FILENAME: { # Sample
_VALID_FILENAME: { # Library
# Validation of lanes is performed in `_postprocess_samples`
_VALID_FILENAME: IsAny,
"Options": WithoutDefaults(_VALIDATION_OPTIONS),
},
"Options": WithoutDefaults(_VALIDATION_OPTIONS),
},
"Options": WithoutDefaults(_VALIDATION_OPTIONS),
},
}
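# For orientation (illustrative YAML; every name and path below is
# hypothetical), a minimal makefile matching the specification above could
# look like:
#
#   Options:
#     Platform: ILLUMINA
#   Genomes:
#     hg19:
#       Path: prefixes/hg19.fasta
#   MyGroup:
#     MySample:
#       MyLibrary:
#         Lane_1: reads/lane1_R{Pair}.fastq.gz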
########################################################################################
# Post processing of user defined target genomes
def _postprocess_genomes(genomes):
result = {}
for name, values in genomes.items():
if "*" in name[:-1]:
raise MakefileError(
"The character '*' is not allowed in Genome names; if you wish to "
"select multiple .fasta files using a search-string, then use the "
"genome name '%s*' instead and specify the wildcards in the 'Path'."
% (name.replace("*", ""))
)
elif name.endswith("*"):
for name, filename in _glob_genomes(values["Path"]):
if name in result:
raise MakefileError(f"Multiple genomes named {name}")
result[name] = {
"Name": name,
"Path": filename,
}
elif name in result:
raise MakefileError(f"Multiple genomes named {name}")
else:
result[name] = {
"Name": name,
"Path": values["Path"],
}
return result
def _glob_genomes(pattern):
filename = None
for filename in glob.iglob(pattern):
name = os.path.basename(filename).split(".")[0]
_VALID_GENOME_NAME(("Genomes", name), name)
yield (name, filename)
if filename is None:
raise MakefileError(f"Did not find any genomes using wildcards {pattern!r}")
########################################################################################
# Post processing of user samples / sample groups
def _postprocess_samples(data, global_options):
top_level_features = ("Coverage", "Depths", "mapDamage", "PCRDuplicates", "Summary")
for (group, samples) in tuple(data.items()):
# Options common to a specific group
group_options = _combine_options(
options=global_options,
data=samples,
path=(group,),
valid_features=top_level_features,
)
for sample, libraries in samples.items():
# Options common to a specific sample in a group
sample_options = _combine_options(
options=group_options,
data=libraries,
path=(group, sample),
valid_features=top_level_features,
)
for library, lanes in libraries.items():
# Options common to a specific library in a sample
library_options = _combine_options(
options=sample_options,
data=lanes,
path=(group, sample, library),
valid_features=("mapDamage", "PCRDuplicates"),
)
for barcode, record in lanes.items():
path = (group, sample, library, barcode)
# Split a trimmed/untrimmed lane into one record per input file
lanes[barcode] = _split_lane(record, path, library_options)
return data
def _combine_options(
options: Dict[str, Any],
data: Dict[str, Any],
path: Tuple[str, ...],
valid_features: Iterable[str],
) -> Dict[str, Any]:
item_options = data.pop("Options", {})
if not item_options:
return options
features = item_options.get("Features", {})
invalid_features = features.keys() - valid_features
if invalid_features:
raise MakefileError(
"Cannot override %s at %s"
% (", ".join(map(repr, invalid_features)), _path_to_str(path))
)
if "mapDamage" in item_options and "mapDamage" not in valid_features:
raise MakefileError(f"Cannot set mapDamage options at {_path_to_str(path)}")
# Fill out missing values using those of prior levels
return fill_dict(destination=item_options, source=options)
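# Illustrative sketch (never called by the pipeline) of the cascade
# implemented above: a child level overrides the options it sets and inherits
# everything else from its parent via fill_dict.
def _example_combine_options():
    parent = {"Platform": "ILLUMINA", "QualityOffset": 33}
    data = {"Options": {"QualityOffset": 64}}
    # Returns {"QualityOffset": 64, "Platform": "ILLUMINA"}.
    return _combine_options(
        options=parent, data=data, path=("Group",), valid_features=()
    )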
def _split_lane(data, path, options):
if isinstance(data, str):
data = {"Untrimmed": data}
# the structure needs to be validated here, since the specification uses an IsAny
data = process_makefile(
data=data,
specification=_VALIDATE_LANE,
path=path,
apply_defaults=False,
)
# Generate final options for this lane
options = _combine_options(
options=options,
data=data,
path=path,
valid_features=(),
)
return [
{
"Path": files,
"Type": read_type,
"Shortname": _filenames_to_shortname(files),
"Options": options,
}
for read_type, files in _collect_files_and_split_lane(data, path)
]
def _collect_files_and_split_lane(data, path):
if "Untrimmed" in data and len(data) > 1:
raise MakefileError(f"both untrimmed and trimmed reads at {_path_to_str(path)}")
for read_type, filepath in data.items():
if read_type == "Untrimmed":
for files in _collect_files(path, filepath):
yield read_type, files
elif read_type == "Paired":
if not _is_paired_end(filepath):
raise MakefileError(
"Paired data path at %s does not have a '{Pair}' key: %s"
% (_path_to_str(path + (read_type,)), filepath)
)
yield read_type, (filepath.format(Pair=1), filepath.format(Pair=2))
else:
yield read_type, (filepath, None)
def _filenames_to_shortname(filenames):
if not (1 <= len(filenames) <= 2):
raise ValueError(filenames)
basenames = []
for filename in filenames:
if filename is not None:
basenames.append(os.path.basename(filename))
filename = get_files_glob(basenames)
if filename is None:
raise ValueError(filenames)
return filename.replace("?", "x")
def _collect_files(path, template) -> Iterable[Tuple[str, Optional[str]]]:
if _is_paired_end(template):
files_1 = _sorted_glob(template.format(Pair=1))
files_2 = _sorted_glob(template.format(Pair=2))
if len(files_1) != len(files_2):
raise MakefileError(
"Unequal number of mate 1 and mate 2 files found at %r; found %i "
"mate 1 files and %i mate 2 files; specified in makefile at %r. "
% (template, len(files_1), len(files_2), _path_to_str(path))
)
elif not (files_1 and files_2):
return [(template, None)]
return zip(files_1, files_2)
else:
files = _sorted_glob(template)
if not files:
return [(template, None)]
return [(filename, None) for filename in files]
def _sorted_glob(filename):
if _GLOB_MAGIC.search(filename):
return sorted(glob.iglob(filename))
return [filename]
def _is_paired_end(template):
"""Returns true if a template contains a Pair component."""
return template.format(Pair=1) != template
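# Illustrative checks (hypothetical filenames): a template is paired-end
# exactly when formatting it with Pair=1 changes it.
assert _is_paired_end("reads_R{Pair}.fastq.gz")
assert not _is_paired_end("reads.fastq.gz")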
# based on check in `glob`
_GLOB_MAGIC = re.compile("[*?[]")
########################################################################################
def finalize_makefiles(makefiles, pipeline_variant):
sample_names = set()
duplicate_samples = set()
for makefile in makefiles:
results = {}
# Groups are used to structure the YAML file and can be discarded for simplicity
for samples in makefile["Samples"].values():
for sample, libraries in samples.items():
if sample in sample_names:
duplicate_samples.add(sample)
sample_names.add(sample)
results[sample] = libraries
makefile["Samples"] = results
if duplicate_samples:
log = logging.getLogger(__name__)
log.error("One or more sample names have been used multiple times:")
for idx, sample in enumerate(sorted(duplicate_samples), start=1):
log.error(" %i. %s", idx, sample)
log.error("All samples must have a unique name")
raise MakefileError("Duplicate sample names found")
for makefile in makefiles:
_validate_makefile_options(makefile)
_validate_makefiles_duplicate_files(makefiles)
_validate_prefixes(makefiles, pipeline_variant)
return makefiles
def _validate_makefile_options(makefile):
for (sample, library, barcode, record) in _iterate_over_records(makefile):
path = (sample, library, barcode)
if record["Type"] != "Untrimmed":
if record["Options"]["QualityOffset"] != 33:
raise MakefileError(
"Pre-trimmed data must have quality offset 33 (Phred+33). "
"Please convert your FASTQ files using e.g. seqtk before "
"continuing: {}".format(_path_to_str(path))
)
_validate_makefile_adapters(record, path)
def _validate_makefile_adapters(record, path):
"""Checks for the default adapter sequences specified in the wrong
orientation for AdapterRemoval, which is a typical mistake when using
the --pcr2 option.
"""
# The non-reverse complemented mate 2 adapter, as seen in raw FASTQ reads
adapter_2 = "AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGTAGATCTCGGTGGTCGCCGTATCATT"
tests = {
# --pcr2 expects the reverse complement of the mate 2 adapter seq.
"--pcr2": adapter_2,
# --adapter2 (AdapterRemoval v2) expects the regular sequence
"--adapter2": sequences.reverse_complement(adapter_2),
}
results = {}
options = record["Options"]
for key, value in tests.items():
results[key] = options.get(key, "").upper() == value
if any(results.values()):
logger = logging.getLogger(__name__)
        logger.warning(
"An adapter specified for AdapterRemoval corresponds to the default "
"sequence, but is reverse complemented. Please make sure that this is "
"intended! "
)
if results["--pcr2"]:
            logger.warning(
"For --pcr2, the sequence given should be the "
"reverse complement of the sequence observed in the "
"mate 2 FASTQ file."
)
if results["--adapter2"]:
            logger.warning(
"For --adapter2 (AdapterRemoval v2, only) the value "
"should be exactly as observed in the FASTQ reads."
)
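# Illustration of the orientation logic above (uses only the already-imported
# paleomix.common.sequences module): reverse-complementing twice is a no-op,
# which is why --adapter2 should be given exactly as observed in the mate 2
# reads, while --pcr2 expects a single reverse complement of that sequence.
assert sequences.reverse_complement(sequences.reverse_complement("AGAT")) == "AGAT"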
def _validate_makefiles_duplicate_files(makefiles):
filenames = collections.defaultdict(list)
for makefile in makefiles:
for (sample, library, barcode, record) in _iterate_over_records(makefile):
for filepath in record["Path"]:
if filepath is not None:
realpath = os.path.realpath(filepath)
filenames[realpath].append((sample, library, barcode))
has_overlap = {}
for (filename, records) in filenames.items():
if len(records) > 1:
has_overlap[filename] = list(set(records))
logger = logging.getLogger(__name__)
by_records = sorted(zip(list(has_overlap.values()), list(has_overlap.keys())))
for (records, pairs) in itertools.groupby(by_records, lambda x: x[0]):
description = _describe_files_in_multiple_records(records, pairs)
if len(set(record[0] for record in records)) != len(records):
message = "FASTQ files are used multiple times in sample:\n"
raise MakefileError(message + description)
else:
logger.warn("WARNING: Path included in multiple samples:\n%s", description)
def _describe_files_in_multiple_records(records, pairs):
lines = []
prefix = "Filename"
for (_, filename) in sorted(pairs):
lines.append(" {0} {1}".format(prefix, filename))
prefix = " " * len(prefix)
prefix = "Found at"
for record in sorted(records):
# FIXME: Show the glob that found the above files
lines.append(" {0} {1} :: {2} :: {3}".format(prefix, *record))
prefix = " " * len(prefix)
return "\n".join(lines)
def _validate_prefixes(makefiles, pipeline_variant):
logger = logging.getLogger(__name__)
already_validated = {}
logger.info("Validating FASTA files")
for makefile in makefiles:
for prefix in makefile["Genomes"].values():
path = prefix["Path"]
if path in already_validated:
prefix["IndexFormat"] = already_validated[path]["IndexFormat"]
continue
# Must be set to a valid value, even if FASTA file does not exist
prefix["IndexFormat"] = ".bai"
if not os.path.exists(path):
level = logging.ERROR if pipeline_variant == "bam" else logging.WARNING
logger.log(level, "Reference FASTA file does not exist: %r", path)
continue
elif not os.path.exists(path + ".fai"):
logger.info("Indexing FASTA at %r", path)
try:
contigs = FASTA.index_and_collect_contigs(path)
except Exception as error:
if pipeline_variant == "bam":
raise MakefileError(f"Error reading/indexing FASTA: {error}")
                logging.warning("Error reading/indexing FASTA: %s", error)
                # Without contigs there is nothing further to validate here.
                continue
if max(contigs.values()) > _BAM_MAX_SEQUENCE_LENGTH:
                logger.warning(
"FASTA file %r contains sequences longer "
"than %i! CSI index files will be used instead "
"of BAI index files.",
path,
_BAM_MAX_SEQUENCE_LENGTH,
)
prefix["IndexFormat"] = ".csi"
already_validated[path] = prefix
def _path_to_str(path):
return " :: ".join(path)
def _iterate_over_records(makefile):
for (sample, libraries) in tuple(makefile["Samples"].items()):
for (library, barcodes) in tuple(libraries.items()):
for (barcode, records) in tuple(barcodes.items()):
for record in records:
yield sample, library, barcode, record
| MikkelSchubert/paleomix | paleomix/pipelines/bam/makefile.py | Python | mit | 22,702 | ["BWA"] | 374dd41ac34b843f383c0145892fa4b7821f2ef2567fc49a6b0ff613c64ee7f3 |
from django.contrib import admin
from models import (
Item, Buy, UserProfile, Occasion, Visit, MagicLink, MagicLinkClick
)
class ItemAdmin(admin.ModelAdmin):
list_filter = ('multi_item', 'already_given', 'surprise', 'user',)
list_display = ('name', 'multi_item', 'already_given',)
class Meta:
model = Item
class OccasionAdmin(admin.ModelAdmin):
list_display = ('name', 'user', 'month', 'day',)
class Meta:
model = Occasion
class UserProfileAdmin(admin.ModelAdmin):
list_display = ('user', 'uuid', 'subscribed_to_email', 'per_item_email',)
list_filter = ('subscribed_to_email', 'per_item_email',)
class VisitAdmin(admin.ModelAdmin):
list_display = ('user', 'path', 'created',)
class MagicLinkAdmin(admin.ModelAdmin):
list_display = ('user', 'uuid', 'created', 'is_expired', 'url')
class MagicLinkClickAdmin(admin.ModelAdmin):
list_display = ('link', 'user', 'created',)
admin.site.register(Item, ItemAdmin)
admin.site.register(Buy)
admin.site.register(Visit, VisitAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(Occasion, OccasionAdmin)
admin.site.register(MagicLink, MagicLinkAdmin)
admin.site.register(MagicLinkClick, MagicLinkClickAdmin)
| honza/wishlist-app | web/admin.py | Python | bsd-2-clause | 1,246 | ["VisIt"] | 8df9fa631951d6f2c8c6da1e58e7004a7f33127461b075c73b136f1f81e60735 |
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe, Brian Holt, Andreas Mueller
# License: BSD 3 clause
from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_equal
from numpy.testing import assert_almost_equal
from nose.tools import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.svm import LinearSVC
from sklearn.decomposition import TruncatedSVD
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification_toy():
"""Check classification on a toy dataset."""
# Random forest
clf = RandomForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = RandomForestClassifier(n_estimators=10, max_features=1,
random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
# Extra-trees
clf = ExtraTreesClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ExtraTreesClassifier(n_estimators=10, max_features=1,
random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_iris():
"""Check consistency on dataset iris."""
for c in ("gini", "entropy"):
# Random forest
clf = RandomForestClassifier(n_estimators=10, criterion=c,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with criterion %s and score = %f" % (c,
score)
clf = RandomForestClassifier(n_estimators=10, criterion=c,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.5, "Failed with criterion %s and score = %f" % (c,
score)
# Extra-trees
clf = ExtraTreesClassifier(n_estimators=10, criterion=c,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with criterion %s and score = %f" % (c,
score)
clf = ExtraTreesClassifier(n_estimators=10, criterion=c,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with criterion %s and score = %f" % (c,
score)
def test_boston():
"""Check consistency on dataset boston house prices."""
for c in ("mse",):
# Random forest
clf = RandomForestRegressor(n_estimators=5, criterion=c,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.95, ("Failed with max_features=None, "
"criterion %s and score = %f" % (c, score))
clf = RandomForestRegressor(n_estimators=5, criterion=c,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.95, ("Failed with max_features=6, "
"criterion %s and score = %f" % (c, score))
# Extra-trees
clf = ExtraTreesRegressor(n_estimators=5, criterion=c, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.95, ("Failed with max_features=None, "
"criterion %s and score = %f" % (c, score))
clf = ExtraTreesRegressor(n_estimators=5, criterion=c, max_features=6,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.95, ("Failed with max_features=6, "
"criterion %s and score = %f" % (c, score))
def test_regressor_attributes():
"""Regression models should not have a classes_ attribute."""
r = RandomForestRegressor()
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_probability():
"""Predict probabilities."""
olderr = np.seterr(divide="ignore")
# Random forest
clf = RandomForestClassifier(n_estimators=10, random_state=1,
max_features=1, max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
# Extra-trees
clf = ExtraTreesClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
np.seterr(**olderr)
def test_importances():
"""Check variable importances."""
X, y = datasets.make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
clf = RandomForestClassifier(n_estimators=10)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
    assert_greater(X_new.shape[1], 0)
    assert_less(X_new.shape[1], X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = RandomForestClassifier(n_estimators=50, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert np.all(importances >= 0.0)
clf = RandomForestClassifier(n_estimators=50, random_state=0)
clf.fit(X, y, sample_weight=3*sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
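# Companion sketch (illustrative; the leading underscore keeps nose from
# collecting it as a test): impurity-based importances in these forests are
# normalized, so they sum to approximately 1.
def _example_importance_normalization():
    X_ex, y_ex = datasets.make_classification(n_samples=200, n_features=5,
                                              random_state=0)
    clf = RandomForestClassifier(n_estimators=5, random_state=0).fit(X_ex, y_ex)
    return clf.feature_importances_.sum()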
def test_oob_score_classification():
"""Check that oob prediction is a good estimation of the generalization
error."""
clf = RandomForestClassifier(oob_score=True, random_state=rng)
n_samples = iris.data.shape[0]
clf.fit(iris.data[:n_samples // 2, :], iris.target[:n_samples // 2])
test_score = clf.score(iris.data[n_samples // 2:, :],
iris.target[n_samples // 2:])
assert_less(abs(test_score - clf.oob_score_), 0.1)
def test_oob_score_classification_for_non_contiguous_target():
"""Check that oob prediction is a good estimation of the generalization
error for non-contiguous targets."""
iris_target = iris.target * 2 + 1
clf = RandomForestClassifier(n_estimators=50,
oob_score=True, random_state=rng)
n_samples = iris.data.shape[0]
clf.fit(iris.data[:n_samples // 2, :], iris_target[:n_samples // 2])
test_score = clf.score(iris.data[n_samples // 2:, :],
iris_target[n_samples // 2:])
assert_less(abs(test_score - clf.oob_score_), 0.1)
def test_oob_score_regression():
"""Check that oob prediction is pessimistic estimate.
Not really a good test that prediction is independent."""
clf = RandomForestRegressor(n_estimators=50, oob_score=True,
random_state=rng)
n_samples = boston.data.shape[0]
clf.fit(boston.data[:n_samples // 2, :], boston.target[:n_samples // 2])
test_score = clf.score(boston.data[n_samples // 2:, :],
boston.target[n_samples // 2:])
assert_greater(test_score, clf.oob_score_)
assert_greater(clf.oob_score_, .8)
def test_gridsearch():
"""Check that base trees can be grid-searched."""
# Random forest
forest = RandomForestClassifier()
parameters = {'n_estimators': (1, 2),
'max_depth': (1, 2)}
clf = GridSearchCV(forest, parameters)
clf.fit(iris.data, iris.target)
# Extra-trees
forest = ExtraTreesClassifier()
parameters = {'n_estimators': (1, 2),
'max_depth': (1, 2)}
clf = GridSearchCV(forest, parameters)
clf.fit(iris.data, iris.target)
def test_parallel():
"""Check parallel computations."""
# Classification
forest = RandomForestClassifier(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(iris.data, iris.target)
assert_true(10 == len(forest))
forest.set_params(n_jobs=1)
y1 = forest.predict(iris.data)
forest.set_params(n_jobs=2)
y2 = forest.predict(iris.data)
assert_array_equal(y1, y2)
# Regression
forest = RandomForestRegressor(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(boston.data, boston.target)
assert_true(10 == len(forest))
forest.set_params(n_jobs=1)
y1 = forest.predict(boston.data)
forest.set_params(n_jobs=2)
y2 = forest.predict(boston.data)
assert_array_almost_equal(y1, y2, 3)
def test_pickle():
"""Check pickability."""
import pickle
# Random forest
obj = RandomForestClassifier(random_state=0)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_true(score == score2)
obj = RandomForestRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_true(score == score2)
# Extra-trees
obj = ExtraTreesClassifier(random_state=0)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_true(score == score2)
obj = ExtraTreesRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_true(score == score2)
def test_multioutput():
"""Check estimators on multi-output problems."""
olderr = np.seterr(divide="ignore")
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
clf = ExtraTreesClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
clf = ExtraTreesRegressor(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
np.seterr(**olderr)
def test_classes_shape():
"""Test that n_classes_ and classes_ have proper shape."""
# Classification, single output
clf = RandomForestClassifier()
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = RandomForestClassifier()
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_equal(clf.n_classes_, [2, 2])
assert_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_parallel_train():
rng = np.random.RandomState(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20,
n_jobs=n_jobs,
random_state=12345)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
for clf in clfs:
clf.fit(X_train, y_train)
X_test = rng.randn(n_samples, n_features)
probas = []
for clf in clfs:
proba = clf.predict_proba(X_test)
probas.append(proba)
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = np.random.RandomState(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
    X[:, 0] = rng.randint(0, 2, 1000)
    X[:, 1] = rng.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100,
max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def test_max_leaf_nodes_max_depth():
"""Test preceedence of max_leaf_nodes over max_depth. """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_forests = [RandomForestClassifier,
RandomForestRegressor,
RandomTreesEmbedding,
ExtraTreesClassifier,
ExtraTreesRegressor]
k = 4
for ForestEstimator in all_forests:
est = ForestEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0].tree_
assert_greater(tree.max_depth, 1)
est = ForestEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0].tree_
assert_equal(tree.max_depth, 1)
if __name__ == "__main__":
import nose
nose.runmodule()
|
loli/sklearn-ensembletrees
|
sklearn/ensemble/tests/test_forest.py
|
Python
|
bsd-3-clause
| 19,091
|
[
"Brian"
] |
dbaafe458c3d88a565d42d52b25b03c9c0e013e1608e08ff468174bf7574734b
|
"""
TherMOF command line interface.
"""
import os
import argparse
from thermof import Simulation
from thermof import Parameters
def main():
parser = argparse.ArgumentParser(
description="""
----------------------------------------------------------------------------
████████╗██╗ ██╗███████╗██████╗ ███╗ ███╗ ██████╗ ███████╗
╚══██╔══╝██║ ██║██╔════╝██╔══██╗████╗ ████║██╔═══██╗██╔════╝
██║ ███████║█████╗ ██████╔╝██╔████╔██║██║ ██║█████╗
██║ ██╔══██║██╔══╝ ██╔══██╗██║╚██╔╝██║██║ ██║██╔══╝
██║ ██║ ██║███████╗██║ ██║██║ ╚═╝ ██║╚██████╔╝██║
╚═╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═╝
TherMOF: Thermal transport in Metal-Organic Frameworks
-----------------------------------------------------------------------------
""",
epilog="""
Example:
>>> python thermof_write.py IRMOF-1.cif
would read the IRMOF-1 cif file, analyze its topology, assign a force field (default: UFF) and
create input files for a Lammps simulation.
>>> python thermof_write.py IRMOF-1.cif --forcefield UFF4MOF --fix MIN NPT NVT NVE --scheduler pbs
would initialize Lammps simulation files with the UFF4MOF force field for the following procedure:
minimization, NPT, NVT, and NVE ensembles. It would also create a job submission script for the pbs scheduler.
""",
formatter_class=argparse.RawDescriptionHelpFormatter)
default_params = {}
# Positional arguments
parser.add_argument('molecule', type=str, help='Molecule file to read (must be in .cif format).')
# Optional arguments
parser.add_argument('--runs', '-r', default=1, type=int, metavar='',
help='Number of runs (different seed number is used for each run).')
parser.add_argument('--forcefield', '-ff', default='UFF', type=str, metavar='',
help='Force field for molecule file ([UFF] / BTW_FF / Dreiding / UFF4MOF / Dubbeldam).')
parser.add_argument('--fix', nargs='+', default=['NVT'], type=str,
help='Lammps fix types (MIN / NPT / NVT / NVE).')
parser.add_argument('--scheduler', default='slurm', type=str, metavar='',
help='Job scheduler (pbs / [slurm] / slurm-scratch).')
# Parse arguments
args = parser.parse_args()
# Initialize simulation
simpar = Parameters()
sim = Simulation(mof=args.molecule, parameters=simpar)
mof_name = os.path.splitext(os.path.basename(args.molecule))[0]
sim.simdir = os.path.join(os.path.dirname(args.molecule), mof_name)
sim.parameters.lammps['force_field'] = args.forcefield
sim.parameters.lammps['mol_ff'] = args.forcefield
sim.parameters.thermof['fix'] = args.fix
sim.parameters.job['scheduler'] = args.scheduler
try:
if args.runs == 1:
sim.initialize()
elif args.runs > 1:
sim.initialize_runs(args.runs)
except Exception as e:
print(e)
if __name__ == '__main__':
main()
|
kbsezginel/tee_mof
|
thermof/cli/thermof_write.py
|
Python
|
mit
| 3,588
|
[
"LAMMPS"
] |
2714bb0ca38fc07ff96f35c965c59c6753eb10539a7577c02994b727bbe6c0bd
|
# coding: utf-8
from __future__ import division, unicode_literals
"""
Created on Jul 22, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 22, 2012"
import unittest
import os
from pymatgen.command_line.enumlib_caller import EnumlibAdaptor
from pymatgen import Element, Structure
from pymatgen.transformations.standard_transformations import \
SubstitutionTransformation
from monty.os.path import which
from pymatgen.transformations.site_transformations import \
RemoveSitesTransformation
from pymatgen.util.testing import PymatgenTest
enumlib_present = which('multienum.x') and which('makestr.x')
@unittest.skipIf(not enumlib_present, "enum_lib not present.")
class EnumlibAdaptorTest(PymatgenTest):
def test_init(self):
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
struct = self.get_structure("LiFePO4")
subtrans = SubstitutionTransformation({'Li': {'Li': 0.5}})
adaptor = EnumlibAdaptor(subtrans.apply_transformation(struct), 1, 2)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 86)
for s in structures:
self.assertAlmostEqual(
s.composition.get_atomic_fraction(Element("Li")), 0.5 / 6.5)
adaptor = EnumlibAdaptor(subtrans.apply_transformation(struct), 1, 2,
refine_structure=True)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 52)
subtrans = SubstitutionTransformation({'Li': {'Li': 0.25}})
adaptor = EnumlibAdaptor(subtrans.apply_transformation(struct), 1, 1,
refine_structure=True)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 1)
for s in structures:
self.assertAlmostEqual(s.composition
.get_atomic_fraction(Element("Li")),
0.25 / 6.25)
#Make sure it works for completely disordered structures.
struct = Structure([[10, 0, 0], [0, 10, 0], [0, 0, 10]], [{'Fe': 0.5}],
[[0, 0, 0]])
adaptor = EnumlibAdaptor(struct, 1, 2)
adaptor.run()
self.assertEqual(len(adaptor.structures), 3)
#Make sure it works properly when symmetry is broken by ordered sites.
struct = self.get_structure("LiFePO4")
subtrans = SubstitutionTransformation({'Li': {'Li': 0.25}})
s = subtrans.apply_transformation(struct)
# Remove some ordered sites to break symmetry.
removetrans = RemoveSitesTransformation([4, 7])
s = removetrans.apply_transformation(s)
adaptor = EnumlibAdaptor(s, 1, 1, enum_precision_parameter=0.01)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 4)
struct = Structure([[3, 0, 0], [0, 3, 0], [0, 0, 3]],
[{"Si": 0.5}] * 2, [[0, 0, 0], [0.5, 0.5, 0.5]])
adaptor = EnumlibAdaptor(struct, 1, 3, enum_precision_parameter=0.01)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 10)
struct = Structure.from_file(
os.path.join(test_dir, "EnumerateTest.json"))
adaptor = EnumlibAdaptor(struct, 1, 1)
adaptor.run()
structures = adaptor.structures
self.assertEqual(len(structures), 2)
if __name__ == '__main__':
unittest.main()
|
rousseab/pymatgen
|
pymatgen/command_line/tests/test_enumlib_caller.py
|
Python
|
mit
| 3,721
|
[
"pymatgen"
] |
2bb69e0f98635b790016a6913ae6a1c6553ecc77b1374d05933a43903c4abe1e
|
import PolyLibScan.Analysis.pymol_visualisation as pymol
import pathlib2 as pl
import mock
import numpy as np
import unittest as ut
local_path = pl.Path(__file__).absolute().parent
class TestPymolVis(ut.TestCase):
def __init__(self, *args, **kwargs):
super(TestPymolVis, self).__init__(*args, **kwargs)
job = mock.Mock()
self.pdb_file = local_path.joinpath('data', 'static', '4cha.pdb')
self.py_vis = pymol.PymolVisualisation(self.pdb_file)
def test_protein_path(self):
self.assertEqual(str(self.py_vis.protein_path), str(self.pdb_file))
def test_init_protein_path(self):
results1 = self.py_vis._init_protein_path(self.pdb_file)
self.assertEqual(results1, self.pdb_file)
results2 = self.py_vis._init_protein_path(None, search_path=self.pdb_file.parent)
self.assertEqual(results2, self.pdb_file)
# def test_poly_poses(self):
# self.py_vis._poly_poses()
if __name__ == '__main__':
ut.main(verbosity=2)
|
luminescence/PolyLibScan
|
Analysis/tests/test_pymolvisualise.py
|
Python
|
mit
| 1,015
|
[
"PyMOL"
] |
e15126eea11f7d3419545ec48af537d324a3633ae36d73c65149fe45192a19a6
|
'''
SYNBIOCHEM-DB (c) University of Manchester 2015
SYNBIOCHEM-DB is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
from sbcdb.enzyme_utils import EnzymeManager
class ReactionManager(object):
'''Class to implement a manager of Reaction data.'''
def __init__(self):
'''Constructor.'''
self.__nodes = {}
self.__reac_ids = {}
self.__reac_enz_rels = []
self.__org_enz_rels = []
self.__enz_man = EnzymeManager()
def write_files(self, writer):
'''Write neo4j import files and return ([node files], [relationship files]).'''
return ([writer.write_nodes(self.__nodes.values(),
'Reaction'),
writer.write_nodes(self.__enz_man.get_nodes(),
'Enzyme')],
[writer.write_rels(self.__reac_enz_rels,
'Reaction', 'Enzyme'),
writer.write_rels(self.__enz_man.get_org_enz_rels(),
'Organism', 'Enzyme')])
def add_reaction(self, source, reac_id, properties):
'''Adds a reaction to the collection of nodes, ensuring uniqueness.'''
reac_id = self.__reac_ids[source + reac_id] \
if source + reac_id in self.__reac_ids else reac_id
if reac_id not in self.__nodes:
properties[':LABEL'] = 'Reaction'
properties['id:ID(Reaction)'] = reac_id
properties['source'] = source
properties[source] = reac_id
self.__nodes[reac_id] = properties
if 'mnx' in properties:
self.__reac_ids['mnx' + properties['mnx']] = reac_id
if 'kegg.reaction' in properties:
self.__reac_ids[
'kegg.reaction' + properties['kegg.reaction']] = reac_id
if 'rhea' in properties:
self.__reac_ids['rhea' + properties['rhea']] = reac_id
else:
self.__nodes[reac_id].update(properties)
return reac_id
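# Illustrative usage sketch (hypothetical identifiers, not part of this module):
# manager = ReactionManager()
# rid = manager.add_reaction('mnx', 'MNXR1234', {'kegg.reaction': 'R00001'})
# # A later call using the cross-referenced KEGG id resolves to the same node:
# assert manager.add_reaction('kegg.reaction', 'R00001', {}) == rid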
def add_react_to_enz(self, data, source, num_threads=0):
'''Submit data to the graph.'''
# Create Reaction nodes and collect Enzyme ids:
enzyme_ids = self.__create_react_enz(data, source)
# Create Enzyme nodes:
self.__enz_man.add_uniprot_data(enzyme_ids, source, num_threads)
def __create_react_enz(self, data, source):
'''Creates Reaction and Enzyme nodes and their Relationships.'''
enzyme_ids = []
for reac_id, uniprot_ids in data.iteritems():
reac_id = self.add_reaction(source, reac_id, {})
for uniprot_id in uniprot_ids:
enzyme_ids.append(uniprot_id)
self.__reac_enz_rels.append([reac_id, 'catalysed_by',
uniprot_id,
{'source': source}])
return list(set(enzyme_ids))
|
synbiochem/biochem4j
|
sbcdb/reaction_utils.py
|
Python
|
mit
| 2,979
|
[
"VisIt"
] |
3d1d8d3beb00cfd72e214644c239a4bccf855e8ada9d04fb69874cb0fc9fa659
|
#!/usr/bin/env python
#
# This example demonstrates the creation of multiple actors and the
# manipulation of their properties and transformations. It is a
# derivative of Cone.py, see that example for more information.
#
import vtk
import time
#
# Next we create an instance of vtkConeSource and set some of its
# properties. The instance of vtkConeSource "cone" is part of a visualization
# pipeline (it is a source process object); it produces data (output type is
# vtkPolyData) which other filters may process.
#
cone = vtk.vtkConeSource ()
cone.SetHeight( 3.0 )
cone.SetRadius( 1.0 )
cone.SetResolution( 10 )
#
# In this example we terminate the pipeline with a mapper process object.
# (Intermediate filters such as vtkShrinkPolyData could be inserted in
# between the source and the mapper.) We create an instance of
# vtkPolyDataMapper to map the polygonal data into graphics primitives. We
# connect the output of the cone source to the input of this mapper.
#
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
#
# Create an actor to represent the first cone. The actor's properties are
# modified to give it different surface properties. By default, an actor
# is created with a property, so the GetProperty() method can be used.
#
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
coneActor.GetProperty().SetColor(0.2, 0.63, 0.79)
coneActor.GetProperty().SetDiffuse(0.7)
coneActor.GetProperty().SetSpecular(0.4)
coneActor.GetProperty().SetSpecularPower(20)
#
# Create a property and directly manipulate it. Assign it to the
# second actor.
#
property = vtk.vtkProperty()
property.SetColor(1.0, 0.3882, 0.2784)
property.SetDiffuse(0.7)
property.SetSpecular(0.4)
property.SetSpecularPower(20)
#
# Create a second actor and a property. The property is directly
# manipulated and then assigned to the actor. In this way, a single
# property can be shared among many actors. Note also that we use the
# same mapper as the first actor did. This way we avoid duplicating
# geometry, which may save lots of memory if the geometry is large.
coneActor2 = vtk.vtkActor()
coneActor2.SetMapper(coneMapper)
coneActor2.GetProperty().SetColor(0.2, 0.63, 0.79)
coneActor2.SetProperty(property)
coneActor2.SetPosition(0, 2, 0)
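#
# A third actor could share both the mapper and the property in the same way
# (hypothetical extension, not part of this tutorial step):
#
#   coneActor3 = vtk.vtkActor()
#   coneActor3.SetMapper(coneMapper)
#   coneActor3.SetProperty(property)
#   coneActor3.SetPosition(0, 4, 0)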
#
# Create the Renderer and assign actors to it. A renderer is like a
# viewport. It is part or all of a window on the screen and it is responsible
# for drawing the actors it has. We also set the background color here.
#
ren1 = vtk.vtkRenderer()
ren1.AddActor(coneActor)
ren1.AddActor(coneActor2)
ren1.SetBackground(0.1, 0.2, 0.4)
#
# Finally we create the render window which will show up on the screen
# We put our renderer into the render window using AddRenderer. We also
# set the size to be 300 pixels by 300.
#
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.SetSize(300, 300)
#
# Now we loop over 360 degrees and render the cone each time.
#
for i in range(0,360):
time.sleep(0.03)
renWin.Render()
ren1.GetActiveCamera().Azimuth( 1 )
|
arnaudgelas/VTK
|
Examples/Tutorial/Step4/Python/Cone4.py
|
Python
|
bsd-3-clause
| 3,059
|
[
"VTK"
] |
4851aa0553268aec383f7eab52bcc43bbd44af23599e887c9baad9df55269be5
|