| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
| stringlengths 12–1.05M | stringlengths 5–86 | stringlengths 4–191 | stringclasses 1 value | stringclasses 15 values | int32 12–1.05M | listlengths 1–23 | stringlengths 64 |
# This component calculates the humidity ratio from the ladybug weather file import parameters
#
# Ladybug: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Ladybug.
#
# Copyright (c) 2013-2015, Chris Mackey <chris@mackeyarchitecture.com>
# Ladybug is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Ladybug is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ladybug; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
#Conversion formulas are taken from the following publications:
#Vaisala. (2013) Humidity Conversion Formulas: Calculation Formulas for Humidity. www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
#W. Wagner and A. Pruss: "The IAPWS Formulation 1995 for the Thermodynamic Properties of Ordinary Water Substance for General and Scientific Use", Journal of Physical and Chemical Reference Data, June 2002, Volume 31, Issue 2, pp. 387-535
"""
Calculates the humidity ratio from the ladybug weather file import parameters
Conversion formulas are taken from the following publications:
Vaisala. (2013) Humidity Conversion Formulas: Calculation Formulas for Humidity. www.vaisala.com/Vaisala%20Documents/Application%20notes/Humidity_Conversion_Formulas_B210973EN-F.pdf
W. Wagner and A. Pruss: "The IAPWS Formulation 1995 for the Thermodynamic Properties of Ordinary Water Substance for General and Scientific Use", Journal of Physical and Chemical Reference Data, June 2002, Volume 31, Issue 2, pp. 387-535
-
Provided by Ladybug 0.0.60
Args:
_dryBulbTemperature: The dry bulb temperature from the Import epw component.
_relativeHumidity: The relative humidity from the Import epw component.
_barometricPressure: The barometric pressure from the Import epw component.
Returns:
readMe!: ...
humidityRatio: The hourly humidity ratio (kg water / kg air).
enthalpy: The hourly enthalpy of the air (kJ / kg).
partialPressure: The hourly partial pressure of water vapor in the atmosphere (Pa).
saturationPressure: The saturation pressure of water vapor in the atmosphere (Pa).
"""
ghenv.Component.Name = "Ladybug_Humidity Ratio Calculator"
ghenv.Component.NickName = 'CalcHumidityRatio'
ghenv.Component.Message = 'VER 0.0.60\nJUL_06_2015'
ghenv.Component.Category = "Ladybug"
ghenv.Component.SubCategory = "1 | AnalyzeWeatherData"
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "0"
except: pass
import math
import scriptcontext as sc
import Grasshopper.Kernel as gh
def checkTheData():
try:
hourlyDBTemp = _dryBulbTemperature
if 'Temperature' in hourlyDBTemp[2] and hourlyDBTemp[4] == 'Hourly': checkData1 = True
else: checkData1 = False
hourlyRH = _relativeHumidity
if 'Relative Humidity' in hourlyRH[2] and hourlyRH[4] == 'Hourly': checkData2 = True
else: checkData2 = False
barPress = _barometricPressure
if 'Barometric Pressure' in barPress[2] and barPress[4] == 'Hourly': checkData3 = True
else: checkData3 = False
if checkData1 == True and checkData2 == True and checkData3 == True: checkData = True
else: checkData = False
except: checkData = False
return checkData
def main():
# import the classes
if sc.sticky.has_key('ladybug_release'):
try:
if not sc.sticky['ladybug_release'].isCompatible(ghenv.Component): return -1
except:
warning = "You need a newer version of Ladybug to use this compoent." + \
"Use updateLadybug component to update userObjects.\n" + \
"If you have already updated userObjects drag Ladybug_Ladybug component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
lb_comfortModels = sc.sticky["ladybug_ComfortModels"]()
#Separate the numbers from the header strings
Tnumbers = []
Tstr = []
for item in _dryBulbTemperature:
try: Tnumbers.append(float(item))
except: Tstr.append(item)
Rnumbers = []
Rstr = []
for item in _relativeHumidity:
try: Rnumbers.append(float(item))
except: Rstr.append(item)
Bnumbers = []
Bstr = []
for item in _barometricPressure:
try: Bnumbers.append(float(item))
except: Bstr.append(item)
#Calculate the Humidity Ratio.
HRCalc, ENCalc, vapPress, satPress = lb_comfortModels.calcHumidRatio(Tnumbers, Rnumbers, Bnumbers)
#Build the strings and add it to the final calculation outputs
HR = []
HR.append(Tstr[0])
HR.append(Tstr[1])
HR.append('Humidity Ratio')
HR.append('kg water / kg air')
HR.append(Tstr[4])
HR.append(Tstr[5])
HR.append(Tstr[6])
for item in HRCalc:
HR.append(item)
EN = []
EN.append(Tstr[0])
EN.append(Tstr[1])
EN.append('Enthalpy')
EN.append('kJ/kg')
EN.append(Tstr[4])
EN.append(Tstr[5])
EN.append(Tstr[6])
for item in ENCalc:
EN.append(item)
SP = []
SP.append(Tstr[0])
SP.append(Tstr[1])
SP.append('Saturation Pressure')
SP.append('Pa')
SP.append(Tstr[4])
SP.append(Tstr[5])
SP.append(Tstr[6])
satPress100 = []
for item in satPress:
satPress100.append(item*100)
for item in satPress100:
SP.append(item)
VP = []
VP.append(Tstr[0])
VP.append(Tstr[1])
VP.append('Vapor Pressure')
VP.append('Pa')
VP.append('Hourly')
VP.append(Tstr[5])
VP.append(Tstr[6])
vapPress100 = []
for item in vapPress:
vapPress100.append(item*100)
for item in vapPress100:
VP.append(item)
return HR, EN, VP, SP
else:
print "You should first let the Ladybug fly..."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, "You should first let the Ladybug fly...")
return None, None, None, None
#Check the data to make sure it is the correct type
checkData = checkTheData()
if checkData == True:
res = main()
if res!=-1:
humidityRatio, enthalpy, partialPressure, saturationPressure = res
print 'Humidity ratio calculation completed successfully!'
else:
print 'Please provide all of the required annual data inputs.'
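# ------------------------------------------------------------------------------
# Illustrative sketch only: the standard psychrometric relations that the
# lb_comfortModels.calcHumidRatio call above encapsulates.  The constants and
# the Magnus-type saturation-pressure fit below are common textbook
# approximations assumed for illustration, not the exact Vaisala/IAPWS
# coefficients used by the Ladybug comfort models.
def _approxHumidityRatio(dbTempC, relHumidPct, barPressPa):
    # Saturation vapor pressure over liquid water (Pa), Magnus-type fit.
    satPress = 610.94 * math.exp(17.625 * dbTempC / (dbTempC + 243.04))
    # Partial pressure of water vapor (Pa) from relative humidity.
    partialPress = (relHumidPct / 100.0) * satPress
    # Humidity ratio (kg water / kg dry air); 0.621945 is the ratio of the
    # molecular weights of water and dry air.
    humidRatio = 0.621945 * partialPress / (barPressPa - partialPress)
    # Moist-air enthalpy (kJ/kg dry air), ASHRAE-style approximation.
    enthalpy = 1.006 * dbTempC + humidRatio * (2501.0 + 1.86 * dbTempC)
    return humidRatio, enthalpy, partialPress, satPress
# Example: _approxHumidityRatio(20.0, 50.0, 101325.0) gives a humidity ratio of
# roughly 0.0073 kg/kg, in line with standard psychrometric charts.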
| samuto/ladybug | src/Ladybug_Humidity Ratio Calculator.py | Python | gpl-3.0 | 7,319 | ["EPW"] | 68e85b18f5fa580ecbc86bbc4fae366406406835df3b5c6a879d02c7c50a2e89 |
import sys
sys.path.insert(1, "../../../")
import h2o, tests
import random
def random_attack():
def attack(family, train, valid, x, y):
kwargs = {}
kwargs['family'] = family
gaussian_links = ["inverse", "log", "identity"]
binomial_links = ["logit"]
poisson_links = ["log", "identity"]
gamma_links = ["inverse", "log", "identity"]
# randomly select parameters and their corresponding values
if random.randint(0,1): kwargs['max_iterations'] = random.randint(1,50)
if random.random() > 0.8: kwargs['beta_epsilon'] = random.random()
if random.randint(0,1): kwargs['solver'] = ["IRLSM", "L_BFGS"][random.randint(0,1)]
if random.randint(0,1): kwargs['standardize'] = [True, False][random.randint(0,1)]
if random.randint(0,1):
if family == "gaussian": kwargs['link'] = gaussian_links[random.randint(0,2)]
elif family == "binomial": kwargs['link'] = binomial_links[random.randint(0,0)]
elif family == "poisson" : kwargs['link'] = poisson_links[random.randint(0,1)]
elif family == "gamma" : kwargs['link'] = gamma_links[random.randint(0,2)]
if random.randint(0,1): kwargs['alpha'] = [random.random()]
if family == "binomial":
if random.randint(0,1): kwargs['prior'] = random.random()
if random.randint(0,1): kwargs['lambda_search'] = [True, False][random.randint(0,1)]
if 'lambda_search' in kwargs.keys():
if random.randint(0,1): kwargs['nlambdas'] = random.randint(2,10)
do_validation = [True, False][random.randint(0,1)]
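# (Sketch only, not executed.)  The family-specific if/elif chain above could
# equivalently draw from a single mapping, which keeps the valid links per
# family in one place:
#   links_by_family = {"gaussian": gaussian_links, "binomial": binomial_links,
#                      "poisson": poisson_links, "gamma": gamma_links}
#   kwargs['link'] = random.choice(links_by_family[family])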
# beta constraints
if random.randint(0,1):
bc = []
for n in x:
name = train.names[n]
lower_bound = random.uniform(-1,1)
upper_bound = lower_bound + random.random()
bc.append([name, lower_bound, upper_bound])
beta_constraints = h2o.H2OFrame(python_obj=bc)
beta_constraints.setNames(['names', 'lower_bounds', 'upper_bounds'])
kwargs['beta_constraints'] = beta_constraints.send_frame()
# display the parameters and their corresponding values
print "-----------------------"
print "x: {0}".format(x)
print "y: {0}".format(y)
print "validation: {0}".format(do_validation)
for k, v in zip(kwargs.keys(), kwargs.values()):
if k == 'beta_constraints':
print k + ": "
beta_constraints.show()
else:
print k + ": {0}".format(v)
if do_validation: h2o.glm(x=train[x], y=train[y], validation_x=valid[x], validation_y=valid[y], **kwargs)
else: h2o.glm(x=train[x], y=train[y], **kwargs)
print "-----------------------"
print "Import and data munging..."
pros = h2o.upload_file(h2o.locate("smalldata/prostate/prostate.csv.zip"))
pros[1] = pros[1].asfactor()
r = pros[0].runif() # a column of length pros.nrow with values between 0 and 1
# ~80/20 train/validation split
pros_train = pros[r > .2]
pros_valid = pros[r <= .2]
cars = h2o.upload_file(h2o.locate("smalldata/junit/cars.csv"))
r = cars[0].runif()
cars_train = cars[r > .2]
cars_valid = cars[r <= .2]
print
print "======================================================================"
print "============================== Binomial =============================="
print "======================================================================"
for i in range(10):
attack("binomial", pros_train, pros_valid, random.sample([2,3,4,5,6,7,8],random.randint(1,7)), 1)
print
print "======================================================================"
print "============================== Gaussian =============================="
print "======================================================================"
for i in range(10):
attack("gaussian", cars_train, cars_valid, random.sample([2,3,4,5,6,7],random.randint(1,6)), 1)
print
print "======================================================================"
print "============================== Poisson =============================="
print "======================================================================"
for i in range(10):
attack("poisson", cars_train, cars_valid, random.sample([1,3,4,5,6,7],random.randint(1,6)), 2)
print
print "======================================================================"
print "============================== Gamma =============================="
print "======================================================================"
for i in range(10):
attack("gamma", pros_train, pros_valid, random.sample([1,2,3,5,6,7,8],random.randint(1,7)), 4)
if __name__ == "__main__":
tests.run_test(sys.argv, random_attack)
| tarasane/h2o-3 | h2o-py/tests/testdir_algos/glm/pyunit_NOPASS_random_attack_medium.py | Python | apache-2.0 | 4,940 | ["Gaussian"] | cadf19a9bbcddf46a16b9b434d25ad51dfca065a75c462a4cf737549a2bbc4c1 |
"""
This script produced NaN losses during training -- the cause has not been tracked down.
Followed from https://wiseodd.github.io/techblog/2016/12/10/variational-autoencoder/
"""
#import SetPub
#SetPub.set_pub()
from tensorflow.examples.tutorials.mnist import input_data
from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import optimizers
from keras import losses
# from keras.objectives import binary_crossentropy
from keras.callbacks import LearningRateScheduler
import numpy as np
import matplotlib.pyplot as plt
import keras.backend as K
import tensorflow as tf
original_dim = 2549 #2551 # mnist ~ 784
intermediate_dim1 = 1024 #
intermediate_dim = 512 #
latent_dim = 10
totalFiles = 256 #256
TestFiles = 32 #128
batch_size = 8
num_epochs = 50 #110 #50
epsilon_mean = 1.0 # 1.0
epsilon_std = 1.0 # 1.0
learning_rate = 1e-7
decay_rate = 0.0
# Q(z|X) -- encoder
inputs = Input(shape=(original_dim,))
h_q1 = Dense(intermediate_dim1, activation='relu')(inputs) # ADDED intermediate layer
h_q = Dense(intermediate_dim, activation='relu')(h_q1)
mu = Dense(latent_dim, activation='linear')(h_q)
log_sigma = Dense(latent_dim, activation='linear')(h_q)
# ----------------------------------------------------------------------------
def sample_z(args):
mu, log_sigma = args
###eps = K.random_normal(shape=(m, n_z), mean=0., std=1.)
eps = K.random_normal(shape=(batch_size, latent_dim), mean=epsilon_mean, stddev=epsilon_std)
return mu + K.exp(log_sigma / 2) * eps
# Sample z ~ Q(z|X)
z = Lambda(sample_z)([mu, log_sigma])
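# Reparameterization trick: log_sigma is treated as the log-variance, so
# K.exp(log_sigma / 2) recovers sigma and z = mu + sigma * eps is a sample
# from N(mu, sigma^2) that remains differentiable w.r.t. mu and log_sigma.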
# ----------------------------------------------------------------------------
# P(X|z) -- decoder
decoder_hidden = Dense(latent_dim, activation='relu')
decoder_hidden1 = Dense(intermediate_dim, activation='relu') # ADDED intermediate layer
decoder_hidden2 = Dense(intermediate_dim1, activation='relu') # ADDED intermediate layer
decoder_out = Dense(original_dim, activation='sigmoid')
h_p1 = decoder_hidden(z)
h_p2 = decoder_hidden1(h_p1) # ADDED intermediate layer
h_p3 = decoder_hidden2(h_p2) # ADDED intermediate layer
outputs = decoder_out(h_p3)
# ----------------------------------------------------------------------------
# Overall VAE model, for reconstruction and training
vae = Model(inputs, outputs)
# Encoder model, to encode input into latent variable
# We use the mean as the output as it is the center point, the representative of the gaussian
encoder = Model(inputs, mu)
# Generator model, generate new data given latent variable z
# d_in = Input(shape=(latent_dim,))
# d_h = decoder_hidden(d_in)
# d_h1 = decoder_hidden1(d_h)
# d_h2 = decoder_hidden2(d_h1)
# d_out = decoder_out(d_h2)
# decoder = Model(d_in, d_out)
# build a digit generator that can sample from the learned distribution
decoder_input = Input(shape=(latent_dim,))
_h_decoded = decoder_hidden(decoder_input)
_h1_decoded = decoder_hidden1(_h_decoded) ## ADDED layer_1
_h0_decoded = decoder_hidden2(_h1_decoded) ## ADDED --- should replicate decoder arch
_x_decoded_mean = decoder_out(_h0_decoded)
decoder = Model(decoder_input, _x_decoded_mean)
# -------------------------------------------------------------
#CUSTOM LOSS
def vae_loss(y_true, y_pred):
""" Calculate loss = reconstruction loss + KL loss for each data in minibatch """
# E[log P(X|z)]
recon = K.sum(K.binary_crossentropy(y_pred, y_true), axis=1)
# recon = K.categorical_crossentropy(y_pred, y_true)
# recon = losses.mean_squared_error(y_pred, y_true)
# D_KL(Q(z|X) || P(z|X)); calculate in closed form as both dist. are Gaussian
kl = 0.5*K.sum(K.exp(log_sigma) + K.square(mu) - 1. - log_sigma, axis=1)
return recon + kl
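# For q = N(mu, sigma^2) and p = N(0, 1) the KL term above is the closed form
#   KL(q || p) = 0.5 * sum(sigma^2 + mu^2 - 1 - log(sigma^2)),
# which, with log_sigma standing for log(sigma^2), is exactly
# 0.5 * sum(exp(log_sigma) + mu^2 - 1 - log_sigma) as computed here.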
#-------------------------------------------------------------
# LOAD
# from keras.datasets import mnist
#
# (x_train, y_train), (x_test, y_test) = mnist.load_data()
#
# X_train = x_train.astype('float32') / 255.
# ## X_test = x_test.astype('float32') / 255.
# X_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
# ## X_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
# mnist = input_data.read_data_sets("../MNIST_data/", one_hot=True)
# X_train = mnist.train.images
# X_train = X_train.astype('float32') / 255.
#
# X_test = mnist.test.images
# X_test = X_test.astype('float32') / 255.
# Y_test = mnist.test.labels
# -------------------------------------------------------------
# ----------------------------- i/o ------------------------------------------
import Cl_load
Dir0 = '../../../AllTrainTestSets/'
# density_file = '../Cl_data/Cl_'+str(nsize)+'.npy'
# density_file = '../Cl_data/LatinCl_'+str(nsize)+'.npy'
train_path = Dir0+'Cl_data/Data/LatinCl_'+str(totalFiles)+'.npy'
train_target_path = Dir0+ 'Cl_data/Data/LatinPara5_'+str(totalFiles)+'.npy'
test_path = Dir0+'Cl_data/Data/LatinCl_'+str(TestFiles)+'.npy'
test_target_path = Dir0+ 'Cl_data/Data/LatinPara5_'+str(TestFiles)+'.npy'
# halo_para_file = '../Cl_data/Para5_'+str(nsize)+'.npy'
# halo_para_file = '../Cl_data/LatinPara5_'+str(nsize)+'.npy'
# pk = pk_load.density_profile(data_path = density_file, para_path = halo_para_file)
camb_in = Cl_load.cmb_profile(train_path = train_path, train_target_path = train_target_path , test_path = test_path, test_target_path = test_target_path, num_para=5)
(x_train, y_train), (x_test, y_test) = camb_in.load_data()
x_train = x_train[:,2:]
x_test = x_test[:,2:]
print(x_train.shape, 'train sequences')
print(x_test.shape, 'test sequences')
print(y_train.shape, 'train sequences')
print(y_test.shape, 'test sequences')
# meanFactor = np.mean( [np.mean(x_train), np.mean(x_test ) ])
# print('-------mean factor:', meanFactor)
# x_train = x_train.astype('float32') - meanFactor #/ 255.
# x_test = x_test.astype('float32') - meanFactor #/ 255.
# np.save('../Cl_data/Data/meanfactor_'+str(totalFiles)+'.npy', meanFactor)
#
normFactor = np.max( [np.max(x_train), np.max(x_test ) ])
# normFactor = np.mean( [np.std(x_train), np.std(x_test ) ])
print('-------normalization factor:', normFactor)
x_train = x_train.astype('float32')/normFactor #/ 255.
x_test = x_test.astype('float32')/normFactor #/ 255.
np.save(Dir0+'Cl_data/Data/normfactor_'+str(totalFiles)+'.npy', normFactor)
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
## Trying to get x_train ~ (-1, 1) -- doesn't work well
# x_mean = np.mean(x_train, axis = 0)
# x_train = x_train - x_mean
# x_test = x_test - x_mean
## ADD noise
noise_factor = 0.00
x_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape)
x_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape)
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_test_noisy = np.clip(x_test_noisy, 0., 1.)
# plt.plot(x_train_noisy.T)
# ------------------------------------------------------------------------------
# TRAIN -- prone to NaN losses
adam = optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=None, decay=decay_rate)
vae.compile(optimizer=adam, loss=vae_loss)  # use the Adam instance configured above so learning_rate/decay_rate take effect
vae.fit(x_train_noisy, x_train, shuffle=True, batch_size=batch_size, nb_epoch=num_epochs, verbose=2,
validation_data=(x_test_noisy, x_test))
# ----------------------------------------------------------------------------
# y_pred = encoder.predict(x_train[10:20,:])
# display a 2D plot of the digit classes in the latent space
plt.figure(figsize=(6, 6))
x_train_encoded = encoder.predict(x_train)
plt.scatter(x_train_encoded[:, 0], x_train_encoded[:, 1], c=y_train[:, 0], cmap='spring')
plt.colorbar()
x_test_encoded = encoder.predict(x_test)
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test[:, 0], cmap='copper')
plt.colorbar()
plt.show()
x_train_encoded = encoder.predict(x_train)
x_decoded = decoder.predict(x_train_encoded)
np.save(Dir0+'Cl_data/Data/encoded_xtrain_'+str(totalFiles)+'.npy', x_train_encoded)
# ----------------------------------------------------------------------------
ls = np.load(Dir0+'Cl_data/Data/ls_'+str(totalFiles)+'.npy')[2:]
PlotSample = False
if PlotSample:
for i in range(3,10):
plt.figure(91, figsize=(8,6))
plt.plot(ls, x_decoded[i], 'r--', alpha = 0.8)
plt.plot(ls, x_train[i], 'b--', alpha = 0.8)
# plt.xscale('log')
# plt.yscale('log')
plt.title('reconstructed - red')
plt.show()
plotLoss = False
if plotLoss:
import matplotlib.pylab as plt
epochs = np.arange(1, num_epochs+1)
train_loss = vae.history.history['loss']
val_loss = vae.history.history['val_loss']
fig, ax = plt.subplots(1,1, sharex= True, figsize = (8,6))
# fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace= 0.02)
ax.plot(epochs,train_loss, '-', lw =1.5)
ax.plot(epochs,val_loss, '-', lw = 1.5)
ax.set_ylabel('loss')
ax.set_xlabel('epochs')
# ax[0].set_ylim([0,1])
# ax[0].set_title('Loss')
ax.legend(['train loss','val loss'])
plt.tight_layout()
# plt.savefig('../Cl_data/Plots/Training_loss.png')
plt.show()
SaveModel = False
if SaveModel:
epochs = np.arange(1, num_epochs+1)
train_loss = vae.history.history['loss']
val_loss = vae.history.history['val_loss']
training_hist = np.vstack([epochs, train_loss, val_loss])
# fileOut = 'Stack_opti' + str(opti_id) + '_loss' + str(loss_id) + '_lr' + str(learning_rate) + '_decay' + str(decay_rate) + '_batch' + str(batch_size) + '_epoch' + str(num_epoch)
fileOut = 'DenoiseModel_'+str(totalFiles)
vae.save(Dir0+'Cl_data/Model/fullAE_' + fileOut + '.hdf5')
encoder.save(Dir0+'Cl_data/Model/Encoder_' + fileOut + '.hdf5')
decoder.save(Dir0+'Cl_data/Model/Decoder_' + fileOut + '.hdf5')
np.save(Dir0+'Cl_data/Model/TrainingHistory_'+fileOut+'.npy', training_hist)
PlotModel = False
if PlotModel:
from keras.utils.vis_utils import plot_model
fileOut = Dir0+'Cl_data/Plots/ArchitectureFullAE.png'
plot_model(vae, to_file=fileOut, show_shapes=True, show_layer_names=True)
fileOut = Dir0+'Cl_data/Plots/ArchitectureEncoder.png'
plot_model(encoder, to_file=fileOut, show_shapes=True, show_layer_names=True)
fileOut = Dir0+'Cl_data/Plots/ArchitectureDecoder.png'
plot_model(decoder, to_file=fileOut, show_shapes=True, show_layer_names=True)
| hep-cce/ml_classification_studies | cosmoDNN/AutoEncoder/Cl_denoiseVAE.py | Python | gpl-3.0 | 10,348 | ["Gaussian"] | 0d46571b8585082cf656081dd2482a0c812964b600bd1c2d2624e44205fe6819 |
import os
from six import string_types
from .destination import submit_params
from .setup_handler import build as build_setup_handler
from .job_directory import RemoteJobDirectory
from .decorators import parseJson
from .decorators import retry
from .util import json_dumps
from .util import json_loads
from .util import copy
from .util import ensure_directory
from .util import to_base64_json
from .action_mapper import (
path_type,
actions,
)
import logging
log = logging.getLogger(__name__)
CACHE_WAIT_SECONDS = 3
class OutputNotFoundException(Exception):
def __init__(self, path):
self.path = path
def __str__(self):
return "No remote output found for path %s" % self.path
class BaseJobClient(object):
def __init__(self, destination_params, job_id):
destination_params = destination_params or {}
self.destination_params = destination_params
self.job_id = job_id
if "jobs_directory" in destination_params:
staging_directory = destination_params["jobs_directory"]
sep = destination_params.get("remote_sep", os.sep)
job_directory = RemoteJobDirectory(
remote_staging_directory=staging_directory,
remote_id=job_id,
remote_sep=sep,
)
else:
job_directory = None
for attr in ["ssh_key", "ssh_user", "ssh_host", "ssh_port"]:
setattr(self, attr, destination_params.get(attr, None))
self.env = destination_params.get("env", [])
self.files_endpoint = destination_params.get("files_endpoint", None)
self.job_directory = job_directory
default_file_action = self.destination_params.get("default_file_action", "transfer")
if default_file_action not in actions:
raise Exception("Unknown Pulsar default file action type %s" % default_file_action)
self.default_file_action = default_file_action
self.action_config_path = self.destination_params.get("file_action_config", None)
self.setup_handler = build_setup_handler(self, destination_params)
def setup(self, tool_id=None, tool_version=None):
"""
Setup remote Pulsar server to run this job.
"""
setup_args = {"job_id": self.job_id}
if tool_id:
setup_args["tool_id"] = tool_id
if tool_version:
setup_args["tool_version"] = tool_version
return self.setup_handler.setup(**setup_args)
@property
def prefer_local_staging(self):
# If no remote job directory is defined, paths are calculated here and
# staging is preferred locally.
return self.job_directory is None
class JobClient(BaseJobClient):
"""
Objects of this client class perform low-level communication with a remote Pulsar server.
**Parameters**
destination_params : dict or str
connection parameters, either a URL string or a dict containing `url` (and optionally `private_token`).
job_id : str
Galaxy job/task id.
"""
def __init__(self, destination_params, job_id, job_manager_interface):
super(JobClient, self).__init__(destination_params, job_id)
self.job_manager_interface = job_manager_interface
def launch(self, command_line, dependencies_description=None, env=[], remote_staging=[], job_config=None):
"""
Queue up the execution of the supplied `command_line` on the remote
server. Called launch for historical reasons, should be renamed to
enqueue or something like that.
**Parameters**
command_line : str
Command to execute.
"""
launch_params = dict(command_line=command_line, job_id=self.job_id)
submit_params_dict = submit_params(self.destination_params)
if submit_params_dict:
launch_params['params'] = json_dumps(submit_params_dict)
if dependencies_description:
launch_params['dependencies_description'] = json_dumps(dependencies_description.to_dict())
if env:
launch_params['env'] = json_dumps(env)
if remote_staging:
launch_params['remote_staging'] = json_dumps(remote_staging)
if job_config and self.setup_handler.local:
# Setup not yet called, job properties were inferred from
# destination arguments. Hence, must have Pulsar setup job
# before queueing.
setup_params = _setup_params_from_job_config(job_config)
launch_params["setup_params"] = json_dumps(setup_params)
return self._raw_execute("submit", launch_params)
def full_status(self):
""" Return a dictionary summarizing final state of job.
"""
return self.raw_check_complete()
def kill(self):
"""
Cancel remote job, either removing from the queue or killing it.
"""
return self._raw_execute("cancel", {"job_id": self.job_id})
@retry()
@parseJson()
def raw_check_complete(self):
"""
Get check_complete response from the remote server.
"""
check_complete_response = self._raw_execute("status", {"job_id": self.job_id})
return check_complete_response
def get_status(self):
check_complete_response = self.raw_check_complete()
# Older Pulsar instances won't set status so use 'complete', at some
# point drop backward compatibility.
status = check_complete_response.get("status", None)
return status
def clean(self):
"""
Cleanup the remote job.
"""
self._raw_execute("clean", {"job_id": self.job_id})
@parseJson()
def remote_setup(self, **setup_args):
"""
Setup remote Pulsar server to run this job.
"""
return self._raw_execute("setup", setup_args)
def put_file(self, path, input_type, name=None, contents=None, action_type='transfer'):
if not name:
name = os.path.basename(path)
args = {"job_id": self.job_id, "name": name, "type": input_type}
input_path = path
if contents:
input_path = None
# action_type == 'message' should either copy or transfer depending on
# the default action, not just fall back to transfer.
if action_type in ['transfer', 'message']:
if isinstance(contents, string_types):
contents = contents.encode("utf-8")
return self._upload_file(args, contents, input_path)
elif action_type == 'copy':
path_response = self._raw_execute('path', args)
pulsar_path = json_loads(path_response)['path']
copy(path, pulsar_path)
return {'path': pulsar_path}
def fetch_output(self, path, name, working_directory, action_type, output_type):
"""
Fetch (transfer, copy, etc...) an output from the remote Pulsar server.
**Parameters**
path : str
Local path of the dataset.
name : str
Remote name of file (i.e. path relative to remote staging output
or working directory).
working_directory : str
Local working_directory for the job.
action_type : str
Where to find the file on Pulsar (output_workdir or output). 'legacy' is also
an option; in that case Pulsar is asked for the location - this will only be
used if targeting an older Pulsar server that didn't return statuses
allowing this to be inferred.
"""
if output_type == 'output_workdir':
self._fetch_work_dir_output(name, working_directory, path, action_type=action_type)
elif output_type == 'output':
self._fetch_output(path=path, name=name, action_type=action_type)
else:
raise Exception("Unknown output_type %s" % output_type)
def _raw_execute(self, command, args={}, data=None, input_path=None, output_path=None):
return self.job_manager_interface.execute(command, args, data, input_path, output_path)
def _fetch_output(self, path, name=None, check_exists_remotely=False, action_type='transfer'):
if not name:
# Extra files will send in the path.
name = os.path.basename(path)
self.__populate_output_path(name, path, action_type)
def _fetch_work_dir_output(self, name, working_directory, output_path, action_type='transfer'):
ensure_directory(output_path)
if action_type == 'transfer':
self.__raw_download_output(name, self.job_id, path_type.OUTPUT_WORKDIR, output_path)
else: # Even if action is none - Pulsar has a different work_dir so this needs to be copied.
pulsar_path = self._output_path(name, self.job_id, path_type.OUTPUT_WORKDIR)['path']
copy(pulsar_path, output_path)
def __populate_output_path(self, name, output_path, action_type):
ensure_directory(output_path)
if action_type == 'transfer':
self.__raw_download_output(name, self.job_id, path_type.OUTPUT, output_path)
elif action_type == 'copy':
pulsar_path = self._output_path(name, self.job_id, path_type.OUTPUT)['path']
copy(pulsar_path, output_path)
@parseJson()
def _upload_file(self, args, contents, input_path):
return self._raw_execute("upload_file", args, contents, input_path)
@parseJson()
def _output_path(self, name, job_id, output_type):
return self._raw_execute("path",
{"name": name,
"job_id": self.job_id,
"type": output_type})
@retry()
def __raw_download_output(self, name, job_id, output_type, output_path):
output_params = {
"name": name,
"job_id": self.job_id,
"type": output_type
}
self._raw_execute("download_output", output_params, output_path=output_path)
class BaseMessageJobClient(BaseJobClient):
def __init__(self, destination_params, job_id, client_manager):
super(BaseMessageJobClient, self).__init__(destination_params, job_id)
if not self.job_directory:
error_message = "Message-queue based Pulsar client requires the destination to define a remote job_directory to stage files into."
raise Exception(error_message)
self.client_manager = client_manager
def clean(self):
del self.client_manager.status_cache[self.job_id]
def full_status(self):
full_status = self.client_manager.status_cache.get(self.job_id, None)
if full_status is None:
raise Exception("full_status() called before a final status was properly cached with cilent manager.")
return full_status
def _build_setup_message(self, command_line, dependencies_description, env, remote_staging, job_config):
"""
"""
launch_params = dict(command_line=command_line, job_id=self.job_id)
submit_params_dict = submit_params(self.destination_params)
if submit_params_dict:
launch_params['submit_params'] = submit_params_dict
if dependencies_description:
launch_params['dependencies_description'] = dependencies_description.to_dict()
if env:
launch_params['env'] = env
if remote_staging:
launch_params['remote_staging'] = remote_staging
launch_params['remote_staging']['ssh_key'] = self.ssh_key
if job_config and self.setup_handler.local:
# Setup not yet called, job properties were inferred from
# destination arguments. Hence, must have Pulsar setup job
# before queueing.
setup_params = _setup_params_from_job_config(job_config)
launch_params["setup_params"] = setup_params
return launch_params
class MessageJobClient(BaseMessageJobClient):
def launch(self, command_line, dependencies_description=None, env=[], remote_staging=[], job_config=None):
"""
"""
launch_params = self._build_setup_message(
command_line,
dependencies_description=dependencies_description,
env=env,
remote_staging=remote_staging,
job_config=job_config
)
response = self.client_manager.exchange.publish("setup", launch_params)
log.info("Job published to setup message queue.")
return response
def kill(self):
self.client_manager.exchange.publish("kill", dict(job_id=self.job_id))
class MessageCLIJobClient(BaseMessageJobClient):
def __init__(self, destination_params, job_id, client_manager, shell):
super(MessageCLIJobClient, self).__init__(destination_params, job_id, client_manager)
self.remote_pulsar_path = destination_params["remote_pulsar_path"]
self.shell = shell
def launch(self, command_line, dependencies_description=None, env=[], remote_staging=[], job_config=None):
"""
"""
launch_params = self._build_setup_message(
command_line,
dependencies_description=dependencies_description,
env=env,
remote_staging=remote_staging,
job_config=job_config
)
base64_message = to_base64_json(launch_params)
submit_command = os.path.join(self.remote_pulsar_path, "scripts", "submit.bash")
# TODO: Allow configuration of manager, app, and ini path...
self.shell.execute("nohup %s --base64 %s &" % (submit_command, base64_message))
def kill(self):
# TODO
pass
class InputCachingJobClient(JobClient):
"""
Beta client that caches staged files to prevent duplication.
"""
def __init__(self, destination_params, job_id, job_manager_interface, client_cacher):
super(InputCachingJobClient, self).__init__(destination_params, job_id, job_manager_interface)
self.client_cacher = client_cacher
@parseJson()
def _upload_file(self, args, contents, input_path):
action = "upload_file"
if contents:
input_path = None
return self._raw_execute(action, args, contents, input_path)
else:
event_holder = self.client_cacher.acquire_event(input_path)
cache_required = self.cache_required(input_path)
if cache_required:
self.client_cacher.queue_transfer(self, input_path)
while not event_holder.failed:
available = self.file_available(input_path)
if available['ready']:
token = available['token']
args["cache_token"] = token
return self._raw_execute(action, args)
event_holder.event.wait(30)
if event_holder.failed:
raise Exception("Failed to transfer file %s" % input_path)
@parseJson()
def cache_required(self, path):
return self._raw_execute("cache_required", {"path": path})
@parseJson()
def cache_insert(self, path):
return self._raw_execute("cache_insert", {"path": path}, None, path)
@parseJson()
def file_available(self, path):
return self._raw_execute("file_available", {"path": path})
def _setup_params_from_job_config(job_config):
job_id = job_config.get("job_id", None)
tool_id = job_config.get("tool_id", None)
tool_version = job_config.get("tool_version", None)
return dict(
job_id=job_id,
tool_id=tool_id,
tool_version=tool_version
)
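# A minimal usage sketch (assumptions flagged inline): the manager-interface
# stub and the destination parameters below are hypothetical stand-ins for the
# HTTP/message-queue transports that real deployments obtain through the
# pulsar.client factories; it also assumes the default setup handler can be
# built from these parameters alone.
if __name__ == "__main__":
    class _EchoManagerInterface(object):
        # Hypothetical transport stub: records each remote call and pretends
        # every job is already complete.
        def execute(self, command, args={}, data=None, input_path=None, output_path=None):
            print("execute %s %s" % (command, args))
            return '{"status": "complete"}'

    destination_params = {
        "jobs_directory": "/tmp/pulsar_staging",  # hypothetical staging root
        "default_file_action": "transfer",
    }
    client = JobClient(destination_params, "example-job-1", _EchoManagerInterface())
    client.launch("echo hello")   # issues the "submit" command
    print(client.get_status())    # polls "status" and reads the 'status' field
    client.kill()                 # issues "cancel"
    client.clean()                # issues "clean"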
| ssorgatem/pulsar | pulsar/client/client.py | Python | apache-2.0 | 15,600 | ["Galaxy"] | a8e029ef345f5a023278cab52dceaa481275fdd9a8d4aef88d73ab12b45b7256 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import argparse
import vcf
import sys
import gzip
import os
from collections import namedtuple
from operator import attrgetter
# import io
from Bio import SeqIO
from Bio.Seq import Seq
# from Bio.SeqRecord import SeqRecord
# from Bio.SeqIO.FastaIO import SimpleFastaParse
# BI: whether an allele is biallelic (1) or not (0)
# CHG: change: transversion (tv) or transition (ts)
# SYN: 1 synonymous, 0 non-synonymous
# EFF: effect, one of PSC (premature stop codon), PRO (putative promoter disruption), TER (intergenic), TRA (intragenic)
Result = namedtuple('Result', 'CHROM POS REF ALT BI CHG SYN EFF')
DEBUG=False
def get_args():
parser = argparse.ArgumentParser(
description="Given a VCF and a genbank file, writes out a report")
parser.add_argument('vcf', help="path to vcf")
parser.add_argument("gbk", help="path to genbank")
parser.add_argument("-t", "--trans_table",
help="translation table; default 11",
type=int, default=11)
parser.add_argument("-f", "--feature",
help="which feature to consider: gene or cds",
choices=["gene", "cds"],
default="gene")
parser.add_argument("-b", "--binding_width",
help="width to mark whether snps might disrupt RBS",
type=int, default=15)
args = parser.parse_args()
return(args)
def tvts(ref, alt):
valid = ["A", "T", "C", "G"]
for nuc in [ref, alt]:
if nuc not in valid:
sys.stderr.write("Non-standard nucleotide: %s\n" % nuc)
return ("x")
refs = {
"A": {"A": "-",
"T": "tv",
"C": "tv",
"G": "ts"},
"T": {"A": "tv",
"T": "-",
"C": "ts",
"G": "tv"},
"C": {"A": "tv",
"T": "ts",
"C": "-",
"G": "tv"},
"G": {"A": "ts",
"T": "tv",
"C": "tv",
"G": "-"}
}
result = refs[ref.upper()][alt.upper()]
return (result)
def subs_nuc(refseq, start, end, pos, alt):
#gene ------
# ----;----;
# *
# snp: 8
# start = 5
# start = 4 in Python indexing; Biopython slices are 0-based while VCF positions are 1-based
# end = 10
# region seq[5 - 1: 8 - 1 ] + SNP + seq[ SNP : end ]
thisseq = refseq[start : pos ] + alt +refseq[pos + 1 : end ]
# print(refseq[start: end])
# print(thisseq)
assert len(thisseq) == len(refseq[start: end]), "bad reconstruction of reference; ref length is %i and reconstructed length is %i" %(len(refseq[start: end]), len(thisseq))
return thisseq
def test_subs_nuc_psc():
refseq = "ATGCCCAAATTTTACTAG"
mutseq = "ATGCCCAAATTTTAGTAG"
newseq = subs_nuc(refseq, start=0, end=18, pos=14, alt="G")
assert mutseq == newseq, "error in sub_nuc function"
def test_subs_nuc_norm():
refseq = "ATGCCCAAATTTTACTAG"
mutseq = "ATGCCCAAATTTTATTAG"
assert mutseq == subs_nuc(refseq, start=0, end=18, pos=14, alt="T"), "error in sub_nuc function"
def test_pmc():
refseq = "ATGCCCAAATTTTACTAG"
mutseq = "ATGCCCAAATTTTAGTAG"
thisp = Seq(mutseq).translate(table=11, to_stop=True)
refp = Seq(refseq).translate(table=11, to_stop=True)
print(thisp)
print(refp)
assert len(refp) != len(thisp), "error detecting premature stop codon!"
def process_region(args, vcf_data, chrom, start, end, rec, strand, is_locus=False):
if is_locus:
assert rec is not None, "must provide rec for loci"
assert strand is not None, "must provide strand for loci"
nucseq = rec.seq[start: end]
if strand == 1:
nucseqp = nucseq.translate(table=args.trans_table, to_stop=True)
else:
nucseqp = nucseq.reverse_complement().translate(table=args.trans_table, to_stop=True)
these_vcfs = vcf_data[chrom][start: end]
ignored = 0
for pos, ref, altlist, PROCESS in these_vcfs:
if len(ref) != 1:
ignored = ignored + 1
continue
if not PROCESS:
continue
biallelic = False
if len(altlist) > 1:
biallelic = True
for alt in altlist:
if len(alt) > 1:
ignored = ignored + 1
continue
thiststv = tvts(ref, str(alt))
if is_locus:
try:
thisseq = subs_nuc(rec.seq, start, end, pos, str(alt))
except AssertionError:
sys.stderr.write("start: %i; end %i; pos: %i ; alt: %s\n" %(start, end, pos, str(alt)))
sys.exit(1)
assert len(thisseq) == len(nucseq), "bad reconstruction of reference"
if strand == 1:
thisseqp = thisseq.translate(table=args.trans_table, to_stop=True)
else:
thisseqp = thisseq.reverse_complement().translate(table=args.trans_table, to_stop=True)
if DEBUG:
print(nucseq)
print(thisseq)
print(rec.seq[start : pos ] + ref)
print(nucseqp)
print(thisseqp)
SYN = 1
EFF = "TRA"
if nucseqp != thisseqp:
SYN = 0
if len(thisseqp) != len(nucseqp):
EFF = "PSC"
# back to 1-indexed
thisres = Result(chrom, pos+1, ref, alt, biallelic, thiststv, SYN, EFF)
sys.stdout.write("%s\t%i\t%s\t%s\t%i\t%s\t%i\t%s\n" % thisres)
else:
# process intergenic region
EFF = "TER"
if (
(pos - start) < args.binding_width or
(end - pos) < args.binding_width
):
EFF = "PRO"
thisres = Result(chrom, pos, ref, alt, 0, thiststv, 0, EFF)
sys.stdout.write("%s\t%i\t%s\t%s\t%i\t%s\t%i\t%s\n" % thisres)
return ignored
def main(args=None):
"""
"""
if args is None:
args = get_args()
gbk_open_fun = open
vcf_open_fun = open
if os.path.splitext(args.gbk)[-1] in ['.gz', '.gzip']:
gbk_open_fun = gzip.open
if os.path.splitext(args.vcf)[-1] in ['.gz', '.gzip']:
vcf_open_fun = gzip.open
vcf_reader = vcf.Reader(vcf_open_fun(args.vcf, 'r'))
found_one = False
chroms = []
sys.stderr.write("Getting IDs from Genbank\n")
vcf_data = {}
with gbk_open_fun(args.gbk, "r") as ingbk:
for rec in SeqIO.parse(ingbk, "genbank"):
# print(rec.features[2])
# sys.exit()
vcf_data[rec.id.split(".")[0]] = []
sys.stderr.write("Reading in vcf\n")
# we do this weird counter thing so that we have an entry for each position in the genome
# it's a questionable approach until you have to deal with subsets of this list, at which point trading RAM for speed pays off
#
prev_pos = 0 # we keep track of the previous position so we know when to reset the counter for new contigs
for i, v in enumerate(vcf_reader):
# if (i % 1000) == 0:
# sys.stderr.write(str(i) + " ")
# here wer set to counter
if v.POS < prev_pos or i == 0:
counter = 1
if v.POS > 200000000:
sys.stderr.write("Warning: long sequence detected, only processing the first 20Mb")
break
# this pads out for the non-snp regions
while counter != v.POS and counter < v.POS:
vcf_data[v.CHROM].append([counter, "-", "-", False])
counter = counter + 1
assert counter == v.POS, "error syncing counters;\n -chrom: %s \n-position: %i \n -previous: %i \n -counter: %i" % (v.CHROM, v.POS, prev_pos, counter)
# make 0-indexed
vcf_data[v.CHROM].append([v.POS-1, v.REF, v.ALT, True])
counter = counter + 1
prev_pos = v.POS
last_gene_end = 0
# first process all the coding sequences, then hit the remaining intergenic loci
ignored_positions = 0
with gbk_open_fun(args.gbk, "r") as ingbk:
for rec in SeqIO.parse(ingbk, "genbank"):
thischrom = rec.id.split(".")[0]
sys.stderr.write("Processing %s\n" % thischrom)
for feat in rec.features:
#if feat.type not in ["source"]:
if feat.type == args.feature:
# process coding region
ig = process_region(
args, vcf_data,
chrom=thischrom,
start=feat.location.start,
end=feat.location.end,
rec=rec,
strand=feat.strand,
is_locus=True)
ignored_positions = ignored_positions + ig
ig = process_region(
args, vcf_data,
chrom=thischrom,
start=last_gene_end,
end=feat.location.start,
rec=rec,
strand=feat.strand,
is_locus=False)
ignored_positions = ignored_positions + ig
# keep track of where the last gene ended
last_gene_end = feat.location.end
if ignored_positions != 0:
sys.stderr.write("ignored %d complex entries\n" % ignored_positions)
if __name__ == '__main__':
main()
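# Example invocation (hypothetical file names): write the tab-separated report
# to stdout and progress messages to stderr.
#
#   python main.py variants.vcf.gz annotation.gbk.gz > snp_report.tsv
#
# Each reported row follows the Result fields:
#   CHROM  POS  REF  ALT  BI  CHG  SYN  EFF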
| nickp60/open_utils | vcfortless/vcfortless/main.py | Python | mit | 9,628 | ["Biopython"] | 7d55822a4fc10b1e009f6b305babcdac1d249d2a6bbe9cdbba4ba3aa59a825f9 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''The 'grit build' tool along with integration for this tool with the
SCons build system.
'''
import codecs
import filecmp
import getopt
import os
import shutil
import sys
from grit import grd_reader
from grit import shortcuts
from grit import util
from grit.format import minifier
from grit.node import include
from grit.node import message
from grit.node import structure
from grit.tool import interface
# It would be cleaner to have each module register itself, but that would
# require importing all of them on every run of GRIT.
'''Map from <output> node types to modules under grit.format.'''
_format_modules = {
'android': 'android_xml',
'c_format': 'c_format',
'chrome_messages_json': 'chrome_messages_json',
'data_package': 'data_pack',
'js_map_format': 'js_map_format',
'rc_all': 'rc',
'rc_translateable': 'rc',
'rc_nontranslateable': 'rc',
'rc_header': 'rc_header',
'resource_map_header': 'resource_map',
'resource_map_source': 'resource_map',
'resource_file_map_source': 'resource_map',
}
_format_modules.update(
(type, 'policy_templates.template_formatter') for type in
[ 'adm', 'admx', 'adml', 'reg', 'doc', 'json',
'plist', 'plist_strings', 'android_policy' ])
def GetFormatter(type):
modulename = 'grit.format.' + _format_modules[type]
__import__(modulename)
module = sys.modules[modulename]
try:
return module.Format
except AttributeError:
return module.GetFormatter(type)
class RcBuilder(interface.Tool):
'''A tool that builds RC files and resource header files for compilation.
Usage: grit build [-o OUTPUTDIR] [-D NAME[=VAL]]*
All output options for this tool are specified in the input file (see
'grit help' for details on how to specify the input file - it is a global
option).
Options:
-a FILE Assert that the given file is an output. There can be
multiple "-a" flags listed for multiple outputs. If a "-a"
or "--assert-file-list" argument is present, then the list
of asserted files must match the output files or the tool
will fail. The use-case is for the build system to maintain
separate lists of output files and to catch errors if the
build system's list and the grit list are out-of-sync.
--assert-file-list Provide a file listing multiple asserted output files.
There is one file name per line. This acts like specifying
each file with "-a" on the command line, but without the
possibility of running into OS line-length limits for very
long lists.
-o OUTPUTDIR Specify what directory output paths are relative to.
Defaults to the current directory.
-D NAME[=VAL] Specify a C-preprocessor-like define NAME with optional
value VAL (defaults to 1) which will be used to control
conditional inclusion of resources.
-E NAME=VALUE Set environment variable NAME to VALUE (within grit).
-f FIRSTIDSFILE Path to a python file that specifies the first id of
value to use for resources. A non-empty value here will
override the value specified in the <grit> node's
first_ids_file.
-w WHITELISTFILE Path to a file containing the string names of the
resources to include. Anything not listed is dropped.
-t PLATFORM Specifies the platform the build is targeting; defaults
to the value of sys.platform. The value provided via this
flag should match what sys.platform would report for your
target platform; see grit.node.base.EvaluateCondition.
-h HEADERFORMAT Custom format string to use for generating rc header files.
The string should have two placeholders: {textual_id}
and {numeric_id}. E.g. "#define {textual_id} {numeric_id}"
Otherwise it will use the default "#define SYMBOL 1234"
--output-all-resource-defines
--no-output-all-resource-defines If specified, overrides the value of the
output_all_resource_defines attribute of the root <grit>
element of the input .grd file.
--write-only-new flag
If flag is non-0, write output files to a temporary file
first, and copy it to the real output only if the new file
is different from the old file. This allows some build
systems to realize that dependent build steps might be
unnecessary, at the cost of comparing the output data at
grit time.
--depend-on-stamp
If specified along with --depfile and --depdir, the depfile
generated will depend on a stampfile instead of the first
output in the input .grd file.
--js-minifier A command to run the Javascript minifier. If not set then
Javascript won't be minified. The command should read the
original Javascript from standard input, and output the
minified Javascript to standard output. A non-zero exit
status will be taken as indicating failure.
Conditional inclusion of resources only affects the output of files which
control which resources get linked into a binary, e.g. it affects .rc files
meant for compilation but it does not affect resource header files (that define
IDs). This helps ensure that values of IDs stay the same, that all messages
are exported to translation interchange files (e.g. XMB files), etc.
'''
def ShortDescription(self):
return 'A tool that builds RC files for compilation.'
def Run(self, opts, args):
self.output_directory = '.'
first_ids_file = None
whitelist_filenames = []
assert_output_files = []
target_platform = None
depfile = None
depdir = None
rc_header_format = None
output_all_resource_defines = None
write_only_new = False
depend_on_stamp = False
js_minifier = None
replace_ellipsis = True
(own_opts, args) = getopt.getopt(args, 'a:o:D:E:f:w:t:h:',
('depdir=','depfile=','assert-file-list=',
'output-all-resource-defines',
'no-output-all-resource-defines',
'no-replace-ellipsis',
'depend-on-stamp',
'js-minifier=',
'write-only-new='))
for (key, val) in own_opts:
if key == '-a':
assert_output_files.append(val)
elif key == '--assert-file-list':
with open(val) as f:
assert_output_files += f.read().splitlines()
elif key == '-o':
self.output_directory = val
elif key == '-D':
name, val = util.ParseDefine(val)
self.defines[name] = val
elif key == '-E':
(env_name, env_value) = val.split('=', 1)
os.environ[env_name] = env_value
elif key == '-f':
# TODO(joi@chromium.org): Remove this override once change
# lands in WebKit.grd to specify the first_ids_file in the
# .grd itself.
first_ids_file = val
elif key == '-w':
whitelist_filenames.append(val)
elif key == '--output-all-resource-defines':
output_all_resource_defines = True
elif key == '--no-output-all-resource-defines':
output_all_resource_defines = False
elif key == '--no-replace-ellipsis':
replace_ellipsis = False
elif key == '-t':
target_platform = val
elif key == '-h':
rc_header_format = val
elif key == '--depdir':
depdir = val
elif key == '--depfile':
depfile = val
elif key == '--write-only-new':
write_only_new = val != '0'
elif key == '--depend-on-stamp':
depend_on_stamp = True
elif key == '--js-minifier':
js_minifier = val
if len(args):
print 'This tool takes no tool-specific arguments.'
return 2
self.SetOptions(opts)
if self.scons_targets:
self.VerboseOut('Using SCons targets to identify files to output.\n')
else:
self.VerboseOut('Output directory: %s (absolute path: %s)\n' %
(self.output_directory,
os.path.abspath(self.output_directory)))
if whitelist_filenames:
self.whitelist_names = set()
for whitelist_filename in whitelist_filenames:
self.VerboseOut('Using whitelist: %s\n' % whitelist_filename);
whitelist_contents = util.ReadFile(whitelist_filename, util.RAW_TEXT)
self.whitelist_names.update(whitelist_contents.strip().split('\n'))
if js_minifier:
minifier.SetJsMinifier(js_minifier)
self.write_only_new = write_only_new
self.res = grd_reader.Parse(opts.input,
debug=opts.extra_verbose,
first_ids_file=first_ids_file,
defines=self.defines,
target_platform=target_platform)
# If the output_all_resource_defines option is specified, override the value
# found in the grd file.
if output_all_resource_defines is not None:
self.res.SetShouldOutputAllResourceDefines(output_all_resource_defines)
# Set an output context so that conditionals can use defines during the
# gathering stage; we use a dummy language here since we are not outputting
# a specific language.
self.res.SetOutputLanguage('en')
if rc_header_format:
self.res.AssignRcHeaderFormat(rc_header_format)
self.res.RunGatherers()
# Replace ... with the single-character version. http://crbug.com/621772
if replace_ellipsis:
for node in self.res:
if isinstance(node, message.MessageNode):
node.SetReplaceEllipsis(True)
self.Process()
if assert_output_files:
if not self.CheckAssertedOutputFiles(assert_output_files):
return 2
if depfile and depdir:
self.GenerateDepfile(depfile, depdir, first_ids_file, depend_on_stamp)
return 0
def __init__(self, defines=None):
# Default file-creation function is codecs.open(). Only done to allow
# overriding by unit test.
self.fo_create = codecs.open
# key/value pairs of C-preprocessor like defines that are used for
# conditional output of resources
self.defines = defines or {}
# self.res is a fully-populated resource tree if Run()
# has been called, otherwise None.
self.res = None
# Set to a list of filenames for the output nodes that are relative
# to the current working directory. They are in the same order as the
# output nodes in the file.
self.scons_targets = None
# The set of names that are whitelisted to actually be included in the
# output.
self.whitelist_names = None
# Whether to compare outputs to their old contents before writing.
self.write_only_new = False
@staticmethod
def AddWhitelistTags(start_node, whitelist_names):
# Walk the tree of nodes, adding attributes to mark the nodes that shouldn't
# be written into the target files (skip markers).
for node in start_node:
# Same trick data_pack.py uses to see what nodes actually result in
# real items.
if (isinstance(node, include.IncludeNode) or
isinstance(node, message.MessageNode) or
isinstance(node, structure.StructureNode)):
text_ids = node.GetTextualIds()
# Mark the item to be skipped if it wasn't in the whitelist.
if text_ids and text_ids[0] not in whitelist_names:
node.SetWhitelistMarkedAsSkip(True)
@staticmethod
def ProcessNode(node, output_node, outfile):
'''Processes a node in-order, calling its formatter before and after
recursing to its children.
Args:
node: grit.node.base.Node subclass
output_node: grit.node.io.OutputNode
outfile: open filehandle
'''
base_dir = util.dirname(output_node.GetOutputFilename())
formatter = GetFormatter(output_node.GetType())
formatted = formatter(node, output_node.GetLanguage(), output_dir=base_dir)
outfile.writelines(formatted)
def Process(self):
# Update filenames with those provided by SCons if we're being invoked
# from SCons. The list of SCons targets also includes all <structure>
# node outputs, but it starts with our output files, in the order they
# occur in the .grd
if self.scons_targets:
assert len(self.scons_targets) >= len(self.res.GetOutputFiles())
outfiles = self.res.GetOutputFiles()
for ix in range(len(outfiles)):
outfiles[ix].output_filename = os.path.abspath(
self.scons_targets[ix])
else:
for output in self.res.GetOutputFiles():
output.output_filename = os.path.abspath(os.path.join(
self.output_directory, output.GetFilename()))
# If there are whitelisted names, tag the tree once up front, this way
# while looping through the actual output, it is just an attribute check.
if self.whitelist_names:
self.AddWhitelistTags(self.res, self.whitelist_names)
for output in self.res.GetOutputFiles():
self.VerboseOut('Creating %s...' % output.GetFilename())
# Microsoft's RC compiler can only deal with single-byte or double-byte
# files (no UTF-8), so we make all RC files UTF-16 to support all
# character sets.
if output.GetType() in ('rc_header', 'resource_map_header',
'resource_map_source', 'resource_file_map_source'):
encoding = 'cp1252'
elif output.GetType() in ('android', 'c_format', 'js_map_format', 'plist',
'plist_strings', 'doc', 'json', 'android_policy'):
encoding = 'utf_8'
elif output.GetType() in ('chrome_messages_json',):
# Chrome Web Store currently expects BOM for UTF-8 files :-(
encoding = 'utf-8-sig'
else:
# TODO(gfeher) modify here to set utf-8 encoding for admx/adml
encoding = 'utf_16'
# Set the context, for conditional inclusion of resources
self.res.SetOutputLanguage(output.GetLanguage())
self.res.SetOutputContext(output.GetContext())
self.res.SetFallbackToDefaultLayout(output.GetFallbackToDefaultLayout())
self.res.SetDefines(self.defines)
# Make the output directory if it doesn't exist.
self.MakeDirectoriesTo(output.GetOutputFilename())
# Write the results to a temporary file and only overwrite the original
# if the file changed. This avoids unnecessary rebuilds.
outfile = self.fo_create(output.GetOutputFilename() + '.tmp', 'wb')
if output.GetType() != 'data_package':
outfile = util.WrapOutputStream(outfile, encoding)
# Iterate in-order through entire resource tree, calling formatters on
# the entry into a node and on exit out of it.
with outfile:
self.ProcessNode(self.res, output, outfile)
# Now copy from the temp file back to the real output, but on Windows,
# only if the real output doesn't exist or the contents of the file
# changed. This prevents identical headers from being written and .cc
# files from recompiling (which is painful on Windows).
if not os.path.exists(output.GetOutputFilename()):
os.rename(output.GetOutputFilename() + '.tmp',
output.GetOutputFilename())
else:
# CHROMIUM SPECIFIC CHANGE.
# This clashes with gyp + vstudio, which expect the output timestamp
# to change on a rebuild, even if nothing has changed, so only do
# it when opted in.
if not self.write_only_new:
write_file = True
else:
files_match = filecmp.cmp(output.GetOutputFilename(),
output.GetOutputFilename() + '.tmp')
write_file = not files_match
if write_file:
shutil.copy2(output.GetOutputFilename() + '.tmp',
output.GetOutputFilename())
os.remove(output.GetOutputFilename() + '.tmp')
self.VerboseOut(' done.\n')
# Print warnings if there are any duplicate shortcuts.
warnings = shortcuts.GenerateDuplicateShortcutsWarnings(
self.res.UberClique(), self.res.GetTcProject())
if warnings:
print '\n'.join(warnings)
# Print out any fallback warnings, and missing translation errors, and
# exit with an error code if there are missing translations in a non-pseudo
# and non-official build.
warnings = (self.res.UberClique().MissingTranslationsReport().
encode('ascii', 'replace'))
if warnings:
self.VerboseOut(warnings)
if self.res.UberClique().HasMissingTranslations():
print self.res.UberClique().missing_translations_
sys.exit(-1)
def CheckAssertedOutputFiles(self, assert_output_files):
'''Checks that the asserted output files are specified in the given list.
Returns true if the asserted files are present. If they are not, returns
False and prints the failure.
'''
# Compare the absolute path names, sorted.
asserted = sorted([os.path.abspath(i) for i in assert_output_files])
actual = sorted([
os.path.abspath(os.path.join(self.output_directory, i.GetFilename()))
for i in self.res.GetOutputFiles()])
if asserted != actual:
missing = list(set(actual) - set(asserted))
extra = list(set(asserted) - set(actual))
error = '''Asserted file list does not match.
Expected output files:
%s
Actual output files:
%s
Missing output files:
%s
Extra output files:
%s
'''
print error % ('\n'.join(asserted), '\n'.join(actual), '\n'.join(missing),
'\n'.join(extra))
return False
return True
def GenerateDepfile(self, depfile, depdir, first_ids_file, depend_on_stamp):
'''Generate a depfile that contains the implicit dependencies of the input
grd. The depfile will be in the same format as a makefile, and will contain
references to files relative to |depdir|. It will be put in |depfile|.
For example, supposing we have three files in a directory src/
src/
blah.grd <- depends on input{1,2}.xtb
input1.xtb
input2.xtb
and we run
grit -i blah.grd -o ../out/gen --depdir ../out --depfile ../out/gen/blah.grd.d
from the directory src/ we will generate a depfile ../out/gen/blah.grd.d
that has the contents
gen/blah.h: ../src/input1.xtb ../src/input2.xtb
Where "gen/blah.h" is the first output (Ninja expects the .d file to list
the first output in cases where there is more than one). If the flag
--depend-on-stamp is specified, "gen/blah.grd.d.stamp" will be used that is
'touched' whenever a new depfile is generated.
Note that all paths in the depfile are relative to ../out, the depdir.
'''
depfile = os.path.abspath(depfile)
depdir = os.path.abspath(depdir)
infiles = self.res.GetInputFiles()
# We want to trigger a rebuild if the first ids change.
if first_ids_file is not None:
infiles.append(first_ids_file)
if (depend_on_stamp):
output_file = depfile + ".stamp"
# Touch the stamp file before generating the depfile.
with open(output_file, 'a'):
os.utime(output_file, None)
else:
# Get the first output file relative to the depdir.
outputs = self.res.GetOutputFiles()
output_file = os.path.join(self.output_directory,
outputs[0].GetFilename())
output_file = os.path.relpath(output_file, depdir)
# The path prefix to prepend to dependencies in the depfile.
prefix = os.path.relpath(os.getcwd(), depdir)
deps_text = ' '.join([os.path.join(prefix, i) for i in infiles])
depfile_contents = output_file + ': ' + deps_text
self.MakeDirectoriesTo(depfile)
outfile = self.fo_create(depfile, 'w', encoding='utf-8')
outfile.writelines(depfile_contents)
@staticmethod
def MakeDirectoriesTo(file):
'''Creates directories necessary to contain |file|.'''
dir = os.path.split(file)[0]
if not os.path.exists(dir):
os.makedirs(dir)
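# Hedged illustration (not part of grit): for the hypothetical layout in the
# GenerateDepfile docstring above, the written depfile is a single make-style
# rule built as output_file + ': ' + deps_text, i.e. roughly:
#
#   gen/blah.h: ../src/input1.xtb ../src/input2.xtb
#
# where each dependency is prefixed with the relative path from |depdir| back
# to the directory grit was run from.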
| geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/tools/grit/grit/tool/build.py | Python | gpl-3.0 | 20,550 | ["xTB"] | 87fc7bbc794b5c5a2dfe51280b091839c1af9fe555903fd6e58d750fbf8fc5c7 |
#!/usr/bin/env python3
#
# QAPI parser test harness
#
# Copyright (c) 2013 Red Hat Inc.
#
# Authors:
# Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
#
import argparse
import difflib
import os
import sys
from io import StringIO
from qapi.error import QAPIError
from qapi.schema import QAPISchema, QAPISchemaVisitor
class QAPISchemaTestVisitor(QAPISchemaVisitor):
def visit_module(self, name):
print('module %s' % name)
def visit_include(self, name, info):
print('include %s' % name)
def visit_enum_type(self, name, info, ifcond, features, members, prefix):
print('enum %s' % name)
if prefix:
print(' prefix %s' % prefix)
for m in members:
print(' member %s' % m.name)
self._print_if(m.ifcond, indent=8)
self._print_if(ifcond)
self._print_features(features)
def visit_array_type(self, name, info, ifcond, element_type):
if not info:
return # suppress built-in arrays
print('array %s %s' % (name, element_type.name))
self._print_if(ifcond)
def visit_object_type(self, name, info, ifcond, features,
base, members, variants):
print('object %s' % name)
if base:
print(' base %s' % base.name)
for m in members:
print(' member %s: %s optional=%s'
% (m.name, m.type.name, m.optional))
self._print_if(m.ifcond, 8)
self._print_features(m.features, indent=8)
self._print_variants(variants)
self._print_if(ifcond)
self._print_features(features)
def visit_alternate_type(self, name, info, ifcond, features, variants):
print('alternate %s' % name)
self._print_variants(variants)
self._print_if(ifcond)
self._print_features(features)
def visit_command(self, name, info, ifcond, features,
arg_type, ret_type, gen, success_response, boxed,
allow_oob, allow_preconfig):
print('command %s %s -> %s'
% (name, arg_type and arg_type.name,
ret_type and ret_type.name))
print(' gen=%s success_response=%s boxed=%s oob=%s preconfig=%s'
% (gen, success_response, boxed, allow_oob, allow_preconfig))
self._print_if(ifcond)
self._print_features(features)
def visit_event(self, name, info, ifcond, features, arg_type, boxed):
print('event %s %s' % (name, arg_type and arg_type.name))
print(' boxed=%s' % boxed)
self._print_if(ifcond)
self._print_features(features)
@staticmethod
def _print_variants(variants):
if variants:
print(' tag %s' % variants.tag_member.name)
for v in variants.variants:
print(' case %s: %s' % (v.name, v.type.name))
QAPISchemaTestVisitor._print_if(v.ifcond, indent=8)
@staticmethod
def _print_if(ifcond, indent=4):
if ifcond:
print('%sif %s' % (' ' * indent, ifcond))
@classmethod
def _print_features(cls, features, indent=4):
if features:
for f in features:
print('%sfeature %s' % (' ' * indent, f.name))
cls._print_if(f.ifcond, indent + 4)
def test_frontend(fname):
schema = QAPISchema(fname)
schema.visit(QAPISchemaTestVisitor())
for doc in schema.docs:
if doc.symbol:
print('doc symbol=%s' % doc.symbol)
else:
print('doc freeform')
print(' body=\n%s' % doc.body.text)
for arg, section in doc.args.items():
print(' arg=%s\n%s' % (arg, section.text))
for feat, section in doc.features.items():
print(' feature=%s\n%s' % (feat, section.text))
for section in doc.sections:
print(' section=%s\n%s' % (section.name, section.text))
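# Hedged illustration (not part of QEMU): for a hypothetical schema that only
# declares one enum, the visitor and doc printer above would emit output shaped
# like the following (names are made up):
#
#   module example.json
#   enum ExampleEnum
#       member one
#       member two
#   doc symbol=ExampleEnum
#       body=
#   <documentation text>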
def test_and_diff(test_name, dir_name, update):
sys.stdout = StringIO()
try:
test_frontend(os.path.join(dir_name, test_name + '.json'))
except QAPIError as err:
if err.info.fname is None:
print("%s" % err, file=sys.stderr)
return 2
errstr = str(err) + '\n'
if dir_name:
errstr = errstr.replace(dir_name + '/', '')
actual_err = errstr.splitlines(True)
else:
actual_err = []
finally:
actual_out = sys.stdout.getvalue().splitlines(True)
sys.stdout.close()
sys.stdout = sys.__stdout__
mode = 'r+' if update else 'r'
try:
outfp = open(os.path.join(dir_name, test_name + '.out'), mode)
errfp = open(os.path.join(dir_name, test_name + '.err'), mode)
expected_out = outfp.readlines()
expected_err = errfp.readlines()
except IOError as err:
print("%s: can't open '%s': %s"
% (sys.argv[0], err.filename, err.strerror),
file=sys.stderr)
return 2
if actual_out == expected_out and actual_err == expected_err:
return 0
print("%s %s" % (test_name, 'UPDATE' if update else 'FAIL'),
file=sys.stderr)
out_diff = difflib.unified_diff(expected_out, actual_out, outfp.name)
err_diff = difflib.unified_diff(expected_err, actual_err, errfp.name)
sys.stdout.writelines(out_diff)
sys.stdout.writelines(err_diff)
if not update:
return 1
try:
outfp.truncate(0)
outfp.seek(0)
outfp.writelines(actual_out)
errfp.truncate(0)
errfp.seek(0)
errfp.writelines(actual_err)
except IOError as err:
print("%s: can't write '%s': %s"
% (sys.argv[0], err.filename, err.strerror),
file=sys.stderr)
return 2
return 0
def main(argv):
parser = argparse.ArgumentParser(
description='QAPI schema tester')
parser.add_argument('-d', '--dir', action='store', default='',
help="directory containing tests")
parser.add_argument('-u', '--update', action='store_true',
help="update expected test results")
parser.add_argument('tests', nargs='*', metavar='TEST', action='store')
args = parser.parse_args()
status = 0
for t in args.tests:
(dir_name, base_name) = os.path.split(t)
dir_name = dir_name or args.dir
test_name = os.path.splitext(base_name)[0]
status |= test_and_diff(test_name, dir_name, args.update)
exit(status)
if __name__ == '__main__':
main(sys.argv)
exit(0)
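# Hedged usage sketch (not part of QEMU): run one test against its expected
# .out/.err files in a directory, optionally rewriting them with -u; the test
# name below is hypothetical.
#
#   python3 test-qapi.py -d tests/qapi-schema enum-example.json
#   python3 test-qapi.py -d tests/qapi-schema -u enum-example.json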
| dslutz/qemu | tests/qapi-schema/test-qapi.py | Python | gpl-2.0 | 6,716 | ["VisIt"] | 31e3bdabf32d1685249f617d732e6ce54aa62829af827d2607a0f34d22ed46b2 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
# For more information please visit: https://wiki.openstack.org/wiki/TaskFlow
from taskflow.listeners import base
from taskflow.listeners import logging as logging_listener
from taskflow import task
from cinder import exception
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _make_task_name(cls, addons=None):
"""Makes a pretty name for a task class."""
base_name = ".".join([cls.__module__, cls.__name__])
extra = ''
if addons:
extra = ';%s' % (", ".join([str(a) for a in addons]))
return base_name + extra
class CinderTask(task.Task):
"""The root task class for all cinder tasks.
It automatically names the given task using the module and class that
implement the given task as the task name.
"""
def __init__(self, addons=None, **kwargs):
super(CinderTask, self).__init__(_make_task_name(self.__class__,
addons),
**kwargs)
class DynamicLogListener(logging_listener.DynamicLoggingListener):
"""This is used to attach to taskflow engines while they are running.
It provides a bunch of useful features that expose the actions happening
inside a taskflow engine, which can be useful for developers for debugging,
for operations folks for monitoring and tracking of the resource actions
and more...
"""
#: Exception is an expected case, don't include traceback in log if it fails.
_NO_TRACE_EXCEPTIONS = (exception.InvalidInput, exception.QuotaError)
def __init__(self, engine,
task_listen_for=base.DEFAULT_LISTEN_FOR,
flow_listen_for=base.DEFAULT_LISTEN_FOR,
retry_listen_for=base.DEFAULT_LISTEN_FOR,
logger=LOG):
super(DynamicLogListener, self).__init__(
engine,
task_listen_for=task_listen_for,
flow_listen_for=flow_listen_for,
retry_listen_for=retry_listen_for,
log=logger)
def _format_failure(self, fail):
if fail.check(*self._NO_TRACE_EXCEPTIONS) is not None:
exc_info = None
exc_details = '%s%s' % (os.linesep, fail.pformat(traceback=False))
return (exc_info, exc_details)
else:
return super(DynamicLogListener, self)._format_failure(fail)
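# Hedged illustration (not part of cinder): _make_task_name() joins the module
# and class of the implementing task, then appends any addons after a ';'.
# For a hypothetical class ExampleCreateTask defined in cinder.volume.flows
# with addons=['vol-1'], the resulting task name would be
# "cinder.volume.flows.ExampleCreateTask;vol-1".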
| Akrog/cinder | cinder/flow_utils.py | Python | apache-2.0 | 2,976 | ["VisIt"] | ff067f4b4d6eb4f1a7bdb71f835d78a8cc82d24402e0579618e6d24679d1b053 |
"""Classes for running 0MQ Devices in the background.
Authors
-------
* MinRK
* Brian Granger
"""
#
# Copyright (c) 2010 Min Ragan-Kelley, Brian Granger
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import time
from threading import Thread
try:
from multiprocessing import Process
except ImportError:
Process = None
from zmq.core import device, Context
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class Device:
"""A Threadsafe 0MQ Device.
*Warning* as with most 'threadsafe' Python objects, this is only
threadsafe as long as you do not use private methods or attributes.
Private names are prefixed with '_', such as 'self._setup_socket()'.
For thread safety, you do not pass Sockets to this, but rather Socket
types::
Device(device_type, in_socket_type, out_socket_type)
For instance::
dev = Device(zmq.QUEUE, zmq.XREQ, zmq.XREP)
Similar to zmq.device, but socket types instead of sockets themselves are
passed, and the sockets are created in the work thread, to avoid issues
with thread safety. As a result, additional bind_{in|out} and
connect_{in|out} methods and setsockopt_{in|out} allow users to specify
connections for the sockets.
Parameters
----------
device_type : int
The 0MQ Device type
{in|out}_type : int
zmq socket types, to be passed later to context.socket(). e.g.
zmq.PUB, zmq.SUB, zmq.REQ. If out_type is < 0, then in_socket is used
for both in_socket and out_socket.
Methods
-------
bind_{in|out}(iface)
passthrough for {in|out}_socket.bind(iface), to be called in the thread
connect_{in|out}(iface)
passthrough for {in|out}_socket.connect(iface), to be called in the
thread
setsockopt_{in|out}(opt,value)
passthrough for {in|out}_socket.setsockopt(opt, value), to be called in
the thread
Attributes
----------
daemon: bool
sets whether the thread should be run as a daemon.
Default is True; if it is False, the thread will not exit unless it is killed.
"""
def __init__(self, device_type, in_type, out_type):
self.device_type = device_type
self.in_type = in_type
self.out_type = out_type
self._in_binds = list()
self._in_connects = list()
self._in_sockopts = list()
self._out_binds = list()
self._out_connects = list()
self._out_sockopts = list()
self.daemon = True
self.done = False
def bind_in(self, addr):
"""Enqueue ZMQ address for binding on in_socket.
See ``zmq.Socket.bind`` for details.
"""
self._in_binds.append(addr)
def connect_in(self, addr):
"""Enqueue ZMQ address for connecting on in_socket.
See ``zmq.Socket.connect`` for details.
"""
self._in_connects.append(addr)
def setsockopt_in(self, opt, value):
"""Enqueue setsockopt(opt, value) for in_socket
See ``zmq.Socket.setsockopt`` for details.
"""
self._in_sockopts.append((opt, value))
def bind_out(self, iface):
"""Enqueue ZMQ address for binding on out_socket.
See ``zmq.Socket.bind`` for details.
"""
self._out_binds.append(iface)
def connect_out(self, iface):
"""Enqueue ZMQ address for connecting on out_socket.
See ``zmq.Socket.connect`` for details.
"""
self._out_connects.append(iface)
def setsockopt_out(self, opt, value):
"""Enqueue setsockopt(opt, value) for out_socket
See ``zmq.Socket.setsockopt`` for details.
"""
self._out_sockopts.append((opt, value))
def _setup_sockets(self):
ctx = Context()
self._context = ctx
# create the sockets
ins = ctx.socket(self.in_type)
if self.out_type < 0:
outs = ins
else:
outs = ctx.socket(self.out_type)
# set sockopts (must be done first, in case of zmq.IDENTITY)
for opt,value in self._in_sockopts:
ins.setsockopt(opt, value)
for opt,value in self._out_sockopts:
outs.setsockopt(opt, value)
for iface in self._in_binds:
ins.bind(iface)
for iface in self._out_binds:
outs.bind(iface)
for iface in self._in_connects:
ins.connect(iface)
for iface in self._out_connects:
outs.connect(iface)
return ins,outs
def run(self):
"""The runner method.
Do not call me directly, instead call ``self.start()``, just like a
Thread.
"""
ins,outs = self._setup_sockets()
rc = device(self.device_type, ins, outs)
self.done = True
return rc
def start(self):
"""Start the device. Override me in subclass for other launchers."""
return self.run()
def join(self,timeout=None):
tic = time.time()
toc = tic
while not self.done and not (timeout is not None and toc-tic > timeout):
time.sleep(.001)
toc = time.time()
class BackgroundDevice(Device):
"""Base class for launching Devices in background processes and threads."""
launcher=None
launch_class=None
def start(self):
self.launcher = self.launch_class(target=self.run)
self.launcher.daemon = self.daemon
return self.launcher.start()
def join(self, timeout=None):
return self.launcher.join(timeout=timeout)
class ThreadDevice(BackgroundDevice):
"""A Device that will be run in a background Thread.
See `Device` for details.
"""
launch_class=Thread
class ProcessDevice(BackgroundDevice):
"""A Device that will be run in a background Process.
See `Device` for details.
"""
launch_class=Process
__all__ = [ 'Device', 'ThreadDevice']
if Process is not None:
__all__.append('ProcessDevice')
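# Hedged usage sketch (not part of pyzmq): wiring a QUEUE device in a background
# thread, following the Device docstring above; endpoints are illustrative.
#
#   import zmq
#   from zmq.devices.basedevice import ThreadDevice
#
#   dev = ThreadDevice(zmq.QUEUE, zmq.XREP, zmq.XREQ)
#   dev.bind_in('tcp://*:5555')        # frontend socket, created in the thread
#   dev.bind_out('tcp://*:5556')       # backend socket, created in the thread
#   dev.setsockopt_in(zmq.IDENTITY, 'queue')
#   dev.start()                        # returns immediately; device runs in a daemon thread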
| takluyver/pyzmq | zmq/devices/basedevice.py | Python | lgpl-3.0 | 7,034 | ["Brian"] | d81749bba87391a60367b0ef9bcd7238512e75588e022f97c1dae8648cc2e651 |
#!/usr/bin/python2.7
# encoding: utf-8
from __future__ import division
import numpy as np
#import netCDF4 as nc
import sys
import os
from utide import solve, reconstruct
from scipy.io import netcdf
from scipy.io import savemat
from scipy.io import loadmat
from pydap.client import open_url
import cPickle as pkl
import copy
# Need to add closest point
#Add local path to utilities
sys.path.append('../utilities/')
#Utility import
from shortest_element_path import shortest_element_path
from object_from_dict import ObjectFromDict
from miscellaneous import findFiles, _load_nc
#Local import
from variablesStation import _load_var, _load_grid
from functionsStation import *
from functionsStationThreeD import *
from plotsStation import *
class Station:
'''
Description:
----------
A class/structure for Station data.
Functionality structured as follows:
_Data. = raw netcdf file data
|_Variables. = fvcom station variables and quantities
|_Grid. = fvcom station grid data
|_History = Quality Control metadata
testFvcom._|_Util2D. = set of useful functions for 2D and 3D station
|_Util3D. = set of useful functions for 3D station
|_Plots. = plotting functions
|_Harmonic_analysis = harmonic analysis based on the UTide package
|_Harmonic_reconstruction = harmonic reconstruction based on the UTide package
Inputs:
------
- filename = path to netcdf file or folder, string,
ex: testFvcom=Station('./path_to_FVCOM_output_file/filename')
testFvcom=Station('./path_to_FVCOM_output_file/folder/')
Note that if the path points to a folder, all the similar netCDF station files
will be stacked together.
Note that the file can be a pickle file (i.e. *.p) or a netcdf file
(i.e. *.nc).
Options:
-------
- elements = indices to extract, list of integers
Notes:
-----
Throughout the package, the following conventions apply:
- Date = string of 'yyyy-mm-ddThh:mm:ss'
- Coordinates = decimal degrees East and North
- Directions = in degrees, between -180 and 180 deg., i.e. 0=East, 90=North,
+/-180=West, -90=South
- Depth = 0m is the free surface and depth is negative
'''
def __init__(self, filename, elements=slice(None), debug=False):
#Class attributes
self._debug = debug
self._isMulti(filename)
if not self._multi:
self._load(filename, elements)
self.Plots = PlotsStation(self.Variables,
self.Grid,
self._debug)
self.Util2D = FunctionsStation(self.Variables,
self.Grid,
self.Plots,
self.History,
self._debug)
if self.Variables._3D:
self.Util3D = FunctionsStationThreeD(
self.Variables,
self.Grid,
self.Plots,
self.History,
self._debug)
else:
print "---Finding matching files---"
self._matches = findFiles(filename, 'STATION')
filename = self._matches.pop(0)
self._load(filename, elements, debug=debug )
self.Plots = PlotsStation(self.Variables,
self.Grid,
self._debug)
self.Util2D = FunctionsStation(self.Variables,
self.Grid,
self.Plots,
self.History,
self._debug)
if self.Variables._3D:
self.Util3D = FunctionsStationThreeD(
self.Variables,
self.Grid,
self.Plots,
self.History,
self._debug)
for entry in self._matches:
#Define new
text = 'Created from ' + entry
tmp = {}
tmp['Data'] = _load_nc(entry)
tmp['History'] = [text]
tmp['Grid'] = _load_grid(tmp['Data'], elements, [], debug=self._debug)
tmp['Variables'] = _load_var(tmp['Data'], elements, tmp['Grid'], [],
debug=self._debug)
tmp = ObjectFromDict(tmp)
self = self.__add__(tmp)
def _isMulti(self, filename):
"""Tells if filename point to a file or a folder"""
split = filename.split('/')
if split[-1]:
self._multi = False
else:
self._multi = True
def _load(self, filename, elements, debug=False):
"""Loads data from *.nc, *.p and OpenDap url"""
#Loading pickle file
if filename.endswith('.p'):
f = open(filename, "rb")
data = pkl.load(f)
self._origin_file = data['Origin']
self.History = data['History']
if debug: print "Turn keys into attributs"
self.Grid = ObjectFromDict(data['Grid'])
self.Variables = ObjectFromDict(data['Variables'])
try:
if self._origin_file.startswith('http'):
#Look for file through OpenDAP server
print "Retrieving data through OpenDap server..."
self.Data = open_url(data['Origin'])
#Create fake attribute to be consistent with the rest of the code
self.Data.variables = self.Data
else:
#WB_Alternative: self.Data = sio.netcdf.netcdf_file(filename, 'r')
#WB_comments: scipy has caused some errors, and even though it can be
# faster, it can be unreliable
#self.Data = nc.Dataset(data['Origin'], 'r')
self.Data = netcdf.netcdf_file(data['Origin'], 'r',mmap=True)
except: #TR: need to specify the type of error here
print "the original *.nc file has not been found"
pass
#Loading netcdf file
elif filename.endswith('.nc'):
if filename.startswith('http'):
#Look for file through OpenDAP server
print "Retrieving data through OpenDap server..."
self.Data = open_url(filename)
#Create fake attribute to be consistent with the rest of the code
self.Data.variables = self.Data
else:
#Look for file locally
print "Retrieving data from " + filename + " ..."
#WB_Alternative: self.Data = sio.netcdf.netcdf_file(filename, 'r')
#WB_comments: scipy has caused some errors, and even though it can be
# faster, it can be unreliable
#self.Data = nc.Dataset(filename, 'r')
self.Data = netcdf.netcdf_file(filename, 'r',mmap=True)
#Metadata
text = 'Created from ' + filename
self._origin_file = filename
self.History = [text]
# Calling sub-class
print "Initialisation..."
try:
self.Grid = _load_grid(self.Data,
elements,
self.History,
debug=self._debug)
self.Variables = _load_var(self.Data,
elements,
self.Grid,
self.History,
debug=self._debug)
except MemoryError:
print '---Data too large for machine memory---'
print 'Tip: use ax or tx during class initialisation'
print '--- to use partial data'
raise
elif filename.endswith('.mat'):
print "---Functionality not yet implemented---"
sys.exit()
else:
print "---Wrong file format---"
sys.exit()
#Special methods
def __add__(self, StationClass, debug=False):
"""
This special method permits stacking the variables
of 2 Station objects through a simple addition:
station1 += station2
Notes:
-----
- station1 and station2 have to cover the exact
same spatial domain
- last time step of station1 must be <= to the
first time step of station2
"""
debug = debug or self._debug
if debug: print "Find matching elements..."
#Find matching elements
origNele = self.Grid.nele
origEle = []
#origName = self.Grid.name
origX = self.Grid.x[:]
origY = self.Grid.y[:]
newNele = StationClass.Grid.nele
newEle = []
#newName = StationClass.Grid.name
newX = StationClass.Grid.x[:]
newY = StationClass.Grid.y[:]
for i in range(origNele):
for j in range(newNele):
#Match based on names
#if (all(origName[i,:]==newName[j,:])):
# origEle.append(i)
# newEle.append(j)
#Match based on coordinates
if ((origX[i]==newX[j]) and (origY[i]==newY[j])):
origEle.append(i)
newEle.append(j)
print len(origEle), " points will be stacked..."
if len(origEle)==0:
print "---No matching element found---"
sys.exit()
elif not (self.Variables._3D == StationClass.Variables._3D):
print "---Data dimensions do not match---"
sys.exit()
else:
if not (self.Variables.julianTime[-1]<=
StationClass.Variables.julianTime[0]):
print "---Data not consecutive in time---"
sys.exit()
#Copy self to newself
newself = copy.copy(self)
#TR comment: it still points toward self and modifies it
# so cannot do Station3 = Station1 + Station2
if debug:
print 'Stacking variables...'
#keyword list for hstack
kwl=['matlabTime', 'julianTime', 'secondTime']
for key in kwl:
tmpN = getattr(newself.Variables, key)
tmpO = getattr(StationClass.Variables, key)
setattr(newself.Variables, key,
np.hstack((tmpN[:], tmpO[:])))
#keyword list for vstack
kwl=['u', 'v', 'w', 'tke', 'gls', 'ua', 'va','el']
kwl2D=['ua', 'va','el']
for key in kwl:
try:
if key in kwl2D:
tmpN = getattr(newself.Variables, key)\
[:,newEle[:]]
tmpO = getattr(StationClass.Variables, key)\
[:,origEle[:]]
setattr(newself.Variables, key,
np.vstack((tmpN[:], tmpO[:])))
if debug: print "Stacking " + key + "..."
else:
tmpN = getattr(newself.Variables, key)\
[:,:,newEle[:]]
tmpO = getattr(StationClass.Variables, key)\
[:,:,origEle[:]]
setattr(newself.Variables, key,
np.vstack((tmpN[:], tmpO[:])))
if debug: print "Stacking " + key + "..."
except AttributeError:
continue
#New time dimension
newself.Grid.ntime = newself.Grid.ntime + StationClass.Grid.ntime
#Keep only matching names
newself.Grid.name = self.Grid.name[origEle[:],:]
#Append to new object history
text = 'Data from ' + StationClass.History[0].split('/')[-1] \
+ ' has been stacked'
newself.History.append(text)
return newself
#Methods
def Save_as(self, filename, fileformat='pickle', debug=False):
"""
Save the current Station structure as:
- *.p, i.e. python file
- *.mat, i.e. Matlab file
Inputs:
------
- filename = path + name of the file to be saved, string
Keywords:
--------
- fileformat = format of the file to be saved, i.e. 'pickle' or 'matlab'
"""
debug = debug or self._debug
if debug:
print 'Saving file...'
#Define bounding box
if debug:
print "Computing bounding box..."
if self.Grid._ax == []:
lon = self.Grid.lon[:]
lat = self.Grid.lat[:]
self.Grid._ax = [lon.min(), lon.max(),
lat.min(), lat.max()]
#Save as different formats
if fileformat=='pickle':
filename = filename + ".p"
f = open(filename, "wb")
data = {}
data['Origin'] = self._origin_file
data['History'] = self.History
data['Grid'] = self.Grid.__dict__
data['Variables'] = self.Variables.__dict__
#TR: Force caching Variables otherwise error during loading
# with 'netcdf4.Variable' type (see above)
for key in data['Variables']:
listkeys=['Variable', 'ArrayProxy', 'BaseType']
if any([type(data['Variables'][key]).__name__==x for x in listkeys]):
if debug:
print "Force caching for " + key
data['Variables'][key] = data['Variables'][key][:]
#Unpickleable objects
data['Grid'].pop("triangle", None)
#TR: Force caching Variables otherwise error during loading
# with 'netcdf4.Variable' type (see above)
for key in data['Grid']:
listkeys=['Variable', 'ArrayProxy', 'BaseType']
if any([type(data['Grid'][key]).__name__==x for x in listkeys]):
if debug:
print "Force caching for " + key
data['Grid'][key] = data['Grid'][key][:]
#Save in pickle file
if debug:
print 'Dumping in pickle file...'
try:
pkl.dump(data, f, protocol=pkl.HIGHEST_PROTOCOL)
except SystemError:
print '---Data too large for machine memory---'
print 'Tip: use ax or tx during class initialisation'
print '--- to use partial data'
raise
f.close()
elif fileformat=='matlab':
filename = filename + ".mat"
#TR comment: based on MitchellO'Flaherty-Sproul's code
dtype = float
data = {}
Grd = {}
Var = {}
data['Origin'] = self._origin_file
data['History'] = self.History
Grd = self.Grid.__dict__
Var = self.Variables.__dict__
#TR: Force caching Variables otherwise error during loading
# with 'netcdf4.Variable' type (see above)
for key in Var:
listkeys=['Variable', 'ArrayProxy', 'BaseType']
if any([type(Var[key]).__name__==x for x in listkeys]):
if debug:
print "Force caching for " + key
Var[key] = Var[key][:]
#keyV = key + '-var'
#data[keyV] = Var[key]
data[key] = Var[key]
#Unpickleable objects
Grd.pop("triangle", None)
for key in Grd:
listkeys=['Variable', 'ArrayProxy', 'BaseType']
if any([type(Grd[key]).__name__==x for x in listkeys]):
if debug:
print "Force caching for " + key
Grd[key] = Grd[key][:]
#keyG = key + '-grd'
#data[keyG] = Grd[key]
data[key] = Grd[key]
#Save in mat file
if debug:
print 'Dumping in matlab file...'
savemat(filename, data, oned_as='column')
else:
print "---Wrong file format---"
#if __name__ == '__main__':
#filename = '/array2/data3/rkarsten/dncoarse_3D/output2/dn_coarse_station_timeseries.nc'
#filename = '/array2/data3/rkarsten/dncoarse_3D/output2/dn_coarse_station_timeseries.nc'
#filename = '/EcoII/EcoEII_server_data_tree/data/simulated/FVCOM/dngrid/june_2013_3D/'
#multi = True
#if multi:
#filename = '/home/wesley/ncfiles/'
# filename = '/EcoII/EcoEII_server_data_tree/workspace/simulated/FVCOM/dngrid/june_2013_3D/output/'
#else:
# filename = '/home/wesley/ncfiles/dn_coarse_station_timeseries.nc'
#data = station(filename)
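# Hedged usage sketch (not part of PySeidon): typical workflow implied by the
# Station docstring above; the path and variable access are illustrative and
# depend on the contents of the netCDF file.
#
#   station = Station('/path/to/station_timeseries.nc')
#   elevation = station.Variables.el            # surface elevation time series
#   station.Save_as('/tmp/station_backup', fileformat='pickle')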
| rsignell-usgs/PySeidon | pyseidon/stationClass/stationClass.py | Python | agpl-3.0 | 17,603 | ["NetCDF"] | 310f6af34f916188a6e5faf8f65ffc745ab7e6f9f971bb736a99d48f50227ba3 |
import functools
import inspect
import os
import sys
import warnings
from collections import defaultdict
from collections import deque
from types import TracebackType
from typing import Any
from typing import Callable
from typing import cast
from typing import Dict
from typing import Generator
from typing import Generic
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import TypeVar
from typing import Union
import attr
import py
import _pytest
from _pytest._code import getfslineno
from _pytest._code.code import FormattedExcinfo
from _pytest._code.code import TerminalRepr
from _pytest._io import TerminalWriter
from _pytest.compat import _format_args
from _pytest.compat import _PytestWrapper
from _pytest.compat import final
from _pytest.compat import get_real_func
from _pytest.compat import get_real_method
from _pytest.compat import getfuncargnames
from _pytest.compat import getimfunc
from _pytest.compat import getlocation
from _pytest.compat import is_generator
from _pytest.compat import NOTSET
from _pytest.compat import order_preserving_dict
from _pytest.compat import overload
from _pytest.compat import safe_getattr
from _pytest.compat import TYPE_CHECKING
from _pytest.config import _PluggyPlugin
from _pytest.config import Config
from _pytest.config.argparsing import Parser
from _pytest.deprecated import FILLFUNCARGS
from _pytest.mark import Mark
from _pytest.mark import ParameterSet
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
from _pytest.pathlib import absolutepath
if TYPE_CHECKING:
from typing import Deque
from typing import NoReturn
from typing import Type
from typing_extensions import Literal
from _pytest import nodes
from _pytest.main import Session
from _pytest.python import CallSpec2
from _pytest.python import Function
from _pytest.python import Metafunc
_Scope = Literal["session", "package", "module", "class", "function"]
# The value of the fixture -- return/yield of the fixture function (type variable).
_FixtureValue = TypeVar("_FixtureValue")
# The type of the fixture function (type variable).
_FixtureFunction = TypeVar("_FixtureFunction", bound=Callable[..., object])
# The type of a fixture function (type alias generic in fixture value).
_FixtureFunc = Union[
Callable[..., _FixtureValue], Callable[..., Generator[_FixtureValue, None, None]]
]
# The type of FixtureDef.cached_result (type alias generic in fixture value).
_FixtureCachedResult = Union[
Tuple[
# The result.
_FixtureValue,
# Cache key.
object,
None,
],
Tuple[
None,
# Cache key.
object,
# Exc info if raised.
Tuple["Type[BaseException]", BaseException, TracebackType],
],
]
@attr.s(frozen=True)
class PseudoFixtureDef(Generic[_FixtureValue]):
cached_result = attr.ib(type="_FixtureCachedResult[_FixtureValue]")
scope = attr.ib(type="_Scope")
def pytest_sessionstart(session: "Session") -> None:
import _pytest.python
import _pytest.nodes
scopename2class.update(
{
"package": _pytest.python.Package,
"class": _pytest.python.Class,
"module": _pytest.python.Module,
"function": _pytest.nodes.Item,
"session": _pytest.main.Session,
}
)
session._fixturemanager = FixtureManager(session)
scopename2class = {} # type: Dict[str, Type[nodes.Node]]
def get_scope_package(node, fixturedef: "FixtureDef[object]"):
import pytest
cls = pytest.Package
current = node
fixture_package_name = "{}/{}".format(fixturedef.baseid, "__init__.py")
while current and (
type(current) is not cls or fixture_package_name != current.nodeid
):
current = current.parent
if current is None:
return node.session
return current
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
raise ValueError("unknown scope")
return node.getparent(cls)
def add_funcarg_pseudo_fixture_def(
collector, metafunc: "Metafunc", fixturemanager: "FixtureManager"
) -> None:
# This function will transform all collected calls to functions
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
# This function call does not have direct parametrization.
return
# Collect funcargs of all callspecs into a list of values.
arg2params = {} # type: Dict[str, List[object]]
arg2scope = {} # type: Dict[str, _Scope]
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname, scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# Register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# If we have a scope that is higher than function, we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, _pytest.python.Module)
# Use module-level collector for class-scope (for now).
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(
fixturemanager=fixturemanager,
baseid="",
argname=argname,
func=get_direct_param_fixture_func,
scope=arg2scope[argname],
params=valuelist,
unittest=False,
ids=None,
)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def getfixturemarker(obj: object) -> Optional["FixtureFunctionMarker"]:
"""Return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
fixturemarker = getattr(
obj, "_pytestfixturefunction", None
) # type: Optional[FixtureFunctionMarker]
except TEST_OUTCOME:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
return fixturemarker
# Parametrized fixture key, helper alias for code below.
_Key = Tuple[object, ...]
def get_parametrized_fixture_keys(item: "nodes.Item", scopenum: int) -> Iterator[_Key]:
"""Return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
callspec = item.callspec # type: ignore[attr-defined]
except AttributeError:
pass
else:
cs = callspec # type: CallSpec2
# cs.indices.items() is random order of argnames. Need to
# sort this so that different calls to
# get_parametrized_fixture_keys will be deterministic.
for argname, param_index in sorted(cs.indices.items()):
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index) # type: _Key
elif scopenum == 1: # package
key = (argname, param_index, item.fspath.dirpath())
elif scopenum == 2: # module
key = (argname, param_index, item.fspath)
elif scopenum == 3: # class
item_cls = item.cls # type: ignore[attr-defined]
key = (argname, param_index, item.fspath, item_cls)
yield key
# Algorithm for sorting on a per-parametrized resource setup basis.
# It is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns.
def reorder_items(items: "Sequence[nodes.Item]") -> "List[nodes.Item]":
argkeys_cache = {} # type: Dict[int, Dict[nodes.Item, Dict[_Key, None]]]
items_by_argkey = {} # type: Dict[int, Dict[_Key, Deque[nodes.Item]]]
for scopenum in range(0, scopenum_function):
d = {} # type: Dict[nodes.Item, Dict[_Key, None]]
argkeys_cache[scopenum] = d
item_d = defaultdict(deque) # type: Dict[_Key, Deque[nodes.Item]]
items_by_argkey[scopenum] = item_d
for item in items:
# cast is a workaround for https://github.com/python/typeshed/issues/3800.
keys = cast(
"Dict[_Key, None]",
order_preserving_dict.fromkeys(
get_parametrized_fixture_keys(item, scopenum), None
),
)
if keys:
d[item] = keys
for key in keys:
item_d[key].append(item)
# cast is a workaround for https://github.com/python/typeshed/issues/3800.
items_dict = cast(
"Dict[nodes.Item, None]", order_preserving_dict.fromkeys(items, None)
)
return list(reorder_items_atscope(items_dict, argkeys_cache, items_by_argkey, 0))
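# Hedged illustration (not part of pytest): for three hypothetical items where
# test_a and test_c share a session-scoped parametrized key and test_b uses a
# different one, reorder_items() groups the items sharing the key so the
# expensive high-scope setup/teardown runs once:
#
#   collected order:  test_a[pg], test_b[sqlite], test_c[pg]
#   reordered order:  test_a[pg], test_c[pg], test_b[sqlite]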
def fix_cache_order(
item: "nodes.Item",
argkeys_cache: "Dict[int, Dict[nodes.Item, Dict[_Key, None]]]",
items_by_argkey: "Dict[int, Dict[_Key, Deque[nodes.Item]]]",
) -> None:
for scopenum in range(0, scopenum_function):
for key in argkeys_cache[scopenum].get(item, []):
items_by_argkey[scopenum][key].appendleft(item)
def reorder_items_atscope(
items: "Dict[nodes.Item, None]",
argkeys_cache: "Dict[int, Dict[nodes.Item, Dict[_Key, None]]]",
items_by_argkey: "Dict[int, Dict[_Key, Deque[nodes.Item]]]",
scopenum: int,
) -> "Dict[nodes.Item, None]":
if scopenum >= scopenum_function or len(items) < 3:
return items
ignore = set() # type: Set[Optional[_Key]]
items_deque = deque(items)
items_done = order_preserving_dict() # type: Dict[nodes.Item, None]
scoped_items_by_argkey = items_by_argkey[scopenum]
scoped_argkeys_cache = argkeys_cache[scopenum]
while items_deque:
no_argkey_group = order_preserving_dict() # type: Dict[nodes.Item, None]
slicing_argkey = None
while items_deque:
item = items_deque.popleft()
if item in items_done or item in no_argkey_group:
continue
argkeys = order_preserving_dict.fromkeys(
(k for k in scoped_argkeys_cache.get(item, []) if k not in ignore), None
)
if not argkeys:
no_argkey_group[item] = None
else:
slicing_argkey, _ = argkeys.popitem()
# We don't have to remove relevant items from later in the
# deque because they'll just be ignored.
matching_items = [
i for i in scoped_items_by_argkey[slicing_argkey] if i in items
]
for i in reversed(matching_items):
fix_cache_order(i, argkeys_cache, items_by_argkey)
items_deque.appendleft(i)
break
if no_argkey_group:
no_argkey_group = reorder_items_atscope(
no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1
)
for item in no_argkey_group:
items_done[item] = None
ignore.add(slicing_argkey)
return items_done
def _fillfuncargs(function: "Function") -> None:
"""Fill missing fixtures for a test function, old public API (deprecated)."""
warnings.warn(FILLFUNCARGS.format(name="pytest._fillfuncargs()"), stacklevel=2)
_fill_fixtures_impl(function)
def fillfixtures(function: "Function") -> None:
"""Fill missing fixtures for a test function (deprecated)."""
warnings.warn(
FILLFUNCARGS.format(name="_pytest.fixtures.fillfixtures()"), stacklevel=2
)
_fill_fixtures_impl(function)
def _fill_fixtures_impl(function: "Function") -> None:
"""Internal implementation to fill fixtures on the given function object."""
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
assert function.parent is not None
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# Prune out funcargs for jstests.
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
def get_direct_param_fixture_func(request):
return request.param
@attr.s(slots=True)
class FuncFixtureInfo:
# Original function argument names.
argnames = attr.ib(type=Tuple[str, ...])
# Argnames that function immediately requires. These include argnames +
# fixture names specified via usefixtures and via autouse=True in fixture
# definitions.
initialnames = attr.ib(type=Tuple[str, ...])
names_closure = attr.ib(type=List[str])
name2fixturedefs = attr.ib(type=Dict[str, Sequence["FixtureDef[Any]"]])
def prune_dependency_tree(self) -> None:
"""Recompute names_closure from initialnames and name2fixturedefs.
Can only reduce names_closure, which means that the new closure will
always be a subset of the old one. The order is preserved.
This method is needed because direct parametrization may shadow some
of the fixtures that were included in the originally built dependency
tree. In this way the dependency tree can get pruned, and the closure
of argnames may get reduced.
"""
closure = set() # type: Set[str]
working_set = set(self.initialnames)
while working_set:
argname = working_set.pop()
# Argname may be something not included in the original names_closure,
# in which case we ignore it. This currently happens with pseudo
# FixtureDefs which wrap 'get_direct_param_fixture_func(request)'.
# So they introduce the new dependency 'request' which might have
# been missing in the original tree (closure).
if argname not in closure and argname in self.names_closure:
closure.add(argname)
if argname in self.name2fixturedefs:
working_set.update(self.name2fixturedefs[argname][-1].argnames)
self.names_closure[:] = sorted(closure, key=self.names_closure.index)
class FixtureRequest:
"""A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context and has
an optional ``param`` attribute in case the fixture is parametrized
indirectly.
"""
def __init__(self, pyfuncitem) -> None:
self._pyfuncitem = pyfuncitem
#: Fixture for which this request is being performed.
self.fixturename = None # type: Optional[str]
#: Scope string, one of "function", "class", "module", "session".
self.scope = "function" # type: _Scope
self._fixture_defs = {} # type: Dict[str, FixtureDef[Any]]
fixtureinfo = pyfuncitem._fixtureinfo # type: FuncFixtureInfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {} # type: Dict[str, int]
self._fixturemanager = (
pyfuncitem.session._fixturemanager
) # type: FixtureManager
@property
def fixturenames(self) -> List[str]:
"""Names of all active fixtures in this request."""
result = list(self._pyfuncitem._fixtureinfo.names_closure)
result.extend(set(self._fixture_defs).difference(result))
return result
@property
def node(self):
"""Underlying collection node (depends on current request scope)."""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname: str) -> "FixtureDef[Any]":
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
# We arrive here because of a dynamic call to
# getfixturevalue(argname) usage which was naturally
# not known at parsing/collection time.
assert self._pyfuncitem.parent is not None
parentid = self._pyfuncitem.parent.nodeid
fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
# TODO: Fix this type ignore. Either add assert or adjust types.
# Can this be None here?
self._arg2fixturedefs[argname] = fixturedefs # type: ignore[assignment]
# fixturedefs list is immutable so we maintain a decreasing index.
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self) -> Config:
"""The pytest config object associated with this request."""
return self._pyfuncitem.config # type: ignore[no-any-return] # noqa: F723
@property
def function(self):
"""Test function object if the request has a per-function scope."""
if self.scope != "function":
raise AttributeError(
"function not available in {}-scoped context".format(self.scope)
)
return self._pyfuncitem.obj
@property
def cls(self):
"""Class (can be None) where the test function was collected."""
if self.scope not in ("class", "function"):
raise AttributeError(
"cls not available in {}-scoped context".format(self.scope)
)
clscol = self._pyfuncitem.getparent(_pytest.python.Class)
if clscol:
return clscol.obj
@property
def instance(self):
"""Instance (can be None) on which test function was collected."""
# unittest support hack, see _pytest.unittest.TestCaseFunction.
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
return getattr(function, "__self__", None)
@property
def module(self):
"""Python module object where the test function was collected."""
if self.scope not in ("function", "class", "module"):
raise AttributeError(
"module not available in {}-scoped context".format(self.scope)
)
return self._pyfuncitem.getparent(_pytest.python.Module).obj
@property
def fspath(self) -> py.path.local:
"""The file system path of the test module which collected this test."""
if self.scope not in ("function", "class", "module", "package"):
raise AttributeError(
"module not available in {}-scoped context".format(self.scope)
)
# TODO: Remove ignore once _pyfuncitem is properly typed.
return self._pyfuncitem.fspath # type: ignore
@property
def keywords(self):
"""Keywords/markers dictionary for the underlying node."""
return self.node.keywords
@property
def session(self):
"""Pytest session object."""
return self._pyfuncitem.session
def addfinalizer(self, finalizer: Callable[[], object]) -> None:
"""Add finalizer/teardown function to be called after the last test
within the requesting test context finished execution."""
# XXX usually this method is shadowed by fixturedef specific ones.
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer: Callable[[], object], scope) -> None:
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem
)
def applymarker(self, marker) -> None:
"""Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:param marker:
A :py:class:`_pytest.mark.MarkDecorator` object created by a call
to ``pytest.mark.NAME(...)``.
"""
self.node.add_marker(marker)
def raiseerror(self, msg: Optional[str]) -> "NoReturn":
"""Raise a FixtureLookupError with the given message."""
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self) -> None:
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfixturevalue(argname)
def getfixturevalue(self, argname: str) -> Any:
"""Dynamically run a named fixture function.
Declaring fixtures via function argument is recommended where possible.
But if you can only decide whether to use another fixture at test
setup time, you may use this function to retrieve it inside a fixture
or test function body.
:raises pytest.FixtureLookupError:
If the given fixture could not be found.
"""
fixturedef = self._get_active_fixturedef(argname)
assert fixturedef.cached_result is not None
return fixturedef.cached_result[0]
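    # Hedged illustration (not part of pytest): the dynamic lookup performed by
    # getfixturevalue() enables patterns like the following in user code, where
    # the fixture to use is only known at setup time (names are hypothetical;
    # tmp_path is the builtin pytest fixture):
    #
    #   @pytest.fixture
    #   def storage(request, use_tmp):
    #       if use_tmp:
    #           return request.getfixturevalue("tmp_path")
    #       return request.getfixturevalue("remote_storage")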
def _get_active_fixturedef(
self, argname: str
) -> Union["FixtureDef[object]", PseudoFixtureDef[object]]:
try:
return self._fixture_defs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
cached_result = (self, [0], None)
scope = "function" # type: _Scope
return PseudoFixtureDef(cached_result, scope)
raise
# Remove indent to prevent the python3 exception
# from leaking into the call.
self._compute_fixture_value(fixturedef)
self._fixture_defs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self) -> List["FixtureDef[Any]"]:
current = self
values = [] # type: List[FixtureDef[Any]]
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
values.reverse()
return values
values.append(fixturedef)
assert isinstance(current, SubRequest)
current = current._parent_request
def _compute_fixture_value(self, fixturedef: "FixtureDef[object]") -> None:
"""Create a SubRequest based on "self" and call the execute method
of the given FixtureDef object.
This will force the FixtureDef object to throw away any previous
results and compute a new fixture value, which will be stored into
the FixtureDef object itself.
"""
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
has_params = fixturedef.params is not None
fixtures_not_supported = getattr(funcitem, "nofuncargs", False)
if has_params and fixtures_not_supported:
msg = (
"{name} does not support fixtures, maybe unittest.TestCase subclass?\n"
"Node id: {nodeid}\n"
"Function type: {typename}"
).format(
name=funcitem.name,
nodeid=funcitem.nodeid,
typename=type(funcitem).__name__,
)
fail(msg, pytrace=False)
if has_params:
frame = inspect.stack()[3]
frameinfo = inspect.getframeinfo(frame[0])
source_path = py.path.local(frameinfo.filename)
source_lineno = frameinfo.lineno
rel_source_path = source_path.relto(funcitem.config.rootdir)
if rel_source_path:
source_path_str = rel_source_path
else:
source_path_str = str(source_path)
msg = (
"The requested fixture has no parameter defined for test:\n"
" {}\n\n"
"Requested fixture '{}' defined in:\n{}"
"\n\nRequested here:\n{}:{}".format(
funcitem.nodeid,
fixturedef.argname,
getlocation(fixturedef.func, funcitem.config.rootdir),
source_path_str,
source_lineno,
)
)
fail(msg, pytrace=False)
else:
param_index = funcitem.callspec.indices[argname]
# If a parametrize invocation set a scope it will override
# the static scope defined with the fixture function.
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# Check if a higher-level scoped fixture accesses a lower level one.
subrequest._check_scope(argname, self.scope, scope)
try:
# Call the fixture function.
fixturedef.execute(request=subrequest)
finally:
self._schedule_finalizers(fixturedef, subrequest)
def _schedule_finalizers(
self, fixturedef: "FixtureDef[object]", subrequest: "SubRequest"
) -> None:
# If fixture function failed it might have registered finalizers.
self.session._setupstate.addfinalizer(
functools.partial(fixturedef.finish, request=subrequest), subrequest.node
)
def _check_scope(self, argname, invoking_scope: "_Scope", requested_scope) -> None:
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# Try to report something helpful.
lines = self._factorytraceback()
fail(
"ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s"
% ((requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False,
)
def _factorytraceback(self) -> List[str]:
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = _format_args(factory)
lines.append("%s:%d: def %s%s" % (p, lineno + 1, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# This might also be a non-function Item despite its attribute name.
return self._pyfuncitem
if scope == "package":
# FIXME: _fixturedef is not defined on FixtureRequest (this class),
# but on FixtureRequest (a subclass).
node = get_scope_package(self._pyfuncitem, self._fixturedef) # type: ignore[attr-defined]
else:
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# Fallback to function item itself.
node = self._pyfuncitem
assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(
scope, self._pyfuncitem
)
return node
def __repr__(self) -> str:
return "<FixtureRequest for %r>" % (self.node)
@final
class SubRequest(FixtureRequest):
"""A sub request for handling getting a fixture from a test function/fixture."""
def __init__(
self,
request: "FixtureRequest",
scope: "_Scope",
param,
param_index: int,
fixturedef: "FixtureDef[object]",
) -> None:
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self._pyfuncitem = request._pyfuncitem
self._fixture_defs = request._fixture_defs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self._fixturemanager = request._fixturemanager
def __repr__(self) -> str:
return "<SubRequest {!r} for {!r}>".format(self.fixturename, self._pyfuncitem)
def addfinalizer(self, finalizer: Callable[[], object]) -> None:
self._fixturedef.addfinalizer(finalizer)
def _schedule_finalizers(
self, fixturedef: "FixtureDef[object]", subrequest: "SubRequest"
) -> None:
# If the executing fixturedef was not explicitly requested in the argument list (via
# getfixturevalue inside the fixture call) then ensure this fixture def will be finished
# first.
if fixturedef.argname not in self.fixturenames:
fixturedef.addfinalizer(
functools.partial(self._fixturedef.finish, request=self)
)
super()._schedule_finalizers(fixturedef, subrequest)
scopes = ["session", "package", "module", "class", "function"] # type: List[_Scope]
scopenum_function = scopes.index("function")
def scopemismatch(currentscope: "_Scope", newscope: "_Scope") -> bool:
return scopes.index(newscope) > scopes.index(currentscope)
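# Hedged illustration (not part of pytest): with the `scopes` list above,
# scopemismatch("function", "module") is False (a function-scoped request may
# depend on a module-scoped fixture), while scopemismatch("module", "function")
# is True, which FixtureRequest._check_scope() turns into a ScopeMismatch
# failure.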
def scope2index(scope: str, descr: str, where: Optional[str] = None) -> int:
"""Look up the index of ``scope`` and raise a descriptive value error
if not defined."""
strscopes = scopes # type: Sequence[str]
try:
return strscopes.index(scope)
except ValueError:
fail(
"{} {}got an unexpected scope value '{}'".format(
descr, "from {} ".format(where) if where else "", scope
),
pytrace=False,
)
@final
class FixtureLookupError(LookupError):
"""Could not return a requested fixture (missing or invalid)."""
def __init__(
self, argname: Optional[str], request: FixtureRequest, msg: Optional[str] = None
) -> None:
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self) -> "FixtureLookupErrorRepr":
tblines = [] # type: List[str]
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
# The last fixture raised an error, let's present
# it at the requesting side.
stack = stack[:-1]
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except (OSError, IndexError, TypeError):
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno + 1))
else:
addline("file {}, line {}".format(fspath, lineno + 1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith("def"):
break
if msg is None:
fm = self.request._fixturemanager
available = set()
parentid = self.request._pyfuncitem.parent.nodeid
for name, fixturedefs in fm._arg2fixturedefs.items():
faclist = list(fm._matchfactories(fixturedefs, parentid))
if faclist:
available.add(name)
if self.argname in available:
msg = " recursive dependency involving fixture '{}' detected".format(
self.argname
)
else:
msg = "fixture '{}' not found".format(self.argname)
msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
msg += "\n use 'pytest --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(
self,
filename: Union[str, py.path.local],
firstlineno: int,
tblines: Sequence[str],
errorstring: str,
argname: Optional[str],
) -> None:
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw: TerminalWriter) -> None:
# tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
lines = self.errorstring.split("\n")
if lines:
tw.line(
"{} {}".format(FormattedExcinfo.fail_marker, lines[0].strip()),
red=True,
)
for line in lines[1:]:
tw.line(
"{} {}".format(FormattedExcinfo.flow_marker, line.strip()),
red=True,
)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
def fail_fixturefunc(fixturefunc, msg: str) -> "NoReturn":
fs, lineno = getfslineno(fixturefunc)
location = "{}:{}".format(fs, lineno + 1)
source = _pytest._code.Source(fixturefunc)
fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False)
def call_fixture_func(
fixturefunc: "_FixtureFunc[_FixtureValue]", request: FixtureRequest, kwargs
) -> _FixtureValue:
if is_generator(fixturefunc):
fixturefunc = cast(
Callable[..., Generator[_FixtureValue, None, None]], fixturefunc
)
generator = fixturefunc(**kwargs)
try:
fixture_result = next(generator)
except StopIteration:
raise ValueError(
"{} did not yield a value".format(request.fixturename)
) from None
finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, generator)
request.addfinalizer(finalizer)
else:
fixturefunc = cast(Callable[..., _FixtureValue], fixturefunc)
fixture_result = fixturefunc(**kwargs)
return fixture_result
def _teardown_yield_fixture(fixturefunc, it) -> None:
"""Execute the teardown of a fixture function by advancing the iterator
after the yield, and ensure the iteration ends (if it does not, there is
more than one yield in the function)."""
try:
next(it)
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc, "fixture function has more than one 'yield'")
def _eval_scope_callable(
scope_callable: "Callable[[str, Config], _Scope]",
fixture_name: str,
config: Config,
) -> "_Scope":
try:
# Type ignored because there is no typing mechanism to specify
# keyword arguments, currently.
result = scope_callable(fixture_name=fixture_name, config=config) # type: ignore[call-arg]
except Exception as e:
raise TypeError(
"Error evaluating {} while defining fixture '{}'.\n"
"Expected a function with the signature (*, fixture_name, config)".format(
scope_callable, fixture_name
)
) from e
if not isinstance(result, str):
fail(
"Expected {} to return a 'str' while defining fixture '{}', but it returned:\n"
"{!r}".format(scope_callable, fixture_name, result),
pytrace=False,
)
return result
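# Hedged sketch of a dynamic-scope callable as validated above (the fixture
# name and the command-line option below are invented for illustration and
# guarded so they never run on import): the callable must accept keyword-only
# ``fixture_name`` and ``config`` and return one of the scope strings.
if False:  # example only
    def _tmp_db_scope(*, fixture_name, config):
        # widen the scope when a hypothetical ``--reuse-db`` option is given
        return "session" if config.getoption("--reuse-db", default=False) else "function"
    # used as: @fixture(scope=_tmp_db_scope)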
@final
class FixtureDef(Generic[_FixtureValue]):
"""A container for a factory definition."""
def __init__(
self,
fixturemanager: "FixtureManager",
baseid,
argname: str,
func: "_FixtureFunc[_FixtureValue]",
scope: "Union[_Scope, Callable[[str, Config], _Scope]]",
params: Optional[Sequence[object]],
unittest: bool = False,
ids: Optional[
Union[
Tuple[Union[None, str, float, int, bool], ...],
Callable[[Any], Optional[object]],
]
] = None,
) -> None:
self._fixturemanager = fixturemanager
self.baseid = baseid or ""
self.has_location = baseid is not None
self.func = func
self.argname = argname
if callable(scope):
scope_ = _eval_scope_callable(scope, argname, fixturemanager.config)
else:
scope_ = scope
self.scopenum = scope2index(
# TODO: Check if the `or` here is really necessary.
scope_ or "function", # type: ignore[unreachable]
descr="Fixture '{}'".format(func.__name__),
where=baseid,
)
self.scope = scope_
self.params = params # type: Optional[Sequence[object]]
self.argnames = getfuncargnames(
func, name=argname, is_method=unittest
) # type: Tuple[str, ...]
self.unittest = unittest
self.ids = ids
self.cached_result = None # type: Optional[_FixtureCachedResult[_FixtureValue]]
self._finalizers = [] # type: List[Callable[[], object]]
def addfinalizer(self, finalizer: Callable[[], object]) -> None:
self._finalizers.append(finalizer)
def finish(self, request: SubRequest) -> None:
exc = None
try:
while self._finalizers:
try:
func = self._finalizers.pop()
func()
except BaseException as e:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = e
if exc:
raise exc
finally:
hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
hook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
# Even if finalization fails, we invalidate the cached fixture
# value and remove all finalizers because they may be bound methods
# which will keep instances alive.
self.cached_result = None
self._finalizers = []
def execute(self, request: SubRequest) -> _FixtureValue:
# Get required arguments and register our own finish()
# with their finalization.
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
if argname != "request":
# PseudoFixtureDef is only for "request".
assert isinstance(fixturedef, FixtureDef)
fixturedef.addfinalizer(functools.partial(self.finish, request=request))
my_cache_key = self.cache_key(request)
if self.cached_result is not None:
# note: comparison with `==` can fail (or be expensive) for e.g.
# numpy arrays (#6497).
cache_key = self.cached_result[1]
if my_cache_key is cache_key:
if self.cached_result[2] is not None:
_, val, tb = self.cached_result[2]
raise val.with_traceback(tb)
else:
result = self.cached_result[0]
return result
# We have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one.
self.finish(request)
assert self.cached_result is None
hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
result = hook.pytest_fixture_setup(fixturedef=self, request=request)
return result
def cache_key(self, request: SubRequest) -> object:
return request.param_index if not hasattr(request, "param") else request.param
def __repr__(self) -> str:
return "<FixtureDef argname={!r} scope={!r} baseid={!r}>".format(
self.argname, self.scope, self.baseid
)
def resolve_fixture_function(
fixturedef: FixtureDef[_FixtureValue], request: FixtureRequest
) -> "_FixtureFunc[_FixtureValue]":
"""Get the actual callable that can be called to obtain the fixture
value, dealing with unittest-specific instances and bound methods."""
fixturefunc = fixturedef.func
if fixturedef.unittest:
if request.instance is not None:
# Bind the unbound method to the TestCase instance.
fixturefunc = fixturedef.func.__get__(request.instance) # type: ignore[union-attr]
else:
# The fixture function needs to be bound to the actual
# request.instance so that code working with "fixturedef" behaves
# as expected.
if request.instance is not None:
# Handle the case where fixture is defined not in a test class, but some other class
# (for example a plugin class with a fixture), see #2270.
if hasattr(fixturefunc, "__self__") and not isinstance(
request.instance, fixturefunc.__self__.__class__ # type: ignore[union-attr]
):
return fixturefunc
fixturefunc = getimfunc(fixturedef.func)
if fixturefunc != fixturedef.func:
fixturefunc = fixturefunc.__get__(request.instance) # type: ignore[union-attr]
return fixturefunc
def pytest_fixture_setup(
fixturedef: FixtureDef[_FixtureValue], request: SubRequest
) -> _FixtureValue:
"""Execution of fixture setup."""
kwargs = {}
for argname in fixturedef.argnames:
fixdef = request._get_active_fixturedef(argname)
assert fixdef.cached_result is not None
result, arg_cache_key, exc = fixdef.cached_result
request._check_scope(argname, request.scope, fixdef.scope)
kwargs[argname] = result
fixturefunc = resolve_fixture_function(fixturedef, request)
my_cache_key = fixturedef.cache_key(request)
try:
result = call_fixture_func(fixturefunc, request, kwargs)
except TEST_OUTCOME:
exc_info = sys.exc_info()
assert exc_info[0] is not None
fixturedef.cached_result = (None, my_cache_key, exc_info)
raise
fixturedef.cached_result = (result, my_cache_key, None)
return result
def _ensure_immutable_ids(
ids: Optional[
Union[
Iterable[Union[None, str, float, int, bool]],
Callable[[Any], Optional[object]],
]
],
) -> Optional[
Union[
Tuple[Union[None, str, float, int, bool], ...],
Callable[[Any], Optional[object]],
]
]:
if ids is None:
return None
if callable(ids):
return ids
return tuple(ids)
def _params_converter(
params: Optional[Iterable[object]],
) -> Optional[Tuple[object, ...]]:
return tuple(params) if params is not None else None
def wrap_function_to_error_out_if_called_directly(function, fixture_marker):
"""Wrap the given fixture function so we can raise an error about it being called directly,
instead of being used as an argument in a test function."""
message = (
'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n'
"but are created automatically when test functions request them as parameters.\n"
"See https://docs.pytest.org/en/stable/fixture.html for more information about fixtures, and\n"
"https://docs.pytest.org/en/stable/deprecations.html#calling-fixtures-directly about how to update your code."
).format(name=fixture_marker.name or function.__name__)
@functools.wraps(function)
def result(*args, **kwargs):
fail(message, pytrace=False)
# Keep reference to the original function in our own custom attribute so we don't unwrap
# further than this point and lose useful wrappings like @mock.patch (#3774).
result.__pytest_wrapped__ = _PytestWrapper(function) # type: ignore[attr-defined]
return result
@final
@attr.s(frozen=True)
class FixtureFunctionMarker:
scope = attr.ib(type="Union[_Scope, Callable[[str, Config], _Scope]]")
params = attr.ib(type=Optional[Tuple[object, ...]], converter=_params_converter)
autouse = attr.ib(type=bool, default=False)
ids = attr.ib(
type=Union[
Tuple[Union[None, str, float, int, bool], ...],
Callable[[Any], Optional[object]],
],
default=None,
converter=_ensure_immutable_ids,
)
name = attr.ib(type=Optional[str], default=None)
def __call__(self, function: _FixtureFunction) -> _FixtureFunction:
if inspect.isclass(function):
raise ValueError("class fixtures not supported (maybe in the future)")
if getattr(function, "_pytestfixturefunction", False):
raise ValueError(
"fixture is being applied more than once to the same function"
)
function = wrap_function_to_error_out_if_called_directly(function, self)
name = self.name or function.__name__
if name == "request":
location = getlocation(function)
fail(
"'request' is a reserved word for fixtures, use another name:\n {}".format(
location
),
pytrace=False,
)
# Type ignored because https://github.com/python/mypy/issues/2087.
function._pytestfixturefunction = self # type: ignore[attr-defined]
return function
@overload
def fixture(
fixture_function: _FixtureFunction,
*,
scope: "Union[_Scope, Callable[[str, Config], _Scope]]" = ...,
params: Optional[Iterable[object]] = ...,
autouse: bool = ...,
ids: Optional[
Union[
Iterable[Union[None, str, float, int, bool]],
Callable[[Any], Optional[object]],
]
] = ...,
name: Optional[str] = ...
) -> _FixtureFunction:
...
@overload # noqa: F811
def fixture( # noqa: F811
fixture_function: None = ...,
*,
scope: "Union[_Scope, Callable[[str, Config], _Scope]]" = ...,
params: Optional[Iterable[object]] = ...,
autouse: bool = ...,
ids: Optional[
Union[
Iterable[Union[None, str, float, int, bool]],
Callable[[Any], Optional[object]],
]
] = ...,
name: Optional[str] = None
) -> FixtureFunctionMarker:
...
def fixture( # noqa: F811
fixture_function: Optional[_FixtureFunction] = None,
*,
scope: "Union[_Scope, Callable[[str, Config], _Scope]]" = "function",
params: Optional[Iterable[object]] = None,
autouse: bool = False,
ids: Optional[
Union[
Iterable[Union[None, str, float, int, bool]],
Callable[[Any], Optional[object]],
]
] = None,
name: Optional[str] = None
) -> Union[FixtureFunctionMarker, _FixtureFunction]:
"""Decorator to mark a fixture factory function.
This decorator can be used, with or without parameters, to define a
fixture function.
The name of the fixture function can later be referenced to cause its
invocation ahead of running tests: test modules or classes can use the
``pytest.mark.usefixtures(fixturename)`` marker.
Test functions can directly use fixture names as input arguments in which
case the fixture instance returned from the fixture function will be
injected.
Fixtures can provide their values to test functions using ``return`` or
``yield`` statements. When using ``yield`` the code block after the
``yield`` statement is executed as teardown code regardless of the test
outcome, and must yield exactly once.
:param scope:
The scope for which this fixture is shared; one of ``"function"``
(default), ``"class"``, ``"module"``, ``"package"`` or ``"session"``.
This parameter may also be a callable which receives ``(fixture_name, config)``
as parameters, and must return a ``str`` with one of the values mentioned above.
See :ref:`dynamic scope` in the docs for more information.
:param params:
An optional list of parameters which will cause multiple invocations
of the fixture function and all of the tests using it. The current
parameter is available in ``request.param``.
:param autouse:
If True, the fixture func is activated for all tests that can see it.
If False (the default), an explicit reference is needed to activate
the fixture.
:param ids:
List of string ids each corresponding to the params so that they are
part of the test id. If no ids are provided they will be generated
automatically from the params.
:param name:
The name of the fixture. This defaults to the name of the decorated
function. If a fixture is used in the same module in which it is
defined, the function name of the fixture will be shadowed by the
function arg that requests the fixture; one way to resolve this is to
name the decorated function ``fixture_<fixturename>`` and then use
``@pytest.fixture(name='<fixturename>')``.
"""
fixture_marker = FixtureFunctionMarker(
scope=scope, params=params, autouse=autouse, ids=ids, name=name,
)
# Direct decoration.
if fixture_function:
return fixture_marker(fixture_function)
return fixture_marker
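# A minimal usage sketch (illustrative only, guarded so it never runs on import;
# the fixture and test names below are invented): a function-scoped yield
# fixture whose code after ``yield`` runs as teardown once the test finishes.
if False:  # example only
    @fixture(scope="function")
    def sample_numbers():
        data = [1, 2, 3]  # setup
        yield data        # value injected into the requesting test
        data.clear()      # teardown, runs regardless of the test outcome

    def test_sum(sample_numbers):
        assert sum(sample_numbers) == 6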
def yield_fixture(
fixture_function=None,
*args,
scope="function",
params=None,
autouse=False,
ids=None,
name=None
):
"""(Return a) decorator to mark a yield-fixture factory function.
.. deprecated:: 3.0
Use :py:func:`pytest.fixture` directly instead.
"""
return fixture(
fixture_function,
*args,
scope=scope,
params=params,
autouse=autouse,
ids=ids,
name=name,
)
@fixture(scope="session")
def pytestconfig(request: FixtureRequest) -> Config:
"""Session-scoped fixture that returns the :class:`_pytest.config.Config` object.
Example::
def test_foo(pytestconfig):
if pytestconfig.getoption("verbose") > 0:
...
"""
return request.config
def pytest_addoption(parser: Parser) -> None:
parser.addini(
"usefixtures",
type="args",
default=[],
help="list of default fixtures to be used with this project",
)
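# Hedged configuration sketch (the project-specific fixture names are
# invented): the ini option registered above is a space-separated list of
# fixture names, e.g. in pytest.ini
#
#   [pytest]
#   usefixtures = cleandir caplog_setup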
class FixtureManager:
"""pytest fixture definitions and information is stored and managed
from this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
as the closure of the fixtures needed to set up the initial fixtures,
i.e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
During the test-setup phase all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session: "Session") -> None:
self.session = session
self.config = session.config # type: Config
self._arg2fixturedefs = {} # type: Dict[str, List[FixtureDef[Any]]]
self._holderobjseen = set() # type: Set[object]
self._nodeid_and_autousenames = [
("", self.config.getini("usefixtures"))
] # type: List[Tuple[str, List[str]]]
session.config.pluginmanager.register(self, "funcmanage")
def _get_direct_parametrize_args(self, node: "nodes.Node") -> List[str]:
"""Return all direct parametrization arguments of a node, so we don't
mistake them for fixtures.
Check https://github.com/pytest-dev/pytest/issues/5036.
These things are done later as well when dealing with parametrization
so this could be improved.
"""
parametrize_argnames = [] # type: List[str]
for marker in node.iter_markers(name="parametrize"):
if not marker.kwargs.get("indirect", False):
p_argnames, _ = ParameterSet._parse_parametrize_args(
*marker.args, **marker.kwargs
)
parametrize_argnames.extend(p_argnames)
return parametrize_argnames
def getfixtureinfo(
self, node: "nodes.Node", func, cls, funcargs: bool = True
) -> FuncFixtureInfo:
if funcargs and not getattr(node, "nofuncargs", False):
argnames = getfuncargnames(func, name=node.name, cls=cls)
else:
argnames = ()
usefixtures = tuple(
arg for mark in node.iter_markers(name="usefixtures") for arg in mark.args
)
initialnames = usefixtures + argnames
fm = node.session._fixturemanager
initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure(
initialnames, node, ignore_args=self._get_direct_parametrize_args(node)
)
return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
nodeid = None
try:
p = absolutepath(plugin.__file__) # type: ignore[attr-defined]
except AttributeError:
pass
else:
from _pytest import nodes
# Construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id).
if p.name.startswith("conftest.py"):
try:
nodeid = str(p.parent.relative_to(self.config.rootpath))
except ValueError:
nodeid = ""
if nodeid == ".":
nodeid = ""
if os.sep != nodes.SEP:
nodeid = nodeid.replace(os.sep, nodes.SEP)
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid: str) -> List[str]:
"""Return a list of fixture names to be used."""
autousenames = [] # type: List[str]
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i : i + 1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
return autousenames
def getfixtureclosure(
self, fixturenames: Tuple[str, ...], parentnode, ignore_args: Sequence[str] = ()
) -> Tuple[Tuple[str, ...], List[str], Dict[str, Sequence[FixtureDef[Any]]]]:
# Collect the closure of all fixtures, starting with the given
# fixturenames as the initial set. As we have to visit all
# factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive).
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist: Iterable[str]) -> None:
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
# At this point, fixturenames_closure contains what we call "initialnames",
# which is a set of fixturenames the function immediately requests. We
# need to return it as well, so save this.
initialnames = tuple(fixturenames_closure)
arg2fixturedefs = {} # type: Dict[str, Sequence[FixtureDef[Any]]]
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in ignore_args:
continue
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
def sort_by_scope(arg_name: str) -> int:
try:
fixturedefs = arg2fixturedefs[arg_name]
except KeyError:
return scopes.index("function")
else:
return fixturedefs[-1].scopenum
fixturenames_closure.sort(key=sort_by_scope)
return initialnames, fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc: "Metafunc") -> None:
"""Generate new tests based on parametrized fixtures used by the given metafunc"""
def get_parametrize_mark_argnames(mark: Mark) -> Sequence[str]:
args, _ = ParameterSet._parse_parametrize_args(*mark.args, **mark.kwargs)
return args
for argname in metafunc.fixturenames:
# Get the FixtureDefs for the argname.
fixture_defs = metafunc._arg2fixturedefs.get(argname)
if not fixture_defs:
# Will raise FixtureLookupError at setup time if not parametrized somewhere
# else (e.g @pytest.mark.parametrize)
continue
# If the test itself parametrizes using this argname, give it
# precedence.
if any(
argname in get_parametrize_mark_argnames(mark)
for mark in metafunc.definition.iter_markers("parametrize")
):
continue
# In the common case we only look at the fixture def with the
# closest scope (last in the list). But if the fixture overrides
# another fixture, while requesting the super fixture, keep going
# in case the super fixture is parametrized (#1953).
for fixturedef in reversed(fixture_defs):
# Fixture is parametrized, apply it and stop.
if fixturedef.params is not None:
metafunc.parametrize(
argname,
fixturedef.params,
indirect=True,
scope=fixturedef.scope,
ids=fixturedef.ids,
)
break
# Not requesting the overridden super fixture, stop.
if argname not in fixturedef.argnames:
break
# Try next super fixture, if any.
def pytest_collection_modifyitems(self, items: "List[nodes.Item]") -> None:
# Separate parametrized setups.
items[:] = reorder_items(items)
def parsefactories(
self, node_or_obj, nodeid=NOTSET, unittest: bool = False
) -> None:
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
# The attribute can be an arbitrary descriptor, so the attribute
# access below can raise. safe_getattr() ignores such exceptions.
obj = safe_getattr(holderobj, name, None)
marker = getfixturemarker(obj)
if not isinstance(marker, FixtureFunctionMarker):
# Magic globals with __getattr__ might have got us a wrong
# fixture attribute.
continue
if marker.name:
name = marker.name
# During fixture definition we wrap the original fixture function
# to issue a warning if called directly, so here we unwrap it in
# order to not emit the warning when pytest itself calls the
# fixture function.
obj = get_real_method(obj, holderobj)
fixture_def = FixtureDef(
fixturemanager=self,
baseid=nodeid,
argname=name,
func=obj,
scope=marker.scope,
params=marker.params,
unittest=unittest,
ids=marker.ids,
)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixture_def.has_location:
faclist.append(fixture_def)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixture_def)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or "", autousenames))
def getfixturedefs(
self, argname: str, nodeid: str
) -> Optional[Sequence[FixtureDef[Any]]]:
"""Get a list of fixtures which are applicable to the given node id.
:param str argname: Name of the fixture to search for.
:param str nodeid: Full node id of the requesting test.
:rtype: Sequence[FixtureDef]
"""
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(
self, fixturedefs: Iterable[FixtureDef[Any]], nodeid: str
) -> Iterator[FixtureDef[Any]]:
from _pytest import nodes
for fixturedef in fixturedefs:
if nodes.ischildnode(fixturedef.baseid, nodeid):
yield fixturedef
|
TeamSPoon/logicmoo_workspace
|
packs_web/butterfly/lib/python3.7/site-packages/_pytest/fixtures.py
|
Python
|
mit
| 65,079
|
[
"VisIt"
] |
4c82944a123b47724f1c691a25cfa7dba0b2d1ba1f8630c8e260de33be28820f
|
import numpy as np
import scipy as scipy
import lxmls.classifiers.linear_classifier as lc
import sys
from lxmls.distributions.gaussian import *
class MultinomialNaiveBayes(lc.LinearClassifier):
def __init__(self, xtype="gaussian"):
lc.LinearClassifier.__init__(self)
self.trained = False
self.likelihood = 0
self.prior = 0
self.smooth = True
self.smooth_param = 1
def train(self, x, y):
# n_docs = no. of documents
# n_words = no. of unique words
n_docs, n_words = x.shape
# classes = a list of possible classes
classes = np.unique(y)
# n_classes = no. of classes
n_classes = np.unique(y).shape[0]
# initialization of the prior and likelihood variables
prior = np.zeros(n_classes)
likelihood = np.zeros((n_words, n_classes))
# TODO: This is where you have to write your code!
# You need to compute the values of the prior and likelihood parameters
# and place them in the variables called "prior" and "likelihood".
# Examples:
# prior[0] is the prior probability of a document being of class 0
# likelihood[4, 0] is the likelihood of the fifth(*) feature being
# active, given that the document is of class 0
# (*) recall that Python starts indices at 0, so an index of 4
# corresponds to the fifth feature!
# ----------
# Solution to Exercise 1.1
for i in xrange(n_classes):
docs_in_class, _ = np.nonzero(y == classes[i]) # docs_in_class = indices of documents in class i
prior[i] = 1.0 * len(docs_in_class) / n_docs # prior = fraction of documents with this class
# word_count_in_class = count of word occurrences in documents of class i
word_count_in_class = x[docs_in_class, :].sum(0)
total_words_in_class = word_count_in_class.sum() # total_words_in_class = total number of words in documents of class i
if not self.smooth:
# likelihood = relative frequency of each word among all words in class i
likelihood[:, i] = word_count_in_class / total_words_in_class
else:
likelihood[:, i] = (word_count_in_class+self.smooth_param) / (total_words_in_class + self.smooth_param*n_words)
# End solution to Exercise 1.1
# ----------
params = np.zeros((n_words+1, n_classes))
for i in xrange(n_classes):
params[0, i] = np.log(prior[i])
params[1:, i] = np.nan_to_num(np.log(likelihood[:, i]))
self.likelihood = likelihood
self.prior = prior
self.trained = True
return params
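# Hedged usage sketch (counts and labels invented, guarded so it never runs on
# import): train on a tiny 3-document, 4-word term-count matrix; y is a column
# vector with one class label per document.
if False:  # example only
    x = np.array([[2, 0, 1, 0],
                  [0, 3, 0, 1],
                  [1, 0, 2, 0]])
    y = np.array([[0], [1], [0]])
    nb = MultinomialNaiveBayes()
    params = nb.train(x, y)   # row 0: log-priors; remaining rows: log-likelihoods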
|
jnobre/lxmls-toolkit-2017
|
lxmls/classifiers/multinomial_naive_bayes.py
|
Python
|
mit
| 2,715
|
[
"Gaussian"
] |
1d46d04c491c7d99c25c34838d166cb27b85b775c5e903117d96654560130c52
|
# Auxiliary functions for analysis of
# ll_4320 mitgcm simulation
# crocha, sio summer 2014
import numpy as np
def rmean(A):
""" Removes time-mean of llc_4320 3d fields; axis=2 is time"""
ix,jx,kx = A.shape
Am = np.repeat(A.mean(axis=2),kx)
Am = Am.reshape(ix,jx,kx)
return A-Am
def spec_est_meridional(U,dx):
""" Computes 1d (meridional) spectral estimates of 3d llc_4320 fields"""
ix,jx,kx = U.shape
N = ix # record length
df = 1./(N*dx) # wavenumber resolution [cycles / (unit distance)]
fNy = 1./(2*dx) # Nyquist wavenumber
an = np.fft.fft(U,axis=0)
an = an[1:N/2-1,:,:]
E = 2*(an*an.conj())/df/(N**2) # spectral estimate
f = np.arange(1,N/2-1)*df
return E.mean(axis=2),f,df,fNy
def spec_est_zonal(U,dx):
""" Computes 1d (zonal) spectral estimates of 3d llc_4320 fields"""
ix,jx,kx = U.shape
N = jx # record length
df = 1./(N*dx) # wavenumber resolution [cycles / (unit distance)]
fNy = 1./(2*dx) # Nyquist wavenumber
an = np.fft.fft(U,axis=1)
an = an[:,1:N/2-1,:]
E = 2*(an*an.conj())/df/(N**2) # spectral estimate
f = np.arange(1,N/2-1)*df
return E.mean(axis=2),f,df,fNy
def spec_est_time(U,dt):
""" Computes spectral estimate in time (axis=2) """
ix,jx,kx = U.shape
N = kx # record length
df = 1./(N*dt) # frequency resolution [cycles / (unit time)]
fNy = 1./(2*dt) # Nyquist frequency
an = np.fft.fft(U,axis=2)
an = an[:,:,1:N/2-1]
E = 2*(an*an.conj())/df/(N**2) # spectral estimate
f = np.arange(1,N/2-1)*df
return E.mean(axis=0),f,df,fNy
def spec_error(E,sn,ci):
""" Computes confidence interval for spectral
estimate E.
sn is the number of spectral realizations (dof/2)
ci = .95 for 95 % confidence interval
returns lower (El) and upper (Eu) bounds on E
as well as pdf and cdf used to estimate errors """
## params
dbin = .001
yN = np.arange(0,5.+dbin,dbin)
dof = 2.*sn # DOF = 2 x # of spectral estimates
## PDF for E/E0, where E (E0) is the estimate (true)
## process spectrum (basically a chi^2 distribution)
C = dof / ( (2**sn) * np.math.gamma(sn) ) # constant
pdf_yN = C * ( (dof*yN)**(sn-1) ) * np.exp( -(sn*yN) ) # chi^2(E/E0)
## CDF
cdf_yN = np.cumsum(pdf_yN*dbin) # trapezoidal-like integration
## compute confidence limits
# lower
el = ci
fl = np.where( np.abs(cdf_yN - el) == np.abs(cdf_yN - el).min())
El = E/yN[fl]
# upper
eu = 1 - ci
fu = np.where( np.abs(cdf_yN - eu) == np.abs(cdf_yN - eu).min())
Eu = E/yN[fu]
return El, Eu, cdf_yN, pdf_yN
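# Hedged usage sketch (array sizes, dx and sn are invented, guarded so it never
# runs on import): meridional spectrum of a synthetic (y, x, time) field with
# 95% confidence bounds.
if False:  # example only
    U = np.random.randn(256, 128, 10)
    E, f, df, fNy = spec_est_meridional(U, dx=1.)
    El, Eu, cdf, pdf = spec_error(E, sn=10, ci=.95)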
# if sn larger than 150, assume it is normally-distributed (e.g., Bendat and Piersol)
def spec_error2(E,sn):
std_E = (1/np.sqrt(sn))
El = E/(1 + 2*std_E)
Eu = E/(1 - 2*std_E)
return El, Eu, std_E
def spectral_slope(k,E,kmin,kmax,stdE):
''' compute spectral slope in log space in
a wavenumber subrange [kmin,kmax],
m: spectral slope; mm: uncertainty'''
fr = np.where((k>=kmin)&(k<=kmax))
ki = np.matrix((np.log10(k[fr]))).T
Ei = np.matrix(np.log10(np.real(E[fr]))).T
dd = np.matrix(np.eye(ki.size)*((np.abs(np.log10(stdE)))**2))
G = np.matrix(np.append(np.ones((ki.size,1)),ki,axis=1))
Gg = ((G.T*G).I)*G.T
m = Gg*Ei
mm = np.sqrt(np.array(Gg*dd*Gg.T)[1,1])
yfit = np.array(G*m)
m = np.array(m)[1]
return m, mm
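# Hedged usage sketch (the wavenumber subrange and error level are invented):
# slope of one spectrum between wavenumbers 0.01 and 0.1, assuming a nominal
# 20% error on E, e.g.
#   m, mm = spectral_slope(f, E[:, 0], kmin=1e-2, kmax=1e-1, stdE=1.2)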
def leg_width(lg,fs):
"""" Sets the linewidth of each legend object """
for legobj in lg.legendHandles:
legobj.set_linewidth(fs)
def auto_corr(x):
""" Computes auto-correlation of 1d array """
a = np.correlate(x,x,mode='full')
a = a[x.size-1:]
a = a/a[0]
return a
def fit_gauss(x,y):
""" Estimate characteristic scale of a auto-correlation function
by fitting a Gaussian to auto_corr"""
y = np.matrix(np.log(y)).T
A1 = np.matrix(np.ones((x.size,1)))
A2 = np.matrix(-(x**2)).T
A = A2
xmax = 650
we = np.float(xmax) - x
we = (we/(xmax))**2
We = np.matrix(np.diag(we))
Gg = ((A.T*We*A).I)*A.T
c = Gg*y
Lfit = np.sqrt(1/c[-1])
yfit = np.exp( A*c )
return Lfit
def block_ave(dist,U,dx):
""" Block-averages the array u onto grid with resolution dx """
ix,jx,kx = U.shape
disti = np.arange(dx/2.,dist[-1]+dx/2.,dx)
Ui = np.zeros((disti.size,jx,kx))
for i in range(0,disti.size):
fn = ((dist >= disti[i]-dx/2.) & (dist <= disti[i]+dx/2.))
fns = np.sum(fn)
if fns>0:
Ui[i,:,:] = np.nansum(U[fn,:,:],axis=0)/fns
else:
Ui[i,:,:] = np.nan
return Ui
|
crocha700/dp_spectra
|
synthetic/aux_func_3dfields.py
|
Python
|
mit
| 4,729
|
[
"Gaussian"
] |
f5c01b1a1da079b35005311ac4fbeadd842492b21f6cbdf86e1b71bcb04eaf3d
|
'''
Created on Jul 15, 2011
@author: sean
'''
from __future__ import print_function
import _ast
from ...asttools import Visitor
from string import Formatter
import sys
from ...utils import py3op, py2op
if sys.version_info.major < 3:
from StringIO import StringIO
else:
from io import StringIO
class ASTFormatter(Formatter):
def format_field(self, value, format_spec):
if format_spec == 'node':
gen = ExprSourceGen()
gen.visit(value)
return gen.dumps()
elif value == '':
return value
else:
return super(ASTFormatter, self).format_field(value, format_spec)
def get_value(self, key, args, kwargs):
if key == '':
return args[0]
elif key in kwargs:
return kwargs[key]
elif isinstance(key, int):
return args[key]
key = int(key)
return args[key]
raise Exception
def str_node(node):
gen = ExprSourceGen()
gen.visit(node)
return gen.dumps()
def simple_string(value):
def visitNode(self, node):
self.print(value, **node.__dict__)
return visitNode
class ExprSourceGen(Visitor):
def __init__(self):
self.out = StringIO()
self.formatter = ASTFormatter()
self.indent = ' '
self.level = 0
@property
def indenter(self):
return Indenter(self)
@property
def no_indent(self):
return NoIndent(self)
def dump(self, file=sys.stdout):
self.out.seek(0)
print(self.out.read(), file=file)
def dumps(self):
self.out.seek(0)
value = self.out.read()
return value
def print(self, line, *args, **kwargs):
line = self.formatter.format(line, *args, **kwargs)
level = kwargs.get('level')
prx = self.indent * (level if level else self.level)
print(prx, line, sep='', end='', file=self.out)
def print_lines(self, lines,):
prx = self.indent * self.level
for line in lines:
print(prx, line, sep='', file=self.out)
def visitName(self, node):
self.print(node.id)
@py2op
def visitarguments(self, node):
# ('args', 'vararg', 'kwarg', 'defaults')
defaults = [None] * (len(node.args) - len(node.defaults))
defaults.extend(node.defaults)
i = 0
args = list(node.args)
if args:
i += 1
arg = args.pop(0)
default = defaults.pop(0)
self.visit(arg)
if default is not None:
self.print('={:node}', default)
while args:
arg = args.pop(0)
default = defaults.pop(0)
self.print(', ')
self.visit(arg)
if default is not None:
self.print('={:node}', default)
if node.vararg:
self.print('{0}*{1}', ', ' if i else '', node.vararg)
if node.kwarg:
self.print('{0}**{1}', ', ' if i else '', node.kwarg)
@visitarguments.py3op
def visitarguments(self, node):
# ('args', 'vararg', 'kwarg', 'defaults')
defaults = [None] * (len(node.args) - len(node.defaults))
defaults.extend(node.defaults)
i = 0
args = list(node.args)
if args:
i += 1
arg = args.pop(0)
default = defaults.pop(0)
self.visit(arg)
if default is not None:
self.print('={:node}', default)
while args:
arg = args.pop(0)
default = defaults.pop(0)
self.print(', ')
self.visit(arg)
if default is not None:
self.print('={:node}', default)
if node.vararg:
self.print('{0}*{1}', ', ' if i else '', node.vararg)
if node.varargannotation:
self.print(':{:node}', node.varargannotation)
elif node.kwonlyargs:
self.print('{0}*', ', ' if i else '')
kwonlyargs = list(node.kwonlyargs)
if kwonlyargs:
i += 1
kw_defaults = [None] * (len(kwonlyargs) - len(node.kw_defaults))
kw_defaults.extend(node.kw_defaults)
while kwonlyargs:
kw_arg = kwonlyargs.pop(0)
kw_default = kw_defaults.pop(0)
self.print(', ')
self.visit(kw_arg)
if kw_default is not None:
self.print('={:node}', kw_default)
if node.kwarg:
self.print('{0}**{1}', ', ' if i else '', node.kwarg)
if node.kwargannotation:
self.print(':{:node}', node.kwargannotation)
def visitNum(self, node):
self.print(repr(node.n))
def visitBinOp(self, node):
self.print('({left:node} {op:node} {right:node})', left=node.left, op=node.op, right=node.right)
def visitAdd(self, node):
self.print('+')
def visitalias(self, node):
if node.asname is None:
self.print("{0}", node.name)
else:
self.print("{0} as {1}", node.name, node.asname)
def visitCall(self, node):
self.print('{func:node}(' , func=node.func)
i = 0
print_comma = lambda i: self.print(", ") if i > 0 else None
with self.no_indent:
for arg in node.args:
print_comma(i)
self.print('{:node}', arg)
i += 1
for kw in node.keywords:
print_comma(i)
self.print('{:node}', kw)
i += 1
if node.starargs:
print_comma(i)
self.print('*{:node}', node.starargs)
i += 1
if node.kwargs:
print_comma(i)
self.print('**{:node}', node.kwargs)
i += 1
self.print(')')
def visitkeyword(self, node):
self.print("{0}={1:node}", node.arg, node.value)
def visitStr(self, node):
self.print(repr(node.s))
def visitMod(self, node):
self.print('%')
def visitTuple(self, node, brace='()'):
self.print(brace[0])
print_comma = lambda i: self.print(", ") if i > 0 else None
i = 0
with self.no_indent:
for elt in node.elts:
print_comma(i)
self.print('{:node}', elt)
i += 1
if len(node.elts) == 1:
self.print(',')
self.print(brace[1])
def visitCompare(self, node):
self.print('({0:node} ', node.left)
with self.no_indent:
for (op, right) in zip(node.ops, node.comparators):
self.print('{0:node} {1:node}' , op, right)
self.print(')')
@py2op
def visitRaise(self, node):
self.print('raise ')
with self.no_indent:
if node.type:
self.print('{:node}' , node.type)
if node.inst:
self.print(', {:node}' , node.inst)
if node.tback:
self.print(', {:node}' , node.tback)
@visitRaise.py3op
def visitRaise(self, node):
self.print('raise ')
with self.no_indent:
if node.exc:
self.print('{:node}' , node.exc)
if node.cause:
self.print(' from {:node}' , node.cause)
def visitAttribute(self, node):
self.print('{:node}.{attr}', node.value, attr=node.attr)
def visitDict(self, node):
self.print('{{')
items = zip(node.keys, node.values)
with self.no_indent:
i = 0
pc = lambda : self.print(", ") if i > 0 else None
for key, value in items:
pc()
self.print('{0:node}:{1:node}', key, value)
i += 1
self.print('}}')
def visitSet(self, node):
self.print('{{')
items = node.elts
with self.no_indent:
i = 0
pc = lambda : self.print(", ") if i > 0 else None
for value in items:
pc()
self.print('{0:node}', value)
i += 1
self.print('}}')
def visitList(self, node):
self.print('[')
with self.no_indent:
i = 0
pc = lambda : self.print(", ") if i > 0 else None
for item in node.elts:
pc()
self.print('{:node}', item)
i += 1
self.print(']')
def visitSubscript(self, node):
self.print('{0:node}[{1:node}]', node.value, node.slice)
def visitIndex(self, node):
if isinstance(node.value, _ast.Tuple):
with self.no_indent:
self.visit(node.value, brace=['', ''])
else:
self.print('{:node}', node.value)
def visitSlice(self, node):
with self.no_indent:
if node.lower is not None:
self.print('{:node}', node.lower)
self.print(':')
if node.upper is not None:
self.print('{:node}', node.upper)
if node.step is not None:
self.print(':')
self.print('{:node}', node.step)
def visitExtSlice(self, node):
dims = list(node.dims)
with self.no_indent:
dim = dims.pop(0)
self.print('{0:node}', dim)
while dims:
dim = dims.pop(0)
self.print(', {0:node}', dim)
def visitUnaryOp(self, node):
self.print('({0:node}{1:node})', node.op, node.operand)
def visitAssert(self, node):
self.print('assert {0:node}', node.test)
if node.msg:
with self.no_indent:
self.print(', {0:node}', node.msg)
visitUSub = simple_string('-')
visitUAdd = simple_string('+')
visitNot = simple_string('not ')
visitInvert = simple_string('~')
visitAnd = simple_string('and')
visitOr = simple_string('or')
visitSub = simple_string('-')
visitFloorDiv = simple_string('//')
visitDiv = simple_string('/')
visitMod = simple_string('%')
visitMult = simple_string('*')
visitPow = simple_string('**')
visitEq = simple_string('==')
visitNotEq = simple_string('!=')
visitLt = simple_string('<')
visitGt = simple_string('>')
visitLtE = simple_string('<=')
visitGtE = simple_string('>=')
visitLShift = simple_string('<<')
visitRShift = simple_string('>>')
visitIn = simple_string('in')
visitNotIn = simple_string('not in')
visitIs = simple_string('is')
visitIsNot = simple_string('is not')
visitBitAnd = simple_string('&')
visitBitOr = simple_string('|')
visitBitXor = simple_string('^')
visitEllipsis = simple_string('...')
visitYield = simple_string('yield {value:node}')
def visitBoolOp(self, node):
with self.no_indent:
values = list(node.values)
left = values.pop(0)
self.print('({:node} ', left)
while values:
left = values.pop(0)
self.print('{0:node} {1:node})', node.op, left)
def visitIfExp(self, node):
self.print('{body:node} if {test:node} else {orelse:node}', **node.__dict__)
def visitLambda(self, node):
self.print('lambda {0:node}: {1:node}', node.args, node.body)
def visitListComp(self, node):
self.print('[{0:node}', node.elt)
generators = list(node.generators)
with self.no_indent:
while generators:
generator = generators.pop(0)
self.print('{0:node}', generator)
self.print(']')
def visitSetComp(self, node):
self.print('{{{0:node}', node.elt)
generators = list(node.generators)
with self.no_indent:
while generators:
generator = generators.pop(0)
self.print('{0:node}', generator)
self.print('}}')
def visitDictComp(self, node):
self.print('{{{0:node}:{1:node}', node.key, node.value)
generators = list(node.generators)
with self.no_indent:
while generators:
generator = generators.pop(0)
self.print('{0:node}', generator)
self.print('}}')
def visitcomprehension(self, node):
self.print(' for {0:node} in {1:node}', node.target, node.iter)
ifs = list(node.ifs)
while ifs:
if_ = ifs.pop(0)
self.print(" if {0:node}", if_)
@py3op
def visitarg(self, node):
self.print(node.arg)
if node.annotation:
with self.no_indent:
self.print(':{0:node}', node.annotation)
def visit_expr(node):
gen = ExprSourceGen()
gen.visit(node)
return gen.dumps()
class NoIndent(object):
def __init__(self, gen):
self.gen = gen
def __enter__(self):
self.level = self.gen.level
self.gen.level = 0
def __exit__(self, *args):
self.gen.level = self.level
class Indenter(object):
def __init__(self, gen):
self.gen = gen
def __enter__(self):
self.gen.print('\n', level=0)
self.gen.level += 1
def __exit__(self, *args):
self.gen.level -= 1
class SourceGen(ExprSourceGen):
def __init__(self, header=''):
super(SourceGen, self).__init__()
print(header, file=self.out)
def visitModule(self, node):
children = list(self.children(node))
if children and isinstance(children[0], _ast.Expr):
if isinstance(children[0].value, _ast.Str):
doc = children.pop(0).value
self.print("'''")
self.print_lines(doc.s.split('\n'))
self.print_lines(["'''", '\n', '\n'])
for node in children:
self.visit(node)
def visitFor(self, node):
self.print('for {0:node} in {1:node}:', node.target, node.iter)
with self.indenter:
for stmnt in node.body:
self.visit(stmnt)
if node.orelse:
self.print('else:')
with self.indenter:
for stmnt in node.orelse:
self.visit(stmnt)
@py2op
def visitFunctionDef(self, node):
#fields = ('name', 'args', 'body', 'decorator_list')
for decorator in node.decorator_list:
self.print('@{decorator:node}\n', decorator=decorator)
args = visit_expr(node.args)
self.print('def {name}({args}):' , name=node.name, args=args)
with self.indenter:
for child in node.body:
self.visit(child)
return
@visitFunctionDef.py3op
def visitFunctionDef(self, node):
for decorator in node.decorator_list:
self.print('@{decorator:node}\n', decorator=decorator)
args = visit_expr(node.args)
self.print('def {name}({args})' , name=node.name, args=args)
with self.no_indent:
if node.returns:
self.print(' -> {:node}:', node.returns)
else:
self.print(':', node.returns)
with self.indenter:
for child in node.body:
self.visit(child)
return
def visitAssign(self, node):
targets = [visit_expr(target) for target in node.targets]
self.print('{targets} = {value:node}\n', targets=' = '.join(targets), value=node.value)
def visitAugAssign(self, node):
self.print('{target:node} {op:node}= {value:node}\n', **node.__dict__)
def visitIf(self, node, indent_first=True):
self.print('if {:node}:', node.test, level=self.level if indent_first else 0)
with self.indenter:
if node.body:
for expr in node.body:
self.visit(expr)
else:
self.print('pass')
if node.orelse and len(node.orelse) == 1 and isinstance(node.orelse[0], _ast.If):
self.print('el'); self.visit(node.orelse[0], indent_first=False)
elif node.orelse:
self.print('else:')
with self.indenter:
for expr in node.orelse:
self.visit(expr)
self.print('\n')
def visitImportFrom(self, node):
for name in node.names:
self.print("from {0} import {1:node}\n", node.module, name)
def visitImport(self, node):
for name in node.names:
self.print("import {:node}\n", name)
def visitPrint(self, node):
self.print("print ")
with self.no_indent:
if node.dest:
self.print(">> {:node}" , node.dest)
if not node.values and node.nl:
self.print("\n")
return
self.print(", ")
i = 0
pc = lambda : self.print(", ") if i > 0 else None
for value in node.values:
pc()
self.print("{:node}" , value)
if not node.nl:
self.print(",")
self.print("\n")
def visitExec(self, node):
self.print('exec {0:node} in {1}, {2}\n', node.body,
'None' if node.globals is None else str_node(node.globals),
'None' if node.locals is None else str_node(node.locals))
def visitWith(self, node):
self.print('with {0:node}', node.context_expr)
if node.optional_vars is not None:
self.print(' as {0:node}', node.optional_vars, level=0)
self.print(':', level=0)
with self.indenter:
if node.body:
for expr in node.body:
self.visit(expr)
else:
self.print('pass\n')
def visitGlobal(self, node):
self.print('global ')
with self.no_indent:
names = list(node.names)
if names:
name = names.pop(0)
self.print(name)
while names:
name = names.pop(0)
self.print(', {0}', name)
self.print('\n')
def visitDelete(self, node):
self.print('del ')
targets = list(node.targets)
with self.no_indent:
target = targets.pop(0)
self.print('{0:node}', target)
while targets:
target = targets.pop(0)
self.print(', {0:node}', target)
self.print('\n')
def visitWhile(self, node):
self.print('while {0:node}:', node.test)
with self.indenter:
if node.body:
for expr in node.body:
self.visit(expr)
else:
self.print("pass")
if node.orelse:
self.print('else:')
with self.indenter:
for expr in node.orelse:
self.visit(expr)
self.print('\n')
self.print('\n')
def visitExpr(self, node):
self.print('{:node}\n', node.value)
visitBreak = simple_string('break\n')
visitPass = simple_string('pass\n')
visitContinue = simple_string('continue\n')
def visitReturn(self, node):
if node.value is not None:
self.print('return {:node}\n', node.value)
def visitTryExcept(self, node):
self.print('try:')
with self.indenter:
if node.body:
for stmnt in node.body:
self.visit(stmnt)
else:
self.print('pass')
for hndlr in node.handlers:
self.visit(hndlr)
if node.orelse:
self.print('else:')
with self.indenter:
for stmnt in node.orelse:
self.visit(stmnt)
@py2op
def visitExceptHandler(self, node):
self.print('except')
with self.no_indent:
if node.type:
self.print(" {0:node}", node.type)
if node.name:
self.print(" as {0:node}", node.name)
self.print(":")
with self.indenter:
if node.body:
for stmnt in node.body:
self.visit(stmnt)
else:
self.print('pass')
@visitExceptHandler.py3op
def visitExceptHandler(self, node):
self.print('except')
with self.no_indent:
if node.type:
self.print(" {0:node}", node.type)
if node.name:
self.print(" as {0}", node.name)
self.print(":")
with self.indenter:
for stmnt in node.body:
self.visit(stmnt)
def visitTryFinally(self, node):
for item in node.body:
self.visit(item)
self.print('finally:')
with self.indenter:
for item in node.finalbody:
self.visit(item)
@py2op
def visitClassDef(self, node):
for decorator in node.decorator_list:
self.print('@{0:node}\n', decorator)
self.print('class {0}', node.name)
with self.no_indent:
self.print('(')
bases = list(node.bases)
if bases:
base = bases.pop(0)
self.print("{0:node}", base)
while bases:
base = bases.pop(0)
self.print(", {0:node}", base)
self.print(')')
self.print(":")
with self.indenter:
if node.body:
for stmnt in node.body:
self.visit(stmnt)
else:
self.print("pass\n\n")
@visitClassDef.py3op
def visitClassDef(self, node):
for decorator in node.decorator_list:
self.print('@{0:node}\n', decorator)
self.print('class {0}', node.name)
with self.no_indent:
self.print('(')
bases = list(node.bases)
i = 0
if bases:
i += 1
base = bases.pop(0)
self.print("{0:node}", base)
while bases:
base = bases.pop(0)
self.print(", {0:node}", base)
keywords = list(node.keywords)
if keywords:
if i: self.print(', ')
i += 1
keyword = keywords.pop(0)
self.print("{0:node}", keyword)
while keywords:
keyword = keywords.pop(0)
self.print(", {0:node}", keyword)
if node.starargs:
if i: self.print(', ')
i += 1
self.print("*{0:node}", node.starargs)
if node.kwargs:
if i: self.print(', ')
i += 1
self.print("*{0:node}", node.kwargs)
self.print(')')
self.print(":")
with self.indenter:
if node.body:
for stmnt in node.body:
self.visit(stmnt)
else:
self.print("pass\n\n")
def python_source(ast, file=sys.stdout):
'''
Generate executable python source code from an ast node.
:param ast: ast node
:param file: file to write output to.
'''
gen = SourceGen()
gen.visit(ast)
gen.dump(file)
def dump_python_source(ast):
'''
:return: a string containing executable python source code from an ast node.
:param ast: ast node
'''
gen = SourceGen()
gen.visit(ast)
return gen.dumps()
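# Hedged usage sketch (the parsed snippet is invented, guarded so it never runs
# on import): regenerate source text from an AST built with the standard
# ``ast`` module.
if False:  # example only
    import ast
    tree = ast.parse("x = 1 + 2\nprint(x)\n")
    print(dump_python_source(tree))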
|
jasonyaw/SFrame
|
oss_src/unity/python/sframe/meta/asttools/visitors/pysourcegen.py
|
Python
|
bsd-3-clause
| 23,969
|
[
"VisIt"
] |
a007dc8b9e731fb49142e434f66a0a89ef9eae12b1ed65f3dcdfa7768977c9c5
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""This module provides functions and classes related to Task objects."""
from __future__ import division, print_function, unicode_literals, absolute_import
import os
import time
import datetime
import shutil
import collections
import abc
import copy
import yaml
import six
import numpy as np
from pprint import pprint
from itertools import product
from six.moves import map, zip, StringIO
from monty.dev import deprecated
from monty.string import is_string, list_strings
from monty.termcolor import colored, cprint
from monty.collections import AttrDict
from monty.functools import lazy_property, return_none_if_raise
from monty.json import MSONable
from monty.fnmatch import WildCard
from pymatgen.core.units import Memory
from pymatgen.serializers.json_coders import json_pretty_dump, pmg_serialize
from .utils import File, Directory, irdvars_for_ext, abi_splitext, FilepathFixer, Condition, SparseHistogram
from .qadapters import make_qadapter, QueueAdapter, QueueAdapterError
from . import qutils as qu
from .db import DBConnector
from .nodes import Status, Node, NodeError, NodeResults, NodeCorrections, FileNode, check_spectator
from . import abiinspect
from . import events
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"TaskManager",
"AbinitBuild",
"ParalHintsParser",
"ParalHints",
"AbinitTask",
"ScfTask",
"NscfTask",
"RelaxTask",
"DdkTask",
"PhononTask",
"SigmaTask",
"OpticTask",
"AnaddbTask",
]
import logging
logger = logging.getLogger(__name__)
# Tools and helper functions.
def straceback():
"""Returns a string with the traceback."""
import traceback
return traceback.format_exc()
def lennone(PropperOrNone):
if PropperOrNone is None:
return 0
else:
return len(PropperOrNone)
def nmltostring(nml):
"""Convert a dictionary representing a Fortran namelist into a string."""
if not isinstance(nml,dict):
raise ValueError("nml should be a dict !")
curstr = ""
for key,group in nml.items():
namelist = ["&" + key]
for k, v in group.items():
if isinstance(v, list) or isinstance(v, tuple):
namelist.append(k + " = " + ",".join(map(str, v)) + ",")
elif is_string(v):
namelist.append(k + " = '" + str(v) + "',")
else:
namelist.append(k + " = " + str(v) + ",")
namelist.append("/")
curstr = curstr + "\n".join(namelist) + "\n"
return curstr
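# Hedged usage sketch (group and variable names invented, guarded so it never
# runs on import): one namelist group with a string and an integer variable.
if False:  # example only
    print(nmltostring({"files": {"prefix": "run1", "ndtset": 2}}))
    # &files
    # prefix = 'run1',
    # ndtset = 2,
    # /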
class TaskResults(NodeResults):
JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
JSON_SCHEMA["properties"] = {
"executable": {"type": "string", "required": True},
}
@classmethod
def from_node(cls, task):
"""Initialize an instance from an :class:`AbinitTask` instance."""
new = super(TaskResults, cls).from_node(task)
new.update(
executable=task.executable,
#executable_version:
#task_events=
pseudos=[p.as_dict() for p in task.input.pseudos],
#input=task.input
)
new.register_gridfs_files(
run_abi=(task.input_file.path, "t"),
run_abo=(task.output_file.path, "t"),
)
return new
class ParalConf(AttrDict):
"""
This object stores the parameters associated with one
of the possible parallel configurations reported by ABINIT.
Essentially it is a dictionary whose values can also be accessed
as attributes. It also provides default values for selected keys
that might not be present in the ABINIT dictionary.
Example:
--- !Autoparal
info:
version: 1
autoparal: 1
max_ncpus: 108
configurations:
- tot_ncpus: 2 # Total number of CPUs
mpi_ncpus: 2 # Number of MPI processes.
omp_ncpus: 1 # Number of OMP threads (1 if not present)
mem_per_cpu: 10 # Estimated memory requirement per MPI processor in Megabytes.
efficiency: 0.4 # 1.0 corresponds to an "expected" optimal efficiency (strong scaling).
vars: { # Dictionary with the variables that should be added to the input.
varname1: varvalue1
varname2: varvalue2
}
-
...
For paral_kgb we have:
nproc npkpt npspinor npband npfft bandpp weight
108 1 1 12 9 2 0.25
108 1 1 108 1 2 27.00
96 1 1 24 4 1 1.50
84 1 1 12 7 2 0.25
"""
_DEFAULTS = {
"omp_ncpus": 1,
"mem_per_cpu": 0.0,
"vars": {}
}
def __init__(self, *args, **kwargs):
super(ParalConf, self).__init__(*args, **kwargs)
# Add default values if not already in self.
for k, v in self._DEFAULTS.items():
if k not in self:
self[k] = v
def __str__(self):
stream = StringIO()
pprint(self, stream=stream)
return stream.getvalue()
# TODO: Change name in abinit
# Remove tot_ncpus from Abinit
@property
def num_cores(self):
return self.mpi_procs * self.omp_threads
@property
def mem_per_proc(self):
return self.mem_per_cpu
@property
def mpi_procs(self):
return self.mpi_ncpus
@property
def omp_threads(self):
return self.omp_ncpus
@property
def speedup(self):
"""Estimated speedup reported by ABINIT."""
return self.efficiency * self.num_cores
@property
def tot_mem(self):
"""Estimated total memory in Mbs (computed from mem_per_proc)"""
return self.mem_per_proc * self.mpi_procs
class ParalHintsError(Exception):
"""Base error class for `ParalHints`."""
class ParalHintsParser(object):
Error = ParalHintsError
def __init__(self):
# Used to push error strings.
self._errors = collections.deque(maxlen=100)
def add_error(self, errmsg):
self._errors.append(errmsg)
def parse(self, filename):
"""
Read the `AutoParal` section (YAML format) from filename.
Assumes the file contains only one section.
"""
with abiinspect.YamlTokenizer(filename) as r:
doc = r.next_doc_with_tag("!Autoparal")
try:
d = yaml.load(doc.text_notag)
return ParalHints(info=d["info"], confs=d["configurations"])
except:
import traceback
sexc = traceback.format_exc()
err_msg = "Wrong YAML doc:\n%s\n\nException:\n%s" % (doc.text, sexc)
self.add_error(err_msg)
logger.critical(err_msg)
raise self.Error(err_msg)
class ParalHints(collections.Iterable):
"""
Iterable with the hints for the parallel execution reported by ABINIT.
"""
Error = ParalHintsError
def __init__(self, info, confs):
self.info = info
self._confs = [ParalConf(**d) for d in confs]
@classmethod
def from_mpi_omp_lists(cls, mpi_procs, omp_threads):
"""
Build a list of Parallel configurations from two lists
containing the number of MPI processes and the number of OpenMP threads
i.e. product(mpi_procs, omp_threads).
The configurations have parallel efficiency set to 1.0 and no input variables.
Mainly used for preparing benchmarks.
"""
info = {}
confs = [ParalConf(mpi_ncpus=p, omp_ncpus=t, efficiency=1.0)
for p, t in product(mpi_procs, omp_threads)]
return cls(info, confs)
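# Hedged usage sketch (CPU counts invented):
#   hints = ParalHints.from_mpi_omp_lists([1, 2, 4], [1, 2])
# builds the six MPI/OpenMP combinations, each with efficiency 1.0, mainly for
# preparing benchmarks.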
def __getitem__(self, key):
return self._confs[key]
def __iter__(self):
return self._confs.__iter__()
def __len__(self):
return self._confs.__len__()
def __repr__(self):
return "\n".join(str(conf) for conf in self)
def __str__(self):
return repr(self)
@lazy_property
def max_cores(self):
"""Maximum number of cores."""
return max(c.mpi_procs * c.omp_threads for c in self)
@lazy_property
def max_mem_per_proc(self):
"""Maximum memory per MPI process."""
return max(c.mem_per_proc for c in self)
@lazy_property
def max_speedup(self):
"""Maximum speedup."""
return max(c.speedup for c in self)
@lazy_property
def max_efficiency(self):
"""Maximum parallel efficiency."""
return max(c.efficiency for c in self)
@pmg_serialize
def as_dict(self, **kwargs):
return {"info": self.info, "confs": self._confs}
@classmethod
def from_dict(cls, d):
return cls(info=d["info"], confs=d["confs"])
def copy(self):
"""Shallow copy of self."""
return copy.copy(self)
def select_with_condition(self, condition, key=None):
"""
Remove all the configurations that do not satisfy the given condition.
Args:
condition: dict or :class:`Condition` object with operators expressed with a Mongodb-like syntax
key: Selects the sub-dictionary on which condition is applied, e.g. key="vars"
if we have to filter the configurations depending on the values in vars
"""
condition = Condition.as_condition(condition)
new_confs = []
for conf in self:
# Select the object on which condition is applied
obj = conf if key is None else AttrDict(conf[key])
add_it = condition(obj=obj)
#if key is "vars": print("conf", conf, "added:", add_it)
if add_it: new_confs.append(conf)
self._confs = new_confs
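# Hedged usage sketch (the threshold is invented), mirroring the Mongodb-like
# syntax used elsewhere in this module:
#   hints.select_with_condition({"efficiency": {"$gte": 0.5}})
# keeps only configurations whose parallel efficiency is at least 0.5.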
def sort_by_efficiency(self, reverse=True):
"""Sort the configurations in place. items with highest efficiency come first"""
self._confs.sort(key=lambda c: c.efficiency, reverse=reverse)
return self
def sort_by_speedup(self, reverse=True):
"""Sort the configurations in place. items with highest speedup come first"""
self._confs.sort(key=lambda c: c.speedup, reverse=reverse)
return self
def sort_by_mem_per_proc(self, reverse=False):
"""Sort the configurations in place. items with lowest memory per proc come first."""
# Avoid sorting if mem_per_cpu is not available.
if any(c.mem_per_proc > 0.0 for c in self):
self._confs.sort(key=lambda c: c.mem_per_proc, reverse=reverse)
return self
def multidimensional_optimization(self, priorities=("speedup", "efficiency")):
# Mapping property --> options passed to sparse_histogram
opts = dict(speedup=dict(step=1.0), efficiency=dict(step=0.1), mem_per_proc=dict(step=1024))
#opts = dict(zip(priorities, bin_widths))
opt_confs = self._confs
for priority in priorities:
histogram = SparseHistogram(opt_confs, key=lambda c: getattr(c, priority), **opts[priority])
pos = 0 if priority == "mem_per_proc" else -1
opt_confs = histogram.values[pos]
#histogram.plot(show=True, savefig="hello.pdf")
return self.__class__(info=self.info, confs=opt_confs)
#def histogram_efficiency(self, step=0.1):
# """Returns a :class:`SparseHistogram` with configuration grouped by parallel efficiency."""
# return SparseHistogram(self._confs, key=lambda c: c.efficiency, step=step)
#def histogram_speedup(self, step=1.0):
# """Returns a :class:`SparseHistogram` with configuration grouped by parallel speedup."""
# return SparseHistogram(self._confs, key=lambda c: c.speedup, step=step)
#def histogram_memory(self, step=1024):
# """Returns a :class:`SparseHistogram` with configuration grouped by memory."""
# return SparseHistogram(self._confs, key=lambda c: c.speedup, step=step)
#def filter(self, qadapter):
# """Return a new list of configurations that can be executed on the `QueueAdapter` qadapter."""
# new_confs = [pconf for pconf in self if qadapter.can_run_pconf(pconf)]
# return self.__class__(info=self.info, confs=new_confs)
def get_ordered_with_policy(self, policy, max_ncpus):
"""
Sort and return a new list of configurations ordered according to the :class:`TaskPolicy` policy.
"""
# Build a new list since we are going to change the object in place.
hints = self.__class__(self.info, confs=[c for c in self if c.num_cores <= max_ncpus])
# First select the configurations satisfying the condition specified by the user (if any)
bkp_hints = hints.copy()
if policy.condition:
logger.info("Applying condition %s" % str(policy.condition))
hints.select_with_condition(policy.condition)
# Undo the change if no configuration fulfills the requirements.
if not hints:
hints = bkp_hints
logger.warning("Empty list of configurations after policy.condition")
# Now filter the configurations depending on the values in vars
bkp_hints = hints.copy()
if policy.vars_condition:
logger.info("Applying vars_condition %s" % str(policy.vars_condition))
hints.select_with_condition(policy.vars_condition, key="vars")
# Undo the change if no configuration fulfills the requirements.
if not hints:
hints = bkp_hints
logger.warning("Empty list of configurations after policy.vars_condition")
if len(policy.autoparal_priorities) == 1:
# Example: hints.sort_by_speedup()
if policy.autoparal_priorities[0] in ['efficiency', 'speedup', 'mem_per_proc']:
getattr(hints, "sort_by_" + policy.autoparal_priorities[0])()
elif isinstance(policy.autoparal_priorities[0], collections.Mapping):
if policy.autoparal_priorities[0]['meta_priority'] == 'highest_speedup_minimum_efficiency_cutoff':
min_efficiency = policy.autoparal_priorities[0].get('minimum_efficiency', 1.0)
hints.select_with_condition({'efficiency': {'$gte': min_efficiency}})
hints.sort_by_speedup()
else:
hints = hints.multidimensional_optimization(priorities=policy.autoparal_priorities)
if len(hints) == 0: raise ValueError("len(hints) == 0")
#TODO: make sure that num_cores == 1 is never selected when we have more than one configuration
#if len(hints) > 1:
# hints.select_with_condition(dict(num_cores={"$eq": 1}))
# Return final (ordered) list of configurations (best first).
return hints
class TaskPolicy(object):
"""
This object stores the parameters used by the :class:`TaskManager` to
create the submission script and/or to modify the ABINIT variables
governing the parallel execution. A `TaskPolicy` object contains
a set of variables that specify the launcher, as well as the options
and the conditions used to select the optimal configuration for the parallel run
"""
@classmethod
def as_policy(cls, obj):
"""
Converts an object obj into a :class:`TaskPolicy`. Accepts:
* None
* TaskPolicy
* dict-like object
"""
if obj is None:
# Use default policy.
return TaskPolicy()
else:
if isinstance(obj, cls):
return obj
elif isinstance(obj, collections.Mapping):
return cls(**obj)
else:
raise TypeError("Don't know how to convert type %s to %s" % (type(obj), cls))
@classmethod
def autodoc(cls):
return """
autoparal: # (integer). 0 to disable the autoparal feature (DEFAULT: 1 i.e. autoparal is on)
condition: # condition used to filter the autoparal configurations (Mongodb-like syntax).
# DEFAULT: empty i.e. ignored.
vars_condition: # Condition used to filter the list of ABINIT variables reported by autoparal
# (Mongodb-like syntax). DEFAULT: empty i.e. ignored.
frozen_timeout: # A job is considered frozen and its status is set to ERROR if no change to
# the output file has been done for `frozen_timeout` seconds. Accepts int with seconds or
# string in slurm form i.e. days-hours:minutes:seconds. DEFAULT: 1 hour.
precedence: # Under development.
autoparal_priorities: # Under development.
"""
def __init__(self, **kwargs):
"""
See autodoc
"""
self.autoparal = kwargs.pop("autoparal", 1)
self.condition = Condition(kwargs.pop("condition", {}))
self.vars_condition = Condition(kwargs.pop("vars_condition", {}))
self.precedence = kwargs.pop("precedence", "autoparal_conf")
self.autoparal_priorities = kwargs.pop("autoparal_priorities", ["speedup"])
#self.autoparal_priorities = kwargs.pop("autoparal_priorities", ["speedup", "efficiency", "memory"])
# TODO frozen_timeout could be computed as a fraction of the timelimit of the qadapter!
self.frozen_timeout = qu.slurm_parse_timestr(kwargs.pop("frozen_timeout", "0-1:00:00"))
if kwargs:
raise ValueError("Found invalid keywords in policy section:\n %s" % str(kwargs.keys()))
# Consistency check.
if self.precedence not in ("qadapter", "autoparal_conf"):
raise ValueError("Wrong value for policy.precedence, should be qadapter or autoparal_conf")
def __str__(self):
lines = []
app = lines.append
for k, v in self.__dict__.items():
if k.startswith("_"): continue
app("%s: %s" % (k, v))
return "\n".join(lines)
class ManagerIncreaseError(Exception):
"""
Exception raised by the manager if the increase request failed
"""
class FixQueueCriticalError(Exception):
"""
error raised when an error could not be fixed at the task level
"""
# Global variable used to store the task manager returned by `from_user_config`.
_USER_CONFIG_TASKMANAGER = None
class TaskManager(MSONable):
"""
A `TaskManager` is responsible for the generation of the job script and the submission
of the task, as well as for the specification of the parameters passed to the resource manager
(e.g. Slurm, PBS ...) and/or the run-time specification of the ABINIT variables governing the parallel execution.
A `TaskManager` delegates the generation of the submission script and the submission of the task to the :class:`QueueAdapter`.
A `TaskManager` has a :class:`TaskPolicy` that governs the specification of the parameters for the parallel executions.
Ideally, the TaskManager should be the **main entry point** used by the task to deal with job submission/optimization
"""
YAML_FILE = "manager.yml"
USER_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")
ENTRIES = {"policy", "qadapters", "db_connector", "batch_adapter"}
@classmethod
def autodoc(cls):
from .db import DBConnector
s = """
# TaskManager configuration file (YAML Format)
policy:
# Dictionary with options used to control the execution of the tasks.
qadapters:
# List of qadapters objects (mandatory)
- # qadapter_1
- # qadapter_2
db_connector:
# Connection to MongoDB database (optional)
batch_adapter:
# Adapter used to submit flows with batch script. (optional)
##########################################
# Individual entries are documented below:
##########################################
"""
s += "policy: " + TaskPolicy.autodoc() + "\n"
s += "qadapter: " + QueueAdapter.autodoc() + "\n"
#s += "db_connector: " + DBConnector.autodoc()
return s
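# A minimal, illustrative manager.yml consistent with the template above. The nested qadapter keys
# (queue, job, limits, hardware) follow the QueueAdapter documentation; the specific values here are
# assumptions chosen only to show the overall shape of the file.
#
#   policy:
#       autoparal: 1
#   qadapters:
#       - priority: 1
#         queue: {qtype: shell, qname: localhost}
#         job: {mpi_runner: mpirun}
#         limits: {min_cores: 1, max_cores: 2, timelimit: "0:30:00"}
#         hardware: {num_nodes: 1, sockets_per_node: 1, cores_per_socket: 2, mem_per_node: 4Gb}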
@classmethod
def from_user_config(cls):
"""
Initialize the :class:`TaskManager` from the YAML file 'manager.yml'.
Search first in the working directory and then in the abipy configuration directory.
Raises:
RuntimeError if file is not found.
"""
global _USER_CONFIG_TASKMANAGER
if _USER_CONFIG_TASKMANAGER is not None:
return _USER_CONFIG_TASKMANAGER
# Try in the current directory then in user configuration directory.
path = os.path.join(os.getcwd(), cls.YAML_FILE)
if not os.path.exists(path):
path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)
if not os.path.exists(path):
raise RuntimeError(colored(
"\nCannot locate %s neither in current directory nor in %s\n"
"!!! PLEASE READ THIS: !!!\n"
"To use abipy to run jobs this file must be present\n"
"It provides a description of the cluster/computer you are running on\n"
"Examples are provided in abipy/data/managers." % (cls.YAML_FILE, path), color="red"))
_USER_CONFIG_TASKMANAGER = cls.from_file(path)
return _USER_CONFIG_TASKMANAGER
@classmethod
def from_file(cls, filename):
"""Read the configuration parameters from the Yaml file filename."""
try:
with open(filename, "r") as fh:
return cls.from_dict(yaml.load(fh))
except Exception as exc:
print("Error while reading TaskManager parameters from %s\n" % filename)
raise
@classmethod
def from_string(cls, s):
"""Create an instance from string s containing a YAML dictionary."""
return cls.from_dict(yaml.load(s))
@classmethod
def as_manager(cls, obj):
"""
Convert obj into TaskManager instance. Accepts string, filepath, dictionary, `TaskManager` object.
If obj is None, the manager is initialized from the user config file.
"""
if isinstance(obj, cls): return obj
if obj is None: return cls.from_user_config()
if is_string(obj):
if os.path.exists(obj):
return cls.from_file(obj)
else:
return cls.from_string(obj)
elif isinstance(obj, collections.Mapping):
return cls.from_dict(obj)
else:
raise TypeError("Don't know how to convert type %s to TaskManager" % type(obj))
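# Illustrative (commented) examples of the inputs accepted by as_manager:
#
#   manager = TaskManager.as_manager(None)                     # read manager.yml from cwd or ~/.abinit/abipy
#   manager = TaskManager.as_manager("/path/to/manager.yml")   # path to a YAML file
#   manager = TaskManager.as_manager(yaml_string)              # string with a YAML dictionary
#   manager = TaskManager.as_manager({"qadapters": [...]})     # dict-like object
#   manager = TaskManager.as_manager(other_manager)            # TaskManager instance, returned as-is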
@classmethod
def from_dict(cls, d):
"""Create an instance from a dictionary."""
return cls(**{k: v for k, v in d.items() if k in cls.ENTRIES})
@pmg_serialize
def as_dict(self):
return self._kwargs
def __init__(self, **kwargs):
"""
Args:
policy: None, dict or :class:`TaskPolicy` object (DEFAULT: None i.e. use the default policy).
qadapters: List of qadapters in YAML format.
db_connector: Dictionary with data used to connect to the database (optional).
"""
# Keep a copy of kwargs
self._kwargs = copy.deepcopy(kwargs)
self.policy = TaskPolicy.as_policy(kwargs.pop("policy", None))
# Initialize database connector (if specified)
self.db_connector = DBConnector(**kwargs.pop("db_connector", {}))
# Build list of QAdapters. Neglect an entry if priority == 0 or `enabled: no`.
qads = []
for d in kwargs.pop("qadapters"):
if not d.get("enabled", True): continue
qad = make_qadapter(**d)
if qad.priority > 0:
qads.append(qad)
elif qad.priority < 0:
raise ValueError("qadapter cannot have negative priority:\n %s" % qad)
if not qads:
raise ValueError("Received emtpy list of qadapters")
#if len(qads) != 1:
# raise NotImplementedError("For the time being multiple qadapters are not supported! Please use one adapter")
# Order qadapters according to priority.
qads = sorted(qads, key=lambda q: q.priority)
priorities = [q.priority for q in qads]
if len(priorities) != len(set(priorities)):
raise ValueError("Two or more qadapters have same priority. This is not allowed. Check taskmanager.yml")
self._qads, self._qadpos = tuple(qads), 0
# Initialize the qadapter for batch script submission.
d = kwargs.pop("batch_adapter", None)
self.batch_adapter = None
if d: self.batch_adapter = make_qadapter(**d)
#print("batch_adapter", self.batch_adapter)
if kwargs:
raise ValueError("Found invalid keywords in the taskmanager file:\n %s" % str(list(kwargs.keys())))
def to_shell_manager(self, mpi_procs=1):
"""
Returns a new `TaskManager` with the same parameters as self but replace the :class:`QueueAdapter`
with a :class:`ShellAdapter` with mpi_procs so that we can submit the job without passing through the queue.
"""
my_kwargs = copy.deepcopy(self._kwargs)
my_kwargs["policy"] = TaskPolicy(autoparal=0)
# On BlueGene we need at least two qadapters.
# One for running jobs on the computing nodes and another one
# for running small jobs on the frontend. These two qadapters
# will have different environments and different executables.
# If None of the q-adapters has qtype==shell, we change qtype to shell
# and we return a new Manager for sequential jobs with the same parameters as self.
# If the list contains a qadapter with qtype == shell, we ignore the remaining qadapters
# when we build the new Manager.
has_shell_qad = False
for d in my_kwargs["qadapters"]:
if d["queue"]["qtype"] == "shell": has_shell_qad = True
if has_shell_qad:
my_kwargs["qadapters"] = [d for d in my_kwargs["qadapters"] if d["queue"]["qtype"] == "shell"]
for d in my_kwargs["qadapters"]:
d["queue"]["qtype"] = "shell"
d["limits"]["min_cores"] = mpi_procs
d["limits"]["max_cores"] = mpi_procs
# If shell_runner is specified, replace mpi_runner with shell_runner
# in the script used to run jobs on the frontend.
# On some machines based on Slurm, indeed, mpirun/mpiexec is not available
# and jobs should be executed with `srun -n4 exec` when running on the computing nodes
# or with `exec` when running in sequential on the frontend.
if "job" in d and "shell_runner" in d["job"]:
shell_runner = d["job"]["shell_runner"]
#print("shell_runner:", shell_runner, type(shell_runner))
if not shell_runner or shell_runner == "None": shell_runner = ""
d["job"]["mpi_runner"] = shell_runner
#print("shell_runner:", shell_runner)
#print(my_kwargs)
new = self.__class__(**my_kwargs)
new.set_mpi_procs(mpi_procs)
return new
def new_with_fixed_mpi_omp(self, mpi_procs, omp_threads):
"""
Return a new `TaskManager` in which autoparal has been disabled.
The jobs will be executed with `mpi_procs` MPI processes and `omp_threads` OpenMP threads.
Useful for generating input files for benchmarks.
"""
new = self.deepcopy()
new.policy.autoparal = 0
new.set_mpi_procs(mpi_procs)
new.set_omp_threads(omp_threads)
return new
@property
def has_queue(self):
"""True if we are submitting jobs via a queue manager."""
return self.qadapter.QTYPE.lower() != "shell"
@property
def qads(self):
"""List of :class:`QueueAdapter` objects sorted according to priorities (highest comes first)"""
return self._qads
@property
def qadapter(self):
"""The qadapter used to submit jobs."""
return self._qads[self._qadpos]
def select_qadapter(self, pconfs):
"""
Given a list of parallel configurations, pconfs, this method select an `optimal` configuration
according to some criterion as well as the :class:`QueueAdapter` to use.
Args:
pconfs: :class:`ParalHints` object with the list of parallel configurations
Returns:
:class:`ParalConf` object with the `optimal` configuration.
"""
# Order the list of configurations according to policy.
policy, max_ncpus = self.policy, self.max_cores
pconfs = pconfs.get_ordered_with_policy(policy, max_ncpus)
if policy.precedence == "qadapter":
# Try to run on the qadapter with the highest priority.
for qadpos, qad in enumerate(self.qads):
possible_pconfs = [pc for pc in pconfs if qad.can_run_pconf(pc)]
if qad.allocation == "nodes":
#if qad.allocation in ["nodes", "force_nodes"]:
# Select the configuration divisible by nodes if possible.
for pconf in possible_pconfs:
if pconf.num_cores % qad.hw.cores_per_node == 0:
return self._use_qadpos_pconf(qadpos, pconf)
# Here we select the first one.
if possible_pconfs:
return self._use_qadpos_pconf(qadpos, possible_pconfs[0])
elif policy.precedence == "autoparal_conf":
# Try to run on the first pconf irrespectively of the priority of the qadapter.
for pconf in pconfs:
for qadpos, qad in enumerate(self.qads):
if qad.allocation == "nodes" and not pconf.num_cores % qad.hw.cores_per_node == 0:
continue # Ignore it. not very clean
if qad.can_run_pconf(pconf):
return self._use_qadpos_pconf(qadpos, pconf)
else:
raise ValueError("Wrong value of policy.precedence = %s" % policy.precedence)
# No qadapter could be found
raise RuntimeError("Cannot find qadapter for this run!")
def _use_qadpos_pconf(self, qadpos, pconf):
"""
This function is called when we have accepted the :class:`ParalConf` pconf.
Returns pconf
"""
self._qadpos = qadpos
# Change the number of MPI/OMP cores.
self.set_mpi_procs(pconf.mpi_procs)
if self.has_omp: self.set_omp_threads(pconf.omp_threads)
# Set memory per proc.
#FIXME: Fixer may have changed the memory per proc and should not be resetted by ParalConf
#self.set_mem_per_proc(pconf.mem_per_proc)
return pconf
def __str__(self):
"""String representation."""
lines = []
app = lines.append
#app("[Task policy]\n%s" % str(self.policy))
for i, qad in enumerate(self.qads):
app("[Qadapter %d]\n%s" % (i, str(qad)))
app("Qadapter selected: %d" % self._qadpos)
if self.has_db:
app("[MongoDB database]:")
app(str(self.db_connector))
return "\n".join(lines)
@property
def has_db(self):
"""True if we are using MongoDB database"""
return bool(self.db_connector)
@property
def has_omp(self):
"""True if we are using OpenMP parallelization."""
return self.qadapter.has_omp
@property
def num_cores(self):
"""Total number of CPUs used to run the task."""
return self.qadapter.num_cores
@property
def mpi_procs(self):
"""Number of MPI processes."""
return self.qadapter.mpi_procs
@property
def mem_per_proc(self):
"""Memory per MPI process."""
return self.qadapter.mem_per_proc
@property
def omp_threads(self):
"""Number of OpenMP threads"""
return self.qadapter.omp_threads
def deepcopy(self):
"""Deep copy of self."""
return copy.deepcopy(self)
def set_mpi_procs(self, mpi_procs):
"""Set the number of MPI processes to use."""
self.qadapter.set_mpi_procs(mpi_procs)
def set_omp_threads(self, omp_threads):
"""Set the number of OpenMp threads to use."""
self.qadapter.set_omp_threads(omp_threads)
def set_mem_per_proc(self, mem_mb):
"""Set the memory (in Megabytes) per CPU."""
self.qadapter.set_mem_per_proc(mem_mb)
@property
def max_cores(self):
"""
Maximum number of cores that can be used.
This value is mainly used in the autoparal part to get the list of possible configurations.
"""
return max(q.hint_cores for q in self.qads)
def get_njobs_in_queue(self, username=None):
"""
Returns the number of jobs in the queue.
Returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
return self.qadapter.get_njobs_in_queue(username=username)
def cancel(self, job_id):
"""Cancel the job. Returns exit status."""
return self.qadapter.cancel(job_id)
def write_jobfile(self, task, **kwargs):
"""
Write the submission script. Return the path of the script
================ ============================================
kwargs Meaning
================ ============================================
exec_args List of arguments passed to task.executable.
Default: no arguments.
================ ============================================
"""
script = self.qadapter.get_script_str(
job_name=task.name,
launch_dir=task.workdir,
executable=task.executable,
qout_path=task.qout_file.path,
qerr_path=task.qerr_file.path,
stdin=task.files_file.path,
stdout=task.log_file.path,
stderr=task.stderr_file.path,
exec_args=kwargs.pop("exec_args", []),
)
# Write the script.
with open(task.job_file.path, "w") as fh:
fh.write(script)
task.job_file.chmod(0o740)
return task.job_file.path
def launch(self, task, **kwargs):
"""
Build the input files and submit the task via the :class:`Qadapter`
Args:
task: :class:`Task` object
Returns:
Process object.
"""
if task.status == task.S_LOCKED:
raise ValueError("You shall not submit a locked task!")
# Build the task
task.build()
# Pass information on the time limit to Abinit (we always assume ndtset == 1)
#if False and isinstance(task, AbinitTask):
if isinstance(task, AbinitTask):
args = kwargs.get("exec_args", [])
if args is None: args = []
args = args[:]
args.append("--timelimit %s" % qu.time2slurm(self.qadapter.timelimit))
kwargs["exec_args"] = args
logger.info("Will pass timelimit option to abinit %s:" % args)
# Write the submission script
script_file = self.write_jobfile(task, **kwargs)
# Submit the task and save the queue id.
try:
qjob, process = self.qadapter.submit_to_queue(script_file)
task.set_status(task.S_SUB, msg='Submitted to queue')
task.set_qjob(qjob)
return process
except self.qadapter.MaxNumLaunchesError as exc:
# TODO: Here we should try to switch to another qadapter
# 1) Find a new parallel configuration in those stored in task.pconfs
# 2) Change the input file.
# 3) Regenerate the submission script
# 4) Relaunch
task.set_status(task.S_ERROR, msg="max_num_launches reached: %s" % str(exc))
raise
def get_collection(self, **kwargs):
"""Return the MongoDB collection used to store the results."""
return self.db_connector.get_collection(**kwargs)
def increase_mem(self):
# OLD
# with GW calculations in mind with GW mem = 10,
# the response function is in memory and not distributed
# we need to increase memory if jobs fail ...
# return self.qadapter.more_mem_per_proc()
try:
self.qadapter.more_mem_per_proc()
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to increase mem')
def increase_ncpus(self):
"""
Increase the number of CPUs. First ask the current qadapter; if it raises a QueueAdapterError,
switch to the next qadapter. If all of them fail, raise a ManagerIncreaseError.
"""
try:
self.qadapter.more_cores()
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to increase ncpu')
def increase_resources(self):
try:
self.qadapter.more_cores()
return
except QueueAdapterError:
pass
try:
self.qadapter.more_mem_per_proc()
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to increase resources')
def exclude_nodes(self, nodes):
try:
self.qadapter.exclude_nodes(nodes=nodes)
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to exclude nodes')
def increase_time(self):
try:
self.qadapter.more_time()
except QueueAdapterError:
# here we should try to switch to an other qadapter
raise ManagerIncreaseError('manager failed to increase time')
class AbinitBuild(object):
"""
This object stores information on the options used to build Abinit
.. attribute:: info
String with build information as produced by `abinit -b`
.. attribute:: version
Abinit version number e.g. 8.0.1 (string)
.. attribute:: has_netcdf
True if netcdf is enabled.
.. attribute:: has_omp
True if OpenMP is enabled.
.. attribute:: has_mpi
True if MPI is enabled.
.. attribute:: has_mpiio
True if MPI-IO is supported.
"""
def __init__(self, workdir=None, manager=None):
# Build a simple manager to run the job in a shell subprocess.
manager = TaskManager.as_manager(manager).to_shell_manager(mpi_procs=1)
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
# Generate a shell script to execute `abinit -b`
stdout = os.path.join(workdir, "run.abo")
script = manager.qadapter.get_script_str(
job_name="abinit_b",
launch_dir=workdir,
executable="abinit",
qout_path=os.path.join(workdir, "queue.qout"),
qerr_path=os.path.join(workdir, "queue.qerr"),
#stdin=os.path.join(workdir, "run.files"),
stdout=stdout,
stderr=os.path.join(workdir, "run.err"),
exec_args=["-b"],
)
# Execute the script.
script_file = os.path.join(workdir, "job.sh")
with open(script_file, "wt") as fh:
fh.write(script)
qjob, process = manager.qadapter.submit_to_queue(script_file)
process.wait()
# To avoid: ResourceWarning: unclosed file <_io.BufferedReader name=87> in py3k
process.stderr.close()
if process.returncode != 0:
logger.critical("Error while executing %s" % script_file)
with open(stdout, "rt") as fh:
self.info = fh.read()
# info string has the following format.
"""
=== Build Information ===
Version : 8.0.1
Build target : x86_64_darwin15.0.0_gnu5.3
Build date : 20160122
=== Compiler Suite ===
C compiler : gnu
C++ compiler : gnuApple
Fortran compiler : gnu5.3
CFLAGS : -g -O2 -mtune=native -march=native
CXXFLAGS : -g -O2 -mtune=native -march=native
FCFLAGS : -g -ffree-line-length-none
FC_LDFLAGS :
=== Optimizations ===
Debug level : basic
Optimization level : standard
Architecture : unknown_unknown
=== Multicore ===
Parallel build : yes
Parallel I/O : yes
openMP support : no
GPU support : no
=== Connectors / Fallbacks ===
Connectors on : yes
Fallbacks on : yes
DFT flavor : libxc-fallback+atompaw-fallback+wannier90-fallback
FFT flavor : none
LINALG flavor : netlib
MATH flavor : none
TIMER flavor : abinit
TRIO flavor : netcdf+etsf_io-fallback
=== Experimental features ===
Bindings : @enable_bindings@
Exports : no
GW double-precision : yes
=== Bazaar branch information ===
Branch ID : gmatteo@gmac-20160112110440-lf6exhneqim9082h
Revision : 1226
Committed : 0
"""
self.has_netcdf = False
self.has_omp = False
self.has_mpi, self.has_mpiio = False, False
def yesno2bool(line):
ans = line.split()[-1]
return dict(yes=True, no=False)[ans]
# Parse info.
for line in self.info.splitlines():
if "Version" in line: self.version = line.split()[-1]
if "TRIO flavor" in line:
self.has_netcdf = "netcdf" in line
if "openMP support" in line: self.has_omp = yesno2bool(line)
if "Parallel build" in line: self.has_mpi = yesno2bool(line)
if "Parallel I/O" in line: self.has_mpiio = yesno2bool(line)
def __str__(self):
lines = []
app = lines.append
app("Abinit Build Information:")
app(" Abinit version: %s" % self.version)
app(" MPI: %s, MPI-IO: %s, OpenMP: %s" % (self.has_mpi, self.has_mpiio, self.has_omp))
app(" Netcdf: %s" % self.has_netcdf)
return "\n".join(lines)
class FakeProcess(object):
"""
This object is attached to a :class:`Task` instance if the task has not been submitted.
This trick allows us to simulate a process that is still running so that
we can safely poll task.process.
"""
def poll(self):
return None
def wait(self):
raise RuntimeError("Cannot wait a FakeProcess")
def communicate(self, input=None):
raise RuntimeError("Cannot communicate with a FakeProcess")
def kill(self):
raise RuntimeError("Cannot kill a FakeProcess")
@property
def returncode(self):
return None
class MyTimedelta(datetime.timedelta):
"""A customized version of timedelta whose __str__ method doesn't print microseconds."""
def __new__(cls, days, seconds, microseconds):
return datetime.timedelta.__new__(cls, days, seconds, microseconds)
def __str__(self):
"""Remove microseconds from timedelta default __str__"""
s = super(MyTimedelta, self).__str__()
microsec = s.find(".")
if microsec != -1: s = s[:microsec]
return s
@classmethod
def as_timedelta(cls, delta):
"""Convert delta into a MyTimedelta object."""
# Cannot monkey patch the __class__ and must pass through __new__ as the object is immutable.
if isinstance(delta, cls): return delta
return cls(delta.days, delta.seconds, delta.microseconds)
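# Example of the difference with the standard timedelta (illustrative):
#
#   import datetime
#   delta = datetime.timedelta(seconds=3661, microseconds=123456)
#   str(delta)                             # '1:01:01.123456'
#   str(MyTimedelta.as_timedelta(delta))   # '1:01:01' (microseconds stripped)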
class TaskDateTimes(object):
"""
Small object containing useful :class:`datetime.datetime` objects associated to important events.
.. attributes:
init: initialization datetime
submission: submission datetime
start: Begin of execution.
end: End of execution.
"""
def __init__(self):
self.init = datetime.datetime.now()
self.submission, self.start, self.end = None, None, None
def __str__(self):
lines = []
app = lines.append
app("Initialization done on: %s" % self.init)
if self.submission is not None: app("Submitted on: %s" % self.submission)
if self.start is not None: app("Started on: %s" % self.start)
if self.end is not None: app("Completed on: %s" % self.end)
return "\n".join(lines)
def reset(self):
"""Reinitialize the counters."""
# Note: rebinding `self` would be a no-op; call __init__ to reset the counters on this instance.
self.__init__()
def get_runtime(self):
""":class:`timedelta` with the run-time, None if the Task is not running"""
if self.start is None: return None
if self.end is None:
delta = datetime.datetime.now() - self.start
else:
delta = self.end - self.start
return MyTimedelta.as_timedelta(delta)
def get_time_inqueue(self):
"""
:class:`timedelta` with the time spent in the Queue, None if the Task is not running
.. note:
This value is always greater than the real value computed by the resource manager
as we start to count only when check_status sets the `Task` status to S_RUN.
"""
if self.submission is None: return None
if self.start is None:
delta = datetime.datetime.now() - self.submission
else:
delta = self.start - self.submission
# This happens when we read the exact start datetime from the ABINIT log file.
if delta.total_seconds() < 0: delta = datetime.timedelta(seconds=0)
return MyTimedelta.as_timedelta(delta)
class TaskError(NodeError):
"""Base Exception for :class:`Task` methods"""
class TaskRestartError(TaskError):
"""Exception raised while trying to restart the :class:`Task`."""
class Task(six.with_metaclass(abc.ABCMeta, Node)):
"""A Task is a node that performs some kind of calculation."""
# Use class attributes for TaskErrors so that we don't have to import them.
Error = TaskError
RestartError = TaskRestartError
# List of `AbinitEvent` subclasses that are tested in the check_status method.
# Subclasses should provide their own list if they need to check the converge status.
CRITICAL_EVENTS = []
# Prefixes for Abinit (input, output, temporary) files.
Prefix = collections.namedtuple("Prefix", "idata odata tdata")
pj = os.path.join
prefix = Prefix(pj("indata", "in"), pj("outdata", "out"), pj("tmpdata", "tmp"))
del Prefix, pj
def __init__(self, input, workdir=None, manager=None, deps=None):
"""
Args:
input: :class:`AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
deps: Dictionary specifying the dependency of this node.
None means that this Task has no dependency.
"""
# Init the node
super(Task, self).__init__()
self._input = input
if workdir is not None:
self.set_workdir(workdir)
if manager is not None:
self.set_manager(manager)
# Handle possible dependencies.
if deps:
self.add_deps(deps)
# Date-time associated to submission, start and end.
self.datetimes = TaskDateTimes()
# Count the number of restarts.
self.num_restarts = 0
self._qjob = None
self.queue_errors = []
self.abi_errors = []
# Two flags that provide, dynamically, information on the scaling behaviour of a task. If any fixing process
# finds non-scaling behaviour, they should be switched off. If a task type is clearly not scaling, they should
# be switched off as well.
self.mem_scales = True
self.load_scales = True
def __getstate__(self):
"""
The returned state is pickled as the contents of the instance.
In this case we just remove the process since Subprocess objects cannot be pickled.
This is the reason why we have to store the returncode in self._returncode instead
of using self.process.returncode.
"""
return {k: v for k, v in self.__dict__.items() if k not in ["_process"]}
#@check_spectator
def set_workdir(self, workdir, chroot=False):
"""Set the working directory. Cannot be set more than once unless chroot is True"""
if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
self.workdir = os.path.abspath(workdir)
# Files required for the execution.
self.input_file = File(os.path.join(self.workdir, "run.abi"))
self.output_file = File(os.path.join(self.workdir, "run.abo"))
self.files_file = File(os.path.join(self.workdir, "run.files"))
self.job_file = File(os.path.join(self.workdir, "job.sh"))
self.log_file = File(os.path.join(self.workdir, "run.log"))
self.stderr_file = File(os.path.join(self.workdir, "run.err"))
self.start_lockfile = File(os.path.join(self.workdir, "__startlock__"))
# This file is produced by Abinit if nprocs > 1 and MPI_ABORT.
self.mpiabort_file = File(os.path.join(self.workdir, "__ABI_MPIABORTFILE__"))
# Directories with input|output|temporary data.
self.indir = Directory(os.path.join(self.workdir, "indata"))
self.outdir = Directory(os.path.join(self.workdir, "outdata"))
self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
# stderr and output file of the queue manager. Note extensions.
self.qerr_file = File(os.path.join(self.workdir, "queue.qerr"))
self.qout_file = File(os.path.join(self.workdir, "queue.qout"))
def set_manager(self, manager):
"""Set the :class:`TaskManager` used to launch the Task."""
self.manager = manager.deepcopy()
@property
def work(self):
"""The :class:`Work` containing this `Task`."""
return self._work
def set_work(self, work):
"""Set the :class:`Work` associated to this `Task`."""
if not hasattr(self, "_work"):
self._work = work
else:
if self._work != work:
raise ValueError("self._work != work")
@property
def flow(self):
"""The :class:`Flow` containing this `Task`."""
return self.work.flow
@lazy_property
def pos(self):
"""The position of the task in the :class:`Flow`"""
for i, task in enumerate(self.work):
if self == task:
return self.work.pos, i
raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow))
@property
def pos_str(self):
"""String representation of self.pos"""
return "w" + str(self.pos[0]) + "_t" + str(self.pos[1])
@property
def num_launches(self):
"""
Number of launches performed. This number includes both possible ABINIT restarts
as well as possible launches done due to errors encountered with the resource manager
or the hardware/software."""
return sum(q.num_launches for q in self.manager.qads)
@property
def input(self):
"""AbinitInput object."""
return self._input
def get_inpvar(self, varname, default=None):
"""Return the value of the ABINIT variable varname, None if not present."""
return self.input.get(varname, default)
@deprecated(message="_set_inpvars is deprecated. Use set_vars")
def _set_inpvars(self, *args, **kwargs):
return self.set_vars(*args, **kwargs)
def set_vars(self, *args, **kwargs):
"""
Set the values of the ABINIT variables in the input file. Return dict with old values.
"""
kwargs.update(dict(*args))
old_values = {vname: self.input.get(vname) for vname in kwargs}
self.input.set_vars(**kwargs)
if kwargs or old_values:
self.history.info("Setting input variables: %s" % str(kwargs))
self.history.info("Old values: %s" % str(old_values))
return old_values
@property
def initial_structure(self):
"""Initial structure of the task."""
return self.input.structure
def make_input(self, with_header=False):
"""Construct the input file of the calculation."""
s = str(self.input)
if with_header: s = str(self) + "\n" + s
return s
def ipath_from_ext(self, ext):
"""
Returns the path of the input file with extension ext.
Use it when the file does not exist yet.
"""
return os.path.join(self.workdir, self.prefix.idata + "_" + ext)
def opath_from_ext(self, ext):
"""
Returns the path of the output file with extension ext.
Use it when the file does not exist yet.
"""
return os.path.join(self.workdir, self.prefix.odata + "_" + ext)
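# Given the Prefix defined above (indata/in, outdata/out, tmpdata/tmp), these helpers build paths like
# the following (illustrative, assuming workdir="/scratch/w0_t0"):
#
#   task.ipath_from_ext("WFK")  ->  /scratch/w0_t0/indata/in_WFK
#   task.opath_from_ext("DEN")  ->  /scratch/w0_t0/outdata/out_DEN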
@abc.abstractproperty
def executable(self):
"""
Path to the executable associated to the task (internally stored in self._executable).
"""
def set_executable(self, executable):
"""Set the executable associate to this task."""
self._executable = executable
@property
def process(self):
try:
return self._process
except AttributeError:
# Attach a fake process so that we can poll it.
return FakeProcess()
@property
def is_completed(self):
"""True if the task has been executed."""
return self.status >= self.S_DONE
@property
def can_run(self):
"""The task can run if its status is < S_SUB and all the other dependencies (if any) are done!"""
all_ok = all(stat == self.S_OK for stat in self.deps_status)
return self.status < self.S_SUB and self.status != self.S_LOCKED and all_ok
#@check_spectator
def cancel(self):
"""Cancel the job. Returns 1 if job was cancelled."""
if self.queue_id is None: return 0
if self.status >= self.S_DONE: return 0
exit_status = self.manager.cancel(self.queue_id)
if exit_status != 0:
logger.warning("manager.cancel returned exit_status: %s" % exit_status)
return 0
# Remove output files and reset the status.
self.history.info("Job %s cancelled by user" % self.queue_id)
self.reset()
return 1
def with_fixed_mpi_omp(self, mpi_procs, omp_threads):
"""
Disable autoparal and force execution with `mpi_procs` MPI processes
and `omp_threads` OpenMP threads. Useful for generating benchmarks.
"""
manager = self.manager if hasattr(self, "manager") else self.flow.manager
self.manager = manager.new_with_fixed_mpi_omp(mpi_procs, omp_threads)
#@check_spectator
def _on_done(self):
self.fix_ofiles()
#@check_spectator
def _on_ok(self):
# Fix output file names.
self.fix_ofiles()
# Get results
results = self.on_ok()
self.finalized = True
return results
#@check_spectator
def on_ok(self):
"""
This method is called once the `Task` has reached status S_OK.
Subclasses should provide their own implementation
Returns:
Dictionary that must contain at least the following entries:
returncode:
0 on success.
message:
a string that should provide a human-readable description of what has been performed.
"""
return dict(returncode=0, message="Calling on_all_ok of the base class!")
#@check_spectator
def fix_ofiles(self):
"""
This method is called when the task reaches S_OK.
It changes the extension of particular output files
produced by Abinit so that the 'official' extension
is preserved e.g. out_1WF14 --> out_1WF
"""
filepaths = self.outdir.list_filepaths()
logger.info("in fix_ofiles with filepaths %s" % list(filepaths))
old2new = FilepathFixer().fix_paths(filepaths)
for old, new in old2new.items():
self.history.info("will rename old %s to new %s" % (old, new))
os.rename(old, new)
#@check_spectator
def _restart(self, submit=True):
"""
Called by restart once we have finished preparing the task for restarting.
Return:
True if task has been restarted
"""
self.set_status(self.S_READY, msg="Restarted on %s" % time.asctime())
# Increase the counter.
self.num_restarts += 1
self.history.info("Restarted, num_restarts %d" % self.num_restarts)
# Reset datetimes
self.datetimes.reset()
if submit:
# Remove the lock file
self.start_lockfile.remove()
# Relaunch the task.
fired = self.start()
if not fired: self.history.warning("Restart failed")
else:
fired = False
return fired
#@check_spectator
def restart(self):
"""
Restart the calculation. Subclasses should provide a concrete version that
performs all the actions needed for preparing the restart and then calls self._restart
to restart the task. The default implementation is empty.
Returns:
1 if job was restarted, 0 otherwise.
"""
logger.debug("Calling the **empty** restart method of the base class")
return 0
def poll(self):
"""Check if child process has terminated. Set and return returncode attribute."""
self._returncode = self.process.poll()
if self._returncode is not None:
self.set_status(self.S_DONE, "status set to Done")
return self._returncode
def wait(self):
"""Wait for child process to terminate. Set and return returncode attribute."""
self._returncode = self.process.wait()
try:
self.process.stderr.close()
except:
pass
self.set_status(self.S_DONE, "status set to Done")
return self._returncode
def communicate(self, input=None):
"""
Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached.
Wait for process to terminate. The optional input argument should be a string to be sent to the
child process, or None, if no data should be sent to the child.
communicate() returns a tuple (stdoutdata, stderrdata).
"""
stdoutdata, stderrdata = self.process.communicate(input=input)
self._returncode = self.process.returncode
self.set_status(self.S_DONE, "status set to Done")
return stdoutdata, stderrdata
def kill(self):
"""Kill the child."""
self.process.kill()
self.set_status(self.S_ERROR, "status set to Error by task.kill")
self._returncode = self.process.returncode
@property
def returncode(self):
"""
The child return code, set by poll() and wait() (and indirectly by communicate()).
A None value indicates that the process hasn't terminated yet.
A negative value -N indicates that the child was terminated by signal N (Unix only).
"""
try:
return self._returncode
except AttributeError:
return 0
def reset(self):
"""
Reset the task status. Mainly used if we made a silly mistake in the initial
setup of the queue manager and we want to fix it and rerun the task.
Returns:
0 on success, 1 if reset failed.
"""
# Can only reset tasks that are done.
# One should be able to reset 'Submitted' tasks (sometimes, they are not in the queue
# and we want to restart them)
if self.status != self.S_SUB and self.status < self.S_DONE: return 1
# Remove output files otherwise the EventParser will think the job is still running
self.output_file.remove()
self.log_file.remove()
self.stderr_file.remove()
self.start_lockfile.remove()
self.qerr_file.remove()
self.qout_file.remove()
self.set_status(self.S_INIT, msg="Reset on %s" % time.asctime())
self.set_qjob(None)
return 0
@property
@return_none_if_raise(AttributeError)
def queue_id(self):
"""Queue identifier returned by the Queue manager. None if not set"""
return self.qjob.qid
@property
@return_none_if_raise(AttributeError)
def qname(self):
"""Queue name identifier returned by the Queue manager. None if not set"""
return self.qjob.qname
@property
def qjob(self):
return self._qjob
def set_qjob(self, qjob):
"""Set info on queue after submission."""
self._qjob = qjob
@property
def has_queue(self):
"""True if we are submitting jobs via a queue manager."""
return self.manager.qadapter.QTYPE.lower() != "shell"
@property
def num_cores(self):
"""Total number of CPUs used to run the task."""
return self.manager.num_cores
@property
def mpi_procs(self):
"""Number of CPUs used for MPI."""
return self.manager.mpi_procs
@property
def omp_threads(self):
"""Number of CPUs used for OpenMP."""
return self.manager.omp_threads
@property
def mem_per_proc(self):
"""Memory per MPI process."""
return Memory(self.manager.mem_per_proc, "Mb")
@property
def status(self):
"""Gives the status of the task."""
return self._status
def lock(self, source_node):
"""Lock the task, source is the :class:`Node` that applies the lock."""
if self.status != self.S_INIT:
raise ValueError("Trying to lock a task with status %s" % self.status)
self._status = self.S_LOCKED
self.history.info("Locked by node %s", source_node)
def unlock(self, source_node, check_status=True):
"""
Unlock the task, set its status to `S_READY` so that the scheduler can submit it.
source_node is the :class:`Node` that removed the lock
Call task.check_status if check_status is True.
"""
if self.status != self.S_LOCKED:
raise RuntimeError("Trying to unlock a task with status %s" % self.status)
self._status = self.S_READY
if check_status: self.check_status()
self.history.info("Unlocked by %s", source_node)
#@check_spectator
def set_status(self, status, msg):
"""
Set and return the status of the task.
Args:
status: Status object or string representation of the status
msg: string with human-readable message used in the case of errors.
"""
# truncate string if it's long. msg will be logged in the object and we don't want to waste memory.
if len(msg) > 2000:
msg = msg[:2000]
msg += "\n... snip ...\n"
# Locked files must be explicitly unlocked
if self.status == self.S_LOCKED or status == self.S_LOCKED:
err_msg = (
"Locked files must be explicitly unlocked before calling set_status but\n"
"task.status = %s, input status = %s" % (self.status, status))
raise RuntimeError(err_msg)
status = Status.as_status(status)
changed = True
if hasattr(self, "_status"):
changed = (status != self._status)
self._status = status
if status == self.S_RUN:
# Set datetimes.start when the task enters S_RUN
if self.datetimes.start is None:
self.datetimes.start = datetime.datetime.now()
# Add new entry to history only if the status has changed.
if changed:
if status == self.S_SUB:
self.datetimes.submission = datetime.datetime.now()
self.history.info("Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s " % (
self.mpi_procs, self.omp_threads, self.mem_per_proc.to("Gb"), msg))
elif status == self.S_OK:
self.history.info("Task completed %s", msg)
elif status == self.S_ABICRITICAL:
self.history.info("Status set to S_ABI_CRITICAL due to: %s", msg)
else:
self.history.info("Status changed to %s. msg: %s", status, msg)
#######################################################
# The section below contains callbacks that should not
# be executed if we are in spectator_mode
#######################################################
if status == self.S_DONE:
# Execute the callback
self._on_done()
if status == self.S_OK:
# Finalize the task.
if not self.finalized:
self._on_ok()
# here we remove the output files of the task and of its parents.
if self.gc is not None and self.gc.policy == "task":
self.clean_output_files()
self.send_signal(self.S_OK)
return status
def check_status(self):
"""
This function checks the status of the task by inspecting the output and the
error files produced by the application and by the queue manager.
"""
# 1) see if the job is blocked
# 2) see if an error occurred while submitting the job. TODO: these problems can be solved
# 3) see if there is output
# 4) see if abinit reports problems
# 5) see if both err files exist and are empty
# 6) no output and no err files, the job must still be running
# 7) try to find out what caused the problems
# 8) there is a problem but we did not figure out what ...
# 9) the only way of landing here is if there is an output file but no err files...
# 1) A locked task can only be unlocked by calling set_status explicitly.
# an errored task should not end up here, but just to be sure
black_list = (self.S_LOCKED, self.S_ERROR)
#if self.status in black_list: return self.status
# 2) Check the returncode of the process (the process of submitting the job) first.
# this type of problem should also be handled by the scheduler error parser
if self.returncode != 0:
# The job was not submitted properly
return self.set_status(self.S_QCRITICAL, msg="return code %s" % self.returncode)
# If we have an abort file produced by Abinit
if self.mpiabort_file.exists:
return self.set_status(self.S_ABICRITICAL, msg="Found ABINIT abort file")
# Analyze the stderr file for Fortran runtime errors.
# getsize is 0 if the file is empty or it does not exist.
err_msg = None
if self.stderr_file.getsize() != 0:
#if self.stderr_file.exists:
err_msg = self.stderr_file.read()
# Analyze the stderr file of the resource manager runtime errors.
# TODO: Why are we looking for errors in queue.qerr?
qerr_info = None
if self.qerr_file.getsize() != 0:
#if self.qerr_file.exists:
qerr_info = self.qerr_file.read()
# Analyze the stdout file of the resource manager (needed for PBS !)
qout_info = None
if self.qout_file.getsize():
#if self.qout_file.exists:
qout_info = self.qout_file.read()
# Start to check ABINIT status if the output file has been created.
#if self.output_file.getsize() != 0:
if self.output_file.exists:
try:
report = self.get_event_report()
except Exception as exc:
msg = "%s exception while parsing event_report:\n%s" % (self, exc)
return self.set_status(self.S_ABICRITICAL, msg=msg)
if report is None:
return self.set_status(self.S_ERROR, msg="got None report!")
if report.run_completed:
# Here we set the correct timing data reported by Abinit
self.datetimes.start = report.start_datetime
self.datetimes.end = report.end_datetime
# Check if the calculation converged.
not_ok = report.filter_types(self.CRITICAL_EVENTS)
if not_ok:
return self.set_status(self.S_UNCONVERGED, msg='status set to unconverged based on abiout')
else:
return self.set_status(self.S_OK, msg="status set to ok based on abiout")
# Calculation still running or errors?
if report.errors:
# Abinit reported problems
logger.debug('Found errors in report')
for error in report.errors:
logger.debug(str(error))
try:
self.abi_errors.append(error)
except AttributeError:
self.abi_errors = [error]
# The job is unfixable due to ABINIT errors
logger.debug("%s: Found Errors or Bugs in ABINIT main output!" % self)
msg = "\n".join(map(repr, report.errors))
return self.set_status(self.S_ABICRITICAL, msg=msg)
# 5)
if self.stderr_file.exists and not err_msg:
if self.qerr_file.exists and not qerr_info:
# there is output and no errors
# The job still seems to be running
return self.set_status(self.S_RUN, msg='there is output and no errors: job still seems to be running')
# 6)
if not self.output_file.exists:
logger.debug("output_file does not exists")
if not self.stderr_file.exists and not self.qerr_file.exists:
# No output at all. The job is still in the queue.
return self.status
# 7) Analyze the files of the resource manager and abinit and execution err (mvs)
if qerr_info or qout_info:
from pymatgen.io.abinit.scheduler_error_parsers import get_parser
scheduler_parser = get_parser(self.manager.qadapter.QTYPE, err_file=self.qerr_file.path,
out_file=self.qout_file.path, run_err_file=self.stderr_file.path)
if scheduler_parser is None:
return self.set_status(self.S_QCRITICAL,
msg="Cannot find scheduler_parser for qtype %s" % self.manager.qadapter.QTYPE)
scheduler_parser.parse()
if scheduler_parser.errors:
self.queue_errors = scheduler_parser.errors
# the queue errors in the task
msg = "scheduler errors found:\n%s" % str(scheduler_parser.errors)
# self.history.critical(msg)
return self.set_status(self.S_QCRITICAL, msg=msg)
# The job is killed or crashed and we know what happened
elif lennone(qerr_info) > 0:
# if only qout_info, we are not necessarily in QCRITICAL state,
# since there will always be info in the qout file
msg = 'found unknown messages in the queue error: %s' % str(qerr_info)
self.history.info(msg)
print(msg)
# self.num_waiting += 1
# if self.num_waiting > 1000:
rt = self.datetimes.get_runtime().seconds
tl = self.manager.qadapter.timelimit
if rt > tl:
msg += 'set to error: runtime (%s) exceeded walltime (%s)' % (rt, tl)
print(msg)
return self.set_status(self.S_ERROR, msg=msg)
# The job may be killed or crashed but we don't know what happened
# It may also be that an innocent message was written to qerr, so we wait for a while
# it is set to QCritical, we will attempt to fix it by running on more resources
# 8) analyzing the err files and abinit output did not identify a problem
# but if the files are not empty we do have a problem but no way of solving it:
if lennone(err_msg) > 0:
msg = 'found error message:\n %s' % str(err_msg)
return self.set_status(self.S_QCRITICAL, msg=msg)
# The job is killed or crashed but we don't know what happened
# it is set to QCritical, we will attempt to fix it by running on more resources
# 9) if we still haven't returned there is no indication of any error and the job can only still be running
# but we should actually never land here, or we have delays in the file system ....
# print('the job still seems to be running maybe it is hanging without producing output... ')
# Check time of last modification.
if self.output_file.exists and \
(time.time() - self.output_file.get_stat().st_mtime > self.manager.policy.frozen_timeout):
msg = "Task seems to be frozen, last change more than %s [s] ago" % self.manager.policy.frozen_timeout
return self.set_status(self.S_ERROR, msg=msg)
# Handle weird case in which either run.abo, or run.log have not been produced
#if self.status not in (self.S_INIT, self.S_READY) and (not self.output_file.exists or not self.log_file.exists):
# msg = "Task has been submitted but cannot find the log file or the output file"
# return self.set_status(self.S_ERROR, msg)
return self.set_status(self.S_RUN, msg='final option: nothing seems to be wrong, the job must still be running')
def reduce_memory_demand(self):
"""
Method that can be called by the scheduler to decrease the memory demand of a specific task.
Returns True in case of success, False in case of Failure.
Should be overwritten by specific tasks.
"""
return False
def speed_up(self):
"""
Method that can be called by the flow to decrease the time needed for a specific task.
Returns True in case of success, False in case of Failure
Should be overwritten by specific tasks.
"""
return False
def out_to_in(self, out_file):
"""
Move an output file to the output data directory of the `Task`
and rename the file so that ABINIT will read it as an input data file.
Returns:
The absolute path of the new file in the indata directory.
"""
in_file = os.path.basename(out_file).replace("out", "in", 1)
dest = os.path.join(self.indir.path, in_file)
if os.path.exists(dest) and not os.path.islink(dest):
logger.warning("Will overwrite %s with %s" % (dest, out_file))
os.rename(out_file, dest)
return dest
def inlink_file(self, filepath):
"""
Create a symbolic link to the specified file in the
directory containing the input files of the task.
"""
if not os.path.exists(filepath):
logger.debug("Creating symbolic link to not existent file %s" % filepath)
# Extract the Abinit extension and add the prefix for input files.
root, abiext = abi_splitext(filepath)
infile = "in_" + abiext
infile = self.indir.path_in(infile)
# Link path to dest if dest link does not exist.
# else check that it points to the expected file.
self.history.info("Linking path %s --> %s" % (filepath, infile))
if not os.path.exists(infile):
os.symlink(filepath, infile)
else:
if os.path.realpath(infile) != filepath:
raise self.Error("infile %s does not point to filepath %s" % (infile, filepath))
def make_links(self):
"""
Create symbolic links to the output files produced by the other tasks.
.. warning::
This method should be called only when the calculation is READY because
it uses a heuristic approach to find the file to link.
"""
for dep in self.deps:
filepaths, exts = dep.get_filepaths_and_exts()
for path, ext in zip(filepaths, exts):
logger.info("Need path %s with ext %s" % (path, ext))
dest = self.ipath_from_ext(ext)
if not os.path.exists(path):
# Try netcdf file.
# TODO: this case should be treated in a cleaner way.
path += ".nc"
if os.path.exists(path): dest += ".nc"
if not os.path.exists(path):
raise self.Error("%s: %s is needed by this task but it does not exist" % (self, path))
if path.endswith(".nc") and not dest.endswith(".nc"): # NC --> NC file
dest += ".nc"
# Link path to dest if dest link does not exist.
# else check that it points to the expected file.
logger.debug("Linking path %s --> %s" % (path, dest))
if not os.path.exists(dest):
os.symlink(path, dest)
else:
# check links but only if we haven't performed the restart.
# in this case, indeed we may have replaced the file pointer with the
# previous output file of the present task.
if os.path.realpath(dest) != path and self.num_restarts == 0:
raise self.Error("dest %s does not point to path %s" % (dest, path))
@abc.abstractmethod
def setup(self):
"""Public method called before submitting the task."""
def _setup(self):
"""
This method calls self.setup after having performed additional operations
such as the creation of the symbolic links needed to connect different tasks.
"""
self.make_links()
self.setup()
def get_event_report(self, source="log"):
"""
Analyzes the main logfile of the calculation for possible Errors or Warnings.
If the ABINIT abort file is found, the error found in this file are added to
the output report.
Args:
source: "output" for the main output file,"log" for the log file.
Returns:
:class:`EventReport` instance or None if the source file file does not exist.
"""
# By default, we inspect the main log file.
ofile = {
"output": self.output_file,
"log": self.log_file}[source]
parser = events.EventsParser()
if not ofile.exists:
if not self.mpiabort_file.exists:
return None
else:
# ABINIT abort file without log!
abort_report = parser.parse(self.mpiabort_file.path)
return abort_report
try:
report = parser.parse(ofile.path)
#self._prev_reports[source] = report
# Add events found in the ABI_MPIABORTFILE.
if self.mpiabort_file.exists:
logger.critical("Found ABI_MPIABORTFILE!!!!!")
abort_report = parser.parse(self.mpiabort_file.path)
if len(abort_report) != 1:
logger.critical("Found more than one event in ABI_MPIABORTFILE")
# Weird case: empty abort file, let's skip the part
# below and hope that the log file contains the error message.
#if not len(abort_report): return report
# Add it to the initial report only if it differs
# from the last one found in the main log file.
last_abort_event = abort_report[-1]
if report and last_abort_event != report[-1]:
report.append(last_abort_event)
else:
report.append(last_abort_event)
return report
#except parser.Error as exc:
except Exception as exc:
# Return a report with an error entry with info on the exception.
msg = "%s: Exception while parsing ABINIT events:\n %s" % (ofile, str(exc))
self.set_status(self.S_ABICRITICAL, msg=msg)
return parser.report_exception(ofile.path, exc)
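# Illustrative usage sketch (kept as a comment so it is not executed at import time).
# `task` is assumed to be any Task instance whose calculation has produced a log file;
# the EventReport object exposes `errors`/`warnings` collections in the events module.
#
#   report = task.get_event_report(source="log")
#   if report is not None:
#       for error in report.errors:
#           print("ABINIT error:", error)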
def get_results(self, **kwargs):
"""
Returns :class:`NodeResults` instance.
Subclasses should extend this method (if needed) by adding
specialized code that performs some kind of post-processing.
"""
# Check whether the process completed.
if self.returncode is None:
raise self.Error("return code is None, you should call wait, communitate or poll")
if self.status is None or self.status < self.S_DONE:
raise self.Error("Task is not completed")
return self.Results.from_node(self)
def move(self, dest, is_abspath=False):
"""
Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
The destination path must not already exist. If the destination already exists
but is not a directory, it may be overwritten depending on os.rename() semantics.
By default, dest is located in the parent directory of self.workdir.
Use is_abspath=True to specify an absolute path.
"""
if not is_abspath:
dest = os.path.join(os.path.dirname(self.workdir), dest)
shutil.move(self.workdir, dest)
def in_files(self):
"""Return all the input data files used."""
return self.indir.list_filepaths()
def out_files(self):
"""Return all the output data files produced."""
return self.outdir.list_filepaths()
def tmp_files(self):
"""Return all the input data files produced."""
return self.tmpdir.list_filepaths()
def path_in_workdir(self, filename):
"""Create the absolute path of filename in the top-level working directory."""
return os.path.join(self.workdir, filename)
def rename(self, src_basename, dest_basename, datadir="outdir"):
"""
Rename a file located in datadir.
src_basename and dest_basename are the basename of the source file
and of the destination file, respectively.
"""
directory = {
"indir": self.indir,
"outdir": self.outdir,
"tmpdir": self.tmpdir,
}[datadir]
src = directory.path_in(src_basename)
dest = directory.path_in(dest_basename)
os.rename(src, dest)
#@check_spectator
def build(self, *args, **kwargs):
"""
Creates the working directory and the input files of the :class:`Task`.
It does not overwrite files if they already exist.
"""
# Create dirs for input, output and tmp data.
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Write files file and input file.
if not self.files_file.exists:
self.files_file.write(self.filesfile_string)
self.input_file.write(self.make_input())
self.manager.write_jobfile(self)
#@check_spectator
def rmtree(self, exclude_wildcard=""):
"""
Remove all files and directories in the working directory
Args:
exclude_wildcard: Optional string with shell-style wildcard patterns separated by |.
Files matching one of the patterns will be preserved.
example: exclude_wildcard="*.nc|*.txt" preserves all the files whose extension is in ["nc", "txt"].
"""
if not exclude_wildcard:
shutil.rmtree(self.workdir)
else:
w = WildCard(exclude_wildcard)
for dirpath, dirnames, filenames in os.walk(self.workdir):
for fname in filenames:
filepath = os.path.join(dirpath, fname)
if not w.match(fname):
os.remove(filepath)
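# Illustrative usage sketch (comment only): clean the working directory of `task`
# while keeping netcdf and text files, using the wildcard syntax documented above.
#
#   task.rmtree(exclude_wildcard="*.nc|*.txt")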
def remove_files(self, *filenames):
"""Remove all the files listed in filenames."""
filenames = list_strings(filenames)
for dirpath, dirnames, fnames in os.walk(self.workdir):
for fname in fnames:
if fname in filenames:
filepath = os.path.join(dirpath, fname)
os.remove(filepath)
def clean_output_files(self, follow_parents=True):
"""
This method is called when the task reaches S_OK. It removes all the output files
produced by the task that are not needed by its children as well as the output files
produced by its parents if no other node needs them.
Args:
follow_parents: If true, the output files of the parents nodes will be removed if possible.
Return:
list with the absolute paths of the files that have been removed.
"""
paths = []
if self.status != self.S_OK:
logger.warning("Calling task.clean_output_files on a task whose status != S_OK")
# Remove all files in tmpdir.
self.tmpdir.clean()
# Find the file extensions that should be preserved since these files are still
# needed by the children who haven't reached S_OK
except_exts = set()
for child in self.get_children():
if child.status == self.S_OK: continue
# Find the position of self in child.deps and add the extensions.
i = [dep.node for dep in child.deps].index(self)
except_exts.update(child.deps[i].exts)
# Remove the files in the outdir of the task but keep except_exts.
exts = self.gc.exts.difference(except_exts)
#print("Will remove its extensions: ", exts)
paths += self.outdir.remove_exts(exts)
if not follow_parents: return paths
# Remove the files in the outdir of my parents if all the possible dependencies have been fulfilled.
for parent in self.get_parents():
# Here we build a dictionary file extension --> list of child nodes requiring this file from parent
# e.g {"WFK": [node1, node2]}
ext2nodes = collections.defaultdict(list)
for child in parent.get_children():
if child.status == child.S_OK: continue
i = [d.node for d in child.deps].index(parent)
for ext in child.deps[i].exts:
ext2nodes[ext].append(child)
# Remove extension only if no node depends on it!
except_exts = [k for k, lst in ext2nodes.items() if lst]
exts = self.gc.exts.difference(except_exts)
#print("%s removes extensions %s from parent node %s" % (self, exts, parent))
paths += parent.outdir.remove_exts(exts)
self.history.info("Removed files: %s" % paths)
return paths
def setup(self):
"""Base class does not provide any hook."""
#@check_spectator
def start(self, **kwargs):
"""
Starts the calculation by performing the following steps:
- build dirs and files
- call the _setup method
- execute the job file by executing/submitting the job script.
Main entry point for the `Launcher`.
============== ==============================================================
kwargs Meaning
============== ==============================================================
autoparal False to skip the autoparal step (default True)
exec_args List of arguments passed to executable.
============== ==============================================================
Returns:
1 if task was started, 0 otherwise.
"""
if self.status >= self.S_SUB:
raise self.Error("Task status: %s" % str(self.status))
if self.start_lockfile.exists:
self.history.warning("Found lock file: %s" % self.start_lockfile.path)
return 0
self.start_lockfile.write("Started on %s" % time.asctime())
self.build()
self._setup()
# Add the variables needed to connect the node.
for d in self.deps:
cvars = d.connecting_vars()
self.history.info("Adding connecting vars %s" % cvars)
self.set_vars(cvars)
# Get (python) data from other nodes
d.apply_getters(self)
# Automatic parallelization
if kwargs.pop("autoparal", True) and hasattr(self, "autoparal_run"):
try:
self.autoparal_run()
except QueueAdapterError as exc:
# If autoparal cannot find a qadapter to run the calculation raises an Exception
self.history.critical(exc)
msg = "Error while trying to run autoparal in task:%s\n%s" % (repr(task), straceback())
cprint(msg, "yellow")
self.set_status(self.S_QCRITICAL, msg=msg)
return 0
except Exception as exc:
# Sometimes autoparal_run fails because Abinit aborts
# at the level of the parser e.g. cannot find the spacegroup
# due to some numerical noise in the structure.
# In this case we call fix_abicritical and then we try to run autoparal again.
self.history.critical("First call to autoparal failed with `%s`. Will try fix_abicritical" % exc)
msg = "autoparal_fake_run raised:\n%s" % straceback()
logger.critical(msg)
fixed = self.fix_abicritical()
if not fixed:
self.set_status(self.S_ABICRITICAL, msg="fix_abicritical could not solve the problem")
return 0
try:
self.autoparal_run()
self.history.info("Second call to autoparal succeeded!")
#cprint("Second call to autoparal succeeded!", "green")
except Exception as exc:
self.history.critical("Second call to autoparal failed with %s. Cannot recover!", exc)
msg = "Tried autoparal again but got:\n%s" % straceback()
cprint(msg, "red")
self.set_status(self.S_ABICRITICAL, msg=msg)
return 0
# Start the calculation in a subprocess and return.
self._process = self.manager.launch(self, **kwargs)
return 1
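# Illustrative usage sketch (comment only). `task` is an assumed Task instance that is
# ready to run; exec_args is forwarded verbatim to the executable (the flag shown here
# is purely hypothetical).
#
#   started = task.start(autoparal=False, exec_args=["--some-flag"])
#   if not started:
#       print("Task was not started, see task.history for the reason")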
def start_and_wait(self, *args, **kwargs):
"""
Helper method to start the task and wait for completion.
Mainly used when we are submitting the task via the shell without passing through a queue manager.
"""
self.start(*args, **kwargs)
retcode = self.wait()
return retcode
class DecreaseDemandsError(Exception):
"""
Exception raised by a task when a request to decrease some demand (load or memory) could not be fulfilled.
"""
class AbinitTask(Task):
"""
Base class defining an ABINIT calculation
"""
Results = TaskResults
@classmethod
def from_input(cls, input, workdir=None, manager=None):
"""
Create an instance of `AbinitTask` from an ABINIT input.
Args:
input: `AbinitInput` object.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
return cls(input, workdir=workdir, manager=manager)
@classmethod
def temp_shell_task(cls, inp, workdir=None, manager=None):
"""
Build a Task with a temporary workdir. The task is executed via the shell with 1 MPI proc.
Mainly used for invoking Abinit to get important parameters needed to prepare the real task.
"""
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
# Construct the task and run it
task = cls.from_input(inp, workdir=workdir, manager=manager.to_shell_manager(mpi_procs=1))
task.set_name('temp_shell_task')
return task
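# Illustrative usage sketch (comment only): run a throw-away calculation in a temporary
# directory via the shell manager. `gs_input` is an assumed AbinitInput object.
#
#   task = AbinitTask.temp_shell_task(gs_input)
#   task.start_and_wait(autoparal=False)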
def setup(self):
"""
Abinit has the very *bad* habit of changing the file extension by appending the characters in [A,B ..., Z]
to the output file, and this breaks a lot of code that relies on the use of a unique file extension.
Here we fix this issue by renaming run.abo to run.abo_[number] if the output file "run.abo" already
exists. A few lines of code in python, a lot of problems if you try to implement this trick in Fortran90.
"""
def rename_file(afile):
"""Helper function to rename :class:`File` objects. Return string for logging purpose."""
# Find the index of the last file (if any).
# TODO: Maybe it's better to use run.abo --> run(1).abo
fnames = [f for f in os.listdir(self.workdir) if f.startswith(afile.basename)]
nums = [int(f) for f in [f.split("_")[-1] for f in fnames] if f.isdigit()]
last = max(nums) if nums else 0
new_path = afile.path + "_" + str(last+1)
os.rename(afile.path, new_path)
return "Will rename %s to %s" % (afile.path, new_path)
logs = []
if self.output_file.exists: logs.append(rename_file(self.output_file))
if self.log_file.exists: logs.append(rename_file(self.log_file))
if logs:
self.history.info("\n".join(logs))
@property
def executable(self):
"""Path to the executable required for running the Task."""
try:
return self._executable
except AttributeError:
return "abinit"
@property
def pseudos(self):
"""List of pseudos used in the calculation."""
return self.input.pseudos
@property
def isnc(self):
"""True if norm-conserving calculation."""
return self.input.isnc
@property
def ispaw(self):
"""True if PAW calculation"""
return self.input.ispaw
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
pj = os.path.join
app(self.input_file.path) # Path to the input file
app(self.output_file.path) # Path to the output file
app(pj(self.workdir, self.prefix.idata)) # Prefix for input data
app(pj(self.workdir, self.prefix.odata)) # Prefix for output data
app(pj(self.workdir, self.prefix.tdata)) # Prefix for temporary data
# Paths to the pseudopotential files.
# Note that here the pseudos **must** be sorted according to znucl.
# Here we reorder the pseudos if the order is wrong.
ord_pseudos = []
znucl = [specie.number for specie in
self.input.structure.types_of_specie]
for z in znucl:
for p in self.pseudos:
if p.Z == z:
ord_pseudos.append(p)
break
else:
raise ValueError("Cannot find pseudo with znucl %s in pseudos:\n%s" % (z, self.pseudos))
for pseudo in ord_pseudos:
app(pseudo.path)
return "\n".join(lines)
def set_pconfs(self, pconfs):
"""Set the list of autoparal configurations."""
self._pconfs = pconfs
@property
def pconfs(self):
"""List of autoparal configurations."""
try:
return self._pconfs
except AttributeError:
return None
def uses_paral_kgb(self, value=1):
"""True if the task is a GS Task and uses paral_kgb with the given value."""
paral_kgb = self.get_inpvar("paral_kgb", 0)
# paral_kgb is used only in the GS part.
return paral_kgb == value and isinstance(self, GsTask)
def _change_structure(self, new_structure):
"""Change the input structure."""
# Compare new and old structure for logging purpose.
# TODO: Write method of structure to compare self and other and return a dictionary
old_structure = self.input.structure
old_lattice = old_structure.lattice
abc_diff = np.array(new_structure.lattice.abc) - np.array(old_lattice.abc)
angles_diff = np.array(new_structure.lattice.angles) - np.array(old_lattice.angles)
cart_diff = new_structure.cart_coords - old_structure.cart_coords
displs = np.array([np.sqrt(np.dot(v, v)) for v in cart_diff])
recs, tol_angle, tol_length = [], 10**-2, 10**-5
if np.any(np.abs(angles_diff) > tol_angle):
recs.append("new_agles - old_angles = %s" % angles_diff)
if np.any(np.abs(abc_diff) > tol_length):
recs.append("new_abc - old_abc = %s" % abc_diff)
if np.any(np.abs(displs) > tol_length):
min_pos, max_pos = displs.argmin(), displs.argmax()
recs.append("Mean displ: %.2E, Max_displ: %.2E (site %d), min_displ: %.2E (site %d)" %
(displs.mean(), displs[max_pos], max_pos, displs[min_pos], min_pos))
self.history.info("Changing structure (only significant diffs are shown):")
if not recs:
self.history.info("Input and output structure seems to be equal within the given tolerances")
else:
for rec in recs:
self.history.info(rec)
self.input.set_structure(new_structure)
#assert self.input.structure == new_structure
def autoparal_run(self):
"""
Find an optimal set of parameters for the execution of the task.
This method can change the ABINIT input variables and/or the
submission parameters e.g. the number of CPUs for MPI and OpenMP.
Set:
self.pconfs, a :class:`ParalHints` object with the configurations reported by
autoparal, from which the optimal configuration is selected.
Returns 0 on success.
"""
policy = self.manager.policy
if policy.autoparal == 0: # or policy.max_ncpus in [None, 1]:
logger.info("Nothing to do in autoparal, returning (None, None)")
return 0
if policy.autoparal != 1:
raise NotImplementedError("autoparal != 1")
############################################################################
# Run ABINIT in sequential to get the possible configurations with max_ncpus
############################################################################
# Set the variables for automatic parallelization
# Will get all the possible configurations up to max_ncpus
# Return immediately if max_ncpus == 1
max_ncpus = self.manager.max_cores
if max_ncpus == 1: return 0
autoparal_vars = dict(autoparal=policy.autoparal, max_ncpus=max_ncpus)
self.set_vars(autoparal_vars)
# Run the job in a shell subprocess with mpi_procs = 1
# we don't want to make a request to the queue manager for this simple job!
# Return code is always != 0
process = self.manager.to_shell_manager(mpi_procs=1).launch(self)
self.history.pop()
retcode = process.wait()
# To avoid: ResourceWarning: unclosed file <_io.BufferedReader name=87> in py3k
process.stderr.close()
# Remove the variables added for the automatic parallelization
self.input.remove_vars(list(autoparal_vars.keys()))
##############################################################
# Parse the autoparal configurations from the main output file
##############################################################
parser = ParalHintsParser()
try:
pconfs = parser.parse(self.output_file.path)
except parser.Error:
logger.critical("Error while parsing Autoparal section:\n%s" % straceback())
return 2
######################################################
# Select the optimal configuration according to policy
######################################################
optconf = self.find_optconf(pconfs)
####################################################
# Change the input file and/or the submission script
####################################################
self.set_vars(optconf.vars)
# Write autoparal configurations to JSON file.
d = pconfs.as_dict()
d["optimal_conf"] = optconf
json_pretty_dump(d, os.path.join(self.workdir, "autoparal.json"))
##############
# Finalization
##############
# Reset the status, remove garbage files ...
self.set_status(self.S_INIT, msg='finished autoparallel run')
# Remove the output file since Abinit likes to create new files
# with extension .outA, .outB if the file already exists.
os.remove(self.output_file.path)
os.remove(self.log_file.path)
os.remove(self.stderr_file.path)
return 0
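# Illustrative usage sketch (comment only): autoparal_run is normally triggered by
# Task.start, but it can also be invoked by hand before submission to inspect the
# parallel configurations reported by ABINIT.
#
#   if task.autoparal_run() == 0:
#       print(task.pconfs)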
def find_optconf(self, pconfs):
"""Find the optimal Parallel configuration."""
# Save pconfs for future reference.
self.set_pconfs(pconfs)
# Select the partition on which we'll be running and set MPI/OMP cores.
optconf = self.manager.select_qadapter(pconfs)
return optconf
def select_files(self, what="o"):
"""
Helper function used to select the files of a task.
Args:
what: string with the list of characters selecting the file type
Possible choices:
i ==> input_file,
o ==> output_file,
f ==> files_file,
j ==> job_file,
l ==> log_file,
e ==> stderr_file,
q ==> qout_file,
all ==> all files.
"""
choices = collections.OrderedDict([
("i", self.input_file),
("o", self.output_file),
("f", self.files_file),
("j", self.job_file),
("l", self.log_file),
("e", self.stderr_file),
("q", self.qout_file),
])
if what == "all":
return [getattr(v, "path") for v in choices.values()]
selected = []
for c in what:
try:
selected.append(getattr(choices[c], "path"))
except KeyError:
logger.warning("Wrong keyword %s" % c)
return selected
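# Illustrative usage sketch (comment only): collect the paths of the output and log
# files of `task` with the single-character codes documented above.
#
#   for path in task.select_files("ol"):
#       print(path)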
def restart(self):
"""
General restart used when scheduler problems have been taken care of.
"""
return self._restart()
#@check_spectator
def reset_from_scratch(self):
"""
Restart from scratch. This is to be used if a job is restarted with more resources after a crash.
Output files produced in workdir are moved to _reset, otherwise check_status would continue
to see the task as crashed even if the job did not run.
"""
# Create reset directory if not already done.
reset_dir = os.path.join(self.workdir, "_reset")
reset_file = os.path.join(reset_dir, "_counter")
if not os.path.exists(reset_dir):
os.mkdir(reset_dir)
num_reset = 1
else:
with open(reset_file, "rt") as fh:
num_reset = 1 + int(fh.read())
# Move files to reset and append digit with reset index.
def move_file(f):
if not f.exists: return
try:
f.move(os.path.join(reset_dir, f.basename + "_" + str(num_reset)))
except OSError as exc:
logger.warning("Couldn't move file {}. exc: {}".format(f, str(exc)))
for fname in ("output_file", "log_file", "stderr_file", "qout_file", "qerr_file"):
move_file(getattr(self, fname))
with open(reset_file, "wt") as fh:
fh.write(str(num_reset))
self.start_lockfile.remove()
# Reset datetimes
self.datetimes.reset()
return self._restart(submit=False)
#@check_spectator
def fix_abicritical(self):
"""
Method to fix crashes/errors caused by Abinit.
Returns:
1 if task has been fixed else 0.
"""
event_handlers = self.event_handlers
if not event_handlers:
self.set_status(status=self.S_ERROR, msg='Empty list of event handlers. Cannot fix abi_critical errors')
return 0
count, done = 0, len(event_handlers) * [0]
report = self.get_event_report()
if report is None:
self.set_status(status=self.S_ERROR, msg='get_event_report returned None')
return 0
# Note we have loop over all possible events (slow, I know)
# because we can have handlers for Error, Bug or Warning
# (ideally only for CriticalWarnings but this is not done yet)
for event in report:
for i, handler in enumerate(self.event_handlers):
if handler.can_handle(event) and not done[i]:
logger.info("handler %s will try to fix event %s" % (handler, event))
try:
d = handler.handle_task_event(self, event)
if d:
done[i] += 1
count += 1
except Exception as exc:
logger.critical(str(exc))
if count:
self.reset_from_scratch()
return 1
self.set_status(status=self.S_ERROR, msg='We encountered AbiCritical events that could not be fixed')
return 0
#@check_spectator
def fix_queue_critical(self):
"""
This function tries to fix critical events originating from the queue submission system.
General strategy, first try to increase resources in order to fix the problem,
if this is not possible, call a task specific method to attempt to decrease the demands.
Returns:
1 if task has been fixed else 0.
"""
from pymatgen.io.abinit.scheduler_error_parsers import NodeFailureError, MemoryCancelError, TimeCancelError
#assert isinstance(self.manager, TaskManager)
self.history.info('fixing queue critical')
ret = "task.fix_queue_critical: "
if not self.queue_errors:
# TODO
# paral_kgb = 1 leads to nasty SIGSEGVs that are seen as Qcritical errors!
# Try to fallback to the conjugate gradient.
#if self.uses_paral_kgb(1):
# logger.critical("QCRITICAL with PARAL_KGB==1. Will try CG!")
# self.set_vars(paral_kgb=0)
# self.reset_from_scratch()
# return
# Queue error but no error messages detected: try to solve it by increasing ncpus if the task scales.
# If resources are already at their maximum, the task is definitively marked as errored.
if self.mem_scales or self.load_scales:
try:
self.manager.increase_resources() # acts either on the policy or on the qadapter
self.reset_from_scratch()
ret += "increased resources"
return ret
except ManagerIncreaseError:
self.set_status(self.S_ERROR, msg='unknown queue error, could not increase resources any further')
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='unknown queue error, no options left')
raise FixQueueCriticalError
else:
print("Fix_qcritical: received %d queue_errors" % len(self.queue_errors))
print("type_list: %s" % list(type(qe) for qe in self.queue_errors))
for error in self.queue_errors:
self.history.info('fixing: %s' % str(error))
ret += str(error)
if isinstance(error, NodeFailureError):
# if the problematic node is known, exclude it
if error.nodes is not None:
try:
self.manager.exclude_nodes(error.nodes)
self.reset_from_scratch()
self.set_status(self.S_READY, msg='excluding nodes')
except:
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='Node error but no node identified.')
raise FixQueueCriticalError
elif isinstance(error, MemoryCancelError):
# Ask the qadapter to provide more resources, i.e. more CPUs so more total memory.
# If the code scales, this should fix the memory problem.
# Increase both max and min ncpus of autoparal and rerun autoparal.
if self.mem_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased ncpus to solve memory problem')
return
except ManagerIncreaseError:
self.history.warning('increasing ncpus failed')
# if the max is reached, try to increase the memory per cpu:
try:
self.manager.increase_mem()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased mem')
return
except ManagerIncreaseError:
self.history.warning('increasing mem failed')
# if this failed ask the task to provide a method to reduce the memory demand
try:
self.reduce_memory_demand()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='decreased mem demand')
return
except DecreaseDemandsError:
self.history.warning('decreasing demands failed')
msg = ('Memory error detected but the memory could not be increased, neither could the\n'
'memory demand be decreased. Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
raise FixQueueCriticalError
elif isinstance(error, TimeCancelError):
# ask the qadapter to provide more time
print('trying to increase time')
try:
self.manager.increase_time()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased wall time')
return
except ManagerIncreaseError:
self.history.warning('increasing the walltime failed')
# if this fails ask the qadapter to increase the number of cpus
if self.load_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased number of cpus')
return
except ManagerIncreaseError:
self.history.warning('increasing ncpus to speed up the calculation and stay within the walltime failed')
# if this failed ask the task to provide a method to speed up the task
try:
self.speed_up()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='task speedup')
return
except DecreaseDemandsError:
self.history.warning('decreasing demands failed')
msg = ('Time cancel error detected but the time could not be increased neither could\n'
'the time demand be decreased by speeding up the task or increasing the number of cpus.\n'
'Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
else:
msg = 'No solution provided for error %s. Unrecoverable error.' % error.name
self.set_status(self.S_ERROR, msg)
return 0
def parse_timing(self):
"""
Parse the timer data in the main output file of Abinit.
Requires timopt /= 0 in the input file (usually timopt = -1)
Return: :class:`AbinitTimerParser` instance, None if error.
"""
from .abitimer import AbinitTimerParser
parser = AbinitTimerParser()
read_ok = parser.parse(self.output_file.path)
if read_ok:
return parser
return None
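# Illustrative usage sketch (comment only): read the timing data once the task has
# completed (requires timopt /= 0 in the input file).
#
#   timing = task.parse_timing()
#   if timing is not None:
#       print(timing)   # see AbinitTimerParser for the available analysis methods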
class ProduceHist(object):
"""
Mixin class for an :class:`AbinitTask` producing a HIST file.
Provide the method `open_hist` that reads and return a HIST file.
"""
@property
def hist_path(self):
"""Absolute path of the HIST file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._hist_path
except AttributeError:
path = self.outdir.has_abiext("HIST")
if path: self._hist_path = path
return path
def open_hist(self):
"""
Open the HIST file located in self.outdir.
Returns :class:`HistFile` object, None if file could not be found or file is not readable.
"""
if not self.hist_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a HIST file in %s" % (self, self.outdir))
return None
# Open the HIST file
from abipy.dynamics.hist import HistFile
try:
return HistFile(self.hist_path)
except Exception as exc:
logger.critical("Exception while reading HIST file at %s:\n%s" % (self.hist_path, str(exc)))
return None
class GsTask(AbinitTask):
"""
Base class for ground-state tasks. A ground state task produces a GSR file
Provides the method `open_gsr` that reads and returns a GSR file.
"""
@property
def gsr_path(self):
"""Absolute path of the GSR file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._gsr_path
except AttributeError:
path = self.outdir.has_abiext("GSR")
if path: self._gsr_path = path
return path
def open_gsr(self):
"""
Open the GSR file located in self.outdir.
Returns :class:`GsrFile` object, None if file could not be found or file is not readable.
"""
gsr_path = self.gsr_path
if not gsr_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
return None
# Open the GSR file.
from abipy.electrons.gsr import GsrFile
try:
return GsrFile(gsr_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (gsr_path, str(exc)))
return None
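# Illustrative usage sketch (comment only): typical read access to the GSR file of a
# completed ground-state task (GsrFile supports the context-manager protocol, as also
# used in the get_results methods below).
#
#   gsr = task.open_gsr()
#   if gsr is not None:
#       with gsr:
#           print(gsr.structure)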
class ScfTask(GsTask):
"""
Self-consistent ground-state calculations.
Provide support for in-place restart via (WFK|DEN) files
"""
CRITICAL_EVENTS = [
events.ScfConvergenceWarning,
]
color_rgb = np.array((255, 0, 0)) / 255
def restart(self):
"""SCF calculations can be restarted if we have either the WFK file or the DEN file."""
# Prefer WFK over DEN files since we can reuse the wavefunctions.
for ext in ("WFK", "DEN"):
restart_file = self.outdir.has_abiext(ext)
irdvars = irdvars_for_ext(ext)
if restart_file: break
else:
raise self.RestartError("%s: Cannot find WFK or DEN file to restart from." % self)
# Move out --> in.
self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
# Now we can resubmit the job.
self.history.info("Will restart from %s", restart_file)
return self._restart()
def inspect(self, **kwargs):
"""
Plot the SCF cycle results with matplotlib.
Returns:
`matplotlib` figure, None if some error occurred.
"""
try:
scf_cycle = abiinspect.GroundStateScfCycle.from_file(self.output_file.path)
except IOError:
return None
if scf_cycle is not None:
if "title" not in kwargs: kwargs["title"] = str(self)
return scf_cycle.plot(**kwargs)
return None
def get_results(self, **kwargs):
results = super(ScfTask, self).get_results(**kwargs)
# Open the GSR file and add its data to results.out
with self.open_gsr() as gsr:
results["out"].update(gsr.as_dict())
# Add files to GridFS
results.register_gridfs_files(GSR=gsr.filepath)
return results
class CollinearThenNonCollinearScfTask(ScfTask):
"""
A specialized ScfTask that performs an initial SCF run with nsppol = 2.
The spin polarized WFK file is then used to start a non-collinear SCF run (nspinor == 2)
initialized from the previous WFK file.
"""
def __init__(self, input, workdir=None, manager=None, deps=None):
super(CollinearThenNonCollinearScfTask, self).__init__(input, workdir=workdir, manager=manager, deps=deps)
# Enforce nspinor = 1, nsppol = 2 and prtwf = 1.
self._input = self.input.deepcopy()
self.input.set_spin_mode("polarized")
self.input.set_vars(prtwf=1)
self.collinear_done = False
def _on_ok(self):
results = super(CollinearThenNonCollinearScfTask, self)._on_ok()
if not self.collinear_done:
self.input.set_spin_mode("spinor")
self.collinear_done = True
self.finalized = False
self.restart()
return results
class NscfTask(GsTask):
"""
Non-Self-consistent GS calculation. Provide in-place restart via WFK files
"""
CRITICAL_EVENTS = [
events.NscfConvergenceWarning,
]
color_rgb = np.array((255, 122, 122)) / 255
def restart(self):
"""NSCF calculations can be restarted only if we have the WFK file."""
ext = "WFK"
restart_file = self.outdir.has_abiext(ext)
if not restart_file:
raise self.RestartError("%s: Cannot find the WFK file to restart from." % self)
# Move out --> in.
self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
irdvars = irdvars_for_ext(ext)
self.set_vars(irdvars)
# Now we can resubmit the job.
self.history.info("Will restart from %s", restart_file)
return self._restart()
def get_results(self, **kwargs):
results = super(NscfTask, self).get_results(**kwargs)
# Read the GSR file.
with self.open_gsr() as gsr:
results["out"].update(gsr.as_dict())
# Add files to GridFS
results.register_gridfs_files(GSR=gsr.filepath)
return results
class RelaxTask(GsTask, ProduceHist):
"""
Task for structural optimizations.
"""
# TODO possible ScfConvergenceWarning?
CRITICAL_EVENTS = [
events.RelaxConvergenceWarning,
]
color_rgb = np.array((255, 61, 255)) / 255
def get_final_structure(self):
"""Read the final structure from the GSR file."""
try:
with self.open_gsr() as gsr:
return gsr.structure
except AttributeError:
raise RuntimeError("Cannot find the GSR file with the final structure to restart from.")
def restart(self):
"""
Restart the structural relaxation.
Structure relaxations can be restarted only if we have the WFK file or the DEN or the GSR file
from which we can read the last structure (mandatory) and the wavefunctions (not mandatory but useful).
Prefer WFK over other files since we can reuse the wavefunctions.
.. note::
The problem in the present approach is that some parameters in the input
are computed from the initial structure and may not be consistent with
the modification of the structure done during the structure relaxation.
"""
restart_file = None
# Try to restart from the WFK file if possible.
# FIXME: This part has been disabled because WFK IO is a mess if paral_kgb == 1
# This is also the reason why I wrote my own MPI-IO code for the GW part!
wfk_file = self.outdir.has_abiext("WFK")
if False and wfk_file:
irdvars = irdvars_for_ext("WFK")
restart_file = self.out_to_in(wfk_file)
# Fallback to DEN file. Note that here we look for out_DEN instead of out_TIM?_DEN
# This happens when the previous run completed and task.on_done has been performed.
# ********************************************************************************
# Note that it's possible to have an undetected error if we have multiple restarts
# and the last relax died badly. In this case indeed out_DEN is the file produced
# by the last run that has executed on_done.
# ********************************************************************************
if restart_file is None:
for ext in ("", ".nc"):
out_den = self.outdir.path_in("out_DEN" + ext)
if os.path.exists(out_den):
irdvars = irdvars_for_ext("DEN")
restart_file = self.out_to_in(out_den)
break
if restart_file is None:
# Try to restart from the last TIM?_DEN file.
# This should happen if the previous run didn't complete in a clean way.
# Find the last TIM?_DEN file.
last_timden = self.outdir.find_last_timden_file()
if last_timden is not None:
if last_timden.path.endswith(".nc"):
ofile = self.outdir.path_in("out_DEN.nc")
else:
ofile = self.outdir.path_in("out_DEN")
os.rename(last_timden.path, ofile)
restart_file = self.out_to_in(ofile)
irdvars = irdvars_for_ext("DEN")
if restart_file is None:
# Don't raise RestartError as we can still change the structure.
self.history.warning("Cannot find the WFK|DEN|TIM?_DEN file to restart from.")
else:
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
self.history.info("Will restart from %s", restart_file)
# FIXME Here we should read the HIST file but restartxf is broken!
#self.set_vars({"restartxf": -1})
# Read the relaxed structure from the GSR file and change the input.
self._change_structure(self.get_final_structure())
# Now we can resubmit the job.
return self._restart()
def inspect(self, **kwargs):
"""
Plot the evolution of the structural relaxation with matplotlib.
Args:
what: Either "hist" or "scf". The first option (default) extracts data
from the HIST file and plot the evolution of the structural
parameters, forces, pressures and energies.
The second option, extracts data from the main output file and
plot the evolution of the SCF cycles (etotal, residuals, etc).
Returns:
`matplotlib` figure, None if some error occurred.
"""
what = kwargs.pop("what", "hist")
if what == "hist":
# Read the hist file to get access to the structure.
with self.open_hist() as hist:
return hist.plot(**kwargs) if hist else None
elif what == "scf":
# Get info on the different SCF cycles
relaxation = abiinspect.Relaxation.from_file(self.output_file.path)
if "title" not in kwargs: kwargs["title"] = str(self)
return relaxation.plot(**kwargs) if relaxation is not None else None
else:
raise ValueError("Wrong value for what %s" % what)
def get_results(self, **kwargs):
results = super(RelaxTask, self).get_results(**kwargs)
# Open the GSR file and add its data to results.out
with self.open_gsr() as gsr:
results["out"].update(gsr.as_dict())
# Add files to GridFS
results.register_gridfs_files(GSR=gsr.filepath)
return results
def reduce_dilatmx(self, target=1.01):
actual_dilatmx = self.get_inpvar('dilatmx', 1.)
new_dilatmx = actual_dilatmx - min((actual_dilatmx-target), actual_dilatmx*0.05)
self.set_vars(dilatmx=new_dilatmx)
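# Worked example of the update rule above (comment only): with the default target=1.01
# and a current dilatmx of 1.05 the decrement is min(1.05 - 1.01, 1.05 * 0.05) =
# min(0.04, 0.0525) = 0.04, so dilatmx jumps directly to the target 1.01.
# Starting from 1.2 instead, the decrement is min(0.19, 0.06) = 0.06 and dilatmx is
# reduced gradually (1.2 -> 1.14), to be lowered further on subsequent calls.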
def fix_ofiles(self):
"""
Note that ABINIT produces an out_TIM?_DEN file for each relaxation step.
Here we list all TIM*_DEN files, select the last one and rename it to out_DEN.
This change is needed so that we can specify dependencies with the syntax {node: "DEN"}
without having to know the number of iterations needed to converge the run in node!
"""
super(RelaxTask, self).fix_ofiles()
# Find the last TIM?_DEN file.
last_timden = self.outdir.find_last_timden_file()
if last_timden is None:
logger.warning("Cannot find TIM?_DEN files")
return
# Rename last TIMDEN with out_DEN.
ofile = self.outdir.path_in("out_DEN")
if last_timden.path.endswith(".nc"): ofile += ".nc"
self.history.info("Renaming last_denfile %s --> %s" % (last_timden.path, ofile))
os.rename(last_timden.path, ofile)
class DfptTask(AbinitTask):
"""
Base class for DFPT tasks (Phonons, ...)
Mainly used to implement methods that are common to DFPT calculations with Abinit.
Provide the method `open_ddb` that reads and return a Ddb file.
.. warning::
This class should not be instantiated directly.
"""
@property
def ddb_path(self):
"""Absolute path of the DDB file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._ddb_path
except AttributeError:
path = self.outdir.has_abiext("DDB")
if path: self._ddb_path = path
return path
def open_ddb(self):
"""
Open the DDB file located in the in self.outdir.
Returns :class:`DdbFile` object, None if file could not be found or file is not readable.
"""
ddb_path = self.ddb_path
if not ddb_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a DDB file in %s" % (self, self.outdir))
return None
# Open the DDB file.
from abipy.dfpt.ddb import DdbFile
try:
return DdbFile(ddb_path)
except Exception as exc:
logger.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
return None
class DdeTask(DfptTask):
"""Task for DDE calculations."""
def make_links(self):
"""Replace the default behaviour of make_links"""
for dep in self.deps:
if dep.exts == ["DDK"]:
ddk_task = dep.node
out_ddk = ddk_task.outdir.has_abiext("DDK")
if not out_ddk:
raise RuntimeError("%s didn't produce the DDK file" % ddk_task)
# Get (fortran) idir and construct the name of the 1WF file expected by Abinit
rfdir = list(ddk_task.input["rfdir"])
if rfdir.count(1) != 1:
raise RuntimeError("Only one direction should be specifned in rfdir but rfdir = %s" % rfdir)
idir = rfdir.index(1) + 1
ddk_case = idir + 3 * len(ddk_task.input.structure)
infile = self.indir.path_in("in_1WF%d" % ddk_case)
os.symlink(out_ddk, infile)
elif dep.exts == ["WFK"]:
gs_task = dep.node
out_wfk = gs_task.outdir.has_abiext("WFK")
if not out_wfk:
raise RuntimeError("%s didn't produce the WFK file" % gs_task)
if not os.path.exists(self.indir.path_in("in_WFK")):
os.symlink(out_wfk, self.indir.path_in("in_WFK"))
else:
raise ValueError("Don't know how to handle extension: %s" % dep.exts)
def get_results(self, **kwargs):
results = super(DdeTask, self).get_results(**kwargs)
return results.register_gridfs_file(DDB=(self.outdir.has_abiext("DDE"), "t"))
class DteTask(DfptTask):
"""Task for DTE calculations."""
# @check_spectator
def start(self, **kwargs):
kwargs['autoparal'] = False
return super(DteTask, self).start(**kwargs)
def make_links(self):
"""Replace the default behaviour of make_links"""
for dep in self.deps:
for d in dep.exts:
if d == "DDK":
ddk_task = dep.node
out_ddk = ddk_task.outdir.has_abiext("DDK")
if not out_ddk:
raise RuntimeError("%s didn't produce the DDK file" % ddk_task)
# Get (fortran) idir and construct the name of the 1WF file expected by Abinit
rfdir = list(ddk_task.input["rfdir"])
if rfdir.count(1) != 1:
raise RuntimeError("Only one direction should be specifned in rfdir but rfdir = %s" % rfdir)
idir = rfdir.index(1) + 1
ddk_case = idir + 3 * len(ddk_task.input.structure)
infile = self.indir.path_in("in_1WF%d" % ddk_case)
os.symlink(out_ddk, infile)
elif d == "WFK":
gs_task = dep.node
out_wfk = gs_task.outdir.has_abiext("WFK")
if not out_wfk:
raise RuntimeError("%s didn't produce the WFK file" % gs_task)
if not os.path.exists(self.indir.path_in("in_WFK")):
os.symlink(out_wfk, self.indir.path_in("in_WFK"))
elif d == "DEN":
gs_task = dep.node
out_wfk = gs_task.outdir.has_abiext("DEN")
if not out_wfk:
raise RuntimeError("%s didn't produce the WFK file" % gs_task)
if not os.path.exists(self.indir.path_in("in_DEN")):
os.symlink(out_wfk, self.indir.path_in("in_DEN"))
elif d == "1WF":
gs_task = dep.node
out_wfk = gs_task.outdir.has_abiext("1WF")
if not out_wfk:
raise RuntimeError("%s didn't produce the 1WF file" % gs_task)
dest = self.indir.path_in("in_" + out_wfk.split("_")[-1])
if not os.path.exists(dest):
os.symlink(out_wfk, dest)
elif d == "1DEN":
gs_task = dep.node
out_wfk = gs_task.outdir.has_abiext("DEN")
if not out_wfk:
raise RuntimeError("%s didn't produce the 1WF file" % gs_task)
dest = self.indir.path_in("in_" + out_wfk.split("_")[-1])
if not os.path.exists(dest):
os.symlink(out_wfk, dest)
else:
raise ValueError("Don't know how to handle extension: %s" % dep.exts)
def get_results(self, **kwargs):
results = super(DteTask, self).get_results(**kwargs)
return results.register_gridfs_file(DDB=(self.outdir.has_abiext("DDE"), "t"))
class DdkTask(DfptTask):
"""Task for DDK calculations."""
color_rgb = np.array((61, 158, 255)) / 255
#@check_spectator
def _on_ok(self):
super(DdkTask, self)._on_ok()
# Copy instead of removing, otherwise optic tests fail
# Fixing this problem requires a rationalization of file extensions.
#if self.outdir.rename_abiext('1WF', 'DDK') > 0:
#if self.outdir.copy_abiext('1WF', 'DDK') > 0:
self.outdir.symlink_abiext('1WF', 'DDK')
def get_results(self, **kwargs):
results = super(DdkTask, self).get_results(**kwargs)
return results.register_gridfs_file(DDK=(self.outdir.has_abiext("DDK"), "t"))
class BecTask(DfptTask):
"""
Task for the calculation of Born effective charges.
bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
bec_deps.update({scf_task: "WFK"})
"""
color_rgb = np.array((122, 122, 255)) / 255
def make_links(self):
"""Replace the default behaviour of make_links"""
#print("In BEC make_links")
for dep in self.deps:
if dep.exts == ["DDK"]:
ddk_task = dep.node
out_ddk = ddk_task.outdir.has_abiext("DDK")
if not out_ddk:
raise RuntimeError("%s didn't produce the DDK file" % ddk_task)
# Get (fortran) idir and construct the name of the 1WF file expected by Abinit
rfdir = list(ddk_task.input["rfdir"])
if rfdir.count(1) != 1:
raise RuntimeError("Only one direction should be specifned in rfdir but rfdir = %s" % rfdir)
idir = rfdir.index(1) + 1
ddk_case = idir + 3 * len(ddk_task.input.structure)
infile = self.indir.path_in("in_1WF%d" % ddk_case)
os.symlink(out_ddk, infile)
elif dep.exts == ["WFK"]:
gs_task = dep.node
out_wfk = gs_task.outdir.has_abiext("WFK")
if not out_wfk:
raise RuntimeError("%s didn't produce the WFK file" % gs_task)
os.symlink(out_wfk, self.indir.path_in("in_WFK"))
else:
raise ValueError("Don't know how to handle extension: %s" % dep.exts)
class PhononTask(DfptTask):
"""
DFPT calculations for a single atomic perturbation.
Provide support for in-place restart via (1WF|1DEN) files
"""
# TODO:
# for the time being we don't discern between GS and PhononCalculations.
CRITICAL_EVENTS = [
events.ScfConvergenceWarning,
]
color_rgb = np.array((0, 0, 255)) / 255
def restart(self):
"""
Phonon calculations can be restarted only if we have the 1WF file or the 1DEN file
from which we can read the first-order wavefunctions or the first-order density.
Prefer 1WF over 1DEN since we can reuse the wavefunctions.
"""
# Abinit adds the idir-ipert index at the end of the file and this breaks the extension
# e.g. out_1WF4, out_DEN4. find_1wf_files and find_1den_files return the list of files found
restart_file, irdvars = None, None
# Highest priority to the 1WF file because restart is more efficient.
wf_files = self.outdir.find_1wf_files()
if wf_files is not None:
restart_file = wf_files[0].path
irdvars = irdvars_for_ext("1WF")
if len(wf_files) != 1:
restart_file = None
logger.critical("Found more than one 1WF file. Restart is ambiguous!")
if restart_file is None:
den_files = self.outdir.find_1den_files()
if den_files is not None:
restart_file = den_files[0].path
irdvars = {"ird1den": 1}
if len(den_files) != 1:
restart_file = None
logger.critical("Found more than one 1DEN file. Restart is ambiguous!")
if restart_file is None:
# Raise because otherwise restart is equivalent to a run from scratch --> infinite loop!
raise self.RestartError("%s: Cannot find the 1WF|1DEN file to restart from." % self)
# Move file.
self.history.info("Will restart from %s", restart_file)
restart_file = self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
# Now we can resubmit the job.
return self._restart()
def inspect(self, **kwargs):
"""
Plot the Phonon SCF cycle results with matplotlib.
Returns:
`matplotlib` figure, None if some error occurred.
"""
scf_cycle = abiinspect.PhononScfCycle.from_file(self.output_file.path)
if scf_cycle is not None:
if "title" not in kwargs: kwargs["title"] = str(self)
return scf_cycle.plot(**kwargs)
def get_results(self, **kwargs):
results = super(PhononTask, self).get_results(**kwargs)
return results.register_gridfs_files(DDB=(self.outdir.has_abiext("DDB"), "t"))
def make_links(self):
super(PhononTask, self).make_links()
# fix the problem that abinit uses the 1WF extension for the DDK output file but reads it with the irdddk flag
#if self.indir.has_abiext('DDK'):
# self.indir.rename_abiext('DDK', '1WF')
class EphTask(AbinitTask):
"""
Class for electron-phonon calculations.
"""
color_rgb = np.array((255, 128, 0)) / 255
class ManyBodyTask(AbinitTask):
"""
Base class for Many-body tasks (Screening, Sigma, Bethe-Salpeter)
Mainly used to implement methods that are common to MBPT calculations with Abinit.
.. warning::
This class should not be instantiated directly.
"""
def reduce_memory_demand(self):
"""
Method that can be called by the scheduler to decrease the memory demand of a specific task.
Returns True in case of success, False in case of Failure.
"""
# The first digit governs the storage of W(q), the second digit the storage of u(r)
# Try to avoid the storage of u(r) first since reading W(q) from file will lead to a dramatic slowdown.
prev_gwmem = int(self.get_inpvar("gwmem", default=11))
first_dig, second_dig = prev_gwmem // 10, prev_gwmem % 10
if second_dig == 1:
self.set_vars(gwmem="%.2d" % (10 * first_dig))
return True
if first_dig == 1:
self.set_vars(gwmem="%.2d" % 00)
return True
# gwmem 00 d'oh!
return False
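# Worked example of the gwmem reduction above (comment only): starting from the default
# gwmem=11, the first call switches off the in-memory storage of u(r) and sets gwmem="10";
# a second call also switches off the storage of W(q) and sets gwmem="00"; any further
# call returns False because there is no demand left to reduce.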
class ScrTask(ManyBodyTask):
"""Tasks for SCREENING calculations """
color_rgb = np.array((255, 128, 0)) / 255
#def inspect(self, **kwargs):
# """Plot graph showing the number of q-points computed and the wall-time used"""
@property
def scr_path(self):
"""Absolute path of the SCR file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._scr_path
except AttributeError:
path = self.outdir.has_abiext("SCR.nc")
if path: self._scr_path = path
return path
def open_scr(self):
"""
Open the SCR.nc file located in self.outdir.
Returns :class:`ScrFile` object, None if file could not be found or file is not readable.
"""
scr_path = self.scr_path
if not scr_path:
logger.critical("%s didn't produce a SCR.nc file in %s" % (self, self.outdir))
return None
# Open the SCR file.
from abipy.electrons.scr import ScrFile
try:
return ScrFile(scr_path)
except Exception as exc:
logger.critical("Exception while reading SCR file at %s:\n%s" % (scr_path, str(exc)))
return None
class SigmaTask(ManyBodyTask):
"""
Tasks for SIGMA calculations. Provides support for in-place restart via QPS files
"""
CRITICAL_EVENTS = [
events.QPSConvergenceWarning,
]
color_rgb = np.array((0, 255, 0)) / 255
def restart(self):
# Sigma calculations can be restarted only if we have the QPS file
# from which we can read the results of the previous step.
ext = "QPS"
restart_file = self.outdir.has_abiext(ext)
if not restart_file:
raise self.RestartError("%s: Cannot find the QPS file to restart from." % self)
self.out_to_in(restart_file)
# Add the appropriate variable for restarting.
irdvars = irdvars_for_ext(ext)
self.set_vars(irdvars)
# Now we can resubmit the job.
self.history.info("Will restart from %s", restart_file)
return self._restart()
#def inspect(self, **kwargs):
# """Plot graph showing the number of k-points computed and the wall-time used"""
@property
def sigres_path(self):
"""Absolute path of the SIGRES file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._sigres_path
except AttributeError:
path = self.outdir.has_abiext("SIGRES")
if path: self._sigres_path = path
return path
def open_sigres(self):
"""
Open the SIGRES file located in self.outdir.
Returns :class:`SigresFile` object, None if file could not be found or file is not readable.
"""
sigres_path = self.sigres_path
if not sigres_path:
logger.critical("%s didn't produce a SIGRES file in %s" % (self, self.outdir))
return None
# Open the SIGRES file and add its data to results.out
from abipy.electrons.gw import SigresFile
try:
return SigresFile(sigres_path)
except Exception as exc:
logger.critical("Exception while reading SIGRES file at %s:\n%s" % (sigres_path, str(exc)))
return None
def get_scissors_builder(self):
"""
Returns an instance of :class:`ScissorsBuilder` from the SIGRES file.
Raise:
`RuntimeError` if SIGRES file is not found.
"""
from abipy.electrons.scissors import ScissorsBuilder
if self.sigres_path:
return ScissorsBuilder.from_file(self.sigres_path)
else:
raise RuntimeError("Cannot find SIGRES file!")
def get_results(self, **kwargs):
results = super(SigmaTask, self).get_results(**kwargs)
# Open the SIGRES file and add its data to results.out
with self.open_sigres() as sigres:
#results["out"].update(sigres.as_dict())
results.register_gridfs_files(SIGRES=sigres.filepath)
return results
class BseTask(ManyBodyTask):
"""
Task for Bethe-Salpeter calculations.
.. note::
The BSE code provides both iterative and direct schemes for the computation of the dielectric function.
The direct diagonalization cannot be restarted whereas Haydock and CG support restarting.
"""
CRITICAL_EVENTS = [
events.HaydockConvergenceWarning,
#events.BseIterativeDiagoConvergenceWarning,
]
color_rgb = np.array((128, 0, 255)) / 255
def restart(self):
"""
BSE calculations with Haydock can be restarted only if we have the
excitonic Hamiltonian and the HAYDR_SAVE file.
"""
# TODO: This version seems to work but the main output file is truncated
# (the log file is complete though).
# TODO: Handle restart if CG method is used
# TODO: restart should receive a list of critical events
irdvars = {}
# Move the BSE blocks to indata.
# This is done only once at the end of the first run.
# Successive restarts will use the BSR|BSC files in the indir directory
# to initialize the excitonic Hamiltonian
count = 0
for ext in ("BSR", "BSC"):
ofile = self.outdir.has_abiext(ext)
if ofile:
count += 1
irdvars.update(irdvars_for_ext(ext))
self.out_to_in(ofile)
if not count:
# outdir does not contain the BSR|BSC file.
# This means that num_restart > 1 and the files should be in task.indir
count = 0
for ext in ("BSR", "BSC"):
ifile = self.indir.has_abiext(ext)
if ifile:
count += 1
if not count:
raise self.RestartError("%s: Cannot find BSR|BSC files in %s" % (self, self.indir))
# Rename HAYDR_SAVE files
count = 0
for ext in ("HAYDR_SAVE", "HAYDC_SAVE"):
ofile = self.outdir.has_abiext(ext)
if ofile:
count += 1
irdvars.update(irdvars_for_ext(ext))
self.out_to_in(ofile)
if not count:
raise self.RestartError("%s: Cannot find the HAYDR_SAVE file to restart from." % self)
# Add the appropriate variable for restarting.
self.set_vars(irdvars)
# Now we can resubmit the job.
#self.history.info("Will restart from %s", restart_file)
return self._restart()
#def inspect(self, **kwargs):
# """
# Plot the Haydock iterations with matplotlib.
#
# Returns
# `matplotlib` figure, None if some error occurred.
# """
# haydock_cycle = abiinspect.HaydockIterations.from_file(self.output_file.path)
# if haydock_cycle is not None:
# if "title" not in kwargs: kwargs["title"] = str(self)
# return haydock_cycle.plot(**kwargs)
@property
def mdf_path(self):
"""Absolute path of the MDF file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._mdf_path
except AttributeError:
path = self.outdir.has_abiext("MDF.nc")
if path: self._mdf_path = path
return path
def open_mdf(self):
"""
Open the MDF file located in self.outdir.
Returns :class:`MdfFile` object, None if file could not be found or file is not readable.
"""
mdf_path = self.mdf_path
if not mdf_path:
logger.critical("%s didn't produce a MDF file in %s" % (self, self.outdir))
return None
# Open the MDF file and add its data to results.out
from abipy.electrons.bse import MdfFile
try:
return MdfFile(mdf_path)
except Exception as exc:
logger.critical("Exception while reading MDF file at %s:\n%s" % (mdf_path, str(exc)))
return None
def get_results(self, **kwargs):
results = super(BseTask, self).get_results(**kwargs)
with self.open_mdf() as mdf:
#results["out"].update(mdf.as_dict())
#epsilon_infinity optical_gap
results.register_gridfs_files(MDF=mdf.filepath)
return results
class OpticTask(Task):
"""
Task for the computation of optical spectra with optic i.e.
RPA without local-field effects and velocity operator computed from DDK files.
"""
color_rgb = np.array((255, 204, 102)) / 255
def __init__(self, optic_input, nscf_node, ddk_nodes, workdir=None, manager=None):
"""
Create an instance of :class:`OpticTask` from a string containing the input.
Args:
optic_input: string with the optic variables (filepaths will be added at run time).
nscf_node: The NSCF task that will produce the WFK file or string with the path of the WFK file.
ddk_nodes: List of :class:`DdkTask` nodes that will produce the DDK files or list of DDK file paths.
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
# Convert paths to FileNodes
self.nscf_node = Node.as_node(nscf_node)
self.ddk_nodes = [Node.as_node(n) for n in ddk_nodes]
assert len(ddk_nodes) == 3
#print(self.nscf_node, self.ddk_nodes)
# Use DDK extension instead of 1WF
deps = {n: "1WF" for n in self.ddk_nodes}
#deps = {n: "DDK" for n in self.ddk_nodes}
deps.update({self.nscf_node: "WFK"})
super(OpticTask, self).__init__(optic_input, workdir=workdir, manager=manager, deps=deps)
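# Illustrative usage sketch (comment only): wiring an OpticTask to its parent nodes.
# `optic_input` is an assumed input object for optic, `nscf_task` an NscfTask and
# ddk_x/ddk_y/ddk_z the three DdkTask nodes, one per direction.
#
#   optic_task = OpticTask(optic_input, nscf_node=nscf_task,
#                          ddk_nodes=[ddk_x, ddk_y, ddk_z])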
def set_workdir(self, workdir, chroot=False):
"""Set the working directory of the task."""
super(OpticTask, self).set_workdir(workdir, chroot=chroot)
# Small hack: the log file of optics is actually the main output file.
self.output_file = self.log_file
@deprecated(message="_set_inpvars is deprecated. Use set_vars")
def _set_inpvars(self, *args, **kwargs):
return self.set_vars(*args, **kwargs)
def set_vars(self, *args, **kwargs):
"""
Optic does not use `get` or `ird` variables, hence we should never try
to change the input when we connect this task.
"""
kwargs.update(dict(*args))
self.history.info("OpticTask intercepted set_vars with args %s" % kwargs)
if "autoparal" in kwargs: self.input.set_vars(autoparal=kwargs["autoparal"])
if "max_ncpus" in kwargs: self.input.set_vars(max_ncpus=kwargs["max_ncpus"])
@property
def executable(self):
"""Path to the executable required for running the :class:`OpticTask`."""
try:
return self._executable
except AttributeError:
return "optic"
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
#optic.in ! Name of input file
#optic.out ! Unused
#optic ! Root name for all files that will be produced
app(self.input_file.path) # Path to the input file
app(os.path.join(self.workdir, "unused")) # Path to the output file
app(os.path.join(self.workdir, self.prefix.odata)) # Prefix for output data
return "\n".join(lines)
@property
def wfk_filepath(self):
"""Returns (at runtime) the absolute path of the WFK file produced by the NSCF run."""
return self.nscf_node.outdir.has_abiext("WFK")
@property
def ddk_filepaths(self):
"""Returns (at runtime) the absolute path of the DDK files produced by the DDK runs."""
return [ddk_task.outdir.has_abiext("1WF") for ddk_task in self.ddk_nodes]
def make_input(self):
"""Construct and write the input file of the calculation."""
# Set the file paths.
all_files ={"ddkfile_"+str(n+1) : ddk for n,ddk in enumerate(self.ddk_filepaths)}
all_files.update({"wfkfile" : self.wfk_filepath})
files_nml = {"FILES" : all_files}
files= nmltostring(files_nml)
# Get the input specified by the user
user_file = nmltostring(self.input.as_dict())
# Join them.
return files + user_file
def setup(self):
"""Public method called before submitting the task."""
def make_links(self):
"""
Optic allows the user to specify the paths of the input files,
hence we don't need to create symbolic links.
"""
def get_results(self, **kwargs):
results = super(OpticTask, self).get_results(**kwargs)
#results.update(
#"epsilon_infinity":
#))
return results
def fix_abicritical(self):
"""
Cannot fix abicritical errors for optic
"""
return 0
#@check_spectator
def reset_from_scratch(self):
"""
Restart from scratch. This is to be used if a job is restarted with more resources after a crash.
"""
# Move output files produced in workdir to _reset otherwise check_status continues
# to see the task as crashed even if the job did not run
# Create reset directory if not already done.
reset_dir = os.path.join(self.workdir, "_reset")
reset_file = os.path.join(reset_dir, "_counter")
if not os.path.exists(reset_dir):
os.mkdir(reset_dir)
num_reset = 1
else:
with open(reset_file, "rt") as fh:
num_reset = 1 + int(fh.read())
# Move files to reset and append digit with reset index.
def move_file(f):
if not f.exists: return
try:
f.move(os.path.join(reset_dir, f.basename + "_" + str(num_reset)))
except OSError as exc:
logger.warning("Couldn't move file {}. exc: {}".format(f, str(exc)))
for fname in ("output_file", "log_file", "stderr_file", "qout_file", "qerr_file", "mpiabort_file"):
move_file(getattr(self, fname))
with open(reset_file, "wt") as fh:
fh.write(str(num_reset))
self.start_lockfile.remove()
# Reset datetimes
self.datetimes.reset()
return self._restart(submit=False)
def fix_queue_critical(self):
"""
This function tries to fix critical events originating from the queue submission system.
General strategy, first try to increase resources in order to fix the problem,
if this is not possible, call a task specific method to attempt to decrease the demands.
Returns:
1 if task has been fixed else 0.
"""
from pymatgen.io.abinit.scheduler_error_parsers import NodeFailureError, MemoryCancelError, TimeCancelError
#assert isinstance(self.manager, TaskManager)
if not self.queue_errors:
if self.mem_scales or self.load_scales:
try:
self.manager.increase_resources() # acts either on the policy or on the qadapter
self.reset_from_scratch()
return
except ManagerIncreaseError:
self.set_status(self.S_ERROR, msg='unknown queue error, could not increase resources any further')
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='unknown queue error, no options left')
raise FixQueueCriticalError
else:
for error in self.queue_errors:
logger.info('fixing: %s' % str(error))
if isinstance(error, NodeFailureError):
# if the problematic node is known, exclude it
if error.nodes is not None:
try:
self.manager.exclude_nodes(error.nodes)
self.reset_from_scratch()
self.set_status(self.S_READY, msg='excluding nodes')
except:
raise FixQueueCriticalError
else:
self.set_status(self.S_ERROR, msg='Node error but no node identified.')
raise FixQueueCriticalError
elif isinstance(error, MemoryCancelError):
# ask the qadapter to provide more resources, i.e. more CPUs so more total memory if the code
# scales; this should fix the memory problem
# increase both max and min ncpus of the autoparal and rerun autoparal
if self.mem_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased ncpus to solve memory problem')
return
except ManagerIncreaseError:
logger.warning('increasing ncpus failed')
# if the max is reached, try to increase the memory per cpu:
try:
self.manager.increase_mem()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased mem')
return
except ManagerIncreaseError:
logger.warning('increasing mem failed')
# if this failed ask the task to provide a method to reduce the memory demand
try:
self.reduce_memory_demand()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='decreased mem demand')
return
except DecreaseDemandsError:
logger.warning('decreasing demands failed')
msg = ('Memory error detected but the memory could not be increased, neither could the\n'
'memory demand be decreased. Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
raise FixQueueCriticalError
elif isinstance(error, TimeCancelError):
# ask the qadapter to provide more time
try:
self.manager.increase_time()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased wall time')
return
except ManagerIncreaseError:
logger.warning('increasing the walltime failed')
# if this fails ask the qadapter to increase the number of cpus
if self.load_scales:
try:
self.manager.increase_ncpus()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='increased number of cpus')
return
except ManagerIncreaseError:
logger.warning('increasing ncpus to speed up the calculation and stay within the walltime failed')
# if this failed ask the task to provide a method to speed up the task
try:
self.speed_up()
self.reset_from_scratch()
self.set_status(self.S_READY, msg='task speedup')
return
except DecreaseDemandsError:
logger.warning('decreasing demands failed')
msg = ('Time cancel error detected but the time could not be increased neither could\n'
'the time demand be decreased by speeding up the task or increasing the number of cpus.\n'
'Unrecoverable error.')
self.set_status(self.S_ERROR, msg)
else:
msg = 'No solution provided for error %s. Unrecoverable error.' % error.name
self.set_status(self.S_ERROR, msg)
return 0
def autoparal_run(self):
"""
Find an optimal set of parameters for the execution of the Optic task
This method can change the submission parameters e.g. the number of CPUs for MPI and OpenMp.
Returns 0 if success
"""
policy = self.manager.policy
if policy.autoparal == 0: # or policy.max_ncpus in [None, 1]:
logger.info("Nothing to do in autoparal, returning (None, None)")
return 0
if policy.autoparal != 1:
raise NotImplementedError("autoparal != 1")
############################################################################
# Run ABINIT in sequential to get the possible configurations with max_ncpus
############################################################################
# Set the variables for automatic parallelization
# Will get all the possible configurations up to max_ncpus
# Return immediately if max_ncpus == 1
max_ncpus = self.manager.max_cores
if max_ncpus == 1: return 0
autoparal_vars = dict(autoparal=policy.autoparal, max_ncpus=max_ncpus)
self.set_vars(autoparal_vars)
# Run the job in a shell subprocess with mpi_procs = 1
# we don't want to make a request to the queue manager for this simple job!
# Return code is always != 0
process = self.manager.to_shell_manager(mpi_procs=1).launch(self)
self.history.pop()
retcode = process.wait()
# To avoid: ResourceWarning: unclosed file <_io.BufferedReader name=87> in py3k
process.stderr.close()
# Remove the variables added for the automatic parallelization
self.input.remove_vars(list(autoparal_vars.keys()))
##############################################################
# Parse the autoparal configurations from the main output file
##############################################################
parser = ParalHintsParser()
try:
pconfs = parser.parse(self.output_file.path)
except parser.Error:
logger.critical("Error while parsing Autoparal section:\n%s" % straceback())
return 2
######################################################
# Select the optimal configuration according to policy
######################################################
#optconf = self.find_optconf(pconfs)
# Select the partition on which we'll be running and set MPI/OMP cores.
optconf = self.manager.select_qadapter(pconfs)
####################################################
# Change the input file and/or the submission script
####################################################
self.set_vars(optconf.vars)
# Write autoparal configurations to JSON file.
d = pconfs.as_dict()
d["optimal_conf"] = optconf
json_pretty_dump(d, os.path.join(self.workdir, "autoparal.json"))
##############
# Finalization
##############
# Reset the status, remove garbage files ...
self.set_status(self.S_INIT, msg='finished autoparal')
# Remove the output file since Abinit likes to create new files
# with extension .outA, .outB if the file already exists.
os.remove(self.output_file.path)
#os.remove(self.log_file.path)
os.remove(self.stderr_file.path)
return 0
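# Illustrative sketch (not part of the library): `filesfile_string` and `make_input` above
# build two small pieces of text for optic. The paths, the prefix and the namelist layout
# shown here are assumptions used only to illustrate the expected shape; real values come
# from the task and from `nmltostring` at runtime.
def _sketch_optic_files_and_input(workdir="/tmp/flow/w0/t3", prefix_odata="out"):
    """Return (files_file, files_namelist) strings with the layout optic expects."""
    import os
    # Three lines: input file, unused output file, prefix for produced data (see filesfile_string).
    files_file = "\n".join([
        os.path.join(workdir, "run.abi"),    # hypothetical input file
        os.path.join(workdir, "unused"),     # output file, ignored by optic
        os.path.join(workdir, prefix_odata), # root name for all produced files
    ])
    # make_input prepends a FILES namelist with the DDK/WFK paths, roughly of the form:
    files_namelist = (
        "&FILES\n"
        ' ddkfile_1 = "/path/to/t0/outdata/out_1WF1",\n'
        ' ddkfile_2 = "/path/to/t1/outdata/out_1WF2",\n'
        ' ddkfile_3 = "/path/to/t2/outdata/out_1WF3",\n'
        ' wfkfile = "/path/to/nscf/outdata/out_WFK"\n'
        "/\n"
    )
    return files_file, files_namelist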
class AnaddbTask(Task):
"""Task for Anaddb runs (post-processing of DFPT calculations)."""
color_rgb = np.array((204, 102, 255)) / 255
def __init__(self, anaddb_input, ddb_node,
gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None):
"""
Create an instance of :class:`AnaddbTask` from a string containing the input.
Args:
anaddb_input: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
gkk_node: The node that will produce the GKK file (optional). Accept :class:`Task`, :class:`Work` or filepath.
md_node: The node that will produce the MD file (optional). Accept `Task`, `Work` or filepath.
ddk_node: The node that will produce the DDK file (optional). Accept `Task`, `Work` or filepath.
workdir: Path to the working directory (optional).
manager: :class:`TaskManager` object (optional).
"""
# Keep a reference to the nodes.
self.ddb_node = Node.as_node(ddb_node)
deps = {self.ddb_node: "DDB"}
self.gkk_node = Node.as_node(gkk_node)
if self.gkk_node is not None:
deps.update({self.gkk_node: "GKK"})
# I never used it!
self.md_node = Node.as_node(md_node)
if self.md_node is not None:
deps.update({self.md_node: "MD"})
self.ddk_node = Node.as_node(ddk_node)
if self.ddk_node is not None:
deps.update({self.ddk_node: "DDK"})
super(AnaddbTask, self).__init__(input=anaddb_input, workdir=workdir, manager=manager, deps=deps)
@classmethod
def temp_shell_task(cls, inp, ddb_node,
gkk_node=None, md_node=None, ddk_node=None, workdir=None, manager=None):
"""
Build a :class:`AnaddbTask` with a temporary workdir. The task is executed via
the shell with 1 MPI proc. Mainly used for post-processing the DDB files.
Args:
inp: string with the anaddb variables.
ddb_node: The node that will produce the DDB file. Accept :class:`Task`, :class:`Work` or filepath.
See `AnaddbInit` for the meaning of the other arguments.
"""
# Build a simple manager to run the job in a shell subprocess
import tempfile
workdir = tempfile.mkdtemp() if workdir is None else workdir
if manager is None: manager = TaskManager.from_user_config()
# Construct the task and run it
return cls(inp, ddb_node,
gkk_node=gkk_node, md_node=md_node, ddk_node=ddk_node,
workdir=workdir, manager=manager.to_shell_manager(mpi_procs=1))
@property
def executable(self):
"""Path to the executable required for running the :class:`AnaddbTask`."""
try:
return self._executable
except AttributeError:
return "anaddb"
@property
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
app(self.input_file.path) # 1) Path of the input file
app(self.output_file.path) # 2) Path of the output file
app(self.ddb_filepath) # 3) Input derivative database e.g. t13.ddb.in
app(self.md_filepath) # 4) Output molecular dynamics e.g. t13.md
app(self.gkk_filepath) # 5) Input elphon matrix elements (GKK file)
app(self.outdir.path_join("out")) # 6) Base name for elphon output files e.g. t13
app(self.ddk_filepath) # 7) File containing ddk filenames for elphon/transport.
return "\n".join(lines)
@property
def ddb_filepath(self):
"""Returns (at runtime) the absolute path of the input DDB file."""
# This is not very elegant! A possible approach could be to use self.ddb_node.outdir!
if isinstance(self.ddb_node, FileNode): return self.ddb_node.filepath
path = self.ddb_node.outdir.has_abiext("DDB")
return path if path else "DDB_FILE_DOES_NOT_EXIST"
@property
def md_filepath(self):
"""Returns (at runtime) the absolute path of the input MD file."""
if self.md_node is None: return "MD_FILE_DOES_NOT_EXIST"
if isinstance(self.md_node, FileNode): return self.md_node.filepath
path = self.md_node.outdir.has_abiext("MD")
return path if path else "MD_FILE_DOES_NOT_EXIST"
@property
def gkk_filepath(self):
"""Returns (at runtime) the absolute path of the input GKK file."""
if self.gkk_node is None: return "GKK_FILE_DOES_NOT_EXIST"
if isinstance(self.gkk_node, FileNode): return self.gkk_node.filepath
path = self.gkk_node.outdir.has_abiext("GKK")
return path if path else "GKK_FILE_DOES_NOT_EXIST"
@property
def ddk_filepath(self):
"""Returns (at runtime) the absolute path of the input DKK file."""
if self.ddk_node is None: return "DDK_FILE_DOES_NOT_EXIST"
if isinstance(self.ddk_node, FileNode): return self.ddk_node.filepath
path = self.ddk_node.outdir.has_abiext("DDK")
return path if path else "DDK_FILE_DOES_NOT_EXIST"
def setup(self):
"""Public method called before submitting the task."""
def make_links(self):
"""
Anaddb allows the user to specify the paths of the input files,
hence we don't need to create symbolic links.
"""
def open_phbst(self):
"""Open PHBST file produced by Anaddb and returns :class:`PhbstFile` object."""
from abipy.dfpt.phonons import PhbstFile
phbst_path = os.path.join(self.workdir, "run.abo_PHBST.nc")
if not os.path.exists(phbst_path):  # os.path.join always returns a truthy string; check for the file instead
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a PHBST file in %s" % (self, self.outdir))
return None
try:
return PhbstFile(phbst_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (phbst_path, str(exc)))
return None
def open_phdos(self):
"""Open PHDOS file produced by Anaddb and returns :class:`PhdosFile` object."""
from abipy.dfpt.phonons import PhdosFile
phdos_path = os.path.join(self.workdir, "run.abo_PHDOS.nc")
if not os.path.exists(phdos_path):  # same fix as open_phbst: check that the file actually exists
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a PHBST file in %s" % (self, self.outdir))
return None
try:
return PhdosFile(phdos_path)
except Exception as exc:
logger.critical("Exception while reading GSR file at %s:\n%s" % (phdos_path, str(exc)))
return None
def get_results(self, **kwargs):
results = super(AnaddbTask, self).get_results(**kwargs)
return results
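# Illustrative usage sketch, guarded so it never runs on import. Assumptions: a configured
# TaskManager is available (temp_shell_task falls back to TaskManager.from_user_config()),
# `anaddb_input` is a real anaddb input (here only a placeholder string), and "out_DDB" is
# the path of an existing DDB file; `start_and_wait` is assumed to be provided by the base
# Task class.
if __name__ == "__main__":
    anaddb_input = "..."  # placeholder, not a working input
    task = AnaddbTask.temp_shell_task(anaddb_input, ddb_node="out_DDB")
    print(task.filesfile_string)   # the 7-line files file documented in filesfile_string above
    # task.start_and_wait()        # then task.open_phbst() / task.open_phdos() on the results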
|
xhqu1981/pymatgen
|
pymatgen/io/abinit/tasks.py
|
Python
|
mit
| 171,911
|
[
"ABINIT",
"NetCDF",
"Wannier90",
"pymatgen"
] |
2212330934bc017511117271fcaaac86ce06255eee94815d03a5e95d052c813b
|
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from theano import shared
from collections import OrderedDict
from logistic_sgd import LogisticRegression
from AutoEncoder import AutoEncoder, BernoulliAutoEncoder, GaussianAutoEncoder, ReluAutoEncoder
class SdA(object):
"""Stacked denoising auto-encoder class (SdA)
A stacked denoising autoencoder model is obtained by stacking several
dAs. The hidden layer of the dA at layer `i` becomes the input of
the dA at layer `i+1`. The first layer dA gets as input the input of
the SdA, and the hidden layer of the last dA represents the output.
"""
def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
hidden_layers_sizes=[500, 500], n_outs=-1,
corruption_levels=[0.1, 0.1], layer_types=['ReLU','ReLU'],
loss='squared', dropout_rates = None, sparse_init=-1, opt_method = 'NAG'):
""" This class is made to support a variable number of layers
:type numpy_rng: numpy.random.RandomState
:param numpy_rng: numpy random number generator used to draw initial
weights
:type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng`
:type n_ins: int
:param n_ins: dimension of the input to the sdA
:type hidden_layers_sizes: list of ints
:param hidden_layers_sizes: intermediate layer sizes, must contain
at least one value
:type n_outs: int
:param n_outs: dimension of the output of the network. Negative if
there is no logistic layer on top.
:type corruption_levels: list of float
:param corruption_levels: amount of corruption to use for each
layer
:type layer_types: list of string
:param layer_types: each entry specifies the AutoEncoder sub-class to
instantiate for each layer.
:type loss: string
:param loss: specify what loss function to use for reconstruction error
Currently supported: 'squared','xent','softplus'
:type dropout_rates: list of float
:param dropout_rates: proportion of output units to drop from this layer
Default is to retain all units in all layers
:type sparse_init: int
:param sparse_init: Initialize the weight matrices using Martens sparse initialization (Martens ICML 2010)
>0 specifies the number of units in the layer that have initial weights drawn from
a N(0,1). Use -1 for dense init.
:type opt_method: string
:param opt_method: specifies the optimization method used to fit the model parameters.
Accepted values are {'CM': Classical Momentum, 'NAG': Nesterov Accelerated Gradient.}
"""
self.dA_layers = []
self.params = []
self.layer_types = layer_types
# keep track of previous parameter updates so we can use momentum
self.updates = OrderedDict()
self.n_outs = n_outs
self.corruption_levels = corruption_levels
self.n_layers = len(hidden_layers_sizes)
# Calculate dropout params (or set if provided)
if dropout_rates is not None:
self.dropout_rates = dropout_rates
assert len(dropout_rates) == len(layer_types)
assert dropout_rates[-1] == 1.0
else:
self.dropout_rates = [1.0 for l in layer_types]
# sanity checks on parameter list sizes
assert self.n_layers > 0
assert len(hidden_layers_sizes) == len(corruption_levels) == len(layer_types)
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
self.x = T.matrix('x') # the training input
self.x_prime = T.matrix('X_prime') # the encoded output of the highest layer
if n_outs > 0:
self.y = T.ivector('y') # the labels (if present) are presented as 1D vector of
# [int] labels
# sanity check on loss parameter
assert loss.lower() in ['squared', 'xent', 'softplus']
self.use_loss = loss.lower()
# sanity check on optimization method
assert opt_method.upper() in ['CM','NAG']
self.opt_method = opt_method.upper()
# build each layer dynamically
layer_classes = {'gaussian': GaussianAutoEncoder, 'bernoulli': BernoulliAutoEncoder, 'relu': ReluAutoEncoder}
for i in xrange(self.n_layers):
# the size of the input is either the number of hidden units of
# the layer below or the input size if we are on the first layer.
# the input to this layer is either the activation of the hidden
# layer below or the input of the SdA if you are on the first
# layer
if i == 0:
input_size = n_ins
layer_input = self.x
else:
input_size = hidden_layers_sizes[i - 1]
layer_input = self.dA_layers[-1].output
# Call the appropriate dA subclass constructor
w_name = 'W_' + str(i)
bvis_name = 'bvis_' + str(i)
bhid_name = 'bhid_' + str(i)
# use .lower() so mixed-case names such as the default 'ReLU' map onto the lowercase keys above
dA_layer = layer_classes[layer_types[i].lower()].class_from_values(numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=input_size,
n_hidden=int(hidden_layers_sizes[i]),
W_name=w_name,
bvis_name=bvis_name,
bhid_name=bhid_name,
sparse_init=sparse_init)
self.dA_layers.append(dA_layer)
self.params.extend(dA_layer.params)
# Keep track of parameter updates so we may use momentum
for param in self.params:
init = np.zeros(param.get_value(borrow=True).shape,
dtype=theano.config.floatX)
update_name = param.name + '_update'
self.updates[param] = theano.shared(init, name=update_name)
if n_outs > 0:
self.logLayer = LogisticRegression(
input=self.dA_layers[-1].output,
n_in=hidden_layers_sizes[-1], n_out=n_outs)
self.params.extend(self.logLayer.params)
# compute the cost for second phase of training,
# defined as the negative log likelihood
self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
# compute the gradients with respect to the model parameters
# symbolic variable that points to the number of errors made on the
# minibatch given by self.x and self.y
self.errors = self.logLayer.errors(self.y)
else:
self.finish_sda_unsupervised()
def finish_sda_unsupervised(self):
""" Finish up unsupervised property settings for the model: set self.loss, self.finetune_cost, self.output, self.errors """
loss_dict = {'squared': self.squared_loss, 'xent': self.xent_loss, 'softplus': self.softplus_loss}
self.loss = loss_dict[self.use_loss]
self.finetune_cost = self.reconstruction_error(self.x)
self.output = self.encode(self.x)
self.errors = self.reconstruction_error(self.x)
def squared_loss(self,X,Z):
""" Return the theano expression for squared error loss
:type X: theano.tensor.TensorType
:param X: Shared variable that contains data
:type Z: theano.tensor.TensorType
:param Z: Shared variable that contains the reconstruction
of the data under the model)
"""
return T.sum((X - Z) **2, axis = 1)
def softplus_loss(self,X,Z):
""" Return the theano expression for softplus error loss
:type X: theano.tensor.TensorType
:param X: Shared variable that contains data
:type Z: theano.tensor.TensorType
:param Z: Shared variable that contains the reconstruction
of the data under the model)
"""
return T.sum((X - T.nnet.softplus(Z)) **2, axis = 1)
def xent_loss(self,X,Z):
""" Return the theano expression for cross entropy error loss
:type X: theano.tensor.TensorType
:param X: Shared variable that contains data
:type Z: theano.tensor.TensorType
:param Z: Shared variable that contains the reconstruction
of the data under the model)
"""
return -T.sum(X * T.log(Z) + (1 - X) * T.log(1 - Z), axis=1)
def reconstruct_input(self, X):
""" Given data X, provide the symbolic computation of
\hat{X} where \hat{X} is the reconstructed data output of the 'unrolled' SdA
:type X: theano.tensor.TensorType
:param X: Shared variable that contains data
to be pushed through the SdA (i.e reconstructed)
"""
X_prime = X
for dA in self.dA_layers:
X_prime = dA.get_hidden_values(X_prime)
for dA in self.dA_layers[::-1]:
X_prime = dA.get_reconstructed_input(X_prime)
return X_prime
def reconstruct_input_limited(self, X, i):
""" Given data X, provide the symbolic computation of
\hat{X} where \hat{X} is the reconstructed data output
using only the first i (counting from 0) layers of the 'unrolled' SdA """
X_prime = X
for dA in self.dA_layers[:i]:
X_prime = dA.get_hidden_values(X_prime)
for dA in self.dA_layers[i-1::-1]:
X_prime = dA.get_reconstructed_input(X_prime)
return X_prime
def reconstruct_input_dropout(self, X):
""" Given data X, provide the symbolic computation of
\hat{X} where \hat{X} is the reconstructed data vector output of the 'unrolled' SdA
Apply a dropout mask to the output of the previous layer
:type X: theano.tensor.TensorType
:param X: Shared variable that contains data
to be pushed through the SdA (i.e reconstructed)
"""
X_prime = X
for dA, p in zip(self.dA_layers,self.dropout_rates):
hidden = dA.get_hidden_values(X_prime)
X_prime = dA.dropout_from_layer(hidden,p)
for dA in self.dA_layers[::-1]:
X_prime = dA.get_reconstructed_input(X_prime)
return X_prime
def reconstruction_error(self, X):
""" Calculate the reconstruction error. Take a matrix of
training examples where X[i,:] is one data vector, return
the squared error between X, Z where Z is the reconstructed data.
:type X: theano.tensor.TensorType
:param X: Shared variable that contains a batch of datapoints
to be reconstructed
"""
Z = self.reconstruct_input(X)
L = self.loss(X,Z)
return T.mean(L)
def reconstruction_error_limited(self, X, limit):
""" Calculate the reconstruction error using a limited number of layers
in the SdA.
:type X: theano.tensor.TensorType
:param X: Shared variable that contains a batch of datapoints
to be reconstructed
:type limit: int
:param limit: Use the first 'limit' layers of the SdA for reconstruction
"""
Z = self.reconstruct_input_limited(X, limit)
L = self.loss(X,Z)
return T.mean(L)
def reconstruction_error_dropout(self, X):
""" Calculate the reconstruction error. Take a matrix of
training examples where X[i,:] is one data vector, return
the squared error between X, Z where Z is the reconstructed data.
:type X: theano.tensor.TensorType
:param X: Shared variable that contains a batch of datapoints
to be reconstructed
"""
Z = self.reconstruct_input_dropout(X)
L = self.loss(X,Z)
return T.mean(L)
def scale_dA_weights(self,factors):
""" Scale each dA weight matrix by some factor. Used primarily when encoding
data trained with an SdA where dropout was used in finetuning.
:type factors: list of floats
:param factors: scale the weight matrices by the factors in the list
"""
for dA,p in zip(self.dA_layers,factors):
W, _, _ = dA.get_params()
W.set_value(W.get_value(borrow=True) * p, borrow=True)
def encode(self,X):
""" Given data X, provide the symbolic computation of X_prime, by
passing X forward through to the top (lowest dimensional) layer of
the SdA
:type X: theano.tensor.TensorType
:param X: Shared variable that contains data
to be pushed through the SdA (i.e reconstructed)
"""
X_prime = X
for dA in self.dA_layers:
X_prime = dA.get_hidden_values(X_prime)
self.x_prime = X_prime
return self.x_prime
############################## Regularization functions #########
def max_norm_regularization(self):
'''
Define and return a list of theano function objects implementing max norm
regularization for each weight matrix in each layer of the SdA.
'''
norm_limit = T.scalar('norm_limit')
max_norm_updates = OrderedDict()
for param in self.params:
if param.get_value(borrow=True).ndim == 2:
# max-norm column regularization as per Pylearn2 MLP lib
col_norms = T.sqrt(T.sum(T.sqr(param), axis=0))
desired_norms = T.clip(col_norms, 0, norm_limit)
updated_W = param * (desired_norms / (1e-7 + col_norms))
max_norm_updates[param] = updated_W
fn = theano.function([norm_limit], [], updates = max_norm_updates)
return fn
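# Worked example of the max-norm clip above (illustrative numbers): if a weight column has
# norm 5.0 and norm_limit is 3.0, desired_norms = clip(5.0, 0, 3.0) = 3.0 and the column is
# rescaled by 3.0 / (1e-7 + 5.0), roughly 0.6; columns with norm <= 3.0 stay (almost) unchanged.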
def nag_param_update(self):
''' Define and return a theano function to apply momentum updates to each
parameter that is part of momentum updates '''
momentum = T.fscalar('momentum')
delta_t_updates = OrderedDict()
for param in self.params:
if param in self.updates:
delta_t = self.updates[param]
delta_t_updates[param] = param + momentum * delta_t
fn = theano.function([momentum], [], updates = delta_t_updates)
return fn
def sgd_cm(self, learning_rate, momentum, gparams):
''' Returns a dictionary of theano symbolic variables indicating how
the shared variable parameters in the SdA should be updated, using classical
momentum.
N.B: learning_rate should be a theano.shared variable declared in the
code driving the (pre)training of this SdA.
:type momentum: theano.TensorVariable
:param momentum: momentum parameter for SGD parameter updates
:type learning_rate: theano.tensor.shared
:param learning_rate: the learning rate for pretraining
:type gparams: list of tuples
:param gparams: list of tuples, each of which contains (param, gparam)
i.e the partial derivative of cost by each SdA parameter '''
updates = OrderedDict()
for param, grad_update in gparams:
if param in self.updates:
last_update = self.updates[param]
delta = momentum * last_update - learning_rate * grad_update
updates[param] = param + delta
# update value of theano.shared in self.updates[param]
updates[last_update] = delta
return updates
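# One-step example of the classical-momentum update above (illustrative numbers):
# with momentum = 0.9, learning_rate = 0.1, previous update = 0.5 and gradient = 1.0,
# delta = 0.9 * 0.5 - 0.1 * 1.0 = 0.35, so the parameter moves by +0.35 and 0.35 is
# stored as the new "last update" for the next iteration.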
def sgd_cm_wd(self, learning_rate, momentum, weight_decay, gparams):
''' Returns a dictionary of theano symbolic variables indicating how
the shared variable parameters in the SdA should be updated, using classical
momentum.
N.B: learning_rate should be a theano.shared variable declared in the
code driving the (pre)training of this SdA.
:type momentum: theano.TensorVariable
:param momentum: momentum parameter for SGD parameter updates
:type weight_decay: theano.TensorVariable
:param weight_decay: weight decay regularization parameter for SGD parameter updates
:type learning_rate: theano.tensor.shared
:param learning_rate: the learning rate for pretraining
:type gparams: list of tuples
:param gparams: list of tuples, each of which contains (param, gparam)
i.e the partial derivative of cost by each SdA parameter '''
updates = OrderedDict()
for param, grad_update in gparams:
if param in self.updates:
last_update = self.updates[param]
delta = momentum * last_update - learning_rate * grad_update - learning_rate * weight_decay * last_update
updates[param] = param + delta
# update value of theano.shared in self.updates[param]
updates[last_update] = delta
return updates
def sgd_adagrad_momentum(self, momentum, learning_rate, gparams):
''' Returns a dictionary of theano symbolic variables indicating how
the shared variable parameters in the SdA should be updated, using AdaGrad
but with a decaying average of the gradients rather than sum
:type momentum: theano.TensorVariable
:param momentum: momentum parameter for SGD parameter updates
:type learning_rate: theano.tensor.shared
:param learning_rate: the base or master learning rate shared for all parameters
:type gparams: list of tuples
:param gparams: list of tuples, each of which contains (param, gparam)
i.e the partial derivative of cost by each SdA parameter '''
updates = OrderedDict()
for param, gparam in gparams:
grad_sqrd_hist = self.updates[param]
grad_sqrd = momentum * grad_sqrd_hist + (1 - momentum) * (gparam **2)
param_update_val = param - learning_rate * gparam / (1e-7 + (grad_sqrd)** 0.5)
updates[param] = param_update_val
# update value of theano.shared in self.updates[param]
updates[grad_sqrd_hist] = grad_sqrd
return updates
def sgd_adagrad_momentum_wd(self, momentum, learning_rate, weight_decay, gparams):
''' Returns a dictionary of theano symbolic variables indicating how
the shared variable parameters in the SdA should be updated, using AdaGrad
but with a decaying average of the gradients rather than sum
:type momentum: theano.TensorVariable
:param momentum: momentum parameter for SGD parameter updates
:type weight_decay: theano.TensorVariable
:param weight_decay: weight decay regularization parameter for SGD parameter updates
:type learning_rate: theano.tensor.shared
:param learning_rate: the base or master learning rate shared for all parameters
:type gparams: list of tuples
:param gparams: list of tuples, each of which contains (param, gparam)
i.e the partial derivative of cost by each SdA parameter '''
updates = OrderedDict()
for param, gparam in gparams:
grad_sqrd_hist = self.updates[param]
grad_sqrd = momentum * grad_sqrd_hist + (1 - momentum) * (gparam **2)
param_update_val = param - learning_rate * gparam / (1e-7 + (grad_sqrd)** 0.5) - learning_rate * weight_decay * param
updates[param] = param_update_val
# update value of theano.shared in self.updates[param]
updates[grad_sqrd_hist] = grad_sqrd
return updates
def sgd_adagrad(self, learning_rate, gparams):
''' Returns a dictionary of theano symbolic variables indicating how
the shared variable parameters in the SdA should be updated, using AdaGrad
:type learning_rate: theano.tensor.shared
:param learning_rate: the base or master learning rate shared for all parameters
:type gparams: list of tuples
:param gparams: list of tuples, each of which contains (param, gparam)
i.e the partial derivative of cost by each SdA parameter '''
updates = OrderedDict()
for param, gparam in gparams:
grad_sqrd_hist = self.updates[param]
grad_sqrd = grad_sqrd_hist + gparam **2
param_update_val = param - learning_rate * gparam / (1e-7 + (grad_sqrd)** 0.5)
updates[param] = param_update_val
updates[grad_sqrd_hist] = grad_sqrd
return updates
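# One-step example of the AdaGrad update above (illustrative numbers): with a squared-gradient
# history of 4.0, a new gradient of 3.0 and learning_rate = 0.1, the history becomes
# 4.0 + 9.0 = 13.0 and the parameter moves by -0.1 * 3.0 / (1e-7 + sqrt(13.0)), about -0.083,
# i.e. the effective step size shrinks as gradients accumulate.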
############################## Training functions ##########################
def pretraining_functions(self, train_set_x, batch_size, learning_rate,method='cm'):
''' Generates a list of functions, each of them implementing one
step in training the dA corresponding to the layer with same index.
The function takes a minibatch index, and so training one dA layer
corresponds to iterating this layer-specific training function in the
list over all minibatch indexes.
N.B: learning_rate should be a theano.shared variable declared in the
code driving the (pre)training of this SdA.
:type train_set_x: theano.tensor.TensorType
:param train_set_x: Shared variable that contains all datapoints used
for training the dA
:type batch_size: int
:param batch_size: size of a [mini]batch
:type learning_rate: theano.tensor.shared
:param learning_rate: the learning rate for pretraining
:type method: string
:param method: specifies the flavour of SGD used to train each dA layer. Accepted values are 'cm', 'adagrad', 'adagrad_momentum'
'''
# index to a minibatch
index = T.lscalar('index')
# % of corruption to use
corruption_level = T.scalar('corruption')
# momentum rate to use
momentum = T.scalar('momentum')
assert method in ['cm','adagrad','adagrad_momentum']
# beginning of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
pretrain_fns = []
for dA in self.dA_layers:
# get the cost and the updates list
cost, updates = dA.get_cost_gparams(corruption_level,learning_rate)
# apply the updates in accordance with the SGD method
if method == 'cm':
mod_updates = self.sgd_cm(learning_rate, momentum, updates)
input_list = [index,momentum,theano.Param(corruption_level, default=0.25)]
elif method == 'adagrad':
mod_updates = self.sgd_adagrad(learning_rate, updates)
input_list = [index,theano.Param(corruption_level, default=0.25)]
else:
mod_updates = self.sgd_adagrad_momentum(momentum, learning_rate, updates)
input_list = [index,momentum,theano.Param(corruption_level, default=0.25)]
# compile the theano function
fn = theano.function(inputs=input_list,
outputs=cost,
updates=mod_updates,
givens={self.x: train_set_x[batch_begin:
batch_end]})
# append `fn` to the list of functions
pretrain_fns.append(fn)
return pretrain_fns
def build_finetune_limited_reconstruction(self, train_set_x, batch_size, learning_rate, method='cm'):
''' Generates a list of theano functions, each of them implementing one
step in hybrid pretraining. Hybrid pretraining is training to minimize the
reconstruction error of the data against the representation produced using
two or more layers of the SdA.
N.B: learning_rate should be a theano.shared variable declared in the
code driving the (pre)training of this SdA.
:type train_set_x: theano.tensor.TensorType
:param train_set_x: Shared variable that contains all datapoints used
for training the dA
:type batch_size: int
:param batch_size: size of a [mini]batch
:type learning_rate: theano.tensor.shared
:param learning_rate: the learning rate for pretraining
:type method: string
:param method: specifies the flavour of SGD used to train each dA layer. Accepted values are 'cm', 'adagrad', 'adagrad_momentum' '''
# index to a minibatch
index = T.lscalar('index')
# momentum rate to use
momentum = T.scalar('momentum')
# weight decay to use
weight_decay = T.scalar('weight_decay')
# beginning of a batch, given `index`
batch_begin = index * batch_size
# ending of a batch given `index`
batch_end = batch_begin + batch_size
# sanity check on number of layers
assert 2 < len(self.dA_layers)
# Check on SGD method
assert method in ['cm','adagrad','adagrad_momentum','cm_wd','adagrad_momentum_wd']
hybrid_train_fns = []
for i in xrange(2,len(self.dA_layers)):
# get the subset of model params involved in the limited reconstruction
limited_params = self.params[:i*3]
# compute the gradients with respect to the partial model parameters
gparams = T.grad(self.reconstruction_error_limited(self.x, i), limited_params)
# Ensure that gparams has same size as limited_params
assert len(gparams) == len(limited_params)
# apply the updates in accordance with the SGD method
if method == 'cm':
mod_updates = self.sgd_cm(learning_rate, momentum, zip(limited_params,gparams))
input_list = [index,momentum]
elif method == 'adagrad':
mod_updates = self.sgd_adagrad(learning_rate, zip(limited_params,gparams))
input_list = [index]
elif method == 'adagrad_momentum':
mod_updates = self.sgd_adagrad_momentum(momentum, learning_rate, zip(limited_params,gparams))
input_list = [index,momentum]
elif method == 'cm_wd':
mod_updates = self.sgd_cm_wd(learning_rate, momentum, weight_decay, zip(limited_params,gparams))
input_list = [index,momentum,weight_decay]
else:
mod_updates = self.sgd_adagrad_momentum_wd(momentum, learning_rate, weight_decay, zip(limited_params,gparams))
input_list = [index,momentum,weight_decay]
# the hybrid pre-training function now takes into account the update algorithm and proper input
fn = theano.function(inputs=input_list,
outputs=self.reconstruction_error_limited(self.x, i),
updates=mod_updates,
givens={self.x: train_set_x[batch_begin:
batch_end]})
# append `fn` to the list of functions
hybrid_train_fns.append(fn)
return hybrid_train_fns
def build_finetune_full_reconstruction(self, datasets, batch_size, learning_rate, method='cm'):
'''
Generates a function `train` that implements one step of
finetuning, a function `validate` that computes the reconstruction
error on a batch from the validation set
:type datasets: tuple of theano.tensor.TensorType
:param datasets: A tuple of two datasets;
`train`, `valid` in this order, each
one is a T.dmatrix of datapoints
:type batch_size: int
:param batch_size: size of a minibatch
:type learning_rate: theano.tensor.shared
:param learning_rate: learning rate used during finetune stage
:type method: string
:param method: specifies the flavour of SGD used to train each dA layer. Accepted values are 'cm', 'adagrad', 'adagrad_momentum'
'''
(train_set_x, valid_set_x) = datasets
# compute number of minibatches for training, validation and testing
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_valid_batches /= batch_size
index = T.lscalar('index') # index to a [mini]batch
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
# momentum rate to use
momentum = T.scalar('momentum')
# weight decay value to use
weight_decay = T.scalar('weight_decay')
assert method in ['cm','adagrad','adagrad_momentum','cm_wd','adagrad_momentum_wd']
# apply the updates in accordance with the SGD method
if method == 'cm':
mod_updates = self.sgd_cm(learning_rate, momentum, zip(self.params,gparams))
input_list = [index,momentum]
elif method == 'adagrad':
mod_updates = self.sgd_adagrad(learning_rate, zip(self.params,gparams))
input_list = [index]
elif method == 'adagrad_momentum':
mod_updates = self.sgd_adagrad_momentum(momentum, learning_rate, zip(self.params,gparams))
input_list = [index,momentum]
elif method == 'cm_wd':
mod_updates = self.sgd_cm_wd(learning_rate, momentum, weight_decay, zip(self.params,gparams))
input_list = [index,momentum,weight_decay]
else:
mod_updates = self.sgd_adagrad_momentum_wd(momentum, learning_rate, weight_decay, zip(self.params,gparams))
input_list = [index,momentum,weight_decay]
# compile the fine-tuning theano function, taking into account the update algorithm
train_fn = theano.function(inputs=input_list,
outputs=self.finetune_cost,
updates=mod_updates,
givens={
self.x: train_set_x[index * batch_size:
(index + 1) * batch_size]})
valid_score_i = theano.function([index], self.errors,
givens={
self.x: valid_set_x[index * batch_size:
(index + 1) * batch_size]})
# Create a function that scans the entire validation set
def valid_score():
return [valid_score_i(i) for i in xrange(n_valid_batches)]
return train_fn, valid_score
def build_encoding_functions(self, dataset):
''' Generates a function `encode` that feeds the data forward
through the layers of the SdA and results in a lower dimensional
output, which is the representation of the highest layer.
:type dataset: theano.tensor.TensorType
:param dataset: A T.dmatrix of datapoints to be fed through the SdA
'''
start = T.lscalar('start')
end = T.lscalar('end')
encode_fn = theano.function(inputs=[start,end],
outputs=self.output,
givens={self.x: dataset[start:end]})
return encode_fn
def test_gradient(self,dataset,index=1,batch_size=1):
''' Return a Theano function that will evaluate
the gradient wrt some points sampled from the provided dataset.
Example provided by http://deeplearning.net/software/theano/tutorial/gradients.html#tutcomputinggrads
x = T.dmatrix('x')
s = T.sum(1 / (1 + T.exp(-x)))
gs = T.grad(s, x)
dlogistic = function([x], gs)
dlogistic([[0, 1], [-1, -2]])
:type dataset: theano.tensor.TensorType
:param dataset: A T.dmatrix of datapoints, should be a shared variable.
:type index: int
:param index: identifies the start of the gradient test batch of data, a subset of dataset.
:type batch_size: int
:param batch_size: size of the test batch.
'''
index_val = T.lscalar('gtestindex') # index to a [mini]batch
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
# create a function to evaluate the gradient on the batch at index
eval_grad = theano.function(inputs=[index_val], outputs=gparams, givens= {self.x: dataset[index_val * batch_size: (index_val + 1) * batch_size]})
return eval_grad
##################### Pickling functions ###############################
def __getstate__(self):
""" Pickle this SdA by tupling up the layers, output size, dA param lists, corruption levels and layer types. """
W_list = []
bhid_list = []
bvis_list = []
for layer in self.dA_layers:
W, bhid, bvis = layer.get_params()
W_list.append(W.get_value(borrow=True))
bhid_list.append(bhid.get_value(borrow=True))
bvis_list.append(bvis.get_value(borrow=True))
return (self.n_layers, self.n_outs, W_list, bhid_list, bvis_list, self.corruption_levels, self.layer_types, self.use_loss, self.dropout_rates, self.opt_method)
def __setstate__(self, state):
""" Unpickle an SdA model by restoring the list of dA layers.
The input should be provided to the initial layer, and the input of layer i+1 is set to the output of layer i.
Fill up the self.params from the dA params lists. """
(layers, n_outs, dA_W_list, dA_bhid_list, dA_bvis_list, corruption_levels, layer_types, use_loss, dropout_rates, opt_method) = state
self.n_layers = layers
self.n_outs = n_outs
self.corruption_levels = corruption_levels
self.layer_types = layer_types
self.dA_layers = []
self.use_loss = use_loss
self.opt_method = opt_method
self.params = []
self.x = T.matrix('x') # symbolic input for the training data
self.x_prime = T.matrix('X_prime') # symbolic output for the top layer dA
numpy_rng = np.random.RandomState(123)
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# Set the dropout rates
if dropout_rates is not None:
self.dropout_rates = dropout_rates
else:
self.dropout_rates = [1.0 for i in xrange(self.n_layers)]
# build each layer dynamically
layer_classes = {'gaussian': GaussianAutoEncoder, 'bernoulli': BernoulliAutoEncoder, 'relu': ReluAutoEncoder}
for i in xrange(self.n_layers):
# the input to this layer is either the activation of the hidden
# layer below or the input of the SdA if you are on the first
# layer
if i == 0:
layer_input = self.x
else:
layer_input = self.dA_layers[i-1].output
# Rebuild the dA layer from the values provided in layer_types, dA_<param>_lists
n_visible,n_hidden = dA_W_list[i].shape
w_name = 'W_' + str(i)
bhid_name = 'bhid_' + str(i)
bvis_name = 'bvis_' + str(i)
lt = layer_types[i].lower()
dA_layer = layer_classes[lt](numpy_rng=numpy_rng,
theano_rng=theano_rng,
input=layer_input,
n_visible=n_visible,
n_hidden=n_hidden,
W=shared(value=dA_W_list[i],name=w_name),
bhid=shared(value=dA_bhid_list[i],name=bhid_name),
bvis=shared(value=dA_bvis_list[i],name=bvis_name))
self.dA_layers.append(dA_layer)
self.params.extend(self.dA_layers[i].params)
# Reconstruct the dictionary of shared vars for parameter updates
# so we can use momentum when training.
self.updates = {}
for param in self.params:
init = np.zeros(param.get_value(borrow=True).shape,
dtype=theano.config.floatX)
update_name = param.name + '_update'
self.updates[param] = theano.shared(init, name=update_name)
# Reconstruct the finetuning cost functions
if n_outs > 0:
self.reconstruct_loglayer(n_outs)
else:
self.finish_sda_unsupervised()
#################### Legacy code below: logistic-layer top for SdAs that were intended for a dual MLP
#################### and the associated supervised fine-tuning function.
def reconstruct_loglayer(self, n_outs = 10):
""" Reconstruct a logistic layer on top of a previously trained SdA """
# We now need to add a logistic layer on top of the MLP
self.logLayer = LogisticRegression(
input=self.dA_layers[-1].output,
n_in=self.dA_layers[-1].n_hidden, n_out=n_outs)
self.params.extend(self.logLayer.params)
# construct a function that implements one step of finetunining
# compute the cost for second phase of training,
# defined as the negative log likelihood
self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)
# compute the gradients with respect to the model parameters
# symbolic variable that points to the number of errors made on the
# minibatch given by self.x and self.y
self.errors = self.logLayer.errors(self.y)
def build_finetune_functions(self, datasets, batch_size, learning_rate):
'''Generates a function `train` that implements one step of
finetuning, a function `validate` that computes the error on
a batch from the validation set, and a function `test` that
computes the error on a batch from the testing set
:type datasets: list of pairs of theano.tensor.TensorType
:param datasets: It is a list that contains all the datasets;
it has to contain three pairs, `train`,
`valid`, `test` in this order, where each pair
is formed of two Theano variables, one for the
datapoints, the other for the labels
:type batch_size: int
:param batch_size: size of a minibatch
:type learning_rate: float
:param learning_rate: learning rate used during finetune stage
'''
(train_set_x, train_set_y) = datasets[0]
(valid_set_x, valid_set_y) = datasets[1]
(test_set_x, test_set_y) = datasets[2]
# compute number of minibatches for training, validation and testing
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]
n_valid_batches /= batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0]
n_test_batches /= batch_size
index = T.lscalar('index') # index to a [mini]batch
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
# compute list of fine-tuning updates
updates = []
for param, gparam in zip(self.params, gparams):
updates.append((param, param - gparam * learning_rate))
train_fn = theano.function(inputs=[index],
outputs=self.finetune_cost,
updates=updates,
givens={
self.x: train_set_x[index * batch_size:
(index + 1) * batch_size],
self.y: train_set_y[index * batch_size:
(index + 1) * batch_size]})
test_score_i = theano.function([index], self.errors,
givens={
self.x: test_set_x[index * batch_size:
(index + 1) * batch_size],
self.y: test_set_y[index * batch_size:
(index + 1) * batch_size]})
valid_score_i = theano.function([index], self.errors,
givens={
self.x: valid_set_x[index * batch_size:
(index + 1) * batch_size],
self.y: valid_set_y[index * batch_size:
(index + 1) * batch_size]})
# Create a function that scans the entire validation set
def valid_score():
return [valid_score_i(i) for i in xrange(n_valid_batches)]
# Create a function that scans the entire test set
def test_score():
return [test_score_i(i) for i in xrange(n_test_batches)]
return train_fn, valid_score, test_score
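# Illustrative layer-wise pretraining sketch, guarded so it never runs on import. Assumptions:
# the data is random, the hyperparameters are arbitrary, `learning_rate` is a theano shared
# variable as required by `pretraining_functions`, and the AutoEncoder helpers used above
# (class_from_values, get_cost_gparams) behave as this module expects. With method='cm' each
# compiled function takes (index, momentum) plus an optional corruption level (default 0.25).
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    data = np.asarray(rng.rand(1000, 784), dtype=theano.config.floatX)
    train_set_x = shared(data, borrow=True)
    sda = SdA(numpy_rng=rng, n_ins=784,
              hidden_layers_sizes=[256, 64],
              corruption_levels=[0.1, 0.1],
              layer_types=['gaussian', 'bernoulli'])
    learning_rate = shared(np.asarray(0.01, dtype=theano.config.floatX))
    batch_size = 100
    pretrain_fns = sda.pretraining_functions(train_set_x, batch_size, learning_rate, method='cm')
    n_batches = data.shape[0] // batch_size
    for layer_fn in pretrain_fns:
        for epoch in xrange(5):
            costs = [layer_fn(index=i, momentum=0.9) for i in xrange(n_batches)]
            print('epoch %d, mean cost %.4f' % (epoch, np.mean(costs)))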
|
lzamparo/SdA_reduce
|
theano_models/SdA/SdA.py
|
Python
|
bsd-3-clause
| 43,640
|
[
"Gaussian"
] |
2b100a688a4c6a585a75637e2f31a22b894c62ac2026611614f1c5b1c4a41ead
|
"""PDBFetcher: A simple python API for querying the RCSB PDB and
downloading PDB files"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
DOCLINES = __doc__.split("\n")
import os
import sys
import tempfile
import shutil
import subprocess
from glob import glob
from distutils.version import StrictVersion
from distutils.command.build_scripts import build_scripts
from setuptools import setup
PY3 = sys.version_info >= (3,0)
#########################################
VERSION = "0.0.1"
ISRELEASED = False
__author__ = "Christian Schwantes"
__version__ = VERSION
########################################
def warn_on_version(module_name, minimum=None, package_name=None, recommend_conda=True):
if package_name is None:
package_name = module_name
class VersionError(Exception):
pass
msg = None
try:
package = __import__(module_name)
if minimum is not None:
try:
v = package.version.short_version
except AttributeError:
v = package.__version__
if StrictVersion(v) < StrictVersion(minimum):
raise VersionError
except ImportError:
if minimum is None:
msg = 'pdbfetcher requires the python package "%s", which is not installed.' % package_name
else:
msg = 'pdbfetcher requires the python package "%s", version %s or later.' % (package_name, minimum)
except VersionError:
msg = ('pdbfetcher requires the python package "%s", version %s or '
' later. You have version %s installed. You will need to upgrade.') % (package_name, minimum, v)
if recommend_conda:
install = ('\nTo install %s, we recommend the conda package manager. See http://conda.pydata.org for info on conda.\n'
'Using conda, you can install it with::\n\n $ conda install %s') % (package_name, package_name)
install += '\n\nAlternatively, with pip you can install the package with:\n\n $ pip install %s' % package_name
else:
install = '\nWith pip you can install the package with:\n\n $ pip install %s' % package_name
if msg:
banner = ('==' * 40)
print('\n'.join([banner, banner, "", msg, install, "", banner, banner]))
# metadata for setup()
metadata = {
'name': 'pdbfetcher',
'version': VERSION,
'author': __author__,
'author_email': 'schwancr@stanford.edu',
'license': 'GPL v3.0',
'url': 'github.com/schwancr/pdbfetcher',
'download_url': 'github.com/schwancr/pdbfetcher',
'platforms': ["Linux", "Mac OS X"],
'description': DOCLINES[0],
'long_description':"\n".join(DOCLINES[2:]),
'packages': ['pdbfetcher', 'pdbfetcher.scripts'],
'package_dir': {'pdbfetcher': 'pdbfetcher', 'pdbfetcher.scripts': 'scripts'},
'zip_safe': False,
'entry_points': {'console_scripts':
['get_pdb.py = pdbfetcher.scripts.get_pdb:entry_point']}
}
# Return the git revision as a string
# copied from numpy setup.py
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout = subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def write_version_py(filename='pdbfetcher/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM PDBFETCHER SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev-' + GIT_REVISION[:7]
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version' : FULLVERSION,
'git_revision' : GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
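# Worked example of the version string above (hypothetical revision): with VERSION = "0.0.1",
# ISRELEASED = False and a git HEAD of "abcdef1234...", write_version_py() records
# full_version = "0.0.1.dev-abcdef1", and version.py then exposes that string as `version`.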
write_version_py()
setup(**metadata)
# running these after setup() ensures that they show
# at the bottom of the output, since setup() prints
# a lot to stdout. helps them not get lost
#warn_on_version('numpy', '1.6.0')
#warn_on_version('scipy', '0.11.0')
#warn_on_version('tables', '2.4.0', package_name='pytables')
#warn_on_version('fastcluster', '1.1.13')
#warn_on_version('yaml', package_name='pyyaml')
warn_on_version('mdtraj', '0.8.0')
|
schwancr/pdbfetcher
|
setup.py
|
Python
|
mit
| 5,152
|
[
"MDTraj"
] |
98076a49c1eb8469e3ccec3535778078adc54a2e1f09bf81eca9ed39977c18f8
|
"""Provide variant calling with VarScan from TGI at Wash U.
http://varscan.sourceforge.net/
"""
import os
import sys
from bcbio import broad, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.variation import samtools, vcfutils
from bcbio.variation.vcfutils import (combine_variant_files, write_empty_vcf,
get_paired_bams, bgzip_and_index)
import pysam
def run_varscan(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
paired = get_paired_bams(align_bams, items)
if paired and paired.normal_bam and paired.tumor_bam:
call_file = samtools.shared_variantcall(_varscan_paired, "varscan",
align_bams, ref_file, items,
assoc_files, region, out_file)
else:
vcfutils.check_paired_problems(items)
call_file = samtools.shared_variantcall(_varscan_work, "varscan",
align_bams, ref_file,
items, assoc_files,
region, out_file)
return call_file
def _get_jvm_opts(config, tmp_dir):
"""Retrieve common options for running VarScan.
Handles jvm_opts, setting user and country to English to avoid issues
with different locales producing non-compliant VCF.
"""
resources = config_utils.get_resources("varscan", config)
jvm_opts = resources.get("jvm_opts", ["-Xmx750m", "-Xmx2g"])
jvm_opts = config_utils.adjust_opts(jvm_opts,
{"algorithm": {"memory_adjust":
{"magnitude": 1.1, "direction": "decrease"}}})
jvm_opts += ["-Duser.language=en", "-Duser.country=US"]
jvm_opts += broad.get_default_jvm_opts(tmp_dir)
return " ".join(jvm_opts)
def _varscan_options_from_config(config):
"""Retrieve additional options for VarScan from the configuration.
"""
opts = ["--min-coverage 5", "--p-value 0.98", "--strand-filter 1"]
resources = config_utils.get_resources("varscan", config)
if resources.get("options"):
opts += [str(x) for x in resources["options"]]
return opts
def spv_freq_filter(line, tumor_index):
"""Filter VarScan calls based on the SPV value and frequency.
Soft-filters (flags) calls with SPV < 0.05 and a tumor FREQ > 0.35.
False positives dominate these higher frequency, low SPV calls. They appear
to be primarily non-somatic/germline variants not removed by other filters.
"""
if line.startswith("#CHROM"):
headers = [('##FILTER=<ID=SpvFreq,Description="High frequency (tumor FREQ > 0.35) '
'and low p-value for somatic (SPV < 0.05)">')]
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
sample_ft = {a: v for (a, v) in zip(parts[8].split(":"), parts[9 + tumor_index].split(":"))}
freq = utils.safe_to_float(sample_ft.get("FREQ"))
spvs = [x for x in parts[7].split(";") if x.startswith("SPV=")]
spv = utils.safe_to_float(spvs[0].split("=")[-1] if spvs else None)
fname = None
if spv is not None and freq is not None:
if spv < 0.05 and freq > 0.35:
fname = "SpvFreq"
if fname:
if parts[6] in set([".", "PASS"]):
parts[6] = fname
else:
parts[6] += ";%s" % fname
line = "\t".join(parts)
return line
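# Illustrative only: a small demo of spv_freq_filter on a synthetic VCF data line.
def _demo_spv_freq_filter():
    """Hypothetical helper, not used by the pipeline. The coordinates, depths and sample
    columns are made up, and FREQ is assumed to have already been converted to a fraction
    (fix_varscan_output below does that). With SPV = 1e-03 (< 0.05) and a tumor FREQ of
    0.45 (> 0.35), the FILTER column of the returned line becomes SpvFreq.
    """
    line = "\t".join(["1", "12345", ".", "C", "T", ".", "PASS",
                      "DP=120;SS=2;SPV=1e-03", "GT:FREQ",
                      "0/0:0.01", "0/1:0.45"])
    return spv_freq_filter(line, tumor_index=1)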
def _varscan_paired(align_bams, ref_file, items, target_regions, out_file):
"""Run a paired VarScan analysis, also known as "somatic". """
max_read_depth = "1000"
config = items[0]["config"]
paired = get_paired_bams(align_bams, items)
if not paired.normal_bam:
affected_batch = items[0]["metadata"]["batch"]
message = ("Batch {} requires both tumor and normal BAM files for"
" VarScan cancer calling").format(affected_batch)
raise ValueError(message)
if not utils.file_exists(out_file):
assert out_file.endswith(".vcf.gz"), "Expect bgzipped output to VarScan"
normal_mpileup_cl = samtools.prep_mpileup([paired.normal_bam], ref_file,
config, max_read_depth,
target_regions=target_regions,
want_bcf=False)
tumor_mpileup_cl = samtools.prep_mpileup([paired.tumor_bam], ref_file,
config, max_read_depth,
target_regions=target_regions,
want_bcf=False)
base, ext = utils.splitext_plus(out_file)
indel_file = base + "-indel.vcf"
snp_file = base + "-snp.vcf"
with file_transaction(config, indel_file, snp_file) as (tx_indel, tx_snp):
with tx_tmpdir(items[0]) as tmp_dir:
jvm_opts = _get_jvm_opts(config, tmp_dir)
opts = " ".join(_varscan_options_from_config(config))
remove_zerocoverage = r"{ ifne grep -v -P '\t0\t\t$' || true; }"
export = utils.local_path_export()
varscan_cmd = ("{export} varscan {jvm_opts} somatic "
"<({normal_mpileup_cl} | {remove_zerocoverage}) "
"<({tumor_mpileup_cl} | {remove_zerocoverage}) "
"--output-snp {tx_snp} --output-indel {tx_indel} "
"--output-vcf {opts} ")
# add minimum AF
min_af = float(utils.get_in(paired.tumor_config, ("algorithm",
"min_allele_fraction"), 10)) / 100.0
varscan_cmd += "--min-var-freq {min_af} "
do.run(varscan_cmd.format(**locals()), "Varscan", None, None)
to_combine = []
for fname in [snp_file, indel_file]:
if utils.file_exists(fname):
fix_file = "%s-fix.vcf.gz" % (utils.splitext_plus(fname)[0])
with file_transaction(config, fix_file) as tx_fix_file:
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
py_cl = os.path.join(os.path.dirname(sys.executable), "py")
normal_name = paired.normal_name
tumor_name = paired.tumor_name
cmd = ("cat {fname} | "
"{py_cl} -x 'bcbio.variation.varscan.fix_varscan_output(x,"
""" "{normal_name}", "{tumor_name}")' | """
"{fix_ambig_ref} | {fix_ambig_alt} | ifne vcfuniqalleles | "
"""{py_cl} -x 'bcbio.variation.vcfutils.add_contig_to_header(x, "{ref_file}")' | """
"""bcftools filter -m + -s REJECT -e "SS != '.' && SS != '2'" 2> /dev/null | """
"bgzip -c > {tx_fix_file}")
do.run(cmd.format(**locals()), "Varscan paired fix")
to_combine.append(fix_file)
if not to_combine:
out_file = write_empty_vcf(out_file, config)
else:
out_file = combine_variant_files(to_combine,
out_file, ref_file, config,
region=target_regions)
if os.path.getsize(out_file) == 0:
write_empty_vcf(out_file)
if out_file.endswith(".gz"):
out_file = bgzip_and_index(out_file, config)
def fix_varscan_output(line, normal_name="", tumor_name=""):
"""Fix a varscan VCF line.
Fixes the ALT column and also fixes floating point values
    output as strings to be Floats: FREQ, SSC.
This function was contributed by Sean Davis <sdavis2@mail.nih.gov>,
with minor modifications by Luca Beltrame <luca.beltrame@marionegri.it>.
"""
line = line.strip()
tofix = ("##INFO=<ID=SSC", "##FORMAT=<ID=FREQ")
if(line.startswith("##")):
if line.startswith(tofix):
line = line.replace('Number=1,Type=String',
'Number=1,Type=Float')
return line
line = line.split("\t")
if line[0].startswith("#CHROM"):
if tumor_name and normal_name:
mapping = {"NORMAL": normal_name, "TUMOR": tumor_name}
base_header = line[:9]
old_samples = line[9:]
if len(old_samples) == 0:
return "\t".join(line)
samples = [mapping[sample_name] for sample_name in old_samples]
assert len(old_samples) == len(samples)
return "\t".join(base_header + samples)
else:
return "\t".join(line)
try:
REF, ALT = line[3:5]
except ValueError:
return "\t".join(line)
def _normalize_freq(line, sample_i):
"""Ensure FREQ genotype value is float as defined in header.
"""
ft_parts = line[8].split(":")
dat = line[sample_i].split(":")
# Non-conforming no-call sample, don't try to fix FREQ
if len(dat) != len(ft_parts):
return line
freq_i = ft_parts.index("FREQ")
try:
dat[freq_i] = str(float(dat[freq_i].rstrip("%")) / 100)
except ValueError: # illegal binary characters -- set frequency to zero
dat[freq_i] = "0.0"
line[sample_i] = ":".join(dat)
return line
if len(line) > 9:
line = _normalize_freq(line, 9)
if len(line) > 10:
line = _normalize_freq(line, 10)
# HACK: The position of the SS= changes, so we just search for it
ss_vals = [item for item in line[7].split(";") if item.startswith("SS=")]
if len(ss_vals) > 0:
somatic_status = int(ss_vals[0].split("=")[1]) # Get the number
else:
somatic_status = None
if somatic_status == 5:
# "Unknown" states are broken in current versions of VarScan
# so we just bail out here for now
return
# fix FREQ for any additional samples -- multi-sample VarScan calling
if len(line) > 11:
for i in range(11, len(line)):
line = _normalize_freq(line, i)
#FIXME: VarScan also produces invalid REF records (e.g. CAA/A)
# This is not handled yet.
if "+" in ALT or "-" in ALT:
if "/" not in ALT:
if ALT[0] == "+":
R = REF
A = REF + ALT[1:]
elif ALT[0] == "-":
R = REF + ALT[1:]
A = REF
else:
Ins = [p[1:] for p in ALT.split("/") if p[0] == "+"]
Del = [p[1:] for p in ALT.split("/") if p[0] == "-"]
if len(Del):
REF += sorted(Del, key=lambda x: len(x))[-1]
A = ",".join([REF[::-1].replace(p[::-1], "", 1)[::-1]
for p in Del] + [REF + p for p in Ins])
R = REF
REF = R
ALT = A
else:
ALT = ALT.replace('/', ',')
line[3] = REF
line[4] = ALT
return "\t".join(line)
def _create_sample_list(in_bams, vcf_file):
"""Pull sample names from input BAMs and create input sample list.
"""
out_file = "%s-sample_list.txt" % os.path.splitext(vcf_file)[0]
with open(out_file, "w") as out_handle:
for in_bam in in_bams:
with pysam.Samfile(in_bam, "rb") as work_bam:
for rg in work_bam.header.get("RG", []):
out_handle.write("%s\n" % rg["SM"])
return out_file
def _varscan_work(align_bams, ref_file, items, target_regions, out_file):
"""Perform SNP and indel genotyping with VarScan.
"""
config = items[0]["config"]
orig_out_file = out_file
out_file = orig_out_file.replace(".vcf.gz", ".vcf")
max_read_depth = "1000"
sample_list = _create_sample_list(align_bams, out_file)
mpileup = samtools.prep_mpileup(align_bams, ref_file, config, max_read_depth,
target_regions=target_regions, want_bcf=False)
# VarScan fails to generate a header on files that start with
# zerocoverage calls; strip these with grep, we're not going to
# call on them
remove_zerocoverage = r"{ ifne grep -v -P '\t0\t\t$' || true; }"
# we use ifne from moreutils to ensure we process only on files with input, skipping otherwise
# http://manpages.ubuntu.com/manpages/natty/man1/ifne.1.html
with tx_tmpdir(items[0]) as tmp_dir:
jvm_opts = _get_jvm_opts(config, tmp_dir)
opts = " ".join(_varscan_options_from_config(config))
min_af = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
py_cl = os.path.join(os.path.dirname(sys.executable), "py")
export = utils.local_path_export()
cmd = ("{export} {mpileup} | {remove_zerocoverage} | "
"ifne varscan {jvm_opts} mpileup2cns {opts} "
"--vcf-sample-list {sample_list} --min-var-freq {min_af} --output-vcf --variants | "
"""{py_cl} -x 'bcbio.variation.vcfutils.add_contig_to_header(x, "{ref_file}")' | """
"{py_cl} -x 'bcbio.variation.varscan.fix_varscan_output(x)' | "
"{fix_ambig_ref} | {fix_ambig_alt} | ifne vcfuniqalleles > {out_file}")
do.run(cmd.format(**locals()), "Varscan", None,
[do.file_exists(out_file)])
os.remove(sample_list)
# VarScan can create completely empty files in regions without
# variants, so we create a correctly formatted empty file
if os.path.getsize(out_file) == 0:
write_empty_vcf(out_file)
if orig_out_file.endswith(".gz"):
vcfutils.bgzip_and_index(out_file, config)
|
vladsaveliev/bcbio-nextgen
|
bcbio/variation/varscan.py
|
Python
|
mit
| 14,283
|
[
"pysam"
] |
92610d547943849f6975470050ceeba764b5e3b35906f1ef78e02b00a61cd028
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import warnings
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.ao2mo import _ao2mo
from pyscf.ao2mo.incore import iden_coeffs, _conc_mos
from pyscf.pbc.df.df_jk import zdotNN, zdotNC
from pyscf.pbc.df.fft_ao2mo import _format_kpts, _iskconserv
from pyscf.pbc.lib import kpts_helper
from pyscf.pbc.lib.kpts_helper import is_zero, gamma_point, unique
from pyscf import __config__
def get_eri(mydf, kpts=None,
compact=getattr(__config__, 'pbc_df_ao2mo_get_eri_compact', True)):
if mydf._cderi is None:
mydf.build()
cell = mydf.cell
nao = cell.nao_nr()
kptijkl = _format_kpts(kpts)
if not _iskconserv(cell, kptijkl):
lib.logger.warn(cell, 'df_ao2mo: momentum conservation not found in '
'the given k-points %s', kptijkl)
return numpy.zeros((nao,nao,nao,nao))
kpti, kptj, kptk, kptl = kptijkl
nao_pair = nao * (nao+1) // 2
max_memory = max(2000, mydf.max_memory-lib.current_memory()[0]-nao**4*16/1e6)
####################
# gamma point, the integral is real and with s4 symmetry
if gamma_point(kptijkl):
eriR = numpy.zeros((nao_pair,nao_pair))
for LpqR, LpqI, sign in mydf.sr_loop(kptijkl[:2], max_memory, True):
lib.ddot(LpqR.T, LpqR, sign, eriR, 1)
LpqR = LpqI = None
if not compact:
eriR = ao2mo.restore(1, eriR, nao).reshape(nao**2,-1)
return eriR
elif is_zero(kpti-kptk) and is_zero(kptj-kptl):
eriR = numpy.zeros((nao*nao,nao*nao))
eriI = numpy.zeros((nao*nao,nao*nao))
for LpqR, LpqI, sign in mydf.sr_loop(kptijkl[:2], max_memory, False):
zdotNN(LpqR.T, LpqI.T, LpqR, LpqI, sign, eriR, eriI, 1)
LpqR = LpqI = None
return eriR + eriI*1j
####################
# (kpt) i == j == k == l != 0
#
# (kpt) i == l && j == k && i != j && j != k =>
# both vbar and ovlp are zero. It corresponds to the exchange integral.
#
# complex integrals, N^4 elements
elif is_zero(kpti-kptl) and is_zero(kptj-kptk):
eriR = numpy.zeros((nao*nao,nao*nao))
eriI = numpy.zeros((nao*nao,nao*nao))
for LpqR, LpqI, sign in mydf.sr_loop(kptijkl[:2], max_memory, False):
zdotNC(LpqR.T, LpqI.T, LpqR, LpqI, sign, eriR, eriI, 1)
LpqR = LpqI = None
# transpose(0,1,3,2) because
# j == k && i == l =>
# (L|ij).transpose(0,2,1).conj() = (L^*|ji) = (L^*|kl) => (M|kl)
eri = lib.transpose((eriR+eriI*1j).reshape(-1,nao,nao), axes=(0,2,1))
return eri.reshape(nao**2,-1)
####################
# aosym = s1, complex integrals
#
# kpti == kptj => kptl == kptk
# If kpti == kptj, (kptl-kptk)*a has to be multiples of 2pi because of the wave
# vector symmetry. k is a fraction of reciprocal basis, 0 < k/b < 1, by definition.
# So kptl/b - kptk/b must be -1 < k/b < 1.
#
else:
eriR = numpy.zeros((nao*nao,nao*nao))
eriI = numpy.zeros((nao*nao,nao*nao))
blksize = int(max_memory*.4e6/16/nao**2)
for (LpqR, LpqI, sign), (LrsR, LrsI, sign1) in \
lib.izip(mydf.sr_loop(kptijkl[:2], max_memory, False, blksize),
mydf.sr_loop(kptijkl[2:], max_memory, False, blksize)):
zdotNN(LpqR.T, LpqI.T, LrsR, LrsI, sign, eriR, eriI, 1)
LpqR = LpqI = LrsR = LrsI = None
return eriR + eriI*1j
def general(mydf, mo_coeffs, kpts=None,
compact=getattr(__config__, 'pbc_df_ao2mo_general_compact', True)):
warn_pbc2d_eri(mydf)
if mydf._cderi is None:
mydf.build()
cell = mydf.cell
kptijkl = _format_kpts(kpts)
kpti, kptj, kptk, kptl = kptijkl
if isinstance(mo_coeffs, numpy.ndarray) and mo_coeffs.ndim == 2:
mo_coeffs = (mo_coeffs,) * 4
if not _iskconserv(cell, kptijkl):
lib.logger.warn(cell, 'df_ao2mo: momentum conservation not found in '
'the given k-points %s', kptijkl)
return numpy.zeros([mo.shape[1] for mo in mo_coeffs])
all_real = not any(numpy.iscomplexobj(mo) for mo in mo_coeffs)
max_memory = max(2000, (mydf.max_memory - lib.current_memory()[0]))
####################
# gamma point, the integral is real and with s4 symmetry
if gamma_point(kptijkl) and all_real:
ijmosym, nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1], compact)
klmosym, nkl_pair, mokl, klslice = _conc_mos(mo_coeffs[2], mo_coeffs[3], compact)
eri_mo = numpy.zeros((nij_pair,nkl_pair))
sym = (iden_coeffs(mo_coeffs[0], mo_coeffs[2]) and
iden_coeffs(mo_coeffs[1], mo_coeffs[3]))
ijR = klR = None
for LpqR, LpqI, sign in mydf.sr_loop(kptijkl[:2], max_memory, True):
ijR, klR = _dtrans(LpqR, ijR, ijmosym, moij, ijslice,
LpqR, klR, klmosym, mokl, klslice, sym)
lib.ddot(ijR.T, klR, sign, eri_mo, 1)
LpqR = LpqI = None
return eri_mo
elif is_zero(kpti-kptk) and is_zero(kptj-kptl):
mo_coeffs = _mo_as_complex(mo_coeffs)
nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1])[1:]
nkl_pair, mokl, klslice = _conc_mos(mo_coeffs[2], mo_coeffs[3])[1:]
eri_mo = numpy.zeros((nij_pair,nkl_pair), dtype=numpy.complex128)
sym = (iden_coeffs(mo_coeffs[0], mo_coeffs[2]) and
iden_coeffs(mo_coeffs[1], mo_coeffs[3]))
zij = zkl = None
for LpqR, LpqI, sign in mydf.sr_loop(kptijkl[:2], max_memory, False):
buf = LpqR+LpqI*1j
zij, zkl = _ztrans(buf, zij, moij, ijslice,
buf, zkl, mokl, klslice, sym)
lib.dot(zij.T, zkl, sign, eri_mo, 1)
LpqR = LpqI = buf = None
return eri_mo
####################
# (kpt) i == j == k == l != 0
# (kpt) i == l && j == k && i != j && j != k =>
#
elif is_zero(kpti-kptl) and is_zero(kptj-kptk):
mo_coeffs = _mo_as_complex(mo_coeffs)
nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1])[1:]
nlk_pair, molk, lkslice = _conc_mos(mo_coeffs[3], mo_coeffs[2])[1:]
eri_mo = numpy.zeros((nij_pair,nlk_pair), dtype=numpy.complex128)
sym = (iden_coeffs(mo_coeffs[0], mo_coeffs[3]) and
iden_coeffs(mo_coeffs[1], mo_coeffs[2]))
zij = zlk = None
for LpqR, LpqI, sign in mydf.sr_loop(kptijkl[:2], max_memory, False):
buf = LpqR+LpqI*1j
zij, zlk = _ztrans(buf, zij, moij, ijslice,
buf, zlk, molk, lkslice, sym)
lib.dot(zij.T, zlk.conj(), sign, eri_mo, 1)
LpqR = LpqI = buf = None
nmok = mo_coeffs[2].shape[1]
nmol = mo_coeffs[3].shape[1]
eri_mo = lib.transpose(eri_mo.reshape(-1,nmol,nmok), axes=(0,2,1))
return eri_mo.reshape(nij_pair,nlk_pair)
####################
# aosym = s1, complex integrals
#
# If kpti == kptj, (kptl-kptk)*a has to be multiples of 2pi because of the wave
# vector symmetry. k is a fraction of reciprocal basis, 0 < k/b < 1, by definition.
# So kptl/b - kptk/b must be -1 < k/b < 1. => kptl == kptk
#
else:
mo_coeffs = _mo_as_complex(mo_coeffs)
nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1])[1:]
nkl_pair, mokl, klslice = _conc_mos(mo_coeffs[2], mo_coeffs[3])[1:]
nao = mo_coeffs[0].shape[0]
eri_mo = numpy.zeros((nij_pair,nkl_pair), dtype=numpy.complex128)
blksize = int(min(max_memory*.3e6/16/nij_pair,
max_memory*.3e6/16/nkl_pair,
max_memory*.3e6/16/nao**2))
zij = zkl = None
for (LpqR, LpqI, sign), (LrsR, LrsI, sign1) in \
lib.izip(mydf.sr_loop(kptijkl[:2], max_memory, False, blksize),
mydf.sr_loop(kptijkl[2:], max_memory, False, blksize)):
zij, zkl = _ztrans(LpqR+LpqI*1j, zij, moij, ijslice,
LrsR+LrsI*1j, zkl, mokl, klslice, False)
lib.dot(zij.T, zkl, sign, eri_mo, 1)
LpqR = LpqI = LrsR = LrsI = None
return eri_mo
def ao2mo_7d(mydf, mo_coeff_kpts, kpts=None, factor=1, out=None):
cell = mydf.cell
if kpts is None:
kpts = mydf.kpts
nkpts = len(kpts)
if isinstance(mo_coeff_kpts, numpy.ndarray) and mo_coeff_kpts.ndim == 3:
mo_coeff_kpts = [mo_coeff_kpts] * 4
else:
mo_coeff_kpts = list(mo_coeff_kpts)
# Shape of the orbitals can be different on different k-points. The
# orbital coefficients must be formatted (padded by zeros) so that the
# shape of the orbital coefficients are the same on all k-points. This can
# be achieved by calling pbc.mp.kmp2.padded_mo_coeff function
nmoi, nmoj, nmok, nmol = [x.shape[2] for x in mo_coeff_kpts]
eri_shape = (nkpts, nkpts, nkpts, nmoi, nmoj, nmok, nmol)
if gamma_point(kpts):
dtype = numpy.result_type(*mo_coeff_kpts)
else:
dtype = numpy.complex128
if out is None:
out = numpy.empty(eri_shape, dtype=dtype)
else:
assert(out.shape == eri_shape)
kptij_lst = numpy.array([(ki, kj) for ki in kpts for kj in kpts])
kptis_lst = kptij_lst[:,0]
kptjs_lst = kptij_lst[:,1]
kpt_ji = kptjs_lst - kptis_lst
uniq_kpts, uniq_index, uniq_inverse = unique(kpt_ji)
nao = cell.nao_nr()
max_memory = max(2000, mydf.max_memory-lib.current_memory()[0]-nao**4*16/1e6) * .5
tao = []
ao_loc = None
kconserv = kpts_helper.get_kconserv(cell, kpts)
for uniq_id, kpt in enumerate(uniq_kpts):
adapted_ji_idx = numpy.where(uniq_inverse == uniq_id)[0]
for ji, ji_idx in enumerate(adapted_ji_idx):
ki = ji_idx // nkpts
kj = ji_idx % nkpts
moij, ijslice = _conc_mos(mo_coeff_kpts[0][ki], mo_coeff_kpts[1][kj])[2:]
zij = []
for LpqR, LpqI, sign in mydf.sr_loop(kpts[[ki,kj]], max_memory, False, mydf.blockdim):
zij.append(_ao2mo.r_e2(LpqR+LpqI*1j, moij, ijslice, tao, ao_loc))
for kk in range(nkpts):
kl = kconserv[ki, kj, kk]
mokl, klslice = _conc_mos(mo_coeff_kpts[2][kk], mo_coeff_kpts[3][kl])[2:]
eri_mo = numpy.zeros((nmoi*nmoj,nmok*nmol), dtype=numpy.complex128)
for i, (LrsR, LrsI, sign) in \
enumerate(mydf.sr_loop(kpts[[kk,kl]], max_memory, False, mydf.blockdim)):
zkl = _ao2mo.r_e2(LrsR+LrsI*1j, mokl, klslice, tao, ao_loc)
lib.dot(zij[i].T, zkl, sign*factor, eri_mo, 1)
if dtype == numpy.double:
eri_mo = eri_mo.real
out[ki,kj,kk] = eri_mo.reshape(eri_shape[3:])
return out
def _mo_as_complex(mo_coeffs):
mos = []
for c in mo_coeffs:
if c.dtype == numpy.float64:
mos.append(c+0j)
else:
mos.append(c)
return mos
def _dtrans(Lpq, Lij, ijmosym, moij, ijslice,
Lrs, Lkl, klmosym, mokl, klslice, sym):
Lij = _ao2mo.nr_e2(Lpq, moij, ijslice, aosym='s2', mosym=ijmosym, out=Lij)
if sym:
Lkl = Lij
else:
Lkl = _ao2mo.nr_e2(Lrs, mokl, klslice, aosym='s2', mosym=klmosym, out=Lkl)
return Lij, Lkl
def _ztrans(Lpq, zij, moij, ijslice, Lrs, zkl, mokl, klslice, sym):
tao = []
ao_loc = None
zij = _ao2mo.r_e2(Lpq, moij, ijslice, tao, ao_loc, out=zij)
if sym:
zkl = zij
else:
zkl = _ao2mo.r_e2(Lrs, mokl, klslice, tao, ao_loc, out=zkl)
return zij, zkl
class PBC2DIntegralsWarning(RuntimeWarning):
pass
def warn_pbc2d_eri(mydf):
cell = mydf.cell
if cell.dimension == 2 and cell.low_dim_ft_type == 'inf_vacuum':
with warnings.catch_warnings():
warnings.simplefilter('once', PBC2DIntegralsWarning)
warnings.warn('\nERIs of PBC-2D systems with infinity vacuum are '
'singular. cell.low_dim_ft_type = None should be '
'set.\n')
if __name__ == '__main__':
from pyscf.pbc import gto as pgto
from pyscf.pbc.df import DF
L = 5.
n = 11
cell = pgto.Cell()
cell.a = numpy.diag([L,L,L])
cell.mesh = numpy.array([n,n,n])
cell.atom = '''He 3. 2. 3.
He 1. 1. 1.'''
#cell.basis = {'He': [[0, (1.0, 1.0)]]}
#cell.basis = '631g'
#cell.basis = {'He': [[0, (2.4, 1)], [1, (1.1, 1)]]}
cell.basis = 'ccpvdz'
cell.verbose = 0
cell.build(0,0)
nao = cell.nao_nr()
numpy.random.seed(1)
kpts = numpy.random.random((4,3))
kpts[3] = -numpy.einsum('ij->j', kpts[:3])
with_df = DF(cell, kpts)
with_df.auxbasis = 'weigend'
with_df.mesh = [n] * 3
    mo = (numpy.random.random((nao,nao)) +
numpy.random.random((nao,nao))*1j)
eri = with_df.get_eri(kpts).reshape((nao,)*4)
eri0 = numpy.einsum('pjkl,pi->ijkl', eri , mo.conj())
eri0 = numpy.einsum('ipkl,pj->ijkl', eri0, mo )
eri0 = numpy.einsum('ijpl,pk->ijkl', eri0, mo.conj())
eri0 = numpy.einsum('ijkp,pl->ijkl', eri0, mo )
eri1 = with_df.ao2mo(mo, kpts)
print(abs(eri1-eri0).sum())
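# The printed value should be close to zero: with_df.ao2mo(mo, kpts) and the
# explicit four-step einsum transformation above compute the same MO-basis
# integrals, so this __main__ block doubles as a consistency check.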
|
sunqm/pyscf
|
pyscf/pbc/df/df_ao2mo.py
|
Python
|
apache-2.0
| 13,851
|
[
"PySCF"
] |
9265c4f897dc96710ca4e6ae2bc307bf4b05948ad2747f0f91ff308508330fb5
|
"""
Graph processing.
using Brian Ling's edge triple (inlet, outlet, weight)
with reference to Guido van Rossum's essay "implementing graphs"
Bruce Wernick
10 June 2021
"""
def has_key(graph, n, c):
  'true if some edge not touching c has node n as an inlet or outlet'
for e in graph:
if c in [e[0],e[1]]: continue
if n == e[0]: return True
if n == e[1]: return True
return False
def adjacent(graph, n):
'return nodes adjacent to node n (in and out)'
adj = []
for e in graph:
if n==e[0]: adj.append(e[1])
if n==e[1]: adj.append(e[0])
return adj
def find_path(graph, a, b, path=[], c=None):
'from a to b, path starts empty, c is where we came from (for housekeeping)'
path = path + [a] # we are at node a so add it to path
if a == b: # reached the end so return the path
return path
  # if a is not an outlet to somewhere other than where we came from,
  # then we have reached a dead end
if not has_key(graph, a, c):
return None
for n in adjacent(graph, a): # visit each node leading out of node a
if n not in path: # if we have not already visited node n, then...
# build a new path from n (but exclude node a, the one we came from)
newpath = find_path(graph, n, b, path, a)
if newpath: # if there is a newpath then return it
return newpath
return None
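# Hand-traced sketch on the "julie bridge" graph defined in __main__ below:
# find_path(graph, 1, 4) explores 1 -> 2 -> 3 -> 4 before it ever tries the
# direct 1-4 edge, so it returns [1, 2, 3, 4]; this is a depth-first search,
# not a shortest-path search.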
def mst(graph):
'minimum spanning tree'
tree=[]
basic=[]
graph.sort(key=lambda a: a[2])
for e in graph:
if not find_path(tree,e[0],e[1]):
tree.append(e)
else:
basic.append(e)
return tree,basic
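# Hand-traced sketch for the same graph: mst() keeps edges 'a', 'b', 'c', 'e'
# as the spanning tree and returns 'd', 'f', 'g' as the basic (mesh-forming)
# edges, since each of those would close a cycle against the tree.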
def get_edge(graph, a, b):
  'return (index, sign) of the edge between a and b'
for i in range(len(graph)):
e=graph[i]
if a==e[0] and b==e[1]: return (i,1.0)
if a==e[1] and b==e[0]: return (i,-1.0)
return (None,0)
def edge_list(graph, path):
'return edge list from node path'
edge=[]
for i in range(1,len(path)):
    idx, sgn = get_edge(graph, path[i-1], path[i])
    if sgn:
      edge.append((idx, sgn))
return edge
# ---------------------------------------------------------------------
if __name__=='__main__':
## julie bridge
graph = [[1, 2, 'a'],
[1, 4, 'b'],
[2, 3, 'c'],
[4, 3, 'd'],
[2, 5, 'e'],
[3, 5, 'f'],
[4, 5, 'g']]
print('Find Path')
print(find_path(graph, 1, 4))
print()
print('Mesh')
tree, basic = mst(graph)
for b in basic:
path = find_path(tree, b[1], b[0])
mesh = edge_list(tree, path)
    print(mesh)
print()
|
bru32/magz
|
magz/edge_triple.py
|
Python
|
mit
| 2,575
|
[
"Brian",
"VisIt"
] |
6031d9a48c3363cd434363db0dca2a56057763df52de1470f541b170d1166b30
|
import os
import sys
import random
import math
from time import *
import decimal
print('Welcome to PythonMinecraftTools')
sleep(1)
print('A Console based Minecraft tool with many purposes')
sleep(1)
print('Press "1" to use the Resource Calculator')
print('Press "2" to use the Basic Minecraft Time Converstion Table')
print('Press "3" to use the Nether Portal Linking Calculator')
while True:
try:
tooltype = int(input('Here: '))
if tooltype >= 4:
print('Please enter a valid option')
tooltype = int(input('Here: '))
if tooltype >= 4:
print('Please enter a valid option')
                tooltype = int(input("I'VE GIVEN YOU ENOUGH TIMES TO GET IT RIGHT.. ENTER A NUMBER 1, 2 OR 3!!!!: "))
if tooltype >= 4:
                    print("Screw you!! Re-open the program because I'm not going to error check your stupidity")
break
except:
print('You must enter a valid number')
if tooltype == 1:
sleep(1)
print('-')
print('Minecraft Resource Calculator')
sleep(1)
print('If you need help visit the README')
print('-')
print('Enter the individual items and the calculator will')
print('tell you how many chest or stacks it is!')
print('*Only works for items that stack up to 64*')
sleep(1)
while True:
try:
print('Enter an amount of individual items')
numinput = float(input('Here: '))
stacks = (numinput) / 64
chests = (numinput) / 1728
dubchest = (numinput) / 3456
print(round(stacks,2), "Stack(s)")
print(round(chests,2), "Chest(s)")
print(round(dubchest,2), "Double Chest(s)")
break
except:
print('You must enter a number!')
sleep(2)
input('Press ENTER to exit')
if tooltype == 2:
print('-')
    print('Time Conversions in Minecraft')
print('-')
sleep(1)
print('MC Time to Real Time')
print('1 Minute = 0.8 Seconds')
print('1 Hour = 50 Seconds')
print('1 Day = 20 Minutes')
print('1 Week = 2.3 Hours')
print('1 Month = 10 Hours')
print('1 Year = 5 Days')
print('-')
print('Day Time = 10 Minutes')
print('Sunset/Dusk = 1.5 Minutes')
print('Night Time = 7 Minutes')
print('Sunrise/dawn = 1.5 Minutes')
print('-')
sleep(1)
input('Press ENTER to exit')
if tooltype == 3:
sleep(1)
print('-')
print('Nether Portal linking calculator')
sleep(1)
print('If you need help visit the README')
print('-')
print('Enter the X, Y and Z Co-ords')
print('then press enter')
print('-')
sleep(1)
print('For Nether to Overworld press 1 or for Overworld to Nether press 2')
NorO = float(input())
#Nether to Overworld
if NorO == 1:
print('Nether to Overworld')
print('Only type numbers!!')
sleep(1)
while True:
try:
xin = int(input('Nether X Co-Ords: '))
break
except:
print('You must enter a number!')
while True:
try:
yin = int(input('Nether Y Co-Ords: '))
break
except:
print('You must enter a number!')
while True:
try:
zin = int(input('Nether Z Co-Ords: '))
break
except:
print('You must enter a number!')
xout = (xin) * 8
yout = (yin) * 8
zout = (zin) * 8
        print('Build a portal at:', xout, yout, zout, 'in the overworld')
sleep(2)
input('Press ENTER to exit')
#Overworld to Nether
if NorO == 2:
print('Overworld to Nether')
print('Only type numbers!!')
sleep(1)
while True:
try:
xino = int(input('Overworld X Co-Ords: '))
break
except:
print('You must enter a number!')
while True:
try:
yino = int(input('Overworld Y Co-Ords: '))
break
except:
print('You must enter a number!')
while True:
try:
zino = int(input('Overworld Z Co-Ords: '))
break
except:
print('You must enter a number!')
xouto = (xino) / 8
youto = (yino) / 8
zouto = (zino) / 8
print('Build a portal at:' ,xouto,youto,zouto, 'In the nether')
sleep(2)
input('Press ENTER to exit')
|
TheUncannyScrub/PythonMinecraftTools
|
Tools/MasterMinecraftTool.py
|
Python
|
mit
| 4,514
|
[
"VisIt"
] |
3507c2abcbfffac878cdb1dbdaec8ca904b7558fb84ac16098e58564ea59aa54
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Brian Scholer <@briantist>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_psrepository_info
version_added: '2.10'
short_description: Gather information about PSRepositories
description:
- Gather information about all or a specific PSRepository.
options:
name:
description:
- The name of the repository to retrieve.
- Supports any wildcard pattern supported by C(Get-PSRepository).
      - If omitted then all repositories will be returned.
type: str
default: '*'
requirements:
- C(PowerShellGet) module
seealso:
- module: win_psrepository
author:
- Brian Scholer (@briantist)
'''
EXAMPLES = r'''
- name: Get info for a single repository
win_psrepository_info:
name: PSGallery
register: repo_info
- name: Find all repositories that start with 'MyCompany'
win_psrepository_info:
name: MyCompany*
- name: Get info for all repositories
win_psrepository_info:
register: repo_info
- name: Remove all repositories that don't have a publish_location set
win_psrepository:
name: "{{ item }}"
state: absent
loop: "{{ repo_info.repositories | rejectattr('publish_location', 'none') | list }}"
'''
RETURN = r'''
repositories:
description:
    - A list of repositories (or an empty list if there are none).
returned: always
type: list
elements: dict
contains:
name:
description:
- The name of the repository.
type: str
sample: PSGallery
installation_policy:
description:
- The installation policy of the repository. The sample values are the only possible values.
type: str
sample:
- Trusted
- Untrusted
trusted:
description:
- A boolean flag reflecting the value of C(installation_policy) as to whether the repository is trusted.
type: bool
package_management_provider:
description:
- The name of the package management provider for this repository.
type: str
sample: NuGet
provider_options:
description:
- Provider-specific options for this repository.
type: dict
source_location:
description:
- The location used to find and retrieve modules. This should always have a value.
type: str
sample: https://www.powershellgallery.com/api/v2
publish_location:
description:
- The location used to publish modules.
type: str
sample: https://www.powershellgallery.com/api/v2/package/
script_source_location:
description:
- The location used to find and retrieve scripts.
type: str
sample: https://www.powershellgallery.com/api/v2/items/psscript
script_publish_location:
description:
- The location used to publish scripts.
type: str
sample: https://www.powershellgallery.com/api/v2/package/
registered:
description:
        - Whether the repository is registered. Should always be C(True).
type: bool
'''
|
roadmapper/ansible
|
lib/ansible/modules/windows/win_psrepository_info.py
|
Python
|
gpl-3.0
| 3,236
|
[
"Brian"
] |
fdccd83befd8c917d6370c62a96796c12f58e87c90db539f3e7b0ed34703fcf2
|
import argparse
parser = argparse.ArgumentParser(description='Run simulation for nora w 3d layers')
parser.add_argument('t', metavar='threads', type=int,
default=1,
help='number of nest threads')
parser.add_argument('n', metavar='nn',
default=3000,
help='desired number of neurons')
args = parser.parse_args()
# Quality of graphics
dpi_n = 120
number_of_threads = args.t
# Number of neurons
NN = args.n
# T - simulation time | dt - simulation pause step
T = 1000.
dt = 10.
# Neurons number for spike detector
N_detect = 100
# Neurons number for multimeter
N_volt = 3
# Generator delay
pg_delay = 10.
# Synapse weights
w_Glu = 3.
w_GABA = -w_Glu * 2
w_ACh = 8.
w_NA_ex = 13.
w_NA_in = -w_NA_ex
w_DA_ex = 13.
w_DA_in = -w_DA_ex
w_SERO_ex = 13.
w_SERO_in = -w_SERO_ex
# Minimal number of neurons
NN_minimal = 10
# Additional settings
serotonin_flag = True
noradrenaline_flag = True # noradrenaline modulation flag
dopamine_flag = True # dopamine modulation flag
generator_flag = True
create_images = True
MaxSynapses = 4000 # max synapses
BOUND = 0.2 # outer bound of rectangular 3d layer
R = .25 # radius of connectivity sphere of a neuron
|
research-team/NEUCOGAR
|
NEST/cube/integration/excitement/simulation_params.py
|
Python
|
gpl-2.0
| 1,252
|
[
"NEURON"
] |
c0f698b2ead8c25fc5568d565c0f13c1ae60df30c0c253f19ae37340d47ba768
|
import copy
import lan
from itertools import chain
import exchange
import collect_array as ca
import collect_loop as cl
import collect_id as ci
import collect_device as cd
class PlaceInReg(object):
def __init__(self, ast):
self.ast = ast
self.PlaceInRegFinding = tuple()
self.PlaceInRegCond = None
self.perform_transformation = False
def place_in_reg(self):
""" Find all array references that can be cached in registers.
Then rewrite the code in this fashion.
"""
optimizable_arrays = dict()
hoist_loop_set = set()
ref_to_loop = ca.get_ref_to_loop(self.ast)
write_only = ca.get_write_only(self.ast)
subscript_no_id = ca.get_subscript_no_id(self.ast)
grid_indices = cl.get_grid_indices(self.ast)
for n in ref_to_loop:
if n in write_only:
continue
ref1 = ref_to_loop[n]
sub1 = subscript_no_id[n]
for (ref, sub, i) in zip(ref1, sub1, range(len(ref1))):
if self._can_perform_optimization(ref, sub):
hoist_loop_set |= set(sub) - set(grid_indices)
try:
optimizable_arrays[n].append(i)
except KeyError:
optimizable_arrays[n] = [i]
hoist_loop_set = self._remove_unknown_loops(hoist_loop_set)
if len(hoist_loop_set) > 1:
print """ PlaceInReg: array references was inside two loops. No optimization. """
return
hoist_loop_list = list(hoist_loop_set)
if optimizable_arrays:
self._set_optimization_arg(optimizable_arrays, hoist_loop_list)
self._set_optimization_condition(optimizable_arrays, hoist_loop_list)
def _set_optimization_condition(self, optimizable_arrays, hoistloop):
num_ref_hoisted = len(list(chain.from_iterable(optimizable_arrays.values())))
(lower_limit, upper_limit) = cl.get_loop_limits(self.ast)
if hoistloop:
m = hoistloop[0]
lhs = lan.BinOp(lan.Id(upper_limit[m]), '-', lan.Id(lower_limit[m]))
else:
lhs = lan.Constant(1)
self.PlaceInRegCond = lan.BinOp(lan.BinOp(lhs, '*', lan.Constant(num_ref_hoisted)), '<', lan.Constant(40))
def _set_optimization_arg(self, optimizable_arrays, hoistloop):
self.PlaceInRegFinding = (optimizable_arrays, hoistloop)
def _remove_unknown_loops(self, insideloop):
loops = cl.get_inner_loops(self.ast)
return {k for k in insideloop if k in loops}
def _can_perform_optimization(self, loop_idx, sub_idx):
"""
# for each array, for each array ref, collect which loop, loop_idx, it is in
# and what indices, sub_idx, are in its subscript.
# if there is a grid_idx in sub_idx and there exists a loop_idx not in sub_idx
:param loop_idx:
:param sub_idx:
:return:
"""
grid_indices = cl.get_grid_indices(self.ast)
return set(sub_idx).intersection(set(grid_indices)) and \
set(loop_idx).difference(set(sub_idx))
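    # Illustrative sketch (hypothetical indices): with grid_indices == ['i'],
    # a reference A[i][k] that sits inside loops over 'k' and 'j' qualifies:
    # 'i' is a grid index appearing in the subscript, while 'j' is a loop
    # index missing from it, so the reference is invariant in the 'j' loop
    # and can be cached in a register.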
def place_in_reg2(self, arr_dict):
self._insert_cache_in_reg(arr_dict)
self._replace_global_ref_with_reg_id(arr_dict)
def _insert_cache_in_reg(self, arr_dict):
initstats = []
# Create the loadings
types = ci.get_types(self.ast)
kernel = cd.get_kernel(self.ast)
kernel_stats = kernel.statements
for i, n in enumerate(arr_dict):
for m in arr_dict[n]:
regid = self._create_reg_var_id(m, n)
reg_type = types[n][0]
reg = lan.TypeId([reg_type], regid)
assign = self._create_reg_assignment(m, n, reg)
initstats.append(assign)
kernel_stats.insert(0, lan.GroupCompound(initstats))
def _replace_global_ref_with_reg_id(self, arr_dict):
# Replace the global Arefs with the register vars
loop_arrays = ca.get_loop_arrays(self.ast)
loop_arrays_parent = ca.get_loop_arrays_parent(self.ast)
for i, n in enumerate(arr_dict):
for m in arr_dict[n]:
idx = m
reg_id = self._create_reg_var_id(m, n)
parent = loop_arrays_parent[n][idx]
aref_old = loop_arrays[n][idx]
exchange_array_id_with_id = exchange.ExchangeArrayIdWithId(aref_old, reg_id)
exchange_array_id_with_id.visit(parent)
@staticmethod
def _create_reg_var_id(m, n):
return lan.Id(n + str(m) + '_reg')
def _create_reg_assignment(self, m, n, reg):
idx = m
loop_arrays = ca.get_loop_arrays(self.ast)
glob_array_ref = copy.deepcopy(loop_arrays[n][idx])
reg_dict = {'isReg': []}
glob_array_ref.extra = reg_dict
assign = lan.Assignment(reg, glob_array_ref)
return assign
def place_in_reg3(self):
""" Check if the arrayref is inside a loop and use a static
array for the allocation of the registers
"""
kernel = cd.get_kernel(self.ast)
kernel_stats = kernel.statements
self.place_in_reg()
if self.PlaceInRegFinding is ():
return
(optimizable_arrays, hoist_loop_list) = self.PlaceInRegFinding
self.perform_transformation = True
if not optimizable_arrays:
return
if not hoist_loop_list:
self.place_in_reg2(optimizable_arrays)
return
hoist_loop = hoist_loop_list[0]
if hoist_loop == '':
print "placeInReg3 only works when the ArrayRef is inside a loop"
print optimizable_arrays
return
initstats = self._create_reg_array_alloc(optimizable_arrays, hoist_loop)
# add the load loop to the initiation stage
loopstats = self._create_load_loop(hoist_loop, initstats)
# Create the loadings
for i, n in enumerate(optimizable_arrays):
for m in optimizable_arrays[n]:
regid = self._create_reg_array_var(n, hoist_loop)
assign = self._create_reg_assignment(m, n, regid)
loopstats.append(assign)
kernel_stats.insert(0, lan.GroupCompound(initstats))
# Replace the global Arefs with the register Arefs
loop_arrays = ca.get_loop_arrays(self.ast)
for i, n in enumerate(optimizable_arrays):
for m in optimizable_arrays[n]:
idx = m
regid = self._create_reg_array_var(n, hoist_loop)
aref_new = copy.deepcopy(regid)
aref_old = loop_arrays[n][idx]
# Copying the internal data of the two arefs
aref_old.name.name = aref_new.name.name
aref_old.subscript = aref_new.subscript
def _create_load_loop(self, hoist_loop, initstats):
loops = cl.get_inner_loops(self.ast)
loop = copy.deepcopy(loops[hoist_loop])
loopstats = []
loop.compound.statements = loopstats
initstats.append(loop)
return loopstats
@staticmethod
def _create_reg_array_var(n, hoist_loop):
regid = lan.ArrayRef(lan.Id(n + '_reg'), [lan.Id(hoist_loop)])
return regid
def _create_reg_array_alloc(self, optimizable_arrays, hoist_loop):
initstats = []
types = ci.get_types(self.ast)
(_, upper_limit) = cl.get_loop_limits(self.ast)
# Add allocation of registers to the initiation stage
for n in optimizable_arrays:
array_init = lan.ArrayTypeId([types[n][0]], lan.Id(n + '_reg'), [lan.Id(upper_limit[hoist_loop])])
initstats.append(array_init)
return initstats
|
dikujepsen/OpenTran
|
v2.0/framework/Matmul/place_in_reg.py
|
Python
|
mit
| 7,807
|
[
"VisIt"
] |
9cb2689c4bc99de29db0b8da0eab8c2729918faeb102dc483b813fdc688fc9de
|
import Tools.HTML
from Top import Top
import logging
log = logging.getLogger(__name__)
"""
jmol commands need to be wrapped into Jmol.script(ID, "...commands...")
Sometimes, it is preferable to do it immediately or within .webdata() function.
Accordingly, html_* elements represent controls that are ready to be inserted
into the web page, while jmol_* need to be wrapped up using jmol_command_to_html.
ID is defined based on self.settings.counter
"""
class JSMol(Top):
def initiate_jmol_applet(self):
s = "jmolApplet%s = Jmol.getApplet(\"jmolApplet%s\", Info)" % ((self.settings.counter,) * 2)
return Tools.HTML.tag(s, 'SCRIPT')
def jmol_command_to_html(self, s, intag=''):
s2 = "Jmol.script(jmolApplet%(counter)s, \"%(script)s\" );" % {
'counter': self.settings.counter,
'script': s.replace('"', '\\"').replace("'", "\\'") # s.replace('"',' " ')
}
return Tools.HTML.tag(s2, 'SCRIPT', intag=intag)
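    # Illustrative sketch (hypothetical values, assuming settings.counter == 0
    # and that Tools.HTML.tag wraps its first argument in the named tag):
    #   jmol_command_to_html('load mol.xyz') would yield roughly
    #   <SCRIPT>Jmol.script(jmolApplet0, "load mol.xyz" );</SCRIPT>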
def jmol_load_file(self, webpath):
s = 'load %s' % webpath
return s + '; ' + self.settings.JavaOptions
def html_load_file(self, *args):
s = self.jmol_load_file(*args)
return self.jmol_command_to_html(s)
def jmol_isosurface(self, webpath='', isovalue='', surftype='', webpath_other='', name='', colors='',
use_quotes=False):
isovals = {
'potential': '0.001',
'spin': '0.001',
'spin2': '0.001',
'mo': '0.03',
'amo': '0.03',
'bmo': '0.03'
}
surftypes = {
'potential': 'isosurface %s cutoff %s %s color absolute -0.03 0.03 map %s',
'spin': 'isosurface %s sign cutoff %s %s %s',
'spin2': 'isosurface %s cutoff %s %s %s',
'mo': 'isosurface %s phase cutoff %s %s %s',
'amo': 'isosurface %s phase cutoff %s %s %s',
'bmo': 'isosurface %s phase cutoff %s %s %s'
}
coltypes = {
'mo': 'phase %s %s opaque' % (self.settings.color_mo_plus, self.settings.color_mo_minus),
'amo': 'phase %s %s opaque' % (self.settings.color_mo_plus, self.settings.color_mo_minus),
'bmo': 'phase %s %s opaque' % (self.settings.color_mo_plus, self.settings.color_mo_minus),
'spin': 'red blue',
'spin2': 'blue'
}
st_lower = surftype.lower().split('=')[0]
if st_lower in surftypes:
st = surftypes[st_lower]
else:
st = 'isosurface %s cutoff %s %s %s'
if not isovalue:
if st_lower in isovals:
isovalue = isovals[st_lower]
else:
isovalue = '0.03'
color = colors
if (st_lower in coltypes) and (not color):
color = coltypes[st_lower]
if not color:
color = 'translucent'
if use_quotes:
webpath = '"%s"' % (webpath)
if webpath_other:
webpath_other = '"%s"' % (webpath_other)
log.debug('Plotting isosurface; surftype: %s' % (st_lower))
st2 = st % (name, isovalue, webpath, webpath_other) + '; color isosurface %s' % (color)
log.debug(st2)
return st2
def html_isosurface(self, *args):
s = self.jmol_isosurface(*args)
return self.jmol_command_to_html(s)
def jmol_jvxl(self, webpath='', name='', use_quotes=False):
if use_quotes:
webpath = '"%s"' % (webpath)
return 'isosurface %s %s' % (name, webpath)
def html_jvxl(self, *args):
s = self.jmol_jvxl(*args)
return self.jmol_command_to_html(s)
def jmol_cli(self):
return 'jmolCommandInput("Execute")'
def html_cli(self):
s = self.jmol_cli()
return self.jmol_command_to_html(s)
def jmol_text(self, label, position='top left', color='green'):
return "set echo %s; color echo %s; echo %s;" % (position, color, label)
def html_text(self, *args,**kwargs):
s = self.jmol_text(*args,**kwargs)
return self.jmol_command_to_html(s)
def html_button(self, action, label):
return '<button type="button" onclick="javascript:Jmol.script(jmolApplet%(count)s, \'%(action)s\')">%(label)s</button>\n' % {
'count': self.settings.counter,
'action': action.replace('"', '\\"').replace("'", "\\'"), # s.replace('"',' " '),
'label': label,
}
def html_checkbox(self, on, off, label=''):
s2 = "Jmol.jmolCheckbox(jmolApplet%(counter)s, \"%(script_on)s\", \"%(script_off)s\", \"%(label)s\" );" % {
'counter': self.settings.counter,
'script_on': on,
'script_off': off,
'label': label
}
return Tools.HTML.tag(s2, 'SCRIPT')
def jmol_radiogroup(self, options):
s = ''
for opt in options:
s2 = ''
for o in opt:
s2 += '"%s", ' % (o)
s += '[%s],' % (s2[:-2])
return 'jmolRadioGroup([%s])' % s[:-1]
# TODO I suspect that it is currently broken but never tried it
def html_radiogroup(self, *args):
s = self.jmol_radiogroup(*args)
return self.jmol_command_to_html(s)
def jmol_menu(self, options):
s = ''
for opt in options:
s2 = ''
for o in opt: s2 += '"%s", ' % (o)
s += '[%s],' % (s2[:-2])
return 'jmolMenu([%s])' % s[:-1]
# TODO I suspect that it is currently broken but never tried it
def html_menu(self, *args):
s = self.jmol_menu(*args)
return self.jmol_command_to_html(s)
def html_geom_play_controls(self):
ButtonFirst = self.html_button('frame 1', '<<')
ButtonPrev = self.html_button('anim direction +1 ; frame prev', '<')
ButtonNext = self.html_button('anim direction +1 ; frame next', '>')
ButtonLast = self.html_button('frame last', '>>')
ButtonPlayOnce = self.html_button('anim mode once; frame 1; anim direction +1 ; anim on', 'Play once')
ButtonPlayBack = self.html_button('anim mode once; frame 1; anim direction -1 ; anim on', 'Play back')
ButtonStop = self.html_button('anim off', 'Stop')
return ButtonFirst + ButtonPrev + ButtonNext + ButtonLast + ButtonPlayOnce + ButtonPlayBack + ButtonStop
"""
opts = []
for a in (1,5,10,25,50):
opts.append(['set animationFPS %s' % (a), a])
opts[2].append('checked')
s += self.JMolMenu(opts,script=False)
"""
def html_vibration_switch(self):
return self.html_checkbox("vibration on", "vibration off", "Vibration")
def jmol_measurements(self, ss):
toJmol = ''
for s in ss:
left, right = s.find('('), s.find(')')
if left and right and (right > left):
toJmol += '; measure %s; ' % (s[left + 1:right].replace(',', ' '))
return toJmol
def html_measurements(self, *args):
s = self.jmol_measurements(*args)
return self.jmol_command_to_html(s)
# OLD METHODS, TODO EVENTUALLY TO BE REVISED
def JSMolStyle(self, s):
s2 = "Jmol.script(jmolApplet%(counter)s, \"%(script)s\" );" % {
'counter': self.settings.counter,
'script': s.replace('"', '\\"').replace("'", "\\'") # s.replace('"',' " ')
}
return s2
def JSMolScript(self, s, intag=''):
s2 = self.JSMolStyle(s)
return Tools.HTML.tag(s2, 'SCRIPT', intag=intag)
def JMolApplet(self, webpath='', ExtraScript=''):
s = "jmolApplet%s = Jmol.getApplet(\"jmolApplet%s\", Info)" % ((self.settings.counter,) * 2)
script = self.JMolLoad(webpath=webpath, ExtraScript=ExtraScript)
s += ';\n' + self.JSMolStyle(script)
return Tools.HTML.tag(s, 'SCRIPT')
def JMolLoad(self, webpath='', ExtraScript=''):
sl = ''
if webpath:
sl = 'load %s' % (webpath)
# sl = 'load %s;%s' % (webpath, self.settings.JavaOptions)
if ExtraScript:
sl += ExtraScript
return sl
if __name__ == "__main__":
import sys
sys.path.append('..')
# from Settings import Settings
|
talipovm/terse
|
terse/JSMol.py
|
Python
|
mit
| 8,266
|
[
"Jmol"
] |
a969879c8c6014f39ddec8104bcd6f388c4e40d3eda7304b7a42876614602cfc
|
"""Functions to plot M/EEG data on topo (one axes per channel)
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import warnings
from itertools import cycle
from functools import partial
import numpy as np
from scipy import ndimage
# XXX : don't import pyplot here or you will break the doc
from ..baseline import rescale
from ..utils import deprecated
from ..io.pick import channel_type, pick_types
from ..fixes import normalize_colors
from ..utils import _clean_names
from .utils import _mutable_defaults, _check_delayed_ssp, COLORS
from .utils import _draw_proj_checkbox
def iter_topography(info, layout=None, on_pick=None, fig=None,
fig_facecolor='k', axis_facecolor='k',
axis_spinecolor='k', layout_scale=None,
colorbar=False):
""" Create iterator over channel positions
This function returns a generator that unpacks into
a series of matplotlib axis objects and data / channel
indices, both corresponding to the sensor positions
of the related layout passed or inferred from the channel info.
    `iter_topography` therefore makes it convenient to build custom
    topography plots.
Parameters
----------
info : instance of mne.io.meas_info.Info
The measurement info.
layout : instance of mne.layout.Layout | None
        The layout to use. If None, the layout will be guessed.
on_pick : callable | None
The callback function to be invoked on clicking one
of the axes. Is supposed to instantiate the following
API: `function(axis, channel_index)`
fig : matplotlib.figure.Figure | None
The figure object to be considered. If None, a new
figure will be created.
fig_facecolor : str | obj
The figure face color. Defaults to black.
axis_facecolor : str | obj
The axis face color. Defaults to black.
axis_spinecolor : str | obj
The axis spine color. Defaults to black. In other words,
the color of the axis' edge lines.
layout_scale: float | None
Scaling factor for adjusting the relative size of the layout
on the canvas. If None, nothing will be scaled.
Returns
-------
A generator that can be unpacked into
ax : matplotlib.axis.Axis
The current axis of the topo plot.
ch_dx : int
The related channel index.
"""
import matplotlib.pyplot as plt
if fig is None:
fig = plt.figure()
fig.set_facecolor(fig_facecolor)
if layout is None:
from ..layouts import find_layout
layout = find_layout(info)
if on_pick is not None:
callback = partial(_plot_topo_onpick, show_func=on_pick)
fig.canvas.mpl_connect('button_press_event', callback)
pos = layout.pos.copy()
if layout_scale:
pos[:, :2] *= layout_scale
ch_names = _clean_names(info['ch_names'])
iter_ch = [(x, y) for x, y in enumerate(layout.names) if y in ch_names]
for idx, name in iter_ch:
ax = plt.axes(pos[idx])
ax.patch.set_facecolor(axis_facecolor)
plt.setp(list(ax.spines.values()), color=axis_spinecolor)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.setp(ax.get_xticklines(), visible=False)
plt.setp(ax.get_yticklines(), visible=False)
ch_idx = ch_names.index(name)
vars(ax)['_mne_ch_name'] = name
vars(ax)['_mne_ch_idx'] = ch_idx
vars(ax)['_mne_ax_face_color'] = axis_facecolor
yield ax, ch_idx
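# Minimal usage sketch for iter_topography (hypothetical names `raw`, `times`
# and `data`); each iteration yields an axes placed at the sensor position
# plus the index of the matching channel:
#
#     for ax, ch_idx in iter_topography(raw.info, fig_facecolor='w',
#                                       axis_facecolor='w'):
#         ax.plot(times, data[ch_idx], color='k')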
def _plot_topo(info=None, times=None, show_func=None, layout=None,
decim=None, vmin=None, vmax=None, ylim=None, colorbar=None,
border='none', cmap=None, layout_scale=None, title=None,
x_label=None, y_label=None, vline=None):
"""Helper function to plot on sensor layout"""
import matplotlib.pyplot as plt
# prepare callbacks
tmin, tmax = times[[0, -1]]
on_pick = partial(show_func, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim, x_label=x_label,
y_label=y_label, colorbar=colorbar)
fig = plt.figure()
if colorbar:
norm = normalize_colors(vmin=vmin, vmax=vmax)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array(np.linspace(vmin, vmax))
ax = plt.axes([0.015, 0.025, 1.05, .8], axisbg='k')
cb = fig.colorbar(sm, ax=ax)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
plt.setp(cb_yticks, color='w')
my_topo_plot = iter_topography(info, layout=layout, on_pick=on_pick,
fig=fig, layout_scale=layout_scale,
axis_spinecolor=border,
colorbar=colorbar)
for ax, ch_idx in my_topo_plot:
if layout.kind == 'Vectorview-all' and ylim is not None:
this_type = {'mag': 0, 'grad': 1}[channel_type(info, ch_idx)]
ylim_ = [v[this_type] if _check_vlim(v) else v for v in ylim]
else:
ylim_ = ylim
show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim_)
if ylim_ and not any(v is None for v in ylim_):
plt.ylim(*ylim_)
if title is not None:
plt.figtext(0.03, 0.9, title, color='w', fontsize=19)
return fig
def _plot_topo_onpick(event, show_func=None, colorbar=False):
"""Onpick callback that shows a single channel in a new figure"""
# make sure that the swipe gesture in OS-X doesn't open many figures
orig_ax = event.inaxes
if event.inaxes is None:
return
import matplotlib.pyplot as plt
try:
ch_idx = orig_ax._mne_ch_idx
face_color = orig_ax._mne_ax_face_color
fig, ax = plt.subplots(1)
plt.title(orig_ax._mne_ch_name)
ax.set_axis_bgcolor(face_color)
# allow custom function to override parameters
show_func(plt, ch_idx)
except Exception as err:
# matplotlib silently ignores exceptions in event handlers,
# so we print
# it here to know what went wrong
print(err)
raise err
def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None, tfr=None,
freq=None, vline=None, x_label=None, y_label=None,
colorbar=False, picker=True, cmap=None):
""" Aux function to show time-freq map on topo """
import matplotlib.pyplot as plt
if cmap is None:
cmap = plt.cm.jet
extent = (tmin, tmax, freq[0], freq[-1])
ax.imshow(tfr[ch_idx], extent=extent, aspect="auto", origin="lower",
vmin=vmin, vmax=vmax, picker=picker, cmap=cmap)
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
times, vline=None, x_label=None, y_label=None,
colorbar=False):
""" Aux function to show time series on topo """
import matplotlib.pyplot as plt
picker_flag = False
for data_, color_ in zip(data, color):
if not picker_flag:
# use large tol for picker so we can click anywhere in the axes
ax.plot(times, data_[ch_idx], color_, picker=1e9)
picker_flag = True
else:
ax.plot(times, data_[ch_idx], color_)
if vline:
[plt.axvline(x, color='w', linewidth=0.5) for x in vline]
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def _check_vlim(vlim):
"""AUX function"""
    return not np.isscalar(vlim) and vlim is not None
def plot_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None, proj=False,
vline=[0.0]):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale: float
Scaling factor for adjusting the relative size of the layout
on the canvas
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
matplotlib borders style to be used for each sensor plot.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
ylim : dict | None
ylim for plots. The value determines the upper and lower subplot
        limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
mag, grad, misc. If None, the ylim parameter for each channel is
determined by the maximum absolute peak.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
title : str
Title of the figure.
vline : list of floats | None
The values at which to show a vertical line.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
"""
if not type(evoked) in (tuple, list):
evoked = [evoked]
if type(color) in (tuple, list):
if len(color) != len(evoked):
raise ValueError('Lists of evoked objects and colors'
' must have the same length')
elif color is None:
colors = ['w'] + COLORS
stop = (slice(len(evoked)) if len(evoked) < len(colors)
else slice(len(colors)))
color = cycle(colors[stop])
if len(evoked) > len(colors):
warnings.warn('More evoked objects than colors available.'
'You should pass a list of unique colors.')
else:
color = cycle([color])
times = evoked[0].times
if not all([(e.times == times).all() for e in evoked]):
raise ValueError('All evoked.times must be the same')
info = evoked[0].info
ch_names = evoked[0].ch_names
if not all([e.ch_names == ch_names for e in evoked]):
raise ValueError('All evoked.picks must be the same')
ch_names = _clean_names(ch_names)
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(info)
# XXX. at the moment we are committed to 1- / 2-sensor-types layouts
chs_in_layout = set(layout.names) & set(ch_names)
types_used = set(channel_type(info, ch_names.index(ch))
for ch in chs_in_layout)
# one check for all vendors
meg_types = ['mag'], ['grad'], ['mag', 'grad'],
is_meg = any(types_used == set(k) for k in meg_types)
if is_meg:
types_used = list(types_used)[::-1] # -> restore kwarg order
picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[])
for kk in types_used]
else:
types_used_kwargs = dict((t, True) for t in types_used)
picks = [pick_types(info, meg=False, **types_used_kwargs)]
assert isinstance(picks, list) and len(types_used) == len(picks)
scalings = _mutable_defaults(('scalings', scalings))[0]
evoked = [e.copy() for e in evoked]
for e in evoked:
for pick, t in zip(picks, types_used):
e.data[pick] = e.data[pick] * scalings[t]
if proj is True and all([e.proj is not True for e in evoked]):
evoked = [e.apply_proj() for e in evoked]
elif proj == 'interactive': # let it fail early.
for e in evoked:
_check_delayed_ssp(e)
if ylim is None:
set_ylim = lambda x: np.abs(x).max()
ylim_ = [set_ylim([e.data[t] for e in evoked]) for t in picks]
ymax = np.array(ylim_)
ylim_ = (-ymax, ymax)
elif isinstance(ylim, dict):
ylim_ = _mutable_defaults(('ylim', ylim))[0]
ylim_ = [ylim_[kk] for kk in types_used]
ylim_ = zip(*[np.array(yl) for yl in ylim_])
else:
        raise ValueError('ylim must be None or a dict')
plot_fun = partial(_plot_timeseries, data=[e.data for e in evoked],
color=color, times=times, vline=vline)
fig = _plot_topo(info=info, times=times, show_func=plot_fun, layout=layout,
decim=1, colorbar=False, ylim=ylim_, cmap=None,
layout_scale=layout_scale, border=border, title=title,
x_label='Time (s)', vline=vline)
if proj == 'interactive':
for e in evoked:
_check_delayed_ssp(e)
params = dict(evokeds=evoked, times=times,
plot_update_proj_callback=_plot_update_evoked_topo,
projs=evoked[0].info['projs'], fig=fig)
_draw_proj_checkbox(None, params)
return fig
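# Usage sketch (assuming `evoked` is an mne Evoked instance, or a list of
# them): fig = plot_topo(evoked, title='Evoked responses'). Passing a list
# overlays one trace per condition on every sensor, cycling through colors.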
def _plot_update_evoked_topo(params, bools):
"""Helper function to update topo sensor plots"""
evokeds, times, fig = [params[k] for k in ('evokeds', 'times', 'fig')]
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
evokeds = [e.copy() for e in evokeds]
for e in evokeds:
e.info['projs'] = []
e.add_proj(projs)
e.apply_proj()
# make sure to only modify the time courses, not the ticks
axes = fig.get_axes()
n_lines = len(axes[0].lines)
n_diff = len(evokeds) - n_lines
ax_slice = slice(abs(n_diff)) if n_diff < 0 else slice(n_lines)
for ax in axes:
lines = ax.lines[ax_slice]
for line, evoked in zip(lines, evokeds):
line.set_data(times, evoked.data[ax._mne_ch_idx])
fig.canvas.draw()
@deprecated('`plot_topo_tfr` is deprecated and will be removed in '
'MNE 0.9. Use `plot_topo` method on TFR objects.')
def plot_topo_tfr(epochs, tfr, freq, layout=None, colorbar=True, vmin=None,
vmax=None, cmap='RdBu_r', layout_scale=0.945, title=None):
"""Plot time-frequency data on sensor layout
Clicking on the time-frequency map of an individual sensor opens a
new figure showing the time-frequency map of the selected sensor.
Parameters
----------
epochs : instance of Epochs
The epochs used to generate the power
tfr : 3D-array shape=(n_sensors, n_freqs, n_times)
The time-frequency data. Must have the same channels as Epochs.
freq : array-like
Frequencies of interest as passed to induced_power
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
colorbar : bool
If true, colorbar will be added to the plot
vmin : float
Minimum value mapped to lowermost color
vmax : float
        Maximum value mapped to uppermost color
cmap : instance of matplotlib.pyplot.colormap | str
Colors to be mapped to the values. Default 'RdBu_r'.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas
title : str
Title of the figure.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of time-frequency data at sensor locations
"""
if vmin is None:
vmin = tfr.min()
if vmax is None:
vmax = tfr.max()
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(epochs.info)
tfr_imshow = partial(_imshow_tfr, tfr=tfr.copy(), freq=freq, cmap=cmap)
fig = _plot_topo(info=epochs.info, times=epochs.times,
show_func=tfr_imshow, layout=layout, border='w',
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title,
x_label='Time (s)', y_label='Frequency (Hz)')
return fig
@deprecated('`plot_topo_power` is deprecated and will be removed in '
'MNE 0.9. Use `plot_topo` method on TFR objects.')
def plot_topo_power(epochs, power, freq, layout=None, baseline=None,
mode='mean', decim=1, colorbar=True, vmin=None, vmax=None,
cmap=None, layout_scale=0.945, dB=True, title=None):
"""Plot induced power on sensor layout
Clicking on the induced power map of an individual sensor opens a
new figure showing the induced power map of the selected sensor.
Parameters
----------
epochs : instance of Epochs
The epochs used to generate the power
power : 3D-array
First return value from mne.time_frequency.induced_power
freq : array-like
Frequencies of interest as passed to induced_power
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
        If None, no baseline correction will be performed.
decim : integer
Increment for selecting each nth time slice
colorbar : bool
If true, colorbar will be added to the plot
vmin : float
Minimum value mapped to lowermost color
vmax : float
        Maximum value mapped to uppermost color
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas
dB : bool
If True, log10 will be applied to the data.
title : str
Title of the figure.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of induced power at sensor locations
"""
times = epochs.times[::decim].copy()
if mode is not None:
if baseline is None:
baseline = epochs.baseline
power = rescale(power.copy(), times, baseline, mode)
times *= 1e3
if dB:
power = 20 * np.log10(power)
if vmin is None:
vmin = power.min()
if vmax is None:
vmax = power.max()
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(epochs.info)
power_imshow = partial(_imshow_tfr, tfr=power.copy(), freq=freq)
fig = _plot_topo(info=epochs.info, times=times,
show_func=power_imshow, layout=layout, decim=decim,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border='w',
x_label='Time (s)', y_label='Frequency (Hz)')
return fig
@deprecated('`plot_topo_phase_lock` is deprecated and will be removed in '
'MNE 0.9. Use `plot_topo` method on TFR objects.')
def plot_topo_phase_lock(epochs, phase, freq, layout=None, baseline=None,
mode='mean', decim=1, colorbar=True, vmin=None,
vmax=None, cmap=None, layout_scale=0.945,
title=None):
"""Plot phase locking values (PLV) on sensor layout
Clicking on the PLV map of an individual sensor opens a new figure
showing the PLV map of the selected sensor.
Parameters
----------
epochs : instance of Epochs
The epochs used to generate the phase locking value
phase : 3D-array
Phase locking value, second return value from
mne.time_frequency.induced_power.
freq : array-like
Frequencies of interest as passed to induced_power
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | None
Do baseline correction with ratio (phase is divided by mean
phase during baseline) or z-score (phase is divided by standard
deviation of phase during baseline after subtracting the mean,
phase = [phase - mean(phase_baseline)] / std(phase_baseline)).
If None, no baseline correction will be performed.
decim : integer
Increment for selecting each nth time slice
colorbar : bool
If true, colorbar will be added to the plot
vmin : float
Minimum value mapped to lowermost color
vmax : float
Maximum value mapped to uppermost color
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
title : str
Title of the figure.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Phase lock images at sensor locations
"""
times = epochs.times[::decim] * 1e3
if mode is not None:
if baseline is None:
baseline = epochs.baseline
phase = rescale(phase.copy(), times, baseline, mode)
if vmin is None:
vmin = phase.min()
if vmax is None:
vmax = phase.max()
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(epochs.info)
phase_imshow = partial(_imshow_tfr, tfr=phase.copy(), freq=freq)
fig = _plot_topo(info=epochs.info, times=times,
show_func=phase_imshow, layout=layout, decim=decim,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border='w',
x_label='Time (s)', y_label='Frequency (Hz)')
return fig
def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
data=None, epochs=None, sigma=None,
order=None, scalings=None, vline=None,
x_label=None, y_label=None, colorbar=False):
"""Aux function to plot erfimage on sensor topography"""
import matplotlib.pyplot as plt
this_data = data[:, ch_idx, :].copy()
ch_type = channel_type(epochs.info, ch_idx)
if ch_type not in scalings:
raise KeyError('%s channel type not in scalings' % ch_type)
this_data *= scalings[ch_type]
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)], aspect='auto',
origin='lower', vmin=vmin, vmax=vmax, picker=True)
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def plot_topo_image_epochs(epochs, layout=None, sigma=0.3, vmin=None,
vmax=None, colorbar=True, order=None, cmap=None,
layout_scale=.95, title=None, scalings=None):
"""Plot Event Related Potential / Fields image on topographies
Parameters
----------
epochs : instance of Epochs
The epochs.
layout: instance of Layout
System specific sensor positions.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis of the image.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Whether to display a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int, it should have length equal
to the number of good epochs. If it's a callable, the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times)).
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values.
layout_scale: float
scaling factor for adjusting the relative size of the layout
on the canvas.
title : str
Title of the figure.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If
None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
scalings = _mutable_defaults(('scalings', scalings))[0]
data = epochs.get_data()
if vmin is None:
vmin = data.min()
if vmax is None:
vmax = data.max()
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(epochs.info)
erf_imshow = partial(_erfimage_imshow, scalings=scalings, order=order,
data=data, epochs=epochs, sigma=sigma)
fig = _plot_topo(info=epochs.info, times=epochs.times,
show_func=erf_imshow, layout=layout, decim=1,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title,
border='w', x_label='Time (s)', y_label='Epoch')
return fig
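A minimal usage sketch for the helper defined last above (not part of the original mne/viz/topo.py): it assumes `epochs` is an already-constructed mne.Epochs instance and that matplotlib is installed; the function name show_epochs_image_topo is illustrative only.

import matplotlib.pyplot as plt
from mne.viz import plot_topo_image_epochs

def show_epochs_image_topo(epochs):
    # layout=None lets the sensor layout be inferred from epochs.info,
    # exactly as the fallback branch of plot_topo_image_epochs does above.
    fig = plot_topo_image_epochs(epochs, sigma=0.3, colorbar=True,
                                 title='Per-channel epochs image')
    plt.show()
    return fig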
|
jaeilepp/eggie
|
mne/viz/topo.py
|
Python
|
bsd-2-clause
| 27,382
|
[
"Gaussian"
] |
a68769bd2ad01d050b7f492378c62fe069259eef72446c845ca2ca7b8abd94bb
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import io
import itertools
import os
import re
import time
import github
import jinja2
import ruamel.yaml
from conda_build.metadata import (ensure_valid_license_family,
FIELDS as cbfields)
import conda_build.conda_interface
from collections import defaultdict
import copy
from .utils import render_meta_yaml
FIELDS = copy.deepcopy(cbfields)
# Just in case 'extra' moves into conda_build
if 'extra' not in FIELDS.keys():
FIELDS['extra'] = []
FIELDS['extra'].append('recipe-maintainers')
EXPECTED_SECTION_ORDER = ['package', 'source', 'build', 'requirements',
'test', 'app', 'outputs', 'about', 'extra']
REQUIREMENTS_ORDER = ['build', 'host', 'run']
TEST_KEYS = {'imports', 'commands'}
sel_pat = re.compile(r'(.+?)\s*(#.*)?\[([^\[\]]+)\](?(2).*)$')
jinja_pat = re.compile(r'\s*\{%\s*(set)\s+[^\s]+\s*=\s*[^\s]+\s*%\}')
def get_section(parent, name, lints):
if name == 'source':
return get_source_section(parent, lints)
section = parent.get(name, {})
if not isinstance(section, dict):
lints.append('The "{}" section was expected to be a dictionary, but '
'got a {}.'.format(name, type(section).__name__))
section = {}
return section
def get_source_section(parent, lints):
section = parent.get('source', {})
if isinstance(section, dict):
return [ section ]
elif isinstance(section, list):
return section
else:
lints.append('The "source" section was expected to be a dictionary or '
'a list, but got a {}.{}'.format(type(section).__module__,
type(section).__name__))
return [ {} ]
def lint_section_order(major_sections, lints):
section_order_sorted = sorted(major_sections,
key=EXPECTED_SECTION_ORDER.index)
if major_sections != section_order_sorted:
section_order_sorted_str = map(lambda s: "'%s'" % s,
section_order_sorted)
section_order_sorted_str = ", ".join(section_order_sorted_str)
section_order_sorted_str = "[" + section_order_sorted_str + "]"
lints.append('The top level meta keys are in an unexpected order. '
'Expecting {}.'.format(section_order_sorted_str))
def lint_about_contents(about_section, lints):
for about_item in ['home', 'license', 'summary']:
# if the section doesn't exist, or is just empty, lint it.
if not about_section.get(about_item, ''):
lints.append('The {} item is expected in the about section.'
''.format(about_item))
def lintify(meta, recipe_dir=None, conda_forge=False):
lints = []
hints = []
major_sections = list(meta.keys())
# If the recipe_dir exists (no guarantee within this function), we can
# find the meta.yaml within it.
meta_fname = os.path.join(recipe_dir or '', 'meta.yaml')
sources_section = get_section(meta, 'source', lints)
build_section = get_section(meta, 'build', lints)
requirements_section = get_section(meta, 'requirements', lints)
test_section = get_section(meta, 'test', lints)
about_section = get_section(meta, 'about', lints)
extra_section = get_section(meta, 'extra', lints)
package_section = get_section(meta, 'package', lints)
# 0: Top level keys should be expected
unexpected_sections = []
for section in major_sections:
if section not in EXPECTED_SECTION_ORDER:
lints.append('The top level meta key {} is unexpected'.format(section))
unexpected_sections.append(section)
for section in unexpected_sections:
major_sections.remove(section)
# 1: Top level meta.yaml keys should have a specific order.
lint_section_order(major_sections, lints)
# 2: The about section should have a home, license and summary.
lint_about_contents(about_section, lints)
# 3a: The recipe should have some maintainers.
if not extra_section.get('recipe-maintainers', []):
lints.append('The recipe could do with some maintainers listed in '
'the `extra/recipe-maintainers` section.')
# 3b: Maintainers should be a list
if not isinstance(extra_section.get('recipe-maintainers', []), list):
lints.append('Recipe maintainers should be a json list.')
# 4: The recipe should have some tests.
if not any(key in TEST_KEYS for key in test_section):
test_files = ['run_test.py', 'run_test.sh', 'run_test.bat',
'run_test.pl']
a_test_file_exists = (recipe_dir is not None and
any(os.path.exists(os.path.join(recipe_dir,
test_file))
for test_file in test_files))
if not a_test_file_exists:
lints.append('The recipe must have some tests.')
# 5: License cannot be 'unknown.'
license = about_section.get('license', '').lower()
if 'unknown' == license.strip():
lints.append('The recipe license cannot be unknown.')
# 6: Selectors should be in a tidy form.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_selectors = []
bad_lines = []
# Good selectors look like ".*\s\s#\s[...]"
good_selectors_pat = re.compile(r'(.+?)\s{2,}#\s\[(.+)\](?(2).*)$')
with io.open(meta_fname, 'rt') as fh:
for selector_line, line_number in selector_lines(fh):
if not good_selectors_pat.match(selector_line):
bad_selectors.append(selector_line)
bad_lines.append(line_number)
if bad_selectors:
lints.append('Selectors are suggested to take a '
'``<two spaces>#<one space>[<expression>]`` form.'
' See lines {}'.format(bad_lines))
# 7: The build section should have a build number.
if build_section.get('number', None) is None:
lints.append('The recipe must have a `build/number` section.')
# 8: The build section should be before the run section in requirements.
seen_requirements = [
k for k in requirements_section if k in REQUIREMENTS_ORDER]
requirements_order_sorted = sorted(seen_requirements,
key=REQUIREMENTS_ORDER.index)
if seen_requirements != requirements_order_sorted:
lints.append('The `requirements/` sections should be defined '
'in the following order: ' + ', '.join(REQUIREMENTS_ORDER)
+ '; instead saw: ' + ', '.join(seen_requirements) + '.')
# 9: Files downloaded should have a hash.
for source_section in sources_section:
if ('url' in source_section and
not ({'sha1', 'sha256', 'md5'} & set(source_section.keys()))):
lints.append('When defining a source/url please add a sha256, sha1 '
'or md5 checksum (sha256 preferably).')
# 10: License should not include the word 'license'.
license = about_section.get('license', '').lower()
if 'license' in license.lower():
lints.append('The recipe `license` should not include the word '
'"License".')
# 11: There should be one empty line at the end of the file.
if recipe_dir is not None and os.path.exists(meta_fname):
with io.open(meta_fname, 'r') as f:
lines = f.read().split('\n')
# Count the number of empty lines from the end of the file
empty_lines = itertools.takewhile(lambda x: x == '', reversed(lines))
end_empty_lines_count = len(list(empty_lines))
if end_empty_lines_count > 1:
lints.append('There are {} too many lines. '
'There should be one empty line at the end of the '
'file.'.format(end_empty_lines_count - 1))
elif end_empty_lines_count < 1:
lints.append('There are too few lines. There should be one empty '
'line at the end of the file.')
# 12: License family must be valid (conda-build checks for that)
try:
ensure_valid_license_family(meta)
except RuntimeError as e:
lints.append(str(e))
# 13: Check that the recipe name is valid
recipe_name = package_section.get('name', '').strip()
if re.match('^[a-z0-9_\-.]+$', recipe_name) is None:
lints.append('Recipe name has invalid characters. only lowercase alpha, numeric, '
'underscores, hyphens and dots allowed')
# 14: Run conda-forge specific lints
if conda_forge:
run_conda_forge_lints(meta, recipe_dir, lints)
# 15: Check if we are using legacy patterns
build_reqs = requirements_section.get('build', None)
if build_reqs and ('numpy x.x' in build_reqs):
lints.append('Using pinned numpy packages is a deprecated pattern. Consider '
'using the method outlined '
'[here](https://conda-forge.org/docs/meta.html#building-against-numpy).')
# 16: Subheaders should be in the allowed subheadings
for section in major_sections:
expected_subsections = FIELDS.get(section, [])
if not expected_subsections:
continue
for subsection in get_section(meta, section, lints):
if section != 'source' and subsection not in expected_subsections:
lints.append('The {} section contained an unexpected '
'subsection name. {} is not a valid subsection'
' name.'.format(section, subsection))
elif section == 'source':
for source_subsection in subsection:
if source_subsection not in expected_subsections:
lints.append('The {} section contained an unexpected '
'subsection name. {} is not a valid subsection'
' name.'.format(section, source_subsection))
# 17: noarch doesn't work with selectors
if build_section.get('noarch') is not None and os.path.exists(meta_fname):
with io.open(meta_fname, 'rt') as fh:
in_requirements = False
for line in fh:
line_s = line.strip()
if (line_s == "requirements:"):
in_requirements = True
requirements_spacing = line[:-len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
lints.append("`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section['noarch']))
break
if in_requirements:
if requirements_spacing == line[:-len(line.lstrip())]:
in_requirements = False
continue
if is_selector_line(line):
lints.append("`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section['noarch']))
break
# 18: noarch and python setup.py doesn't work
if build_section.get('noarch') == 'python':
if 'script' in build_section:
scripts = build_section['script']
if isinstance(scripts, str):
scripts = [scripts]
for script in scripts:
if "python setup.py install" in script:
lints.append("`noarch: python` packages should use pip. "
"See https://conda-forge.org/docs/meta.html#use-pip")
# 19: check version
if package_section.get('version') is not None:
ver = str(package_section.get('version'))
try:
conda_build.conda_interface.VersionOrder(ver)
except:
lints.append("Package version {} doesn't match conda spec".format(ver))
# 20: Jinja2 variable definitions should be nice.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_jinja = []
bad_lines = []
# Good Jinja2 variable definitions look like "{% set .+ = .+ %}"
good_jinja_pat = re.compile(r'\s*\{%\s(set)\s[^\s]+\s=\s[^\s]+\s%\}')
with io.open(meta_fname, 'rt') as fh:
for jinja_line, line_number in jinja_lines(fh):
if not good_jinja_pat.match(jinja_line):
bad_jinja.append(jinja_line)
bad_lines.append(line_number)
if bad_jinja:
lints.append('Jinja2 variable definitions are suggested to '
'take a ``{{%<one space>set<one space>'
'<variable name><one space>=<one space>'
'<expression><one space>%}}`` form. See lines '
'{}'.format(bad_lines))
# hints
# 1: Legacy usage of compilers
if build_reqs and ('toolchain' in build_reqs):
hints.append('Using toolchain directly in this manner is deprecated. Consider '
'using the compilers outlined '
'[here](https://conda-forge.org/docs/meta.html#compilers).')
return lints, hints
def run_conda_forge_lints(meta, recipe_dir, lints):
gh = github.Github(os.environ['GH_TOKEN'])
package_section = get_section(meta, 'package', lints)
extra_section = get_section(meta, 'extra', lints)
recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else 'recipe'
recipe_name = package_section.get('name', '').strip()
is_staged_recipes = recipe_dirname != 'recipe'
# 1: Check that the recipe does not exist in conda-forge
if is_staged_recipes:
cf = gh.get_user(os.getenv('GH_ORG', 'conda-forge'))
try:
cf.get_repo('{}-feedstock'.format(recipe_name))
feedstock_exists = True
except github.UnknownObjectException as e:
feedstock_exists = False
if feedstock_exists:
lints.append('Feedstock with the same name exists in conda-forge')
bio = gh.get_user('bioconda').get_repo('bioconda-recipes')
try:
bio.get_dir_contents('recipes/{}'.format(recipe_name))
except github.UnknownObjectException as e:
pass
else:
lints.append("Recipe with the same name exists in bioconda: "
"please discuss with @conda-forge/bioconda-recipes.")
# 2: Check that the recipe maintainers exists:
maintainers = extra_section.get('recipe-maintainers', [])
for maintainer in maintainers:
try:
gh.get_user(maintainer)
except github.UnknownObjectException as e:
lints.append('Recipe maintainer "{}" does not exist'.format(maintainer))
# 3: if the recipe dir is inside the example dir
if recipe_dir is not None and 'recipes/example/' in recipe_dir:
lints.append('Please move the recipe out of the example dir and '
'into its own dir.')
def is_selector_line(line):
# Using the same pattern defined in conda-build (metadata.py),
# we identify selectors.
line = line.rstrip()
if line.lstrip().startswith('#'):
# Don't bother with comment only lines
return False
m = sel_pat.match(line)
if m:
m.group(3)
return True
return False
def is_jinja_line(line):
line = line.rstrip()
m = jinja_pat.match(line)
if m:
return True
return False
def selector_lines(lines):
for i, line in enumerate(lines):
if is_selector_line(line):
yield line, i
def jinja_lines(lines):
for i, line in enumerate(lines):
if is_jinja_line(line):
yield line, i
def main(recipe_dir, conda_forge=False, return_hints=False):
recipe_dir = os.path.abspath(recipe_dir)
recipe_meta = os.path.join(recipe_dir, 'meta.yaml')
if not os.path.exists(recipe_dir):
raise IOError('Feedstock has no recipe/meta.yaml.')
with io.open(recipe_meta, 'rt') as fh:
content = render_meta_yaml(''.join(fh))
meta = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader)
results, hints = lintify(meta, recipe_dir, conda_forge)
if return_hints:
return results, hints
else:
return results
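For orientation, the linter above can be exercised directly on an in-memory metadata dictionary, bypassing main() (so no meta.yaml file or GH_TOKEN is required). The sketch below is illustrative only; the package metadata is made up, and with conda_forge=False no GitHub calls are made.

from conda_smithy.lint_recipe import lintify

meta = {
    'package': {'name': 'example-pkg', 'version': '1.0.0'},
    'build': {'number': 0},
    'requirements': {'host': ['python'], 'run': ['python']},
    'test': {'imports': ['example_pkg']},
    'about': {'home': 'https://example.com', 'license': 'BSD-3-Clause',
              'summary': 'An example package'},
    'extra': {'recipe-maintainers': ['some-user']},
}

lints, hints = lintify(meta)
print(lints)   # should be empty (or close to it) for this well-formed metadata
print(hints)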
|
shadowwalkersb/conda-smithy
|
conda_smithy/lint_recipe.py
|
Python
|
bsd-3-clause
| 16,738
|
[
"Bioconda"
] |
949e2390772894dd69635fac9fb20d96e67dc519626afcdd03bf81c72e379bf9
|
import uuid
from django.conf import settings
from django.core.mail import send_mail, EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.html import strip_tags
BASE_CLIENT_URL = 'http://elsyser.netlify.com/#/'
def generate_activation_key():
return uuid.uuid4().hex
def send_verification_email(user):
subject = 'ELSYSER Account activation'
client_url = BASE_CLIENT_URL + 'auth/activate/{activation_key}/'.format(
activation_key=user.student.activation_key
)
message = 'Visit this link to activate your ELSYSER account: {url}'.format(url=client_url)
msg = 'Hello, {full_name}!\n\n{message}\n\n ~ The ELSYSER Team ~'.format(
full_name=user.get_full_name(),
message=message
)
send_mail(
subject=subject,
message=msg,
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[user.email],
fail_silently=False
)
def send_creation_email(user, model):
model_type = model.__class__.__name__.lower()
client_resource_link = '{model_type}s/{id}/'.format(model_type=model_type, id=model.id)
template_context = {
'full_name': user.get_full_name(),
'type': model_type,
'model': model,
'author': model.author,
'link': BASE_CLIENT_URL + client_resource_link
}
html_content = render_to_string('utils/email.html', context=template_context)
text_content = strip_tags(html_content)
subject = 'ELSYSER {resource} added'.format(resource=model_type)
msg = EmailMultiAlternatives(
subject,
text_content,
settings.DEFAULT_FROM_EMAIL,
[user.email]
)
msg.attach_alternative(html_content, "text/html")
msg.send()
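These helpers presuppose a configured Django project: settings.DEFAULT_FROM_EMAIL, a working email backend, and the utils/email.html template must exist. A rough sketch of exercising them from the project's `manage.py shell` follows; the user lookup is illustrative and assumes a user with an attached student profile, as the code above expects.

from django.contrib.auth import get_user_model
from students.utils import generate_activation_key, send_verification_email

User = get_user_model()
user = User.objects.first()                       # any existing user with a .student profile
user.student.activation_key = generate_activation_key()
user.student.save()
send_verification_email(user)                     # sends the plain-text activation mail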
|
pu6ki/elsyser
|
students/utils.py
|
Python
|
mit
| 1,752
|
[
"VisIt"
] |
b95d7325fc0ae426606eeb90ae586181d831227000bec87fee51fd382af92b09
|
#!/usr/local/sci/bin/python
#*****************************
#
# controller for internal QC checks.
#
#
#************************************************************************
# SVN Info
#$Rev:: 112 $: Revision of last commit
#$Author:: rdunn $: Author of last commit
#$Date:: 2017-01-13 14:47:17 +0000 (Fri, 13 Jan 2017) $: Date of last commit
#************************************************************************
import numpy as np
import scipy as sp
import os
import sys
import datetime as dt
import subprocess
import time
# RJHD utilities
import netcdf_procs as ncdfp
import qc_utils as utils
import qc_tests
from set_paths_and_vars import *
#*********************************************
def internal_checks(station_info, restart_id = "", end_id = "", second = False,
all_checks = True,
duplicate = False,
odd = False,
frequent = False,
diurnal = False,
gap = False,
records = False,
streaks = False,
climatological = False,
spike = False,
humidity = False,
cloud = False,
variance = False,
winds = False,
diagnostics = False,
plots = False
):
'''
Run through internal checks on list of stations passed
:param list station_info: list of lists - [[ID, lat, lon, elev]] - strings
:param str restart_id: which station to start on
:param str end_id: which station to end on
:param bool second: do the second run
:param bool all_checks: run all the checks
:param bool duplicate/odd/frequent/diurnal/gap/records/streaks/
climatological/spike/humidity/cloud/variance/winds: run each test separately
:param bool diagnostics: print extra material to screen
:param bool plots: create plots from each test [many files if all stations/all tests]
'''
first = not second
if all_checks:
duplicate = True
odd = True
frequent = True
diurnal = True
gap = True
records = True
streaks = True
climatological = True
spike = True
humidity = True
cloud = True
variance = True
winds = True
else:
print "single tests selected"
qc_code_version = subprocess.check_output(['svnversion']).strip()
# sort truncated run
startindex = 0
if restart_id != "":
startindex, = np.where(station_info[:,0] == restart_id)
if end_id != "":
endindex, = np.where(station_info[:,0] == end_id)
if endindex != len(station_info) -1:
station_info = station_info[startindex: endindex+1]
else:
station_info = station_info[startindex:]
else:
station_info = station_info[startindex:]
for st,stat in enumerate(station_info):
# if st%100 != 0: continue # do every nth station
print dt.datetime.strftime(dt.datetime.now(), "%A, %d %B %Y, %H:%M:%S")
print "{:35s} {:d}/{:d}".format("Station Number : ", st + 1, len(station_info))
print "{:35s} {}".format("Station Identifier :", stat[0])
if plots or diagnostics:
logfile = ""
else:
if first:
logfile = file(LOG_OUTFILE_LOCS+stat[0]+'.log','w')
elif second:
logfile = file(LOG_OUTFILE_LOCS+stat[0]+'.log','a') # append to file if second iteration.
logfile.write(dt.datetime.strftime(dt.datetime.now(), "%A, %d %B %Y, %H:%M:%S\n"))
logfile.write("Internal Checks\n")
logfile.write("{:35s} {}\n".format("Station Identifier :", stat[0]))
process_start_time = time.time()
station = utils.Station(stat[0], float(stat[1]), float(stat[2]), float(stat[3]))
# latitude and longitude check
if np.abs(station.lat) > 90.:
if plots or diagnostics:
print "{} {} {} {} {} {} {}\n".format(\
station.id,"Latitude Check",DATASTART.year, DATAEND.year,"All", "Unphysical latitude {}".format(station.lat))
else:
logfile.write("{} {} {} {} {} {} {}\n".format(\
station.id,"Latitude Check",DATASTART.year, DATAEND.year,"All", "Unphysical latitude {}".format(station.lat)))
logfile.close()
continue
if np.abs(station.lon) > 180.:
if plots or diagnostics:
print "{} {} {} {} {} {} {}\n".format(\
station.id,"Longitude Check",DATASTART.year, DATAEND.year,"All", "Unphysical longitude {}".format(station.lon))
else:
logfile.write("{} {} {} {} {} {} {}\n".format(\
station.id,"Longitude Check",DATASTART.year, DATAEND.year,"All", "Unphysical longitude {}".format(station.lon)))
logfile.close()
continue
# if running through the first time
if first:
if os.path.exists(os.path.join(NETCDF_DATA_LOCS, station.id + ".nc.gz")):
# if gzip file, unzip here
subprocess.call(["gunzip",os.path.join(NETCDF_DATA_LOCS, station.id + ".nc.gz")])
time.sleep(5) # make sure it is unzipped before proceeding
# read in the data
ncdfp.read(os.path.join(NETCDF_DATA_LOCS, station.id + ".nc"), station, process_vars, opt_var_list = carry_thru_vars, diagnostics = diagnostics)
if plots or diagnostics:
print "{:35s} {}\n".format("Total station record size :",len(station.time.data))
else:
logfile.write("{:35s} {}\n".format("Total station record size :",len(station.time.data)))
match_to_compress = utils.create_fulltimes(station, process_vars, DATASTART, DATAEND, carry_thru_vars)
station.qc_flags = np.zeros([len(station.time.data),69]) # changed to include updated wind tests
# get reporting accuracies and frequencies.
for var in process_vars:
st_var = getattr(station, var)
st_var.reporting_stats = utils.monthly_reporting_statistics(st_var, DATASTART, DATAEND)
# or if second pass through?
elif second:
ncdfp.read(os.path.join(NETCDF_DATA_LOCS, station.id + "_mask.nc"), station, process_vars, opt_var_list = carry_thru_vars, diagnostics = diagnostics)
print "{:35s} {}\n".format("Total station record size :",len(station.time.data))
match_to_compress = utils.create_fulltimes(station, process_vars, DATASTART, DATAEND, carry_thru_vars)
# Add history text to netcdf file
# Reporting Changes - TODO
# Duplicate months - check on temperature ONLY
if duplicate:
qc_tests.duplicate_months.dmc(station, ['temperatures'], process_vars, [0], DATASTART, DATAEND, logfile, diagnostics = diagnostics, plots = plots)
# Odd Clusters
if odd:
qc_tests.odd_cluster.occ(station,['temperatures','dewpoints','windspeeds','slp'], [54,55,56,57], DATASTART, logfile, diagnostics = diagnostics, plots = plots, second = second)
utils.apply_windspeed_flags_to_winddir(station, diagnostics = diagnostics)
# Frequent Values
if frequent:
qc_tests.frequent_values.fvc(station, ['temperatures', 'dewpoints','slp'], [1,2,3], DATASTART, DATAEND, logfile, diagnostics = diagnostics, plots = plots)
# Diurnal Cycle
if diurnal:
if np.abs(station.lat) <= 60.:
qc_tests.diurnal_cycle.dcc(station, ['temperatures'], process_vars, [4], logfile, diagnostics = diagnostics, plots = plots)
else:
if plots or diagnostics:
print "Diurnal Cycle Check not run as station latitude ({}) > 60\n".format(station.lat)
else:
logfile.write("Diurnal Cycle Check not run as station latitude ({}) > 60\n".format(station.lat))
# Distributional Gap
if gap:
qc_tests.distributional_gap.dgc(station, ['temperatures','dewpoints','slp'], [5,6,7], DATASTART, DATAEND, logfile, diagnostics = diagnostics, plots = plots, GH = True)
# Records
if records:
qc_tests.records.krc(station, ['temperatures','dewpoints','windspeeds','slp'], [8,9,10,11], logfile, diagnostics = diagnostics, plots = plots)
utils.apply_windspeed_flags_to_winddir(station, diagnostics = diagnostics)
# Streaks and Repetitions
if streaks:
qc_tests.streaks.rsc(station, ['temperatures','dewpoints','windspeeds','slp','winddirs'], [[12,16,20],[13,17,21],[14,18,22],[15,19,23],[66,67,68]], DATASTART, DATAEND, logfile, diagnostics = diagnostics, plots = plots)
utils.apply_windspeed_flags_to_winddir(station, diagnostics = diagnostics)
# Climatological Outlier
if climatological:
qc_tests.climatological.coc(station, ['temperatures','dewpoints'], [24,25], DATASTART, DATAEND, logfile, diagnostics = diagnostics, plots = plots)
# column 26 kept spare for slp
# Spike
if spike:
qc_tests.spike.sc(station, ['temperatures','dewpoints','slp','windspeeds'], [27,28,29,65], DATASTART, DATAEND, logfile, diagnostics = diagnostics, plots = plots, second = second)
utils.apply_windspeed_flags_to_winddir(station, diagnostics = diagnostics)
# Humidity cross checks
if humidity:
qc_tests.humidity.hcc(station, [30,31,32], DATASTART, DATAEND, logfile, diagnostics = diagnostics, plots = plots)
# Cloud cross check
if cloud:
qc_tests.clouds.ccc(station, [33,34,35,36,37,38,39,40], logfile, diagnostics = diagnostics, plots = plots)
# Variance
if variance:
qc_tests.variance.evc(station, ['temperatures','dewpoints','slp','windspeeds'], [58,59,60,61], DATASTART, DATAEND, logfile, diagnostics = diagnostics, plots = plots)
utils.apply_windspeed_flags_to_winddir(station, diagnostics = diagnostics)
# Winds
if winds:
qc_tests.winds.wdc(station, [62,63,64], DATASTART, DATAEND, logfile, diagnostics = diagnostics, plots = plots)
# are flags actually applied?
if diagnostics or plots: raw_input("stop")
# write to file
if first:
ncdfp.write(os.path.join(NETCDF_DATA_LOCS, station.id + "_internal.nc"), station, process_vars, os.path.join(INPUT_FILE_LOCS,'attributes.dat'), opt_var_list = carry_thru_vars, compressed = match_to_compress, processing_date = '', qc_code_version = qc_code_version)
# gzip the raw file
subprocess.call(["gzip",os.path.join(NETCDF_DATA_LOCS, station.id + ".nc")])
elif second:
ncdfp.write(os.path.join(NETCDF_DATA_LOCS, station.id + "_internal2.nc"), station, process_vars, os.path.join(INPUT_FILE_LOCS,'attributes.dat'), opt_var_list = carry_thru_vars, compressed = match_to_compress, processing_date = '', qc_code_version = qc_code_version)
# gzip the raw file
subprocess.call(["gzip",os.path.join(NETCDF_DATA_LOCS, station.id + "_mask.nc")])
logfile.write(dt.datetime.strftime(dt.datetime.now(), "%A, %d %B %Y, %H:%M:%S\n"))
logfile.write("processing took {:4.0f}s\n\n".format(time.time() - process_start_time))
logfile.close()
print "Internal Checks completed\n"
return # internal_checks
#************************************************************************
if __name__=="__main__":
import argparse
# set up keyword arguments
parser = argparse.ArgumentParser()
parser.add_argument('--restart_id', dest='restart_id', action='store', default = "",
help='Restart ID for truncated run, default = ""')
parser.add_argument('--end_id', dest='end_id', action='store', default = "",
help='End ID for truncated run, default = ""')
parser.add_argument('--second', dest='second', action='store_true', default = False,
help='Second run through')
parser.add_argument('--diagnostics', dest='diagnostics', action='store_true', default = False,
help='Run diagnostics (will not write out file)')
parser.add_argument('--plots', dest='plots', action='store_true', default = False,
help='Run plots (will not write out file)')
parser.add_argument('--all', dest='all', action='store_true', default = False,
help='Run all checks')
parser.add_argument('--duplicate', dest='duplicate', action='store_true', default = False,
help='Run duplicate months check')
parser.add_argument('--odd', dest='odd', action='store_true', default = False,
help='Run odd cluster check')
parser.add_argument('--frequent', dest='frequent', action='store_true', default = False,
help='Run frequent value check')
parser.add_argument('--diurnal', dest='diurnal', action='store_true', default = False,
help='Run diurnal cycle check')
parser.add_argument('--gap', dest='gap', action='store_true', default = False,
help='Run distributional gap check')
parser.add_argument('--records', dest='records', action='store_true', default = False,
help='Run world records check')
parser.add_argument('--streaks', dest='streaks', action='store_true', default = False,
help='Run streak check')
parser.add_argument('--climatological', dest='climatological', action='store_true', default = False,
help='Run climatological outlier check')
parser.add_argument('--spike', dest='spike', action='store_true', default = False,
help='Run spike check')
parser.add_argument('--humidity', dest='humidity', action='store_true', default = False,
help='Run humidity cross checks')
parser.add_argument('--cloud', dest='cloud', action='store_true', default = False,
help='Run cloud cross checks')
parser.add_argument('--variance', dest='variance', action='store_true', default = False,
help='Run variance check')
parser.add_argument('--winds', dest='winds', action='store_true', default = False,
help='Run winds checks')
args = parser.parse_args()
if args.all:
# check that no other test is set on top
if args.duplicate: sys.exit("all tests and single test set - what did you want to do?")
if args.odd: sys.exit("all tests and single test set - what did you want to do?")
if args.frequent: sys.exit("all tests and single test set - what did you want to do?")
if args.diurnal: sys.exit("all tests and single test set - what did you want to do?")
if args.gap: sys.exit("all tests and single test set - what did you want to do?")
if args.records: sys.exit("all tests and single test set - what did you want to do?")
if args.streaks: sys.exit("all tests and single test set - what did you want to do?")
if args.climatological: sys.exit("all tests and single test set - what did you want to do?")
if args.spike: sys.exit("all tests and single test set - what did you want to do?")
if args.humidity: sys.exit("all tests and single test set - what did you want to do?")
if args.cloud: sys.exit("all tests and single test set - what did you want to do?")
if args.variance: sys.exit("all tests and single test set - what did you want to do?")
if args.winds: sys.exit("all tests and single test set - what did you want to do?")
'''To run as stand alone, process the file and obtain station list'''
station_list = "candidate_stations.txt"
try:
station_info = np.genfromtxt(os.path.join(INPUT_FILE_LOCS, station_list), dtype=(str))
except IOError:
print "station list not found"
sys.exit()
uk = False
if uk:
uk_locs = []
for s,station in enumerate(station_info[:,0]):
if station[:2] == "03":
uk_locs += [s]
station_info = station_info[uk_locs]
#station_info = ["031740-99999 56.300 -2.583 12.0".split()]
internal_checks(station_info, restart_id = args.restart_id, end_id = args.end_id, second = args.second,
all_checks = args.all,
duplicate = args.duplicate,
odd = args.odd,
frequent = args.frequent,
diurnal = args.diurnal,
gap = args.gap,
records = args.records,
streaks = args.streaks,
climatological = args.climatological,
spike = args.spike,
humidity = args.humidity,
cloud = args.cloud,
variance = args.variance,
winds = args.winds,
diagnostics = args.diagnostics,
plots = args.plots)
#************************************************************************
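For context, internal_checks() expects station_info as rows of strings [ID, lat, lon, elev] and relies on the paths and variable lists imported from set_paths_and_vars; it will also try to read the station's netCDF file from NETCDF_DATA_LOCS. The call below is therefore only an illustrative sketch (Python 2, matching the script above) with a fabricated station row, not something that runs without the corresponding data files.

import numpy as np
from internal_checks import internal_checks

# One fabricated station row: [ID, latitude, longitude, elevation] as strings.
station_info = np.array([["010010-99999", "70.933", "-8.667", "9.0"]])

# Run only the duplicate-months and odd-cluster tests, printing diagnostics;
# requires 010010-99999.nc (or .nc.gz) to exist under NETCDF_DATA_LOCS.
internal_checks(station_info, all_checks=False, duplicate=True, odd=True,
                diagnostics=True)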
|
rjhd2/HadISD_v2
|
internal_checks.py
|
Python
|
bsd-3-clause
| 17,691
|
[
"NetCDF"
] |
dc93c5e74ba253ce23f96a9d2387a16e5b0ee07f003868ee4cfce9528bc26f4e
|
import re
from thefuck.utils import for_app
from thefuck.system import open_command
@for_app('yarn', at_least=2)
def match(command):
return (command.script_parts[1] == 'help'
and 'for documentation about this command.' in command.output)
def get_new_command(command):
url = re.findall(
r'Visit ([^ ]*) for documentation about this command.',
command.output)[0]
return open_command(url)
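The rule above only inspects command.script_parts and command.output. Its URL extraction can be sketched with a simple stand-in object (illustrative only; in real use thefuck constructs the Command and applies the @for_app filter itself):

from thefuck.rules.yarn_help import get_new_command

class FakeCommand(object):
    # Only the attribute read by get_new_command is provided here.
    output = ('Visit https://yarnpkg.com/en/docs/cli/clean '
              'for documentation about this command.')

# Prints the platform-specific opener, e.g. 'xdg-open https://...' on Linux.
print(get_new_command(FakeCommand()))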
|
nvbn/thefuck
|
thefuck/rules/yarn_help.py
|
Python
|
mit
| 431
|
[
"VisIt"
] |
290bbc673fa7f5cd31efb9a3a76c81fc8770eccc904065e662b1717b14bea4e9
|
#!/usr/bin/python
#
# Copyright (C) 2010,2012,2013,2014,2015,2016 The ESPResSo project
# Copyright (C) 2008 Axel Arnold
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import sys
import re
maxbacktrace=5
f=open(sys.argv[1], "r")
if len(sys.argv) > 2:
n=int(sys.argv[2])
else:
n=0
# regular expressions
re_start = re.compile(r"^%d: (?P<op>[a-z]+) (?P<args>.*)" % n)
allocated = {}
linenr=0
for line in f:
linenr = linenr + 1
if linenr % 1000 == 0:
sys.stderr.write(".")
match = re_start.match(line)
if match is None: continue
op = match.group('op')
args = match.group('args').split(" ")
if op == "alloc":
size = args[0]
addr = args[2]
src = [args[4]]
allocated[addr] = (size, src)
elif op == "realloc":
old = args[0]
addr = args[2]
size = args[4]
src = [args[6]]
if old == "(nil)":
pass
elif old in allocated:
prev = allocated[old][1][:maxbacktrace-1]
src.extend(prev)
del allocated[old]
else:
src.append("unmanaged source " + old)
allocated[addr] = (size, src)
elif op == "free":
addr = args[0]
src = args[2]
if addr == "(nil)":
pass
elif addr in allocated:
del allocated[addr]
else:
print("\n" + addr + " freed at " + src + ", but never allocated\n")
print("\n")
for (addr,info) in list(allocated.items()):
s = info[0] + " @ " + addr + " allocated at " + info[1][0]
for loc in info[1][1:]:
s += ", from " + loc
print(s)
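The parser above is driven purely by word positions within each trace line (for alloc: args[0] = size, args[2] = address, args[4] = source; for free: args[0] = address, args[2] = source). The snippet below is a self-contained sketch of that position-based parsing with fabricated lines shaped to those indices; real input comes from ESPResSo's memory-debug output, and the script is normally invoked as `python tools/trace_memory.py <tracefile> [node]`.

import re

re_start = re.compile(r"^0: (?P<op>[a-z]+) (?P<args>.*)")

# Fabricated trace lines: only the word positions used by the script matter here.
sample = [
    "0: alloc 128 bytes 0x1000 at cells.c:101",
    "0: free 0x1000 at cells.c:150",
]
for line in sample:
    m = re_start.match(line)
    print(m.group('op'), m.group('args').split(" "))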
|
lahnerml/espresso
|
tools/trace_memory.py
|
Python
|
gpl-3.0
| 2,286
|
[
"ESPResSo"
] |
28962728baa37dac7a51e9870fb00d92e343356389316d164bc3b167d7427642
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import unittest as ut
import unittest_decorators as utx
import numpy as np
import numpy.testing
import espressomd
from espressomd import lb
@utx.skipIfMissingGPU()
class TestLBGetUAtPos(ut.TestCase):
"""
Check velocities at particle positions are sorted by ``id`` and
quantitatively correct (only LB GPU).
"""
@classmethod
def setUpClass(self):
self.params = {
'tau': 0.01,
'agrid': 0.5,
'box_l': [12.0, 12.0, 12.0],
'dens': 0.85,
'viscosity': 30.0,
'friction': 2.0,
'gamma': 1.5
}
self.system = espressomd.System(box_l=[1.0, 1.0, 1.0])
self.system.box_l = self.params['box_l']
self.system.cell_system.skin = 0.4
self.system.time_step = 0.01
self.n_nodes_per_dim = int(self.system.box_l[0] / self.params['agrid'])
for p in range(self.n_nodes_per_dim):
# Set particles exactly between two LB nodes in x direction.
self.system.part.add(id=p,
pos=[(p + 1) * self.params['agrid'],
0.5 * self.params['agrid'],
0.5 * self.params['agrid']])
self.lb_fluid = lb.LBFluidGPU(
visc=self.params['viscosity'],
dens=self.params['dens'],
agrid=self.params['agrid'],
tau=self.params['tau'],
)
self.system.actors.add(self.lb_fluid)
self.vels = np.zeros((self.n_nodes_per_dim, 3))
self.vels[:, 0] = np.arange(self.n_nodes_per_dim, dtype=float)
self.interpolated_vels = self.vels.copy()
self.interpolated_vels[:, 0] += 0.5
for n in range(self.n_nodes_per_dim):
self.lb_fluid[n, 0, 0].velocity = self.vels[n, :]
self.system.integrator.run(0)
def test_get_u_at_pos(self):
"""
Test if linear interpolated velocities are equal to the velocities at
the particle positions. This test uses the two-point coupling under
the hood.
"""
numpy.testing.assert_allclose(
self.interpolated_vels[:-1],
self.lb_fluid.get_interpolated_fluid_velocity_at_positions(
self.system.part[:].pos, False)[:-1],
atol=1e-4)
if __name__ == "__main__":
suite = ut.TestSuite()
suite.addTests(ut.TestLoader().loadTestsFromTestCase(TestLBGetUAtPos))
result = ut.TextTestRunner(verbosity=4).run(suite)
sys.exit(not result.wasSuccessful())
|
mkuron/espresso
|
testsuite/python/lb_get_u_at_pos.py
|
Python
|
gpl-3.0
| 3,270
|
[
"ESPResSo"
] |
a849bbffaf6cb0c5e2a4843068a0044373c6d33b728ee128387741926b0988e6
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import sys
import warnings
from collections import defaultdict
from collections import deque
from collections import OrderedDict
import attr
import py
import six
from more_itertools import flatten
from py._code.code import FormattedExcinfo
import _pytest
from _pytest import nodes
from _pytest._code.code import TerminalRepr
from _pytest.compat import _format_args
from _pytest.compat import _PytestWrapper
from _pytest.compat import exc_clear
from _pytest.compat import FuncargnamesCompatAttr
from _pytest.compat import get_real_func
from _pytest.compat import get_real_method
from _pytest.compat import getfslineno
from _pytest.compat import getfuncargnames
from _pytest.compat import getimfunc
from _pytest.compat import getlocation
from _pytest.compat import is_generator
from _pytest.compat import isclass
from _pytest.compat import NOTSET
from _pytest.compat import safe_getattr
from _pytest.deprecated import FIXTURE_FUNCTION_CALL
from _pytest.deprecated import FIXTURE_NAMED_REQUEST
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
FIXTURE_MSG = 'fixtures cannot have "pytest_funcarg__" prefix and be decorated with @pytest.fixture:\n{}'
@attr.s(frozen=True)
class PseudoFixtureDef(object):
cached_result = attr.ib()
scope = attr.ib()
def pytest_sessionstart(session):
import _pytest.python
import _pytest.nodes
scopename2class.update(
{
"package": _pytest.python.Package,
"class": _pytest.python.Class,
"module": _pytest.python.Module,
"function": _pytest.nodes.Item,
"session": _pytest.main.Session,
}
)
session._fixturemanager = FixtureManager(session)
scopename2class = {}
scope2props = dict(session=())
scope2props["package"] = ("fspath",)
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance",)
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError(
"%s not available in %s-scoped context" % (scopename, self.scope)
)
return property(provide, None, None, func.__doc__)
return decoratescope
def get_scope_package(node, fixturedef):
import pytest
cls = pytest.Package
current = node
fixture_package_name = "%s/%s" % (fixturedef.baseid, "__init__.py")
while current and (
type(current) is not cls or fixture_package_name != current.nodeid
):
current = current.parent
if current is None:
return node.session
return current
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
raise ValueError("unknown scope")
return node.getparent(cls)
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
# this function will transform all collected calls to a functions
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname, scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, _pytest.python.Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(
fixturemanager,
"",
argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist,
False,
False,
)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except TEST_OUTCOME:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
# cs.indices.items() is random order of argnames. Need to
# sort this so that different calls to
# get_parametrized_fixture_keys will be deterministic.
for argname, param_index in sorted(cs.indices.items()):
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # package
key = (argname, param_index, item.fspath.dirpath())
elif scopenum == 2: # module
key = (argname, param_index, item.fspath)
elif scopenum == 3: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
argkeys_cache = {}
items_by_argkey = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
items_by_argkey[scopenum] = item_d = defaultdict(deque)
for item in items:
keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
for key in keys:
item_d[key].append(item)
items = OrderedDict.fromkeys(items)
return list(reorder_items_atscope(items, argkeys_cache, items_by_argkey, 0))
def fix_cache_order(item, argkeys_cache, items_by_argkey):
for scopenum in range(0, scopenum_function):
for key in argkeys_cache[scopenum].get(item, []):
items_by_argkey[scopenum][key].appendleft(item)
def reorder_items_atscope(items, argkeys_cache, items_by_argkey, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
ignore = set()
items_deque = deque(items)
items_done = OrderedDict()
scoped_items_by_argkey = items_by_argkey[scopenum]
scoped_argkeys_cache = argkeys_cache[scopenum]
while items_deque:
no_argkey_group = OrderedDict()
slicing_argkey = None
while items_deque:
item = items_deque.popleft()
if item in items_done or item in no_argkey_group:
continue
argkeys = OrderedDict.fromkeys(
k for k in scoped_argkeys_cache.get(item, []) if k not in ignore
)
if not argkeys:
no_argkey_group[item] = None
else:
slicing_argkey, _ = argkeys.popitem()
# we don't have to remove relevant items from later in the deque because they'll just be ignored
matching_items = [
i for i in scoped_items_by_argkey[slicing_argkey] if i in items
]
for i in reversed(matching_items):
fix_cache_order(i, argkeys_cache, items_by_argkey)
items_deque.appendleft(i)
break
if no_argkey_group:
no_argkey_group = reorder_items_atscope(
no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1
)
for item in no_argkey_group:
items_done[item] = None
ignore.add(slicing_argkey)
return items_done
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
def get_direct_param_fixture_func(request):
return request.param
@attr.s(slots=True)
class FuncFixtureInfo(object):
# original function argument names
argnames = attr.ib(type=tuple)
# argnames that function immediately requires. These include argnames +
# fixture names specified via usefixtures and via autouse=True in fixture
# definitions.
initialnames = attr.ib(type=tuple)
names_closure = attr.ib() # type: List[str]
name2fixturedefs = attr.ib() # type: List[str, List[FixtureDef]]
def prune_dependency_tree(self):
"""Recompute names_closure from initialnames and name2fixturedefs
Can only reduce names_closure, which means that the new closure will
always be a subset of the old one. The order is preserved.
This method is needed because direct parametrization may shadow some
of the fixtures that were included in the originally built dependency
tree. In this way the dependency tree can get pruned, and the closure
of argnames may get reduced.
"""
closure = set()
working_set = set(self.initialnames)
while working_set:
argname = working_set.pop()
# argname may be smth not included in the original names_closure,
# in which case we ignore it. This currently happens with pseudo
# FixtureDefs which wrap 'get_direct_param_fixture_func(request)'.
# So they introduce the new dependency 'request' which might have
# been missing in the original tree (closure).
if argname not in closure and argname in self.names_closure:
closure.add(argname)
if argname in self.name2fixturedefs:
working_set.update(self.name2fixturedefs[argname][-1].argnames)
self.names_closure[:] = sorted(closure, key=self.names_closure.index)
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "class", "module", "session"
self.scope = "function"
self._fixture_defs = {} # argname -> FixtureDef
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def fixturenames(self):
"""names of all active fixtures in this request"""
result = list(self._pyfuncitem._fixtureinfo.names_closure)
result.extend(set(self._fixture_defs).difference(result))
return result
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
# we arrive here because of a dynamic call to
# getfixturevalue(argname) usage which was naturally
# not known at parsing/collection time
parentid = self._pyfuncitem.parent.nodeid
fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(_pytest.python.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
return getattr(function, "__self__", None)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(_pytest.python.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem
)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
self.node.add_marker(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfixturevalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
from _pytest.deprecated import CACHED_SETUP
warnings.warn(CACHED_SETUP, stacklevel=2)
if not hasattr(self.config, "_setupcache"):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
def getfixturevalue(self, argname):
""" Dynamically run a named fixture function.
Declaring fixtures via function argument is recommended where possible.
But if you can only decide whether to use another fixture at test
setup time, you may use this function to retrieve it inside a fixture
or test function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
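# Illustrative sketch (not part of the original module): how a test might use
# request.getfixturevalue() to pick a fixture at runtime, as described in the
# docstring above. The fixture names ``fast_db``/``slow_db`` are hypothetical.
#
#     import pytest
#
#     @pytest.fixture
#     def fast_db():
#         return {"kind": "fast"}
#
#     @pytest.fixture
#     def slow_db():
#         return {"kind": "slow"}
#
#     def test_db(request):
#         # resolved dynamically, so it never appears in the test signature
#         db = request.getfixturevalue("fast_db")
#         assert db["kind"] == "fast"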
def getfuncargvalue(self, argname):
""" Deprecated, use getfixturevalue. """
from _pytest import deprecated
warnings.warn(deprecated.GETFUNCARGVALUE, stacklevel=2)
return self.getfixturevalue(argname)
def _get_active_fixturedef(self, argname):
try:
return self._fixture_defs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef(cached_result, scope)
raise
# removed indent to prevent the python3 exception
# from leaking into the call
self._compute_fixture_value(fixturedef)
self._fixture_defs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
values = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
values.reverse()
return values
values.append(fixturedef)
current = current._parent_request
def _compute_fixture_value(self, fixturedef):
"""
Creates a SubRequest based on "self" and calls the execute method of the given fixturedef object. This will
force the FixtureDef object to throw away any previous results and compute a new fixture value, which
will be stored into the FixtureDef object itself.
:param FixtureDef fixturedef:
"""
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
has_params = fixturedef.params is not None
fixtures_not_supported = getattr(funcitem, "nofuncargs", False)
if has_params and fixtures_not_supported:
msg = (
"{name} does not support fixtures, maybe unittest.TestCase subclass?\n"
"Node id: {nodeid}\n"
"Function type: {typename}"
).format(
name=funcitem.name,
nodeid=funcitem.nodeid,
typename=type(funcitem).__name__,
)
fail(msg, pytrace=False)
if has_params:
frame = inspect.stack()[3]
frameinfo = inspect.getframeinfo(frame[0])
source_path = frameinfo.filename
source_lineno = frameinfo.lineno
source_path = py.path.local(source_path)
if source_path.relto(funcitem.config.rootdir):
source_path = source_path.relto(funcitem.config.rootdir)
msg = (
"The requested fixture has no parameter defined for test:\n"
" {}\n\n"
"Requested fixture '{}' defined in:\n{}"
"\n\nRequested here:\n{}:{}".format(
funcitem.nodeid,
fixturedef.argname,
getlocation(fixturedef.func, funcitem.config.rootdir),
source_path,
source_lineno,
)
)
fail(msg, pytrace=False)
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
# clear sys.exc_info before invoking the fixture (python bug?)
# if it's not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(
functools.partial(fixturedef.finish, request=subrequest),
subrequest.node,
)
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
fail(
"ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s"
% ((requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False,
)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = _format_args(factory)
lines.append("%s:%d: def %s%s" % (p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
if scope == "package":
node = get_scope_package(self._pyfuncitem, self._fixturedef)
else:
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(
scope, self._pyfuncitem
)
return node
def __repr__(self):
return "<FixtureRequest for %r>" % (self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self._pyfuncitem = request._pyfuncitem
self._fixture_defs = request._fixture_defs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
def addfinalizer(self, finalizer):
self._fixturedef.addfinalizer(finalizer)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session package module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
def scope2index(scope, descr, where=None):
"""Look up the index of ``scope`` and raise a descriptive value error
if not defined.
"""
try:
return scopes.index(scope)
except ValueError:
fail(
"{} {}got an unexpected scope value '{}'".format(
descr, "from {} ".format(where) if where else "", scope
),
pytrace=False,
)
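# Illustrative sketch (not part of the original module): how the scope ordering
# above drives mismatch detection. ``scopes`` runs from broadest ("session") to
# narrowest ("function"), so a broader-scoped fixture requesting a narrower one
# is flagged:
#
#     scopemismatch("session", "function")  # True  -> reported as ScopeMismatch
#     scopemismatch("function", "session")  # False -> a test may use session fixtures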
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
# the last fixture raised an error, so present
# it at the requesting side
stack = stack[:-1]
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except (IOError, IndexError, TypeError):
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno + 1))
else:
addline("file %s, line %s" % (fspath, lineno + 1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith("def"):
break
if msg is None:
fm = self.request._fixturemanager
available = set()
parentid = self.request._pyfuncitem.parent.nodeid
for name, fixturedefs in fm._arg2fixturedefs.items():
faclist = list(fm._matchfactories(fixturedefs, parentid))
if faclist:
available.add(name)
if self.argname in available:
msg = " recursive dependency involving fixture '{}' detected".format(
self.argname
)
else:
msg = "fixture '{}' not found".format(self.argname)
msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
msg += "\n use 'pytest --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
# tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
lines = self.errorstring.split("\n")
if lines:
tw.line(
"{} {}".format(FormattedExcinfo.fail_marker, lines[0].strip()),
red=True,
)
for line in lines[1:]:
tw.line(
"{} {}".format(FormattedExcinfo.flow_marker, line.strip()),
red=True,
)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno + 1)
source = _pytest._code.Source(fixturefunc)
fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs):
yieldctx = is_generator(fixturefunc)
if yieldctx:
it = fixturefunc(**kwargs)
res = next(it)
finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, it)
request.addfinalizer(finalizer)
else:
res = fixturefunc(**kwargs)
return res
def _teardown_yield_fixture(fixturefunc, it):
"""Executes the teardown of a fixture function by advancing the iterator after the
yield and ensure the iteration ends (if not it means there is more than one yield in the function)"""
try:
next(it)
except StopIteration:
pass
else:
fail_fixturefunc(
fixturefunc, "yield_fixture function has more than one 'yield'"
)
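# Illustrative sketch (not part of the original module): for a generator
# fixture, call_fixture_func() advances to the first ``yield`` to obtain the
# value and registers _teardown_yield_fixture() as the finalizer, roughly:
#
#     def example_fixture():            # hypothetical fixture body
#         resource = object()           # setup
#         yield resource                # value handed to the test
#         del resource                  # teardown code resumed by the finalizer
#
#     it = example_fixture()
#     value = next(it)                  # what call_fixture_func() returns
#     # ... test runs ...
#     _teardown_yield_fixture(example_fixture, it)   # resumes past the yield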
class FixtureDef(object):
""" A container for a factory definition. """
def __init__(
self,
fixturemanager,
baseid,
argname,
func,
scope,
params,
unittest=False,
ids=None,
):
self._fixturemanager = fixturemanager
self.baseid = baseid or ""
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scope2index(
scope or "function",
descr="Fixture '{}'".format(func.__name__),
where=baseid,
)
self.params = params
self.argnames = getfuncargnames(func, is_method=unittest)
self.unittest = unittest
self.ids = ids
self._finalizers = []
def addfinalizer(self, finalizer):
self._finalizers.append(finalizer)
def finish(self, request):
exceptions = []
try:
while self._finalizers:
try:
func = self._finalizers.pop()
func()
except: # noqa
exceptions.append(sys.exc_info())
if exceptions:
e = exceptions[0]
del exceptions # ensure we don't keep all frames alive because of the traceback
six.reraise(*e)
finally:
hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
hook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
# even if finalization fails, we invalidate
# the cached fixture value and remove
# all finalizers because they may be bound methods which will
# keep instances alive
if hasattr(self, "cached_result"):
del self.cached_result
self._finalizers = []
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
if argname != "request":
fixturedef.addfinalizer(functools.partial(self.finish, request=request))
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
six.reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish(request)
assert not hasattr(self, "cached_result")
hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
return hook.pytest_fixture_setup(fixturedef=self, request=request)
def __repr__(self):
return "<FixtureDef argname=%r scope=%r baseid=%r>" % (
self.argname,
self.scope,
self.baseid,
)
def resolve_fixture_function(fixturedef, request):
"""Gets the actual callable that can be called to obtain the fixture value, dealing with unittest-specific
instances and bound methods.
"""
fixturefunc = fixturedef.func
if fixturedef.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = fixturedef.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "fixturedef" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(fixturedef.func)
if fixturefunc != fixturedef.func:
fixturefunc = fixturefunc.__get__(request.instance)
return fixturefunc
def pytest_fixture_setup(fixturedef, request):
""" Execution of fixture setup. """
kwargs = {}
for argname in fixturedef.argnames:
fixdef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixdef.cached_result
request._check_scope(argname, request.scope, fixdef.scope)
kwargs[argname] = result
fixturefunc = resolve_fixture_function(fixturedef, request)
my_cache_key = request.param_index
try:
result = call_fixture_func(fixturefunc, request, kwargs)
except TEST_OUTCOME:
fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
raise
fixturedef.cached_result = (result, my_cache_key, None)
return result
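# Illustrative note (not part of the original module): ``cached_result`` is the
# 3-tuple shared between FixtureDef.execute() and pytest_fixture_setup():
#
#     (result, cache_key, exc_info)
#
# where ``cache_key`` is the parametrization index (request.param_index) and
# ``exc_info`` is ``sys.exc_info()`` when setup failed, else None. A later
# request with the same cache_key returns ``result`` (or re-raises) without
# calling the fixture function again.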
def _ensure_immutable_ids(ids):
if ids is None:
return
if callable(ids):
return ids
return tuple(ids)
def wrap_function_to_warning_if_called_directly(function, fixture_marker):
"""Wrap the given fixture function so we can issue warnings about it being called directly, instead of
used as an argument in a test function.
"""
is_yield_function = is_generator(function)
warning = FIXTURE_FUNCTION_CALL.format(
name=fixture_marker.name or function.__name__
)
if is_yield_function:
@functools.wraps(function)
def result(*args, **kwargs):
__tracebackhide__ = True
warnings.warn(warning, stacklevel=3)
for x in function(*args, **kwargs):
yield x
else:
@functools.wraps(function)
def result(*args, **kwargs):
__tracebackhide__ = True
warnings.warn(warning, stacklevel=3)
return function(*args, **kwargs)
if six.PY2:
result.__wrapped__ = function
# keep reference to the original function in our own custom attribute so we don't unwrap
# further than this point and lose useful wrappings like @mock.patch (#3774)
result.__pytest_wrapped__ = _PytestWrapper(function)
return result
@attr.s(frozen=True)
class FixtureFunctionMarker(object):
scope = attr.ib()
params = attr.ib(converter=attr.converters.optional(tuple))
autouse = attr.ib(default=False)
ids = attr.ib(default=None, converter=_ensure_immutable_ids)
name = attr.ib(default=None)
def __call__(self, function):
if isclass(function):
raise ValueError("class fixtures not supported (maybe in the future)")
if getattr(function, "_pytestfixturefunction", False):
raise ValueError(
"fixture is being applied more than once to the same function"
)
function = wrap_function_to_warning_if_called_directly(function, self)
name = self.name or function.__name__
if name == "request":
warnings.warn(FIXTURE_NAMED_REQUEST)
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
"""Decorator to mark a fixture factory function.
This decorator can be used, with or without parameters, to define a
fixture function.
The name of the fixture function can later be referenced to cause its
invocation ahead of running tests: test
modules or classes can use the ``pytest.mark.usefixtures(fixturename)``
marker.
Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
Fixtures can provide their values to test functions using ``return`` or ``yield``
statements. When using ``yield`` the code block after the ``yield`` statement is executed
as teardown code regardless of the test outcome, and must yield exactly once.
:arg scope: the scope for which this fixture is shared, one of
``"function"`` (default), ``"class"``, ``"module"``,
``"package"`` or ``"session"``.
``"package"`` is considered **experimental** at this time.
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
:arg name: the name of the fixture. This defaults to the name of the
decorated function. If a fixture is used in the same module in
which it is defined, the function name of the fixture will be
shadowed by the function arg that requests the fixture; one way
to resolve this is to name the decorated function
``fixture_<fixturename>`` and then use
``@pytest.fixture(name='<fixturename>')``.
"""
if callable(scope) and params is None and autouse is False:
# direct decoration
return FixtureFunctionMarker("function", params, autouse, name=name)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
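# Illustrative sketch (not part of the original module): typical use of the
# decorator defined above. The ``tmp_resource`` fixture and ``open_resource``
# helper are hypothetical.
#
#     import pytest
#
#     @pytest.fixture(scope="module", params=["a", "b"], ids=["first", "second"])
#     def tmp_resource(request):
#         handle = open_resource(request.param)   # hypothetical setup helper
#         yield handle                            # value injected into tests
#         handle.close()                          # teardown, runs once per param
#
#     def test_uses_resource(tmp_resource):
#         assert tmp_resource is not None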
def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None):
""" (return a) decorator to mark a yield-fixture factory function.
.. deprecated:: 3.0
Use :py:func:`pytest.fixture` directly instead.
"""
return fixture(scope=scope, params=params, autouse=autouse, ids=ids, name=name)
defaultfuncargprefixmarker = fixture()
@fixture(scope="session")
def pytestconfig(request):
"""Session-scoped fixture that returns the :class:`_pytest.config.Config` object.
Example::
def test_foo(pytestconfig):
if pytestconfig.getoption("verbose"):
...
"""
return request.config
class FixtureManager(object):
"""
pytest fixture definitions and related information are stored and managed
by this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
as the closure of the fixtures needed to set up the initial fixtures,
i.e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
During the test-setup phase all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not getattr(node, "nofuncargs", False):
argnames = getfuncargnames(func, cls=cls)
else:
argnames = ()
usefixtures = flatten(
mark.args for mark in node.iter_markers(name="usefixtures")
)
initialnames = tuple(usefixtures) + argnames
fm = node.session._fixturemanager
initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure(
initialnames, node
)
return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__).realpath()
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != nodes.SEP:
nodeid = nodeid.replace(p.sep, nodes.SEP)
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i : i + 1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
# collect the closure of all fixtures, starting with the given
# fixturenames as the initial set. As we have to visit all
# factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
# at this point, fixturenames_closure contains what we call "initialnames",
# which is a set of fixturenames the function immediately requests. We
# need to return it as well, so save this.
initialnames = tuple(fixturenames_closure)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
def sort_by_scope(arg_name):
try:
fixturedefs = arg2fixturedefs[arg_name]
except KeyError:
return scopes.index("function")
else:
return fixturedefs[-1].scopenum
fixturenames_closure.sort(key=sort_by_scope)
return initialnames, fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
parametrize_func = getattr(metafunc.function, "parametrize", None)
if parametrize_func is not None:
parametrize_func = parametrize_func.combined
func_params = getattr(parametrize_func, "args", [[None]])
func_kwargs = getattr(parametrize_func, "kwargs", {})
# skip directly parametrized arguments
if "argnames" in func_kwargs:
argnames = parametrize_func.kwargs["argnames"]
else:
argnames = func_params[0]
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if argname not in func_params and argname not in argnames:
metafunc.parametrize(
argname,
fixturedef.params,
indirect=True,
scope=fixturedef.scope,
ids=fixturedef.ids,
)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
from _pytest import deprecated
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
from _pytest.nodes import _CompatProperty
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
# The attribute can be an arbitrary descriptor, so the attribute
# access below can raise. safe_getattr() ignores such exceptions.
maybe_property = safe_getattr(type(holderobj), name, None)
if isinstance(maybe_property, _CompatProperty):
# deprecated
continue
obj = safe_getattr(holderobj, name, None)
marker = getfixturemarker(obj)
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
if marker is None:
if not name.startswith(self._argprefix):
continue
if not callable(obj):
continue
marker = defaultfuncargprefixmarker
filename, lineno = getfslineno(obj)
warnings.warn_explicit(
deprecated.FUNCARG_PREFIX.format(name=name),
category=None,
filename=str(filename),
lineno=lineno + 1,
)
name = name[len(self._argprefix) :]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
if marker.name:
name = marker.name
assert not name.startswith(self._argprefix), FIXTURE_MSG.format(name)
# during fixture definition we wrap the original fixture function
# to issue a warning if called directly, so here we unwrap it in order to not emit the warning
# when pytest itself calls the fixture function
if six.PY2 and unittest:
# hack on Python 2 because of the unbound methods
obj = get_real_func(obj)
else:
obj = get_real_method(obj, holderobj)
fixture_def = FixtureDef(
self,
nodeid,
name,
obj,
marker.scope,
marker.params,
unittest=unittest,
ids=marker.ids,
)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixture_def.has_location:
faclist.append(fixture_def)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixture_def)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or "", autousenames))
def getfixturedefs(self, argname, nodeid):
"""
Gets a list of fixtures which are applicable to the given node id.
:param str argname: name of the fixture to search for
:param str nodeid: full node id of the requesting test.
:return: list[FixtureDef]
"""
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodes.ischildnode(fixturedef.baseid, nodeid):
yield fixturedef
|
txomon/pytest
|
src/_pytest/fixtures.py
|
Python
|
mit
| 53,770
|
[
"VisIt"
] |
37b028c35d605d82ed7b4c3c5e51740b23526cf65f68c389b82a0ea4a6041a7d
|
"""Handle installation and updates of bcbio-nextgen, third party software and data.
Enables automated installation tool and in-place updates to install additional
data and software.
"""
from __future__ import print_function
import argparse
import collections
import contextlib
import datetime
import dateutil
from distutils.version import LooseVersion
import gzip
import os
import shutil
import subprocess
import sys
import glob
import requests
from six.moves import urllib
import toolz as tz
import yaml
from bcbio import broad, utils
from bcbio.pipeline import genome
from bcbio.variation import effects
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import datadict as dd
REMOTES = {
"requirements": "https://raw.githubusercontent.com/chapmanb/bcbio-nextgen/master/requirements-conda.txt",
"gitrepo": "https://github.com/chapmanb/bcbio-nextgen.git",
"cloudbiolinux": "https://github.com/chapmanb/cloudbiolinux/archive/master.tar.gz",
"genome_resources": "https://raw.github.com/chapmanb/bcbio-nextgen/master/config/genomes/%s-resources.yaml",
"snpeff_dl_url": ("http://downloads.sourceforge.net/project/snpeff/databases/v{snpeff_ver}/"
"snpEff_v{snpeff_ver}_{genome}.zip")}
SUPPORTED_GENOMES = ["GRCh37", "hg19", "hg38", "hg38-noalt", "mm10", "mm9",
"rn6", "rn5", "canFam3", "dm3", "galGal4", "phix",
"pseudomonas_aeruginosa_ucbpp_pa14", "sacCer3", "TAIR10",
"WBcel235", "xenTro3", "GRCz10"]
SUPPORTED_INDEXES = ["bowtie", "bowtie2", "bwa", "novoalign", "rtg", "snap",
"star","twobit", "seq", "hisat2"]
DEFAULT_INDEXES = ["rtg", "twobit"]
Tool = collections.namedtuple("Tool", ["name", "fname"])
def upgrade_bcbio(args):
"""Perform upgrade of bcbio to latest release, or from GitHub development version.
Handles bcbio, third party tools and data.
"""
print("Upgrading bcbio")
args = add_install_defaults(args)
if args.upgrade in ["stable", "system", "deps", "development"]:
if args.upgrade == "development":
anaconda_dir = _update_conda_devel()
print("Upgrading bcbio-nextgen to latest development version")
pip_bin = os.path.join(os.path.dirname(sys.executable), "pip")
git_tag = "@%s" % args.revision if args.revision != "master" else ""
_pip_safe_ssl([[pip_bin, "install", "--upgrade", "--no-deps",
"git+%s%s#egg=bcbio-nextgen" % (REMOTES["gitrepo"], git_tag)]], anaconda_dir)
print("Upgrade of bcbio-nextgen development code complete.")
else:
_update_conda_packages()
print("Upgrade of bcbio-nextgen code complete.")
try:
_set_matplotlib_default_backend()
except OSError:
pass
if args.tooldir:
with bcbio_tmpdir():
print("Upgrading third party tools to latest versions")
_symlink_bcbio(args, script="bcbio_nextgen.py")
_symlink_bcbio(args, script="bcbio_setup_genome.py")
_symlink_bcbio(args, script="bcbio_prepare_samples.py")
_symlink_bcbio(args, script="bcbio_fastq_umi_prep.py")
upgrade_thirdparty_tools(args, REMOTES)
print("Third party tools upgrade complete.")
if args.toolplus:
print("Installing additional tools")
_install_toolplus(args)
if args.install_data:
for default in DEFAULT_INDEXES:
if default not in args.aligners:
args.aligners.append(default)
if len(args.aligners) == 0:
print("Warning: no aligners provided with `--aligners` flag")
if len(args.genomes) == 0:
print("Data not installed, no genomes provided with `--genomes` flag")
else:
with bcbio_tmpdir():
print("Upgrading bcbio-nextgen data files")
upgrade_bcbio_data(args, REMOTES)
print("bcbio-nextgen data upgrade complete.")
if args.isolate and args.tooldir:
print("Isolated tool installation not automatically added to environmental variables")
print(" Add:\n {t}/bin to PATH".format(t=args.tooldir))
save_install_defaults(args)
args.datadir = _get_data_dir()
_install_container_bcbio_system(args.datadir)
print("Upgrade completed successfully.")
return args
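# Illustrative sketch (not part of the original module): a typical command line
# that exercises upgrade_bcbio() via the ``upgrade`` subparser defined below
# (genome builds, aligners and flags are examples only):
#
#     bcbio_nextgen.py upgrade -u stable --tools \
#         --genomes GRCh37 --aligners bwa --data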
def _pip_safe_ssl(cmds, anaconda_dir):
"""Run pip, retrying with conda SSL certificate if global certificate fails.
"""
try:
for cmd in cmds:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
_set_pip_ssl(anaconda_dir)
for cmd in cmds:
subprocess.check_call(cmd)
def _set_pip_ssl(anaconda_dir):
"""Set PIP SSL certificate to installed conda certificate to avoid SSL errors
"""
if anaconda_dir:
cert_file = os.path.join(anaconda_dir, "ssl", "cert.pem")
if os.path.exists(cert_file):
os.environ["PIP_CERT"] = cert_file
def _set_matplotlib_default_backend():
"""
matplotlib will try to print to a display if one is available, but we don't want
to run it in interactive mode. We tried setting the backend to 'Agg' before
importing, but it still resulted in issues. We replace the existing
backend with 'agg' in the default matplotlibrc. This is a hack until we can
find a better solution.
"""
if _matplotlib_installed():
import matplotlib
matplotlib.use('Agg', force=True)
config = matplotlib.matplotlib_fname()
with file_transaction(config) as tx_out_file:
with open(config) as in_file, open(tx_out_file, "w") as out_file:
for line in in_file:
if line.split(":")[0].strip() == "backend":
out_file.write("backend: agg\n")
else:
out_file.write(line)
def _matplotlib_installed():
try:
import matplotlib
except ImportError:
return False
return True
def _symlink_bcbio(args, script="bcbio_nextgen.py"):
"""Ensure a bcbio-nextgen script symlink in final tool directory.
"""
bcbio_anaconda = os.path.join(os.path.dirname(sys.executable), script)
bindir = os.path.join(args.tooldir, "bin")
if not os.path.exists(bindir):
os.makedirs(bindir)
bcbio_final = os.path.join(bindir, script)
if not os.path.exists(bcbio_final):
if os.path.lexists(bcbio_final):
subprocess.check_call(["rm", "-f", bcbio_final])
subprocess.check_call(["ln", "-s", bcbio_anaconda, bcbio_final])
def _install_container_bcbio_system(datadir):
"""Install limited bcbio_system.yaml file for setting core and memory usage.
Adds any non-specific programs to the exposed bcbio_system.yaml file, only
when the upgrade is happening inside a docker container.
"""
base_file = os.path.join(datadir, "config", "bcbio_system.yaml")
if not os.path.exists(base_file):
return
expose_file = os.path.join(datadir, "galaxy", "bcbio_system.yaml")
expose = set(["memory", "cores", "jvm_opts"])
with open(base_file) as in_handle:
config = yaml.load(in_handle)
if os.path.exists(expose_file):
with open(expose_file) as in_handle:
expose_config = yaml.load(in_handle)
else:
expose_config = {"resources": {}}
for pname, vals in config["resources"].items():
expose_vals = {}
for k, v in vals.items():
if k in expose:
expose_vals[k] = v
if len(expose_vals) > 0 and pname not in expose_config["resources"]:
expose_config["resources"][pname] = expose_vals
if expose_file and os.path.exists(os.path.dirname(expose_file)):
with open(expose_file, "w") as out_handle:
yaml.safe_dump(expose_config, out_handle, default_flow_style=False, allow_unicode=False)
return expose_file
def _get_conda_bin():
conda_bin = os.path.join(os.path.dirname(os.path.realpath(sys.executable)), "conda")
if os.path.exists(conda_bin):
return conda_bin
def _default_deploy_args(args):
"""Standard install arguments for CloudBioLinux.
Avoid using sudo and keep an installation isolated if running as the root user.
"""
return {"flavor": "ngs_pipeline_minimal",
"vm_provider": "novm",
"hostname": "localhost",
"fabricrc_overrides": {"edition": "minimal",
"use_sudo": False,
"keep_isolated": args.isolate or os.geteuid() == 0,
"conda_cmd": _get_conda_bin(),
"distribution": args.distribution or "__auto__",
"dist_name": "__auto__"}}
def _update_conda_packages():
"""If installed in an anaconda directory, upgrade conda packages.
"""
conda_bin = _get_conda_bin()
assert conda_bin, ("Could not find anaconda distribution for upgrading bcbio.\n"
"Using python at %s but could not find conda." % (os.path.realpath(sys.executable)))
req_file = "bcbio-update-requirements.txt"
if os.path.exists(req_file):
os.remove(req_file)
subprocess.check_call([conda_bin, "install", "--yes", "nomkl"])
subprocess.check_call(["wget", "-O", req_file, "--no-check-certificate", REMOTES["requirements"]])
subprocess.check_call([conda_bin, "install", "--update-deps", "--quiet", "--yes",
"-c", "bioconda", "-c", "conda-forge", "--file", req_file])
if os.path.exists(req_file):
os.remove(req_file)
return os.path.dirname(os.path.dirname(conda_bin))
def _update_conda_devel():
"""Update to the latest development conda package.
"""
conda_bin = _get_conda_bin()
assert conda_bin, "Could not find anaconda distribution for upgrading bcbio"
subprocess.check_call([conda_bin, "install", "--yes", "nomkl"])
subprocess.check_call([conda_bin, "install", "--update-deps",
"--quiet", "--yes", "-c", "bioconda", "-c", "conda-forge", "bcbio-nextgen"])
return os.path.dirname(os.path.dirname(conda_bin))
def get_genome_dir(gid, galaxy_dir, data):
"""Return standard location of genome directories.
"""
if galaxy_dir:
refs = genome.get_refs(gid, None, galaxy_dir, data)
seq_file = tz.get_in(["fasta", "base"], refs)
if seq_file and os.path.exists(seq_file):
return os.path.dirname(os.path.dirname(seq_file))
else:
gdirs = glob.glob(os.path.join(_get_data_dir(), "genomes", "*", gid))
if len(gdirs) == 1 and os.path.exists(gdirs[0]):
return gdirs[0]
def _get_data_dir():
base_dir = os.path.realpath(os.path.dirname(os.path.dirname(os.path.realpath(sys.executable))))
if "anaconda" not in os.path.basename(base_dir) and "virtualenv" not in os.path.basename(base_dir):
raise ValueError("Cannot update data for bcbio-nextgen not installed by installer.\n"
"bcbio-nextgen needs to be installed inside an anaconda environment \n"
"located in the same directory as `galaxy` `genomes` and `gemini_data` directories.")
return os.path.dirname(base_dir)
def get_gemini_dir(data=None):
try:
data_dir = _get_data_dir()
return os.path.join(data_dir, "gemini_data")
except ValueError:
if data:
galaxy_dir = dd.get_galaxy_dir(data)
data_dir = os.path.realpath(os.path.dirname(os.path.dirname(galaxy_dir)))
return os.path.join(data_dir, "gemini_data")
else:
return None
def upgrade_bcbio_data(args, remotes):
"""Upgrade required genome data files in place.
"""
from fabric.api import env
data_dir = _get_data_dir()
s = _default_deploy_args(args)
s["actions"] = ["setup_biodata"]
tooldir = args.tooldir or get_defaults().get("tooldir")
if tooldir:
s["fabricrc_overrides"]["system_install"] = tooldir
s["fabricrc_overrides"]["data_files"] = data_dir
s["fabricrc_overrides"]["galaxy_home"] = os.path.join(data_dir, "galaxy")
cbl = get_cloudbiolinux(remotes)
s["genomes"] = _get_biodata(cbl["biodata"], args)
sys.path.insert(0, cbl["dir"])
env.cores = args.cores
cbl_deploy = __import__("cloudbio.deploy", fromlist=["deploy"])
cbl_deploy.deploy(s)
_upgrade_genome_resources(s["fabricrc_overrides"]["galaxy_home"],
remotes["genome_resources"])
_upgrade_snpeff_data(s["fabricrc_overrides"]["galaxy_home"], args, remotes)
if "vep" in args.datatarget:
_upgrade_vep_data(s["fabricrc_overrides"]["galaxy_home"], tooldir)
if 'gemini' in args.datatarget and ("hg19" in args.genomes or "GRCh37" in args.genomes):
gemini = os.path.join(os.path.dirname(sys.executable), "gemini")
extras = []
if "cadd" in args.datatarget:
extras.extend(["--extra", "cadd_score"])
ann_dir = get_gemini_dir()
subprocess.check_call([gemini, "--annotation-dir", ann_dir, "update", "--dataonly"] + extras)
if "kraken" in args.datatarget:
_install_kraken_db(_get_data_dir(), args)
def _upgrade_genome_resources(galaxy_dir, base_url):
"""Retrieve latest version of genome resource YAML configuration files.
"""
for dbkey, ref_file in genome.get_builds(galaxy_dir):
# Check for a remote genome resources file
remote_url = base_url % dbkey
requests.packages.urllib3.disable_warnings()
r = requests.get(remote_url, verify=False)
if r.status_code == requests.codes.ok:
local_file = os.path.join(os.path.dirname(ref_file), os.path.basename(remote_url))
if os.path.exists(local_file):
with open(local_file) as in_handle:
local_config = yaml.load(in_handle)
remote_config = yaml.load(r.text)
needs_update = remote_config["version"] > local_config.get("version", 0)
if needs_update:
shutil.move(local_file, local_file + ".old%s" % local_config.get("version", 0))
else:
needs_update = True
if needs_update:
print("Updating %s genome resources configuration" % dbkey)
with open(local_file, "w") as out_handle:
out_handle.write(r.text)
def _upgrade_vep_data(galaxy_dir, tooldir):
for dbkey, ref_file in genome.get_builds(galaxy_dir):
effects.prep_vep_cache(dbkey, ref_file, tooldir)
def _upgrade_snpeff_data(galaxy_dir, args, remotes):
"""Install or upgrade snpEff databases, localized to reference directory.
"""
snpeff_version = effects.snpeff_version(args)
if not snpeff_version:
return
for dbkey, ref_file in genome.get_builds(galaxy_dir):
resource_file = os.path.join(os.path.dirname(ref_file), "%s-resources.yaml" % dbkey)
if os.path.exists(resource_file):
with open(resource_file) as in_handle:
resources = yaml.load(in_handle)
snpeff_db, snpeff_base_dir = effects.get_db({"genome_resources": resources,
"reference": {"fasta": {"base": ref_file}}})
if snpeff_db:
snpeff_db_dir = os.path.join(snpeff_base_dir, snpeff_db)
if os.path.exists(snpeff_db_dir) and _is_old_database(snpeff_db_dir, args):
shutil.rmtree(snpeff_db_dir)
if not os.path.exists(snpeff_db_dir):
print("Installing snpEff database %s in %s" % (snpeff_db, snpeff_base_dir))
dl_url = remotes["snpeff_dl_url"].format(
snpeff_ver=snpeff_version.replace(".", "_"),
genome=snpeff_db)
dl_file = os.path.basename(dl_url)
with utils.chdir(snpeff_base_dir):
subprocess.check_call(["wget", "--no-check-certificate", "-c", "-O", dl_file, dl_url])
subprocess.check_call(["unzip", dl_file])
os.remove(dl_file)
dl_dir = os.path.join(snpeff_base_dir, "data", snpeff_db)
shutil.move(dl_dir, snpeff_db_dir)
os.rmdir(os.path.join(snpeff_base_dir, "data"))
def _is_old_database(db_dir, args):
"""Check for old database versions, supported in snpEff 4.1.
"""
snpeff_version = effects.snpeff_version(args)
if LooseVersion(snpeff_version) >= LooseVersion("4.1"):
pred_file = os.path.join(db_dir, "snpEffectPredictor.bin")
if not utils.file_exists(pred_file):
return True
with gzip.open(pred_file) as in_handle:
version_info = in_handle.readline().strip().split("\t")
program, version = version_info[:2]
if not program.lower() == "snpeff" or LooseVersion(snpeff_version) > LooseVersion(version):
return True
return False
def _get_biodata(base_file, args):
"""Retrieve biodata genome targets customized by install parameters.
"""
with open(base_file) as in_handle:
config = yaml.load(in_handle)
config["install_liftover"] = False
config["genome_indexes"] = args.aligners
ann_groups = config.pop("annotation_groups", {})
config["genomes"] = [_setup_genome_annotations(g, args, ann_groups)
for g in config["genomes"] if g["dbkey"] in args.genomes]
return config
def _setup_genome_annotations(g, args, ann_groups):
"""Configure genome annotations to install based on datatarget.
"""
available_anns = g.get("annotations", []) + g.pop("annotations_available", [])
anns = []
for orig_target in args.datatarget:
if orig_target in ann_groups:
targets = ann_groups[orig_target]
else:
targets = [orig_target]
for target in targets:
if target in available_anns:
anns.append(target)
g["annotations"] = anns
if "variation" not in args.datatarget and "validation" in g:
del g["validation"]
return g
def upgrade_thirdparty_tools(args, remotes):
"""Install and update third party tools used in the pipeline.
Creates a manifest directory with installed programs on the system.
"""
s = {"fabricrc_overrides": {"system_install": args.tooldir,
"local_install": os.path.join(args.tooldir, "local_install"),
"distribution": args.distribution,
"conda_cmd": _get_conda_bin(),
"use_sudo": False,
"edition": "minimal"}}
s = _default_deploy_args(args)
s["actions"] = ["install_biolinux"]
s["fabricrc_overrides"]["system_install"] = args.tooldir
s["fabricrc_overrides"]["local_install"] = os.path.join(args.tooldir, "local_install")
if args.toolconf and os.path.exists(args.toolconf):
s["fabricrc_overrides"]["conda_yaml"] = args.toolconf
cbl = get_cloudbiolinux(remotes)
sys.path.insert(0, cbl["dir"])
cbl_deploy = __import__("cloudbio.deploy", fromlist=["deploy"])
cbl_deploy.deploy(s)
manifest_dir = os.path.join(_get_data_dir(), "manifest")
print("Creating manifest of installed packages in %s" % manifest_dir)
cbl_manifest = __import__("cloudbio.manifest", fromlist=["manifest"])
if os.path.exists(manifest_dir):
for fname in os.listdir(manifest_dir):
if not fname.startswith("toolplus"):
os.remove(os.path.join(manifest_dir, fname))
cbl_manifest.create(manifest_dir, args.tooldir)
def _install_toolplus(args):
"""Install additional tools we cannot distribute, updating local manifest.
"""
manifest_dir = os.path.join(_get_data_dir(), "manifest")
toolplus_manifest = os.path.join(manifest_dir, "toolplus-packages.yaml")
system_config = os.path.join(_get_data_dir(), "galaxy", "bcbio_system.yaml")
# Handle toolplus installs inside Docker container
if not os.path.exists(system_config):
docker_system_config = os.path.join(_get_data_dir(), "config", "bcbio_system.yaml")
if os.path.exists(docker_system_config):
system_config = docker_system_config
toolplus_dir = os.path.join(_get_data_dir(), "toolplus")
for tool in args.toolplus:
if tool.name in set(["gatk", "mutect"]):
print("Installing %s" % tool.name)
_install_gatk_jar(tool.name, tool.fname, toolplus_manifest, system_config, toolplus_dir)
else:
raise ValueError("Unexpected toolplus argument: %s %s" % (tool.name, tool.fname))
def get_gatk_jar_version(name, fname):
if name == "gatk":
return broad.get_gatk_version(fname)
elif name == "mutect":
return broad.get_mutect_version(fname)
else:
raise ValueError("Unexpected GATK input: %s" % name)
def _install_gatk_jar(name, fname, manifest, system_config, toolplus_dir):
"""Install a jar for GATK or associated tools like MuTect.
"""
if not fname.endswith(".jar"):
raise ValueError("--toolplus argument for %s expects a jar file: %s" % (name, fname))
version = get_gatk_jar_version(name, fname)
store_dir = utils.safe_makedir(os.path.join(toolplus_dir, name, version))
shutil.copyfile(fname, os.path.join(store_dir, os.path.basename(fname)))
_update_system_file(system_config, name, {"dir": store_dir})
_update_manifest(manifest, name, version)
def _update_manifest(manifest_file, name, version):
"""Update the toolplus manifest file with updated name and version
"""
if os.path.exists(manifest_file):
with open(manifest_file) as in_handle:
manifest = yaml.load(in_handle)
else:
manifest = {}
manifest[name] = {"name": name, "version": version}
with open(manifest_file, "w") as out_handle:
yaml.safe_dump(manifest, out_handle, default_flow_style=False, allow_unicode=False)
def _update_system_file(system_file, name, new_kvs):
"""Update the bcbio_system.yaml file with new resource information.
"""
if os.path.exists(system_file):
bak_file = system_file + ".bak%s" % datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
shutil.copyfile(system_file, bak_file)
with open(system_file) as in_handle:
config = yaml.load(in_handle)
else:
utils.safe_makedir(os.path.dirname(system_file))
config = {}
new_rs = {}
added = False
for rname, r_kvs in config.get("resources", {}).items():
if rname == name:
for k, v in new_kvs.items():
r_kvs[k] = v
added = True
new_rs[rname] = r_kvs
if not added:
new_rs[name] = new_kvs
config["resources"] = new_rs
with open(system_file, "w") as out_handle:
yaml.safe_dump(config, out_handle, default_flow_style=False, allow_unicode=False)
def _install_kraken_db(datadir, args):
"""Install kraken minimal DB in genome folder.
"""
kraken = os.path.join(datadir, "genomes/kraken")
url = "https://ccb.jhu.edu/software/kraken/dl/minikraken.tgz"
compress = os.path.join(kraken, os.path.basename(url))
base, ext = utils.splitext_plus(os.path.basename(url))
db = os.path.join(kraken, base)
tooldir = args.tooldir or get_defaults()["tooldir"]
requests.packages.urllib3.disable_warnings()
last_mod = urllib.request.urlopen(url).info().getheader('Last-Modified')
last_mod = dateutil.parser.parse(last_mod).astimezone(dateutil.tz.tzutc())
if os.path.exists(os.path.join(tooldir, "bin", "kraken")):
if not os.path.exists(db):
is_new_version = True
else:
cur_file = glob.glob(os.path.join(kraken, "minikraken_*"))[0]
cur_version = datetime.datetime.utcfromtimestamp(os.path.getmtime(cur_file))
is_new_version = last_mod.date() > cur_version.date()
if is_new_version:
shutil.move(cur_file, cur_file.replace('minikraken', 'old'))
if not os.path.exists(kraken):
utils.safe_makedir(kraken)
if is_new_version:
if not os.path.exists(compress):
subprocess.check_call(["wget", "-O", compress, url, "--no-check-certificate"])
cmd = ["tar", "-xzvf", compress, "-C", kraken]
subprocess.check_call(cmd)
last_version = glob.glob(os.path.join(kraken, "minikraken_*"))
utils.symlink_plus(os.path.join(kraken, last_version[0]), os.path.join(kraken, "minikraken"))
utils.remove_safe(compress)
else:
print("You have the latest version %s." % last_mod)
else:
raise argparse.ArgumentTypeError("kraken not installed in tooldir %s." %
os.path.join(tooldir, "bin", "kraken"))
# ## Store a local configuration file with upgrade details
def _get_install_config():
"""Return the YAML configuration file used to store upgrade information.
"""
try:
data_dir = _get_data_dir()
except ValueError:
return None
config_dir = utils.safe_makedir(os.path.join(data_dir, "config"))
return os.path.join(config_dir, "install-params.yaml")
def save_install_defaults(args):
"""Save installation information to make future upgrades easier.
"""
install_config = _get_install_config()
if install_config is None:
return
if utils.file_exists(install_config):
with open(install_config) as in_handle:
cur_config = yaml.load(in_handle)
else:
cur_config = {}
if args.tooldir:
cur_config["tooldir"] = args.tooldir
cur_config["isolate"] = args.isolate
for attr in ["genomes", "aligners", "datatarget"]:
if not cur_config.get(attr):
cur_config[attr] = []
for x in getattr(args, attr):
if x not in cur_config[attr]:
cur_config[attr].append(x)
# toolplus -- save non-filename inputs
attr = "toolplus"
if not cur_config.get(attr):
cur_config[attr] = []
for x in getattr(args, attr):
if not x.fname:
if x.name not in cur_config[attr]:
cur_config[attr].append(x.name)
with open(install_config, "w") as out_handle:
yaml.safe_dump(cur_config, out_handle, default_flow_style=False, allow_unicode=False)
def add_install_defaults(args):
"""Add any saved installation defaults to the upgrade.
"""
# Ensure we install data if we've specified any secondary installation targets
if len(args.genomes) > 0 or len(args.aligners) > 0 or len(args.datatarget) > 0:
args.install_data = True
install_config = _get_install_config()
if install_config is None or not utils.file_exists(install_config):
default_args = {}
else:
with open(install_config) as in_handle:
default_args = yaml.load(in_handle)
# if we are upgrading to development, also upgrade the tools
if args.upgrade in ["development"] and (args.tooldir or "tooldir" in default_args):
args.tools = True
if args.tools and args.tooldir is None:
if "tooldir" in default_args:
args.tooldir = str(default_args["tooldir"])
else:
raise ValueError("Default tool directory not yet saved in config defaults. "
"Specify the '--tooldir=/path/to/tools' to upgrade tools. "
"After a successful upgrade, the '--tools' parameter will "
"work for future upgrades.")
for attr in ["genomes", "aligners"]:
# don't upgrade default genomes if a genome was specified
if attr == "genomes" and len(args.genomes) > 0:
continue
for x in default_args.get(attr, []):
x = str(x)
new_val = getattr(args, attr)
if x not in getattr(args, attr):
new_val.append(x)
setattr(args, attr, new_val)
args = _datatarget_defaults(args, default_args)
if "isolate" in default_args and args.isolate is not True:
args.isolate = default_args["isolate"]
return args
def _datatarget_defaults(args, default_args):
"""Set data installation targets, handling defaults.
Sets variation, rnaseq, smallrna as default targets if we're not
isolated to a single method.
Provides back compatibility for toolplus specifications.
"""
default_data = default_args.get("datatarget", [])
# back-compatible toolplus specifications
for x in default_args.get("toolplus", []):
val = None
if x == "data":
val = "gemini"
elif x in ["cadd", "dbnsfp", "dbscsnv", "kraken"]:
val = x
if val and val not in default_data:
default_data.append(val)
new_val = getattr(args, "datatarget")
for x in default_data:
if x not in new_val:
new_val.append(x)
has_std_target = False
std_targets = ["variation", "rnaseq", "smallrna"]
for target in std_targets:
if target in new_val:
has_std_target = True
break
if not has_std_target:
new_val = new_val + std_targets
setattr(args, "datatarget", new_val)
return args
def get_defaults():
install_config = _get_install_config()
if install_config is None or not utils.file_exists(install_config):
return {}
with open(install_config) as in_handle:
return yaml.load(in_handle)
def _check_toolplus(x):
"""Parse options for adding non-standard/commercial tools like GATK and MuTecT.
"""
if "=" in x and len(x.split("=")) == 2:
name, fname = x.split("=")
fname = os.path.normpath(os.path.realpath(fname))
if not os.path.exists(fname):
raise argparse.ArgumentTypeError("Unexpected --toolplus argument for %s. File does not exist: %s"
% (name, fname))
return Tool(name, fname)
else:
raise argparse.ArgumentTypeError("Unexpected --toolplus argument. Expect toolname=filename.")
def add_subparser(subparsers):
parser = subparsers.add_parser("upgrade", help="Install or upgrade bcbio-nextgen")
parser.add_argument("--cores", default=1,
help="Number of cores to use if local indexing is necessary.")
parser.add_argument("--tooldir",
help="Directory to install 3rd party software tools. Leave unspecified for no tools",
type=lambda x: (os.path.abspath(os.path.expanduser(x))), default=None)
parser.add_argument("--tools",
help="Boolean argument specifying upgrade of tools. Uses previously saved install directory",
action="store_true", default=False)
parser.add_argument("-u", "--upgrade", help="Code version to upgrade",
choices=["stable", "development", "system", "deps", "skip"], default="skip")
parser.add_argument("--toolconf", help="YAML configuration file of tools to install", default=None,
type=lambda x: (os.path.abspath(os.path.expanduser(x))))
parser.add_argument("--revision", help="Specify a git commit hash or tag to install", default="master")
parser.add_argument("--toolplus", help="Specify additional tool categories to install",
action="append", default=[], type=_check_toolplus)
parser.add_argument("--datatarget", help="Data to install. Allows customization or install of extra data.",
action="append", default=[],
choices=["variation", "rnaseq", "smallrna", "gemini", "cadd", "vep", "dbnsfp", "dbscsnv", "battenberg", "kraken", "ericscript"])
parser.add_argument("--genomes", help="Genomes to download",
action="append", default=[], choices=SUPPORTED_GENOMES)
parser.add_argument("--aligners", help="Aligner indexes to download",
action="append", default=[],
choices=SUPPORTED_INDEXES)
parser.add_argument("--data", help="Upgrade data dependencies",
dest="install_data", action="store_true", default=False)
parser.add_argument("--isolate", help="Created an isolated installation without PATH updates",
dest="isolate", action="store_true", default=False)
parser.add_argument("--distribution", help="Operating system distribution",
default="",
choices=["ubuntu", "debian", "centos", "scientificlinux", "macosx"])
return parser
def get_cloudbiolinux(remotes):
base_dir = os.path.join(os.getcwd(), "cloudbiolinux")
if not os.path.exists(base_dir):
subprocess.check_call("wget --progress=dot:mega --no-check-certificate -O- %s | tar xz && "
"(mv cloudbiolinux-master cloudbiolinux || mv master cloudbiolinux)"
% remotes["cloudbiolinux"], shell=True)
return {"biodata": os.path.join(base_dir, "config", "biodata.yaml"),
"dir": base_dir}
@contextlib.contextmanager
def bcbio_tmpdir():
orig_dir = os.getcwd()
work_dir = os.path.join(os.getcwd(), "tmpbcbio-install")
if not os.path.exists(work_dir):
os.makedirs(work_dir)
os.chdir(work_dir)
yield work_dir
os.chdir(orig_dir)
shutil.rmtree(work_dir)
|
biocyberman/bcbio-nextgen
|
bcbio/install.py
|
Python
|
mit
| 33,701
|
[
"BWA",
"Bioconda",
"Bowtie",
"Galaxy"
] |
9156e1126951d5dab9aa9e1ad114aeaf95ab054ad6cb9b6e442781535ec982b1
|
#!/usr/bin/python3
"""Define a PGPWords object inherited from bytearray.
Adding initialization via hex- or pgp-word-string,
adding a .hex() method and
overriding __str__.
Mainline code:
Convert pgp words to hex strings and vice versa.
Example:
$ pypgpwords.py DEAD 1337
tactics perceptive Aztec consensus
or
$ pypgpwords.py absurd bodyguard baboon unicorn
0116 14EC
moki@posteo.de
"""
from __future__ import print_function
import sys
SEPARATOR = " "
EVEN = ("aardvark",
"absurd",
"accrue",
"acme",
"adrift",
"adult",
"afflict",
"ahead",
"aimless",
"Algol",
"allow",
"alone",
"ammo",
"ancient",
"apple",
"artist",
"assume",
"Athens",
"atlas",
"Aztec",
"baboon",
"backfield",
"backward",
"banjo",
"beaming",
"bedlamp",
"beehive",
"beeswax",
"befriend",
"Belfast",
"berserk",
"billiard",
"bison",
"blackjack",
"blockade",
"blowtorch",
"bluebird",
"bombast",
"bookshelf",
"brackish",
"breadline",
"breakup",
"brickyard",
"briefcase",
"Burbank",
"button",
"buzzard",
"cement",
"chairlift",
"chatter",
"checkup",
"chisel",
"choking",
"chopper",
"Christmas",
"clamshell",
"classic",
"classroom",
"cleanup",
"clockwork",
"cobra",
"commence",
"concert",
"cowbell",
"crackdown",
"cranky",
"crowfoot",
"crucial",
"crumpled",
"crusade",
"cubic",
"dashboard",
"deadbolt",
"deckhand",
"dogsled",
"dragnet",
"drainage",
"dreadful",
"drifter",
"dropper",
"drumbeat",
"drunken",
"Dupont",
"dwelling",
"eating",
"edict",
"egghead",
"eightball",
"endorse",
"endow",
"enlist",
"erase",
"escape",
"exceed",
"eyeglass",
"eyetooth",
"facial",
"fallout",
"flagpole",
"flatfoot",
"flytrap",
"fracture",
"framework",
"freedom",
"frighten",
"gazelle",
"Geiger",
"glitter",
"glucose",
"goggles",
"goldfish",
"gremlin",
"guidance",
"hamlet",
"highchair",
"hockey",
"indoors",
"indulge",
"inverse",
"involve",
"island",
"jawbone",
"keyboard",
"kickoff",
"kiwi",
"klaxon",
"locale",
"lockup",
"merit",
"minnow",
"miser",
"Mohawk",
"mural",
"music",
"necklace",
"Neptune",
"newborn",
"nightbird",
"Oakland",
"obtuse",
"offload",
"optic",
"orca",
"payday",
"peachy",
"pheasant",
"physique",
"playhouse",
"Pluto",
"preclude",
"prefer",
"preshrunk",
"printer",
"prowler",
"pupil",
"puppy",
"python",
"quadrant",
"quiver",
"quota",
"ragtime",
"ratchet",
"rebirth",
"reform",
"regain",
"reindeer",
"rematch",
"repay",
"retouch",
"revenge",
"reward",
"rhythm",
"ribcage",
"ringbolt",
"robust",
"rocker",
"ruffled",
"sailboat",
"sawdust",
"scallion",
"scenic",
"scorecard",
"Scotland",
"seabird",
"select",
"sentence",
"shadow",
"shamrock",
"showgirl",
"skullcap",
"skydive",
"slingshot",
"slowdown",
"snapline",
"snapshot",
"snowcap",
"snowslide",
"solo",
"southward",
"soybean",
"spaniel",
"spearhead",
"spellbind",
"spheroid",
"spigot",
"spindle",
"spyglass",
"stagehand",
"stagnate",
"stairway",
"standard",
"stapler",
"steamship",
"sterling",
"stockman",
"stopwatch",
"stormy",
"sugar",
"surmount",
"suspense",
"sweatband",
"swelter",
"tactics",
"talon",
"tapeworm",
"tempest",
"tiger",
"tissue",
"tonic",
"topmost",
"tracker",
"transit",
"trauma",
"treadmill",
"Trojan",
"trouble",
"tumor",
"tunnel",
"tycoon",
"uncut",
"unearth",
"unwind",
"uproot",
"upset",
"upshot",
"vapor",
"village",
"virus",
"Vulcan",
"waffle",
"wallet",
"watchword",
"wayside",
"willow",
"woodlark",
"Zulu")
ODD = ("adroitness",
"adviser",
"aftermath",
"aggregate",
"alkali",
"almighty",
"amulet",
"amusement",
"antenna",
"applicant",
"Apollo",
"armistice",
"article",
"asteroid",
"Atlantic",
"atmosphere",
"autopsy",
"Babylon",
"backwater",
"barbecue",
"belowground",
"bifocals",
"bodyguard",
"bookseller",
"borderline",
"bottomless",
"Bradbury",
"bravado",
"Brazilian",
"breakaway",
"Burlington",
"businessman",
"butterfat",
"Camelot",
"candidate",
"cannonball",
"Capricorn",
"caravan",
"caretaker",
"celebrate",
"cellulose",
"certify",
"chambermaid",
"Cherokee",
"Chicago",
"clergyman",
"coherence",
"combustion",
"commando",
"company",
"component",
"concurrent",
"confidence",
"conformist",
"congregate",
"consensus",
"consulting",
"corporate",
"corrosion",
"councilman",
"crossover",
"crucifix",
"cumbersome",
"customer",
"Dakota",
"decadence",
"December",
"decimal",
"designing",
"detector",
"detergent",
"determine",
"dictator",
"dinosaur",
"direction",
"disable",
"disbelief",
"disruptive",
"distortion",
"document",
"embezzle",
"enchanting",
"enrollment",
"enterprise",
"equation",
"equipment",
"escapade",
"Eskimo",
"everyday",
"examine",
"existence",
"exodus",
"fascinate",
"filament",
"finicky",
"forever",
"fortitude",
"frequency",
"gadgetry",
"Galveston",
"getaway",
"glossary",
"gossamer",
"graduate",
"gravity",
"guitarist",
"hamburger",
"Hamilton",
"handiwork",
"hazardous",
"headwaters",
"hemisphere",
"hesitate",
"hideaway",
"holiness",
"hurricane",
"hydraulic",
"impartial",
"impetus",
"inception",
"indigo",
"inertia",
"infancy",
"inferno",
"informant",
"insincere",
"insurgent",
"integrate",
"intention",
"inventive",
"Istanbul",
"Jamaica",
"Jupiter",
"leprosy",
"letterhead",
"liberty",
"maritime",
"matchmaker",
"maverick",
"Medusa",
"megaton",
"microscope",
"microwave",
"midsummer",
"millionaire",
"miracle",
"misnomer",
"molasses",
"molecule",
"Montana",
"monument",
"mosquito",
"narrative",
"nebula",
"newsletter",
"Norwegian",
"October",
"Ohio",
"onlooker",
"opulent",
"Orlando",
"outfielder",
"Pacific",
"pandemic",
"Pandora",
"paperweight",
"paragon",
"paragraph",
"paramount",
"passenger",
"pedigree",
"Pegasus",
"penetrate",
"perceptive",
"performance",
"pharmacy",
"phonetic",
"photograph",
"pioneer",
"pocketful",
"politeness",
"positive",
"potato",
"processor",
"provincial",
"proximate",
"puberty",
"publisher",
"pyramid",
"quantity",
"racketeer",
"rebellion",
"recipe",
"recover",
"repellent",
"replica",
"reproduce",
"resistor",
"responsive",
"retraction",
"retrieval",
"retrospect",
"revenue",
"revival",
"revolver",
"sandalwood",
"sardonic",
"Saturday",
"savagery",
"scavenger",
"sensation",
"sociable",
"souvenir",
"specialist",
"speculate",
"stethoscope",
"stupendous",
"supportive",
"surrender",
"suspicious",
"sympathy",
"tambourine",
"telephone",
"therapist",
"tobacco",
"tolerance",
"tomorrow",
"torpedo",
"tradition",
"travesty",
"trombonist",
"truncated",
"typewriter",
"ultimate",
"undaunted",
"underfoot",
"unicorn",
"unify",
"universe",
"unravel",
"upcoming",
"vacancy",
"vagabond",
"vertigo",
"Virginia",
"visitor",
"vocalist",
"voyager",
"warranty",
"Waterloo",
"whimsical",
"Wichita",
"Wilmington",
"Wyoming",
"yesteryear",
"Yucatan")
class InvalidWordError(ValueError):
pass
def words_to_int(word_iter, odd=False):
"""Generator yielding integer indices for each word in word_iter.
:param word_iter: iterable of pgp words
:type word_iter: iterable
:param odd: start with odd word list
:type odd: boolean
:return: integer
:rtype: generator
"""
for word in word_iter:
try:
yield (ODD if odd else EVEN).index(word)
except ValueError:
msg = "not in {} word list: '{}'"
raise InvalidWordError(msg.format("odd" if odd else "even", word))
# toggle odd/even
odd = not odd
def ints_to_word(int_iter, odd=False):
"""Generator yielding PGP words for each byte/int in int_iter.
:param int_iter: iterable of integers between 0 and 255
:type int_iter: iterable
:param odd: start with odd word list
:type odd: boolean
:return: pgp words
:rtype: generator
"""
for idx in int_iter:
yield (ODD if odd else EVEN)[idx]
# toggle odd/even
odd = not odd
class PGPWords(bytearray):
"""Inherits from bytearray. Add .hex() method and overwrite __str__"""
def __init__(self, source, **kwargs):
"""Initiate bytearray. Added initialization styles:
E.g.:
p = PGPWords("absurd bodyguard baboon", encoding="pgp-words")
p = PGPWords("DEAD 1337", encoding="hex")
"""
enc = kwargs.get("encoding")
if enc == "pgp-words":
kwargs.pop("encoding")
source = words_to_int(source.split(SEPARATOR), **kwargs)
kwargs = {}
elif enc == "hex" or source.startswith('0x'):
kwargs.pop("encoding")
tmp = source.replace("0x", '').replace(' ', '')
source = (int(tmp[i:i+2], 16) for i in range(0, len(tmp), 2))
super(PGPWords, self).__init__(source, **kwargs)
def __str__(self):
"""Return corresponding pgp words, separated by SEPARATOR."""
gen = ints_to_word(self)
return SEPARATOR.join(gen)
def hex(self):
"""Return corresponding hex representation as string"""
tmp = ''.join([hex(i).split('x')[1].zfill(2) for i in self])
gen = (tmp[i:i+4].upper() for i in range(0, len(tmp), 4))
return SEPARATOR.join(gen)
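# Illustrative round trip (not part of the original module): per the module
# docstring, the hex string "DEAD" maps to "tactics perceptive", and converting
# those words back recovers the original hex string.
def _example_roundtrip():
    words = str(PGPWords("DEAD", encoding="hex"))          # "tactics perceptive"
    assert PGPWords(words, encoding="pgp-words").hex() == "DEAD"
    return words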
def main():
"""Try to convert arguments in either direction."""
if len(sys.argv) < 2 or sys.argv[1].startswith('-'):
print(__doc__.split("Mainline code:\n\n")[1], file=sys.stderr)
exit(-1)
arg_str = ' '.join(sys.argv[1:])
try:
result = PGPWords(arg_str, encoding="hex")
print(result)
except ValueError as err1:
try:
result = PGPWords(arg_str, encoding="pgp-words").hex()
print(result)
except InvalidWordError as err2:
print(err1, file=sys.stderr)
print(err2, file=sys.stderr)
exit(-1)
if __name__ == "__main__":
main()
|
mo-ki/pypgpwords
|
pypgpwords.py
|
Python
|
mit
| 13,257
|
[
"ORCA"
] |
1f27e411ea38c3c158adc5d7eb245aa13df0722008c0feac478aaa4e5cbebf5e
|
import numpy as np
import scipy.stats as ss
import scipy.special as sp
from .family import Family
from .flat import Flat
from .normal import Normal
from .gas_recursions import gas_recursion_exponential_orderone, gas_recursion_exponential_ordertwo
from .gas_recursions import gasx_recursion_exponential_orderone, gasx_recursion_exponential_ordertwo
from .gas_recursions import gas_llev_recursion_exponential_orderone, gas_llev_recursion_exponential_ordertwo
from .gas_recursions import gas_llt_recursion_exponential_orderone, gas_llt_recursion_exponential_ordertwo
from .gas_recursions import gas_reg_recursion_exponential_orderone, gas_reg_recursion_exponential_ordertwo
class Exponential(Family):
"""
Exponential Distribution
----
This class contains methods relating to the Exponential distribution for time series.
"""
def __init__(self, lmd=1.0, transform=None, **kwargs):
"""
Parameters
----------
        lmd : float
Rate parameter for the Exponential distribution
transform : str
Whether to apply a transformation to the location variable - e.g. 'exp' or 'logit'
"""
super(Exponential, self).__init__(transform)
self.lmd0 = lmd
self.covariance_prior = False
self.gradient_only = kwargs.get('gradient_only', False) # used for GAS Exponential models
if self.gradient_only is True:
self.score_function = self.first_order_score
else:
self.score_function = self.second_order_score
def approximating_model(self, beta, T, Z, R, Q, h_approx, data):
""" Creates approximating Gaussian state space model for Exponential measurement density
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
h_approx : float
The variance of the measurement density
data: np.array
The univariate time series data
Returns
----------
H : np.array
Approximating measurement variance matrix
mu : np.array
Approximating measurement constants
"""
H = np.ones(data.shape[0])*h_approx
mu = np.zeros(data.shape[0])
return H, mu
def approximating_model_reg(self, beta, T, Z, R, Q, h_approx, data, X, state_no):
""" Creates approximating Gaussian state space model for Exponential measurement density
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
T, Z, R, Q : np.array
State space matrices used in KFS algorithm
h_approx : float
The variance of the measurement density
data: np.array
The univariate time series data
X: np.array
The regressors
state_no : int
Number of states
Returns
----------
H : np.array
Approximating measurement variance matrix
mu : np.array
Approximating measurement constants
"""
H = np.ones(data.shape[0])*h_approx
mu = np.zeros(data.shape[0])
return H, mu
@staticmethod
def build_latent_variables():
""" Builds additional latent variables for this family
Returns
----------
- A list of lists (each sub-list contains latent variable information)
"""
lvs_to_build = []
return lvs_to_build
@staticmethod
def draw_variable(loc, scale, shape, skewness, nsims):
""" Draws random variables from Exponential distribution
Parameters
----------
loc : float
location parameter for the distribution
scale : float
scale parameter for the distribution
shape : float
tail thickness parameter for the distribution
skewness : float
skewness parameter for the distribution
nsims : int or list
number of draws to take from the distribution
Returns
----------
- Random draws from the distribution
"""
return np.random.exponential(1.0/loc, nsims)
@staticmethod
def first_order_score(y, mean, scale, shape, skewness):
""" GAS Exponential Update term using gradient only - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Exponential distribution
scale : float
scale parameter for the Exponential distribution
shape : float
tail thickness parameter for the Exponential distribution
skewness : float
skewness parameter for the Exponential distribution
Returns
----------
- Score of the Exponential family
"""
return 1 - (mean*y)
def logpdf(self, mu):
"""
Log PDF for Exponential prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- log(p(mu))
"""
if self.transform is not None:
mu = self.transform(mu)
return ss.expon.logpdf(mu, self.lmd0)
@staticmethod
def markov_blanket(y, mean, scale, shape, skewness):
""" Markov blanket for the Exponential distribution
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Exponential distribution
scale : float
scale parameter for the Exponential distribution
shape : float
tail thickness parameter for the Exponential distribution
skewness : float
skewness parameter for the Exponential distribution
Returns
----------
- Markov blanket of the Exponential family
"""
return ss.expon.logpdf(x=y, scale=1/mean)
@staticmethod
def exponential_link(x):
return 1.0/np.exp(x)
@staticmethod
def setup():
""" Returns the attributes of this family
Notes
----------
- scale notes whether family has a variance parameter (sigma)
- shape notes whether family has a tail thickness parameter (nu)
- skewness notes whether family has a skewness parameter (gamma)
- mean_transform is a function which transforms the location parameter
- cythonized notes whether the family has cythonized routines
Returns
----------
- model name, link function, scale, shape, skewness, mean_transform, cythonized
"""
name = "Exponential GAS"
link = Exponential.exponential_link
scale = False
shape = False
skewness = False
mean_transform = np.log
cythonized = True
return name, link, scale, shape, skewness, mean_transform, cythonized
@staticmethod
def neg_loglikelihood(y, mean, scale, shape, skewness):
""" Negative loglikelihood function
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Exponential distribution
scale : float
scale parameter for the Exponential distribution
shape : float
tail thickness parameter for the Exponential distribution
skewness : float
skewness parameter for the Exponential distribution
Returns
----------
- Negative loglikelihood of the Exponential family
"""
return -np.sum(ss.expon.logpdf(x=y, scale=1/mean))
def pdf(self, mu):
"""
PDF for Exponential prior
Parameters
----------
mu : float
Latent variable for which the prior is being formed over
Returns
----------
- p(mu)
"""
if self.transform is not None:
mu = self.transform(mu)
return ss.expon.pdf(mu, self.lmd0)
@staticmethod
def reg_score_function(X, y, mean, scale, shape, skewness):
""" GAS Exponential Regression Update term using gradient only - native Python function
Parameters
----------
X : float
datapoint for the right hand side variable
y : float
datapoint for the time series
mean : float
location parameter for the Exponential distribution
scale : float
scale parameter for the Exponential distribution
shape : float
tail thickness parameter for the Exponential distribution
skewness : float
skewness parameter for the Exponential distribution
Returns
----------
- Score of the Exponential family
"""
return X*(1.0 - mean*y)
@staticmethod
def second_order_score(y, mean, scale, shape, skewness):
""" GAS Exponential Update term potentially using second-order information - native Python function
Parameters
----------
y : float
datapoint for the time series
mean : float
location parameter for the Exponential distribution
scale : float
scale parameter for the Exponential distribution
shape : float
tail thickness parameter for the Exponential distribution
skewness : float
skewness parameter for the Exponential distribution
Returns
----------
- Adjusted score of the Exponential family
"""
return 1 - (mean*y)
# Optional Cythonized recursions below for GAS Exponential models
@staticmethod
def gradient_recursion():
""" GAS Exponential Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Exponential model - gradient only
"""
return gas_recursion_exponential_orderone
@staticmethod
def newton_recursion():
""" GAS Exponential Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Exponential model - adjusted score
"""
return gas_recursion_exponential_ordertwo
@staticmethod
def gradientx_recursion():
""" GASX Exponential Model Recursion - gradient only
Returns
----------
- Recursion function for GASX Exponential model - gradient only
"""
return gasx_recursion_exponential_orderone
@staticmethod
def newtonx_recursion():
""" GASX Exponential Model Recursion - adjusted score
Returns
----------
- Recursion function for GASX Exponential model - adjusted score
"""
return gasx_recursion_exponential_ordertwo
@staticmethod
def gradientllev_recursion():
""" GAS Local Level Exponential Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Local Level Exponential model - gradient only
"""
return gas_llev_recursion_exponential_orderone
@staticmethod
def newtonllev_recursion():
""" GAS Local Level Exponential Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Local Level Exponential model - adjusted score
"""
return gas_llev_recursion_exponential_ordertwo
@staticmethod
def gradientllt_recursion():
""" GAS Local Linear Trend Exponential Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Local Linear Trend Exponential model - gradient only
"""
return gas_llt_recursion_exponential_orderone
@staticmethod
def newtonllt_recursion():
""" GAS Local Linear Trend Exponential Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Local Linear Trend Exponential model - adjusted score
"""
return gas_llt_recursion_exponential_ordertwo
@staticmethod
def gradientreg_recursion():
""" GAS Dynamic Regression Exponential Model Recursion - gradient only
Returns
----------
- Recursion function for GAS Dynamic Regression Exponential model - gradient only
"""
return gas_reg_recursion_exponential_orderone
@staticmethod
def newtonreg_recursion():
""" GAS Dynamic Regression Exponential Model Recursion - adjusted score
Returns
----------
- Recursion function for GAS Dynamic Regression Exponential model - adjusted score
"""
return gas_reg_recursion_exponential_ordertwo
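# Illustrative sketch (not part of the original module): exercising the static
# helpers above on a small synthetic series; the numbers are made up for
# demonstration only.
def _example_exponential_family():
    y = Exponential.draw_variable(loc=2.0, scale=None, shape=None, skewness=None, nsims=5)
    rates = np.full(5, 2.0)  # per-observation rate parameters used as the 'mean' argument
    nll = Exponential.neg_loglikelihood(y, rates, None, None, None)
    score = Exponential.first_order_score(y[0], rates[0], None, None, None)  # 1 - mean*y
    return nll, score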
|
RJT1990/pyflux
|
pyflux/families/exponential.py
|
Python
|
bsd-3-clause
| 13,173
|
[
"Gaussian"
] |
050719857d1dcd1f0027e807be35aa53f64a12322d35c30a22d651f5d63d101c
|
"""Functions for fetching checklists and information about visits."""
from ebird.api.utils import call
from ebird.api.validation import (
clean_area,
clean_code,
clean_date,
clean_max_checklists,
)
CHECKLISTS_DATE_URL = "https://ebird.org/ws2.0/product/lists/%s/%s"
CHECKLISTS_RECENT_URL = "https://ebird.org/ws2.0/product/lists/%s"
CHECKLIST_URL = "https://ebird.org/ws2.0/product/checklist/view/%s"
def get_visits(token, area, date=None, max_results=10):
"""
Get the list of checklists for an area. The most recent checklists are
returned if a specific date is not given.
    This maps to the two end points in the eBird API 2.0,
https://documenter.getpostman.com/view/664302/S1ENwy59?version=latest#4416a7cc-623b-4340-ab01-80c599ede73e
https://documenter.getpostman.com/view/664302/S1ENwy59?version=latest#95a206d1-a20d-44e0-8c27-acb09ccbea1a
which return results in the same format.
The eBird API call also has a sortKey parameter which returns records
ordered by observation date or by creation date. Since checklists are
    often submitted a few days after the actual visit, this parameter is
not currently supported. The results are returned ordered by observation
date.
:param token: the token needed to access the API.
:param area: the code for a country, subnational1 region, subnational2
region or location.
:param date: the date, since Jan 1st 1800.
:param max_results: the maximum number of checklists to return from
1 to 200. The default value is 10.
:return: the info for all the checklists submitted.
:raises ValueError: if any of the arguments fail the validation checks.
:raises URLError if there is an error with the connection to the
eBird site.
:raises HTTPError if the eBird API returns an error.
"""
if date is not None:
url = CHECKLISTS_DATE_URL % (clean_area(area), clean_date(date))
else:
url = CHECKLISTS_RECENT_URL % clean_area(area)
params = {
"maxVisits": clean_max_checklists(max_results),
"sortKey": "obs_dt",
}
headers = {
"X-eBirdApiToken": token,
}
return call(url, params, headers)
def get_checklist(token, sub_id):
"""
Get the contents of a checklist.
    The information returned includes the checklist attributes, date, etc. and the
    list of observations. Only the codes for the location and subnational1 region are
    included; you will need to call get_hotspot_info() to get the full details
    of the location.
    This maps to the end point in the eBird API 2.0,
https://documenter.getpostman.com/view/664302/S1ENwy59?version=latest#4416a7cc-623b-4340-ab01-80c599ede73e
:param token: the token needed to access the API.
:param sub_id: the unique identifier for the checklist, e.g. S22893621.
:return: the details of the checklist, including the list of observations
:raises ValueError: if any of the arguments fail the validation checks.
:raises URLError if there is an error with the connection to the
eBird site.
:raises HTTPError if the eBird API returns an error.
"""
url = CHECKLIST_URL % clean_code(sub_id)
headers = {
"X-eBirdApiToken": token,
}
return call(url, {}, headers)
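# Illustrative sketch (not part of the original module): fetch the most recent
# visits for a region, then pull the full contents of the first checklist. The
# token and region code are placeholders, and the "subId" key is assumed from
# the eBird API 2.0 response format.
def _example_recent_checklists():
    token = "my-ebird-api-token"  # placeholder, not a real key
    visits = get_visits(token, "US-NY-109", max_results=5)
    if visits:
        return get_checklist(token, visits[0]["subId"])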
|
ProjectBabbler/ebird-api
|
src/ebird/api/checklists.py
|
Python
|
mit
| 3,290
|
[
"VisIt"
] |
bbcdbd323923f068058d792ced028c6bac0f2b571a90f339f7c261d3656bf772
|
# -*- coding: utf-8 -*-
import sys
import numpy as np
from ase.optimize.optimize import Optimizer
from ase.utils.linesearch import LineSearch
class LBFGS(Optimizer):
"""Limited memory BFGS optimizer.
    A limited memory version of the BFGS algorithm. Unlike the BFGS algorithm
    used in bfgs.py, the inverse of the Hessian matrix is updated. The inverse
    Hessian is represented only as a diagonal matrix to save memory.
"""
def __init__(self, atoms, restart=None, logfile='-', trajectory=None,
maxstep=None, memory=100, damping=1.0, alpha=70.0,
use_line_search=False):
"""
Parameters:
restart: string
Pickle file used to store vectors for updating the inverse of
Hessian matrix. If set, file with such a name will be searched
and information stored will be used, if the file exists.
logfile: string
Where should output go. None for no output, '-' for stdout.
trajectory: string
Pickle file used to store trajectory of atomic movement.
maxstep: float
How far is a single atom allowed to move. This is useful for DFT
calculations where wavefunctions can be reused if steps are small.
Default is 0.04 Angstrom.
memory: int
Number of steps to be stored. Default value is 100. Three numpy
arrays of this length containing floats are stored.
damping: float
The calculated step is multiplied with this number before added to
the positions.
alpha: float
Initial guess for the Hessian (curvature of energy surface). A
conservative value of 70.0 is the default, but number of needed
steps to converge might be less if a lower value is used. However,
a lower value also means risk of instability.
"""
Optimizer.__init__(self, atoms, restart, logfile, trajectory)
if maxstep is not None:
if maxstep > 1.0:
raise ValueError('You are using a much too large value for ' +
'the maximum step size: %.1f Angstrom' %
maxstep)
self.maxstep = maxstep
else:
self.maxstep = 0.04
self.memory = memory
self.H0 = 1. / alpha # Initial approximation of inverse Hessian
# 1./70. is to emulate the behaviour of BFGS
# Note that this is never changed!
self.damping = damping
self.use_line_search = use_line_search
self.p = None
self.function_calls = 0
self.force_calls = 0
def initialize(self):
"""Initalize everything so no checks have to be done in step"""
self.iteration = 0
self.s = []
self.y = []
        self.rho = [] # Store also rho, to avoid calculating the dot product
# again and again
self.r0 = None
self.f0 = None
self.e0 = None
self.task = 'START'
self.load_restart = False
def read(self):
"""Load saved arrays to reconstruct the Hessian"""
self.iteration, self.s, self.y, self.rho, \
self.r0, self.f0, self.e0, self.task = self.load()
self.load_restart = True
def step(self, f):
"""Take a single step
Use the given forces, update the history and calculate the next step --
then take it"""
r = self.atoms.get_positions()
self.update(r, f, self.r0, self.f0)
s = self.s
y = self.y
rho = self.rho
H0 = self.H0
loopmax = np.min([self.memory, self.iteration])
a = np.empty((loopmax,), dtype=np.float64)
### The algorithm itself:
q = -f.reshape(-1)
for i in range(loopmax - 1, -1, -1):
a[i] = rho[i] * np.dot(s[i], q)
q -= a[i] * y[i]
z = H0 * q
for i in range(loopmax):
b = rho[i] * np.dot(y[i], z)
z += s[i] * (a[i] - b)
self.p = - z.reshape((-1, 3))
###
g = -f
if self.use_line_search == True:
e = self.func(r)
self.line_search(r, g, e)
dr = (self.alpha_k * self.p).reshape(len(self.atoms), -1)
else:
self.force_calls += 1
self.function_calls += 1
dr = self.determine_step(self.p) * self.damping
self.atoms.set_positions(r + dr)
self.iteration += 1
self.r0 = r
self.f0 = -g
self.dump((self.iteration, self.s, self.y,
self.rho, self.r0, self.f0, self.e0, self.task))
def determine_step(self, dr):
"""Determine step to take according to maxstep
Normalize all steps as the largest step. This way
we still move along the eigendirection.
"""
steplengths = (dr**2).sum(1)**0.5
longest_step = np.max(steplengths)
if longest_step >= self.maxstep:
dr *= self.maxstep / longest_step
return dr
def update(self, r, f, r0, f0):
"""Update everything that is kept in memory
This function is mostly here to allow for replay_trajectory.
"""
if self.iteration > 0:
s0 = r.reshape(-1) - r0.reshape(-1)
self.s.append(s0)
# We use the gradient which is minus the force!
y0 = f0.reshape(-1) - f.reshape(-1)
self.y.append(y0)
rho0 = 1.0 / np.dot(y0, s0)
self.rho.append(rho0)
if self.iteration > self.memory:
self.s.pop(0)
self.y.pop(0)
self.rho.pop(0)
def replay_trajectory(self, traj):
"""Initialize history from old trajectory."""
if isinstance(traj, str):
from ase.io.trajectory import PickleTrajectory
traj = PickleTrajectory(traj, 'r')
r0 = None
f0 = None
# The last element is not added, as we get that for free when taking
# the first qn-step after the replay
for i in range(0, len(traj) - 1):
r = traj[i].get_positions()
f = traj[i].get_forces()
self.update(r, f, r0, f0)
r0 = r.copy()
f0 = f.copy()
self.iteration += 1
self.r0 = r0
self.f0 = f0
def func(self, x):
"""Objective function for use of the optimizers"""
self.atoms.set_positions(x.reshape(-1, 3))
self.function_calls += 1
return self.atoms.get_potential_energy()
def fprime(self, x):
"""Gradient of the objective function for use of the optimizers"""
self.atoms.set_positions(x.reshape(-1, 3))
self.force_calls += 1
# Remember that forces are minus the gradient!
return - self.atoms.get_forces().reshape(-1)
def line_search(self, r, g, e):
self.p = self.p.ravel()
p_size = np.sqrt((self.p **2).sum())
if p_size <= np.sqrt(len(self.atoms) * 1e-10):
self.p /= (p_size / np.sqrt(len(self.atoms) * 1e-10))
g = g.ravel()
r = r.ravel()
ls = LineSearch()
self.alpha_k, e, self.e0, self.no_update = \
ls._line_search(self.func, self.fprime, r, self.p, g, e, self.e0,
maxstep=self.maxstep, c1=.23,
c2=.46, stpmax=50.)
if self.alpha_k is None:
raise RuntimeError('LineSearch failed!')
class LBFGSLineSearch(LBFGS):
"""This optimizer uses the LBFGS algorithm, but does a line search that
    fulfills the Wolfe conditions.
"""
def __init__(self, *args, **kwargs):
kwargs['use_line_search'] = True
LBFGS.__init__(self, *args, **kwargs)
# """Modified version of LBFGS.
#
# This optimizer uses the LBFGS algorithm, but does a line search for the
# minimum along the search direction. This is done by issuing an additional
# force call for each step, thus doubling the number of calculations.
#
# Additionally the Hessian is reset if the new guess is not sufficiently
# better than the old one.
# """
# def __init__(self, *args, **kwargs):
# self.dR = kwargs.pop('dR', 0.1)
# LBFGS.__init__(self, *args, **kwargs)
#
# def update(self, r, f, r0, f0):
# """Update everything that is kept in memory
#
# This function is mostly here to allow for replay_trajectory.
# """
# if self.iteration > 0:
# a1 = abs(np.dot(f.reshape(-1), f0.reshape(-1)))
# a2 = np.dot(f0.reshape(-1), f0.reshape(-1))
# if not (a1 <= 0.5 * a2 and a2 != 0):
# # Reset optimization
# self.initialize()
#
# # Note that the reset above will set self.iteration to 0 again
# # which is why we should check again
# if self.iteration > 0:
# s0 = r.reshape(-1) - r0.reshape(-1)
# self.s.append(s0)
#
# # We use the gradient which is minus the force!
# y0 = f0.reshape(-1) - f.reshape(-1)
# self.y.append(y0)
#
# rho0 = 1.0 / np.dot(y0, s0)
# self.rho.append(rho0)
#
# if self.iteration > self.memory:
# self.s.pop(0)
# self.y.pop(0)
# self.rho.pop(0)
#
# def determine_step(self, dr):
# f = self.atoms.get_forces()
#
# # Unit-vector along the search direction
# du = dr / np.sqrt(np.dot(dr.reshape(-1), dr.reshape(-1)))
#
# # We keep the old step determination before we figure
# # out what is the best to do.
# maxstep = self.maxstep * np.sqrt(3 * len(self.atoms))
#
# # Finite difference step using temporary point
# self.atoms.positions += (du * self.dR)
# # Decide how much to move along the line du
# Fp1 = np.dot(f.reshape(-1), du.reshape(-1))
# Fp2 = np.dot(self.atoms.get_forces().reshape(-1), du.reshape(-1))
# CR = (Fp1 - Fp2) / self.dR
# #RdR = Fp1*0.1
# if CR < 0.0:
# #print "negcurve"
# RdR = maxstep
# #if(abs(RdR) > maxstep):
# # RdR = self.sign(RdR) * maxstep
# else:
# Fp = (Fp1 + Fp2) * 0.5
# RdR = Fp / CR
# if abs(RdR) > maxstep:
# RdR = np.sign(RdR) * maxstep
# else:
# RdR += self.dR * 0.5
# return du * RdR
class HessLBFGS(LBFGS):
"""Backwards compatibiliyt class"""
def __init__(self, *args, **kwargs):
if 'method' in kwargs:
del kwargs['method']
sys.stderr.write('Please use LBFGS instead of HessLBFGS!')
LBFGS.__init__(self, *args, **kwargs)
class LineLBFGS(LBFGSLineSearch):
"""Backwards compatibiliyt class"""
def __init__(self, *args, **kwargs):
if 'method' in kwargs:
del kwargs['method']
sys.stderr.write('Please use LBFGSLineSearch instead of LineLBFGS!')
LBFGSLineSearch.__init__(self, *args, **kwargs)
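# Illustrative sketch (not part of the original module): relaxing a small rattled
# copper cell with the optimizer defined above. The EMT calculator and the bulk()
# builder are standard ASE components, but their import paths vary between ASE
# versions, so treat them as assumptions.
def _example_relax():
    from ase.build import bulk
    from ase.calculators.emt import EMT
    atoms = bulk('Cu', 'fcc', a=3.6).repeat((2, 2, 2))
    atoms.rattle(stdev=0.1, seed=42)   # perturb positions so there is something to relax
    atoms.set_calculator(EMT())
    opt = LBFGS(atoms, logfile=None)
    opt.run(fmax=0.05)                 # iterate until the largest force is below 0.05 eV/Angstrom
    return atoms.get_potential_energy()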
|
grhawk/ASE
|
tools/ase/optimize/lbfgs.py
|
Python
|
gpl-2.0
| 11,211
|
[
"ASE"
] |
d2e3a9ee3d396cd983656614e8b88586f42ec68b20e29ff09035e59aff0e9483
|
import csv
import random
import requests
import string
from io import StringIO
from typing import Dict, List
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import Sample
from data_refinery_foreman.surveyor.utils import requests_retry_session
logger = get_and_configure_logger(__name__)
def extract_title(sample: Dict) -> str:
""" Given a flat sample dictionary, find the title """
# Specifically look up for imported, non-SDRF AE samples
for comment in sample.get('source_comment', []):
if 'title' in comment.get('name', ''):
return comment['value']
title_fields = [
'title',
'sample title',
'sample name',
'subject number',
'labeled extract name',
'extract name'
]
title_fields = add_variants(title_fields)
for key, value in sorted(sample.items(), key=lambda x: x[0].lower()):
lower_key = key.lower().strip()
if lower_key in title_fields:
return value
# If we can't even find a unique title for this sample
# something has gone horribly wrong.
return None
def harmonize(metadata: List) -> Dict:
"""
Given a list of samples and their metadata, extract these common properties:
`title`,
`sex`,
`age`,
`specimen_part`,
`genetic_information`,
`disease`,
`disease_stage`,
`cell_line`,
`treatment`,
`race`,
`subject`,
`compound`,
`time`
Array Express Example:
{'Array Data File': 'C30061.CEL',
'Array Design REF': 'A-AFFY-1',
'Assay Name': '1009003-C30061',
'Characteristics[age]': '38',
'Characteristics[developmental stage]': 'adult',
'Characteristics[organism part]': 'islet',
'Characteristics[organism]': 'Homo sapiens',
'Characteristics[sex]': 'male',
'Comment [ArrayExpress FTP file]': 'ftp://ftp.ebi.ac.uk/pub/databases/microarray/data/experiment/MTAB/E-MTAB-3050/E-MTAB-3050.raw.1.zip',
'Comment [Derived ArrayExpress FTP file]': 'ftp://ftp.ebi.ac.uk/pub/databases/microarray/data/experiment/MTAB/E-MTAB-3050/E-MTAB-3050.processed.1.zip',
'Derived Array Data File': 'C30061.txt',
'Description': 'Islets from 38 years old male. Time from islet preparation '
'to culture initiation: 96 hours. Provided by the North West '
'Tissue Center Seattle.',
'Extract Name': 'donor B differentiated cells RNA',
'Factor Value[cell type]': 'differentiated',
'Factor Value[individual]': 'B',
'Factor Value[test result]': 'unsuccessful',
'Image File': 'C30061.DAT',
'Label': 'biotin',
'Labeled Extract Name': 'donor B differentiated cells LEX',
'Material Type': 'cell',
'Protocol REF': 'P-MTAB-41862',
' ': 'donor B islets',
'Technology Type': 'array assay',
'Unit [time unit]': 'year',
'sex': 'male'}
SRA Example:
{'alias': 'GSM2997959_r1',
'broker_name': 'GEO',
'center_name': 'GEO',
'center_project_name': 'GSE99065',
'ena-base-count': '279111754922',
'ena-spot-count': '1152605026',
'experiment_accession': 'SRX3691797',
'experiment_design_description': None,
'experiment_title': 'NextSeq 500 paired end sequencing; GSM2997959: INOF_FRT; '
'Homo sapiens; RNA-Seq',
'lab_name': '',
'library_construction_protocol': 'cDNA library produced with TGIRT RNA was '
'isolated and DNase treated using RNEasy '
'mini kit (Qiagen 74106) according to the '
'manufacturer protocol. 5ug DNA-free total '
'RNA was then ribodepleted using Ribo-zero '
'Gold (Illumina RZG1224 ) according to the '
'manufacturer protocol and purified using a '
'modified ZYMO RNA Clean and Concentrator '
'(R1016) protocol where 8 volumes EtOH '
'instead of 4. rRNA depleted RNA was '
'fragmented with NEBNext Magnesium RNA '
'Fragmentation Module (E6150) followed by '
'dephosphorylation using T4PNK (mandel ) '
'and purified by same modified ZYMO '
'protocol. cDNAs were synthesized via TGIRT '
'template-switching with 1µM TGIRT-III '
'reverse transcriptase (Ingex, LLC) for 15 '
'min at 60o C, during which a DNA '
'oligonucleotide containing the complement '
'of an Illumina Read 2 sequencing '
'primer-binding site becomes seamlessly '
"linked to the 5' cDNA end. After reaction "
'cleanup (Qiagen MinElute Reaction cleanup '
"28206), a 5' adenylated DNA oligonucleotide "
'containing the complement of an Illumina '
'Read 1 sequencing primer-binding site is '
"then ligated to the 3' cDNA end with "
"Thermostable 5' AppDNA / RNA Ligase (New "
'England Biolabs M0319). Properly ligated '
'cDNAs were amplified by PCR (12 cycles) to '
'synthesize the second strand and add '
'Illumina flowcell capture and index '
'sequences. Library was size-selected with '
'Ampure XP beads (Beckman-Coulter) and '
'quantified with Qubit and evaluated on an '
'Agilent 2100 Bioanalyzer.',
'library_layout': 'PAIRED',
'library_selection': 'cDNA',
'library_source': 'TRANSCRIPTOMIC',
'library_strategy': 'RNA-Seq',
'organism_id': '9606',
'organism_name': 'HOMO SAPIENS',
'platform_instrument_model': 'NextSeq500',
'run_accession': 'SRR6718414',
'run_ena_base_count': '1773379870',
'run_ena_first_public': '2018-02-17',
'run_ena_last_update': '2018-02-17',
'run_ena_spot_count': '15046271',
'sample_accession': 'SRS2951393',
'sample_cell_type': 'Immortalized normal ovarian fibroblast',
'sample_ena_base_count': '1773379870',
'sample_ena_first_public': '2018-02-14',
'sample_ena_last_update': '2018-02-14',
'sample_ena_spot_count': '15046271',
'sample_source_name': 'INOF cell line',
'sample_title': 'INOF_FRT',
'sample_treatment': 'none',
'study_abstract': 'The ability to compare the abundance of one RNA molecule '
'to another is a crucial step for understanding how gene '
'expression is modulated to shape the transcriptome '
'landscape. However, little information is available about '
'the relative expression of the different classes of coding '
'and non-coding RNA or even between RNA of the same class. '
'In this study, we present a complete portrait of the human '
'transcriptome that depicts the relationship of all classes '
'of non-ribosomal RNA longer than sixty nucleotides. The '
'results show that the most abundant RNA in the human '
'rRNA-depleted transcriptome is tRNA followed by '
'spliceosomal RNA. Surprisingly, the signal recognition '
'particle RNA 7SL by itself occupied 8% of the ribodepleted '
'transcriptome producing a similar number of transcripts as '
'that produced by all snoRNA genes combined. In general, '
'the most abundant RNA are non-coding but many more protein '
'coding than non-coding genes produce more than 1 '
'transcript per million. Examination of gene functions '
'suggests that RNA abundance reflects both gene and cell '
'function. Together, the data indicate that the human '
'transcriptome is shaped by a small number of highly '
'expressed non-coding genes and a large number of '
'moderately expressed protein coding genes that reflect '
'cellular phenotypes. Overall design: RNA was isolated from '
'SKOV3ip1 and INOF human cell lines and selected with '
'different methods. The resulting libraries were '
'multiplexed and paired-end sequenced using Illumina HiSeq.',
'study_accession': 'SRP107324',
'study_ena_base_count': '279111754922',
'study_ena_first_public': '2017-09-25',
'study_ena_last_update': '2018-02-15',
'study_ena_spot_count': '1152605026',
'study_title': 'Simultaneous detection and relative quantification of coding '
'and non-coding RNA using a single sequencing reaction',
'study_type': 'Transcriptome Analysis',
'submission_accession': 'SRA562540',
'submission_comment': 'submission brokered by GEO',
'submission_title': 'Submitted by Gene Expression Omnibus on 25-SEP-2017'}
GEO:
ex:
{'channel_count': ['1'],
'characteristics_ch1': ['patient: P-39',
'gender: female',
'age: 65',
'location: lower leg',
'transplanted organ: kidney',
'immunosuppressive drugs: azathioprine + prednison',
'sample type: squamous cell carcinoma',
'cell type: keratinocyte'],
'contact_address': ['Einthovenweg 20'],
'contact_city': ['Leiden'],
'contact_country': ['Netherlands'],
'contact_department': ['Toxicogenetics, S4-P'],
'contact_email': ['h.vrieling@lumc.nl'],
'contact_institute': ['Leiden University Medical Center'],
'contact_laboratory': ['room T4-34'],
'contact_name': ['Harry,,Vrieling'],
'contact_zip/postal_code': ['2333 ZC'],
'data_processing': ['Raw data was extracted from the BeadChip data files in '
'Illumina’s BeadStudio Version 3.2 software using the '
'gene expression module (v 3.2.7). Background subtracted '
'data was further analyzed in R-based Bioconductor '
'package, lumi (version 1.12.4). In lumi, the data was '
'transformed (variance-stabilizing transformation (VST)) '
'and normalized (robust spline normalization (RSN)), '
'resulting in log-transformed normalized data. The '
'R-package illuminaHumanv2.db (version 1.4.1) was used '
'for annotation. The data were purged of genes that did '
'not meet the detection limit (expression-detection '
'P-value >0.01) and/or were not annotated. The limma '
'R-package (version 3.2.3) was used to identify '
'differentially expressed genes (DEGs) between SCC, AK '
'and NS. Gene set enrichment analysis (GSEA) was '
'performed with the significantly DEGs from the limma '
'analysis using DAVID Bioinformatic Resources v6.7 '
'(http://david.abcc.ncifcrf.gov). GSEA on the entire data '
'set was performed using the parametric gene set '
'enrichment analysis (PGSEA) R-package (version '
'1.14.0). To identify activation of transcription '
'factors in AKs and SCCs, the DEGs from the limma '
'analysis were investigated using the online analysis '
'tool oPOSSUM.',
'Matrix normalized matrix shows VST-transformed, '
'RSN-normalized data (used scripts from lumi package)',
'Matrix non-normalized: AVG_Signal: average signal for '
'the probe; BEAD_STDERR: standard error of the beads; '
'Avg_NBEADS: average number of beads for that probe; '
'Detection Pval: detection p-value. All extracted from '
'Beadstudio'],
'data_row_count': ['48701'],
'description': ['1881436235_A'],
'extract_protocol_ch1': ['RNA was isolated from SCC and AK samples that '
'contained at least 70% tumor cells, as determined '
'by haematoxylin and eosin stained frozen sections. '
'From the sample of unexposed NS the epidermis was '
'removed for further processing by cryosectioning '
'parallel to the outer surface of the skin biopsy. '
'RNA was extracted from frozen material using the '
'RNeasy Fibrous Tissue kit (Qiagen), which included '
'proteinase K treatment (10 min at 55˚C) of the '
'lysed sample in RLT-buffer and on-column DNase '
'treatment. RNA was quantified using a Nanodrop '
'(NanoDrop technologies) and evaluated for '
'degradation with a RNA 6000 Nano Labchip on the '
'2100 Bioanalyzer (Agilent Technologies)'],
'geo_accession': ['GSM808778'],
'hyb_protocol': ['The standard Illumina hybridization protocol was used. In '
'brief, the samples were hybridized to the arrays at 58ºC '
'overnight.'],
'label_ch1': ['biotin'],
'label_protocol_ch1': ['100 ng of total RNA was converted to cDNA and '
'subsequently labeled cRNA using the Ambion Illumina '
'TotalPrep RNA Amplification kit (Ambion) according to '
'manufacturer’s instructions'],
'last_update_date': ['Feb 06 2013'],
'molecule_ch1': ['total RNA'],
'organism_ch1': ['Homo sapiens'],
'platform_id': ['GPL6102'],
'scan_protocol': ['The beadChips were scanned using the Illumina BeadArray '
'Reader, using the standard Illumina scanning protocol'],
'series_id': ['GSE32628', 'GSE32969', 'GSE32979'],
'source_name_ch1': ['cutaneous squamous cell carcinoma'],
'status': ['Public on Feb 06 2013'],
'submission_date': ['Oct 05 2011'],
'supplementary_file': ['NONE'],
'taxid_ch1': ['9606'],
'title': ['SCC_P-39'],
'type': ['RNA']}
"""
# Prepare the harmonized samples
original_samples = []
harmonized_samples = {}
##
# Title!
# We also use the title as the key in the returned dictionary
##
used_titles = []
for sample in metadata:
title = extract_title(sample)
# If we can't even find a unique title for this sample
# something has gone horribly wrong.
if title:
if title in used_titles:
title = title + "_" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(12))
used_titles.append(title)
new_sample = sample.copy()
new_sample['title'] = title
original_samples.append(new_sample)
harmonized_samples[title] = {}
else:
logger.warn("Cannot determine sample title!", sample=sample)
##
# Sex!
##
sex_fields = [
'sex',
'gender',
'subject gender',
'subjext sex',
        # This looks redundant, but there are some samples which use
# Characteristic[Characteristic[sex]]
'characteristic [sex]',
'characteristics [sex]',
]
sex_fields = add_variants(sex_fields)
for sample in original_samples:
title = sample['title']
for key, value in sample.items():
lower_key = key.lower().strip()
if lower_key in sex_fields:
if value.lower() in ['f', 'female', 'woman']:
harmonized_samples[title]['sex'] = "female"
break
elif value.lower() in ['m', 'male', 'man']:
harmonized_samples[title]['sex'] = "male"
break
else:
harmonized_samples[title]['sex'] = value.lower()
break
##
# Age!
##
age_fields = [
'age',
'patient age',
'age of patient',
'age (years)',
'age (months)',
'age (days)',
'age (hours)',
'age at diagnosis',
'age at diagnosis years',
'age at diagnosis months',
'age at diagnosis days',
'age at diagnosis hours',
'characteristic [age]',
'characteristics [age]',
]
age_fields = add_variants(age_fields)
for sample in original_samples:
title = sample['title']
for key, value in sample.items():
lower_key = key.lower().strip()
if lower_key in age_fields:
try:
harmonized_samples[title]['age'] = float(value)
except ValueError:
try:
harmonized_samples[title]['age'] = float(value.split(' ')[0])
except ValueError:
# This is probably something weird, like a '.'
continue
break
##
# Organ Parts!
# Cell Type and Organ Type are different but grouped,
# See: https://github.com/AlexsLemonade/refinebio/issues/165#issuecomment-376684079
##
part_fields = [
# AE
'organism part',
'cell type',
'tissue',
'tissue type',
'tissue source',
'tissue origin',
'source tissue',
'tissue subtype',
'tissue/cell type',
'tissue region',
'tissue compartment',
'tissues',
'tissue of origin',
'tissue-type',
'tissue harvested',
'cell/tissue type',
'tissue subregion',
'organ',
'characteristic [organism part]',
'characteristics [organism part]',
# SRA
        'cell_type',
        'organismpart',
# GEO
'isolation source',
'tissue sampled',
'cell description'
]
part_fields = add_variants(part_fields)
for sample in original_samples:
title = sample['title']
for key, value in sample.items():
lower_key = key.lower().strip()
if lower_key in part_fields:
harmonized_samples[title]['specimen_part'] = value.lower().strip()
break
##
# Genetic information!
##
genetic_information_fields = [
'strain/background',
'strain',
'strain or line',
'background strain',
'genotype',
'genetic background',
'genetic information',
'genotype/variation',
'ecotype',
'cultivar',
'strain/genotype',
]
genetic_information_fields = add_variants(genetic_information_fields)
for sample in original_samples:
title = sample['title']
for key, value in sample.items():
lower_key = key.lower().strip()
if lower_key in genetic_information_fields:
harmonized_samples[title]['genetic_information'] = value.lower().strip()
##
# Disease!
##
disease_fields = [
'disease',
'disease state',
'disease status',
'diagnosis',
'disease',
'infection with',
'sample type',
]
disease_fields = add_variants(disease_fields)
for sample in original_samples:
title = sample['title']
for key, value in sample.items():
lower_key = key.lower().strip()
if lower_key in disease_fields:
harmonized_samples[title]['disease'] = value.lower().strip()
##
# Disease Stage!
##
disease_stage_fields = [
'disease state',
'disease staging',
'disease stage',
'grade',
'tumor grade',
'who grade',
'histological grade',
'tumor grading',
'disease outcome',
'subject status',
]
disease_stage_fields = add_variants(disease_stage_fields)
for sample in original_samples:
title = sample['title']
for key, value in sample.items():
lower_key = key.lower().strip()
if lower_key in disease_stage_fields:
harmonized_samples[title]['disease_stage'] = value.lower().strip()
##
# Cell Line!
##
cell_line_fields = [
'cell line',
'sample strain',
]
cell_line_fields = add_variants(cell_line_fields)
for sample in original_samples:
title = sample['title']
for key, value in sample.items():
lower_key = key.lower().strip()
if lower_key in cell_line_fields:
harmonized_samples[title]['cell_line'] = value.lower().strip()
##
# Treatment!
##
treatment_fields = [
'treatment',
'treatment group',
'treatment protocol',
'drug treatment',
'clinical treatment',
]
treatment_fields = add_variants(treatment_fields)
for sample in original_samples:
title = sample['title']
for key, value in sample.items():
lower_key = key.lower().strip()
if lower_key in treatment_fields:
harmonized_samples[title]['treatment'] = value.lower().strip()
##
# Race!
##
race_fields = [
'race',
'ethnicity',
'race/ethnicity',
]
race_fields = add_variants(race_fields)
for sample in original_samples:
title = sample['title']
for key, value in sample.items():
lower_key = key.lower().strip()
if lower_key in race_fields:
harmonized_samples[title]['race'] = value.lower().strip()
##
# Subject
##
subject_fields = [
# AE
'subject',
'subject id',
'subject/sample source id',
'subject identifier',
'human subject anonymized id',
'individual',
'individual identifier',
'individual id',
'patient',
'patient id',
'patient identifier',
'patient number',
'patient no',
'donor id',
'donor',
# SRA
'sample_source_name'
]
subject_fields = add_variants(subject_fields)
for sample in original_samples:
title = sample['title']
for key, value in sample.items():
lower_key = key.lower().strip()
if lower_key in subject_fields:
harmonized_samples[title]['subject'] = value.lower().strip()
##
    # Development Stage!
##
development_stage_fields = [
'developmental stage',
'development stage',
'development stages'
]
development_stage_fields = add_variants(development_stage_fields)
for sample in original_samples:
title = sample['title']
for key, value in sample.items():
lower_key = key.lower().strip()
if lower_key in development_stage_fields:
harmonized_samples[title]['developmental_stage'] = value.lower().strip()
##
# Compound!
##
compound_fields = [
'compound',
'compound1',
'compound2',
'compound name',
'drug',
'drugs',
'immunosuppressive drugs'
]
compound_fields = add_variants(compound_fields)
for sample in original_samples:
title = sample['title']
for key, value in sample.items():
lower_key = key.lower().strip()
if lower_key in compound_fields:
harmonized_samples[title]['compound'] = value.lower().strip()
##
# Time!
##
time_fields = [
'time',
'initial time point',
'start time',
'stop time',
'time point',
'sampling time point',
'sampling time',
'time post infection'
]
time_fields = add_variants(time_fields)
for sample in original_samples:
title = sample['title']
for key, value in sample.items():
lower_key = key.lower().strip()
if lower_key in time_fields:
harmonized_samples[title]['time'] = value.lower().strip()
return harmonized_samples
def add_variants(original_list: List):
""" Given a list of strings, create variations likely to give metadata hits.
    E.g., given 'cell line', add the ability to hit on 'characteristic [cell_line]' as well.
"""
precopy = original_list.copy()
    # Generate variant forms of multi-word strings
for item in original_list:
if ' ' in item:
precopy.append(item.replace(' ', '_'))
precopy.append(item.replace(' ', '-'))
precopy.append(item.replace(' ', ''))
    # Add variants matching common metadata key patterns
copy = precopy.copy()
for item in precopy:
copy.append("characteristic [" + item + "]")
copy.append("characteristic[" + item + "]")
copy.append("characteristics [" + item + "]")
copy.append("characteristics[" + item + "]")
copy.append("comment [" + item + "]")
copy.append("comment[" + item + "]")
copy.append("comments [" + item + "]")
copy.append("comments[" + item + "]")
copy.append("factorvalue[" + item + "]")
copy.append("factor value[" + item + "]")
copy.append("factorvalue [" + item + "]")
copy.append("factor value [" + item + "]")
copy.append("sample_" + item)
copy.append("sample_host" + item)
copy.append("sample_sample_" + item) # Yes, seriously.
return copy
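# Illustrative check (not part of the original module): one field name expands to
# many key spellings, covering underscore/hyphen forms plus the bracketed
# "characteristic(s)"/"comment(s)"/"factor value" and "sample_" prefixed styles.
def _example_variants():
    variants = add_variants(['cell line'])
    assert 'cell_line' in variants
    assert 'characteristics [cell line]' in variants
    assert 'factor value[cell_line]' in variants
    return variants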
def parse_sdrf(sdrf_url: str) -> List:
""" Given a URL to an SDRF file, download parses it into JSON. """
try:
sdrf_text = requests_retry_session().get(sdrf_url, timeout=60).text
except Exception as e:
logger.exception("Unable to fetch URL: " + sdrf_url, exception=str(e))
return []
samples = []
reader = csv.reader(StringIO(sdrf_text), delimiter='\t')
for offset, line in enumerate(reader):
# Get the keys
if offset == 0:
keys = line
continue
sample_values = line
# Skip malformed lines
if len(sample_values) != len(keys):
continue
sample = {}
for col, value in enumerate(sample_values):
key = keys[col]
sample[key] = value
samples.append(sample)
return samples
def preprocess_geo(items: List) -> List:
"""
Prepares items from GEO for harmonization
"""
preprocessed_samples = []
for sample_id, sample in items:
new_sample = {}
for key, value in sample.metadata.items():
if key == "characteristics_ch1":
for pair in value:
# This will almost always happen, except if we get
# a malformed response from the server.
if ':' in pair:
split = pair.split(':', 1)
new_sample[split[0].strip()] = split[1].strip()
continue
# Probably won't be a list with length greater than one,
# but maybe?
new_sample[key] = " ".join(value)
preprocessed_samples.append(new_sample)
return preprocessed_samples
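# Illustrative sketch (not part of the original module): running harmonize() on a
# minimal hand-written metadata record. The field names match variants already in
# the lookup tables above; the values are invented for demonstration.
def _example_harmonize():
    metadata = [{
        'sample title': 'SCC_P-39',
        'gender': 'female',
        'age': '65',
        'tissue': 'lower leg skin',
        'sample type': 'squamous cell carcinoma',
    }]
    harmonized = harmonize(metadata)
    # Expected result:
    # {'SCC_P-39': {'sex': 'female', 'age': 65.0,
    #               'specimen_part': 'lower leg skin',
    #               'disease': 'squamous cell carcinoma'}}
    return harmonized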
|
data-refinery/data_refinery
|
foreman/data_refinery_foreman/surveyor/harmony.py
|
Python
|
bsd-3-clause
| 31,729
|
[
"Bioconductor"
] |
132cd7f433cebc7e465bb16855ef96158c3a8cde4a42e335a45723e27f7d551a
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Additional protein alphabets used in the SCOP database and PDB files.
See Bio.SCOP for more information about SCOP and Biopython's SCOP module.
"""
__docformat__ = "restructuredtext en"
protein_letters_3to1 = {
"00C": "C", "01W": "X", "02K": "A", "03Y": "C", "07O": "C",
"08P": "C", "0A0": "D", "0A1": "Y", "0A2": "K", "0A8": "C",
"0AA": "V", "0AB": "V", "0AC": "G", "0AD": "G", "0AF": "W",
"0AG": "L", "0AH": "S", "0AK": "D", "0AM": "A", "0AP": "C",
"0AU": "U", "0AV": "A", "0AZ": "P", "0BN": "F", "0C ": "C",
"0CS": "A", "0DC": "C", "0DG": "G", "0DT": "T", "0FL": "A",
"0G ": "G", "0NC": "A", "0SP": "A", "0U ": "U", "0YG": "YG",
"10C": "C", "125": "U", "126": "U", "127": "U", "128": "N",
"12A": "A", "143": "C", "175": "ASG", "193": "X", "1AP": "A",
"1MA": "A", "1MG": "G", "1PA": "F", "1PI": "A", "1PR": "N",
"1SC": "C", "1TQ": "W", "1TY": "Y", "1X6": "S", "200": "F",
"23F": "F", "23S": "X", "26B": "T", "2AD": "X", "2AG": "A",
"2AO": "X", "2AR": "A", "2AS": "X", "2AT": "T", "2AU": "U",
"2BD": "I", "2BT": "T", "2BU": "A", "2CO": "C", "2DA": "A",
"2DF": "N", "2DM": "N", "2DO": "X", "2DT": "T", "2EG": "G",
"2FE": "N", "2FI": "N", "2FM": "M", "2GT": "T", "2HF": "H",
"2LU": "L", "2MA": "A", "2MG": "G", "2ML": "L", "2MR": "R",
"2MT": "P", "2MU": "U", "2NT": "T", "2OM": "U", "2OT": "T",
"2PI": "X", "2PR": "G", "2SA": "N", "2SI": "X", "2ST": "T",
"2TL": "T", "2TY": "Y", "2VA": "V", "2XA": "C", "32S": "X",
"32T": "X", "3AH": "H", "3AR": "X", "3CF": "F", "3DA": "A",
"3DR": "N", "3GA": "A", "3MD": "D", "3ME": "U", "3NF": "Y",
"3QN": "K", "3TY": "X", "3XH": "G", "4AC": "N", "4BF": "Y",
"4CF": "F", "4CY": "M", "4DP": "W", "4F3": "GYG", "4FB": "P",
"4FW": "W", "4HT": "W", "4IN": "W", "4MF": "N", "4MM": "X",
"4OC": "C", "4PC": "C", "4PD": "C", "4PE": "C", "4PH": "F",
"4SC": "C", "4SU": "U", "4TA": "N", "4U7": "A", "56A": "H",
"5AA": "A", "5AB": "A", "5AT": "T", "5BU": "U", "5CG": "G",
"5CM": "C", "5CS": "C", "5FA": "A", "5FC": "C", "5FU": "U",
"5HP": "E", "5HT": "T", "5HU": "U", "5IC": "C", "5IT": "T",
"5IU": "U", "5MC": "C", "5MD": "N", "5MU": "U", "5NC": "C",
"5PC": "C", "5PY": "T", "5SE": "U", "5ZA": "TWG", "64T": "T",
"6CL": "K", "6CT": "T", "6CW": "W", "6HA": "A", "6HC": "C",
"6HG": "G", "6HN": "K", "6HT": "T", "6IA": "A", "6MA": "A",
"6MC": "A", "6MI": "N", "6MT": "A", "6MZ": "N", "6OG": "G",
"70U": "U", "7DA": "A", "7GU": "G", "7JA": "I", "7MG": "G",
"8AN": "A", "8FG": "G", "8MG": "G", "8OG": "G", "9NE": "E",
"9NF": "F", "9NR": "R", "9NV": "V", "A ": "A", "A1P": "N",
"A23": "A", "A2L": "A", "A2M": "A", "A34": "A", "A35": "A",
"A38": "A", "A39": "A", "A3A": "A", "A3P": "A", "A40": "A",
"A43": "A", "A44": "A", "A47": "A", "A5L": "A", "A5M": "C",
"A5N": "N", "A5O": "A", "A66": "X", "AA3": "A", "AA4": "A",
"AAR": "R", "AB7": "X", "ABA": "A", "ABR": "A", "ABS": "A",
"ABT": "N", "ACB": "D", "ACL": "R", "AD2": "A", "ADD": "X",
"ADX": "N", "AEA": "X", "AEI": "D", "AET": "A", "AFA": "N",
"AFF": "N", "AFG": "G", "AGM": "R", "AGT": "C", "AHB": "N",
"AHH": "X", "AHO": "A", "AHP": "A", "AHS": "X", "AHT": "X",
"AIB": "A", "AKL": "D", "AKZ": "D", "ALA": "A", "ALC": "A",
"ALM": "A", "ALN": "A", "ALO": "T", "ALQ": "X", "ALS": "A",
"ALT": "A", "ALV": "A", "ALY": "K", "AN8": "A", "AP7": "A",
"APE": "X", "APH": "A", "API": "K", "APK": "K", "APM": "X",
"APP": "X", "AR2": "R", "AR4": "E", "AR7": "R", "ARG": "R",
"ARM": "R", "ARO": "R", "ARV": "X", "AS ": "A", "AS2": "D",
"AS9": "X", "ASA": "D", "ASB": "D", "ASI": "D", "ASK": "D",
"ASL": "D", "ASM": "X", "ASN": "N", "ASP": "D", "ASQ": "D",
"ASU": "N", "ASX": "B", "ATD": "T", "ATL": "T", "ATM": "T",
"AVC": "A", "AVN": "X", "AYA": "A", "AYG": "AYG", "AZK": "K",
"AZS": "S", "AZY": "Y", "B1F": "F", "B1P": "N", "B2A": "A",
"B2F": "F", "B2I": "I", "B2V": "V", "B3A": "A", "B3D": "D",
"B3E": "E", "B3K": "K", "B3L": "X", "B3M": "X", "B3Q": "X",
"B3S": "S", "B3T": "X", "B3U": "H", "B3X": "N", "B3Y": "Y",
"BB6": "C", "BB7": "C", "BB8": "F", "BB9": "C", "BBC": "C",
"BCS": "C", "BE2": "X", "BFD": "D", "BG1": "S", "BGM": "G",
"BH2": "D", "BHD": "D", "BIF": "F", "BIL": "X", "BIU": "I",
"BJH": "X", "BLE": "L", "BLY": "K", "BMP": "N", "BMT": "T",
"BNN": "F", "BNO": "X", "BOE": "T", "BOR": "R", "BPE": "C",
"BRU": "U", "BSE": "S", "BT5": "N", "BTA": "L", "BTC": "C",
"BTR": "W", "BUC": "C", "BUG": "V", "BVP": "U", "BZG": "N",
"C ": "C", "C12": "TYG", "C1X": "K", "C25": "C", "C2L": "C",
"C2S": "C", "C31": "C", "C32": "C", "C34": "C", "C36": "C",
"C37": "C", "C38": "C", "C3Y": "C", "C42": "C", "C43": "C",
"C45": "C", "C46": "C", "C49": "C", "C4R": "C", "C4S": "C",
"C5C": "C", "C66": "X", "C6C": "C", "C99": "TFG", "CAF": "C",
"CAL": "X", "CAR": "C", "CAS": "C", "CAV": "X", "CAY": "C",
"CB2": "C", "CBR": "C", "CBV": "C", "CCC": "C", "CCL": "K",
"CCS": "C", "CCY": "CYG", "CDE": "X", "CDV": "X", "CDW": "C",
"CEA": "C", "CFL": "C", "CFY": "FCYG", "CG1": "G", "CGA": "E",
"CGU": "E", "CH ": "C", "CH6": "MYG", "CH7": "KYG", "CHF": "X",
"CHG": "X", "CHP": "G", "CHS": "X", "CIR": "R", "CJO": "GYG",
"CLE": "L", "CLG": "K", "CLH": "K", "CLV": "AFG", "CM0": "N",
"CME": "C", "CMH": "C", "CML": "C", "CMR": "C", "CMT": "C",
"CNU": "U", "CP1": "C", "CPC": "X", "CPI": "X", "CQR": "GYG",
"CR0": "TLG", "CR2": "GYG", "CR5": "G", "CR7": "KYG", "CR8": "HYG",
"CRF": "TWG", "CRG": "THG", "CRK": "MYG", "CRO": "GYG", "CRQ": "QYG",
"CRU": "EYG", "CRW": "ASG", "CRX": "ASG", "CS0": "C", "CS1": "C",
"CS3": "C", "CS4": "C", "CS8": "N", "CSA": "C", "CSB": "C",
"CSD": "C", "CSE": "C", "CSF": "C", "CSH": "SHG", "CSI": "G",
"CSJ": "C", "CSL": "C", "CSO": "C", "CSP": "C", "CSR": "C",
"CSS": "C", "CSU": "C", "CSW": "C", "CSX": "C", "CSY": "SYG",
"CSZ": "C", "CTE": "W", "CTG": "T", "CTH": "T", "CUC": "X",
"CWR": "S", "CXM": "M", "CY0": "C", "CY1": "C", "CY3": "C",
"CY4": "C", "CYA": "C", "CYD": "C", "CYF": "C", "CYG": "C",
"CYJ": "X", "CYM": "C", "CYQ": "C", "CYR": "C", "CYS": "C",
"CZ2": "C", "CZO": "GYG", "CZZ": "C", "D11": "T", "D1P": "N",
"D3 ": "N", "D33": "N", "D3P": "G", "D3T": "T", "D4M": "T",
"D4P": "X", "DA ": "A", "DA2": "X", "DAB": "A", "DAH": "F",
"DAL": "A", "DAR": "R", "DAS": "D", "DBB": "T", "DBM": "N",
"DBS": "S", "DBU": "T", "DBY": "Y", "DBZ": "A", "DC ": "C",
"DC2": "C", "DCG": "G", "DCI": "X", "DCL": "X", "DCT": "C",
"DCY": "C", "DDE": "H", "DDG": "G", "DDN": "U", "DDX": "N",
"DFC": "C", "DFG": "G", "DFI": "X", "DFO": "X", "DFT": "N",
"DG ": "G", "DGH": "G", "DGI": "G", "DGL": "E", "DGN": "Q",
"DHA": "S", "DHI": "H", "DHL": "X", "DHN": "V", "DHP": "X",
"DHU": "U", "DHV": "V", "DI ": "I", "DIL": "I", "DIR": "R",
"DIV": "V", "DLE": "L", "DLS": "K", "DLY": "K", "DM0": "K",
"DMH": "N", "DMK": "D", "DMT": "X", "DN ": "N", "DNE": "L",
"DNG": "L", "DNL": "K", "DNM": "L", "DNP": "A", "DNR": "C",
"DNS": "K", "DOA": "X", "DOC": "C", "DOH": "D", "DON": "L",
"DPB": "T", "DPH": "F", "DPL": "P", "DPP": "A", "DPQ": "Y",
"DPR": "P", "DPY": "N", "DRM": "U", "DRP": "N", "DRT": "T",
"DRZ": "N", "DSE": "S", "DSG": "N", "DSN": "S", "DSP": "D",
"DT ": "T", "DTH": "T", "DTR": "W", "DTY": "Y", "DU ": "U",
"DVA": "V", "DXD": "N", "DXN": "N", "DYG": "DYG", "DYS": "C",
"DZM": "A", "E ": "A", "E1X": "A", "ECC": "Q", "EDA": "A",
"EFC": "C", "EHP": "F", "EIT": "T", "ENP": "N", "ESB": "Y",
"ESC": "M", "EXB": "X", "EXY": "L", "EY5": "N", "EYS": "X",
"F2F": "F", "FA2": "A", "FA5": "N", "FAG": "N", "FAI": "N",
"FB5": "A", "FB6": "A", "FCL": "F", "FFD": "N", "FGA": "E",
"FGL": "G", "FGP": "S", "FHL": "X", "FHO": "K", "FHU": "U",
"FLA": "A", "FLE": "L", "FLT": "Y", "FME": "M", "FMG": "G",
"FMU": "N", "FOE": "C", "FOX": "G", "FP9": "P", "FPA": "F",
"FRD": "X", "FT6": "W", "FTR": "W", "FTY": "Y", "FVA": "V",
"FZN": "K", "G ": "G", "G25": "G", "G2L": "G", "G2S": "G",
"G31": "G", "G32": "G", "G33": "G", "G36": "G", "G38": "G",
"G42": "G", "G46": "G", "G47": "G", "G48": "G", "G49": "G",
"G4P": "N", "G7M": "G", "GAO": "G", "GAU": "E", "GCK": "C",
"GCM": "X", "GDP": "G", "GDR": "G", "GFL": "G", "GGL": "E",
"GH3": "G", "GHG": "Q", "GHP": "G", "GL3": "G", "GLH": "Q",
"GLJ": "E", "GLK": "E", "GLM": "X", "GLN": "Q", "GLQ": "E",
"GLU": "E", "GLX": "Z", "GLY": "G", "GLZ": "G", "GMA": "E",
"GMS": "G", "GMU": "U", "GN7": "G", "GND": "X", "GNE": "N",
"GOM": "G", "GPL": "K", "GS ": "G", "GSC": "G", "GSR": "G",
"GSS": "G", "GSU": "E", "GT9": "C", "GTP": "G", "GVL": "X",
"GYC": "CYG", "GYS": "SYG", "H2U": "U", "H5M": "P", "HAC": "A",
"HAR": "R", "HBN": "H", "HCS": "X", "HDP": "U", "HEU": "U",
"HFA": "X", "HGL": "X", "HHI": "H", "HHK": "AK", "HIA": "H",
"HIC": "H", "HIP": "H", "HIQ": "H", "HIS": "H", "HL2": "L",
"HLU": "L", "HMR": "R", "HOL": "N", "HPC": "F", "HPE": "F",
"HPH": "F", "HPQ": "F", "HQA": "A", "HRG": "R", "HRP": "W",
"HS8": "H", "HS9": "H", "HSE": "S", "HSL": "S", "HSO": "H",
"HTI": "C", "HTN": "N", "HTR": "W", "HV5": "A", "HVA": "V",
"HY3": "P", "HYP": "P", "HZP": "P", "I ": "I", "I2M": "I",
"I58": "K", "I5C": "C", "IAM": "A", "IAR": "R", "IAS": "D",
"IC ": "C", "IEL": "K", "IEY": "HYG", "IG ": "G", "IGL": "G",
"IGU": "G", "IIC": "SHG", "IIL": "I", "ILE": "I", "ILG": "E",
"ILX": "I", "IMC": "C", "IML": "I", "IOY": "F", "IPG": "G",
"IPN": "N", "IRN": "N", "IT1": "K", "IU ": "U", "IYR": "Y",
"IYT": "T", "IZO": "M", "JJJ": "C", "JJK": "C", "JJL": "C",
"JW5": "N", "K1R": "C", "KAG": "G", "KCX": "K", "KGC": "K",
"KNB": "A", "KOR": "M", "KPI": "K", "KST": "K", "KYQ": "K",
"L2A": "X", "LA2": "K", "LAA": "D", "LAL": "A", "LBY": "K",
"LC ": "C", "LCA": "A", "LCC": "N", "LCG": "G", "LCH": "N",
"LCK": "K", "LCX": "K", "LDH": "K", "LED": "L", "LEF": "L",
"LEH": "L", "LEI": "V", "LEM": "L", "LEN": "L", "LET": "X",
"LEU": "L", "LEX": "L", "LG ": "G", "LGP": "G", "LHC": "X",
"LHU": "U", "LKC": "N", "LLP": "K", "LLY": "K", "LME": "E",
"LMF": "K", "LMQ": "Q", "LMS": "N", "LP6": "K", "LPD": "P",
"LPG": "G", "LPL": "X", "LPS": "S", "LSO": "X", "LTA": "X",
"LTR": "W", "LVG": "G", "LVN": "V", "LYF": "K", "LYK": "K",
"LYM": "K", "LYN": "K", "LYR": "K", "LYS": "K", "LYX": "K",
"LYZ": "K", "M0H": "C", "M1G": "G", "M2G": "G", "M2L": "K",
"M2S": "M", "M30": "G", "M3L": "K", "M5M": "C", "MA ": "A",
"MA6": "A", "MA7": "A", "MAA": "A", "MAD": "A", "MAI": "R",
"MBQ": "Y", "MBZ": "N", "MC1": "S", "MCG": "X", "MCL": "K",
"MCS": "C", "MCY": "C", "MD3": "C", "MD6": "G", "MDH": "X",
"MDO": "ASG", "MDR": "N", "MEA": "F", "MED": "M", "MEG": "E",
"MEN": "N", "MEP": "U", "MEQ": "Q", "MET": "M", "MEU": "G",
"MF3": "X", "MFC": "GYG", "MG1": "G", "MGG": "R", "MGN": "Q",
"MGQ": "A", "MGV": "G", "MGY": "G", "MHL": "L", "MHO": "M",
"MHS": "H", "MIA": "A", "MIS": "S", "MK8": "L", "ML3": "K",
"MLE": "L", "MLL": "L", "MLY": "K", "MLZ": "K", "MME": "M",
"MMO": "R", "MMT": "T", "MND": "N", "MNL": "L", "MNU": "U",
"MNV": "V", "MOD": "X", "MP8": "P", "MPH": "X", "MPJ": "X",
"MPQ": "G", "MRG": "G", "MSA": "G", "MSE": "M", "MSL": "M",
"MSO": "M", "MSP": "X", "MT2": "M", "MTR": "T", "MTU": "A",
"MTY": "Y", "MVA": "V", "N ": "N", "N10": "S", "N2C": "X",
"N5I": "N", "N5M": "C", "N6G": "G", "N7P": "P", "NA8": "A",
"NAL": "A", "NAM": "A", "NB8": "N", "NBQ": "Y", "NC1": "S",
"NCB": "A", "NCX": "N", "NCY": "X", "NDF": "F", "NDN": "U",
"NEM": "H", "NEP": "H", "NF2": "N", "NFA": "F", "NHL": "E",
"NIT": "X", "NIY": "Y", "NLE": "L", "NLN": "L", "NLO": "L",
"NLP": "L", "NLQ": "Q", "NMC": "G", "NMM": "R", "NMS": "T",
"NMT": "T", "NNH": "R", "NP3": "N", "NPH": "C", "NPI": "A",
"NRP": "LYG", "NRQ": "MYG", "NSK": "X", "NTY": "Y", "NVA": "V",
"NYC": "TWG", "NYG": "NYG", "NYM": "N", "NYS": "C", "NZH": "H",
"O12": "X", "O2C": "N", "O2G": "G", "OAD": "N", "OAS": "S",
"OBF": "X", "OBS": "X", "OCS": "C", "OCY": "C", "ODP": "N",
"OHI": "H", "OHS": "D", "OIC": "X", "OIP": "I", "OLE": "X",
"OLT": "T", "OLZ": "S", "OMC": "C", "OMG": "G", "OMT": "M",
"OMU": "U", "ONE": "U", "ONH": "A", "ONL": "X", "OPR": "R",
"ORN": "A", "ORQ": "R", "OSE": "S", "OTB": "X", "OTH": "T",
"OTY": "Y", "OXX": "D", "P ": "G", "P1L": "C", "P1P": "N",
"P2T": "T", "P2U": "U", "P2Y": "P", "P5P": "A", "PAQ": "Y",
"PAS": "D", "PAT": "W", "PAU": "A", "PBB": "C", "PBF": "F",
"PBT": "N", "PCA": "E", "PCC": "P", "PCE": "X", "PCS": "F",
"PDL": "X", "PDU": "U", "PEC": "C", "PF5": "F", "PFF": "F",
"PFX": "X", "PG1": "S", "PG7": "G", "PG9": "G", "PGL": "X",
"PGN": "G", "PGP": "G", "PGY": "G", "PHA": "F", "PHD": "D",
"PHE": "F", "PHI": "F", "PHL": "F", "PHM": "F", "PIA": "AYG",
"PIV": "X", "PLE": "L", "PM3": "F", "PMT": "C", "POM": "P",
"PPN": "F", "PPU": "A", "PPW": "G", "PQ1": "N", "PR3": "C",
"PR5": "A", "PR9": "P", "PRN": "A", "PRO": "P", "PRS": "P",
"PSA": "F", "PSH": "H", "PST": "T", "PSU": "U", "PSW": "C",
"PTA": "X", "PTH": "Y", "PTM": "Y", "PTR": "Y", "PU ": "A",
"PUY": "N", "PVH": "H", "PVL": "X", "PYA": "A", "PYO": "U",
"PYX": "C", "PYY": "N", "QLG": "QLG", "QMM": "Q", "QPA": "C",
"QPH": "F", "QUO": "G", "R ": "A", "R1A": "C", "R4K": "W",
"RC7": "HYG", "RE0": "W", "RE3": "W", "RIA": "A", "RMP": "A",
"RON": "X", "RT ": "T", "RTP": "N", "S1H": "S", "S2C": "C",
"S2D": "A", "S2M": "T", "S2P": "A", "S4A": "A", "S4C": "C",
"S4G": "G", "S4U": "U", "S6G": "G", "SAC": "S", "SAH": "C",
"SAR": "G", "SBL": "S", "SC ": "C", "SCH": "C", "SCS": "C",
"SCY": "C", "SD2": "X", "SDG": "G", "SDP": "S", "SEB": "S",
"SEC": "A", "SEG": "A", "SEL": "S", "SEM": "S", "SEN": "S",
"SEP": "S", "SER": "S", "SET": "S", "SGB": "S", "SHC": "C",
"SHP": "G", "SHR": "K", "SIB": "C", "SIC": "DC", "SLA": "P",
"SLR": "P", "SLZ": "K", "SMC": "C", "SME": "M", "SMF": "F",
"SMP": "A", "SMT": "T", "SNC": "C", "SNN": "N", "SOC": "C",
"SOS": "N", "SOY": "S", "SPT": "T", "SRA": "A", "SSU": "U",
"STY": "Y", "SUB": "X", "SUI": "DG", "SUN": "S", "SUR": "U",
"SVA": "S", "SVV": "S", "SVW": "S", "SVX": "S", "SVY": "S",
"SVZ": "X", "SWG": "SWG", "SYS": "C", "T ": "T", "T11": "F",
"T23": "T", "T2S": "T", "T2T": "N", "T31": "U", "T32": "T",
"T36": "T", "T37": "T", "T38": "T", "T39": "T", "T3P": "T",
"T41": "T", "T48": "T", "T49": "T", "T4S": "T", "T5O": "U",
"T5S": "T", "T66": "X", "T6A": "A", "TA3": "T", "TA4": "X",
"TAF": "T", "TAL": "N", "TAV": "D", "TBG": "V", "TBM": "T",
"TC1": "C", "TCP": "T", "TCQ": "Y", "TCR": "W", "TCY": "A",
"TDD": "L", "TDY": "T", "TFE": "T", "TFO": "A", "TFQ": "F",
"TFT": "T", "TGP": "G", "TH6": "T", "THC": "T", "THO": "X",
"THR": "T", "THX": "N", "THZ": "R", "TIH": "A", "TLB": "N",
"TLC": "T", "TLN": "U", "TMB": "T", "TMD": "T", "TNB": "C",
"TNR": "S", "TOX": "W", "TP1": "T", "TPC": "C", "TPG": "G",
"TPH": "X", "TPL": "W", "TPO": "T", "TPQ": "Y", "TQI": "W",
"TQQ": "W", "TRF": "W", "TRG": "K", "TRN": "W", "TRO": "W",
"TRP": "W", "TRQ": "W", "TRW": "W", "TRX": "W", "TS ": "N",
"TST": "X", "TT ": "N", "TTD": "T", "TTI": "U", "TTM": "T",
"TTQ": "W", "TTS": "Y", "TY1": "Y", "TY2": "Y", "TY3": "Y",
"TY5": "Y", "TYB": "Y", "TYI": "Y", "TYJ": "Y", "TYN": "Y",
"TYO": "Y", "TYQ": "Y", "TYR": "Y", "TYS": "Y", "TYT": "Y",
"TYU": "N", "TYW": "Y", "TYX": "X", "TYY": "Y", "TZB": "X",
"TZO": "X", "U ": "U", "U25": "U", "U2L": "U", "U2N": "U",
"U2P": "U", "U31": "U", "U33": "U", "U34": "U", "U36": "U",
"U37": "U", "U8U": "U", "UAR": "U", "UCL": "U", "UD5": "U",
"UDP": "N", "UFP": "N", "UFR": "U", "UFT": "U", "UMA": "A",
"UMP": "U", "UMS": "U", "UN1": "X", "UN2": "X", "UNK": "X",
"UR3": "U", "URD": "U", "US1": "U", "US2": "U", "US3": "T",
"US5": "U", "USM": "U", "VAD": "V", "VAF": "V", "VAL": "V",
"VB1": "K", "VDL": "X", "VLL": "X", "VLM": "X", "VMS": "X",
"VOL": "X", "WCR": "GYG", "X ": "G", "X2W": "E", "X4A": "N",
"X9Q": "AFG", "XAD": "A", "XAE": "N", "XAL": "A", "XAR": "N",
"XCL": "C", "XCN": "C", "XCP": "X", "XCR": "C", "XCS": "N",
"XCT": "C", "XCY": "C", "XGA": "N", "XGL": "G", "XGR": "G",
"XGU": "G", "XPR": "P", "XSN": "N", "XTH": "T", "XTL": "T",
"XTR": "T", "XTS": "G", "XTY": "N", "XUA": "A", "XUG": "G",
"XX1": "K", "XXY": "THG", "XYG": "DYG", "Y ": "A", "YCM": "C",
"YG ": "G", "YOF": "Y", "YRR": "N", "YYG": "G", "Z ": "C",
"Z01": "A", "ZAD": "A", "ZAL": "A", "ZBC": "C", "ZBU": "U",
"ZCL": "F", "ZCY": "C", "ZDU": "U", "ZFB": "X", "ZGU": "G",
"ZHP": "N", "ZTH": "T", "ZU0": "T", "ZZJ": "A"}
|
ajing/SIFTS.py
|
SCOPData.py
|
Python
|
mit
| 17,113
|
[
"Biopython"
] |
edf5c3a8b37d30f67e779ec5310ac8b0eb60c2b8a57155f8a2c4c2af37af0a5b
|
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
import logging
log = logging.getLogger( __name__ )
category_name = 'Test 1440 Tool dependency missing env.sh'
category_description = 'Test script 1440 for detection of missing environment settings.'
package_repository_name = 'package_env_sh_1_0_1440'
tool_repository_name = 'filter_1440'
package_repository_description = 'Repository that should result in an env.sh file, but does not.'
tool_repository_description = 'Galaxy filtering tool.'
package_repository_long_description = '%s: %s' % ( package_repository_name, package_repository_description )
tool_repository_long_description = '%s: %s' % ( tool_repository_name, tool_repository_description )
'''
1. Create a tool dependency type repository that reliably fails to install successfully. This repository should define
an action that would have created an env.sh file on success, resulting in an env.sh file that should exist, but is missing.
2. Create a repository that defines a complex repository dependency on the repository created in step 1, with prior_install_required
and set_environment_for_install.
3. Attempt to install the second repository into a galaxy instance, verify that it is installed but missing tool dependencies.
'''
class TestMissingEnvSh( ShedTwillTestCase ):
'''Test installing a repository that should create an env.sh file, but does not.'''
def test_0000_initiate_users_and_category( self ):
"""Create necessary user accounts and login as an admin user."""
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
admin_user = self.test_db_util.get_user( common.admin_email )
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
admin_user_private_role = self.test_db_util.get_private_role( admin_user )
self.create_category( name=category_name, description=category_description )
self.logout()
self.login( email=common.test_user_2_email, username=common.test_user_2_name )
test_user_2 = self.test_db_util.get_user( common.test_user_2_email )
assert test_user_2 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_2_email
test_user_2_private_role = self.test_db_util.get_private_role( test_user_2 )
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = self.test_db_util.get_user( common.test_user_1_email )
assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
test_user_1_private_role = self.test_db_util.get_private_role( test_user_1 )
def test_0005_create_package_repository( self ):
'''Create and populate package_env_sh_1_0_1440.'''
'''
This is step 1 - Create repository package_env_sh_1_0_1440.
Create and populate a repository that is designed to fail a tool dependency installation. This tool dependency should
also define one or more environment variables.
'''
category = self.test_db_util.get_category_by_name( category_name )
repository = self.get_or_create_repository( name=package_repository_name,
description=package_repository_description,
long_description=package_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
# Upload the broken tool dependency definition to the package_env_sh_1_0_1440 repository.
self.upload_file( repository,
filename='1440_files/dependency_definition/tool_dependencies.xml',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate package_env_sh_1_0_1440 with a broken tool dependency definition.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0010_create_filter_repository( self ):
'''Create and populate filter_1440.'''
'''
This is step 2 - Create a repository that defines a complex repository dependency on the repository created in
step 1, with prior_install_required and set_environment_for_install.
'''
category = self.test_db_util.get_category_by_name( category_name )
repository = self.get_or_create_repository( name=tool_repository_name,
description=tool_repository_description,
long_description=tool_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=[] )
# Upload the filtering tool tarball to the filter_1440 repository.
self.upload_file( repository,
filename='filtering/filtering_2.2.0.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate filter_1440 with the filtering tool.',
strings_displayed=[],
strings_not_displayed=[] )
self.upload_file( repository,
filename='1440_files/complex_dependency/tool_dependencies.xml',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Populate filter_1440 with a dependency on package_env_sh_1_0_1440.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0015_install_filter_repository( self ):
'''Install the filter_1440 repository to galaxy.'''
'''
This is step 3 - Attempt to install the second repository into a galaxy instance, verify that it is installed but
missing tool dependencies.
'''
self.galaxy_logout()
self.galaxy_login( email=common.admin_email, username=common.admin_username )
post_submit_strings_displayed = [ 'filter_1440', 'package_env_sh_1_0_1440' ]
self.install_repository( 'filter_1440',
common.test_user_1_name,
category_name,
install_tool_dependencies=True,
post_submit_strings_displayed=post_submit_strings_displayed )
def test_0020_verify_missing_tool_dependency( self ):
'''Verify that the filter_1440 repository is installed and missing tool dependencies.'''
repository = self.test_db_util.get_installed_repository_by_name_owner( 'filter_1440', common.test_user_1_name )
strings_displayed = [ 'Missing tool dependencies' ]
self.display_installed_repository_manage_page( repository, strings_displayed=strings_displayed )
assert len( repository.missing_tool_dependencies ) == 1, 'filter_1440 should have a missing tool dependency, but does not.'
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/test/tool_shed/functional/test_1440_missing_env_sh_files.py
|
Python
|
gpl-3.0
| 7,970
|
[
"Galaxy"
] |
e7b767e6caeabf9df475143d801e36ade5091ab9a3d98d7919f50afc531744a8
|
# Copyright (c) 2013, GlaxoSmithKline Research & Development Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of GlaxoSmithKline Research & Development Ltd.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Jameed Hussain, July 2013
import sys
import re
from optparse import OptionParser
from rdkit import Chem
from rdkit.Chem import AllChem
def smiles_to_smarts(smi):
mol = Chem.MolFromSmiles(smi)
if (mol is None):
sys.stderr.write("Can't generate mol for: %s\n" % (smi))
return None
#change the isotope to 42
for atom in mol.GetAtoms():
atom.SetIsotope(42)
#print out the smiles - all the atom attributes will be fully specified
smarts = Chem.MolToSmiles(mol, isomericSmiles=True)
#remove the 42 isotope labels
smarts = re.sub(r'\[42', "[", smarts)
#now have a fully specified SMARTS - simples!
return smarts
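#added note (not part of the original script): the isotope round trip above produces
#a SMARTS in which every atom is fully specified (aromaticity, charge, H count), so,
#for example, smiles_to_smarts("c1ccccc1O") would yield a phenol pattern that does not
#hit aliphatic alcohols; the exact SMARTS text depends on the RDKit version in use.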
if __name__ == '__main__':
parser = OptionParser(
description="Program to apply transformations to a set of input molecules",
epilog="Example command: mol_transform.py -t TRANSFORM_FILE <SMILES_FILE\t\t"
"Format of smiles file: SMILES ID <space or comma separated>\t\t\t"
"Format of transform file: transform <one per line>\t\t\t"
"Output: SMILES,ID,Transfrom,Modified_SMILES")
parser.add_option('-f', '--file', action='store', dest='transform_file', type='string',
help='The file containing the transforms to apply to your input SMILES')
(options, args) = parser.parse_args()
#print options.transform_file
if options.transform_file is None:
print("Please specify the transform file.")
sys.exit(1)
smiles = []
#read the STDIN
for line in sys.stdin:
line = line.rstrip()
smi, id = re.split(r'\s|,', line)
#print smiles,id
smiles.append((smi, id))
#read the transform file
#all the transforms must come from BioDig to guarantee they have been cansmirk'ed
infile = open(options.transform_file, 'r')
print("Input_SMILES,ID,RG-Transform,RG-transformedSMILES")
for transform in infile:
transform = transform.rstrip()
#need to convert the smiles to smarts to get rid of any potential issues
lhs, rhs = transform.split(">>")
if (lhs == "[*:1][H]"):
lhs = "[*;!H0:1]"
else:
lhs = smiles_to_smarts(lhs)
if (rhs == "[*:1][H]"):
rhs = "[*:1]"
else:
rhs = smiles_to_smarts(rhs)
rdkit_transform = "%s>>%s" % (lhs, rhs)
rxn = AllChem.ReactionFromSmarts(rdkit_transform)
#rxn = AllChem.ReactionFromSmarts(transform)
for x in smiles:
mol = Chem.MolFromSmiles(x[0])
ps = rxn.RunReactants([mol])
products = set()
for y in range(len(ps)):
for z in range(len(ps[y])):
p = ps[y][z]
Chem.SanitizeMol(p)
products.add(Chem.MolToSmiles(p, isomericSmiles=True))
for p in products:
print("%s,%s,%s,%s" % (x[0], x[1], transform, p))
|
rdkit/rdkit
|
Contrib/mmpa/mol_transform.py
|
Python
|
bsd-3-clause
| 4,351
|
[
"RDKit"
] |
47d1a82a5c3c4844c2ab149d9a22dc86494096d6a67ac75305c93f1a125715d4
|
#!/usr/bin/env python
""" Create a DIRAC transfer/replicateAndRegister request to be executed
by the DMS Transfer Agent
"""
__RCSID__ = "$Id$"
import os
from hashlib import md5
import time
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.List import breakListIntoChunks
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[0],
__doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... DestSE LFN ...' % Script.scriptName,
'Arguments:',
' DestSE: Destination StorageElement',
' LFN: LFN or file containing a List of LFNs' ] ) )
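# Illustrative invocation (added comment; the destination SE name and LFN arguments are placeholders only):
#   dirac-dms-create-replication-request.py SOME-DEST-SE /some/vo/user/file1.dst lfn_list.txt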
Script.parseCommandLine( ignoreErrors = False )
monitor = False
args = Script.getPositionalArgs()
if len( args ) < 2:
Script.showHelp()
targetSE = args.pop( 0 )
lfns = []
for inputFileName in args:
if os.path.exists( inputFileName ):
inputFile = open( inputFileName, 'r' )
string = inputFile.read()
inputFile.close()
lfns.extend( [ lfn.strip() for lfn in string.splitlines() ] )
else:
lfns.append( inputFileName )
from DIRAC.Resources.Storage.StorageElement import StorageElement
import DIRAC
# Check if the provided SE is OK
se = StorageElement( targetSE )
if not se.valid:
print se.errorReason
print
Script.showHelp()
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.private.RequestValidator import gRequestValidator
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
reqClient = ReqClient()
fc = FileCatalog()
for lfnList in breakListIntoChunks( lfns, 100 ):
oRequest = Request()
oRequest.RequestName = "%s_%s" % ( md5( repr( time.time() ) ).hexdigest()[:16], md5( repr( time.time() ) ).hexdigest()[:16] )
replicateAndRegister = Operation()
replicateAndRegister.Type = 'ReplicateAndRegister'
replicateAndRegister.TargetSE = targetSE
res = fc.getFileMetadata( lfnList )
if not res['OK']:
print "Can't get file metadata: %s" % res['Message']
DIRAC.exit( 1 )
if res['Value']['Failed']:
print "Could not get the file metadata of the following, so skipping them:"
for fFile in res['Value']['Failed']:
print fFile
lfnMetadata = res['Value']['Successful']
for lfn in lfnMetadata:
rarFile = File()
rarFile.LFN = lfn
rarFile.Size = lfnMetadata[lfn]['Size']
rarFile.Checksum = lfnMetadata[lfn]['Checksum']
rarFile.GUID = lfnMetadata[lfn]['GUID']
rarFile.ChecksumType = 'ADLER32'
replicateAndRegister.addFile( rarFile )
oRequest.addOperation( replicateAndRegister )
isValid = gRequestValidator.validate( oRequest )
if not isValid['OK']:
print "Request is not valid: ", isValid['Message']
DIRAC.exit( 1 )
result = reqClient.putRequest( oRequest )
if result['OK']:
print "Request %d submitted successfully" % result['Value']
else:
print "Failed to submit Request: ", result['Message']
|
sposs/DIRAC
|
DataManagementSystem/scripts/dirac-dms-create-replication-request.py
|
Python
|
gpl-3.0
| 3,301
|
[
"DIRAC"
] |
218f3138fa32354accfa325f994eddf185944b5762fa4fe57cb981021381fac1
|
from datetime import datetime
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, Series, bdate_range, notna
@pytest.fixture(params=[True, False])
def raw(request):
return request.param
@pytest.fixture(
params=[
"triang",
"blackman",
"hamming",
"bartlett",
"bohman",
"blackmanharris",
"nuttall",
"barthann",
]
)
def win_types(request):
return request.param
@pytest.fixture(params=["kaiser", "gaussian", "general_gaussian", "exponential"])
def win_types_special(request):
return request.param
@pytest.fixture(
params=["sum", "mean", "median", "max", "min", "var", "std", "kurt", "skew"]
)
def arithmetic_win_operators(request):
return request.param
@pytest.fixture(params=["right", "left", "both", "neither"])
def closed(request):
return request.param
@pytest.fixture(params=[True, False])
def center(request):
return request.param
@pytest.fixture(params=[None, 1])
def min_periods(request):
return request.param
@pytest.fixture(params=[True, False])
def parallel(request):
"""parallel keyword argument for numba.jit"""
return request.param
@pytest.fixture(params=[True, False])
def nogil(request):
"""nogil keyword argument for numba.jit"""
return request.param
@pytest.fixture(params=[True, False])
def nopython(request):
"""nopython keyword argument for numba.jit"""
return request.param
@pytest.fixture(
params=[pytest.param("numba", marks=td.skip_if_no("numba", "0.46.0")), "cython"]
)
def engine(request):
"""engine keyword argument for rolling.apply"""
return request.param
@pytest.fixture(
params=[
pytest.param(("numba", True), marks=td.skip_if_no("numba", "0.46.0")),
("cython", True),
("cython", False),
]
)
def engine_and_raw(request):
"""engine and raw keyword arguments for rolling.apply"""
return request.param
# create the data only once as we are not setting it
def _create_consistency_data():
def create_series():
return [
Series(dtype=object),
Series([np.nan]),
Series([np.nan, np.nan]),
Series([3.0]),
Series([np.nan, 3.0]),
Series([3.0, np.nan]),
Series([1.0, 3.0]),
Series([2.0, 2.0]),
Series([3.0, 1.0]),
Series(
[5.0, 5.0, 5.0, 5.0, np.nan, np.nan, np.nan, 5.0, 5.0, np.nan, np.nan]
),
Series(
[
np.nan,
5.0,
5.0,
5.0,
np.nan,
np.nan,
np.nan,
5.0,
5.0,
np.nan,
np.nan,
]
),
Series(
[
np.nan,
np.nan,
5.0,
5.0,
np.nan,
np.nan,
np.nan,
5.0,
5.0,
np.nan,
np.nan,
]
),
Series(
[
np.nan,
3.0,
np.nan,
3.0,
4.0,
5.0,
6.0,
np.nan,
np.nan,
7.0,
12.0,
13.0,
14.0,
15.0,
]
),
Series(
[
np.nan,
5.0,
np.nan,
2.0,
4.0,
0.0,
9.0,
np.nan,
np.nan,
3.0,
12.0,
13.0,
14.0,
15.0,
]
),
Series(
[
2.0,
3.0,
np.nan,
3.0,
4.0,
5.0,
6.0,
np.nan,
np.nan,
7.0,
12.0,
13.0,
14.0,
15.0,
]
),
Series(
[
2.0,
5.0,
np.nan,
2.0,
4.0,
0.0,
9.0,
np.nan,
np.nan,
3.0,
12.0,
13.0,
14.0,
15.0,
]
),
Series(range(10)),
Series(range(20, 0, -2)),
]
def create_dataframes():
return [
DataFrame(),
DataFrame(columns=["a"]),
DataFrame(columns=["a", "a"]),
DataFrame(columns=["a", "b"]),
DataFrame(np.arange(10).reshape((5, 2))),
DataFrame(np.arange(25).reshape((5, 5))),
DataFrame(np.arange(25).reshape((5, 5)), columns=["a", "b", 99, "d", "d"]),
] + [DataFrame(s) for s in create_series()]
def is_constant(x):
values = x.values.ravel("K")
return len(set(values[notna(values)])) == 1
def no_nans(x):
return x.notna().all().all()
# data is a tuple(object, is_constant, no_nans)
data = create_series() + create_dataframes()
return [(x, is_constant(x), no_nans(x)) for x in data]
@pytest.fixture(params=_create_consistency_data())
def consistency_data(request):
"""Create consistency data"""
return request.param
def _create_arr():
"""Internal function to mock an array."""
arr = randn(100)
locs = np.arange(20, 40)
arr[locs] = np.NaN
return arr
def _create_rng():
"""Internal function to mock date range."""
rng = bdate_range(datetime(2009, 1, 1), periods=100)
return rng
def _create_series():
"""Internal function to mock Series."""
arr = _create_arr()
series = Series(arr.copy(), index=_create_rng())
return series
def _create_frame():
"""Internal function to mock DataFrame."""
rng = _create_rng()
return DataFrame(randn(100, 10), index=rng, columns=np.arange(10))
@pytest.fixture
def nan_locs():
"""Make a range as loc fixture."""
return np.arange(20, 40)
@pytest.fixture
def arr():
"""Make an array as fixture."""
return _create_arr()
@pytest.fixture
def frame():
"""Make mocked frame as fixture."""
return _create_frame()
@pytest.fixture
def series():
"""Make mocked series as fixture."""
return _create_series()
@pytest.fixture(params=[_create_series(), _create_frame()])
def which(request):
"""Turn parametrized which as fixture for series and frame"""
return request.param
|
TomAugspurger/pandas
|
pandas/tests/window/conftest.py
|
Python
|
bsd-3-clause
| 7,223
|
[
"Gaussian"
] |
760a854b65eed584908bc16570a7d7cd5eb4a646f74f574944aa3a0e9c54e5ce
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
'''
Start and stop dbus2 servers, consumers
Will handle remote run in the future
bootstrap_relay start/stop
bootstrap_producer start/stop
bootstrap_server start/stop
bootstrap_consumer start/stop, stop_scn, stop_after_secs
profile_relay
profile_consumer
zookeeper start/stop/wait_exist/wait_no_exist/wait_value/cmd
$SCRIPT_DIR/dbus2_driver.py -c zookeeper -o start --zookeeper_server_ports=${zookeeper_server_ports} --cmdline_props="tickTime=2000;initLimit=5;syncLimit=2" --zookeeper_cmds=<semicolon separate list of command> --zookeeper_path= zookeeper_value=
-. start, parse the port, generate the local file path in var/work/zookeeper_data/1, start, port default from 2181, generate log4j file
-. stop, find the process id, id is port - 2181 + 1, will stop all the processes
-. wait, query client and get the status
-. execute the cmd
'''
__version__ = "$Revision: 0.1 $"
__date__ = "$Date: 2010/11/16 $"
import distutils.dir_util
import fcntl
import os
import re
import sys
import threading
import time
from optparse import OptionParser, OptionGroup
import pexpect
from utility import *
# Global variables
options=None
server_host="localhost"
server_port="8080"
consumer_host="localhost"
consumer_port=8081
consumer_http_start_port=8081 # may need to be changed?
consumer_jmx_service_start_port=10000 # may need to be changed?
rmi_registry_port="1099"
log_file_pattern="%s_%s_%s_%s.%s.log" # testname, component, oper, time, pid
#stats_cmd_pattern='''jps | grep %%s | awk '{printf "open "$1"\\nbean com.linkedin.databus2:relayId=1408230481,type=OutboundTrafficTotalStats\\nget *"}' | java -jar %s/../lib/jmxterm-1.0-alpha-4-uber.jar -i -n''' % get_this_file_dirname()
stats_cmd_pattern='''jps -J-Xms5M -J-Xmx5M | grep %%s | awk '{printf "open "$1"\\nbean com.linkedin.databus2:relayId=1408230481,type=OutboundTrafficTotalStats\\nget *"}' | java -jar %s/../lib/jmxterm-1.0-alpha-4-uber.jar -i -n''' % get_this_file_dirname()
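# Added note: the pattern above greps the jps listing for the given process pattern, has awk emit the
# jmxterm commands "open <pid>", "bean ...OutboundTrafficTotalStats" and "get *", and pipes them into jmxterm.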
#config_sub_cmd='''dbus2_config_sub.py''' % get_this_file_dirname()
jmx_cli = None
def zookeeper_opers(oper):
if options.zookeeper_reset: zookeeper_opers_stop()
zookeeper_setup(oper)
globals()["zookeeper_opers_%s" % oper]()
def conf_and_deploy(ant_file):
''' to deploy a service only, substitute the cmd_line ops
explored-war build-app-conf change the conf deploy.only
'''
conf_and_deploy_1(ant_file)
def get_stats(pattern):
''' called to get stats for a process '''
pids = [x for x in sys_pipe_call_1("jps | grep %s" % pattern) if x]
if not pids: my_error("pid for component '%s' ('%s') was not found" % (options.component, pattern))
pid = pids[0].split()[0]
get_stats_1(pid, options.jmx_bean, options.jmx_attr)
def wait_event(func, option=None):
''' called to wait for '''
wait_event_1(func(), option)
def producer_wait_event(name, func):
''' called to wait for '''
producer_wait_event_1(name, func())
def shutdown(oper="normal"):
pid = send_shutdown(server_host, options.http_port or server_port, oper == "force")
dbg_print("shutdown pid = %s" % (pid))
ret = wait_for_condition('not process_exist(%s)' % (pid), 120)
def get_wait_timeout():
if options.timeout: return options.timeout
else: return 10
def pause_resume_consumer(oper):
global consumer_port
if options.component_id: consumer_port=find_open_port(consumer_host, consumer_http_start_port, options.component_id)
url = "http://%s:%s/pauseConsumer/%s" % (consumer_host, consumer_port, oper)
out = send_url(url).split("\n")[1]
dbg_print("out = %s" % out)
time.sleep(0.1)
def get_bootstrap_db_conn_info():
return ("bootstrap", "bootstrap", "bootstrap")
lock_tab_sql_file = tempfile.mkstemp()[1]
def producer_lock_tab(oper):
dbname, user, passwd = get_bootstrap_db_conn_info()
if oper == "lock" or oper == "save_file":
qry = '''
drop table if exists lock_stat_tab_1;
CREATE TABLE lock_stat_tab_1 (session_id int) ENGINE=InnoDB;
drop procedure if exists my_session_wait;
delimiter $$
create procedure my_session_wait()
begin
declare tmp int;
LOOP
select sleep(3600) into tmp;
END LOOP;
end$$
delimiter ;
set @cid = connection_id();
insert into lock_stat_tab_1 values (@cid);
commit;
lock table tab_1 read local;
call my_session_wait();
unlock tables;
'''
if oper == "save_file": open(lock_tab_sql_file, "w").write(qry)
else:
ret = mysql_exec_sql(qry, dbname, user, passwd)
print ret
#ret = cmd_call(cmd, options.timeout, "ERROR 2013", get_outf())
else:
ret = mysql_exec_sql_one_row("select session_id from lock_stat_tab_1", dbname, user, passwd)
dbg_print(" ret = %s" % ret)
if not ret: my_error("No lock yet")
session_id = ret[0]
qry = "kill %s" % session_id
ret = mysql_exec_sql(qry, dbname, user, passwd)
def producer_purge_log():
''' this one is deprecated. Use the cleaner instead '''
dbname, user, passwd = get_bootstrap_db_conn_info()
ret = mysql_exec_sql("select id from bootstrap_sources", dbname, user, passwd, None, True)
for srcid in [x[0] for x in ret]: # for each source
dbg_print("srcid = %s" % srcid)
applied_logid = mysql_exec_sql_one_row("select logid from bootstrap_applier_state", dbname, user, passwd)[0]
qry = "select logid from bootstrap_loginfo where srcid=%s and logid<%s order by logid limit %s" % (srcid, applied_logid, options.producer_log_purge_limit)
ret = mysql_exec_sql(qry, dbname, user, passwd, None, True)
logids_to_purge = [x[0] for x in ret]
qry = ""
for logid in logids_to_purge: qry += "drop table if exists log_%s_%s;" % (srcid, logid)
mysql_exec_sql(qry, dbname, user, passwd)
dbg_print("logids_to_purge = %s" % logids_to_purge)
mysql_exec_sql("delete from bootstrap_loginfo where srcid=%s and logid in (%s); commit" % (srcid, ",".join(logids_to_purge)), dbname, user, passwd)
# load the command dictionary
parser = OptionParser(usage="usage: %prog [options]")
execfile(os.path.join(get_this_file_dirname(),"driver_cmd_dict.py"))
allowed_opers=[]
for cmd in cmd_dict: allowed_opers.extend(cmd_dict[cmd].keys())
allowed_opers=[x for x in list(set(allowed_opers)) if x!="default"]
ct=None # global variable of the cmd thread, used to access the subprocess
def is_starting_component():
return options.operation != "default" and "%s_%s" % (options.component, options.operation) in cmd_ret_pattern
# need to check pid to determine if process is dead
# Thread and objects
class cmd_thread(threading.Thread):
''' execute one cmd in parallel, check output. there should be a timer. '''
def __init__ (self, cmd, ret_pattern=None, outf=None):
threading.Thread.__init__(self)
self.daemon=True # make it daemon, does not matter if use sys.exit()
self.cmd = cmd
self.ret_pattern = ret_pattern
self.outf = sys.stdout
if outf: self.outf = outf
self.thread_wait_end=False
self.thread_ret_ok=False
self.subp=None
self.ok_to_run=True
def run(self):
self.subp = subprocess_call_1(self.cmd)
if not self.subp:
self.thread_wait_end=True
return
# capture java call here
if options.capture_java_call: cmd_call_capture_java_call() # test only remote
# print the pid
if is_starting_component():
java_pid_str = "## java process pid = %s\n## hostname = %s\n" % (find_java_pid(self.subp.pid), host_name_global)
if java_pid_str: open(options.logfile,"a").write(java_pid_str)
self.outf.write(java_pid_str)
# no block
fd = self.subp.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while (self.ok_to_run): # for timeout case, must terminate the thread, need non block read
try: line = self.subp.stdout.readline()
except IOError, e:
time.sleep(0.1)
#dbg_print("IOError %s" % e)
continue
dbg_print("line = %s" % line)
if not line: break
self.outf.write("%s" % line)
if self.ret_pattern and self.ret_pattern.search(line):
self.thread_ret_ok=True
break
if not self.ret_pattern: self.thread_ret_ok=True # no pattern ok
self.thread_wait_end=True
# has pattern but not find, then not ok
#while (1): # read the rest and close the pipe
# try: line = self.subp.stdout.readline()
# except IOError, e:
# break
self.subp.stdout.close()
# close all the file descriptors
#os.close(1) # stdin
#os.close(2) # stdout
#os.close(3) # stderr
dbg_print("end of thread run")
def cmd_call_capture_java_call():
''' this one depends on the ivy path and ps length. may not work for all '''
if options.capture_java_call!="auto":
short_class_name=options.capture_java_call
else:
short_class_name=cmd_dict[options.component]["stop"].split("grep ")[-1].split(" ")[0]
ret = wait_for_condition('sys_pipe_call("ps -ef | grep java | grep -v grep | grep %s")' % short_class_name, 20)
java_ps_call = sys_pipe_call('ps -ef | grep "/java -d64" | grep -v grep | grep -v capture_java_call| grep %s' % short_class_name)
#java_ps_call = tmp_str
ivy_dir=get_ivy_dir() # espresso has different ivy
dbg_print("ivy_dir = %s, java_ps_call=%s" % (ivy_dir,java_ps_call))
view_root=get_view_root()
class_path_list = []
#pdb.set_trace()
for jar_path in java_ps_call.split("-classpath ")[-1].split(" com.linkedin")[0].split(":"): # classpath
if not jar_path: continue
if not re.search("(%s|%s)" % (ivy_dir,view_root),jar_path):
class_path_list.append(jar_path)
continue
if re.search(ivy_dir,jar_path):
sub_dir= ivy_dir
sub_str = "IVY_DIR"
if re.search(view_root,jar_path):
sub_dir= view_root
sub_str = "VIEW_ROOT"
class_path_list.append('\"%s\"' % re.sub(sub_dir,sub_str,jar_path))
class_path_list.sort()
class_path = "[\n %s\n]" % "\n ,".join(class_path_list)
class_name = java_ps_call.split(short_class_name)[0].split(" ")[-1] + short_class_name
#cmd_direct_call={
print '''
,"%s":
{
"class_path":%s
,"class_name":"%s"
}
''' % (options.component, class_path, class_name)
#}
#dbg_print("class_path = %s, class_name = %s" % (class_path, class_name))
#sys.exit(0)
def cmd_call(cmd, timeout, ret_pattern=None, outf=None):
''' return False if timed out. timeout is in secs '''
#if options.capture_java_call: cmd_call_capture_java_call() # test only remote
if options.operation=="stop" and options.component_id:
process_info = get_process_info()
key=get_process_info_key(options.component, options.component_id)
if key in process_info:
kill_cmd="kill -9"
if "stop" in cmd_dict[options.component]:
kill_cmd = cmd_dict[options.component]["stop"]
m = re.search("^.*(kill.*)\s*$",kill_cmd)
if m: kill_cmd = m.group(1)
sys_call("%s %s" % (kill_cmd, process_info[key]["pid"]))
return RetCode.OK
global ct
ct = cmd_thread(cmd, ret_pattern, outf)
ct.start()
sleep_cnt = 0
sleep_interval = 0.5
ret = RetCode.TIMEOUT
while (sleep_cnt * sleep_interval < timeout):
if ct.thread_wait_end or (ct.subp and not process_exist(ct.subp.pid)):
print "end"
if ct.thread_ret_ok: ret = RetCode.OK # include find pattern or no pattern given
else: ret= RetCode.ERROR
if options.save_process_id:
id = options.component_id and options.component_id or 0
save_process_info(options.component, str(id), None, options.logfile) # no port of cm
#if options.capture_java_call: cmd_call_capture_java_call()
break # done
time.sleep(sleep_interval)
sleep_cnt += 1
while (not ct.thread_wait_end):
ct.ok_to_run = False # terminate the thread in timeout case
time.sleep(0.1)
return ret
remote_component=None
remote_cmd_template='''ssh %s "bash -c 'source /export/home/eng/dzhang/bin/jdk6_env; cd %s; %s'"'''
def run_cmd_remote_setup():
print "!!! REMOTE RUN ENABLED !!!"
global remote_component
component_cnt = 0
# find the one in the cfg file, so multiple consumers must be in sequence
for section in remote_run_config:
if re.search(options.component, section):
remote_component=section
component_cnt +=1
if not options.component_id or component_cnt == options.component_id: break
if not remote_component: my_error("No section for component %s, id %s" % (options.component, options.component_id))
remote_component_properties = remote_run_config[remote_component]
set_remote_view_root(remote_component_properties["view_root"])
# create the remote var/work dir, may not be needed as the current view have them
#sys_call("ssh %s mkdir -p %s %s" % remote_run_config[remote_component]["host"], get_remote_work_dir(), get_remote_var_dir()
def run_cmd_remote(cmd):
ret = remote_cmd_template % (remote_run_config[remote_component]["host"], get_remote_view_root(), cmd)
return ret
run_cmd_added_options=[]
def run_cmd_add_option(cmd, option_name, value=None, check_exist=False):
global direct_java_call_jvm_args
dbg_print("option_name = %s, value = %s" % (option_name, value))
#option_name = option_name.split(".")[-1] # get rid of the options., which is for readability only
if option_name not in dir(options): my_error("invalid option name %s" % option_name)
global run_cmd_added_options
run_cmd_added_options.append(option_name)
if not getattr(options, option_name): return cmd # not such option
if not value: value = getattr(options,option_name)
dbg_print("after option_name = %s, value = %s" % (option_name, value))
#pdb.set_trace()
if check_exist:
full_path = file_exists(value)
if not full_path: my_error("File does not exists! %s" % value)
value=full_path
is_jvm_option = re.search("jvm_",option_name)
if isinstance(value, str) and value[0]!='"' and not (option_name in ["cmdline_args"] or is_jvm_option) and options.enable_direct_java_call: # do not quote the cmdline args
#value = value.replace(' ','\\ ') # escape the white space
value = '"%s"' % value # quote it
if options.enable_direct_java_call:
option_mapping = direct_java_call_option_mapping
option_prefix = ""
option_assign = ""
if is_jvm_option or option_name in direct_java_call_jvm_args: # must start with jvm
#pdb.set_trace()
direct_java_call_jvm_args[option_name][1]=value # overide the default value
dbg_print("direct_java_call_jvm_args[%s]=%s" % (option_name,direct_java_call_jvm_args[option_name]))
return cmd
else:
option_mapping = ant_call_option_mapping
option_prefix = "-D"
option_assign = "="
option_mapping_name = option_name # default same as the option name
if option_name in option_mapping: option_mapping_name = option_mapping[option_name]
option_str = option_prefix + option_mapping_name + option_assign + value
dbg_print("option_str = %s" % (option_str))
if not option_str: return cmd
cmd_split=cmd.split()
if options.enable_direct_java_call: # add option to the end
cmd += " %s" % option_str
else:
cmd_split.insert(len(cmd_split)-1,option_str) # here it handles insert before the last one
cmd = " ".join(cmd_split)
dbg_print("cmd = %s" % cmd)
return cmd
def run_cmd_add_log_file(cmd):
global options
if options.logfile: log_file = options.logfile
else: log_file= log_file_pattern % (options.testname, options.component, options.operation, time.strftime('%y%m%d_%H%M%S'), os.getpid())
#log_file = os.path.join(remote_run and get_remote_log_dir() or get_log_dir(), log_file)
# TODO: maybe we want to put the logs in the remote host
log_file = os.path.join(get_log_dir(), log_file)
dbg_print("log_file = %s" % log_file)
options.logfile = log_file
open(log_file,"w").write("TEST_NAME=%s\n" % options.testname)
# logging for all the command
cmd += " 2>&1 | tee -a %s" % log_file
return cmd
def run_cmd_get_return_pattern():
ret_pattern = None
pattern_key = "%s_%s" % (options.component, options.operation)
if pattern_key in cmd_ret_pattern: ret_pattern = cmd_ret_pattern[pattern_key]
if options.wait_pattern: ret_pattern = re.compile(options.wait_pattern)
dbg_print("ret_pattern = %s" % ret_pattern)
return ret_pattern
def run_cmd_setup():
if re.search("_consumer",options.component):
global consumer_host
if remote_run: consumer_host = remote_component_properties["host"]
else: consumer_host = "localhost"
dbg_print("consumer_host= %s" % consumer_host)
# need to remove from ant_call_option_mapping and run_cmd_add_option to avoid invalid option name
def run_cmd_add_config(cmd):
if options.operation in ["start","clean_log","default"]:
if options.enable_direct_java_call:
pass_down_options=direct_java_call_option_mapping.keys()
pass_down_options.extend(direct_java_call_jvm_args.keys())
#pass_down_options.extend(direct_java_call_jvm_args_ordered)
else:
pass_down_options=ant_call_option_mapping.keys()
#option_mapping = options.enable_direct_java_call and direct_java_call_option_mapping or ant_call_option_mapping
#if options.enable_direct_java_call: pass_down_options.append("jvm_args")
if options.config:
if not remote_run:
cmd = run_cmd_add_option(cmd, "config", options.config, check_exist=True) # check exist will figure out
else:
cmd = run_cmd_add_option(cmd, "config", os.path.join(get_remote_view_root(), options.config), check_exist=False)
run_cmd_view_root = remote_run and get_remote_view_root() or get_view_root()
#cmd = run_cmd_add_option(cmd, "dump_file", options.dump_file and os.path.join(run_cmd_view_root, options.dump_file) or None)
#cmd = run_cmd_add_option(cmd, "value_file", options.value_file and os.path.join(run_cmd_view_root, options.value_file) or None)
#cmd = run_cmd_add_option(cmd, "log4j_file", options.log4j_file and os.path.join(run_cmd_view_root, options.log4j_file) or None)
#cmd = run_cmd_add_option(cmd, "jvm_direct_memory_size")
#cmd = run_cmd_add_option(cmd, "jvm_max_heap_size")
#cmd = run_cmd_add_option(cmd, "jvm_gc_log")
#cmd = run_cmd_add_option(cmd, "jvm_args")
#cmd = run_cmd_add_option(cmd, "db_config_file")
#cmd = run_cmd_add_option(cmd, "cmdline_props")
# cmd = run_cmd_add_option(cmd, "filter_conf_file")
if options.checkpoint_dir:
if options.checkpoint_dir == "auto":
checkpoint_dir = os.path.join(get_work_dir(), "databus2_checkpoint_%s_%s" % (time.strftime('%y%m%d_%H%M%S'), os.getpid()))
else:
checkpoint_dir = options.checkpoint_dir
checkpoint_dir = os.path.join(run_cmd_view_root, checkpoint_dir)
cmd = run_cmd_add_option(cmd, "checkpoint_dir", checkpoint_dir)
# clear up the directory
if not options.checkpoint_keep and os.path.exists(checkpoint_dir): distutils.dir_util.remove_tree(checkpoint_dir)
# options can be changed during remote run
if remote_run:
remote_component_properties = remote_run_config[remote_component]
if not options.relay_host and "relay_host" in remote_component_properties: options.relay_host = remote_component_properties["relay_host"]
if not options.relay_port and "relay_port" in remote_component_properties: options.relay_port = remote_component_properties["relay_port"]
if not options.bootstrap_host and "bootstrap_host" in remote_component_properties: options.bootstrap_host = remote_component_properties["bootstrap_host"]
if not options.bootstrap_port and "bootstrap_port" in remote_component_properties: options.bootstrap_port = remote_component_properties["bootstrap_port"]
#cmd = run_cmd_add_option(cmd, "relay_host")
#cmd = run_cmd_add_option(cmd, "relay_port")
#cmd = run_cmd_add_option(cmd, "bootstrap_host")
#cmd = run_cmd_add_option(cmd, "bootstrap_port")
#cmd = run_cmd_add_option(cmd, "consumer_event_pattern")
if re.search("_consumer",options.component):
# next available port
if options.http_port: http_port = options.http_port
else: http_port = next_available_port(consumer_host, consumer_http_start_port)
#cmd = run_cmd_add_option(cmd, "http_port", http_port)
#cmd = run_cmd_add_option(cmd, "jmx_service_port", next_available_port(consumer_host, consumer_jmx_service_start_port))
# this will take care of the passdown, no need for run_cmd_add_directly
for option in [x for x in pass_down_options if x not in run_cmd_added_options]:
cmd = run_cmd_add_option(cmd, option)
if options.component=="espresso-relay": cmd+= " -d " # temp hack. TODO: remove
if options.enable_direct_java_call:
#cmd = re.sub("java -classpath","java -d64 -ea %s -classpath" % " ".join([x[0]+x[1] for x in [direct_java_call_jvm_args[y] for y in direct_java_call_jvm_args_ordered] if x[1]]) ,cmd) # d64 here
cmd = re.sub("java -classpath","java -d64 -ea %s -classpath" % " ".join([x[0]+x[1] for x in direct_java_call_jvm_args.values() if x[1]]) ,cmd) # d64 here
dbg_print("cmd = %s" % cmd)
return cmd
def run_cmd_add_ant_debug(cmd):
if re.search("^ant", cmd): cmd = re.sub("^ant","ant -d", cmd)
dbg_print("cmd = %s" % cmd)
return cmd
def run_cmd_save_cmd(cmd):
if not options.logfile: return
re_suffix = re.compile("\.\w+$")
if re_suffix.search(options.logfile): command_file = re_suffix.sub(".sh", options.logfile)
else: command_file = "%s.sh" % options.logfile
dbg_print("command_file = %s" % command_file)
open(command_file,"w").write("%s\n" % cmd)
def run_cmd_restart(cmd):
''' restart using a previous .sh file '''
if not options.logfile: return cmd
previous_run_sh_pattern = "%s_*.sh" % "_".join(options.logfile.split("_")[:-3])
import glob
previous_run_sh = glob.glob(previous_run_sh_pattern)
my_warning("No previous run files. Cannot restart. Start with new options.")
if not previous_run_sh: return cmd
previous_run_sh.sort()
run_sh = previous_run_sh[-1]
print "Use previous run file %s" % run_sh
lines = open(run_sh).readlines()
cmd = lines[0].split("2>&1")[0]
return cmd
def run_cmd_direct_java_call(cmd, component):
''' this needs to be consistent with the option handling in run_cmd_add_option;
currently 'ant -f ;' will mess up if there are extra options
'''
if not component in cmd_direct_call:
options.enable_direct_java_call = False # disable direct java call if classpath not given
return cmd
#if re.search("^ant", cmd): # only component in has class path given will be
#if True: # every thing
if re.search("ant ", cmd): # only component in has class path given will be
ivy_dir = get_ivy_dir()
view_root = get_view_root()
class_path_list=[]
for class_path in cmd_direct_call[component]["class_path"]:
if re.search("IVY_DIR",class_path):
class_path_list.append(re.sub("IVY_DIR", ivy_dir,class_path))
continue
if re.search("VIEW_ROOT",class_path):
class_path_list.append(re.sub("VIEW_ROOT", view_root,class_path))
if not os.path.exists(class_path_list[-1]): # some jars not in VIEW_ROOT, trigger before command
if "before_cmd" in cmd_direct_call[component]:
before_cmd = "%s; " % cmd_direct_call[component]["before_cmd"]
sys_call(before_cmd)
continue
class_path_list.append(class_path)
if options.check_class_path:
for jar_file in class_path_list:
if not os.path.exists(jar_file):
print "==WARNING NOT EXISTS: " + jar_file
new_jar_path = sys_pipe_call("find %s -name %s" % (ivy_dir, os.path.basename(jar_file))).split("\n")[0]
if new_jar_path:
print "==found " + new_jar_path
class_path_list[class_path_list.index(jar_file)] = new_jar_path
direct_call_cmd = "java -classpath %s %s" % (":".join(class_path_list), cmd_direct_call[component]["class_name"])
if re.search("ant .*;",cmd): cmd = re.sub("ant .*;","%s" % direct_call_cmd, cmd)
else: cmd = re.sub("ant .*$",direct_call_cmd, cmd)
dbg_print("cmd = %s" % cmd)
return cmd
def run_cmd():
if (options.component=="bootstrap_dbreset"): setup_rmi("stop")
if (not options.operation): options.operation="default"
if (not options.testname):
options.testname = "TEST_NAME" in os.environ and os.environ["TEST_NAME"] or "default"
if (options.operation not in cmd_dict[options.component]):
my_error("%s is not one of the command for %s. Valid values are %s " % (options.operation, options.component, cmd_dict[options.component].keys()))
# handle the different connection string for hudson
if (options.component=="db_relay" and options.db_config_file):
options.db_config_file = db_config_change(options.db_config_file)
if (options.component=="test_bootstrap_producer" and options.operation=="lock_tab"):
producer_lock_tab("save_file")
cmd = cmd_dict[options.component][options.operation]
# cmd can be a function call
if isinstance(cmd, list):
if not callable(cmd[0]): my_error("First element should be function")
cmd[0](*tuple(cmd[1:])) # call the function
return
if options.enable_direct_java_call: cmd = run_cmd_direct_java_call(cmd, options.component)
if remote_run: run_cmd_remote_setup()
if options.ant_debug: cmd = run_cmd_add_ant_debug(cmd) # need ant debug call or not
cmd = run_cmd_add_config(cmd) # handle config file
if remote_run: cmd = run_cmd_remote(cmd)
ret_pattern = run_cmd_get_return_pattern()
if options.restart: cmd = run_cmd_restart(cmd)
cmd = run_cmd_add_log_file(cmd)
if is_starting_component(): run_cmd_save_cmd(cmd)
ret = cmd_call(cmd, options.timeout, ret_pattern, get_outf())
if options.operation == "stop": time.sleep(0.1)
return ret
def setup_rmi_cond(oper):
rmi_up = isOpen(server_host, rmi_registry_port)
dbg_print("rmi_up = %s" % rmi_up)
if oper=="start": return rmi_up
if oper=="stop": return not rmi_up
def setup_rmi(oper="start"):
''' start rmi registry if not already started '''
ret = RetCode.OK
dbg_print("oper = %s" % oper)
rmi_up = isOpen(server_host, rmi_registry_port)
rmi_str = "ant -f sitetools/rmiscripts/build.xml; ./rmiservers/bin/rmiregistry%s" % oper
if oper=="stop": sys_call(kill_cmd_template % "RegistryImpl") # make sure it stops
if (oper=="start" and not rmi_up) or (oper=="stop" and rmi_up):
sys_call(rmi_str)
# wait for rmi
ret = wait_for_condition('setup_rmi_cond("%s")' % oper)
def setup_env():
#setup_rmi()
pass
def get_outf():
outf = sys.stdout
if options.output: outf = open(options.output,"w")
return outf
def start_jmx_cli():
global jmx_cli
if not jmx_cli:
jmx_cli = pexpect.spawn("java -jar %s/../lib/jmxterm-1.0-alpha-4-uber.jar" % get_this_file_dirname())
jmx_cli.expect("\$>")
def stop_jmx_cli():
global jmx_cli
if jmx_cli:
jmx_cli.sendline("quit")
jmx_cli.expect(pexpect.EOF)
jmx_cli = None
def jmx_cli_cmd(cmd):
if not jmx_cli: start_jmx_cli()
dbg_print("jmx cmd = %s" % cmd)
jmx_cli.sendline(cmd)
jmx_cli.expect("\$>")
ret = jmx_cli.before.split("\r\n")[1:]
dbg_print("jmx cmd ret = %s" % ret)
return ret
def get_stats_1(pid, jmx_bean, jmx_attr):
outf = get_outf()
start_jmx_cli()
jmx_cli_cmd("open %s" % pid)
ret = jmx_cli_cmd("beans")
if jmx_bean=="list":
stat_re = re.compile("^com.linkedin.databus2:")
stats = [x for x in ret if stat_re.search(x)]
outf.write("%s\n" % "\n".join(stats))
return
stat_re = re.compile("^com.linkedin.databus2:.*%s$" % jmx_bean)
stats = [x for x in ret if stat_re.search(x)]
if not stats: # stats not find
stat_re = re.compile("^com.linkedin.databus2:")
stats = [x.split("=")[-1].rstrip() for x in ret if stat_re.search(x)]
my_error("Possible beans are %s" % stats)
full_jmx_bean = stats[0]
jmx_cli_cmd("bean %s" % full_jmx_bean)
if jmx_attr == "all": jmx_attr = "*"
ret = jmx_cli_cmd("get %s" % jmx_attr)
outf.write("%s\n" % "\n".join(ret))
stop_jmx_cli()
def run_testcase(testcase):
dbg_print("testcase = %s" % testcase)
os.chdir(get_testcase_dir())
if not re.search("\.test$", testcase): testcase += ".test"
if not os.path.exists(testcase):
my_error("Test case %s does not exist" % testcase)
dbg_print("testcase = %s" % testcase)
ret = sys_call("/bin/bash %s" % testcase)
os.chdir(view_root)
return ret
def get_ebuf_inbound_total_maxStreamWinScn(host, port, option=None):
url_template = "http://%s:%s/containerStats/inbound/events/total"
if option == "bootstrap":
url_template = "http://%s:%s/clientStats/bootstrap/events/total"
return http_get_field(url_template, host, port, "maxSeenWinScn")
def consumer_reach_maxStreamWinScn(maxWinScn, host, port, option=None):
consumerMaxWinScn = get_ebuf_inbound_total_maxStreamWinScn(host, port, option)
dbg_print("consumerMaxWinScn = %s, maxWinScn = %s" % (consumerMaxWinScn, maxWinScn))
return consumerMaxWinScn >= maxWinScn
def producer_reach_maxStreamWinScn(name, maxWinScn):
''' select max of all the sources '''
dbname, user, passwd = get_bootstrap_db_conn_info()
tab_name = (name == "producer") and "bootstrap_producer_state" or "bootstrap_applier_state"
qry = "select max(windowscn) from %s " % tab_name
ret = mysql_exec_sql_one_row(qry, dbname, user, passwd)
producerMaxWinScn = ret and ret[0] or 0 # 0 if no rows
dbg_print("producerMaxWinScn = %s, maxWinScn = %s" % (producerMaxWinScn, maxWinScn))
return producerMaxWinScn >= maxWinScn
def wait_for_condition(cond, timeout=60, sleep_interval = 0.1):
    ''' Wait for a certain cond; cond is a string expression (typically a function call) that is eval()'d each poll.
    This cannot live in a utility module because eval() needs to see the cond functions defined here. '''
dbg_print("cond = %s" % cond)
sleep_cnt = 0
ret = RetCode.TIMEOUT
while (sleep_cnt * sleep_interval < timeout):
if eval(cond):
ret = RetCode.OK
break
time.sleep(sleep_interval)
sleep_cnt += 1
return ret
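# A minimal usage sketch (illustrative only): poll until the local rmi registry
# port is open, giving up after 5 seconds. isOpen, server_host, rmi_registry_port,
# RetCode and my_error are all defined elsewhere in this script.
#   ret = wait_for_condition('isOpen(server_host, rmi_registry_port)', timeout=5)
#   if ret == RetCode.TIMEOUT: my_error("condition never became true")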
def producer_wait_event_1(name, timeout):
''' options.relay_host should be set for remote_run '''
relay_host = options.relay_host and options.relay_host or server_host
relay_port = options.relay_port and options.relay_port or server_port
if options.sleep_before_wait: time.sleep(options.sleep_before_wait)
maxWinScn = get_ebuf_inbound_total_maxStreamWinScn(relay_host, relay_port)
dbg_print("maxWinScn = %s, timeout = %s" % (maxWinScn, timeout))
ret = wait_for_condition('producer_reach_maxStreamWinScn("%s", %s)' % (name,maxWinScn), timeout)
    if ret == RetCode.TIMEOUT: print "Timed out waiting for %s to reach maxWinScn %s" % (name, maxWinScn)
return ret
def send_shutdown(host, port, force=False):
''' use kill which is much faster '''
#url_template = "http://%s:%s/operation/shutdown"
url_template = "http://%s:%s/operation/getpid"
pid = http_get_field(url_template, host, port, "pid")
force_str = force and "-9" or ""
sys_call("kill %s %s" % (force_str,pid))
return pid
def wait_event_1(timeout, option=None):
relay_host = options.relay_host and options.relay_host or server_host
relay_port = options.relay_port and options.relay_port or server_port
maxWinScn = get_ebuf_inbound_total_maxStreamWinScn(relay_host, relay_port)
print "Wait maxWinScn:%s" % maxWinScn
dbg_print("maxWinScn = %s, timeout = %s" % (maxWinScn, timeout))
# consumer host is defined already
global consumer_port
if options.component_id: consumer_port=find_open_port(consumer_host, consumer_http_start_port, options.component_id)
if options.http_port: consumer_port = options.http_port
ret = wait_for_condition('consumer_reach_maxStreamWinScn(%s, "%s", %s, "%s")' % (maxWinScn, consumer_host, consumer_port, option and option or ""), timeout)
    if ret == RetCode.TIMEOUT: print "Timed out waiting for consumer to reach maxWinScn %s" % maxWinScn
if options.sleep_after_wait: time.sleep(options.sleep_after_wait)
return ret
def conf_and_deploy_1_find_dir_name(ant_target, screen_out):
found_target = False
copy_file_re = re.compile("\[copy\] Copying 1 file to (.*)")
for line in screen_out:
if not found_target and line == ant_target: found_target = True
if found_target:
dbg_print("line = %s" % line)
m = copy_file_re.search(line)
if m: return m.group(1)
return None
def conf_and_deploy_1_find_extservice(dir_name):
extservice_re = re.compile("extservices.*\.springconfig")
flist = os.listdir(dir_name)
flist.sort(reverse=True)
for fname in flist:
if extservice_re.search(fname): return os.path.join(dir_name, fname)
return None
def conf_and_deploy_1_find_extservice_name(ant_target, screen_out):
found_target = False
copy_file_re = re.compile("\[copy\] Copying (\S*) to ")
for line in screen_out:
if not found_target and line == ant_target: found_target = True
if found_target:
dbg_print("line = %s" % line)
m = copy_file_re.search(line)
if m: return m.group(1)
return None
from xml.dom.minidom import parse
from xml.dom.minidom import Element
def conf_and_deploy_1_add_conf(file_name):
dom1 = parse(file_name)
map_element=[x for x in dom1.getElementsByTagName("map")][0]
for prop in options.extservice_props:
#props = prop.split(";")
props = prop.split("=")
len_props = len(props)
if len_props not in (2,3):
print "WARNING: prop %s is not a valid setting. IGNORED" % prop
continue
is_top_level= (len_props == 2)
find_keys=[x for x in dom1.getElementsByTagName("entry") if x.attributes["key"].value == props[0]]
dbg_print("find_keys = %s" % find_keys)
if not find_keys:
print "WARNING: prop %s part %s is not in file %s. " % (prop, props[0], file_name)
if is_top_level: # only add when is top level
print "WARNING: prop %s part %s is added to file %s. " % (prop, props[0], file_name)
new_entry=Element("entry")
new_entry.setAttribute("key", props[0])
new_entry.setAttribute("value", props[1])
map_element.appendChild(new_entry)
continue
keyNode = find_keys[0]
if is_top_level:
keyNode.attributes["value"].value=props[-1]
continue
find_props= [x for x in keyNode.getElementsByTagName("prop") if x.attributes["key"].value == props[1]]
dbg_print("find_props = %s" % find_props)
if not find_props:
print "WARNING: prop %s part %s is not in file %s. IGNORED" % (prop, props[1], file_name)
continue
find_props[0].childNodes[0].nodeValue=props[-1]
open(file_name,"w").write(dom1.toxml())
def conf_and_deploy_1(ant_file):
''' to deploy a service only, do exploded-war first,
then build-app-conf substitute the extservice_props into the extservice file
the deploy.only.noconf to deploy the service using the new conf
'''
#pdb.set_trace()
#out = sys_pipe_call("ant -f %s build-app-conf" % (ant_file))
#dir_name = conf_and_deploy_1_find_dir_name("build-app-conf:", out.split("\n"))
tmp_file = tempfile.mkstemp()[1]
cmd = "ant -f %s exploded-war 2>&1 | tee %s" % (ant_file, tmp_file)
ret = cmd_call(cmd, 60, re.compile("BUILD SUCCESSFUL"))
cmd = "ant -f %s build-app-conf 2>&1 | tee %s" % (ant_file, tmp_file)
ret = cmd_call(cmd, 5, re.compile("BUILD SUCCESSFUL"))
dir_name = conf_and_deploy_1_find_dir_name("build-app-conf:", [x.rstrip() for x in open(tmp_file).readlines()])
dbg_print("dir_name = %s" % dir_name)
if dir_name: extservice_file_name = conf_and_deploy_1_find_extservice(dir_name)
if not dir_name or not extservice_file_name: my_error("No extservice file in dir %s" % dir_name)
#out = sys_pipe_call("ant -f %s -d build-app-conf" % (ant_file))
#extservice_file_name = conf_and_deploy_1_find_extservice_name("build-app-conf:", out.split("\n"))
dbg_print("extservice_file_name = %s" % extservice_file_name)
if options.extservice_props:
tmp_files = [extservice_file_name]
tmp_files = save_copy([extservice_file_name])
dbg_print("new_files = %s" % tmp_files)
conf_and_deploy_1_add_conf(extservice_file_name)
#shutil.copy(tmp_files[0], extservice_file_name)
# do the deploy
#pdb.set_trace()
cmd = "ant -f %s deploy.only.noconf 2>&1 | tee %s" % (ant_file, tmp_file)
ret = cmd_call(cmd, 60, re.compile("BUILD SUCCESSFUL"))
zookeeper_cmd=None
zookeeper_server_ports=None
zookeeper_server_dir=None
zookeeper_server_ids=None
#possible_ivy_dir=[os.path.join(os.environ["HOME"],".ivy2/lin-cache/ivy-cache"),os.path.join(os.environ["HOME"],".ivy2/lin-cache"),"/ivy/.ivy2/ivy-cache","/ivy/.ivy2"]
#possible_ivy_dir=[os.path.join(os.environ["HOME"],".m2/repository"), os.path.join(os.environ["HOME"],".ivy2/lin-cache/"),"/ivy/.ivy2"]
def get_ivy_dir():
for ivy_dir in possible_ivy_dir:
if os.path.exists(ivy_dir): break
    if not os.path.exists(ivy_dir): raise RuntimeError("no usable ivy directory found in %s" % possible_ivy_dir)
return ivy_dir
def zookeeper_setup(oper):
''' may need to do a find later. find $HOME/.ivy2/lin-cache -name zookeeper-3.3.0.jar '''
global zookeeper_cmd, zookeeper_server_ports, zookeeper_server_dir, zookeeper_server_ids, zookeeper_classpath
#possible_ivy_home_dir=[os.path.join(os.environ["HOME"],".ivy2/lin-cache/"),"/ivy/.ivy2"]
possible_ivy_home_dir=[os.path.join(os.environ["HOME"],".m2/repository/"), os.path.join(os.environ["HOME"],".ivy2/lin-cache/"),"/ivy/.ivy2"]
ivy_dir = get_ivy_dir()
zookeeper_class= (oper=="start") and "org.apache.zookeeper.server.quorum.QuorumPeerMain" or "org.apache.zookeeper.ZooKeeperMain"
log4j_file=os.path.join(get_view_root(),"integration-test/config/zookeeper-log4j2file.properties")
dbg_print("zookeeper_classpath = %s" % zookeeper_classpath)
if not "zookeeper_classpath" in globals():
zookeeper_classpath="IVY_DIR/org/apache/zookeeper/zookeeper/3.3.0/zookeeper-3.3.0.jar:IVY_DIR/log4j/log4j/2.17.1/log4j-2.17.1.jar"
if re.search("IVY_DIR",zookeeper_classpath): zookeeper_classpath=re.sub("IVY_DIR", ivy_dir,zookeeper_classpath)
if re.search("VIEW_ROOT",zookeeper_classpath): zookeeper_classpath=re.sub("VIEW_ROOT", view_root,zookeeper_classpath)
run_cmd_add_option("", "config", options.config, check_exist=True) # just add the jvm args
zookeeper_cmd="java -d64 -Xmx512m -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.port=%%s -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dlog4j2.configuration=file://%s %s -cp %s %s" % (log4j_file, " ".join([x[0]+x[1] for x in direct_java_call_jvm_args.values() if x[1]]), zookeeper_classpath, zookeeper_class)
dbg_print("zookeeper_cmd=%s" % (zookeeper_cmd))
zookeeper_server_ports= options.zookeeper_server_ports and options.zookeeper_server_ports or "localhost:2181"
zookeeper_server_dir=os.path.join(get_work_dir(),"zookeeper_data")
dbg_print("zookeeper_server_dir=%s" % (zookeeper_server_dir))
#zookeeper_server_ids= options.zookeeper_server_ids and [int(x) for x in options.zookeeper_server_ids.split(",")] or range(1,len(zookeeper_server_ports.split(","))+1)
zookeeper_server_ids= options.zookeeper_server_ids and [int(x) for x in options.zookeeper_server_ids.split(",")] or range(len(zookeeper_server_ports.split(",")))
dbg_print("zookeeper_server_ids=%s" % (zookeeper_server_ids))
def zookeeper_opers_start_create_conf(zookeeper_server_ports_split):
zookeeper_num_servers = len(zookeeper_server_ports_split)
zookeeper_server_conf_files=[]
zookeeper_internal_port_1_start = 2800
zookeeper_internal_port_2_start = 3800
    # override the default config
server_conf={"tickTime":2000,"initLimit":5,"syncLimit":2,"maxClientCnxns":0}
if options.cmdline_props:
for pair in options.cmdline_props.split(";"):
(k, v) = pair.split("=")
if k in server_conf: server_conf[k] = v
# get the server
zookeeper_internal_conf=""
for k in server_conf: zookeeper_internal_conf+="%s=%s\n" % (k, server_conf[k])
dbg_print("zookeeper_internal_conf = %s" % zookeeper_internal_conf)
#for server_id in range(1,zookeeper_num_servers+1):
for server_id in range(zookeeper_num_servers):
zookeeper_host = zookeeper_server_ports_split[server_id].split(":")[0]
zookeeper_internal_port_1 = zookeeper_internal_port_1_start + server_id
zookeeper_internal_port_2 = zookeeper_internal_port_2_start + server_id
if zookeeper_num_servers>1:
zookeeper_internal_conf += "server.%s=%s:%s:%s\n" % (server_id, zookeeper_host, zookeeper_internal_port_1, zookeeper_internal_port_2)
dbg_print("zookeeper_internal_conf = %s" % zookeeper_internal_conf)
#for server_id in range(1,zookeeper_num_servers+1):
for server_id in range(zookeeper_num_servers):
if server_id not in zookeeper_server_ids: continue
conf_file = os.path.join(zookeeper_server_dir,"conf_%s" % server_id)
dataDir=os.path.join(zookeeper_server_dir,str(server_id))
zookeeper_port = zookeeper_server_ports_split[server_id].split(":")[1]
conf_file_p = open(conf_file, "w")
conf_file_p.write("clientPort=%s\n" % zookeeper_port)
conf_file_p.write("dataDir=%s\n" % dataDir)
conf_file_p.write("%s\n" % zookeeper_internal_conf)
conf_file_p.close()
dbg_print("==conf file %s: \n %s" % (conf_file, open(conf_file).readlines()))
zookeeper_server_conf_files.append(conf_file)
return zookeeper_server_conf_files
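# For reference, a generated conf_<server_id> file looks roughly like this
# (values come from server_conf plus any --cmdline_props overrides; the
# server.N lines are only emitted when more than one server port is configured):
#   clientPort=2181
#   dataDir=<work_dir>/zookeeper_data/0
#   tickTime=2000
#   initLimit=5
#   syncLimit=2
#   maxClientCnxns=0
#   server.0=localhost:2800:3800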
def zookeeper_opers_start_create_dirs(zookeeper_server_ports_split):
#for server_id in range(1,len(zookeeper_server_ports_split)+1):
for server_id in range(len(zookeeper_server_ports_split)):
if server_id not in zookeeper_server_ids: continue
current_server_dir=os.path.join(zookeeper_server_dir,str(server_id))
dbg_print("current_server_dir = %s" % current_server_dir)
if os.path.exists(current_server_dir):
if not options.zookeeper_reset: continue
distutils.dir_util.remove_tree(current_server_dir)
try: distutils.dir_util.mkpath(current_server_dir)
except Exception as e: print ("ERROR: Exception = %s" % e)
my_id_file=os.path.join(current_server_dir, "myid")
dbg_print("my_id_file = %s" % my_id_file)
open(my_id_file,"w").write("%s\n" % server_id)
def zookeeper_opers_start():
zookeeper_server_ports_split = zookeeper_server_ports.split(",")
zookeeper_opers_start_create_dirs(zookeeper_server_ports_split)
conf_files = zookeeper_opers_start_create_conf(zookeeper_server_ports_split)
cnt = 0
for conf_file in conf_files:
# no log file for now
#cmd = run_cmd_add_log_file(cmd)
search_str=len(conf_files)>1 and "My election bind port" or "binding to port"
cmd = "%s %s" % (zookeeper_cmd % (int(options.zookeeper_jmx_start_port) + cnt), conf_file)
cmd = run_cmd_add_log_file(cmd)
ret = cmd_call(cmd, 60, re.compile(search_str))
cnt +=1
def zookeeper_opers_stop():
# may be better to use pid, but somehow it is not in the datadir
sys_call(kill_cmd_template % "QuorumPeerMain")
def zookeeper_opers_wait_for_exist():
pass
def zookeeper_opers_wait_for_nonexist():
pass
def zookeeper_opers_wait_for_value():
pass
def zookeeper_opers_cmd():
if not options.zookeeper_cmds:
print "No zookeeper_cmds given"
return
splitted_cmds = ";".join(["echo %s" % x for x in options.zookeeper_cmds.split(";")])
sys_call("(%s) | %s -server %s" % (splitted_cmds, zookeeper_cmd, zookeeper_server_ports))
def main(argv):
# default
global options
parser.add_option("-n", "--testname", action="store", dest="testname", default=None, help="A test name identifier")
parser.add_option("-c", "--component", action="store", dest="component", default=None, choices=cmd_dict.keys(),
help="%s" % cmd_dict.keys())
parser.add_option("-o", "--operation", action="store", dest="operation", default=None, choices=allowed_opers,
help="%s" % allowed_opers)
parser.add_option("--wait_pattern", action="store", dest="wait_pattern", default=None,
help="the pattern to wait for the operation to finish")
parser.add_option("", "--output", action="store", dest="output", default=None,
help="Output file name. Default to stdout")
parser.add_option("", "--logfile", action="store", dest="logfile", default=None,
help="log file for both stdout and stderror. Default auto generated")
parser.add_option("","--timeout", action="store", type="long", dest="timeout", default=600,
help="Time out in secs before waiting for the success pattern. [default: %default]")
parser.add_option("", "--save_process_id", action="store_true", dest="save_process_id", default = False,
help="Store the process id if set. [default: %default]")
parser.add_option("", "--restart", action="store_true", dest="restart", default = False,
help="Restart the process using previos config if set. [default: %default]")
jvm_group = OptionGroup(parser, "jvm options", "")
jvm_group.add_option("", "--jvm_direct_memory_size", action="store", dest="jvm_direct_memory_size", default = None,
help="Set the jvm direct memory size. e.g., 2048m. Default using the one driver_cmd_dict.")
jvm_group.add_option("", "--jvm_max_heap_size", action="store", dest="jvm_max_heap_size", default = None,
help="Set the jvm max heap size. e.g., 1024m. Default using the one in driver_cmd_dict.")
jvm_group.add_option("", "--jvm_min_heap_size", action="store", dest="jvm_min_heap_size", default = None,
help="Set the jvm min heap size. e.g., 1024m. Default using the one in driver_cmd_dict.")
jvm_group.add_option("", "--jvm_args", action="store", dest="jvm_args", default = None,
help="Other jvm args. e.g., '-Xms24m -Xmx50m'")
jvm_group.add_option("", "--jvm_gc_log", action="store", dest="jvm_gc_log", default = None,
help="Enable gc and give jvm gc log file")
test_case_group = OptionGroup(parser, "Testcase options", "")
test_case_group.add_option("", "--testcase", action="store", dest="testcase", default = None,
help="Run a test. Report error. Default no test")
stats_group = OptionGroup(parser, "Stats options", "")
stats_group.add_option("","--jmx_bean", action="store", dest="jmx_bean", default="list",
help="jmx bean to get. [default: %default]")
stats_group.add_option("","--jmx_att", action="store", dest="jmx_attr", default="all",
help="jmx attr to get. [default: %default]")
remote_group = OptionGroup(parser, "Remote options", "")
remote_group.add_option("", "--remote_run", action="store_true", dest="remote_run", default = False,
help="Run remotely based on config file. Default False")
remote_group.add_option("", "--remote_deploy", action="store_true", dest="remote_deploy", default = False,
help="Deploy the source tree to the remote machine based on config file. Default False")
remote_group.add_option("", "--remote_config_file", action="store", dest="remote_config_file", default = None,
help="Remote config file")
zookeeper_group = OptionGroup(parser, "Zookeeper options", "")
zookeeper_group.add_option("", "--zookeeper_server_ports", action="store", dest="zookeeper_server_ports", default = None,
help="comma separated zookeeper ports, used to start/stop/connect to zookeeper")
zookeeper_group.add_option("", "--zookeeper_path", action="store", dest="zookeeper_path", default = None,
help="the zookeeper path to wait for")
zookeeper_group.add_option("", "--zookeeper_value", action="store", dest="zookeeper_value", default = None,
help="zookeeper path value")
zookeeper_group.add_option("", "--zookeeper_cmds", action="store", dest="zookeeper_cmds", default = None,
help="cmds to send to zookeeper client. Comma separated ")
zookeeper_group.add_option("", "--zookeeper_server_ids", action="store", dest="zookeeper_server_ids", default = None,
help="Comma separated list of server to start. If not given, start the number of servers in zookeeper_server_ports. This is used to start server on multiple machines ")
zookeeper_group.add_option("", "--zookeeper_jmx_start_port", action="store", dest="zookeeper_jmx_start_port", default = 27960,
help="Starting port for jmx")
zookeeper_group.add_option("", "--zookeeper_reset", action="store_true", dest="zookeeper_reset", default = False,
help="If true recreate server dir, otherwise start from existing server dir")
debug_group = OptionGroup(parser, "Debug options", "")
debug_group.add_option("-d", "--debug", action="store_true", dest="debug", default = False,
help="debug mode")
debug_group.add_option("--ant_debug", action="store_true", dest="ant_debug", default = False,
help="ant debug mode")
debug_group.add_option("--capture_java_call", action="store", dest="capture_java_call", default = None,
help="capture the java call. give the class name or auto")
debug_group.add_option("--enable_direct_java_call", action="store_true", dest="enable_direct_java_call", default = True,
#debug_group.add_option("--enable_direct_java_call", action="store_true", dest="enable_direct_java_call", default = False,
help="enable direct java call. ")
debug_group.add_option("--check_class_path", action="store_true", dest="check_class_path", default = True,
help="check if class path exists. ")
debug_group.add_option("", "--sys_call_debug", action="store_true", dest="enable_sys_call_debug", default = False,
help="debug sys call")
# load local options
#execfile(os.path.join(get_this_file_dirname(),"driver_local_options.py"))
#pdb.set_trace()
parser.add_option_group(jvm_group)
parser.add_option_group(config_group)
parser.add_option_group(other_option_group)
parser.add_option_group(test_case_group)
parser.add_option_group(stats_group)
parser.add_option_group(remote_group)
parser.add_option_group(zookeeper_group)
parser.add_option_group(debug_group)
(options, args) = parser.parse_args()
set_debug(options.debug)
set_sys_call_debug(options.enable_sys_call_debug)
dbg_print("options = %s args = %s" % (options, args))
arg_error=False
if not options.component and not options.testcase and not options.remote_deploy:
print("\n!!!Please give component!!!\n")
arg_error=True
if arg_error:
parser.print_help()
parser.exit()
if afterParsingHook: afterParsingHook(options) # the hook to call after parsing, change options
setup_env()
if (not options.testname):
options.testname = "TEST_NAME" in os.environ and os.environ["TEST_NAME"] or "default"
os.environ["TEST_NAME"]= options.testname;
if (not "WORK_SUB_DIR" in os.environ):
os.environ["WORK_SUB_DIR"] = "log"
if (not "LOG_SUB_DIR" in os.environ):
os.environ["LOG_SUB_DIR"] = "log"
setup_work_dir()
if options.testcase:
ret = run_testcase(options.testcase)
        if ret!=0: ret=1 # work around an issue where a ret of 256 becomes 0 after sys.exit
my_exit(ret)
if options.remote_deploy or options.remote_run:
if options.remote_config_file:
parse_config(options.remote_config_file)
if options.remote_deploy:
sys_call_debug_begin()
ret = do_remote_deploy()
sys_call_debug_end()
my_exit(ret)
sys_call_debug_begin()
ret = run_cmd()
sys_call_debug_end()
my_exit(ret)
if __name__ == "__main__":
main(sys.argv[1:])
|
apache/helix
|
helix-core/src/main/scripts/integration-test/script/dds_driver.py
|
Python
|
apache-2.0
| 53,904
|
[
"ESPResSo"
] |
74461a11b33e157e9cb2164cc957d99fdd68c20bb578799cff7a932c258645c1
|
from User import *
from College import College
import urllib2
from bs4 import BeautifulSoup
colleges = [
"Princeton University",
"Harvard University",
"Yale University",
"Columbia University",
"Stanford University",
"University of Chicago",
"Duke University",
"Massachusetts Institute of Technology",
"University of Pennsylvania",
"California Institute of Technology",
"Dartmouth College",
"Johns Hopkins University",
"Northwestern University",
"Brown University",
"Washington University in St. Louis",
"Cornell University",
"Vanderbilt University",
"Rice University",
"University of Notre Dame",
"Emory University",
"Georgetown University",
"University of California--Berkeley",
"Carnegie Mellon University",
"University of California--Los Angeles",
"University of Southern California",
"University of Virginia",
"Wake Forest University",
"Tufts University",
"University of Michigan--Ann Arbor",
"University of North Carolina--Chapel Hill",
"Boston College",
"Brandeis University",
"College of William and Mary",
"Georgia Institute of Technology",
"Case Western Reserve University",
"Pennsylvania State University--University Park",
"University of California--Davis",
"University of California--San Diego",
"Boston University",
"Lehigh University",
"Rensselaer Polytechnic Institute",
"University of California--Santa Barbara",
"University of Illinois--Urbana-Champaign",
"University of Wisconsin--Madison",
"University of Miami",
"Yeshiva University",
"Northeastern University",
"University of California--Irvine",
"University of Florida",
"George Washington University",
"Ohio State University--Columbus",
"Tulane University",
"University of Texas--Austin",
"University of Washington",
"Fordham University",
"Pepperdine University",
"University of Connecticut",
"Southern Methodist University",
"University of Georgia",
"Brigham Young University--Provo",
"Clemson University",
"Syracuse University",
"University of Maryland--College Park",
"University of Pittsburgh",
"Worcester Polytechnic Institute",
"Purdue University--West Lafayette",
"Rutgers, the State University of New Jersey--New Brunswick",
"Texas A&M University--College Station",
"University of Minnesota--Twin Cities",
"Virginia Tech",
"Michigan State University",
"University of Iowa",
"American University",
"Baylor University",
"Clark University",
"Indiana University--Bloomington",
"Stevens Institute of Technology",
"Stony Brook University--SUNY",
"Texas Christian University",
"University of Vermont",
"SUNY College of Environmental Science and Forestry",
"University of Alabama",
"University of California--Santa Cruz",
"University of Colorado--Boulder",
"University of Tulsa",
"Auburn University",
"Colorado School of Mines",
"Binghamton University--SUNY",
"Drexel University",
"University of Missouri",
"University of New Hampshire",
"Iowa State University",
"Loyola University Chicago",
"North Carolina State University--Raleigh",
"St. Louis University",
"University of Kansas",
"University of Nebraska--Lincoln",
"University of Oklahoma",
"Illinois Institute of Technology",
"University at Buffalo--SUNY",
"University of Oregon",
"University of California--Riverside",
"University of Dayton",
"University of South Carolina",
"University of St. Thomas",
"University of the Pacific",
"Michigan Technological University",
"University of San Francisco",
"University of Arizona",
"University of Kentucky",
"The Catholic University of America",
"Clarkson University",
"Colorado State University",
"DePaul University",
"Duquesne University",
"Temple University",
"University of Utah",
"Missouri University of Science & Technology",
"Polytechnic Institute of New York University",
"Hofstra University",
"Kansas State University",
"Louisiana State University--Baton Rouge",
"New School",
"Ohio University",
"University of Cincinnati",
"George Mason University",
"Arizona State University",
"Howard University",
"Mississippi State University",
"Oklahoma State University",
"New Jersey Institute of Technology",
"University of Mississippi",
"Adelphi University",
"Illinois State University",
"San Diego State University",
"St. John's University",
"University of Alabama--Birmingham",
"University of Rhode Island",
"University of Hawaii--Manoa",
"University of Maryland--Baltimore County",
"University of Massachusetts--Lowell",
"Maryville University of St. Louis",
"Texas Tech University",
"University of Idaho",
"University of La Verne",
"University of Louisville",
"University of Wyoming",
"Florida Institute of Technology",
"University of Maine",
"Virginia Commonwealth University",
"University of Central Florida",
"University of South Florida",
"Azusa Pacific University",
"Pace University",
"St. Mary's University of Minnesota",
"University of North Dakota",
"Biola University",
"Indiana University of Pennsylvania",
"Northern Illinois University",
"Southern Illinois University--Carbondale",
"Andrews University",
"Ball State University",
"Bowling Green State University",
"Central Michigan University",
"Edgewood College",
"Immaculata University",
"Louisiana Tech University",
"New Mexico State University",
"North Dakota State University",
"University of Colorado--Denver",
"University of Houston",
"University of North Carolina--Greensboro",
"University of South Dakota",
"Utah State University",
"Kent State University",
"Montana State University",
"South Dakota State University",
"University of Missouri--Kansas City",
"University of Montana",
"University of North Carolina--Charlotte",
"Ashland University",
"Barry University",
"Benedictine University",
"Bowie State University",
"Cardinal Stritch University",
"Clark Atlanta University",
"Cleveland State University",
"East Tennessee State University",
"Florida A&M University",
"Florida Atlantic University",
"Florida International University",
"Georgia Southern University",
"Georgia State University",
"Idaho State University",
"Indiana State University",
"Indiana University-Purdue University--Indianapolis",
"Jackson State University",
"Lamar University",
"Lynn University",
"Middle Tennessee State University",
"Morgan State University",
"National-Louis University",
"North Carolina A&T State University",
"Northern Arizona University",
"Nova Southeastern University",
"Oakland University",
"Old Dominion University",
"Our Lady of the Lake University",
"Portland State University",
"Regent University",
"Sam Houston State University",
"South Carolina State University",
"Spalding University",
"Tennessee State University",
"Texas A&M University--Commerce",
"Texas A&M University--Corpus Christi",
"Texas A&M University--Kingsville",
"Texas Southern University",
"Texas Woman's University",
"Trevecca Nazarene University",
"Trinity International University",
"University of Akron",
"University of Alaska--Fairbanks",
"University of Arkansas--Little Rock",
"University of Louisiana--Lafayette",
"University of Massachusetts--Boston",
"University of Memphis",
"University of Missouri--St. Louis",
"University of Nebraska--Omaha",
"University of Nevada--Las Vegas",
"University of New Orleans",
"University of Northern Colorado",
"University of North Texas",
"University of South Alabama",
"University of Southern Mississippi",
"University of Texas--Arlington",
"University of Texas--El Paso",
"University of Texas--San Antonio",
"University of Toledo",
"University of West Florida",
"University of Wisconsin--Milwaukee",
"Wayne State University",
"Wichita State University",
"Wright State University",
"Argosy University",
"California Institute of Integral Studies",
"Capella University",
"Colorado Technical University",
"Northcentral University",
"Trident University International",
"Union Institute and University",
"University of Phoenix",
"Walden University",
"Wilmington University",
]
colleges_with_sat = [
"Albion College", 480, 640, 460, 620,
"Alfred University", 500, 600, 480, 580,
"Allegheny College", 560, 650, 540, 650,
"American University", 570, 670, 590, 690,
"Amherst College", 670, 760, 670, 770,
"Arizona State University", 500, 630, 480, 610,
"Auburn University", 550, 650, 530, 630,
"Austin College", 570, 670, 560, 660,
"Babson College", 610, 700, 550, 640,
"Bard College", 600, 670, 650, 710,
"Barnard College", 620, 710, 630, 730,
"Bates College", 630, 720, 630, 710,
"Baylor University", 570, 670, 550, 660,
"Bellarmine University", 490, 600, 500, 600,
"Beloit College", 560, 690, 550, 710,
"Bennington College", 560, 660, 620, 720,
"Bentley University", 590, 670, 530, 620,
"Berea College", 530, 630, 540, 660,
"Birmingham-Southern College", 510, 610, 500, 610,
"Boston College", 640, 740, 620, 710,
"Boston University", 610, 720, 570, 670,
"Bowdoin College", 670, 760, 670, 760,
"Brandeis University", 620, 740, 610, 710,
"Brigham Young University", 590, 690, 580, 690,
"Brown University", 660, 770, 660, 760,
"Bryant University", 540, 640, 510, 600,
"Bryn Mawr College", 590, 720, 600, 710,
"Bucknell University", 620, 710, 580, 680,
"California Institute of Technology", 770, 800, 720, 780,
"California Polytechnic State University", 580, 680, 540, 650,
"Calvin College", 540, 690, 520, 670,
"Carleton College", 670, 760, 670, 760,
"Carnegie Mellon University", 690, 790, 630, 730,
"Case Western Reserve University", 660, 760, 600, 700,
"Catawba College", 450, 550, 430, 540,
"Centenary College of Louisiana", 430, 780, 490, 620,
"Centre College", 580, 700, 560, 690,
"Chapman University", 560, 660, 550, 650,
"Claremont McKenna College", 660, 760, 650, 760,
"Clark University", 530, 640, 530, 640,
"Clarkson University", 560, 660, 500, 610,
"Clemson University", 590, 680, 560, 660,
"Coe College", 500, 650, 490, 610,
"Colby College", 630, 720, 610, 710,
"Colgate University", 640, 720, 630, 730,
"College of Charleston", 560, 650, 550, 650,
"College of the Atlantic", 540, 680, 610, 690,
"College of the Holy Cross", 620, 680, 600, 700,
"College of the Ozarks", 440, 560, 510, 610,
"College of William and Mary", 620, 720, 360, 740,
"Colorado College", 610, 710, 630, 720,
"Colorado School of Mines", 630, 720, 570, 670,
"Colorado State University", 520, 640, 500, 620,
"Columbia University", 700, 790, 690, 780,
"Connecticut College", 620, 700, 620, 710,
"Cooper Union", 610, 770, 620, 710,
"Cornell College", 540, 690, 530, 680,
"Cornell University", 670, 780, 640, 740,
"Creighton University", 540, 660, 530, 630,
"Dartmouth College", 680, 780, 670, 780,
"Davidson College", 640, 720, 630, 720,
"Denison University", 600, 680, 600, 720,
"DePaul University", 510, 630, 530, 640,
"DePauw University", 550, 680, 530, 650,
"Dickinson College", 600, 690, 590, 690,
"Drew University", 480, 600, 490, 620,
"Drexel University", 580, 680, 540, 640,
"Duke University", 690, 790, 670, 760,
"Duquesne University", 530, 610, 510, 590,
"Earlham College", 530, 660, 550, 700,
"Eckerd College", 500, 610, 510, 620,
"Elon University", 560, 660, 570, 660,
"Emerson College", 560, 650, 590, 680,
"Emory University", 660, 760, 620, 710,
"Fairfield University", 550, 630, 530, 620,
"Fisk University", 400, 570, 410, 540,
"Flagler College", 520, 580, 540, 600,
"Florida State University", 560, 640, 560, 640,
"Fordham University", 590, 680, 570, 670,
"Franklin & Marshall College", 610, 710, 600, 690,
"Olin College of Engineering", 730, 790, 700, 780,
"Furman University", 560, 660, 550, 650,
"George Mason University", 530, 630, 520, 620,
"Georgetown University", 660, 750, 650, 750,
"Georgia Institute of Technology", 660, 760, 600, 700,
"Gettysburg College", 610, 670, 600, 690,
"Gonzaga University", 550, 650, 540, 640,
"Goucher College", 480, 620, 510, 640,
"Grinnell College", 650, 750, 630, 750,
"Grove City College", 550, 680, 550, 680,
"Guilford College", 490, 660, 480, 620,
"Gustavus Adolphus College", 530, 660, 550, 680,
"Hamilton College", 650, 740, 650, 740,
"Hampden-Sydney College", 510, 610, 490, 620,
"Hampshire College", 540, 650, 600, 700,
"Hampton University", 478, 593, 480, 594,
"Hanover College", 490, 600, 500, 620,
"Harvard College", 710, 790, 700, 800,
"Harvey Mudd College", 740, 800, 680, 770,
"Haverford College", 660, 760, 650, 760,
"Hendrix College", 540, 670, 550, 680,
"Hillsdale College", 570, 690, 630, 740,
"Hiram College", 440, 570, 440, 560,
"Hobart and William Smith Colleges", 570, 660, 570, 650,
"Hofstra University", 540, 630, 530, 630,
"Hollins University", 460, 590, 500, 650,
"Howard University", 480, 580, 490, 580,
"Illinois Institute of Technology", 610, 710, 510, 630,
"Illinois Wesleyan University", 570, 700, 540, 650,
"Indiana University- Bloomington", 510, 620, 540, 600,
"Indiana University of Pennsylvania", 450, 540, 440, 530,
"Iowa State University", 530, 680, 460, 620,
"James Madison University", 530, 630, 520, 620,
"Johns Hopkins University", 670, 770, 640, 740,
"Juniata College", 540, 650, 530, 650,
"Kalamazoo College", 530, 650, 540, 670,
"Kenyon College", 610, 680, 630, 730,
"Knox College", 580, 690, 570, 720,
"Lafayette College", 610, 710, 580, 680,
"Lake Forest College", 530, 670, 530, 620,
"Lawrence University", 580, 710, 580, 720,
"Lehigh University", 630, 730, 570, 670,
"Lewis & Clark College", 590, 670, 600, 700,
"Louisiana State University", 520, 630, 500, 620,
"Loyola College in Maryland", 540, 630, 540, 630,
"Loyola Marymount University", 560, 660, 550, 640,
"Loyola University New Orleans", 510, 620, 530, 650,
"Loyola University of Chicago", 540, 650, 550, 650,
"Lynchburg College", 450, 550, 450, 550,
"Macalester College", 640, 730, 630, 740,
"Manhattanville College", 450, 560, 450, 560,
"Marist College", 550, 640, 530, 620,
"Marlboro College", 520, 650, 560, 730,
"Marquette University", 550, 650, 520, 630,
"Massachusetts Institute of Technology", 740, 800, 670, 770,
"McGill University", 630, 730, 630, 730,
"Mercer University", 540, 640, 530, 630,
"Miami University", 550, 660, 530, 630,
"Michigan State University", 540, 680, 430, 590,
"Michigan Technological University", 520, 650, 580, 680,
"Middlebury College", 640, 740, 630, 740,
"Mills College", 510, 620, 540, 660,
"Millsaps College", 520, 620, 498, 630,
"Monmouth University", 490, 580, 470, 560,
"Moravian College", 470, 550, 480, 590,
"Mount Holyoke College", 610, 700, 610, 720,
"Muhlenberg College", 560, 680, 560, 680,
"New College of Florida", 570, 670, 620, 740,
"New York University", 630, 740, 620, 710,
"North Carolina State University", 580, 670, 550, 630,
"Northeastern University", 650, 740, 630, 720,
"Northwestern University", 700, 780, 680, 760,
"Oberlin College", 620, 720, 650, 740,
"Oglethorpe University", 510, 610, 530, 620,
"Ohio Northern University", 540, 660, 510, 620,
"Ohio State University", 610, 710, 540, 650,
"Ohio University", 490, 610, 480, 590,
"Ohio Wesleyan University", 520, 640, 510, 620,
"Penn State University", 560, 670, 530, 630,
"Pepperdine University", 570, 680, 550, 650,
"Pitzer College", 590, 680, 580, 710,
"Pomona College", 690, 780, 690, 790,
"Princeton University", 710, 800, 700, 790,
"Providence College", 520, 630, 530, 640,
"Purdue University", 550, 620, 510, 620,
"Quinnipiac University", 510, 610, 490, 580,
"Randolph-Macon College", 490, 590 , 490, 590,
"Randolph-Macon Woman's College", 490, 610 , 480, 610,
"Reed College", 620, 720 , 660, 750,
"Rensselaer Polytechnic Institute", 670, 770 , 620, 720,
"Rhodes College", 580, 680 , 590, 690,
"Rice University", 700, 780 , 660, 750,
"Rider University", 470, 570 , 460, 560,
"Ripon College", 510, 680 , 490, 630,
"Rochester Institute of Technology", 570, 680 , 540, 650,
"Rollins College", 540, 640 , 550, 640,
"Rose-Hulman Institute of Technology", 640, 750 , 540, 670,
"Rutgers University", 540, 670, 500, 620,
"Saint Anselm College", 520, 620 , 540, 610,
"Saint Louis University", 540, 670 , 530, 660,
"Saint Mary's College of California", 500, 610 , 500, 600,
"Saint Michael's College", 520, 620 , 530, 630,
"Saint Olaf College", 590, 710 , 590, 710,
"Salisbury University", 540, 620 , 540, 610,
"Samford University", 510, 630 , 520, 630,
"Santa Clara University", 610, 700 , 590, 680,
"Scripps College", 620, 700 , 640, 730,
"Seattle University", 540, 640 , 530, 640,
"Seton Hall University", 510, 610 , 490, 590,
"Sewanee University", 580, 660 , 590, 690,
"Siena College", 510, 610 , 490, 590,
"Simmons College", 520, 620 , 520, 630,
"Skidmore College", 570, 670 , 560, 680,
"Smith College", 600, 710 , 610, 720,
"Sonoma State University", 450, 560 , 440, 550,
"Southern Methodist University", 620, 700 , 600, 690,
"Southwestern University", 540, 640 , 520, 640,
"Spelman College", 460, 540 , 470, 570,
"St. Bonaventure University", 470, 600 , 460, 580,
"St. John's University", 490, 620 , 480, 590,
"St. Lawrence University", 570, 660 , 550, 650,
"St. Mary's College of Maryland", 540, 650 , 570, 670,
"Stanford University", 700, 790 , 680, 780,
"Stephens College", 440, 540 , 480, 590,
"Stevens Institute of Technology", 630, 670 , 540, 670,
"Suffolk University", 450, 570 , 440, 560,
"SUNY at Albany", 520, 610 , 490, 580,
"SUNY at Binghamton", 630, 710 , 590, 680,
"SUNY at Buffalo", 550, 650 , 500, 600,
"SUNY at Stony Brook", 600, 700 , 550, 650,
"SUNY College at Geneseo", 600, 700 , 580, 690,
"SUNY Purchase College", 480, 580 , 500, 600,
"Susquehanna University", 510, 610 , 510, 620,
"Swarthmore College", 670, 770 , 680, 780,
"Sweet Briar College", 450, 570 , 490, 610,
"Syracuse University", 540, 650 , 500, 620,
"Temple University", 510, 620 , 500, 610,
"Texas A&M University", 520, 610 , 500, 590,
"Texas Christian University", 550, 650 , 540, 620,
"The Catholic University of America", 510, 610 , 500, 610,
"The College of New Jersey", 580, 680 , 550, 660,
"The College of Wooster", 540, 660 , 540, 660,
"The Evergreen State College", 450, 580 , 500, 630,
"The George Washington University", 600, 700 , 600, 690,
"The University of Alabama", 500, 640 , 500, 620,
"The University of Scranton", 530, 620 , 510, 600,
"The University of South Dakota", 460, 620 , 430, 640,
"The University of Texas at Austin", 580, 710 , 550, 670,
"The University of Tulsa", 570, 690 , 560, 710,
"Transylvania University", 520, 620 , 520, 660,
"Trinity College", 600, 700 , 590, 690,
"Trinity University", 580, 670 , 570, 680,
"Truman State University", 540, 680 , 540, 680,
"Tufts University", 690, 770 , 680, 750,
"Tulane University", 620, 700 , 620, 700,
"Union College", 620, 700 , 590, 680,
"United States Air Force Academy", 620, 710 , 590, 690,
"United States Coast Guard Academy", 620, 690 , 570, 670,
"United States Merchant Marine Academy", 610, 690 , 570, 660,
"United States Military Academy", 600, 690 , 580, 700,
"University of Arizona", 490, 620 , 480, 600,
"University of Arkansas", 520, 630 , 500, 610,
"University of California-Berkeley", 650, 770 , 600, 730,
"University of California-Davis", 570, 690 , 520, 640,
"University of California-Los Angeles", 600, 760 , 560, 680,
"University of California-Riverside", 500, 630 , 470, 580,
"University of California-San Diego", 560, 720 , 510, 650,
"University of California-Santa Barbara", 570, 690 , 540, 660,
"University of California-Santa Cruz", 490, 630 , 470, 610,
"University of Central Florida", 550, 650 , 530, 630,
"University of Chicago", 710, 790 , 710, 780,
"University of Colorado", 540, 650, 520, 630,
"University of Connecticut", 580, 680 , 550, 650,
"University of Dallas", 530, 640 , 550, 670,
"University of Dayton", 500, 640 , 510, 620,
"University of Delaware", 560, 660 , 540, 650,
"University of Denver", 560, 660 , 550, 640,
"University of Florida", 590, 690 , 580, 670,
"University of Georgia", 580, 670 , 560, 660,
"University of Hawaii", 500, 610, 480, 580,
"University of Idaho", 490, 600 , 480, 590,
"University of Illinois", 680, 790, 550, 680,
"University of Iowa", 550, 690 , 470, 630,
"University of Kentucky", 510, 630 , 500, 620,
"University of Maine", 480, 600 , 470, 590,
"University of Mary Washington", 510, 600 , 520, 630,
"University of Maryland", 610, 720 , 580, 690,
"University of Massachusetts", 560, 660 , 530, 630,
"University of Miami", 630, 720 , 600, 700,
"University of Michigan", 650, 760 , 610, 700,
"University of Minnesota", 620, 740 , 550, 690,
"University of Mississippi", 480, 600 , 480, 600,
"University of Missouri", 530, 650 , 510, 640,
"University of Montana", 470, 600 , 480, 600,
"University of Nebraska", 520, 670 , 490, 660,
"University of New Hampshire", 500, 610 , 490, 590,
"University of New Mexico", 470, 600 , 470, 610,
"University of New Orleans", 490, 620 , 470, 590,
"University of North Carolina", 610, 710 , 590, 690,
"University of Notre Dame", 680, 770 , 660, 750,
"University of Oklahoma", 540, 660 , 510, 640,
"University of Oregon", 500, 620 , 490, 600,
"University of Pennsylvania", 690, 780 , 660, 760,
"University of Pittsburgh", 600, 680 , 570, 660,
"University of Puget Sound", 580, 660 , 570, 690,
"University of Redlands", 520, 620 , 510, 610,
"University of Rhode Island", 510, 620 , 490, 590,
"University of Richmond", 620, 720 , 580, 700,
"University of Rochester", 650, 750 , 600, 700,
"University of San Diego", 570, 670 , 540, 650,
"University of San Francisco", 530, 630 , 510, 620,
"University of South Carolina", 560, 650 , 540, 640,
"University of South Florida", 540, 640 , 530, 630,
"University of Southern California", 650, 760 , 620, 720,
"University of Tennessee", 520, 650, 520, 640,
"University of the Pacific", 550, 690 , 520, 650,
"University of Utah", 510, 650 , 510, 620,
"University of Vermont", 540, 650 , 540, 640,
"University of Virginia", 640, 740 , 620, 720,
"University of Washington", 580, 700 , 520, 650,
"University of Wisconsin", 630, 750 , 530, 650,
"University of Wyoming", 500, 630 , 480, 610,
"Ursinus College", 540, 660 , 540, 650,
"Valparaiso University", 510, 620 , 500, 590,
"Vanderbilt University", 710, 790 , 690, 770,
"Vassar College", 650, 730 , 660, 750,
"Villanova University", 610, 710 , 590, 680,
"Wabash College", 530, 640 , 500, 610,
"Wagner College", 530, 630 , 520, 640,
"Wake Forest University", 630, 710 , 620, 700,
"Warren Wilson College", 480, 590 , 510, 660,
"Washington and Lee University", 650, 740 , 650, 740,
"Washington State University", 470, 600 , 460, 570,
"Wellesley College", 640, 740 , 650, 740,
"Wells College", 470, 590 , 480, 600,
"Wesleyan College", 420, 540 , 450, 580,
"Wesleyan University", 660, 740 , 640, 740,
"West Virginia University", 470, 580 , 460, 560,
"Westminster College", 490, 600, 470, 590,
"Whitman College", 610, 690 , 610, 730,
"Whittier College", 480, 590 , 470, 580,
"Willamette University", 540, 650 , 540, 660,
"William Jewell College", 510, 610 , 530, 600,
"Williams College", 660, 770 , 670, 770,
"Wittenberg University", 500, 620 , 500, 620,
"Wofford College", 590, 680 , 570, 630,
"Worcester Polytechnic Institute", 640, 720 , 560, 670,
"Xavier University", 510, 610 , 500, 600,
"Yale University", 710, 790 , 700, 800,
]
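# colleges_with_sat is a flat list consumed five items at a time by
# populate_database(): name, math range low/high, reading range low/high.
# The first record, for example, unpacks as:
#   C("Albion College", 480, 640, 460, 620)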
class C:
def __init__(self, name, lm, hm, lr, hr):
from Range import Range
self.name = name
self.math_range = Range(lm, hm)
self.read_range = Range(lr, hr)
def printC(self):
print "{0} Math {1}-{2} Reading {3}-{4}".format(self.name, self.math_range.bottom,
self.math_range.top, self.read_range.bottom, self.read_range.top)
def get_sizes():
colleges = []
url = "http://collegestats.org/colleges/all/largest/%d/"
page = 1
colls = []
while page < 2:
url = "http://collegestats.org/colleges/all/largest/%d/" % (page)
request = urllib2.urlopen(url)
soup = BeautifulSoup(request.read())
div = soup.find(id="content")
names = []
sizes = []
tuitions = []
addresses = []
zips = []
num = 0
tuition_num = 0
address_num = 0
for td in div.find_all('td'):
try:
if td['class'][0] == "state":
if address_num < 3:
address_num += 1
continue
address = "adr213, zip0143"
for meta in td.find_all('meta'):
if meta['itemprop'] == "streetAddress":
address = address.replace("adr213", meta['content'])
if meta['itemprop'] == "postalCode":
address = address.replace("zip0143", meta['content'])
zips.append(int(meta['content'][0:5]))
addresses.append(address)
if td['class'][0] == "name":
if num < 3:
num += 1
continue
name = td.a.string.strip()
names.append(name)
if td['class'][0] == "students":
size = int(td.string.replace(",",""))
sizes.append(size)
if td['class'][0] == "tuition":
if tuition_num < 3:
tuition_num += 1
continue
if "N/A" in td.string:
tuition = 0
else:
tuition = int(td.string.replace(",","").replace("$",""))
tuitions.append(tuition)
except:
continue
#l = [ (x, y, z) for x in names for y in sizes for z in tuitions ]
l = zip(names, sizes, tuitions, addresses, zips)
page += 1
colls.append(l)
colls = [item for sublist in colls for item in sublist]
return colls
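# get_sizes() returns one 5-tuple per scraped table row:
#   [(name, student_count, tuition, "street address, zip", zip_as_int), ...]
# A hypothetical entry: ("Example State University", 40000, 9000, "1 Campus Dr, 12345", 12345)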
def populate_database():
"""
Returns a list of College objects to be stored in the database
    Then they can be reconstructed by calling
colleges = db_load_colleges()
"""
database_schools = []
n = 0
cols = []
cols_with_size = get_sizes()
while n < len(colleges_with_sat):
c = C(colleges_with_sat[n], colleges_with_sat[n+1], colleges_with_sat[n+2], colleges_with_sat[n+3], colleges_with_sat[n+4])
cols.append(c)
n+=5
for i in range(0, len(colleges)):
name = colleges[i]
if False: #db_college_exists(name):
continue
sats = {}
size = 0
tuition = 0
address = ""
zipcode = 0
matched = False
for c in cols:
if levenshtein(c.name, name) < 3:
matched = True
sats['math'] = c.math_range
sats['reading'] = c.read_range
if not matched:
sats = None
for c in cols_with_size:
#print c[0]
if levenshtein(c[0], name) < 3:
size = c[1]
tuition = c[2]
address = c[3]
zipcode = c[4]
#print c
break
college = College(name, "", i, sats, size, tuition, address, zipcode)
#print college
database_schools.append(college)
#college.print_college()
user = User()
user.name = "Aaron"
user.sats = {"math" : 800, "reading" : 800}
#print college.find_location()
#print college.get_difficulty()
return database_schools
def levenshtein(s1, s2):
if len(s1) < len(s2):
return levenshtein(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = xrange(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer
deletions = current_row[j] + 1 # than s2
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
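# Quick sanity check for levenshtein (classic textbook example):
#   levenshtein("kitten", "sitting") == 3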
#print get_sizes()
#print populate_database()
#print "Done!"
#
#user = User()
#user.name = "Aaron"
#i = 400
#while i <= 800:
# user.sats = {"math" : i, "reading" : i}
# print "Math: %d Reading: %d Level: %f" % (user.sats['math'], user.sats['reading'],
# user.get_level())
# i += 10
|
aacoppa/inglorious-gangsters
|
Populator.py
|
Python
|
mit
| 29,862
|
[
"COLUMBUS"
] |
c25d04e55bde3a0443b9beea883ffbcdb8cb0e6fb2691237a3667b4cfd2cd53b
|
'''Evaluate constant expressions in ir.
'''
from __future__ import absolute_import
from __future__ import with_statement
import operator as O
import types
from ..runtime.multimethod import MultiMethod, defmethod, around
from ..runtime.picklep import picklep
from ..runtime.copy import make_copy
from ..runtime.purity import purep
from ..compiler.resolution import compile_time_resolve, UnresolvableError
from ..compiler.walk import propigate_location, IRWalker
from ..compiler import ir as I
from ..compiler import codegen
from ..compiler import bind
constant_reduce = MultiMethod('constant_reduce',
signature='node',
doc='''
If possible, reduce expression to simpler expression.
Called after children nodes have been reduced to simpler nodes
''')
def handle_constants(node):
node = reduce_constants(node)
node = fix_possible_constants(node)
return node
def reduce_constants(node):
#reduce children first
for child in list(I.iter_children(node)):
r_child = reduce_constants(child)
if r_child is not child:
#print 'node', child, r_child
I.replace_child(child, r_child)
return constant_reduce(node)
class PossibleConstantFixer(IRWalker):
"""We don't want to include any non-pickleable objects as constants.
On the other hand, we do want all constant values availabe for
constant reduction as non-pickleable constants can be used to
reduce expression to pickleable ones.
This is accomplished by marking all reduction produced constants as
`possible_constants` where we still keep the generating ir node around.
After all reduction, non-pickleable possible_constants are replaced by
    their ir representation, whereas pickleable constants are converted to
regular constants.
"""
descend_into_functions = True
def visit_possible_constant(self, pc):
if picklep(pc.value):
I.replace_child(pc, I.make_constant(pc.value))
else:
I.replace_child(pc, pc.node)
self.visit(pc.node)
def fix_possible_constants(node):
if isinstance(node, I.possible_constant):
return fix_possible_constants(I.make_toplevel(node, bind.Scope())).expression
PossibleConstantFixer().visit(node)
return node
class NotConstant(Exception):
pass
no_default = object()
def as_value(op, default=no_default):
if op is None and default is not no_default:
return default
if not isinstance(op, I.constant):
raise NotConstant
return op.value
def catch_notconstant(func):
def inner(node, *args, **kwds):
try:
return func(node, *args, **kwds)
except NotConstant:
return node
return inner
def mkcnst(node, value):
return propigate_location(node, I.make_possible_constant(value, node))
@catch_notconstant
def reduce_through_function(node, func):
return mkcnst(node, evaluate_catch(node, func, *map(as_value, I.iter_children(node))))
def evaluate_catch(node, func, *args):
try:
return func(*args)
except Exception:
        # Could insert code to handle errors here as they aren't necessarily fatal.
# We can always revert back to the original node if the function is incapable of
# reduction.
# Need a way to distinguish such expected errors from programming errors.
raise
#by default do nothing
@defmethod(constant_reduce, [I.node])
def meth(node):
return node
unary_functions = {
I.neg : O.neg,
I.pos : O.pos,
I.not_ : O.not_,
I.convert : repr,
I.invert : O.invert,
I.get_iter : iter,
}
@defmethod(constant_reduce, [I.unary_base])
def meth(node):
return reduce_through_function(node, unary_functions[type(node)])
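# Sketch of the reduction above: a unary ir node whose child is already a
# constant, e.g. neg(constant(3)), is folded through O.neg into
# possible_constant(-3) by reduce_through_function (names abbreviated; the real
# nodes are built via the I.* factories).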
binary_functions = {
I.add : O.add,
I.subtract : O.sub,
I.multiply : O.mul,
I.divide : O.div,
I.floor_divide : O.floordiv,
I.true_divide : O.truediv,
I.modulo : O.mod,
I.iadd : O.iadd,
I.isubtract : O.isub,
I.imultiply : O.imul,
I.idivide : O.idiv,
I.ifloor_divide : O.ifloordiv,
I.itrue_divide : O.itruediv,
I.imodulo : O.imod,
I.lshift : O.lshift,
I.rshift : O.rshift,
I.binand : O.and_,
I.binor : O.or_,
I.binxor : O.xor,
I.ibinand : O.iand,
I.ibinor : O.ior,
I.ibinxor : O.ixor,
I.eq : O.eq,
I.ne : O.ne,
I.gt : O.gt,
I.ge : O.ge,
I.le : O.le,
I.lt : O.lt,
I.in_ : O.contains,
I.notin : lambda x,seq: x not in seq,
I.is_ : O.is_,
I.isnot : O.is_not,
I.exception_match : isinstance,
}
@defmethod(constant_reduce, [I.binary_base])
def meth(node):
return reduce_through_function(node, binary_functions[type(node)])
@defmethod(constant_reduce, [I.attrget])
@catch_notconstant
def meth(node):
return mkcnst(node, evaluate_catch(node, getattr, as_value(node.obj), node.name))
@defmethod(constant_reduce, [I.getitem])
@catch_notconstant
def meth(node):
return mkcnst(node, evaluate_catch(node, lambda op, item: op[item], as_value(node.op), as_value(node.item)))
@defmethod(constant_reduce, [I.progn])
@catch_notconstant
def meth(node):
if not node.exprs:
return I.copy_loc(I.make_nop(), node)
for expr in node.exprs:
value = as_value(expr)
return mkcnst(node, value)
@defmethod(constant_reduce, [I.call])
@catch_notconstant
def meth(node):
callee = as_value(node.callee)
if not purep(callee):
raise NotConstant
star_args = as_value(node.star_args, [])
star_kwds = as_value(node.star_kwds, {})
args = map(as_value, node.args)
kwds = dict(zip(node.kwd_names, map(as_value, node.kwd_values)))
def perform_call():
if set(kwds) & set(star_kwds):
#could insert code to raise this error at runtime (possibly expected?)
raise ValueError("multiple values for same keyword")
kwds.update(star_kwds)
return callee(*(args + star_args), **kwds)
return mkcnst(node, evaluate_catch(node, perform_call))
@defmethod(constant_reduce, [I.if_])
@catch_notconstant
def meth(node):
return node.then if as_value(node.condition) else node.else_
@defmethod(constant_reduce, [I.function])
@catch_notconstant
def meth(func):
if codegen.get_function_free_bindings(func):
return func
map(as_value, func.defaults)
#must import here to prevent cyclic imports
from ..compiler.function import make_function
return mkcnst(func, make_function(make_copy(func)))
|
matthagy/Jamenson
|
jamenson/transform/constant_reduction.py
|
Python
|
apache-2.0
| 6,969
|
[
"VisIt"
] |
f0d302cbb959d111d1f5487e7f6f45afdb51f6fbef922b32a51f016f937487c0
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This module is also sponsored by E.T.A.I. (www.etai.fr)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vmware_guest
short_description: Manages virtual machines in vCenter
description: >
This module can be used to create new virtual machines from templates or other virtual machines,
manage power state of virtual machine such as power on, power off, suspend, shutdown, reboot, restart etc.,
modify various virtual machine components like network, disk, customization etc.,
rename a virtual machine and remove a virtual machine with associated components.
version_added: '2.2'
author:
- Loic Blot (@nerzhul) <loic.blot@unix-experience.fr>
- Philippe Dellaert (@pdellaert) <philippe@dellaert.org>
- Abhijeet Kasurde (@Akasurde) <akasurde@redhat.com>
requirements:
- python >= 2.6
- PyVmomi
notes:
- Please make sure that the user used for vmware_guest has the correct level of privileges.
- For example, following is the list of minimum privileges required by users to create virtual machines.
- " DataStore > Allocate Space"
- " Virtual Machine > Configuration > Add New Disk"
- " Virtual Machine > Configuration > Add or Remove Device"
- " Virtual Machine > Inventory > Create New"
- " Network > Assign Network"
- " Resource > Assign Virtual Machine to Resource Pool"
- "Module may require additional privileges as well, which may be required for gathering facts - e.g. ESXi configurations."
- Tested on vSphere 5.5, 6.0, 6.5 and 6.7
- Use SCSI disks instead of IDE when you want to expand online disks by specifying a SCSI controller
- "For additional information please visit Ansible VMware community wiki - U(https://github.com/ansible/community/wiki/VMware)."
options:
state:
description:
- Specify the state the virtual machine should be in.
- 'If C(state) is set to C(present) and virtual machine exists, ensure the virtual machine
configurations conforms to task arguments.'
- 'If C(state) is set to C(absent) and virtual machine exists, then the specified virtual machine
is removed with its associated components.'
- 'If C(state) is set to one of the following C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
      and virtual machine does not exist, then virtual machine is deployed with given parameters.'
- 'If C(state) is set to C(poweredon) and virtual machine exists with powerstate other than powered on,
then the specified virtual machine is powered on.'
- 'If C(state) is set to C(poweredoff) and virtual machine exists with powerstate other than powered off,
then the specified virtual machine is powered off.'
- 'If C(state) is set to C(restarted) and virtual machine exists, then the virtual machine is restarted.'
- 'If C(state) is set to C(suspended) and virtual machine exists, then the virtual machine is set to suspended mode.'
- 'If C(state) is set to C(shutdownguest) and virtual machine exists, then the virtual machine is shutdown.'
- 'If C(state) is set to C(rebootguest) and virtual machine exists, then the virtual machine is rebooted.'
default: present
choices: [ present, absent, poweredon, poweredoff, restarted, suspended, shutdownguest, rebootguest ]
name:
description:
- Name of the virtual machine to work with.
- Virtual machine names in vCenter are not necessarily unique, which may be problematic, see C(name_match).
    - 'If multiple virtual machines with the same name exist, then C(folder) is a required parameter to
      identify the virtual machine uniquely.'
    - This parameter is required if C(state) is set to C(poweredon), C(poweredoff), C(present), C(restarted), C(suspended)
      and the virtual machine does not exist.
- This parameter is case sensitive.
required: yes
name_match:
description:
    - If multiple virtual machines match the name, use the first or last one found.
default: 'first'
choices: [ first, last ]
uuid:
description:
- UUID of the virtual machine to manage if known, this is VMware's unique identifier.
- This is required if C(name) is not supplied.
    - If the virtual machine does not exist, then this parameter is ignored.
- Please note that a supplied UUID will be ignored on virtual machine creation, as VMware creates the UUID internally.
use_instance_uuid:
description:
- Whether to use the VMWare instance UUID rather than the BIOS UUID.
default: no
type: bool
version_added: '2.8'
template:
description:
- Template or existing virtual machine used to create new virtual machine.
- If this value is not set, virtual machine is created without using a template.
- If the virtual machine already exists, this parameter will be ignored.
- This parameter is case sensitive.
    - You can also specify a template or VM UUID to identify the source. version_added 2.8. Use C(hw_product_uuid) from M(vmware_guest_facts) as the UUID value.
- From version 2.8 onwards, absolute path to virtual machine or template can be used.
aliases: [ 'template_src' ]
is_template:
description:
- Flag the instance as a template.
- This will mark the given virtual machine as template.
default: 'no'
type: bool
version_added: '2.3'
folder:
description:
- Destination folder, absolute path to find an existing guest or create the new guest.
- The folder should include the datacenter. ESX's datacenter is ha-datacenter.
- This parameter is case sensitive.
    - This parameter is required when deploying a new virtual machine. version_added 2.5.
    - 'If multiple machines are found with the same name, this parameter is used to identify
      the virtual machine uniquely. version_added 2.5'
- 'Examples:'
- ' folder: /ha-datacenter/vm'
- ' folder: ha-datacenter/vm'
- ' folder: /datacenter1/vm'
- ' folder: datacenter1/vm'
- ' folder: /datacenter1/vm/folder1'
- ' folder: datacenter1/vm/folder1'
- ' folder: /folder1/datacenter1/vm'
- ' folder: folder1/datacenter1/vm'
- ' folder: /folder1/datacenter1/vm/folder2'
hardware:
description:
- Manage virtual machine's hardware attributes.
    - All parameters are case sensitive.
- 'Valid attributes are:'
- ' - C(hotadd_cpu) (boolean): Allow virtual CPUs to be added while the virtual machine is running.'
- ' - C(hotremove_cpu) (boolean): Allow virtual CPUs to be removed while the virtual machine is running.
version_added: 2.5'
- ' - C(hotadd_memory) (boolean): Allow memory to be added while the virtual machine is running.'
- ' - C(memory_mb) (integer): Amount of memory in MB.'
- ' - C(nested_virt) (bool): Enable nested virtualization. version_added: 2.5'
- ' - C(num_cpus) (integer): Number of CPUs.'
    - ' - C(num_cpu_cores_per_socket) (integer): Number of Cores Per Socket. C(num_cpus) should be a multiple of this value.'
- ' - C(scsi) (string): Valid values are C(buslogic), C(lsilogic), C(lsilogicsas) and C(paravirtual) (default).'
- " - C(memory_reservation_lock) (boolean): If set true, memory resource reservation for the virtual machine
will always be equal to the virtual machine's memory size. version_added: 2.5"
- ' - C(max_connections) (integer): Maximum number of active remote display connections for the virtual machines.
version_added: 2.5.'
- ' - C(mem_limit) (integer): The memory utilization of a virtual machine will not exceed this limit. Unit is MB.
version_added: 2.5'
- ' - C(mem_reservation) (integer): The amount of memory resource that is guaranteed available to the virtual
machine. Unit is MB. C(memory_reservation) is alias to this. version_added: 2.5'
- ' - C(cpu_limit) (integer): The CPU utilization of a virtual machine will not exceed this limit. Unit is MHz.
version_added: 2.5'
- ' - C(cpu_reservation) (integer): The amount of CPU resource that is guaranteed available to the virtual machine.
Unit is MHz. version_added: 2.5'
- ' - C(version) (integer): The Virtual machine hardware versions. Default is 10 (ESXi 5.5 and onwards).
Please check VMware documentation for correct virtual machine hardware version.
Incorrect hardware version may lead to failure in deployment. If hardware version is already equal to the given
version then no action is taken. version_added: 2.6'
- ' - C(boot_firmware) (string): Choose which firmware should be used to boot the virtual machine.
Allowed values are "bios" and "efi". version_added: 2.7'
- ' - C(virt_based_security) (bool): Enable Virtualization Based Security feature for Windows 10.
(Support from Virtual machine hardware version 14, Guest OS Windows 10 64 bit, Windows Server 2016)'
guest_id:
description:
- Set the guest ID.
- This parameter is case sensitive.
- 'Examples:'
- " virtual machine with RHEL7 64 bit, will be 'rhel7_64Guest'"
- " virtual machine with CentOS 64 bit, will be 'centos64Guest'"
- " virtual machine with Ubuntu 64 bit, will be 'ubuntu64Guest'"
- This field is required when creating a virtual machine.
- >
Valid values are referenced here:
U(http://pubs.vmware.com/vsphere-6-5/topic/com.vmware.wssdk.apiref.doc/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html)
version_added: '2.3'
disk:
description:
- A list of disks to add.
- This parameter is case sensitive.
- Shrinking disks is not supported.
- Removing existing disks of the virtual machine is not supported.
- 'Valid attributes are:'
- ' - C(size_[tb,gb,mb,kb]) (integer): Disk storage size in specified unit.'
- ' - C(type) (string): Valid values are:'
- ' - C(thin) thin disk'
- ' - C(eagerzeroedthick) eagerzeroedthick disk, added in version 2.5'
- ' Default: C(None) thick disk, no eagerzero.'
    - ' - C(datastore) (string): The name of the datastore which will be used for the disk. If C(autoselect_datastore) is set to True,
      the least used datastore whose name contains this "disk.datastore" string will be selected.'
    - ' - C(filename) (string): Existing disk image to be used. The file must already exist on the datastore.'
- ' Specify filename string in C([datastore_name] path/to/file.vmdk) format. Added in version 2.8.'
    - ' - C(autoselect_datastore) (bool): Select the least used datastore. "disk.datastore" and "disk.autoselect_datastore"
      will not be used if C(datastore) is specified outside this C(disk) configuration.'
- ' - C(disk_mode) (string): Type of disk mode. Added in version 2.6'
- ' - Available options are :'
- ' - C(persistent): Changes are immediately and permanently written to the virtual disk. This is default.'
- ' - C(independent_persistent): Same as persistent, but not affected by snapshots.'
- ' - C(independent_nonpersistent): Changes to virtual disk are made to a redo log and discarded at power off, but not affected by snapshots.'
cdrom:
description:
- A CD-ROM configuration for the virtual machine.
- 'Valid attributes are:'
- ' - C(type) (string): The type of CD-ROM, valid options are C(none), C(client) or C(iso). With C(none) the CD-ROM will be disconnected but present.'
    - ' - C(iso_path) (string): The datastore path to the ISO file to use, in the form of C([datastore1] path/to/file.iso). Required if type is set to C(iso).'
version_added: '2.5'
resource_pool:
description:
- Use the given resource pool for virtual machine operation.
- This parameter is case sensitive.
    - The resource pool should be a child of the selected host's parent.
version_added: '2.3'
wait_for_ip_address:
description:
- Wait until vCenter detects an IP address for the virtual machine.
- This requires vmware-tools (vmtoolsd) to properly work after creation.
- "vmware-tools needs to be installed on the given virtual machine in order to work with this parameter."
default: 'no'
type: bool
wait_for_customization:
description:
- Wait until vCenter detects all guest customizations as successfully completed.
- When enabled, the VM will automatically be powered on.
default: 'no'
type: bool
version_added: '2.8'
state_change_timeout:
description:
- If the C(state) is set to C(shutdownguest), by default the module will return immediately after sending the shutdown signal.
- If this argument is set to a positive integer, the module will instead wait for the virtual machine to reach the poweredoff state.
- The value sets a timeout in seconds for the module to wait for the state change.
default: 0
version_added: '2.6'
snapshot_src:
description:
- Name of the existing snapshot to use to create a clone of a virtual machine.
- This parameter is case sensitive.
    - This parameter is required while creating a linked clone using the C(linked_clone) parameter.
version_added: '2.4'
linked_clone:
description:
- Whether to create a linked clone from the snapshot specified.
    - If specified, then C(snapshot_src) is a required parameter.
default: 'no'
type: bool
version_added: '2.4'
force:
description:
- Ignore warnings and complete the actions.
    - This parameter is useful when removing a virtual machine which is in a powered on state (see the force removal example below).
    - 'This module reflects the VMware vCenter API and UI workflow, as such, in some cases the `force` flag will
      be mandatory to perform the action to ensure you are certain the action has to be taken, no matter what the consequence.
      This is specifically the case for removing a powered-on virtual machine when C(state) is set to C(absent).'
default: 'no'
type: bool
datacenter:
description:
- Destination datacenter for the deploy operation.
- This parameter is case sensitive.
default: ha-datacenter
cluster:
description:
- The cluster name where the virtual machine will run.
- This is a required parameter, if C(esxi_hostname) is not set.
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
- This parameter is case sensitive.
version_added: '2.3'
esxi_hostname:
description:
- The ESXi hostname where the virtual machine will run.
- This is a required parameter, if C(cluster) is not set.
- C(esxi_hostname) and C(cluster) are mutually exclusive parameters.
- This parameter is case sensitive.
annotation:
description:
- A note or annotation to include in the virtual machine.
version_added: '2.3'
customvalues:
description:
    - Define a list of custom values to set on a virtual machine.
    - A custom value object takes two fields C(key) and C(value).
    - Incorrect keys and values will be ignored.
version_added: '2.3'
networks:
description:
- A list of networks (in the order of the NICs).
    - Removing NICs is not allowed while reconfiguring the virtual machine.
- All parameters and VMware object names are case sensitive.
- 'One of the below parameters is required per entry:'
- ' - C(name) (string): Name of the portgroup or distributed virtual portgroup for this interface.
When specifying distributed virtual portgroup make sure given C(esxi_hostname) or C(cluster) is associated with it.'
- ' - C(vlan) (integer): VLAN number for this interface.'
- 'Optional parameters per entry (used for virtual hardware):'
- ' - C(device_type) (string): Virtual network device (one of C(e1000), C(e1000e), C(pcnet32), C(vmxnet2), C(vmxnet3) (default), C(sriov)).'
- ' - C(mac) (string): Customize MAC address.'
- ' - C(dvswitch_name) (string): Name of the distributed vSwitch.
This value is required if multiple distributed portgroups exists with the same name. version_added 2.7'
    - ' - C(start_connected) (bool): Specifies whether the virtual network adapter starts connected when the associated virtual machine powers on. version_added: 2.5'
- 'Optional parameters per entry (used for OS customization):'
- ' - C(type) (string): Type of IP assignment (either C(dhcp) or C(static)). C(dhcp) is default.'
- ' - C(ip) (string): Static IP address (implies C(type: static)).'
- ' - C(netmask) (string): Static netmask required for C(ip).'
- ' - C(gateway) (string): Static gateway.'
- ' - C(dns_servers) (string): DNS servers for this network interface (Windows).'
- ' - C(domain) (string): Domain name for this network interface (Windows).'
- ' - C(wake_on_lan) (bool): Indicates if wake-on-LAN is enabled on this virtual network adapter. version_added: 2.5'
- ' - C(allow_guest_control) (bool): Enables guest control over whether the connectable device is connected. version_added: 2.5'
version_added: '2.3'
customization:
description:
    - Parameters for OS customization when cloning from the template or the virtual machine, or to apply to the existing virtual machine directly.
- Not all operating systems are supported for customization with respective vCenter version,
please check VMware documentation for respective OS customization.
- For supported customization operating system matrix, (see U(http://partnerweb.vmware.com/programs/guestOS/guest-os-customization-matrix.pdf))
- All parameters and VMware object names are case sensitive.
    - Linux-based OSes require the Perl package to be installed for OS customization.
- 'Common parameters (Linux/Windows):'
- ' - C(existing_vm) (bool): If set to C(True), do OS customization on the specified virtual machine directly.
If set to C(False) or not specified, do OS customization when cloning from the template or the virtual machine. version_added: 2.8'
- ' - C(dns_servers) (list): List of DNS servers to configure.'
- ' - C(dns_suffix) (list): List of domain suffixes, also known as DNS search path (default: C(domain) parameter).'
- ' - C(domain) (string): DNS domain name to use.'
    - ' - C(hostname) (string): Computer hostname (default: shortened C(name) parameter). Allowed characters are alphanumeric (uppercase and lowercase)
      and minus; the rest of the characters are dropped as per RFC 952.'
- 'Parameters related to Windows customization:'
- ' - C(autologon) (bool): Auto logon after virtual machine customization (default: False).'
- ' - C(autologoncount) (int): Number of autologon after reboot (default: 1).'
- ' - C(domainadmin) (string): User used to join in AD domain (mandatory with C(joindomain)).'
- ' - C(domainadminpassword) (string): Password used to join in AD domain (mandatory with C(joindomain)).'
- ' - C(fullname) (string): Server owner name (default: Administrator).'
- ' - C(joindomain) (string): AD domain to join (Not compatible with C(joinworkgroup)).'
- ' - C(joinworkgroup) (string): Workgroup to join (Not compatible with C(joindomain), default: WORKGROUP).'
- ' - C(orgname) (string): Organisation name (default: ACME).'
- ' - C(password) (string): Local administrator password.'
- ' - C(productid) (string): Product ID.'
- ' - C(runonce) (list): List of commands to run at first user logon.'
- ' - C(timezone) (int): Timezone (See U(https://msdn.microsoft.com/en-us/library/ms912391.aspx)).'
version_added: '2.3'
vapp_properties:
description:
    - A list of vApp properties.
- 'For full list of attributes and types refer to: U(https://github.com/vmware/pyvmomi/blob/master/docs/vim/vApp/PropertyInfo.rst)'
- 'Basic attributes are:'
- ' - C(id) (string): Property id - required.'
- ' - C(value) (string): Property value.'
- ' - C(type) (string): Value type, string type by default.'
- ' - C(operation): C(remove): This attribute is required only when removing properties.'
version_added: '2.6'
customization_spec:
description:
- Unique name identifying the requested customization specification.
- This parameter is case sensitive.
- If set, then overrides C(customization) parameter values.
version_added: '2.6'
datastore:
description:
- Specify datastore or datastore cluster to provision virtual machine.
- 'This parameter takes precedence over "disk.datastore" parameter.'
- 'This parameter can be used to override datastore or datastore cluster setting of the virtual machine when deployed
from the template.'
    - Please see the examples below for more usage.
version_added: '2.7'
convert:
description:
    - Specify the destination disk type when cloning a template or virtual machine.
choices: [ thin, thick, eagerzeroedthick ]
version_added: '2.8'
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Create a virtual machine on given ESXi hostname
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
folder: /DC1/vm/
name: test_vm_0001
state: poweredon
guest_id: centos64Guest
    # This is the hostname of the particular ESXi server on which the user wants the VM to be deployed
esxi_hostname: "{{ esxi_hostname }}"
disk:
- size_gb: 10
type: thin
datastore: datastore1
hardware:
memory_mb: 512
num_cpus: 4
scsi: paravirtual
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
ip: 10.10.10.100
netmask: 255.255.255.0
device_type: vmxnet3
wait_for_ip_address: yes
delegate_to: localhost
register: deploy_vm
- name: Create a virtual machine from a template
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
folder: /testvms
name: testvm_2
state: poweredon
template: template_el7
disk:
- size_gb: 10
type: thin
datastore: g73_datastore
hardware:
memory_mb: 512
num_cpus: 6
num_cpu_cores_per_socket: 3
scsi: paravirtual
memory_reservation_lock: True
mem_limit: 8096
mem_reservation: 4096
cpu_limit: 8096
cpu_reservation: 4096
max_connections: 5
hotadd_cpu: True
hotremove_cpu: True
hotadd_memory: False
version: 12 # Hardware version of virtual machine
boot_firmware: "efi"
cdrom:
type: iso
iso_path: "[datastore1] livecd.iso"
networks:
- name: VM Network
mac: aa:bb:dd:aa:00:14
wait_for_ip_address: yes
delegate_to: localhost
register: deploy
- name: Clone a virtual machine from Windows template and customize
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: datacenter1
cluster: cluster
name: testvm-2
template: template_windows
networks:
- name: VM Network
ip: 192.168.1.100
netmask: 255.255.255.0
gateway: 192.168.1.1
mac: aa:bb:dd:aa:00:14
domain: my_domain
dns_servers:
- 192.168.1.1
- 192.168.1.2
- vlan: 1234
type: dhcp
customization:
autologon: yes
dns_servers:
- 192.168.1.1
- 192.168.1.2
domain: my_domain
password: new_vm_password
runonce:
- powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
delegate_to: localhost
- name: Clone a virtual machine from Linux template and customize
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
datacenter: "{{ datacenter }}"
state: present
folder: /DC1/vm
template: "{{ template }}"
name: "{{ vm_name }}"
cluster: DC1_C1
networks:
- name: VM Network
ip: 192.168.10.11
netmask: 255.255.255.0
wait_for_ip_address: True
customization:
domain: "{{ guest_domain }}"
dns_servers:
- 8.9.9.9
- 7.8.8.9
dns_suffix:
- example.com
- example2.com
delegate_to: localhost
- name: Rename a virtual machine (requires the virtual machine's uuid)
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
name: new_name
state: present
delegate_to: localhost
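# Illustrative sketch, not taken from the upstream examples: marking an existing
# virtual machine as a template with is_template. The folder and name values are
# placeholders and assume the virtual machine already exists and is powered off.
- name: Mark a virtual machine as a template
  vmware_guest:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    validate_certs: no
    folder: /DC1/vm
    name: testvm_2
    is_template: yes
  delegate_to: localhost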
- name: Remove a virtual machine by uuid
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
state: absent
delegate_to: localhost
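# Illustrative sketch, not taken from the upstream examples: removing a powered-on
# virtual machine by combining state: absent with force: yes, as described in the
# force option above. The vm_uuid variable is a placeholder.
- name: Remove a powered-on virtual machine using force
  vmware_guest:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    validate_certs: no
    uuid: "{{ vm_uuid }}"
    state: absent
    force: yes
  delegate_to: localhost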
- name: Manipulate vApp properties
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
name: vm_name
state: present
vapp_properties:
- id: remoteIP
category: Backup
label: Backup server IP
type: str
value: 10.10.10.1
- id: old_property
operation: remove
delegate_to: localhost
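# Illustrative sketch, not taken from the upstream examples: setting custom values
# with the customvalues option. The key name used here is hypothetical; the custom
# attribute is expected to already be defined in vCenter.
- name: Set a custom value on an existing virtual machine
  vmware_guest:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    validate_certs: no
    name: vm_name
    state: present
    customvalues:
      - key: backup_policy
        value: daily
  delegate_to: localhost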
- name: Set powerstate of a virtual machine to poweroff by using UUID
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
validate_certs: no
uuid: "{{ vm_uuid }}"
state: poweredoff
delegate_to: localhost
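# Illustrative sketch, not taken from the upstream examples: gracefully shutting
# down the guest OS and waiting up to 200 seconds for the poweredoff state via
# state_change_timeout. Requires VMware Tools to be running in the guest.
- name: Shut down guest OS and wait for the virtual machine to power off
  vmware_guest:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    validate_certs: no
    uuid: "{{ vm_uuid }}"
    state: shutdownguest
    state_change_timeout: 200
  delegate_to: localhost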
- name: Deploy a virtual machine in a datastore different from the datastore of the template
vmware_guest:
hostname: "{{ vcenter_hostname }}"
username: "{{ vcenter_username }}"
password: "{{ vcenter_password }}"
name: "{{ vm_name }}"
state: present
template: "{{ template_name }}"
    # Here the datastore can be different from the one which holds the template
datastore: "{{ virtual_machine_datastore }}"
hardware:
memory_mb: 512
num_cpus: 2
scsi: paravirtual
delegate_to: localhost
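# Illustrative sketch, not taken from the upstream examples: creating a linked clone
# from an existing snapshot of a template, as described for snapshot_src and
# linked_clone above. The template, snapshot, cluster and folder names are placeholders.
- name: Create a linked clone from a snapshot of a template
  vmware_guest:
    hostname: "{{ vcenter_hostname }}"
    username: "{{ vcenter_username }}"
    password: "{{ vcenter_password }}"
    validate_certs: no
    datacenter: "{{ datacenter }}"
    cluster: "{{ cluster_name }}"
    folder: /DC1/vm
    name: linked_clone_vm_0001
    template: "{{ template_name }}"
    snapshot_src: base_snapshot
    linked_clone: yes
    state: poweredon
  delegate_to: localhost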
'''
RETURN = r'''
instance:
description: metadata about the new virtual machine
returned: always
type: dict
sample: None
'''
import re
import time
import string
HAS_PYVMOMI = False
try:
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
pass
from random import randint
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.vmware import (find_obj, gather_vm_facts, get_all_objs,
compile_folder_path_for_object, serialize_spec,
vmware_argument_spec, set_vm_power_state, PyVmomi,
find_dvs_by_name, find_dvspg_by_name, wait_for_vm_ip,
wait_for_task, TaskError)
class PyVmomiDeviceHelper(object):
""" This class is a helper to create easily VMWare Objects for PyVmomiHelper """
def __init__(self, module):
self.module = module
self.next_disk_unit_number = 0
self.scsi_device_type = {
'lsilogic': vim.vm.device.VirtualLsiLogicController,
'paravirtual': vim.vm.device.ParaVirtualSCSIController,
'buslogic': vim.vm.device.VirtualBusLogicController,
'lsilogicsas': vim.vm.device.VirtualLsiLogicSASController,
}
def create_scsi_controller(self, scsi_type):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
scsi_device = self.scsi_device_type.get(scsi_type, vim.vm.device.ParaVirtualSCSIController)
scsi_ctl.device = scsi_device()
scsi_ctl.device.busNumber = 0
        # While creating a new SCSI controller, the temporary key value
        # should be a unique negative integer
scsi_ctl.device.key = -randint(1000, 9999)
scsi_ctl.device.hotAddRemove = True
scsi_ctl.device.sharedBus = 'noSharing'
scsi_ctl.device.scsiCtlrUnitNumber = 7
return scsi_ctl
def is_scsi_controller(self, device):
return isinstance(device, tuple(self.scsi_device_type.values()))
@staticmethod
def create_ide_controller():
ide_ctl = vim.vm.device.VirtualDeviceSpec()
ide_ctl.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
ide_ctl.device = vim.vm.device.VirtualIDEController()
ide_ctl.device.deviceInfo = vim.Description()
        # While creating a new IDE controller, the temporary key value
        # should be a unique negative integer
ide_ctl.device.key = -randint(200, 299)
ide_ctl.device.busNumber = 0
return ide_ctl
@staticmethod
def create_cdrom(ide_ctl, cdrom_type, iso_path=None):
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
cdrom_spec.device = vim.vm.device.VirtualCdrom()
cdrom_spec.device.controllerKey = ide_ctl.device.key
cdrom_spec.device.key = -1
cdrom_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdrom_spec.device.connectable.allowGuestControl = True
cdrom_spec.device.connectable.startConnected = (cdrom_type != "none")
if cdrom_type in ["none", "client"]:
cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
elif cdrom_type == "iso":
cdrom_spec.device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
return cdrom_spec
@staticmethod
def is_equal_cdrom(vm_obj, cdrom_device, cdrom_type, iso_path):
if cdrom_type == "none":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
cdrom_device.connectable.allowGuestControl and
not cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or not cdrom_device.connectable.connected))
elif cdrom_type == "client":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo) and
cdrom_device.connectable.allowGuestControl and
cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
elif cdrom_type == "iso":
return (isinstance(cdrom_device.backing, vim.vm.device.VirtualCdrom.IsoBackingInfo) and
cdrom_device.backing.fileName == iso_path and
cdrom_device.connectable.allowGuestControl and
cdrom_device.connectable.startConnected and
(vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOn or cdrom_device.connectable.connected))
def create_scsi_disk(self, scsi_ctl, disk_index=None):
diskspec = vim.vm.device.VirtualDeviceSpec()
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
diskspec.device = vim.vm.device.VirtualDisk()
diskspec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
diskspec.device.controllerKey = scsi_ctl.device.key
if self.next_disk_unit_number == 7:
raise AssertionError()
if disk_index == 7:
raise AssertionError()
"""
Configure disk unit number.
"""
if disk_index is not None:
diskspec.device.unitNumber = disk_index
self.next_disk_unit_number = disk_index + 1
else:
diskspec.device.unitNumber = self.next_disk_unit_number
self.next_disk_unit_number += 1
        # unit number 7 is reserved for the SCSI controller, so skip it when assigning the next index
if self.next_disk_unit_number == 7:
self.next_disk_unit_number += 1
return diskspec
def get_device(self, device_type, name):
nic_dict = dict(pcnet32=vim.vm.device.VirtualPCNet32(),
vmxnet2=vim.vm.device.VirtualVmxnet2(),
vmxnet3=vim.vm.device.VirtualVmxnet3(),
e1000=vim.vm.device.VirtualE1000(),
e1000e=vim.vm.device.VirtualE1000e(),
sriov=vim.vm.device.VirtualSriovEthernetCard(),
)
if device_type in nic_dict:
return nic_dict[device_type]
else:
self.module.fail_json(msg='Invalid device_type "%s"'
' for network "%s"' % (device_type, name))
def create_nic(self, device_type, device_label, device_infos):
nic = vim.vm.device.VirtualDeviceSpec()
nic.device = self.get_device(device_type, device_infos['name'])
nic.device.wakeOnLanEnabled = bool(device_infos.get('wake_on_lan', True))
nic.device.deviceInfo = vim.Description()
nic.device.deviceInfo.label = device_label
nic.device.deviceInfo.summary = device_infos['name']
nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
nic.device.connectable.startConnected = bool(device_infos.get('start_connected', True))
nic.device.connectable.allowGuestControl = bool(device_infos.get('allow_guest_control', True))
nic.device.connectable.connected = True
if 'mac' in device_infos and self.is_valid_mac_addr(device_infos['mac']):
nic.device.addressType = 'manual'
nic.device.macAddress = device_infos['mac']
else:
nic.device.addressType = 'generated'
return nic
@staticmethod
def is_valid_mac_addr(mac_addr):
"""
Function to validate MAC address for given string
Args:
mac_addr: string to validate as MAC address
Returns: (Boolean) True if string is valid MAC address, otherwise False
"""
mac_addr_regex = re.compile('[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$')
return bool(mac_addr_regex.match(mac_addr))
def integer_value(self, input_value, name):
"""
Function to return int value for given input, else return error
Args:
input_value: Input value to retrieve int value from
name: Name of the Input value (used to build error message)
        Returns: (int) if an integer value can be obtained, otherwise fails with an error message.
"""
if isinstance(input_value, int):
return input_value
elif isinstance(input_value, str) and input_value.isdigit():
return int(input_value)
else:
self.module.fail_json(msg='"%s" attribute should be an'
' integer value.' % name)
class PyVmomiCache(object):
""" This class caches references to objects which are requested multiples times but not modified """
def __init__(self, content, dc_name=None):
self.content = content
self.dc_name = dc_name
self.networks = {}
self.clusters = {}
self.esx_hosts = {}
self.parent_datacenters = {}
def find_obj(self, content, types, name, confine_to_datacenter=True):
""" Wrapper around find_obj to set datacenter context """
result = find_obj(content, types, name)
if result and confine_to_datacenter:
if to_text(self.get_parent_datacenter(result).name) != to_text(self.dc_name):
result = None
objects = self.get_all_objs(content, types, confine_to_datacenter=True)
for obj in objects:
if name is None or to_text(obj.name) == to_text(name):
return obj
return result
def get_all_objs(self, content, types, confine_to_datacenter=True):
""" Wrapper around get_all_objs to set datacenter context """
objects = get_all_objs(content, types)
if confine_to_datacenter:
if hasattr(objects, 'items'):
# resource pools come back as a dictionary
# make a copy
tmpobjs = objects.copy()
for k, v in objects.items():
parent_dc = self.get_parent_datacenter(k)
if parent_dc.name != self.dc_name:
tmpobjs.pop(k, None)
objects = tmpobjs
else:
# everything else should be a list
objects = [x for x in objects if self.get_parent_datacenter(x).name == self.dc_name]
return objects
def get_network(self, network):
if network not in self.networks:
self.networks[network] = self.find_obj(self.content, [vim.Network], network)
return self.networks[network]
def get_cluster(self, cluster):
if cluster not in self.clusters:
self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster)
return self.clusters[cluster]
def get_esx_host(self, host):
if host not in self.esx_hosts:
self.esx_hosts[host] = self.find_obj(self.content, [vim.HostSystem], host)
return self.esx_hosts[host]
def get_parent_datacenter(self, obj):
""" Walk the parent tree to find the objects datacenter """
if isinstance(obj, vim.Datacenter):
return obj
if obj in self.parent_datacenters:
return self.parent_datacenters[obj]
        datacenter = None
        current = obj
        while True:
            if not hasattr(current, 'parent'):
                break
            current = current.parent
            if isinstance(current, vim.Datacenter):
                datacenter = current
                break
        # Cache the result against the original object (not the walked-up parent) so later lookups can hit
        self.parent_datacenters[obj] = datacenter
return datacenter
class PyVmomiHelper(PyVmomi):
def __init__(self, module):
super(PyVmomiHelper, self).__init__(module)
self.device_helper = PyVmomiDeviceHelper(self.module)
self.configspec = None
self.change_detected = False # a change was detected and needs to be applied through reconfiguration
self.change_applied = False # a change was applied meaning at least one task succeeded
self.customspec = None
self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter'])
def gather_facts(self, vm):
return gather_vm_facts(self.content, vm)
def remove_vm(self, vm):
# https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
if vm.summary.runtime.powerState.lower() == 'poweredon':
self.module.fail_json(msg="Virtual machine %s found in 'powered on' state, "
"please use 'force' parameter to remove or poweroff VM "
"and try removing VM again." % vm.name)
task = vm.Destroy()
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'destroy'}
else:
return {'changed': self.change_applied, 'failed': False}
def configure_guestid(self, vm_obj, vm_creation=False):
# guest_id is not required when using templates
if self.params['template'] and not self.params['guest_id']:
return
# guest_id is only mandatory on VM creation
if vm_creation and self.params['guest_id'] is None:
self.module.fail_json(msg="guest_id attribute is mandatory for VM creation")
if self.params['guest_id'] and \
(vm_obj is None or self.params['guest_id'].lower() != vm_obj.summary.config.guestId.lower()):
self.change_detected = True
self.configspec.guestId = self.params['guest_id']
def configure_resource_alloc_info(self, vm_obj):
"""
Function to configure resource allocation information about virtual machine
:param vm_obj: VM object in case of reconfigure, None in case of deploy
:return: None
"""
rai_change_detected = False
memory_allocation = vim.ResourceAllocationInfo()
cpu_allocation = vim.ResourceAllocationInfo()
if 'hardware' in self.params:
if 'mem_limit' in self.params['hardware']:
mem_limit = None
try:
mem_limit = int(self.params['hardware'].get('mem_limit'))
except ValueError:
self.module.fail_json(msg="hardware.mem_limit attribute should be an integer value.")
memory_allocation.limit = mem_limit
if vm_obj is None or memory_allocation.limit != vm_obj.config.memoryAllocation.limit:
rai_change_detected = True
if 'mem_reservation' in self.params['hardware'] or 'memory_reservation' in self.params['hardware']:
mem_reservation = self.params['hardware'].get('mem_reservation')
if mem_reservation is None:
mem_reservation = self.params['hardware'].get('memory_reservation')
try:
mem_reservation = int(mem_reservation)
except ValueError:
self.module.fail_json(msg="hardware.mem_reservation or hardware.memory_reservation should be an integer value.")
memory_allocation.reservation = mem_reservation
if vm_obj is None or \
memory_allocation.reservation != vm_obj.config.memoryAllocation.reservation:
rai_change_detected = True
if 'cpu_limit' in self.params['hardware']:
cpu_limit = None
try:
cpu_limit = int(self.params['hardware'].get('cpu_limit'))
except ValueError:
self.module.fail_json(msg="hardware.cpu_limit attribute should be an integer value.")
cpu_allocation.limit = cpu_limit
if vm_obj is None or cpu_allocation.limit != vm_obj.config.cpuAllocation.limit:
rai_change_detected = True
if 'cpu_reservation' in self.params['hardware']:
cpu_reservation = None
try:
cpu_reservation = int(self.params['hardware'].get('cpu_reservation'))
except ValueError:
self.module.fail_json(msg="hardware.cpu_reservation should be an integer value.")
cpu_allocation.reservation = cpu_reservation
if vm_obj is None or \
cpu_allocation.reservation != vm_obj.config.cpuAllocation.reservation:
rai_change_detected = True
if rai_change_detected:
self.configspec.memoryAllocation = memory_allocation
self.configspec.cpuAllocation = cpu_allocation
self.change_detected = True
def configure_cpu_and_memory(self, vm_obj, vm_creation=False):
# set cpu/memory/etc
if 'hardware' in self.params:
if 'num_cpus' in self.params['hardware']:
try:
num_cpus = int(self.params['hardware']['num_cpus'])
except ValueError:
self.module.fail_json(msg="hardware.num_cpus attribute should be an integer value.")
# check VM power state and cpu hot-add/hot-remove state before re-config VM
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
if not vm_obj.config.cpuHotRemoveEnabled and num_cpus < vm_obj.config.hardware.numCPU:
self.module.fail_json(msg="Configured cpu number is less than the cpu number of the VM, "
"cpuHotRemove is not enabled")
if not vm_obj.config.cpuHotAddEnabled and num_cpus > vm_obj.config.hardware.numCPU:
self.module.fail_json(msg="Configured cpu number is more than the cpu number of the VM, "
"cpuHotAdd is not enabled")
if 'num_cpu_cores_per_socket' in self.params['hardware']:
try:
num_cpu_cores_per_socket = int(self.params['hardware']['num_cpu_cores_per_socket'])
except ValueError:
self.module.fail_json(msg="hardware.num_cpu_cores_per_socket attribute "
"should be an integer value.")
if num_cpus % num_cpu_cores_per_socket != 0:
self.module.fail_json(msg="hardware.num_cpus attribute should be a multiple "
"of hardware.num_cpu_cores_per_socket")
self.configspec.numCoresPerSocket = num_cpu_cores_per_socket
if vm_obj is None or self.configspec.numCoresPerSocket != vm_obj.config.hardware.numCoresPerSocket:
self.change_detected = True
self.configspec.numCPUs = num_cpus
if vm_obj is None or self.configspec.numCPUs != vm_obj.config.hardware.numCPU:
self.change_detected = True
# num_cpu is mandatory for VM creation
elif vm_creation and not self.params['template']:
self.module.fail_json(msg="hardware.num_cpus attribute is mandatory for VM creation")
if 'memory_mb' in self.params['hardware']:
try:
memory_mb = int(self.params['hardware']['memory_mb'])
except ValueError:
self.module.fail_json(msg="Failed to parse hardware.memory_mb value."
" Please refer the documentation and provide"
" correct value.")
# check VM power state and memory hotadd state before re-config VM
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
if vm_obj.config.memoryHotAddEnabled and memory_mb < vm_obj.config.hardware.memoryMB:
self.module.fail_json(msg="Configured memory is less than memory size of the VM, "
"operation is not supported")
elif not vm_obj.config.memoryHotAddEnabled and memory_mb != vm_obj.config.hardware.memoryMB:
self.module.fail_json(msg="memoryHotAdd is not enabled")
self.configspec.memoryMB = memory_mb
if vm_obj is None or self.configspec.memoryMB != vm_obj.config.hardware.memoryMB:
self.change_detected = True
# memory_mb is mandatory for VM creation
elif vm_creation and not self.params['template']:
self.module.fail_json(msg="hardware.memory_mb attribute is mandatory for VM creation")
if 'hotadd_memory' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.memoryHotAddEnabled != bool(self.params['hardware']['hotadd_memory']):
self.module.fail_json(msg="Configure hotadd memory operation is not supported when VM is power on")
self.configspec.memoryHotAddEnabled = bool(self.params['hardware']['hotadd_memory'])
if vm_obj is None or self.configspec.memoryHotAddEnabled != vm_obj.config.memoryHotAddEnabled:
self.change_detected = True
if 'hotadd_cpu' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.cpuHotAddEnabled != bool(self.params['hardware']['hotadd_cpu']):
self.module.fail_json(msg="Configure hotadd cpu operation is not supported when VM is power on")
self.configspec.cpuHotAddEnabled = bool(self.params['hardware']['hotadd_cpu'])
if vm_obj is None or self.configspec.cpuHotAddEnabled != vm_obj.config.cpuHotAddEnabled:
self.change_detected = True
if 'hotremove_cpu' in self.params['hardware']:
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn and \
vm_obj.config.cpuHotRemoveEnabled != bool(self.params['hardware']['hotremove_cpu']):
self.module.fail_json(msg="Configure hotremove cpu operation is not supported when VM is power on")
self.configspec.cpuHotRemoveEnabled = bool(self.params['hardware']['hotremove_cpu'])
if vm_obj is None or self.configspec.cpuHotRemoveEnabled != vm_obj.config.cpuHotRemoveEnabled:
self.change_detected = True
if 'memory_reservation_lock' in self.params['hardware']:
self.configspec.memoryReservationLockedToMax = bool(self.params['hardware']['memory_reservation_lock'])
if vm_obj is None or self.configspec.memoryReservationLockedToMax != vm_obj.config.memoryReservationLockedToMax:
self.change_detected = True
if 'boot_firmware' in self.params['hardware']:
# boot firmware re-config can cause boot issue
if vm_obj is not None:
return
boot_firmware = self.params['hardware']['boot_firmware'].lower()
if boot_firmware not in ('bios', 'efi'):
self.module.fail_json(msg="hardware.boot_firmware value is invalid [%s]."
" Need one of ['bios', 'efi']." % boot_firmware)
self.configspec.firmware = boot_firmware
self.change_detected = True
def configure_cdrom(self, vm_obj):
# Configure the VM CD-ROM
if "cdrom" in self.params and self.params["cdrom"]:
if "type" not in self.params["cdrom"] or self.params["cdrom"]["type"] not in ["none", "client", "iso"]:
self.module.fail_json(msg="cdrom.type is mandatory")
if self.params["cdrom"]["type"] == "iso" and ("iso_path" not in self.params["cdrom"] or not self.params["cdrom"]["iso_path"]):
self.module.fail_json(msg="cdrom.iso_path is mandatory in case cdrom.type is iso")
if vm_obj and vm_obj.config.template:
# Changing CD-ROM settings on a template is not supported
return
cdrom_spec = None
cdrom_device = self.get_vm_cdrom_device(vm=vm_obj)
iso_path = self.params["cdrom"]["iso_path"] if "iso_path" in self.params["cdrom"] else None
if cdrom_device is None:
# Creating new CD-ROM
ide_device = self.get_vm_ide_device(vm=vm_obj)
if ide_device is None:
# Creating new IDE device
ide_device = self.device_helper.create_ide_controller()
self.change_detected = True
self.configspec.deviceChange.append(ide_device)
elif len(ide_device.device) > 3:
self.module.fail_json(msg="hardware.cdrom specified for a VM or template which already has 4 IDE devices of which none are a cdrom")
cdrom_spec = self.device_helper.create_cdrom(ide_ctl=ide_device, cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path)
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
cdrom_spec.device.connectable.connected = (self.params["cdrom"]["type"] != "none")
elif not self.device_helper.is_equal_cdrom(vm_obj=vm_obj, cdrom_device=cdrom_device, cdrom_type=self.params["cdrom"]["type"], iso_path=iso_path):
# Updating an existing CD-ROM
if self.params["cdrom"]["type"] in ["client", "none"]:
cdrom_device.backing = vim.vm.device.VirtualCdrom.RemotePassthroughBackingInfo()
elif self.params["cdrom"]["type"] == "iso":
cdrom_device.backing = vim.vm.device.VirtualCdrom.IsoBackingInfo(fileName=iso_path)
cdrom_device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
cdrom_device.connectable.allowGuestControl = True
cdrom_device.connectable.startConnected = (self.params["cdrom"]["type"] != "none")
if vm_obj and vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
cdrom_device.connectable.connected = (self.params["cdrom"]["type"] != "none")
cdrom_spec = vim.vm.device.VirtualDeviceSpec()
cdrom_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
cdrom_spec.device = cdrom_device
if cdrom_spec:
self.change_detected = True
self.configspec.deviceChange.append(cdrom_spec)
def configure_hardware_params(self, vm_obj):
"""
Function to configure hardware related configuration of virtual machine
Args:
vm_obj: virtual machine object
"""
if 'hardware' in self.params:
if 'max_connections' in self.params['hardware']:
# maxMksConnections == max_connections
self.configspec.maxMksConnections = int(self.params['hardware']['max_connections'])
if vm_obj is None or self.configspec.maxMksConnections != vm_obj.config.maxMksConnections:
self.change_detected = True
if 'nested_virt' in self.params['hardware']:
self.configspec.nestedHVEnabled = bool(self.params['hardware']['nested_virt'])
if vm_obj is None or self.configspec.nestedHVEnabled != bool(vm_obj.config.nestedHVEnabled):
self.change_detected = True
if 'version' in self.params['hardware']:
hw_version_check_failed = False
temp_version = self.params['hardware'].get('version', 10)
try:
temp_version = int(temp_version)
except ValueError:
hw_version_check_failed = True
if temp_version not in range(3, 15):
hw_version_check_failed = True
if hw_version_check_failed:
self.module.fail_json(msg="Failed to set hardware.version '%s' value as valid"
" values range from 3 (ESX 2.x) to 14 (ESXi 6.5 and greater)." % temp_version)
# Hardware version is denoted as "vmx-10"
version = "vmx-%02d" % temp_version
self.configspec.version = version
if vm_obj is None or self.configspec.version != vm_obj.config.version:
self.change_detected = True
if vm_obj is not None:
# VM exists and we need to update the hardware version
current_version = vm_obj.config.version
# current_version = "vmx-10"
version_digit = int(current_version.split("-", 1)[-1])
if temp_version < version_digit:
self.module.fail_json(msg="Current hardware version '%d' which is greater than the specified"
" version '%d'. Downgrading hardware version is"
" not supported. Please specify version greater"
" than the current version." % (version_digit,
temp_version))
new_version = "vmx-%02d" % temp_version
try:
task = vm_obj.UpgradeVM_Task(new_version)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'upgrade'}
except vim.fault.AlreadyUpgraded:
# Don't fail if VM is already upgraded.
pass
if 'virt_based_security' in self.params['hardware']:
host_version = self.select_host().summary.config.product.version
if int(host_version.split('.')[0]) < 6 or (int(host_version.split('.')[0]) == 6 and int(host_version.split('.')[1]) < 7):
self.module.fail_json(msg="ESXi version %s not support VBS." % host_version)
guest_ids = ['windows9_64Guest', 'windows9Server64Guest']
if vm_obj is None:
guestid = self.configspec.guestId
else:
guestid = vm_obj.summary.config.guestId
if guestid not in guest_ids:
self.module.fail_json(msg="Guest '%s' not support VBS." % guestid)
if (vm_obj is None and int(self.configspec.version.split('-')[1]) >= 14) or \
(vm_obj and int(vm_obj.config.version.split('-')[1]) >= 14 and (vm_obj.runtime.powerState == vim.VirtualMachinePowerState.poweredOff)):
self.configspec.flags = vim.vm.FlagInfo()
self.configspec.flags.vbsEnabled = bool(self.params['hardware']['virt_based_security'])
if bool(self.params['hardware']['virt_based_security']):
self.configspec.flags.vvtdEnabled = True
self.configspec.nestedHVEnabled = True
if (vm_obj is None and self.configspec.firmware == 'efi') or \
(vm_obj and vm_obj.config.firmware == 'efi'):
self.configspec.bootOptions = vim.vm.BootOptions()
self.configspec.bootOptions.efiSecureBootEnabled = True
else:
self.module.fail_json(msg="Not support VBS when firmware is BIOS.")
if vm_obj is None or self.configspec.flags.vbsEnabled != vm_obj.config.flags.vbsEnabled:
self.change_detected = True
def get_device_by_type(self, vm=None, type=None):
if vm is None or type is None:
return None
for device in vm.config.hardware.device:
if isinstance(device, type):
return device
return None
def get_vm_cdrom_device(self, vm=None):
return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualCdrom)
def get_vm_ide_device(self, vm=None):
return self.get_device_by_type(vm=vm, type=vim.vm.device.VirtualIDEController)
def get_vm_network_interfaces(self, vm=None):
device_list = []
if vm is None:
return device_list
nw_device_types = (vim.vm.device.VirtualPCNet32, vim.vm.device.VirtualVmxnet2,
vim.vm.device.VirtualVmxnet3, vim.vm.device.VirtualE1000,
vim.vm.device.VirtualE1000e, vim.vm.device.VirtualSriovEthernetCard)
for device in vm.config.hardware.device:
if isinstance(device, nw_device_types):
device_list.append(device)
return device_list
def sanitize_network_params(self):
"""
        Sanitize user-provided network params
Returns: A sanitized list of network params, else fails
"""
network_devices = list()
# Clean up user data here
for network in self.params['networks']:
if 'name' not in network and 'vlan' not in network:
self.module.fail_json(msg="Please specify at least a network name or"
" a VLAN name under VM network list.")
if 'name' in network and self.cache.get_network(network['name']) is None:
self.module.fail_json(msg="Network '%(name)s' does not exist." % network)
elif 'vlan' in network:
dvps = self.cache.get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
for dvp in dvps:
if hasattr(dvp.config.defaultPortConfig, 'vlan') and \
isinstance(dvp.config.defaultPortConfig.vlan.vlanId, int) and \
str(dvp.config.defaultPortConfig.vlan.vlanId) == str(network['vlan']):
network['name'] = dvp.config.name
break
if 'dvswitch_name' in network and \
dvp.config.distributedVirtualSwitch.name == network['dvswitch_name'] and \
dvp.config.name == network['vlan']:
network['name'] = dvp.config.name
break
if dvp.config.name == network['vlan']:
network['name'] = dvp.config.name
break
else:
self.module.fail_json(msg="VLAN '%(vlan)s' does not exist." % network)
if 'type' in network:
if network['type'] not in ['dhcp', 'static']:
self.module.fail_json(msg="Network type '%(type)s' is not a valid parameter."
" Valid parameters are ['dhcp', 'static']." % network)
if network['type'] != 'static' and ('ip' in network or 'netmask' in network):
self.module.fail_json(msg='Static IP information provided for network "%(name)s",'
' but "type" is set to "%(type)s".' % network)
else:
# Type is optional parameter, if user provided IP or Subnet assume
# network type as 'static'
if 'ip' in network or 'netmask' in network:
network['type'] = 'static'
else:
# User wants network type as 'dhcp'
network['type'] = 'dhcp'
if network.get('type') == 'static':
if 'ip' in network and 'netmask' not in network:
self.module.fail_json(msg="'netmask' is required if 'ip' is"
" specified under VM network list.")
if 'ip' not in network and 'netmask' in network:
self.module.fail_json(msg="'ip' is required if 'netmask' is"
" specified under VM network list.")
validate_device_types = ['pcnet32', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e', 'sriov']
if 'device_type' in network and network['device_type'] not in validate_device_types:
self.module.fail_json(msg="Device type specified '%s' is not valid."
" Please specify correct device"
" type from ['%s']." % (network['device_type'],
"', '".join(validate_device_types)))
if 'mac' in network and not PyVmomiDeviceHelper.is_valid_mac_addr(network['mac']):
self.module.fail_json(msg="Device MAC address '%s' is invalid."
" Please provide correct MAC address." % network['mac'])
network_devices.append(network)
return network_devices
def configure_network(self, vm_obj):
        # Ignore empty networks; this permits keeping existing networks when deploying a template or cloning a VM
if len(self.params['networks']) == 0:
return
network_devices = self.sanitize_network_params()
# List current device for Clone or Idempotency
current_net_devices = self.get_vm_network_interfaces(vm=vm_obj)
if len(network_devices) < len(current_net_devices):
self.module.fail_json(msg="Given network device list is lesser than current VM device list (%d < %d). "
"Removing interfaces is not allowed"
% (len(network_devices), len(current_net_devices)))
for key in range(0, len(network_devices)):
nic_change_detected = False
network_name = network_devices[key]['name']
if key < len(current_net_devices) and (vm_obj or self.params['template']):
                # We are editing existing network devices; this happens when we are
                # cloning from a VM or template
nic = vim.vm.device.VirtualDeviceSpec()
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
nic.device = current_net_devices[key]
if ('wake_on_lan' in network_devices[key] and
nic.device.wakeOnLanEnabled != network_devices[key].get('wake_on_lan')):
nic.device.wakeOnLanEnabled = network_devices[key].get('wake_on_lan')
nic_change_detected = True
if ('start_connected' in network_devices[key] and
nic.device.connectable.startConnected != network_devices[key].get('start_connected')):
nic.device.connectable.startConnected = network_devices[key].get('start_connected')
nic_change_detected = True
if ('allow_guest_control' in network_devices[key] and
nic.device.connectable.allowGuestControl != network_devices[key].get('allow_guest_control')):
nic.device.connectable.allowGuestControl = network_devices[key].get('allow_guest_control')
nic_change_detected = True
if nic.device.deviceInfo.summary != network_name:
nic.device.deviceInfo.summary = network_name
nic_change_detected = True
if 'device_type' in network_devices[key]:
device = self.device_helper.get_device(network_devices[key]['device_type'], network_name)
device_class = type(device)
if not isinstance(nic.device, device_class):
self.module.fail_json(msg="Changing the device type is not possible when interface is already present. "
"The failing device type is %s" % network_devices[key]['device_type'])
# Changing mac address has no effect when editing interface
if 'mac' in network_devices[key] and nic.device.macAddress != current_net_devices[key].macAddress:
self.module.fail_json(msg="Changing MAC address has not effect when interface is already present. "
"The failing new MAC address is %s" % nic.device.macAddress)
else:
# Default device type is vmxnet3, VMWare best practice
device_type = network_devices[key].get('device_type', 'vmxnet3')
nic = self.device_helper.create_nic(device_type,
'Network Adapter %s' % (key + 1),
network_devices[key])
nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
nic_change_detected = True
if hasattr(self.cache.get_network(network_name), 'portKeys'):
# VDS switch
pg_obj = None
if 'dvswitch_name' in network_devices[key]:
dvs_name = network_devices[key]['dvswitch_name']
dvs_obj = find_dvs_by_name(self.content, dvs_name)
if dvs_obj is None:
self.module.fail_json(msg="Unable to find distributed virtual switch %s" % dvs_name)
pg_obj = find_dvspg_by_name(dvs_obj, network_name)
if pg_obj is None:
self.module.fail_json(msg="Unable to find distributed port group %s" % network_name)
else:
pg_obj = self.cache.find_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_name)
if (nic.device.backing and
(not hasattr(nic.device.backing, 'port') or
(nic.device.backing.port.portgroupKey != pg_obj.key or
nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid))):
nic_change_detected = True
dvs_port_connection = vim.dvs.PortConnection()
dvs_port_connection.portgroupKey = pg_obj.key
                # If the user specifies a distributed port group that is not associated with the host system on which
                # the virtual machine is going to be deployed, then we get an error. We can infer that there is no
                # association between the given distributed port group and the host system.
host_system = self.params.get('esxi_hostname')
if host_system and host_system not in [host.config.host.name for host in pg_obj.config.distributedVirtualSwitch.config.host]:
self.module.fail_json(msg="It seems that host system '%s' is not associated with distributed"
" virtual portgroup '%s'. Please make sure host system is associated"
" with given distributed virtual portgroup" % (host_system, pg_obj.name))
# TODO: (akasurde) There is no way to find association between resource pool and distributed virtual portgroup
# For now, check if we are able to find distributed virtual switch
if not pg_obj.config.distributedVirtualSwitch:
self.module.fail_json(msg="Failed to find distributed virtual switch which is associated with"
" distributed virtual portgroup '%s'. Make sure hostsystem is associated with"
" the given distributed virtual portgroup." % pg_obj.name)
dvs_port_connection.switchUuid = pg_obj.config.distributedVirtualSwitch.uuid
nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
nic.device.backing.port = dvs_port_connection
elif isinstance(self.cache.get_network(network_name), vim.OpaqueNetwork):
# NSX-T Logical Switch
nic.device.backing = vim.vm.device.VirtualEthernetCard.OpaqueNetworkBackingInfo()
network_id = self.cache.get_network(network_name).summary.opaqueNetworkId
nic.device.backing.opaqueNetworkType = 'nsx.LogicalSwitch'
nic.device.backing.opaqueNetworkId = network_id
nic.device.deviceInfo.summary = 'nsx.LogicalSwitch: %s' % network_id
else:
# vSwitch
if not isinstance(nic.device.backing, vim.vm.device.VirtualEthernetCard.NetworkBackingInfo):
nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
nic_change_detected = True
net_obj = self.cache.get_network(network_name)
if nic.device.backing.network != net_obj:
nic.device.backing.network = net_obj
nic_change_detected = True
if nic.device.backing.deviceName != network_name:
nic.device.backing.deviceName = network_name
nic_change_detected = True
if nic_change_detected:
self.configspec.deviceChange.append(nic)
self.change_detected = True
def configure_vapp_properties(self, vm_obj):
if len(self.params['vapp_properties']) == 0:
return
for x in self.params['vapp_properties']:
if not x.get('id'):
self.module.fail_json(msg="id is required to set vApp property")
new_vmconfig_spec = vim.vApp.VmConfigSpec()
if vm_obj:
# VM exists
# This is primarily for vcsim/integration tests, unset vAppConfig was not seen on my deployments
orig_spec = vm_obj.config.vAppConfig if vm_obj.config.vAppConfig else new_vmconfig_spec
vapp_properties_current = dict((x.id, x) for x in orig_spec.property)
vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
# each property must have a unique key
# init key counter with max value + 1
all_keys = [x.key for x in orig_spec.property]
new_property_index = max(all_keys) + 1 if all_keys else 0
for property_id, property_spec in vapp_properties_to_change.items():
is_property_changed = False
new_vapp_property_spec = vim.vApp.PropertySpec()
if property_id in vapp_properties_current:
if property_spec.get('operation') == 'remove':
new_vapp_property_spec.operation = 'remove'
new_vapp_property_spec.removeKey = vapp_properties_current[property_id].key
is_property_changed = True
else:
# this is 'edit' branch
new_vapp_property_spec.operation = 'edit'
new_vapp_property_spec.info = vapp_properties_current[property_id]
try:
for property_name, property_value in property_spec.items():
if property_name == 'operation':
# operation is not an info object property
# if set to anything other than 'remove' we don't fail
continue
# Updating attributes only if needed
if getattr(new_vapp_property_spec.info, property_name) != property_value:
setattr(new_vapp_property_spec.info, property_name, property_value)
is_property_changed = True
except Exception as e:
msg = "Failed to set vApp property field='%s' and value='%s'. Error: %s" % (property_name, property_value, to_text(e))
self.module.fail_json(msg=msg)
else:
if property_spec.get('operation') == 'remove':
# attempt to delete non-existent property
continue
# this is add new property branch
new_vapp_property_spec.operation = 'add'
property_info = vim.vApp.PropertyInfo()
property_info.classId = property_spec.get('classId')
property_info.instanceId = property_spec.get('instanceId')
property_info.id = property_spec.get('id')
property_info.category = property_spec.get('category')
property_info.label = property_spec.get('label')
property_info.type = property_spec.get('type', 'string')
property_info.userConfigurable = property_spec.get('userConfigurable', True)
property_info.defaultValue = property_spec.get('defaultValue')
property_info.value = property_spec.get('value', '')
property_info.description = property_spec.get('description')
new_vapp_property_spec.info = property_info
new_vapp_property_spec.info.key = new_property_index
new_property_index += 1
is_property_changed = True
if is_property_changed:
new_vmconfig_spec.property.append(new_vapp_property_spec)
else:
# New VM
all_keys = [x.key for x in new_vmconfig_spec.property]
new_property_index = max(all_keys) + 1 if all_keys else 0
vapp_properties_to_change = dict((x['id'], x) for x in self.params['vapp_properties'])
is_property_changed = False
for property_id, property_spec in vapp_properties_to_change.items():
new_vapp_property_spec = vim.vApp.PropertySpec()
# this is add new property branch
new_vapp_property_spec.operation = 'add'
property_info = vim.vApp.PropertyInfo()
property_info.classId = property_spec.get('classId')
property_info.instanceId = property_spec.get('instanceId')
property_info.id = property_spec.get('id')
property_info.category = property_spec.get('category')
property_info.label = property_spec.get('label')
property_info.type = property_spec.get('type', 'string')
property_info.userConfigurable = property_spec.get('userConfigurable', True)
property_info.defaultValue = property_spec.get('defaultValue')
property_info.value = property_spec.get('value', '')
property_info.description = property_spec.get('description')
new_vapp_property_spec.info = property_info
new_vapp_property_spec.info.key = new_property_index
new_property_index += 1
is_property_changed = True
if is_property_changed:
new_vmconfig_spec.property.append(new_vapp_property_spec)
if new_vmconfig_spec.property:
self.configspec.vAppConfig = new_vmconfig_spec
self.change_detected = True
def customize_customvalues(self, vm_obj, config_spec):
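"""Append extraConfig OptionValue entries to config_spec for any 'customvalues' items that differ from the VM's current custom values."""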
if len(self.params['customvalues']) == 0:
return
vm_custom_spec = config_spec
vm_custom_spec.extraConfig = []
changed = False
facts = self.gather_facts(vm_obj)
for kv in self.params['customvalues']:
if 'key' not in kv or 'value' not in kv:
self.module.exit_json(msg="customvalues items required both 'key' and 'value fields.")
# If kv is not kv fetched from facts, change it
if kv['key'] not in facts['customvalues'] or facts['customvalues'][kv['key']] != kv['value']:
option = vim.option.OptionValue()
option.key = kv['key']
option.value = kv['value']
vm_custom_spec.extraConfig.append(option)
changed = True
if changed:
self.change_detected = True
def customize_vm(self, vm_obj):
# User specified customization specification
custom_spec_name = self.params.get('customization_spec')
if custom_spec_name:
cc_mgr = self.content.customizationSpecManager
if cc_mgr.DoesCustomizationSpecExist(name=custom_spec_name):
temp_spec = cc_mgr.GetCustomizationSpec(name=custom_spec_name)
self.customspec = temp_spec.spec
return
else:
self.module.fail_json(msg="Unable to find customization specification"
" '%s' in given configuration." % custom_spec_name)
# Network settings
adaptermaps = []
for network in self.params['networks']:
guest_map = vim.vm.customization.AdapterMapping()
guest_map.adapter = vim.vm.customization.IPSettings()
if 'ip' in network and 'netmask' in network:
guest_map.adapter.ip = vim.vm.customization.FixedIp()
guest_map.adapter.ip.ipAddress = str(network['ip'])
guest_map.adapter.subnetMask = str(network['netmask'])
elif 'type' in network and network['type'] == 'dhcp':
guest_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()
if 'gateway' in network:
guest_map.adapter.gateway = network['gateway']
# On Windows, DNS domain and DNS servers can be set by network interface
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.IPSettings.html
if 'domain' in network:
guest_map.adapter.dnsDomain = network['domain']
elif 'domain' in self.params['customization']:
guest_map.adapter.dnsDomain = self.params['customization']['domain']
if 'dns_servers' in network:
guest_map.adapter.dnsServerList = network['dns_servers']
elif 'dns_servers' in self.params['customization']:
guest_map.adapter.dnsServerList = self.params['customization']['dns_servers']
adaptermaps.append(guest_map)
# Global DNS settings
globalip = vim.vm.customization.GlobalIPSettings()
if 'dns_servers' in self.params['customization']:
globalip.dnsServerList = self.params['customization']['dns_servers']
# TODO: Maybe list the different domains from the interfaces here by default ?
if 'dns_suffix' in self.params['customization']:
dns_suffix = self.params['customization']['dns_suffix']
if isinstance(dns_suffix, list):
globalip.dnsSuffixList = " ".join(dns_suffix)
else:
globalip.dnsSuffixList = dns_suffix
elif 'domain' in self.params['customization']:
globalip.dnsSuffixList = self.params['customization']['domain']
if self.params['guest_id']:
guest_id = self.params['guest_id']
else:
guest_id = vm_obj.summary.config.guestId
# For windows guest OS, use SysPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.Sysprep.html#field_detail
if 'win' in guest_id:
ident = vim.vm.customization.Sysprep()
ident.userData = vim.vm.customization.UserData()
# Setting hostName, orgName and fullName is mandatory, so we set some default when missing
ident.userData.computerName = vim.vm.customization.FixedName()
# computer name will be truncated to 15 characters if using VM name
default_name = self.params['name'].replace(' ', '')
default_name = ''.join([c for c in default_name if c not in string.punctuation])
ident.userData.computerName.name = str(self.params['customization'].get('hostname', default_name[0:15]))
ident.userData.fullName = str(self.params['customization'].get('fullname', 'Administrator'))
ident.userData.orgName = str(self.params['customization'].get('orgname', 'ACME'))
if 'productid' in self.params['customization']:
ident.userData.productId = str(self.params['customization']['productid'])
ident.guiUnattended = vim.vm.customization.GuiUnattended()
if 'autologon' in self.params['customization']:
ident.guiUnattended.autoLogon = self.params['customization']['autologon']
ident.guiUnattended.autoLogonCount = self.params['customization'].get('autologoncount', 1)
if 'timezone' in self.params['customization']:
# Check if the timezone value is an int before proceeding.
ident.guiUnattended.timeZone = self.device_helper.integer_value(
self.params['customization']['timezone'],
'customization.timezone')
ident.identification = vim.vm.customization.Identification()
if self.params['customization'].get('password', '') != '':
ident.guiUnattended.password = vim.vm.customization.Password()
ident.guiUnattended.password.value = str(self.params['customization']['password'])
ident.guiUnattended.password.plainText = True
if 'joindomain' in self.params['customization']:
if 'domainadmin' not in self.params['customization'] or 'domainadminpassword' not in self.params['customization']:
self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use "
"joindomain feature")
ident.identification.domainAdmin = str(self.params['customization']['domainadmin'])
ident.identification.joinDomain = str(self.params['customization']['joindomain'])
ident.identification.domainAdminPassword = vim.vm.customization.Password()
ident.identification.domainAdminPassword.value = str(self.params['customization']['domainadminpassword'])
ident.identification.domainAdminPassword.plainText = True
elif 'joinworkgroup' in self.params['customization']:
ident.identification.joinWorkgroup = str(self.params['customization']['joinworkgroup'])
if 'runonce' in self.params['customization']:
ident.guiRunOnce = vim.vm.customization.GuiRunOnce()
ident.guiRunOnce.commandList = self.params['customization']['runonce']
else:
# FIXME: We have no clue whether this non-Windows OS is actually Linux, hence it might fail!
# For Linux guest OS, use LinuxPrep
# https://pubs.vmware.com/vi3/sdk/ReferenceGuide/vim.vm.customization.LinuxPrep.html
ident = vim.vm.customization.LinuxPrep()
# TODO: Maybe add domain from interface if missing ?
if 'domain' in self.params['customization']:
ident.domain = str(self.params['customization']['domain'])
ident.hostName = vim.vm.customization.FixedName()
hostname = str(self.params['customization'].get('hostname', self.params['name'].split('.')[0]))
# Remove all characters except alphanumerics and minus, as allowed by RFC 952
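# e.g. a hypothetical name 'db_server.example.com' yields hostname 'db_server' and valid_hostname 'dbserver'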
valid_hostname = re.sub(r"[^a-zA-Z0-9\-]", "", hostname)
ident.hostName.name = valid_hostname
self.customspec = vim.vm.customization.Specification()
self.customspec.nicSettingMap = adaptermaps
self.customspec.globalIPSettings = globalip
self.customspec.identity = ident
def get_vm_scsi_controller(self, vm_obj):
# If vm_obj doesn't exist there is no SCSI controller to find
if vm_obj is None:
return None
for device in vm_obj.config.hardware.device:
if self.device_helper.is_scsi_controller(device):
scsi_ctl = vim.vm.device.VirtualDeviceSpec()
scsi_ctl.device = device
return scsi_ctl
return None
def get_configured_disk_size(self, expected_disk_spec):
# what size is it?
if [x for x in expected_disk_spec.keys() if x.startswith('size_') or x == 'size']:
# size, size_tb, size_gb, size_mb, size_kb
if 'size' in expected_disk_spec:
size_regex = re.compile(r'(\d+(?:\.\d+)?)([tgmkTGMK][bB])')
disk_size_m = size_regex.match(expected_disk_spec['size'])
try:
if disk_size_m:
expected = disk_size_m.group(1)
unit = disk_size_m.group(2)
else:
raise ValueError
if re.match(r'\d+\.\d+', expected):
# We found float value in string, let's typecast it
expected = float(expected)
else:
# We found int value in string, let's typecast it
expected = int(expected)
if not expected or not unit:
raise ValueError
except (TypeError, ValueError, NameError):
# Common failure
self.module.fail_json(msg="Failed to parse disk size please review value"
" provided using documentation.")
else:
param = [x for x in expected_disk_spec.keys() if x.startswith('size_')][0]
unit = param.split('_')[-1].lower()
expected = [x[1] for x in expected_disk_spec.items() if x[0].startswith('size_')][0]
expected = int(expected)
disk_units = dict(tb=3, gb=2, mb=1, kb=0)
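# Exponents are relative to kilobytes, e.g. a hypothetical size_gb of 10 converts to 10 * 1024**2 = 10485760 KB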
if unit in disk_units:
unit = unit.lower()
return expected * (1024 ** disk_units[unit])
else:
self.module.fail_json(msg="%s is not a supported unit for disk size."
" Supported units are ['%s']." % (unit,
"', '".join(disk_units.keys())))
# A disk was specified without a size, fail
self.module.fail_json(
msg="No size, size_kb, size_mb, size_gb or size_tb attribute found in disk configuration")
def find_vmdk(self, vmdk_path):
"""
Takes a vsphere datastore path in the format
[datastore_name] path/to/file.vmdk
Returns vsphere file object or raises RuntimeError
"""
datastore_name, vmdk_fullpath, vmdk_filename, vmdk_folder = self.vmdk_disk_path_split(vmdk_path)
datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
if datastore is None:
self.module.fail_json(msg="Failed to find the datastore %s" % datastore_name)
return self.find_vmdk_file(datastore, vmdk_fullpath, vmdk_filename, vmdk_folder)
def add_existing_vmdk(self, vm_obj, expected_disk_spec, diskspec, scsi_ctl):
"""
Adds vmdk file described by expected_disk_spec['filename'], retrieves the file
information and adds the correct spec to self.configspec.deviceChange.
"""
filename = expected_disk_spec['filename']
# if this is a new disk, or the disk file names are different
if (vm_obj and diskspec.device.backing.fileName != filename) or vm_obj is None:
vmdk_file = self.find_vmdk(expected_disk_spec['filename'])
diskspec.device.backing.fileName = expected_disk_spec['filename']
diskspec.device.capacityInKB = VmomiSupport.vmodlTypes['long'](vmdk_file.fileSize / 1024)
diskspec.device.key = -1
self.change_detected = True
self.configspec.deviceChange.append(diskspec)
def configure_disks(self, vm_obj):
# Ignore an empty disk list; this permits keeping the existing disks when deploying from a template or cloning a VM
if len(self.params['disk']) == 0:
return
scsi_ctl = self.get_vm_scsi_controller(vm_obj)
# Create scsi controller only if we are deploying a new VM, not a template or reconfiguring
if vm_obj is None or scsi_ctl is None:
scsi_ctl = self.device_helper.create_scsi_controller(self.get_scsi_type())
self.change_detected = True
self.configspec.deviceChange.append(scsi_ctl)
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)] \
if vm_obj is not None else None
if disks is not None and self.params.get('disk') and len(self.params.get('disk')) < len(disks):
self.module.fail_json(msg="Provided disks configuration has less disks than "
"the target object (%d vs %d)" % (len(self.params.get('disk')), len(disks)))
disk_index = 0
for expected_disk_spec in self.params.get('disk'):
disk_modified = False
# If we are manipulating an existing object which has disks and disk_index is within disks
if vm_obj is not None and disks is not None and disk_index < len(disks):
diskspec = vim.vm.device.VirtualDeviceSpec()
# set the operation to edit so that it knows to keep other settings
diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
diskspec.device = disks[disk_index]
else:
diskspec = self.device_helper.create_scsi_disk(scsi_ctl, disk_index)
disk_modified = True
# increment index for next disk search
disk_index += 1
# index 7 is reserved for the SCSI controller
if disk_index == 7:
disk_index += 1
if 'disk_mode' in expected_disk_spec:
disk_mode = expected_disk_spec.get('disk_mode', 'persistent').lower()
valid_disk_mode = ['persistent', 'independent_persistent', 'independent_nonpersistent']
if disk_mode not in valid_disk_mode:
self.module.fail_json(msg="disk_mode specified is not valid."
" Should be one of ['%s']" % "', '".join(valid_disk_mode))
if (vm_obj and diskspec.device.backing.diskMode != disk_mode) or (vm_obj is None):
diskspec.device.backing.diskMode = disk_mode
disk_modified = True
else:
diskspec.device.backing.diskMode = "persistent"
# is it thin?
if 'type' in expected_disk_spec:
disk_type = expected_disk_spec.get('type', '').lower()
if disk_type == 'thin':
diskspec.device.backing.thinProvisioned = True
elif disk_type == 'eagerzeroedthick':
diskspec.device.backing.eagerlyScrub = True
if 'filename' in expected_disk_spec and expected_disk_spec['filename'] is not None:
self.add_existing_vmdk(vm_obj, expected_disk_spec, diskspec, scsi_ctl)
continue
elif vm_obj is None or self.params['template']:
# We are creating new VM or from Template
# Only create virtual device if not backed by vmdk in original template
if diskspec.device.backing.fileName == '':
diskspec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
# which datastore?
if expected_disk_spec.get('datastore'):
# TODO: This is already handled by the relocation spec,
# but it needs to eventually be handled for all the
# other disks defined
pass
kb = self.get_configured_disk_size(expected_disk_spec)
# VMware does not allow reducing disk sizes
if kb < diskspec.device.capacityInKB:
self.module.fail_json(
msg="Given disk size is smaller than found (%d < %d). Reducing disks is not allowed." %
(kb, diskspec.device.capacityInKB))
if kb != diskspec.device.capacityInKB or disk_modified:
diskspec.device.capacityInKB = kb
self.configspec.deviceChange.append(diskspec)
self.change_detected = True
def select_host(self):
hostsystem = self.cache.get_esx_host(self.params['esxi_hostname'])
if not hostsystem:
self.module.fail_json(msg='Failed to find ESX host "%(esxi_hostname)s"' % self.params)
if hostsystem.runtime.connectionState != 'connected' or hostsystem.runtime.inMaintenanceMode:
self.module.fail_json(msg='ESXi "%(esxi_hostname)s" is in invalid state or in maintenance mode.' % self.params)
return hostsystem
def autoselect_datastore(self):
datastore = None
datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
if datastores is None or len(datastores) == 0:
self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
datastore_freespace = 0
for ds in datastores:
if ds.summary.freeSpace > datastore_freespace:
datastore = ds
datastore_freespace = ds.summary.freeSpace
return datastore
def get_recommended_datastore(self, datastore_cluster_obj=None):
"""
Function to return Storage DRS recommended datastore from datastore cluster
Args:
datastore_cluster_obj: datastore cluster managed object
Returns: Name of recommended datastore from the given datastore cluster
"""
if datastore_cluster_obj is None:
return None
# Check if Datastore Cluster provided by user is SDRS ready
sdrs_status = datastore_cluster_obj.podStorageDrsEntry.storageDrsConfig.podConfig.enabled
if sdrs_status:
# We can get a storage recommendation only if SDRS is enabled on the given datastore cluster
pod_sel_spec = vim.storageDrs.PodSelectionSpec()
pod_sel_spec.storagePod = datastore_cluster_obj
storage_spec = vim.storageDrs.StoragePlacementSpec()
storage_spec.podSelectionSpec = pod_sel_spec
storage_spec.type = 'create'
try:
rec = self.content.storageResourceManager.RecommendDatastores(storageSpec=storage_spec)
rec_action = rec.recommendations[0].action[0]
return rec_action.destination.name
except Exception:
# There is some error so we fall back to general workflow
pass
datastore = None
datastore_freespace = 0
for ds in datastore_cluster_obj.childEntity:
if isinstance(ds, vim.Datastore) and ds.summary.freeSpace > datastore_freespace:
# If datastore field is provided, filter destination datastores
datastore = ds
datastore_freespace = ds.summary.freeSpace
if datastore:
return datastore.name
return None
def select_datastore(self, vm_obj=None):
datastore = None
datastore_name = None
if len(self.params['disk']) != 0:
# TODO: really use the datastore for newly created disks
if 'autoselect_datastore' in self.params['disk'][0] and self.params['disk'][0]['autoselect_datastore']:
datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
datastores = [x for x in datastores if self.cache.get_parent_datacenter(x).name == self.params['datacenter']]
if datastores is None or len(datastores) == 0:
self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
datastore_freespace = 0
for ds in datastores:
if (ds.summary.freeSpace > datastore_freespace) or (ds.summary.freeSpace == datastore_freespace and not datastore):
# If datastore field is provided, filter destination datastores
if 'datastore' in self.params['disk'][0] and \
isinstance(self.params['disk'][0]['datastore'], str) and \
ds.name.find(self.params['disk'][0]['datastore']) < 0:
continue
datastore = ds
datastore_name = datastore.name
datastore_freespace = ds.summary.freeSpace
elif 'datastore' in self.params['disk'][0]:
datastore_name = self.params['disk'][0]['datastore']
# Check if user has provided datastore cluster first
datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
if datastore_cluster:
# If the user specified a datastore cluster, get its recommended datastore
datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
# Check if get_recommended_datastore or user specified datastore exists or not
datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
else:
self.module.fail_json(msg="Either datastore or autoselect_datastore should be provided to select datastore")
if not datastore and self.params['template']:
# use the template's existing DS
disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
if disks:
datastore = disks[0].backing.datastore
datastore_name = datastore.name
# validation
if datastore:
dc = self.cache.get_parent_datacenter(datastore)
if dc.name != self.params['datacenter']:
datastore = self.autoselect_datastore()
datastore_name = datastore.name
if not datastore:
if len(self.params['disk']) != 0 or self.params['template'] is None:
self.module.fail_json(msg="Unable to find the datastore with given parameters."
" This could mean, %s is a non-existent virtual machine and module tried to"
" deploy it as new virtual machine with no disk. Please specify disks parameter"
" or specify template to clone from." % self.params['name'])
self.module.fail_json(msg="Failed to find a matching datastore")
return datastore, datastore_name
def obj_has_parent(self, obj, parent):
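"""Walk up the inventory tree from obj; return True if parent is obj or one of its ancestors, False once the root folder is reached."""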
if obj is None and parent is None:
raise AssertionError()
current_parent = obj
while True:
if current_parent.name == parent.name:
return True
# Check if we have reached till root folder
moid = current_parent._moId
if moid in ['group-d1', 'ha-folder-root']:
return False
current_parent = current_parent.parent
if current_parent is None:
return False
def get_scsi_type(self):
disk_controller_type = "paravirtual"
# set cpu/memory/etc
if 'hardware' in self.params:
if 'scsi' in self.params['hardware']:
if self.params['hardware']['scsi'] in ['buslogic', 'paravirtual', 'lsilogic', 'lsilogicsas']:
disk_controller_type = self.params['hardware']['scsi']
else:
self.module.fail_json(msg="hardware.scsi attribute should be 'paravirtual' or 'lsilogic'")
return disk_controller_type
def find_folder(self, searchpath):
""" Walk inventory objects one position of the searchpath at a time """
# split the searchpath so we can iterate through it
paths = [x.replace('/', '') for x in searchpath.split('/')]
paths_total = len(paths) - 1
position = 0
# recursive walk while looking for next element in searchpath
root = self.content.rootFolder
while root and position <= paths_total:
change = False
if hasattr(root, 'childEntity'):
for child in root.childEntity:
if child.name == paths[position]:
root = child
position += 1
change = True
break
elif isinstance(root, vim.Datacenter):
if hasattr(root, 'vmFolder'):
if root.vmFolder.name == paths[position]:
root = root.vmFolder
position += 1
change = True
else:
root = None
if not change:
root = None
return root
def get_resource_pool(self, cluster=None, host=None, resource_pool=None):
""" Get a resource pool, filter on cluster, esxi_hostname or resource_pool if given """
cluster_name = cluster or self.params.get('cluster', None)
host_name = host or self.params.get('esxi_hostname', None)
resource_pool_name = resource_pool or self.params.get('resource_pool', None)
# get the datacenter object
datacenter = find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
if not datacenter:
self.module.fail_json(msg='Unable to find datacenter "%s"' % self.params['datacenter'])
# if cluster is given, get the cluster object
if cluster_name:
cluster = find_obj(self.content, [vim.ComputeResource], cluster_name, folder=datacenter)
if not cluster:
self.module.fail_json(msg='Unable to find cluster "%s"' % cluster_name)
# if host is given, get the cluster object using the host
elif host_name:
host = find_obj(self.content, [vim.HostSystem], host_name, folder=datacenter)
if not host:
self.module.fail_json(msg='Unable to find host "%s"' % host_name)
cluster = host.parent
else:
cluster = None
# get resource pools limiting search to cluster or datacenter
resource_pool = find_obj(self.content, [vim.ResourcePool], resource_pool_name, folder=cluster or datacenter)
if not resource_pool:
if resource_pool_name:
self.module.fail_json(msg='Unable to find resource_pool "%s"' % resource_pool_name)
else:
self.module.fail_json(msg='Unable to find resource pool, need esxi_hostname, resource_pool, or cluster')
return resource_pool
def deploy_vm(self):
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html
# https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
# FIXME:
# - static IPs
self.folder = self.params.get('folder', None)
if self.folder is None:
self.module.fail_json(msg="Folder is required parameter while deploying new virtual machine")
# Prepend / if it was missing from the folder path, also strip trailing slashes
if not self.folder.startswith('/'):
self.folder = '/%(folder)s' % self.params
self.folder = self.folder.rstrip('/')
datacenter = self.cache.find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
if datacenter is None:
self.module.fail_json(msg='No datacenter named %(datacenter)s was found' % self.params)
dcpath = compile_folder_path_for_object(datacenter)
# Nested folder does not have trailing /
if not dcpath.endswith('/'):
dcpath += '/'
# Check for full path first in case it was already supplied
if (self.folder.startswith(dcpath + self.params['datacenter'] + '/vm') or
self.folder.startswith(dcpath + '/' + self.params['datacenter'] + '/vm')):
fullpath = self.folder
elif self.folder.startswith('/vm/') or self.folder == '/vm':
fullpath = "%s%s%s" % (dcpath, self.params['datacenter'], self.folder)
elif self.folder.startswith('/'):
fullpath = "%s%s/vm%s" % (dcpath, self.params['datacenter'], self.folder)
else:
fullpath = "%s%s/vm/%s" % (dcpath, self.params['datacenter'], self.folder)
f_obj = self.content.searchIndex.FindByInventoryPath(fullpath)
# abort if no strategy was successful
if f_obj is None:
# Add some debugging values in failure.
details = {
'datacenter': datacenter.name,
'datacenter_path': dcpath,
'folder': self.folder,
'full_search_path': fullpath,
}
self.module.fail_json(msg='No folder %s matched in the search path : %s' % (self.folder, fullpath),
details=details)
destfolder = f_obj
if self.params['template']:
vm_obj = self.get_vm_or_template(template_name=self.params['template'])
if vm_obj is None:
self.module.fail_json(msg="Could not find a template named %(template)s" % self.params)
else:
vm_obj = None
# always get a resource_pool
resource_pool = self.get_resource_pool()
# set the destination datastore for VM & disks
if self.params['datastore']:
# Give precedence to datastore value provided by user
# User may want to deploy VM to specific datastore.
datastore_name = self.params['datastore']
# Check if user has provided datastore cluster first
datastore_cluster = self.cache.find_obj(self.content, [vim.StoragePod], datastore_name)
if datastore_cluster:
# If the user specified a datastore cluster, get its recommended datastore
datastore_name = self.get_recommended_datastore(datastore_cluster_obj=datastore_cluster)
# Check if get_recommended_datastore or user specified datastore exists or not
datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
else:
(datastore, datastore_name) = self.select_datastore(vm_obj)
self.configspec = vim.vm.ConfigSpec()
self.configspec.deviceChange = []
self.configure_guestid(vm_obj=vm_obj, vm_creation=True)
self.configure_cpu_and_memory(vm_obj=vm_obj, vm_creation=True)
self.configure_hardware_params(vm_obj=vm_obj)
self.configure_resource_alloc_info(vm_obj=vm_obj)
self.configure_vapp_properties(vm_obj=vm_obj)
self.configure_disks(vm_obj=vm_obj)
self.configure_network(vm_obj=vm_obj)
self.configure_cdrom(vm_obj=vm_obj)
# Find out if we need network customizations (find keys in the dictionary that require customizations)
network_changes = False
for nw in self.params['networks']:
for key in nw:
# We don't need customizations for these keys
if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected'):
network_changes = True
break
if len(self.params['customization']) > 0 or network_changes or self.params.get('customization_spec') is not None:
self.customize_vm(vm_obj=vm_obj)
clonespec = None
clone_method = None
try:
if self.params['template']:
# create the relocation spec
relospec = vim.vm.RelocateSpec()
# Only select specific host when ESXi hostname is provided
if self.params['esxi_hostname']:
relospec.host = self.select_host()
relospec.datastore = datastore
# Convert disk present in template if is set
if self.params['convert']:
for device in vm_obj.config.hardware.device:
if hasattr(device.backing, 'fileName'):
disk_locator = vim.vm.RelocateSpec.DiskLocator()
disk_locator.diskBackingInfo = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
if self.params['convert'] in ['thin']:
disk_locator.diskBackingInfo.thinProvisioned = True
if self.params['convert'] in ['eagerzeroedthick']:
disk_locator.diskBackingInfo.eagerlyScrub = True
if self.params['convert'] in ['thick']:
disk_locator.diskBackingInfo.diskMode = "persistent"
disk_locator.diskId = device.key
disk_locator.datastore = datastore
relospec.disk.append(disk_locator)
# https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
# > pool: For a clone operation from a template to a virtual machine, this argument is required.
relospec.pool = resource_pool
linked_clone = self.params.get('linked_clone')
snapshot_src = self.params.get('snapshot_src', None)
if linked_clone:
if snapshot_src is not None:
relospec.diskMoveType = vim.vm.RelocateSpec.DiskMoveOptions.createNewChildDiskBacking
else:
self.module.fail_json(msg="Parameter 'linked_src' and 'snapshot_src' are"
" required together for linked clone operation.")
clonespec = vim.vm.CloneSpec(template=self.params['is_template'], location=relospec)
if self.customspec:
clonespec.customization = self.customspec
if snapshot_src is not None:
if vm_obj.snapshot is None:
self.module.fail_json(msg="No snapshots present for virtual machine or template [%(template)s]" % self.params)
snapshot = self.get_snapshots_by_name_recursively(snapshots=vm_obj.snapshot.rootSnapshotList,
snapname=snapshot_src)
if len(snapshot) != 1:
self.module.fail_json(msg='virtual machine "%(template)s" does not contain'
' snapshot named "%(snapshot_src)s"' % self.params)
clonespec.snapshot = snapshot[0].snapshot
clonespec.config = self.configspec
clone_method = 'Clone'
try:
task = vm_obj.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
except vim.fault.NoPermission as e:
self.module.fail_json(msg="Failed to clone virtual machine %s to folder %s "
"due to permission issue: %s" % (self.params['name'],
destfolder,
to_native(e.msg)))
self.change_detected = True
else:
# ConfigSpec requires a name for VM creation
self.configspec.name = self.params['name']
self.configspec.files = vim.vm.FileInfo(logDirectory=None,
snapshotDirectory=None,
suspendDirectory=None,
vmPathName="[" + datastore_name + "]")
clone_method = 'CreateVM_Task'
try:
task = destfolder.CreateVM_Task(config=self.configspec, pool=resource_pool)
except vmodl.fault.InvalidRequest as e:
self.module.fail_json(msg="Failed to create virtual machine due to invalid configuration "
"parameter %s" % to_native(e.msg))
except vim.fault.RestrictedVersion as e:
self.module.fail_json(msg="Failed to create virtual machine due to "
"product versioning restrictions: %s" % to_native(e.msg))
self.change_detected = True
self.wait_for_task(task)
except TypeError as e:
self.module.fail_json(msg="TypeError was returned, please ensure to give correct inputs. %s" % to_text(e))
if task.info.state == 'error':
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
# https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
# provide these to the user for debugging
clonespec_json = serialize_spec(clonespec)
configspec_json = serialize_spec(self.configspec)
kwargs = {
'changed': self.change_applied,
'failed': True,
'msg': task.info.error.msg,
'clonespec': clonespec_json,
'configspec': configspec_json,
'clone_method': clone_method
}
return kwargs
else:
# set annotation
vm = task.info.result
if self.params['annotation']:
annotation_spec = vim.vm.ConfigSpec()
annotation_spec.annotation = str(self.params['annotation'])
task = vm.ReconfigVM_Task(annotation_spec)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'annotation'}
if self.params['customvalues']:
vm_custom_spec = vim.vm.ConfigSpec()
self.customize_customvalues(vm_obj=vm, config_spec=vm_custom_spec)
task = vm.ReconfigVM_Task(vm_custom_spec)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customvalues'}
if self.params['wait_for_ip_address'] or self.params['wait_for_customization'] or self.params['state'] in ['poweredon', 'restarted']:
set_vm_power_state(self.content, vm, 'poweredon', force=False)
if self.params['wait_for_ip_address']:
self.wait_for_vm_ip(vm)
if self.params['wait_for_customization']:
is_customization_ok = self.wait_for_customization(vm)
if not is_customization_ok:
vm_facts = self.gather_facts(vm)
return {'changed': self.change_applied, 'failed': True, 'instance': vm_facts, 'op': 'customization'}
vm_facts = self.gather_facts(vm)
return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts}
def get_snapshots_by_name_recursively(self, snapshots, snapname):
snap_obj = []
for snapshot in snapshots:
if snapshot.name == snapname:
snap_obj.append(snapshot)
else:
snap_obj = snap_obj + self.get_snapshots_by_name_recursively(snapshot.childSnapshotList, snapname)
return snap_obj
def reconfigure_vm(self):
self.configspec = vim.vm.ConfigSpec()
self.configspec.deviceChange = []
self.configure_guestid(vm_obj=self.current_vm_obj)
self.configure_cpu_and_memory(vm_obj=self.current_vm_obj)
self.configure_hardware_params(vm_obj=self.current_vm_obj)
self.configure_disks(vm_obj=self.current_vm_obj)
self.configure_network(vm_obj=self.current_vm_obj)
self.configure_cdrom(vm_obj=self.current_vm_obj)
self.customize_customvalues(vm_obj=self.current_vm_obj, config_spec=self.configspec)
self.configure_resource_alloc_info(vm_obj=self.current_vm_obj)
self.configure_vapp_properties(vm_obj=self.current_vm_obj)
if self.params['annotation'] and self.current_vm_obj.config.annotation != self.params['annotation']:
self.configspec.annotation = str(self.params['annotation'])
self.change_detected = True
relospec = vim.vm.RelocateSpec()
if self.params['resource_pool']:
relospec.pool = self.get_resource_pool()
if relospec.pool != self.current_vm_obj.resourcePool:
task = self.current_vm_obj.RelocateVM_Task(spec=relospec)
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'relocate'}
# Only send a VMware task if we see a modification
if self.change_detected:
task = None
try:
task = self.current_vm_obj.ReconfigVM_Task(spec=self.configspec)
except vim.fault.RestrictedVersion as e:
self.module.fail_json(msg="Failed to reconfigure virtual machine due to"
" product versioning restrictions: %s" % to_native(e.msg))
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'reconfig'}
# Rename VM
if self.params['uuid'] and self.params['name'] and self.params['name'] != self.current_vm_obj.config.name:
task = self.current_vm_obj.Rename_Task(self.params['name'])
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'rename'}
# Mark VM as Template
if self.params['is_template'] and not self.current_vm_obj.config.template:
try:
self.current_vm_obj.MarkAsTemplate()
self.change_applied = True
except vmodl.fault.NotSupported as e:
self.module.fail_json(msg="Failed to mark virtual machine [%s] "
"as template: %s" % (self.params['name'], e.msg))
# Mark Template as VM
elif not self.params['is_template'] and self.current_vm_obj.config.template:
resource_pool = self.get_resource_pool()
kwargs = dict(pool=resource_pool)
if self.params.get('esxi_hostname', None):
host_system_obj = self.select_host()
kwargs.update(host=host_system_obj)
try:
self.current_vm_obj.MarkAsVirtualMachine(**kwargs)
self.change_applied = True
except vim.fault.InvalidState as invalid_state:
self.module.fail_json(msg="Virtual machine is not marked"
" as template : %s" % to_native(invalid_state.msg))
except vim.fault.InvalidDatastore as invalid_ds:
self.module.fail_json(msg="Converting template to virtual machine"
" operation cannot be performed on the"
" target datastores: %s" % to_native(invalid_ds.msg))
except vim.fault.CannotAccessVmComponent as cannot_access:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" as operation unable access virtual machine"
" component: %s" % to_native(cannot_access.msg))
except vmodl.fault.InvalidArgument as invalid_argument:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" due to : %s" % to_native(invalid_argument.msg))
except Exception as generic_exc:
self.module.fail_json(msg="Failed to convert template to virtual machine"
" due to generic error : %s" % to_native(generic_exc))
# Automatically update VMWare UUID when converting template to VM.
# This avoids an interactive prompt during VM startup.
uuid_action = [x for x in self.current_vm_obj.config.extraConfig if x.key == "uuid.action"]
if not uuid_action:
uuid_action_opt = vim.option.OptionValue()
uuid_action_opt.key = "uuid.action"
uuid_action_opt.value = "create"
self.configspec.extraConfig.append(uuid_action_opt)
self.change_detected = True
# add customize existing VM after VM re-configure
if 'existing_vm' in self.params['customization'] and self.params['customization']['existing_vm']:
if self.current_vm_obj.config.template:
self.module.fail_json(msg="VM is template, not support guest OS customization.")
if self.current_vm_obj.runtime.powerState != vim.VirtualMachinePowerState.poweredOff:
self.module.fail_json(msg="VM is not in poweroff state, can not do guest OS customization.")
cus_result = self.customize_exist_vm()
if cus_result['failed']:
return cus_result
vm_facts = self.gather_facts(self.current_vm_obj)
return {'changed': self.change_applied, 'failed': False, 'instance': vm_facts}
def customize_exist_vm(self):
task = None
# Find out if we need network customizations (find keys in the dictionary that require customizations)
network_changes = False
for nw in self.params['networks']:
for key in nw:
# We don't need customizations for these keys
if key not in ('device_type', 'mac', 'name', 'vlan', 'type', 'start_connected'):
network_changes = True
break
if len(self.params['customization']) > 1 or network_changes or self.params.get('customization_spec'):
self.customize_vm(vm_obj=self.current_vm_obj)
try:
task = self.current_vm_obj.CustomizeVM_Task(self.customspec)
except vim.fault.CustomizationFault as e:
self.module.fail_json(msg="Failed to customization virtual machine due to CustomizationFault: %s" % to_native(e.msg))
except vim.fault.RuntimeFault as e:
self.module.fail_json(msg="failed to customization virtual machine due to RuntimeFault: %s" % to_native(e.msg))
except Exception as e:
self.module.fail_json(msg="failed to customization virtual machine due to fault: %s" % to_native(e.msg))
self.wait_for_task(task)
if task.info.state == 'error':
return {'changed': self.change_applied, 'failed': True, 'msg': task.info.error.msg, 'op': 'customize_exist'}
if self.params['wait_for_customization']:
set_vm_power_state(self.content, self.current_vm_obj, 'poweredon', force=False)
is_customization_ok = self.wait_for_customization(self.current_vm_obj)
if not is_customization_ok:
return {'changed': self.change_applied, 'failed': True, 'op': 'wait_for_customize_exist'}
return {'changed': self.change_applied, 'failed': False}
def wait_for_task(self, task, poll_interval=1):
"""
Wait for a VMware task to complete. Terminal states are 'error' and 'success'.
Inputs:
- task: the task to wait for
- poll_interval: polling interval to check the task, in seconds
Modifies:
- self.change_applied
"""
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
# https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
# https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py
while task.info.state not in ['error', 'success']:
time.sleep(poll_interval)
self.change_applied = self.change_applied or task.info.state == 'success'
def wait_for_vm_ip(self, vm, poll=100, sleep=5):
ips = None
facts = {}
thispoll = 0
while not ips and thispoll <= poll:
newvm = self.get_vm()
facts = self.gather_facts(newvm)
if facts['ipv4'] or facts['ipv6']:
ips = True
else:
time.sleep(sleep)
thispoll += 1
return facts
def get_vm_events(self, vm, eventTypeIdList):
byEntity = vim.event.EventFilterSpec.ByEntity(entity=vm, recursion="self")
filterSpec = vim.event.EventFilterSpec(entity=byEntity, eventTypeId=eventTypeIdList)
eventManager = self.content.eventManager
return eventManager.QueryEvent(filterSpec)
def wait_for_customization(self, vm, poll=10000, sleep=10):
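"""Poll VM events until guest customization has started and completed; fail the module on a customization failure or if customization never starts."""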
thispoll = 0
while thispoll <= poll:
eventStarted = self.get_vm_events(vm, ['CustomizationStartedEvent'])
if len(eventStarted):
thispoll = 0
while thispoll <= poll:
eventsFinishedResult = self.get_vm_events(vm, ['CustomizationSucceeded', 'CustomizationFailed'])
if len(eventsFinishedResult):
if not isinstance(eventsFinishedResult[0], vim.event.CustomizationSucceeded):
self.module.fail_json(msg='Customization failed with error {0}:\n{1}'.format(
eventsFinishedResult[0]._wsdlName, eventsFinishedResult[0].fullFormattedMessage))
return False
break
else:
time.sleep(sleep)
thispoll += 1
return True
else:
time.sleep(sleep)
thispoll += 1
self.module.fail_json(msg='Waiting for customization to complete timed out.')
return False
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
state=dict(type='str', default='present',
choices=['absent', 'poweredoff', 'poweredon', 'present', 'rebootguest', 'restarted', 'shutdownguest', 'suspended']),
template=dict(type='str', aliases=['template_src']),
is_template=dict(type='bool', default=False),
annotation=dict(type='str', aliases=['notes']),
customvalues=dict(type='list', default=[]),
name=dict(type='str'),
name_match=dict(type='str', choices=['first', 'last'], default='first'),
uuid=dict(type='str'),
use_instance_uuid=dict(type='bool', default=False),
folder=dict(type='str'),
guest_id=dict(type='str'),
disk=dict(type='list', default=[]),
cdrom=dict(type='dict', default={}),
hardware=dict(type='dict', default={}),
force=dict(type='bool', default=False),
datacenter=dict(type='str', default='ha-datacenter'),
esxi_hostname=dict(type='str'),
cluster=dict(type='str'),
wait_for_ip_address=dict(type='bool', default=False),
state_change_timeout=dict(type='int', default=0),
snapshot_src=dict(type='str'),
linked_clone=dict(type='bool', default=False),
networks=dict(type='list', default=[]),
resource_pool=dict(type='str'),
customization=dict(type='dict', default={}, no_log=True),
customization_spec=dict(type='str', default=None),
wait_for_customization=dict(type='bool', default=False),
vapp_properties=dict(type='list', default=[]),
datastore=dict(type='str'),
convert=dict(type='str', choices=['thin', 'thick', 'eagerzeroedthick']),
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['cluster', 'esxi_hostname'],
],
required_one_of=[
['name', 'uuid'],
],
)
result = {'failed': False, 'changed': False}
pyv = PyVmomiHelper(module)
# Check if the VM exists before continuing
vm = pyv.get_vm()
# VM already exists
if vm:
if module.params['state'] == 'absent':
# destroy it
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
current_powerstate=vm.summary.runtime.powerState.lower(),
desired_operation='remove_vm',
)
module.exit_json(**result)
if module.params['force']:
# has to be poweredoff first
set_vm_power_state(pyv.content, vm, 'poweredoff', module.params['force'])
result = pyv.remove_vm(vm)
elif module.params['state'] == 'present':
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
desired_operation='reconfigure_vm',
)
module.exit_json(**result)
result = pyv.reconfigure_vm()
elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted', 'suspended', 'shutdownguest', 'rebootguest']:
if module.check_mode:
result.update(
vm_name=vm.name,
changed=True,
current_powerstate=vm.summary.runtime.powerState.lower(),
desired_operation='set_vm_power_state',
)
module.exit_json(**result)
# set powerstate
tmp_result = set_vm_power_state(pyv.content, vm, module.params['state'], module.params['force'], module.params['state_change_timeout'])
if tmp_result['changed']:
result["changed"] = True
if module.params['state'] in ['poweredon', 'restarted', 'rebootguest'] and module.params['wait_for_ip_address']:
wait_result = wait_for_vm_ip(pyv.content, vm)
if not wait_result:
module.fail_json(msg='Waiting for IP address timed out')
tmp_result['instance'] = wait_result
if not tmp_result["failed"]:
result["failed"] = False
result['instance'] = tmp_result['instance']
if tmp_result["failed"]:
result["failed"] = True
result["msg"] = tmp_result["msg"]
else:
# This should not happen
raise AssertionError()
# VM doesn't exist
else:
if module.params['state'] in ['poweredon', 'poweredoff', 'present', 'restarted', 'suspended']:
if module.check_mode:
result.update(
changed=True,
desired_operation='deploy_vm',
)
module.exit_json(**result)
result = pyv.deploy_vm()
if result['failed']:
module.fail_json(msg='Failed to create a virtual machine : %s' % result['msg'])
if result['failed']:
module.fail_json(**result)
else:
module.exit_json(**result)
if __name__ == '__main__':
main()
|
h3biomed/ansible
|
lib/ansible/modules/cloud/vmware/vmware_guest.py
|
Python
|
gpl-3.0
| 136,014
|
[
"VisIt"
] |
22ffe72b3a500c4840685d536814453d77bd18a7fc1cdb9cc567a60dc820f1eb
|
#!/usr/bin/env python
# Copyright 2010-2013 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import sys
# Add path to Bio
sys.path.append('..')
r"""Read and write BGZF compressed files (the GZIP variant used in BAM).
The SAM/BAM file format (Sequence Alignment/Map) comes in a plain text
format (SAM), and a compressed binary format (BAM). The latter uses a
modified form of gzip compression called BGZF (Blocked GNU Zip Format),
which can be applied to any file format to provide compression with
efficient random access. BGZF is described together with the SAM/BAM
file format at http://samtools.sourceforge.net/SAM1.pdf
Please read the text below about 'virtual offsets' before using BGZF
files for random access.
Aim of this module
------------------
The Python gzip library can be used to read BGZF files, since for
decompression they are just (specialised) gzip files. What this
module aims to facilitate is random access to BGZF files (using the
'virtual offset' idea), and writing BGZF files (which means using
suitably sized gzip blocks and writing the extra 'BC' field in the
gzip headers). As in the gzip library, the zlib library is used
internally.
In addition to being required for random access to and writing of
BAM files, the BGZF format can also be used on other sequential
data (in the sense of one record after another), such as most of
the sequence data formats supported in Bio.SeqIO (like FASTA,
FASTQ, GenBank, etc) or large MAF alignments.
The Bio.SeqIO indexing functions use this module to support BGZF files.
Technical Introduction to BGZF
------------------------------
The gzip file format allows multiple compressed blocks, each of which
could be a stand alone gzip file. As an interesting bonus, this means
you can use Unix "cat" to combine two gzip files into one by
concatenating them. Also, each block can have one of several compression
levels (including uncompressed, which actually takes up a little bit
more space due to the gzip header).
What the BAM designers realised was that while random access to data
stored in traditional gzip files was slow, breaking the file into
gzip blocks would allow fast random access to each block. To access
a particular piece of the decompressed data, you just need to know
which block it starts in (the offset of the gzip block start), and
how far into the (decompressed) contents of the block you need to
read.
One problem with this is finding the gzip block sizes efficiently.
You can do it with a standard gzip file, but it requires every block
to be decompressed -- and that would be rather slow. Additionally
typical gzip files may use very large blocks.
All that differs in BGZF is that the compressed size of each gzip block
is limited to 2^16 bytes, and an extra 'BC' field in the gzip header
records this size. Traditional decompression tools can ignore this,
and unzip the file just like any other gzip file.
The point of this is you can look at the first BGZF block, find out
how big it is from this 'BC' header, and thus seek immediately to
the second block, and so on.
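For illustration only (this helper is not part of Bio.bgzf), a minimal
sketch that walks the compressed block sizes using just the fixed 18 byte
BGZF header might look like this, assuming the 'BC' subfield is the first
extra subfield (as written by this module):

    import struct

    def block_sizes(handle):
        # handle must be a BGZF file opened in binary mode ("rb")
        while True:
            header = handle.read(18)
            if len(header) < 18:
                break
            # BSIZE (total block size minus one) occupies the last two
            # bytes of the fixed 18 byte BGZF header
            bsize = struct.unpack("<H", header[16:18])[0] + 1
            yield bsize
            handle.seek(bsize - 18, 1)  # skip the rest of this block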
The BAM indexing scheme records read positions using a 64 bit
'virtual offset', comprising coffset << 16 | uoffset, where coffset
is the file offset of the BGZF block containing the start of the read
(unsigned integer using up to 64-16 = 48 bits), and uoffset is the
offset within the (decompressed) block (unsigned 16 bit integer).
This limits you to BAM files where the last block starts within the first 2^48
bytes, or 256 petabytes, and the decompressed size of each block
is at most 2^16 bytes, or 64kb. Note that this matches the BGZF
'BC' field size which limits the compressed size of each block to
2^16 bytes, allowing for BAM files to use BGZF with no gzip
compression (useful for intermediate files in memory to reduce
CPU load).
Warning about namespaces
------------------------
It is considered a bad idea to use "from XXX import ``*``" in Python, because
it pollutes the namespace. This is a real issue with Bio.bgzf (and the
standard Python library gzip) because they contain a function called open
i.e. Suppose you do this:
>>> from Bio.bgzf import *
>>> print(open.__module__)
Bio.bgzf
Or,
>>> from gzip import *
>>> print(open.__module__)
gzip
Notice that the open function has been replaced. You can "fix" this if you
need to by importing the built-in open function:
>>> try:
... from __builtin__ import open # Python 2
... except ImportError:
... from builtins import open # Python 3
...
However, what we recommend instead is to use the explicit namespace, e.g.
>>> from Bio import bgzf
>>> print(bgzf.open.__module__)
Bio.bgzf
Example
-------
This is an ordinary GenBank file compressed using BGZF, so it can
be decompressed using gzip,
>>> import gzip
>>> handle = gzip.open("GenBank/NC_000932.gb.bgz", "r")
>>> assert 0 == handle.tell()
>>> line = handle.readline()
>>> assert 80 == handle.tell()
>>> line = handle.readline()
>>> assert 143 == handle.tell()
>>> data = handle.read(70000)
>>> assert 70143 == handle.tell()
>>> handle.close()
We can also access the file using the BGZF reader - but pay
attention to the file offsets which will be explained below:
>>> handle = BgzfReader("GenBank/NC_000932.gb.bgz", "r")
>>> assert 0 == handle.tell()
>>> print(handle.readline().rstrip())
LOCUS NC_000932 154478 bp DNA circular PLN 15-APR-2009
>>> assert 80 == handle.tell()
>>> print(handle.readline().rstrip())
DEFINITION Arabidopsis thaliana chloroplast, complete genome.
>>> assert 143 == handle.tell()
>>> data = handle.read(70000)
>>> assert 987828735 == handle.tell()
>>> print(handle.readline().rstrip())
f="GeneID:844718"
>>> print(handle.readline().rstrip())
CDS complement(join(84337..84771,85454..85843))
>>> offset = handle.seek(make_virtual_offset(55074, 126))
>>> print(handle.readline().rstrip())
68521 tatgtcattc gaaattgtat aaagacaact cctatttaat agagctattt gtgcaagtat
>>> handle.close()
Notice the handle's offset looks different as a BGZF file. This
brings us to the key point about BGZF, which is the block structure:
>>> handle = open("GenBank/NC_000932.gb.bgz", "rb")
>>> for values in BgzfBlocks(handle):
... print("Raw start %i, raw length %i; data start %i, data length %i" % values)
Raw start 0, raw length 15073; data start 0, data length 65536
Raw start 15073, raw length 17857; data start 65536, data length 65536
Raw start 32930, raw length 22144; data start 131072, data length 65536
Raw start 55074, raw length 22230; data start 196608, data length 65536
Raw start 77304, raw length 14939; data start 262144, data length 43478
Raw start 92243, raw length 28; data start 305622, data length 0
>>> handle.close()
In this example the first three blocks are 'full' and hold 65536 bytes
of uncompressed data. The fourth block isn't full and holds 43478 bytes.
Finally there is a special empty fifth block which takes 28 bytes on
disk and serves as an 'end of file' (EOF) marker. If this is missing,
it is possible your BGZF file is incomplete.
By reading ahead 70,000 bytes we moved into the second BGZF block,
and at that point the BGZF virtual offsets start to look different
to a simple offset into the decompressed data as exposed by the gzip
library.
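(In the example above, the BGZF virtual offset 987828735 is (15073 << 16) | 4607,
i.e. 4607 bytes into the decompressed contents of the second block, whose
compressed data starts at raw offset 15073.)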
As an example, consider seeking to the decompressed position 196734.
Since 196734 = 65536 + 65536 + 65536 + 126 = 65536*3 + 126, this
is equivalent to jumping the first three blocks (which in this
specific example are all size 65536 after decompression - which
does not always hold) and starting at byte 126 of the fourth block
(after decompression). For BGZF, we need to know the fourth block's
offset of 55074 and the offset within the block of 126 to get the
BGZF virtual offset.
>>> print(55074 << 16 | 126)
3609329790
>>> print(bgzf.make_virtual_offset(55074, 126))
3609329790
Thus for this BGZF file, decompressed position 196734 corresponds
to the virtual offset 3609329790. However, another BGZF file with
different contents would have compressed more or less efficiently,
so the compressed blocks would be different sizes. What this means
is the mapping between the uncompressed offset and the compressed
virtual offset depends on the BGZF file you are using.
If you are accessing a BGZF file via this module, just use the
handle.tell() method to note the virtual offset of a position you
may later want to return to using handle.seek().
The catch with BGZF virtual offsets is while they can be compared
(which offset comes first in the file), you cannot safely subtract
them to get the size of the data between them, nor add/subtract
a relative offset.
Of course you can parse this file with Bio.SeqIO using BgzfReader,
although there isn't any benefit over using gzip.open(...), unless
you want to index BGZF compressed sequence files:
>>> from Bio import SeqIO
>>> handle = BgzfReader("GenBank/NC_000932.gb.bgz")
>>> record = SeqIO.read(handle, "genbank")
>>> handle.close()
>>> print(record.id)
NC_000932.1
"""
from __future__ import print_function
import sys # to detect when under Python 2
import zlib
import struct
from Bio._py3k import _as_bytes, _as_string
from Bio._py3k import open as _open
__docformat__ = "restructuredtext en"
# For Python 2 can just use: _bgzf_magic = '\x1f\x8b\x08\x04'
# but need to use bytes on Python 3
_bgzf_magic = b"\x1f\x8b\x08\x04"
_bgzf_header = b"\x1f\x8b\x08\x04\x00\x00\x00\x00\x00\xff\x06\x00\x42\x43\x02\x00"
_bgzf_eof = b"\x1f\x8b\x08\x04\x00\x00\x00\x00\x00\xff\x06\x00BC\x02\x00\x1b\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00"
_bytes_BC = b"BC"
def open(filename, mode="rb"):
"""Open a BGZF file for reading, writing or appending."""
if "r" in mode.lower():
return BgzfReader(filename, mode)
elif "w" in mode.lower() or "a" in mode.lower():
return BgzfWriter(filename, mode)
else:
raise ValueError("Bad mode %r" % mode)
def make_virtual_offset(block_start_offset, within_block_offset):
"""Compute a BGZF virtual offset from block start and within block offsets.
The BAM indexing scheme records read positions using a 64 bit
'virtual offset', comprising in C terms:
block_start_offset << 16 | within_block_offset
Here block_start_offset is the file offset of the BGZF block
start (unsigned integer using up to 64-16 = 48 bits), and
within_block_offset within the (decompressed) block (unsigned
16 bit integer).
>>> make_virtual_offset(0, 0)
0
>>> make_virtual_offset(0, 1)
1
>>> make_virtual_offset(0, 2**16 - 1)
65535
>>> make_virtual_offset(0, 2**16)
Traceback (most recent call last):
...
ValueError: Require 0 <= within_block_offset < 2**16, got 65536
>>> 65536 == make_virtual_offset(1, 0)
True
>>> 65537 == make_virtual_offset(1, 1)
True
>>> 131071 == make_virtual_offset(1, 2**16 - 1)
True
>>> 6553600000 == make_virtual_offset(100000, 0)
True
>>> 6553600001 == make_virtual_offset(100000, 1)
True
>>> 6553600010 == make_virtual_offset(100000, 10)
True
>>> make_virtual_offset(2**48, 0)
Traceback (most recent call last):
...
ValueError: Require 0 <= block_start_offset < 2**48, got 281474976710656
"""
if within_block_offset < 0 or within_block_offset >= 65536:
raise ValueError("Require 0 <= within_block_offset < 2**16, got %i" % within_block_offset)
if block_start_offset < 0 or block_start_offset >= 281474976710656:
raise ValueError("Require 0 <= block_start_offset < 2**48, got %i" % block_start_offset)
return (block_start_offset << 16) | within_block_offset
def split_virtual_offset(virtual_offset):
"""Divides a 64-bit BGZF virtual offset into block start & within block offsets.
>>> (100000, 0) == split_virtual_offset(6553600000)
True
>>> (100000, 10) == split_virtual_offset(6553600010)
True
"""
start = virtual_offset >> 16
return start, virtual_offset ^ (start << 16)
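# Editorial illustration (not part of Biopython): round-trip a virtual offset
# through the two helpers above. The block start occupies the high 48 bits of
# the 64 bit value and the within-block offset the low 16 bits, so packing and
# unpacking is lossless. The example numbers are the ones used in the module
# docstring (block at raw offset 55074, byte 126 within that block).
def _example_virtual_offset_roundtrip(block_start=55074, within=126):
    """Return True if make/split of a virtual offset recovers the inputs."""
    voffset = make_virtual_offset(block_start, within)  # (55074 << 16) | 126
    return split_virtual_offset(voffset) == (block_start, within)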
def BgzfBlocks(handle):
"""Low level debugging function to inspect BGZF blocks.
Expects a BGZF compressed file opened in binary read mode using
the builtin open function. Do not use a handle from this bgzf
module or the gzip module's open function which will decompress
the file.
Returns the block start offset (see virtual offsets), the block
length (add these for the start of the next block), and the
decompressed length of the blocks contents (limited to 65536 in
BGZF), as an iterator - one tuple per BGZF block.
>>> try:
... from __builtin__ import open # Python 2
... except ImportError:
... from builtins import open # Python 3
...
>>> handle = open("SamBam/ex1.bam", "rb")
>>> for values in BgzfBlocks(handle):
... print("Raw start %i, raw length %i; data start %i, data length %i" % values)
Raw start 0, raw length 18239; data start 0, data length 65536
Raw start 18239, raw length 18223; data start 65536, data length 65536
Raw start 36462, raw length 18017; data start 131072, data length 65536
Raw start 54479, raw length 17342; data start 196608, data length 65536
Raw start 71821, raw length 17715; data start 262144, data length 65536
Raw start 89536, raw length 17728; data start 327680, data length 65536
Raw start 107264, raw length 17292; data start 393216, data length 63398
Raw start 124556, raw length 28; data start 456614, data length 0
>>> handle.close()
Indirectly we can tell this file came from an old version of
samtools because all the blocks (except the final one and the
dummy empty EOF marker block) are 65536 bytes. Later versions
avoid splitting a read between two blocks, and give the header
its own block (useful to speed up replacing the header). You
can see this in ex1_refresh.bam created using samtools 0.1.18:
samtools view -b ex1.bam > ex1_refresh.bam
>>> handle = open("SamBam/ex1_refresh.bam", "rb")
>>> for values in BgzfBlocks(handle):
... print("Raw start %i, raw length %i; data start %i, data length %i" % values)
Raw start 0, raw length 53; data start 0, data length 38
Raw start 53, raw length 18195; data start 38, data length 65434
Raw start 18248, raw length 18190; data start 65472, data length 65409
Raw start 36438, raw length 18004; data start 130881, data length 65483
Raw start 54442, raw length 17353; data start 196364, data length 65519
Raw start 71795, raw length 17708; data start 261883, data length 65411
Raw start 89503, raw length 17709; data start 327294, data length 65466
Raw start 107212, raw length 17390; data start 392760, data length 63854
Raw start 124602, raw length 28; data start 456614, data length 0
>>> handle.close()
The above example has no embedded SAM header (thus the first block
is very small at just 38 bytes of decompressed data), while the next
example does (a larger block of 103 bytes). Notice that the rest of
the blocks show the same sizes (they contain the same read data):
>>> handle = open("SamBam/ex1_header.bam", "rb")
>>> for values in BgzfBlocks(handle):
... print("Raw start %i, raw length %i; data start %i, data length %i" % values)
Raw start 0, raw length 104; data start 0, data length 103
Raw start 104, raw length 18195; data start 103, data length 65434
Raw start 18299, raw length 18190; data start 65537, data length 65409
Raw start 36489, raw length 18004; data start 130946, data length 65483
Raw start 54493, raw length 17353; data start 196429, data length 65519
Raw start 71846, raw length 17708; data start 261948, data length 65411
Raw start 89554, raw length 17709; data start 327359, data length 65466
Raw start 107263, raw length 17390; data start 392825, data length 63854
Raw start 124653, raw length 28; data start 456679, data length 0
>>> handle.close()
"""
data_start = 0
while True:
start_offset = handle.tell()
# This may raise StopIteration which is perfect here
block_length, data = _load_bgzf_block(handle)
data_len = len(data)
yield start_offset, block_length, data_start, data_len
data_start += data_len
def _load_bgzf_block(handle, text_mode=False):
"""Internal function to load the next BGZF function (PRIVATE)."""
magic = handle.read(4)
if not magic:
# End of file
raise StopIteration
if magic != _bgzf_magic:
raise ValueError(r"A BGZF (e.g. a BAM file) block should start with "
r"%r, not %r; handle.tell() now says %r"
% (_bgzf_magic, magic, handle.tell()))
gzip_mod_time, gzip_extra_flags, gzip_os, extra_len = \
struct.unpack("<LBBH", handle.read(8))
block_size = None
x_len = 0
while x_len < extra_len:
subfield_id = handle.read(2)
subfield_len = struct.unpack("<H", handle.read(2))[0] # uint16_t
subfield_data = handle.read(subfield_len)
x_len += subfield_len + 4
if subfield_id == _bytes_BC:
assert subfield_len == 2, "Wrong BC payload length"
assert block_size is None, "Two BC subfields?"
block_size = struct.unpack("<H", subfield_data)[0] + 1 # uint16_t
assert x_len == extra_len, (x_len, extra_len)
assert block_size is not None, "Missing BC, this isn't a BGZF file!"
# Now comes the compressed data, CRC, and length of uncompressed data.
deflate_size = block_size - 1 - extra_len - 19
d = zlib.decompressobj(-15) # Negative window size means no headers
data = d.decompress(handle.read(deflate_size)) + d.flush()
expected_crc = handle.read(4)
expected_size = struct.unpack("<I", handle.read(4))[0]
assert expected_size == len(data), \
"Decompressed to %i, not %i" % (len(data), expected_size)
# Should cope with a mix of Python platforms...
crc = zlib.crc32(data)
if crc < 0:
crc = struct.pack("<i", crc)
else:
crc = struct.pack("<I", crc)
assert expected_crc == crc, \
"CRC is %s, not %s" % (crc, expected_crc)
if text_mode:
return block_size, _as_string(data)
else:
return block_size, data
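# Editorial illustration (not part of Biopython): the BSIZE value parsed above
# is stored in the gzip extra field, so the total on-disk size of a BGZF block
# can be read from its first 18 bytes without decompressing anything. This
# assumes the 'BC' subfield is the first (and only) extra subfield, which holds
# for blocks written with the fixed header used by this module.
def _example_peek_block_size(first_18_bytes):
    """Return the raw (compressed) length of a BGZF block from its header."""
    assert first_18_bytes[:4] == _bgzf_magic, "Not a BGZF block header"
    # Bytes 16-17 hold BSIZE - 1 as a little endian unsigned 16 bit integer.
    return struct.unpack("<H", first_18_bytes[16:18])[0] + 1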
class BgzfReader(object):
r"""BGZF reader, acts like a read only handle but seek/tell differ.
Let's use the BgzfBlocks function to have a peek at the BGZF blocks
in an example BAM file,
>>> try:
... from __builtin__ import open # Python 2
... except ImportError:
... from builtins import open # Python 3
...
>>> handle = open("SamBam/ex1.bam", "rb")
>>> for values in BgzfBlocks(handle):
... print("Raw start %i, raw length %i; data start %i, data length %i" % values)
Raw start 0, raw length 18239; data start 0, data length 65536
Raw start 18239, raw length 18223; data start 65536, data length 65536
Raw start 36462, raw length 18017; data start 131072, data length 65536
Raw start 54479, raw length 17342; data start 196608, data length 65536
Raw start 71821, raw length 17715; data start 262144, data length 65536
Raw start 89536, raw length 17728; data start 327680, data length 65536
Raw start 107264, raw length 17292; data start 393216, data length 63398
Raw start 124556, raw length 28; data start 456614, data length 0
>>> handle.close()
Now let's see how to use this block information to jump to
specific parts of the decompressed BAM file:
>>> handle = BgzfReader("SamBam/ex1.bam", "rb")
>>> assert 0 == handle.tell()
>>> magic = handle.read(4)
>>> assert 4 == handle.tell()
So far nothing so strange, we got the magic marker used at the
start of a decompressed BAM file, and the handle position makes
sense. Now however, let's jump to the end of this block and 4
bytes into the next block by reading 65536 bytes,
>>> data = handle.read(65536)
>>> len(data)
65536
>>> assert 1195311108 == handle.tell()
Expecting 4 + 65536 = 65540 were you? Well this is a BGZF 64-bit
virtual offset, which means:
>>> split_virtual_offset(1195311108)
(18239, 4)
You should spot 18239 as the start of the second BGZF block, while
the 4 is the offset into this block. See also make_virtual_offset,
>>> make_virtual_offset(18239, 4)
1195311108
Let's jump back to almost the start of the file,
>>> make_virtual_offset(0, 2)
2
>>> handle.seek(2)
2
>>> handle.close()
Note that you can use the max_cache argument to limit the number of
BGZF blocks cached in memory. The default is 100, and since each
block can be up to 64kb, the default cache could take up to 6MB of
RAM. The cache is not important for reading through the file in one
pass, but is important for improving performance of random access.
"""
def __init__(self, filename=None, mode="r", fileobj=None, max_cache=100):
# TODO - Assuming we can seek, check for 28 bytes EOF empty block
# and if missing warn about possible truncation (as in samtools)?
if max_cache < 1:
raise ValueError("Use max_cache with a minimum of 1")
# Must open the BGZF file in binary mode, but we may want to
# treat the contents as either text or binary (unicode or
# bytes under Python 3)
if fileobj:
assert filename is None
handle = fileobj
assert "b" in handle.mode.lower()
else:
if "w" in mode.lower() \
or "a" in mode.lower():
raise ValueError("Must use read mode (default), not write or append mode")
handle = _open(filename, "rb")
self._text = "b" not in mode.lower()
if self._text:
self._newline = "\n"
else:
self._newline = b"\n"
self._handle = handle
self.max_cache = max_cache
self._buffers = {}
self._block_start_offset = None
self._block_raw_length = None
self._load_block(handle.tell())
def _load_block(self, start_offset=None):
if start_offset is None:
# If the file is being read sequentially, then _handle.tell()
# should be pointing at the start of the next block.
# However, if seek has been used, we can't assume that.
start_offset = self._block_start_offset + self._block_raw_length
if start_offset == self._block_start_offset:
self._within_block_offset = 0
return
elif start_offset in self._buffers:
# Already in cache
self._buffer, self._block_raw_length = self._buffers[start_offset]
self._within_block_offset = 0
self._block_start_offset = start_offset
return
# Must hit the disk... first check cache limits,
while len(self._buffers) >= self.max_cache:
# TODO - Implement LRU cache removal?
self._buffers.popitem()
# Now load the block
handle = self._handle
if start_offset is not None:
handle.seek(start_offset)
self._block_start_offset = handle.tell()
try:
block_size, self._buffer = _load_bgzf_block(handle, self._text)
except StopIteration:
# EOF
block_size = 0
if self._text:
self._buffer = ""
else:
self._buffer = b""
self._within_block_offset = 0
self._block_raw_length = block_size
# Finally save the block in our cache,
self._buffers[self._block_start_offset] = self._buffer, block_size
def tell(self):
"""Returns a 64-bit unsigned BGZF virtual offset."""
if 0 < self._within_block_offset == len(self._buffer):
# Special case where we're right at the end of a (non empty) block.
# For non-maximal blocks could give two possible virtual offsets,
# but for a maximal block can't use 65536 as the within block
# offset. Therefore for consistency, use the next block and a
# within block offset of zero.
return (self._block_start_offset + self._block_raw_length) << 16
else:
# return make_virtual_offset(self._block_start_offset,
# self._within_block_offset)
# TODO - Include bounds checking as in make_virtual_offset?
return (self._block_start_offset << 16) | self._within_block_offset
def seek(self, virtual_offset):
"""Seek to a 64-bit unsigned BGZF virtual offset."""
# Do this inline to avoid a function call,
# start_offset, within_block = split_virtual_offset(virtual_offset)
start_offset = virtual_offset >> 16
within_block = virtual_offset ^ (start_offset << 16)
if start_offset != self._block_start_offset:
# Don't need to load the block if already there
# (this avoids a function call since _load_block would do nothing)
self._load_block(start_offset)
assert start_offset == self._block_start_offset
if within_block > len(self._buffer) \
and not (within_block == 0 and len(self._buffer)==0):
raise ValueError("Within offset %i but block size only %i"
% (within_block, len(self._buffer)))
self._within_block_offset = within_block
# assert virtual_offset == self.tell(), \
# "Did seek to %i (%i, %i), but tell says %i (%i, %i)" \
# % (virtual_offset, start_offset, within_block,
# self.tell(), self._block_start_offset, self._within_block_offset)
return virtual_offset
def read(self, size=-1):
if size < 0:
raise NotImplementedError("Don't be greedy, that could be massive!")
elif size == 0:
if self._text:
return ""
else:
return b""
elif self._within_block_offset + size <= len(self._buffer):
# This may leave us right at the end of a block
# (lazy loading, don't load the next block unless we have to)
data = self._buffer[self._within_block_offset:self._within_block_offset + size]
self._within_block_offset += size
assert data # Must be at least 1 byte
return data
else:
data = self._buffer[self._within_block_offset:]
size -= len(data)
self._load_block() # will reset offsets
# TODO - Test with corner case of an empty block followed by
# a non-empty block
if not self._buffer:
return data # EOF
elif size:
# TODO - Avoid recursion
return data + self.read(size)
else:
# Only needed the end of the last block
return data
def readline(self):
i = self._buffer.find(self._newline, self._within_block_offset)
# Three cases to consider,
if i==-1:
# No newline, need to read in more data
data = self._buffer[self._within_block_offset:]
self._load_block() # will reset offsets
if not self._buffer:
return data # EOF
else:
# TODO - Avoid recursion
return data + self.readline()
elif i + 1 == len(self._buffer):
# Found new line, but right at end of block (SPECIAL)
data = self._buffer[self._within_block_offset:]
# Must now load the next block to ensure tell() works
self._load_block() # will reset offsets
assert data
return data
else:
# Found new line, not at end of block (easy case, no IO)
data = self._buffer[self._within_block_offset:i + 1]
self._within_block_offset = i + 1
# assert data.endswith(self._newline)
return data
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
if sys.version_info[0] < 3:
def next(self):
"""Python 2 style alias for Python 3 style __next__ method."""
return self.__next__()
def __iter__(self):
return self
def close(self):
self._handle.close()
self._buffer = None
self._block_start_offset = None
self._buffers = None
def seekable(self):
return True
def isatty(self):
return False
def fileno(self):
return self._handle.fileno()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
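# Editorial usage sketch (not part of Biopython): record a position with
# tell() while reading and jump back to it later with seek(). The file name
# and max_cache value here are placeholders; max_cache simply bounds how many
# decompressed 64kb blocks are kept in memory for random access.
def _example_reader_seek(path="SamBam/ex1.bam"):
    handle = BgzfReader(path, "rb", max_cache=10)
    first = handle.read(4)        # e.g. the BAM magic bytes
    voffset = handle.tell()       # a 64 bit BGZF virtual offset
    handle.read(100)              # read ahead somewhere else
    handle.seek(voffset)          # jump straight back
    again = handle.read(4)
    handle.close()
    return first, voffset, again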
class BgzfWriter(object):
def __init__(self, filename=None, mode="w", fileobj=None, compresslevel=6):
if fileobj:
assert filename is None
handle = fileobj
else:
if "w" not in mode.lower() \
and "a" not in mode.lower():
raise ValueError("Must use write or append mode, not %r" % mode)
if "a" in mode.lower():
handle = _open(filename, "ab")
else:
handle = _open(filename, "wb")
self._text = "b" not in mode.lower()
self._handle = handle
self._buffer = b""
self.compresslevel = compresslevel
def _write_block(self, block):
# print("Saving %i bytes" % len(block))
start_offset = self._handle.tell()
assert len(block) <= 65536
# A negative window size means no gzip/zlib headers; -15 is what samtools uses
c = zlib.compressobj(self.compresslevel,
zlib.DEFLATED,
-15,
zlib.DEF_MEM_LEVEL,
0)
compressed = c.compress(block) + c.flush()
del c
assert len(compressed) < 65536, "TODO - Didn't compress enough, try less data in this block"
# Mask the CRC so it packs as an unsigned 32 bit value on all Python versions
crc = struct.pack("<I", zlib.crc32(block) & 0xffffffff)
bsize = struct.pack("<H", len(compressed) + 25)  # BSIZE field stores the total block size minus one
uncompressed_length = struct.pack("<I", len(block))
# Fixed 16 bytes,
# gzip magic bytes (4) mod time (4),
# gzip flag (1), os (1), extra length which is six (2),
# sub field which is BC (2), sub field length of two (2),
# Variable data,
# 2 bytes: block length as BC sub field (2)
# X bytes: the data
# 8 bytes: crc (4), uncompressed data length (4)
data = _bgzf_header + bsize + compressed + crc + uncompressed_length
self._handle.write(data)
def write(self, data):
# TODO - Check bytes vs unicode
data = _as_bytes(data)
# block_size = 2**16 = 65536
data_len = len(data)
if len(self._buffer) + data_len < 65536:
# print("Cached %r" % data)
self._buffer += data
return
else:
# print("Got %r, writing out some data..." % data)
self._buffer += data
while len(self._buffer) >= 65536:
self._write_block(self._buffer[:65536])
self._buffer = self._buffer[65536:]
def flush(self):
while len(self._buffer) >= 65536:
self._write_block(self._buffer[:65535])
self._buffer = self._buffer[65535:]
self._write_block(self._buffer)
self._buffer = b""
self._handle.flush()
def close(self):
"""Flush data, write 28 bytes empty BGZF EOF marker, and close the BGZF file."""
if self._buffer:
self.flush()
# samtools will look for a magic EOF marker, just a 28 byte empty BGZF block,
# and if it is missing warns the BAM file may be truncated. In addition to
# samtools writing this block, so too does bgzip - so we should too.
self._handle.write(_bgzf_eof)
self._handle.flush()
self._handle.close()
def tell(self):
"""Returns a BGZF 64-bit virtual offset."""
return make_virtual_offset(self._handle.tell(), len(self._buffer))
def seekable(self):
# Not seekable, but we do support tell...
return False
def isatty(self):
return False
def fileno(self):
return self._handle.fileno()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
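# Editorial usage sketch (not part of Biopython): write a small BGZF file and
# check its framing. The output path is a placeholder. close() flushes the
# buffer and appends the 28 byte empty EOF block, so a well formed file starts
# with the BGZF magic bytes and ends with the EOF marker.
def _example_writer(path="example.bgz"):
    writer = BgzfWriter(path, "wb")
    writer.write(b"ACGT" * 1000)   # under 64kb, so buffered until close()
    writer.close()
    with _open(path, "rb") as raw:
        data = raw.read()
    return data.startswith(_bgzf_magic) and data.endswith(_bgzf_eof)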
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
print("Call this with no arguments and pipe uncompressed data in on stdin")
print("and it will produce BGZF compressed data on stdout. e.g.")
print("")
print("./bgzf.py < example.fastq > example.fastq.bgz")
print("")
print("The extension convention of *.bgz is to distinugish these from *.gz")
print("used for standard gzipped files without the block structure of BGZF.")
print("You can use the standard gunzip command to decompress BGZF files,")
print("if it complains about the extension try something like this:")
print("")
print("cat example.fastq.bgz | gunzip > example.fastq")
print("")
print("See also the tool bgzip that comes with samtools")
sys.exit(0)
sys.stderr.write("Producing BGZF output from stdin...\n")
w = BgzfWriter(fileobj=sys.stdout)
while True:
data = sys.stdin.read(65536)
w.write(data)
if not data:
break
# Calling close will write an empty BGZF block as the EOF marker:
w.close()
sys.stderr.write("BGZF data produced\n")
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/bgzf.py
|
Python
|
gpl-2.0
| 34,138
|
[
"Biopython"
] |
da5b4f393afd0dc622f77541089201abb98a22d2882792c7b6734e7924d2c122
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from os.path import sep, expanduser
from kivy.logger import Logger
from ORCA.utils.Platform import OS_ToPath
from ORCA.utils.Path import cPath
def GetUserDownloadsDataPath() -> cPath:
""" returns the path to the download folder """
oRetPath = cPath(OS_ToPath(expanduser('~') + sep + 'Downloads'))
Logger.debug("Download Folder = "+oRetPath.string)
if oRetPath.Exists():
return oRetPath
Logger.error("Downloadpath not valid:"+oRetPath.string)
return cPath('')
#todo: enable as soon as we can use the new toolchain
'''
from plyer import storagepath
from kivy.logger import Logger
from ORCA.utils.Path import cPath
def GetUserDownloadsDataPath():
""" returns the path to the download folder """
uRetPath=u"/"
try:
uRetPath = storagepath.get_downloads_dir()
Logger.debug("Android Download Folder = "+uRetPath)
except Exception as e:
Logger.error("GetUserDownloadsDataPath for Android failed:"+str(e))
oRetPath = cPath(uRetPath)
if not oRetPath.IsDir():
Logger.error("Android Download path not valid:" + oRetPath.string)
return oRetPath
'''
|
thica/ORCA-Remote
|
src/ORCA/utils/Platform/generic/generic_GetUserDownloadsDataPath.py
|
Python
|
gpl-3.0
| 2,102
|
[
"ORCA"
] |
2f2f437179a110e75f0c2e6562dfdd88fd6b7872164aa74737ecdd442ede029f
|
#
# This file is a part of KNOSSOS.
#
# (C) Copyright 2007-2011
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V.
#
# KNOSSOS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 of
# the License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
#
# For further information, visit http://www.knossostool.org or contact
# Joergen.Kornfeld@mpimf-heidelberg.mpg.de or
# Fabian.Svara@mpimf-heidelberg.mpg.de
#
import re, glob, os, sys, shutil
xReg = re.compile(r'_x(\d*)')
yReg = re.compile(r'_y(\d*)')
zReg = re.compile(r'_z(\d*)')
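# Editorial example (illustrative file name): a cube file named like
#     experiment_x0012_y0003_z0027.raw
# matches the three patterns above, so the loop below moves it into the
# nested directory x0012/y0003/z0027/.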
files = glob.glob(os.getcwd() + "/*.raw")
files = files + glob.glob(os.getcwd() + "/*.overlay")
for file in files:
try:
x = int(xReg.search(file).groups()[0])
y = int(yReg.search(file).groups()[0])
z = int(zReg.search(file).groups()[0])
except AttributeError:
print("Incorrectly formatted .raw file: " + file)
continue
newDir = os.path.abspath('x%04d/y%04d/z%04d/' % (x, y, z))
try:
os.makedirs(os.path.normpath(newDir))
except OSError:
pass
print(os.path.normpath(newDir))
shutil.move(file, os.path.normpath(newDir + '/'))
|
thorbenk/knossos-svn
|
tools/flatToNested.py
|
Python
|
gpl-2.0
| 1,649
|
[
"VisIt"
] |
506365ee9e88d78c2aa7c9d2be84d26e62eaed6ebf12cebbdb09744f5fae277f
|
"""
.. _statsrefmanual:
==========================================
Statistical functions (:mod:`scipy.stats`)
==========================================
.. currentmodule:: scipy.stats
This module contains a large number of probability distributions,
summary and frequency statistics, correlation functions and statistical
tests, masked statistics, kernel density estimation, quasi-Monte Carlo
functionality, and more.
Statistics is a very large area, and there are topics that are out of scope
for SciPy and are covered by other packages. Some of the most important ones
are:
- `statsmodels <https://www.statsmodels.org/stable/index.html>`__:
regression, linear models, time series analysis, extensions to topics
also covered by ``scipy.stats``.
- `Pandas <https://pandas.pydata.org/>`__: tabular data, time series
functionality, interfaces to other statistical languages.
- `PyMC <https://docs.pymc.io/>`__: Bayesian statistical
modeling, probabilistic machine learning.
- `scikit-learn <https://scikit-learn.org/>`__: classification, regression,
model selection.
- `Seaborn <https://seaborn.pydata.org/>`__: statistical data visualization.
- `rpy2 <https://rpy2.github.io/>`__: Python to R bridge.
Probability distributions
=========================
Each univariate distribution is an instance of a subclass of `rv_continuous`
(`rv_discrete` for discrete distributions):
.. autosummary::
:toctree: generated/
rv_continuous
rv_discrete
rv_histogram
Continuous distributions
------------------------
.. autosummary::
:toctree: generated/
alpha -- Alpha
anglit -- Anglit
arcsine -- Arcsine
argus -- Argus
beta -- Beta
betaprime -- Beta Prime
bradford -- Bradford
burr -- Burr (Type III)
burr12 -- Burr (Type XII)
cauchy -- Cauchy
chi -- Chi
chi2 -- Chi-squared
cosine -- Cosine
crystalball -- Crystalball
dgamma -- Double Gamma
dweibull -- Double Weibull
erlang -- Erlang
expon -- Exponential
exponnorm -- Exponentially Modified Normal
exponweib -- Exponentiated Weibull
exponpow -- Exponential Power
f -- F (Snedecor F)
fatiguelife -- Fatigue Life (Birnbaum-Saunders)
fisk -- Fisk
foldcauchy -- Folded Cauchy
foldnorm -- Folded Normal
genlogistic -- Generalized Logistic
gennorm -- Generalized normal
genpareto -- Generalized Pareto
genexpon -- Generalized Exponential
genextreme -- Generalized Extreme Value
gausshyper -- Gauss Hypergeometric
gamma -- Gamma
gengamma -- Generalized gamma
genhalflogistic -- Generalized Half Logistic
genhyperbolic -- Generalized Hyperbolic
geninvgauss -- Generalized Inverse Gaussian
gilbrat -- Gilbrat
gompertz -- Gompertz (Truncated Gumbel)
gumbel_r -- Right Sided Gumbel, Log-Weibull, Fisher-Tippett, Extreme Value Type I
gumbel_l -- Left Sided Gumbel, etc.
halfcauchy -- Half Cauchy
halflogistic -- Half Logistic
halfnorm -- Half Normal
halfgennorm -- Generalized Half Normal
hypsecant -- Hyperbolic Secant
invgamma -- Inverse Gamma
invgauss -- Inverse Gaussian
invweibull -- Inverse Weibull
johnsonsb -- Johnson SB
johnsonsu -- Johnson SU
kappa4 -- Kappa 4 parameter
kappa3 -- Kappa 3 parameter
ksone -- Distribution of Kolmogorov-Smirnov one-sided test statistic
kstwo -- Distribution of Kolmogorov-Smirnov two-sided test statistic
kstwobign -- Limiting Distribution of scaled Kolmogorov-Smirnov two-sided test statistic.
laplace -- Laplace
laplace_asymmetric -- Asymmetric Laplace
levy -- Levy
levy_l
levy_stable
logistic -- Logistic
loggamma -- Log-Gamma
loglaplace -- Log-Laplace (Log Double Exponential)
lognorm -- Log-Normal
loguniform -- Log-Uniform
lomax -- Lomax (Pareto of the second kind)
maxwell -- Maxwell
mielke -- Mielke's Beta-Kappa
moyal -- Moyal
nakagami -- Nakagami
ncx2 -- Non-central chi-squared
ncf -- Non-central F
nct -- Non-central Student's T
norm -- Normal (Gaussian)
norminvgauss -- Normal Inverse Gaussian
pareto -- Pareto
pearson3 -- Pearson type III
powerlaw -- Power-function
powerlognorm -- Power log normal
powernorm -- Power normal
rdist -- R-distribution
rayleigh -- Rayleigh
rice -- Rice
recipinvgauss -- Reciprocal Inverse Gaussian
semicircular -- Semicircular
skewcauchy -- Skew Cauchy
skewnorm -- Skew normal
studentized_range -- Studentized Range
t -- Student's T
trapezoid -- Trapezoidal
triang -- Triangular
truncexpon -- Truncated Exponential
truncnorm -- Truncated Normal
tukeylambda -- Tukey-Lambda
uniform -- Uniform
vonmises -- Von-Mises (Circular)
vonmises_line -- Von-Mises (Line)
wald -- Wald
weibull_min -- Minimum Weibull (see Frechet)
weibull_max -- Maximum Weibull (see Frechet)
wrapcauchy -- Wrapped Cauchy
Multivariate distributions
--------------------------
.. autosummary::
:toctree: generated/
multivariate_normal -- Multivariate normal distribution
matrix_normal -- Matrix normal distribution
dirichlet -- Dirichlet
wishart -- Wishart
invwishart -- Inverse Wishart
multinomial -- Multinomial distribution
special_ortho_group -- SO(N) group
ortho_group -- O(N) group
unitary_group -- U(N) group
random_correlation -- random correlation matrices
multivariate_t -- Multivariate t-distribution
multivariate_hypergeom -- Multivariate hypergeometric distribution
Discrete distributions
----------------------
.. autosummary::
:toctree: generated/
bernoulli -- Bernoulli
betabinom -- Beta-Binomial
binom -- Binomial
boltzmann -- Boltzmann (Truncated Discrete Exponential)
dlaplace -- Discrete Laplacian
geom -- Geometric
hypergeom -- Hypergeometric
logser -- Logarithmic (Log-Series, Series)
nbinom -- Negative Binomial
nchypergeom_fisher -- Fisher's Noncentral Hypergeometric
nchypergeom_wallenius -- Wallenius's Noncentral Hypergeometric
nhypergeom -- Negative Hypergeometric
planck -- Planck (Discrete Exponential)
poisson -- Poisson
randint -- Discrete Uniform
skellam -- Skellam
yulesimon -- Yule-Simon
zipf -- Zipf (Zeta)
zipfian -- Zipfian
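As a quick illustration (editorial example), any distribution listed above can
be frozen with its shape, location and scale parameters and then queried, for
instance for its CDF::
    >>> from scipy import stats
    >>> float(stats.norm(loc=0.0, scale=1.0).cdf(0.0))
    0.5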
An overview of statistical functions is given below. Many of these functions
have a similar version in `scipy.stats.mstats` which works for masked arrays.
Summary statistics
==================
.. autosummary::
:toctree: generated/
describe -- Descriptive statistics
gmean -- Geometric mean
hmean -- Harmonic mean
kurtosis -- Fisher or Pearson kurtosis
mode -- Modal value
moment -- Central moment
skew -- Skewness
kstat -- n-th k-statistic
kstatvar -- Variance of the k-statistic
tmean -- Truncated arithmetic mean
tvar -- Truncated variance
tmin -- Truncated minimum
tmax -- Truncated maximum
tstd -- Truncated sample standard deviation
tsem -- Truncated standard error of the mean
variation -- Coefficient of variation
find_repeats
trim_mean
gstd -- Geometric Standard Deviation
iqr
sem
bayes_mvs
mvsdist
entropy
differential_entropy
median_absolute_deviation
median_abs_deviation
bootstrap
Frequency statistics
====================
.. autosummary::
:toctree: generated/
cumfreq
itemfreq
percentileofscore
scoreatpercentile
relfreq
.. autosummary::
:toctree: generated/
binned_statistic -- Compute a binned statistic for a set of data.
binned_statistic_2d -- Compute a 2-D binned statistic for a set of data.
binned_statistic_dd -- Compute a d-D binned statistic for a set of data.
Correlation functions
=====================
.. autosummary::
:toctree: generated/
f_oneway
alexandergovern
pearsonr
spearmanr
pointbiserialr
kendalltau
weightedtau
somersd
linregress
siegelslopes
theilslopes
multiscale_graphcorr
Statistical tests
=================
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
cramervonmises
cramervonmises_2samp
power_divergence
kstest
ks_1samp
ks_2samp
epps_singleton_2samp
mannwhitneyu
tiecorrect
rankdata
ranksums
wilcoxon
kruskal
friedmanchisquare
brunnermunzel
combine_pvalues
jarque_bera
page_trend_test
tukey_hsd
.. autosummary::
:toctree: generated/
ansari
bartlett
levene
shapiro
anderson
anderson_ksamp
binom_test
binomtest
fligner
median_test
mood
skewtest
kurtosistest
normaltest
Quasi-Monte Carlo
=================
.. toctree::
:maxdepth: 4
stats.qmc
Masked statistics functions
===========================
.. toctree::
stats.mstats
Other statistical functionality
===============================
Transformations
---------------
.. autosummary::
:toctree: generated/
boxcox
boxcox_normmax
boxcox_llf
yeojohnson
yeojohnson_normmax
yeojohnson_llf
obrientransform
sigmaclip
trimboth
trim1
zmap
zscore
Statistical distances
---------------------
.. autosummary::
:toctree: generated/
wasserstein_distance
energy_distance
Random variate generation / CDF Inversion
-----------------------------------------
.. autosummary::
:toctree: generated/
rvs_ratio_uniforms
NaiveRatioUniforms
NumericalInverseHermite
NumericalInversePolynomial
TransformedDensityRejection
DiscreteAliasUrn
Circular statistical functions
------------------------------
.. autosummary::
:toctree: generated/
circmean
circvar
circstd
Contingency table functions
---------------------------
.. autosummary::
:toctree: generated/
chi2_contingency
contingency.crosstab
contingency.expected_freq
contingency.margins
contingency.relative_risk
contingency.association
fisher_exact
barnard_exact
boschloo_exact
Plot-tests
----------
.. autosummary::
:toctree: generated/
ppcc_max
ppcc_plot
probplot
boxcox_normplot
yeojohnson_normplot
Univariate and multivariate kernel density estimation
-----------------------------------------------------
.. autosummary::
:toctree: generated/
gaussian_kde
Warnings / Errors used in :mod:`scipy.stats`
--------------------------------------------
.. autosummary::
:toctree: generated/
F_onewayConstantInputWarning
F_onewayBadInputSizesWarning
PearsonRConstantInputWarning
PearsonRNearConstantInputWarning
SpearmanRConstantInputWarning
BootstrapDegenerateDistributionWarning
UNURANError
"""
from ._stats_py import *
from .distributions import *
from ._morestats import *
from ._binomtest import binomtest
from ._binned_statistic import *
from ._kde import gaussian_kde
from . import mstats
from . import qmc
from ._multivariate import *
from . import contingency
from .contingency import chi2_contingency
from ._bootstrap import bootstrap, BootstrapDegenerateDistributionWarning
from ._entropy import *
from ._hypotests import *
from ._rvs_sampling import rvs_ratio_uniforms # noqa
from ._unuran import * # noqa
from ._page_trend_test import page_trend_test
from ._mannwhitneyu import mannwhitneyu
__all__ = [s for s in dir() if not s.startswith("_")] # Remove dunders.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
grlee77/scipy
|
scipy/stats/__init__.py
|
Python
|
bsd-3-clause
| 12,694
|
[
"Gaussian"
] |
3bafdcf8491d9f26b671af21fe307567fb493e98dbfe4149aa64fac2c2002288
|
#!/usr/bin/env python
# cython: profile=True
# -*- coding: utf-8 -*-
###############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
"""
This script uses hybrid MPI/OpenMP parallelism in addition to highly optimized
SIMD vectorization within the compute kernels. Using multiple MPI processes
requires running this command using your MPI implementation's process manager,
e.g. `mpirun`, `mpiexec`, or `aprun`. The number of OpenMP threads can be
controled by setting the OMP_NUM_THREADS environment variable. (e.g.
$ export OMP_NUM_THREADS=4; mpirun -np 16 python tent.py <options>)
Authors: Carlos Xavier Hernandez
"""
#-----------------------------------
# Imports
#-----------------------------------
from __future__ import print_function
import glob, argparse, os, sys, time, datetime, itertools, warnings
import numpy as np
try:
import mdtraj as md
except ImportError:
print("This package requires the latest development version of MDTraj")
print("which can be downloaded from https://github.com/rmcgibbo/mdtraj")
sys.exit(1)
try:
from mpi4py import MPI
except:
print("This package requires mpi4py, which can be downloaded")
print("from https://pypi.python.org/pypi/mpi4py")
sys.exit(1)
try:
import pymc as pm
except ImportError:
print("This package requires pymc, which can be downloaded")
print("from https://pypi.python.org/pypi/pymc")
sys.exit(1)
#-----------------------------------
# Globals
#-----------------------------------
COMM = MPI.COMM_WORLD
RANK = COMM.rank
SIZE = COMM.size
def rmsd(traj, ref, idx):
return np.sqrt(np.sum(np.square(traj[:,idx,:] - ref[:,idx,:]),axis=(1,2))/idx.shape[0])
def printM(message, *args):
if RANK == 0:
if len(args) == 0:
print(message)
else:
print(message % args)
class timing(object):
"Context manager for printing performance"
def __init__(self, name):
self.name = name
def __enter__(self):
self.start = time.time()
def __exit__(self, ty, val, tb):
end = time.time()
print("<RANK %d> PERFORMANCE [%s] : %0.3f seconds" % (RANK, self.name, end-self.start))
return False
def parse_cmdln():
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-td', '--dir', dest='dir',help='Directory containing trajectories')
parser.add_argument('-ext', '--ext', dest='ext', help='File extension', default='dcd')
parser.add_argument('-ref', '--ref', dest='reference', help='Reference pdb of bound structure')
parser.add_argument('-s', '--stride', dest='stride', help='Stride', default=10)
parser.add_argument('-p', '--protein', dest='prot', help='Protein indices', default=None)
parser.add_argument('-l', '--ligand', dest='lig', help='Ligand indices', default=None)
parser.add_argument('-d', '--cutoff', dest='d', help='RMSD cutoff', type=float, default=0.5)
parser.add_argument('-c', '--significance', dest='c', help='Significance cutoff', type=float, default=5.0)
#parser.add_argument('-i', '--idx', dest='idx', help='Residues to compare in bound pose', default=None)
args = parser.parse_args()
return args
def init_gamma_parms(r):
if r.mean() > 0.0:
a = 1/np.mean(r)
else:
a = np.finfo('f').max
return a
def findEvent(metric, steps = 10000, burn=0.1, thin=1):
a = init_gamma_parms(metric)
mu1, mu2, tau = pm.Exponential('u1',a), pm.Exponential('u2',a), pm.DiscreteUniform("tau", lower=0, upper=metric.shape[0])
@pm.deterministic
def params(a1=mu1, a2=mu2,tau=tau):
out = np.zeros(metric.shape[0])
out[:tau] = a1
out[tau:] = a2
return out
obs = pm.Poisson("obs",params,value=metric,observed=True)
model = pm.Model([obs,mu1,mu2,tau])
mcmc = pm.MCMC(model)
mcmc.sample(steps, int(np.rint(burn*steps)), thin, progress_bar=False)
mu1_samples = mcmc.trace('u1')[:]
mu2_samples = mcmc.trace('u2')[:]
tau_samples = mcmc.trace('tau')[:]
sig = (np.mean(mu2_samples) - np.mean(mu1_samples)) / np.sqrt(np.var(mu1_samples) + np.var(mu2_samples) + 1E-5)
m1, m2, tau = np.median(mu1_samples), np.median(mu2_samples), np.median(tau_samples)
return sig, m1, m2, tau
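# Editorial sketch (not part of the original script): the significance value
# returned by findEvent() above is an effect-size style score comparing the
# posterior Poisson-rate samples before and after the switchpoint. A
# hypothetical numpy-only version of that score, for intuition only:
def _rate_shift_score(samples_before, samples_after):
    return (np.mean(samples_after) - np.mean(samples_before)) / \
        np.sqrt(np.var(samples_before) + np.var(samples_after) + 1E-5)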
def create_features(ref, prot, lig, d):
set1 = [ref.topology.atom(i).residue.index for i in prot]
set2 = [ref.topology.atom(i).residue.index for i in lig]
contacts = md.compute_contacts(ref,contacts=list(itertools.product(set1,set2)))
atom_set = contacts[1][np.where(contacts[0]<d)[1],:]
return atom_set
def calculate_metrics(traj, features, d):
contacts = md.compute_contacts(traj, contacts = features)
h = np.sum(contacts[0] < d, axis=1)  # count residue pairs closer than the cutoff d
return h
def main(trajectories, ref, prot, lig, stride, d, c):
bind = unbind = 0
features = create_features(ref, prot, lig, d)
for trajectory in trajectories:
with timing('Finding binding events...'):
traj = md.load(trajectory, top = ref, stride = stride)
traj.superpose(ref, atom_indices = prot)
h = calculate_metrics(traj, features, d)
q, m1, m2, tau = findEvent(h)
if (c < q)*(tau>0.0):
bind += 1
elif (-c > q)*(tau>0.0):
unbind += 1
COMM.Barrier()
# comm.reduce returns the summed value on the root rank (and None elsewhere)
n_bind = COMM.reduce(bind, op=MPI.SUM, root=0)
n_unbind = COMM.reduce(unbind, op=MPI.SUM, root=0)
printM(u'Found %s binding events and %s unbinding events (sigma is %s)' % (n_bind,n_unbind,c))
if __name__ == "__main__":
options = parse_cmdln()
topology = md.load(options.reference)
if RANK == 0:
trajectories = glob.glob(options.dir + "/*." + options.ext) if options.dir else []
try:
if not options.dir:
print("Please supply a directory."); sys.exit(1)
if not options.reference:
print("Please supply a reference file."); sys.exit(1)
if not trajectories:
print("No trajectories found.")
sys.exit(1)
if len(trajectories) < SIZE:
print("There are more nodes than trajectories.")
sys.exit(1)
except SystemExit:
if SIZE > 1:
COMM.Abort()
exit()
trajectories = [trajectories[i::SIZE] for i in range(SIZE)]
prot = np.loadtxt(options.prot, dtype=int)
lig = np.loadtxt(options.lig, dtype=int)
#idx = np.hstack((prot,lig))
#prot = np.arange(len(prot))
#lig = np.arange(len(prot),len(idx))
else:
trajectories = lig = prot = None
trajectories = COMM.scatter(trajectories, root=0)
prot = COMM.bcast(prot, root=0)
lig = COMM.bcast(lig, root=0)
#idx = COMM.bcast(idx, root=0)
printM('Starting...')
main(trajectories, topology, prot, lig, int(options.stride), options.d, options.c)
|
cxhernandez/findBindingEvents
|
countBindingEvents.py
|
Python
|
gpl-2.0
| 7,737
|
[
"MDTraj"
] |
fb2f66825431cdff22fd08a5a222242f6a3cd8dffd00283df8c0a7c09edc61d2
|
# Copyright (c) 2014 AG Stephan
# from github.com/pseudonym117/Riot-Watcher
# file Riot-Watcher/riotwatcher/riotwatcher.py
# last downloaded on 15th October 2015
# with a few changes: search "CUSTOMISED"
from collections import deque
import time
import requests
### CUSTOMISED
import data_path
with open(data_path.riot_api_key,'r') as f:
import json
my_key = json.load(f)
###
# Constants
BRAZIL = 'br'
EUROPE_NORDIC_EAST = 'eune'
EUROPE_WEST = 'euw'
KOREA = 'kr'
LATIN_AMERICA_NORTH = 'lan'
LATIN_AMERICA_SOUTH = 'las'
NORTH_AMERICA = 'na'
OCEANIA = 'oce'
RUSSIA = 'ru'
TURKEY = 'tr'
### CUSTOMISED
my_default_region = EUROPE_WEST
###
# Platforms
platforms = {
BRAZIL: 'BR1',
EUROPE_NORDIC_EAST: 'EUN1',
EUROPE_WEST: 'EUW1',
KOREA: 'KR',
LATIN_AMERICA_NORTH: 'LA1',
LATIN_AMERICA_SOUTH: 'LA2',
NORTH_AMERICA: 'NA1',
OCEANIA: 'OC1',
RUSSIA: 'RU',
TURKEY: 'TR1'
}
queue_types = [
'CUSTOM', # Custom games
'NORMAL_5x5_BLIND', # Normal 5v5 blind pick
'BOT_5x5', # Historical Summoners Rift coop vs AI games
'BOT_5x5_INTRO', # Summoners Rift Intro bots
'BOT_5x5_BEGINNER', # Summoner's Rift Coop vs AI Beginner Bot games
'BOT_5x5_INTERMEDIATE', # Historical Summoner's Rift Coop vs AI Intermediate Bot games
'NORMAL_3x3', # Normal 3v3 games
'NORMAL_5x5_DRAFT', # Normal 5v5 Draft Pick games
'ODIN_5x5_BLIND', # Dominion 5v5 Blind Pick games
'ODIN_5x5_DRAFT', # Dominion 5v5 Draft Pick games
'BOT_ODIN_5x5', # Dominion Coop vs AI games
'RANKED_SOLO_5x5', # Ranked Solo 5v5 games
'RANKED_PREMADE_3x3', # Ranked Premade 3v3 games
'RANKED_PREMADE_5x5', # Ranked Premade 5v5 games
'RANKED_TEAM_3x3', # Ranked Team 3v3 games
'RANKED_TEAM_5x5', # Ranked Team 5v5 games
'BOT_TT_3x3', # Twisted Treeline Coop vs AI games
'GROUP_FINDER_5x5', # Team Builder games
'ARAM_5x5', # ARAM games
'ONEFORALL_5x5', # One for All games
'FIRSTBLOOD_1x1', # Snowdown Showdown 1v1 games
'FIRSTBLOOD_2x2', # Snowdown Showdown 2v2 games
'SR_6x6', # Hexakill games
'URF_5x5', # Ultra Rapid Fire games
'BOT_URF_5x5', # Ultra Rapid Fire games played against AI games
'NIGHTMARE_BOT_5x5_RANK1', # Doom Bots Rank 1 games
'NIGHTMARE_BOT_5x5_RANK2', # Doom Bots Rank 2 games
'NIGHTMARE_BOT_5x5_RANK5', # Doom Bots Rank 5 games
'ASCENSION_5x5', # Ascension games
'HEXAKILL', # 6v6 games on twisted treeline
'KING_PORO_5x5', # King Poro game games
'COUNTER_PICK', # Nemesis games,
'BILGEWATER_5x5', # Black Market Brawlers games
]
game_maps = [
{'map_id': 1, 'name': "Summoner's Rift", 'notes': "Summer Variant"},
{'map_id': 2, 'name': "Summoner's Rift", 'notes': "Autumn Variant"},
{'map_id': 3, 'name': "The Proving Grounds", 'notes': "Tutorial Map"},
{'map_id': 4, 'name': "Twisted Treeline", 'notes': "Original Version"},
{'map_id': 8, 'name': "The Crystal Scar", 'notes': "Dominion Map"},
{'map_id': 10, 'name': "Twisted Treeline", 'notes': "Current Version"},
{'map_id': 11, 'name': "Summoner's Rift", 'notes': "Current Version"},
{'map_id': 12, 'name': "Howling Abyss", 'notes': "ARAM Map"},
{'map_id': 14, 'name': "Butcher's Bridge", 'notes': "ARAM Map"},
]
game_modes = [
'CLASSIC', # Classic Summoner's Rift and Twisted Treeline games
'ODIN', # Dominion/Crystal Scar games
'ARAM', # ARAM games
'TUTORIAL', # Tutorial games
'ONEFORALL', # One for All games
'ASCENSION', # Ascension games
'FIRSTBLOOD', # Snowdown Showdown games
'KINGPORO', # King Poro games
]
game_types = [
'CUSTOM_GAME', # Custom games
'TUTORIAL_GAME', # Tutorial games
'MATCHED_GAME', # All other games
]
sub_types = [
'NONE', # Custom games
'NORMAL', # Summoner's Rift unranked games
'NORMAL_3x3', # Twisted Treeline unranked games
'ODIN_UNRANKED', # Dominion/Crystal Scar games
'ARAM_UNRANKED_5v5', # ARAM / Howling Abyss games
'BOT', # Summoner's Rift and Crystal Scar games played against AI
'BOT_3x3', # Twisted Treeline games played against AI
'RANKED_SOLO_5x5', # Summoner's Rift ranked solo queue games
'RANKED_TEAM_3x3', # Twisted Treeline ranked team games
'RANKED_TEAM_5x5', # Summoner's Rift ranked team games
'ONEFORALL_5x5', # One for All games
'FIRSTBLOOD_1x1', # Snowdown Showdown 1x1 games
'FIRSTBLOOD_2x2', # Snowdown Showdown 2x2 games
'SR_6x6', # Hexakill games
'CAP_5x5', # Team Builder games
'URF', # Ultra Rapid Fire games
'URF_BOT', # Ultra Rapid Fire games against AI
'NIGHTMARE_BOT', # Nightmare bots
'ASCENSION', # Ascension games
'HEXAKILL', # Twisted Treeline 6x6 Hexakill
'KING_PORO', # King Poro games
'COUNTER_PICK', # Nemesis games
'BILGEWATER', # Black Market Brawlers games
]
player_stat_summary_types = [
'Unranked', # Summoner's Rift unranked games
'Unranked3x3', # Twisted Treeline unranked games
'OdinUnranked', # Dominion/Crystal Scar games
'AramUnranked5x5', # ARAM / Howling Abyss games
'CoopVsAI', # Summoner's Rift and Crystal Scar games played against AI
'CoopVsAI3x3', # Twisted Treeline games played against AI
'RankedSolo5x5', # Summoner's Rift ranked solo queue games
'RankedTeams3x3', # Twisted Treeline ranked team games
'RankedTeams5x5', # Summoner's Rift ranked team games
'OneForAll5x5', # One for All games
'FirstBlood1x1', # Snowdown Showdown 1x1 games
'FirstBlood2x2', # Snowdown Showdown 2x2 games
'SummonersRift6x6', # Hexakill games
'CAP5x5', # Team Builder games
'URF', # Ultra Rapid Fire games
'URFBots', # Ultra Rapid Fire games played against AI
'NightmareBot', # Summoner's Rift games played against Nightmare AI
'Hexakill', # Twisted Treeline 6x6 Hexakill games
'KingPoro', # King Poro games
'CounterPick', # Nemesis games
'Bilgewater', # Black Market Brawlers games
]
solo_queue, ranked_5s, ranked_3s = 'RANKED_SOLO_5x5', 'RANKED_TEAM_5x5', 'RANKED_TEAM_3x3'
api_versions = {
'champion': 1.2,
'current-game': 1.0,
'featured-games': 1.0,
'game': 1.3,
'league': 2.5,
'lol-static-data': 1.2,
'lol-status': 1.0,
'match': 2.2,
'matchhistory': 2.2,
'matchlist': 2.2,
'stats': 1.3,
'summoner': 1.4,
'team': 2.4
}
class LoLException(Exception):
def __init__(self, error, response):
self.error = error
self.headers = response.headers
def __str__(self):
return self.error
error_400 = "Bad request"
error_401 = "Unauthorized"
error_404 = "Game data not found"
error_429 = "Too many requests"
error_500 = "Internal server error"
error_503 = "Service unavailable"
def raise_status(response):
if response.status_code == 400:
raise LoLException(error_400, response)
elif response.status_code == 401:
raise LoLException(error_401, response)
elif response.status_code == 404:
raise LoLException(error_404, response)
elif response.status_code == 429:
raise LoLException(error_429, response)
elif response.status_code == 500:
raise LoLException(error_500, response)
elif response.status_code == 503:
raise LoLException(error_503, response)
else:
response.raise_for_status()
class RateLimit:
def __init__(self, allowed_requests, seconds):
self.allowed_requests = allowed_requests
self.seconds = seconds
self.made_requests = deque()
def __reload(self):
t = time.time()
while len(self.made_requests) > 0 and self.made_requests[0] < t:
self.made_requests.popleft()
def add_request(self):
self.made_requests.append(time.time() + self.seconds)
def request_available(self):
self.__reload()
return len(self.made_requests) < self.allowed_requests
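# Editorial usage sketch (not part of the original module): the intended
# pattern for the sliding-window RateLimit above is to check availability
# before each call and record the call afterwards.
def _example_rate_limit(n_calls=3):
    lim = RateLimit(allowed_requests=10, seconds=10)
    sent = 0
    for _ in range(n_calls):
        if lim.request_available():
            lim.add_request()  # stands in for an actual HTTP request
            sent += 1
    return sent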
class RiotWatcher:
### CUSTOMISED
#def __init__(self, key, default_region=NORTH_AMERICA, limits=(RateLimit(10, 10), RateLimit(500, 600), )):
def __init__(self, key=my_key, default_region=my_default_region, limits=(RateLimit(10, 10), RateLimit(500, 600), )):
###
self.key = key
self.default_region = default_region
self.limits = limits
def can_make_request(self):
for lim in self.limits:
if not lim.request_available():
return False
return True
### CUSTOMISED
# modelled on the wait function in file Riot-Watcher/riotwatcher/tests.py
def wait(self):
while not self.can_make_request():
time.sleep(1)
###
def base_request(self, url, region, static=False, **kwargs):
if region is None:
region = self.default_region
args = {'api_key': self.key}
for k in kwargs:
if kwargs[k] is not None:
args[k] = kwargs[k]
r = requests.get(
'https://{proxy}.api.pvp.net/api/lol/{static}{region}/{url}'.format(
proxy='global' if static else region,
static='static-data/' if static else '',
region=region,
url=url
),
params=args
)
if not static:
for lim in self.limits:
lim.add_request()
raise_status(r)
return r.json()
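# Editorial note (illustrative, summoner name is a placeholder): with the
# customised euw default region, a call such as
#     base_request('v1.4/summoner/by-name/somename', None)
# issues a GET against
#     https://euw.api.pvp.net/api/lol/euw/v1.4/summoner/by-name/somename?api_key=...
# whereas static=True switches the host prefix to 'global', inserts
# 'static-data/' into the path, and skips the rate-limit bookkeeping above.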
def _observer_mode_request(self, url, proxy=None, **kwargs):
if proxy is None:
proxy = self.default_region
args = {'api_key': self.key}
for k in kwargs:
if kwargs[k] is not None:
args[k] = kwargs[k]
r = requests.get(
'https://{proxy}.api.pvp.net/observer-mode/rest/{url}'.format(
proxy=proxy,
url=url
),
params=args
)
for lim in self.limits:
lim.add_request()
raise_status(r)
return r.json()
@staticmethod
def sanitized_name(name):
return name.replace(' ', '').lower()
# champion-v1.2
def _champion_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/champion/{end_url}'.format(
version=api_versions['champion'],
end_url=end_url
),
region,
**kwargs
)
def get_all_champions(self, region=None, free_to_play=False):
return self._champion_request('', region, freeToPlay=free_to_play)
def get_champion(self, champion_id, region=None):
return self._champion_request('{id}'.format(id=champion_id), region)
# current-game-v1.0
def get_current_game(self, summoner_id, platform_id=None, region=None):
if platform_id is None:
platform_id = platforms[self.default_region]
return self._observer_mode_request(
'consumer/getSpectatorGameInfo/{platform}/{summoner_id}'.format(
platform=platform_id,
summoner_id=summoner_id
),
region
)
# featured-game-v1.0
def get_featured_games(self, proxy=None):
return self._observer_mode_request('featured', proxy)
# game-v1.3
def _game_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/game/{end_url}'.format(
version=api_versions['game'],
end_url=end_url
),
region,
**kwargs
)
def get_recent_games(self, summoner_id, region=None):
return self._game_request('by-summoner/{summoner_id}/recent'.format(summoner_id=summoner_id), region)
# league-v2.5
def _league_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/league/{end_url}'.format(
version=api_versions['league'],
end_url=end_url
),
region,
**kwargs
)
def get_league(self, summoner_ids=None, team_ids=None, region=None):
"""summoner_ids and team_ids arguments must be iterable, only one should be specified, not both"""
if (summoner_ids is None) != (team_ids is None):
if summoner_ids is not None:
return self._league_request(
'by-summoner/{summoner_ids}'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
else:
return self._league_request(
'by-team/{team_ids}'.format(team_ids=','.join([str(t) for t in team_ids])),
region
)
def get_league_entry(self, summoner_ids=None, team_ids=None, region=None):
"""summoner_ids and team_ids arguments must be iterable, only one should be specified, not both"""
if (summoner_ids is None) != (team_ids is None):
if summoner_ids is not None:
return self._league_request(
'by-summoner/{summoner_ids}/entry'.format(
summoner_ids=','.join([str(s) for s in summoner_ids])
),
region
)
else:
return self._league_request(
'by-team/{team_ids}/entry'.format(team_ids=','.join([str(t) for t in team_ids])),
region
)
def get_challenger(self, region=None, queue=solo_queue):
return self._league_request('challenger', region, type=queue)
def get_master(self, region=None, queue=solo_queue):
return self._league_request('master', region, type=queue)
# lol-static-data-v1.2
def _static_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/{end_url}'.format(
version=api_versions['lol-static-data'],
end_url=end_url
),
region,
static=True,
**kwargs
)
def static_get_champion_list(self, region=None, locale=None, version=None, data_by_id=None, champ_data=None):
return self._static_request(
'champion',
region,
locale=locale,
version=version,
dataById=data_by_id,
champData=champ_data
)
def static_get_champion(self, champ_id, region=None, locale=None, version=None, champ_data=None):
return self._static_request(
'champion/{id}'.format(id=champ_id),
region,
locale=locale,
version=version,
champData=champ_data
)
def static_get_item_list(self, region=None, locale=None, version=None, item_list_data=None):
return self._static_request('item', region, locale=locale, version=version, itemListData=item_list_data)
def static_get_item(self, item_id, region=None, locale=None, version=None, item_data=None):
return self._static_request(
'item/{id}'.format(id=item_id),
region,
locale=locale,
version=version,
itemData=item_data
)
def static_get_mastery_list(self, region=None, locale=None, version=None, mastery_list_data=None):
return self._static_request(
'mastery',
region,
locale=locale,
version=version,
masteryListData=mastery_list_data
)
def static_get_mastery(self, mastery_id, region=None, locale=None, version=None, mastery_data=None):
return self._static_request(
'mastery/{id}'.format(id=mastery_id),
region,
locale=locale,
version=version,
masteryData=mastery_data
)
def static_get_realm(self, region=None):
return self._static_request('realm', region)
def static_get_rune_list(self, region=None, locale=None, version=None, rune_list_data=None):
return self._static_request('rune', region, locale=locale, version=version, runeListData=rune_list_data)
def static_get_rune(self, rune_id, region=None, locale=None, version=None, rune_data=None):
return self._static_request(
'rune/{id}'.format(id=rune_id),
region,
locale=locale,
version=version,
runeData=rune_data
)
def static_get_summoner_spell_list(self, region=None, locale=None, version=None, data_by_id=None, spell_data=None):
return self._static_request(
'summoner-spell',
region,
locale=locale,
version=version,
dataById=data_by_id,
spellData=spell_data
)
def static_get_summoner_spell(self, spell_id, region=None, locale=None, version=None, spell_data=None):
return self._static_request(
'summoner-spell/{id}'.format(id=spell_id),
region,
locale=locale,
version=version,
spellData=spell_data
)
def static_get_versions(self, region=None):
return self._static_request('versions', region)
# match-v2.2
def _match_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/match/{end_url}'.format(
version=api_versions['match'],
end_url=end_url
),
region,
**kwargs
)
def get_match(self, match_id, region=None, include_timeline=False):
return self._match_request(
'{match_id}'.format(match_id=match_id),
region,
includeTimeline=include_timeline
)
# lol-status-v1.0
@staticmethod
def get_server_status(region=None):
if region is None:
url = 'shards'
else:
url = 'shards/{region}'.format(region=region)
r = requests.get('http://status.leagueoflegends.com/{url}'.format(url=url))
raise_status(r)
return r.json()
# match history-v2.2
def _match_history_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/matchhistory/{end_url}'.format(
version=api_versions['matchhistory'],
end_url=end_url
),
region,
**kwargs
)
def get_match_history(self, summoner_id, region=None, champion_ids=None, ranked_queues=None, begin_index=None,
end_index=None):
return self._match_history_request(
'{summoner_id}'.format(summoner_id=summoner_id),
region,
championIds=champion_ids,
rankedQueues=ranked_queues,
beginIndex=begin_index,
endIndex=end_index
)
# match list-v2.2
def _match_list_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/matchlist/by-summoner/{end_url}'.format(
version=api_versions['matchlist'],
end_url=end_url,
),
region,
**kwargs
)
def get_match_list(self, summoner_id, region=None, champion_ids=None, ranked_queues=None, seasons=None,
begin_time=None, end_time=None, begin_index=None, end_index=None):
return self._match_list_request(
'{summoner_id}'.format(summoner_id=summoner_id),
region,
championIds=champion_ids,
rankedQueues=ranked_queues,
seasons=seasons,
beginTime=begin_time,
endTime=end_time,
beginIndex=begin_index,
endIndex=end_index
)
# stats-v1.3
def _stats_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/stats/{end_url}'.format(
version=api_versions['stats'],
end_url=end_url
),
region,
**kwargs
)
def get_stat_summary(self, summoner_id, region=None, season=None):
return self._stats_request(
'by-summoner/{summoner_id}/summary'.format(summoner_id=summoner_id),
region,
season='SEASON{}'.format(season) if season is not None else None)
def get_ranked_stats(self, summoner_id, region=None, season=None):
return self._stats_request(
'by-summoner/{summoner_id}/ranked'.format(summoner_id=summoner_id),
region,
season='SEASON{}'.format(season) if season is not None else None
)
# summoner-v1.4
def _summoner_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/summoner/{end_url}'.format(
version=api_versions['summoner'],
end_url=end_url
),
region,
**kwargs
)
def get_mastery_pages(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/masteries'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
def get_rune_pages(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/runes'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
    def get_summoners(self, names=None, ids=None, region=None):
        # Exactly one of `names` or `ids` must be provided; the XOR check below
        # returns None when both or neither are given.
        if (names is None) != (ids is None):
return self._summoner_request(
'by-name/{summoner_names}'.format(
summoner_names=','.join([self.sanitized_name(n) for n in names])) if names is not None
else '{summoner_ids}'.format(summoner_ids=','.join([str(i) for i in ids])),
region
)
else:
return None
def get_summoner(self, name=None, _id=None, region=None):
if (name is None) != (_id is None):
if name is not None:
name = self.sanitized_name(name)
return self.get_summoners(names=[name, ], region=region)[name]
else:
return self.get_summoners(ids=[_id, ], region=region)[str(_id)]
return None
def get_summoner_name(self, summoner_ids, region=None):
return self._summoner_request(
'{summoner_ids}/name'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
region
)
# team-v2.4
def _team_request(self, end_url, region, **kwargs):
return self.base_request(
'v{version}/team/{end_url}'.format(
version=api_versions['team'],
end_url=end_url
),
region,
**kwargs
)
def get_teams_for_summoner(self, summoner_id, region=None):
return self.get_teams_for_summoners([summoner_id, ], region=region)[str(summoner_id)]
def get_teams_for_summoners(self, summoner_ids, region=None):
return self._team_request(
'by-summoner/{summoner_id}'.format(summoner_id=','.join([str(s) for s in summoner_ids])),
region
)
def get_team(self, team_id, region=None):
return self.get_teams([team_id, ], region=region)[str(team_id)]
def get_teams(self, team_ids, region=None):
return self._team_request('{team_ids}'.format(team_ids=','.join(str(t) for t in team_ids)), region)
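# Usage sketch (not part of the original module; the summoner name is a placeholder
# and the response field names follow the v2.2 match-list API, so they may differ):
# assuming the wrapper class above is instantiated as `watcher` with a valid API key
# and default region, the helpers compose like this:
#
#   summoner = watcher.get_summoner(name='SomeSummonerName')
#   matches = watcher.get_match_list(summoner['id'], ranked_queues='RANKED_SOLO_5x5')
#   match = watcher.get_match(matches['matches'][0]['matchId'], include_timeline=True)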
|
dianegalloiswong/LoL-stats
|
riotwatcher.py
|
Python
|
mit
| 23,417
|
[
"CRYSTAL"
] |
9b16e9773ca55fb70b2b08c4f5203ed05dc3eef7d3c8d409daa0501076064e24
|
# take the guardian articles and generate a csv
# import guardian articles
import os
import json
import csv
import re
from elasticsearch import Elasticsearch
from elasticsearch_dsl.connections import connections
connections.create_connection(hosts=['http://controcurator.org:80/ess'])
es = Elasticsearch(
['http://controcurator.org/ess/'],
port=80)
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
articles = ['https://www.theguardian.com/commentisfree/2017/apr/11/working-class-public-spaces-musee-d-orsay',
'https://www.theguardian.com/football/2017/apr/11/juventus-barcelona-champions-league-quarter-final-match-report',
'https://www.theguardian.com/world/2017/apr/11/us-defense-syria-chemical-weapons-attacks-assad-regime',
'https://www.theguardian.com/society/2017/apr/11/parents-fighting-to-keep-baby-charlie-gard-life-support-lose-high-court-battle',
'https://www.theguardian.com/football/2017/apr/11/borussia-dortmund-explosion-team-bus',
'https://www.theguardian.com/education/2017/apr/12/new-free-schools-despite-secondary-staff-cuts',
'https://www.theguardian.com/politics/2017/mar/21/martin-mcguinness-northern-ireland-former-deputy-first-minister-dies',
'https://www.theguardian.com/politics/2017/apr/12/foreign-states-may-have-interfered-in-brexit-vote-report-says',
'https://www.theguardian.com/us-news/2017/apr/11/homeland-security-searches-electronics-border',
'https://www.theguardian.com/environment/2017/mar/22/princess-anne-backs-gm-crops-livestock-unlike-prince-charles',
'https://www.theguardian.com/music/2017/apr/11/palestine-music-expo-pmx-musicians-shaking-up-the-occupied-territories',
'https://www.theguardian.com/world/2017/apr/11/g7-rejects-uk-call-for-sanctions-against-russia-and-syria',
'https://www.theguardian.com/commentisfree/2017/apr/11/frontline-brexit-culture-wars-ask-comedian-al-murray',
'https://www.theguardian.com/news/2017/apr/11/painting-a-new-picture-of-the-little-ice-age-weatherwatch',
'https://www.theguardian.com/us-news/2017/apr/11/detroit-michigan-500-dollar-house-rust-belt-america',
'https://www.theguardian.com/global-development/2017/apr/11/worrying-trend-as-aid-money-stays-in-wealthiest-countries',
'https://www.theguardian.com/society/2017/apr/11/recorded-childhood-cancers-rise-worldwide-world-health-organization',
'https://www.theguardian.com/commentisfree/2016/dec/08/modern-day-hermits-share-experiences',
'https://www.theguardian.com/football/2017/mar/22/ronnie-moran-liverpool-dies',
'https://www.theguardian.com/lifeandstyle/2017/apr/11/vision-thing-how-babies-colour-in-the-world',
'https://www.theguardian.com/world/2017/apr/11/nurses-grant-dying-man-final-wish-cigarette-glass-wine',
'https://www.theguardian.com/business/2017/apr/11/labour-declare-war-late-payers-marks-spencer-jeremy-corbyn',
'https://www.theguardian.com/science/2017/apr/12/scientists-unravel-mystery-of-the-loose-shoelace',
'https://www.theguardian.com/us-news/2017/apr/11/united-airlines-shares-plummet-passenger-removal-controversy',
'https://www.theguardian.com/business/2017/apr/11/judges-reject-us-bankers-claim-to-be-randy-work-genius-in-divorce-case',
'https://www.theguardian.com/business/2017/apr/12/tesco-profits-1bn-growth-supermarket',
'https://www.theguardian.com/money/2017/apr/11/probate-fees-plan-is-daft-as-well-as-devious',
'https://www.theguardian.com/commentisfree/2017/apr/11/donald-trump-russia-rex-tillersons-visit-syria',
'https://www.theguardian.com/environment/2017/apr/12/uk-butterflies-worst-hit-in-2016-with-70-of-species-in-decline-study-finds',
'https://www.theguardian.com/business/2017/apr/11/developing-countries-demands-for-better-life-must-be-met-says-world-bank-head',
'https://www.theguardian.com/politics/2017/apr/12/devon-and-cornwall-pcc-expenses-inquiry-prosecutors',
'https://www.theguardian.com/politics/shortcuts/2017/apr/11/deep-england-brexit-britain',
'https://www.theguardian.com/society/2017/apr/11/uk-supreme-court-denies-tobacco-firms-permission-for-plain-packaging-appeal',
'https://www.theguardian.com/society/2017/mar/21/dawn-butler-stood-up-for-deaf-people-but-we-need-more-than-gestures',
'https://www.theguardian.com/technology/2017/apr/11/gordon-ramsay-father-in-law-admits-hacking-company-computers',
'https://www.theguardian.com/tv-and-radio/2017/mar/20/richard-hammond-injured-in-grand-tour-crash-in-mozambique',
'https://www.theguardian.com/us-news/2017/apr/11/sean-spicer-hitler-chemical-weapons-holocaust-assad',
'https://www.theguardian.com/science/2017/mar/22/face-medieval-cambridge-man-emerges-700-years-after-death',
'https://www.theguardian.com/society/2017/mar/22/new-alzheimers-test-can-predict-age-when-disease-will-appear',
'https://www.theguardian.com/world/2017/apr/11/national-archives-mi5-file-new-zealand-diplomat-paddy-costello-kgb-spy',
'https://www.theguardian.com/australia-news/2017/mar/22/british-war-veteran-granted-permanent-residency-in-australia-ending-visa-drama',
'https://www.theguardian.com/books/2017/apr/11/x-men-illustrator-alleged-anti-christian-messages-marvel-ardian-syaf',
'https://www.theguardian.com/business/2017/apr/12/burger-king-ok-google-commercial',
'https://www.theguardian.com/business/2017/apr/12/edf-customers-price-rise-electricity-gas-energy',
'https://www.theguardian.com/business/2017/apr/12/ship-oil-rig-pioneer-spirit-shell-north-sea-decommissioning',
'https://www.theguardian.com/business/2017/mar/22/asian-shares-drop-investors-fear-trump-wont-deliver-promises',
'https://www.theguardian.com/football/2017/apr/11/tony-adams-vows-to-give-granada-players-a-kick-up-the-arse',
'https://www.theguardian.com/football/2017/mar/22/football-transfer-rumours-jermain-defoe-back-to-west-ham',
'https://www.theguardian.com/global-development/2017/apr/11/india-acts-to-help-acid-attack-victims',
'https://www.theguardian.com/money/2017/apr/11/student-loan-interest-rate-rise-uk-inflation-brexit',
'https://www.theguardian.com/uk-news/2017/mar/17/coroner-warns-of-dangers-after-man-electrocuted-in-bath-while-charging-phone',
'https://www.theguardian.com/business/2017/mar/22/london-taxi-company-coventry-electric-cabs-jobs-brexit',
'https://www.theguardian.com/commentisfree/2016/dec/14/experiences-accessing-mental-health-services-uk',
'https://www.theguardian.com/commentisfree/2017/apr/11/france-left-europe-jean-luc-melenchon-presidential-election',
'https://www.theguardian.com/commentisfree/2017/apr/11/sean-spicers-hitler-holocaust-speak-volumes',
'https://www.theguardian.com/commentisfree/2017/apr/11/united-airlines-flying-while-asian-fear',
'https://www.theguardian.com/environment/2017/mar/22/country-diary-long-mynd-shropshire-light-spout-waterfall',
'https://www.theguardian.com/football/2017/apr/11/borussia-dortmund-shock-team-bus-explosions',
'https://www.theguardian.com/football/2017/mar/17/stewart-downing-middlesbrough-karanka-row-agnew',
'https://www.theguardian.com/football/2017/mar/22/which-football-manager-has-been-sacked-by-one-club-the-most-times',
'https://www.theguardian.com/music/2017/mar/16/ed-sheeran-headline-sunday-night-glastonbury-2017',
'https://www.theguardian.com/sport/2017/apr/11/pennsylvania-woman-jail-threats-youth-football-league-officials',
'https://www.theguardian.com/sport/blog/2017/mar/22/talking-horses-best-wednesday-bets-for-warwick-and-newcastle',
'https://www.theguardian.com/technology/2017/mar/17/youtube-and-google-search-for-answers',
'https://www.theguardian.com/tv-and-radio/2017/mar/19/neighbours-tv-soap-could-disappear-from-british-screens',
'https://www.theguardian.com/uk-news/2017/apr/11/boris-johnson-full-support-failure-secure-sanctions-syria-russia',
'https://www.theguardian.com/world/2017/mar/22/brussels-unveil-terror-victims-memorial-one-year-after-attacks',
'https://www.theguardian.com/world/2017/mar/22/north-korea-missile-test-failure',
'https://www.theguardian.com/business/2017/mar/16/bank-of-england-uk-interest-rates-monetary-policy-committee',
'https://www.theguardian.com/business/2017/mar/21/inflation-uk-wages-lag-behind-prices-mark-carney',
'https://www.theguardian.com/business/2017/mar/22/nervous-markets-take-fright-at-prospect-of-trump-failing-to-deliver',
'https://www.theguardian.com/commentisfree/2016/dec/21/i-lost-my-mum-seven-weeks-ago-our-readers-on-coping-with-grief-at-christmas',
'https://www.theguardian.com/commentisfree/2017/jan/06/brexit-vote-have-you-applied-for-a-second-passport',
'https://www.theguardian.com/fashion/2017/mar/22/fiorucci-why-the-disco-friendly-label-is-perfect-for-2017',
'https://www.theguardian.com/film/2017/mar/17/from-the-corner-of-the-oval-obama-white-house-movie',
'https://www.theguardian.com/film/2017/mar/22/film-franchises-terminator-sequel-arnold-schwarzenegger-die-hard-alien',
'https://www.theguardian.com/law/2017/apr/12/judge-sacked-over-online-posts-calling-his-critics-donkeys',
'https://www.theguardian.com/lifeandstyle/2017/mar/17/monopoly-board-game-new-tokens-vote',
'https://www.theguardian.com/music/2017/mar/16/stormzy-condemns-nme-for-using-him-as-poster-boy-for-depression',
'https://www.theguardian.com/music/2017/mar/21/los-angeles-police-mistake-wyclef-jean-suspect-assault-case',
'https://www.theguardian.com/politics/2017/mar/22/uk-based-airlines-told-to-move-to-europe-after-brexit-or-lose-major-routes',
'https://www.theguardian.com/society/2017/apr/11/national-social-care-service-centralised-nhs',
'https://www.theguardian.com/sport/2017/mar/17/wales-france-six-nations-world-rankings',
'https://www.theguardian.com/tv-and-radio/2017/mar/22/n-word-taboo-tv-carmichael-show-atlanta-insecure-language',
'https://www.theguardian.com/uk-news/2017/mar/16/man-dies-explosion-former-petrol-station-highgate-north-london-swains-lane',
'https://www.theguardian.com/us-news/2017/mar/17/national-weather-service-forecasting-temperatures-storms',
'https://www.theguardian.com/us-news/2017/mar/22/fbi-muslim-employees-discrimination-religion-middle-east-travel',
'https://www.theguardian.com/us-news/2017/mar/22/zapier-pay-employees-move-silicon-valley-startup',
'https://www.theguardian.com/world/2017/mar/17/fleeing-from-dantes-hell-on-mount-etna',
'https://www.theguardian.com/world/2017/mar/22/gay-clergyman-jeffrey-johns-turned-down-welsh-bishop-twice-before-claims',
'https://www.theguardian.com/world/2017/mar/23/apple-paid-no-tax-in-new-zealand-for-at-least-a-decade-reports-say',
'https://www.theguardian.com/books/2017/mar/22/comics-chavez-redline-transformers-v-gi-joe',
'https://www.theguardian.com/business/2017/apr/11/uk-inflation-rate-stays-three-year-high',
'https://www.theguardian.com/commentisfree/2017/apr/12/charlie-gard-legal-aid',
'https://www.theguardian.com/commentisfree/2017/mar/22/rights-gig-economy-self-employed-worker',
'https://www.theguardian.com/media/2017/mar/14/face-off-mps-and-social-media-giants-online-hate-speech-facebook-twitter',
'https://www.theguardian.com/music/2017/apr/11/michael-buble-wife-says-son-noah-is-recovering-from-cancer',
'https://www.theguardian.com/society/2017/apr/11/bullying-and-violence-grip-out-of-control-guys-marsh-jail-dorset',
'https://www.theguardian.com/stage/2017/mar/22/trisha-brown-obituary',
'https://www.theguardian.com/travel/2017/mar/22/10-best-clubs-in-amsterdam-chosen-by-dj-experts',
'https://www.theguardian.com/us-news/2017/apr/11/us-universal-healthcare-single-payer-rallies',
'https://www.theguardian.com/us-news/2017/mar/22/us-border-agent-sexually-assaults-teenage-sisters-texas',
'https://www.theguardian.com/world/2017/apr/11/hundreds-of-refugees-missing-after-dunkirk-camp-fire',
'https://www.theguardian.com/world/2017/mar/22/unicef-condemns-sale-cambodian-breast-milk-us-mothers-firm-ambrosia-labs',
'https://www.theguardian.com/world/commentisfree/2017/mar/17/week-in-patriarchy-bbc-dad-jessica-valenti',
'https://www.theguardian.com/business/2017/mar/15/us-federal-reserve-raises-interest-rates-to-1',
'https://www.theguardian.com/business/2017/mar/21/london-cycle-courier-was-punished-for-refusing-work-after-eight-hours-in-cold',
'https://www.theguardian.com/football/2017/mar/17/tottenham-harry-kane-return-injury',
'https://www.theguardian.com/politics/2017/mar/15/browse-of-commons-explore-uk-parliament-with-first-virtual-tour',
'https://www.theguardian.com/politics/2017/mar/21/martin-mcguinness-sinn-fein-members-carry-coffin-home-in-derry',
'https://www.theguardian.com/sport/2017/mar/18/ireland-england-six-nations-dublin',
'https://www.theguardian.com/us-news/2017/mar/20/ivanka-trump-west-wing-office-security-clearance',
'https://www.theguardian.com/film/2017/mar/21/look-on-the-sweet-side-of-love-actually',
'https://www.theguardian.com/media/2017/mar/20/jamie-oliver-new-show-deal-channel-4-tv',
'https://www.theguardian.com/politics/2017/mar/16/theresa-may-vows-absolute-faith-in-hammond-after-u-turn',
'https://www.theguardian.com/politics/2017/mar/21/nicola-sturgeon-accused-of-hypocrisy-as-independence-debate-begins',
'https://www.theguardian.com/sport/2017/mar/17/jailed-transgender-fell-runner-thought-uk-athletics-was-trying-to-kill-her',
'https://www.theguardian.com/uk-news/2017/mar/16/former-marine-cleared-alexander-blackman-freed-immediately-ex-soldier-jail',
'https://www.theguardian.com/world/2017/mar/16/india-brexit-and-the-legacy-of-empire-in-africa',
'https://www.theguardian.com/world/2017/mar/18/a-good-looking-bird-the-bush-stone-curlew-that-loves-its-own-reflection',
'https://www.theguardian.com/world/2017/mar/21/electronics-ban-middle-east-flights-safety-hazards-airline-profit',
'https://www.theguardian.com/business/2017/mar/14/us-federal-reserve-interest-rates-janet-yellen-donald-trump',
'https://www.theguardian.com/business/2017/mar/16/rupert-murdoch-sky-bid-uk-ofcom',
'https://www.theguardian.com/business/2017/mar/20/us-forbids-devices-larger-cell-phones-flights-13-countries',
'https://www.theguardian.com/business/2017/mar/22/uk-ceos-national-living-wage-equality-trust-pay-gap',
'https://www.theguardian.com/football/2017/mar/17/arsene-wenger-granit-xhaka-referees',
'https://www.theguardian.com/lifeandstyle/2017/mar/17/chorizo-chicken-lemon-yoghurt-cavolo-nero-recipe-anna-hansen',
'https://www.theguardian.com/politics/2017/mar/17/george-osborne-london-evening-standard-editor-appointment-evgeny-lebedev',
'https://www.theguardian.com/uk-news/2017/mar/16/scotland-cannot-afford-to-ignore-its-deficit',
'https://www.theguardian.com/uk-news/2017/mar/17/prince-william-visits-paris-for-the-first-time-since-mother-dianas-death',
'https://www.theguardian.com/us-news/2017/mar/16/oc-actor-mischa-barton-speaks-out-sex-tapes-scandal',
'https://www.theguardian.com/world/2017/mar/15/uk-government-child-slavery-products-sold-britain-innovation-fund',
'https://www.theguardian.com/commentisfree/2017/mar/17/the-guardian-view-on-brexit-and-publishing-a-hardcore-problem',
'https://www.theguardian.com/politics/2017/mar/21/osborne-becomes-the-remainers-great-hope',
'https://www.theguardian.com/society/2017/mar/16/scotlands-exam-body-to-ensure-invigilators-get-living-wage',
'https://www.theguardian.com/society/2017/mar/18/rural-deprivation-and-ill-health-in-england-in-danger-of-being-overlooked',
'https://www.theguardian.com/sport/2017/mar/16/michael-oleary-team-not-ruling-out-return-mullins-yard-cheltenham-festival-horse-racing',
'https://www.theguardian.com/sport/2017/mar/17/ireland-v-england-lions-six-nations-rugby-union',
'https://www.theguardian.com/sport/2017/mar/18/this-is-your-night-conlans-dream-debut-wipes-out-nightmares-of-the-past',
'https://www.theguardian.com/sport/2017/mar/21/bha-dope-tests-horses-racecourse',
'https://www.theguardian.com/sport/2017/mar/21/donald-trump-colin-kaepernick-free-agent-anthem-protest',
'https://www.theguardian.com/uk-news/2017/mar/16/protect-survive-nuclear-war-republished-pamphlet',
'https://www.theguardian.com/uk-news/2017/mar/21/sisters-al-najjar-sue-cumberland-hotel-london-brutal-hammer-attack',
'https://www.theguardian.com/uk-news/2017/mar/22/what-support-does-your-employer-give-to-fathers',
'https://www.theguardian.com/artanddesign/2017/mar/21/winged-bull-and-giant-dollop-of-cream-to-adorn-trafalgar-squares-fourth-plinth',
'https://www.theguardian.com/books/2017/mar/17/the-bone-readers-jacob-ross-caribbean-thriller-jhalak-prize',
'https://www.theguardian.com/business/2017/mar/11/democrats-question-trump-conflict-of-interest-deutsche-bank-investigation-money-laundering',
'https://www.theguardian.com/business/2017/mar/17/barclays-bob-diamond-panmure-gordon',
'https://www.theguardian.com/commentisfree/2017/mar/15/brexit-was-an-english-vote-for-independence-you-cant-begrudge-the-scots-the-same',
'https://www.theguardian.com/environment/2017/mar/21/the-snow-buntings-drift-takes-them-much-further-than-somerset',
'https://www.theguardian.com/fashion/2017/mar/21/art-colour-victoria-beckham-van-gogh-fashion',
'https://www.theguardian.com/lifeandstyle/2017/mar/17/i-am-26-and-find-it-hard-to-meet-people-on-the-same-wavelength-as-me',
'https://www.theguardian.com/lifeandstyle/shortcuts/2017/mar/21/open-a-window-and-have-a-cold-shower-could-being-chilly-improve-your-health',
'https://www.theguardian.com/society/2017/mar/22/four-supersized-prisons-to-be-built-england-and-wales-elizabeth-truss-plan',
'https://www.theguardian.com/sport/2017/mar/17/ben-youngs-england-ireland-grand-slam-six-nations',
'https://www.theguardian.com/technology/2017/mar/17/google-ads-bike-helmets-adverts',
'https://www.theguardian.com/us-news/2017/mar/20/fbi-director-comey-confirms-investigation-trump-russia',
'https://www.theguardian.com/world/2017/mar/17/time-for-a-declaration-of-war-on-happiness']
fieldnames = ['id','url','title','paragraphCount','text','commentCount','comments','type','section','published']
filename = 'guardian'
# open file to write to
w = open(filename+'.csv', 'wb')
wr = csv.writer(w, delimiter=',',quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
wr.writerow(fieldnames)
# keep track of how many articles are used
count = 0
# keep list of seen articles
# go through each file
for file in articles:
query = {
"query": {
"constant_score": {
"filter": {
"term": {
"url": file
}
}
}
},
"from": 0,
"size": 1
}
response = es.search(index="controcurator", doc_type="article", body=query)
if len(response['hits']['hits']) == 0:
print "-- ARTICLE NOT FOUND --"
continue
print file
article = response['hits']['hits'][0]['_source']
if 'comments' not in article:
print "-- NO COMMENTS --"
continue
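	# Split the article's comments into social-media reactions (entries carrying a
	# 'type' field) and regular reader comments; keep at most five short ones of each
	# and drop moderated ("This comment was removed") entries.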
socmed = [c['text'] for c in article['comments'] if 'type' in c and len(c['text']) < 300 and not c['text'].startswith('This comment was removed')][0:5]
comments = [c['text'] for c in article['comments'] if 'type' not in c and len(c['text']) < 300 and not c['text'].startswith('This comment was removed')][0:5]
print "SOCMED:",len(socmed)
print "COMMENTS:",len(comments)
if len(socmed) < 5:
print 'TOO FEW SOCMED:',len(socmed)
continue
commentCount = len(socmed)
paragraphs = article['document']['text'].split('</p>')
text = '</p>'.join(paragraphs[:2]) + '</p>'
# array for one row on the csv
row = [
response['hits']['hits'][0]['_id'],
article['url'],
article['document']['title'],
0,
text.encode('UTF-8'),
commentCount,
'||'.join(socmed).encode('UTF-8'),
article['type'],
0,
article['published']
]
#print 'SAVED:',title
wr.writerow(row)
count += 1
w.close()
print count
|
ControCurator/controcurator
|
cronjobs/generateInput.py
|
Python
|
mit
| 19,551
|
[
"VisIt"
] |
d7de047f9f5fedaacc8ff73130db4be700af98795f1cdf1411b9e51a2a8be3df
|
import nest
import nest.voltage_trace
import nest.raster_plot
import pylab as pl
nest.ResetKernel()
nest.SetKernelStatus({'local_num_threads': 4,
'resolution': 0.01})
lol = {'C_m': 1.5}
nest.SetDefaults("hh_psc_alpha", lol)
neuron = nest.Create("hh_psc_alpha")
noise = nest.Create("poisson_generator")
nest.SetStatus(noise, {'start': 10., 'stop': 40., 'rate': 1000.})
mm = nest.Create("multimeter")
det = nest.Create("spike_detector")
nest.SetStatus(mm, {"withgid": True, "withtime": True, 'record_from': ['V_m', 'Act_m', 'Inact_n', 'Act_h'], 'interval' :0.1})
nest.Connect(noise, neuron, syn_spec={'weight': 90.0})
nest.Connect(mm, neuron)
nest.Connect(neuron, det)
# m, h = Na channel gating variables
# n = K channel gating variable
# Conductances (g_Na, g_K) are in nS; hh_psc_alpha is a point-neuron model
g_Na = nest.GetDefaults("hh_psc_alpha")['g_Na']
E_Na = nest.GetDefaults("hh_psc_alpha")['E_Na']
g_K = nest.GetDefaults("hh_psc_alpha")['g_K']
E_K = nest.GetDefaults("hh_psc_alpha")['E_K']
nest.Simulate(70.)
events = nest.GetStatus(mm)[0]['events']
t = events['times']
pl.subplot(221)
nest.voltage_trace.from_device(mm)
pl.plot(t, events['V_m'], 'b')
pl.plot(nest.GetStatus(det)[0]['events']['times'], nest.GetStatus(det)[0]['events']['senders'], marker='.', color='r')
pl.subplot(222)
pl.plot(t, events['Act_m'], 'r', t, events['Inact_n'], 'g' )
#pl.plot(t, [ event * g_Na for event in events['Act_m'] ], t, [ event * g_K for event in events['Inact_n'] ])
#pl.plot(t, [ -event for event in events['Act_m'] ], t, [ event - 0.3 for event in events['Inact_n'] ])
pl.legend( ('Na', 'K') )
pl.title("Ion channels")
pl.ylabel("Channel activation")
pl.xlabel("Time (ms)")
I_Na_list = []
I_K_list = []
# Chloride
# I_L = g_L * (V_m - E_L)
# http://humanphysiology.tuars.com/program/section1/1ch4/s1ch4_49.htm
for i in range( len(events['V_m']) ):
m = events['Act_m'][i]
h = events['Act_h'][i]
n = events['Inact_n'][i]
V_m = events['V_m'][i]
I_Na_list.append( m**3 * h * g_Na * (V_m - E_Na) )
I_K_list.append( n**4 * g_K * (V_m - E_K) )
print 'm={} | h={} | n={} | V_m={} | I_Na={} | I_K={}'.format(m, h, n, V_m, I_Na_list[i], I_K_list[i])
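# Sketch (not in the original script): the leak current from the comment above,
# I_L = g_L * (V_m - E_L), could be computed the same way, assuming the
# hh_psc_alpha defaults also expose 'g_L' and 'E_L':
#   g_L = nest.GetDefaults("hh_psc_alpha")['g_L']
#   E_L = nest.GetDefaults("hh_psc_alpha")['E_L']
#   I_L_list = [g_L * (V_m - E_L) for V_m in events['V_m']]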
pl.subplot(223)
pl.plot(t, I_Na_list, 'r')
pl.legend( ('Na', 'K') )
pl.title("Ion channels")
pl.ylabel("nA ")
pl.xlabel("Time (ms)")
pl.subplot(224)
pl.plot(t, I_K_list, 'g', t, I_Na_list, 'r')
pl.legend( ('Na', 'K') )
pl.title("Ion channels")
pl.ylabel("nA ")
pl.xlabel("Time (ms)")
pl.show()
pl.close()
|
research-team/NEUCOGAR
|
NEST/cube/dopamine/integrated/scripts/test.py
|
Python
|
gpl-2.0
| 2,416
|
[
"NEURON"
] |
7fcc57a0862f550ee3beaa9e9409f3fc56796cf9477c2b77b855055ec538d0ce
|
# Copyright (c) Amber Brown, 2015
# See LICENSE for details.
import os
from textwrap import dedent
from twisted.trial.unittest import TestCase
import mock
from click.testing import CliRunner
from ..create import _main
def setup_simple_project(config=None, mkdir=True):
if not config:
config = dedent(
"""\
[tool.towncrier]
package = "foo"
"""
)
with open("pyproject.toml", "w") as f:
f.write(config)
os.mkdir("foo")
with open("foo/__init__.py", "w") as f:
f.write('__version__ = "1.2.3"\n')
if mkdir:
os.mkdir("foo/newsfragments")
class TestCli(TestCase):
maxDiff = None
def _test_success(
self, content=None, config=None, mkdir=True, additional_args=None
):
runner = CliRunner()
with runner.isolated_filesystem():
setup_simple_project(config, mkdir)
args = ["123.feature.rst"]
if content is None:
content = ["Add your info here"]
if additional_args is not None:
args.extend(additional_args)
result = runner.invoke(_main, args)
self.assertEqual(["123.feature.rst"], os.listdir("foo/newsfragments"))
with open("foo/newsfragments/123.feature.rst") as fh:
self.assertEqual(content, fh.readlines())
self.assertEqual(0, result.exit_code)
def test_basics(self):
"""Ensure file created where output directory already exists."""
self._test_success(mkdir=True)
def test_directory_created(self):
"""Ensure both file and output directory created if necessary."""
self._test_success(mkdir=False)
def test_edit_without_comments(self):
"""Create file with dynamic content."""
content = ["This is line 1\n", "This is line 2"]
with mock.patch("click.edit") as mock_edit:
mock_edit.return_value = "".join(content)
self._test_success(content=content, additional_args=["--edit"])
def test_edit_with_comment(self):
"""Create file editly with ignored line."""
content = ["This is line 1\n", "This is line 2"]
comment = "# I am ignored\n"
with mock.patch("click.edit") as mock_edit:
mock_edit.return_value = "".join(content[:1] + [comment] + content[1:])
self._test_success(content=content, additional_args=["--edit"])
def test_edit_abort(self):
"""Create file editly and abort."""
with mock.patch("click.edit") as mock_edit:
mock_edit.return_value = None
runner = CliRunner()
with runner.isolated_filesystem():
setup_simple_project(config=None, mkdir=True)
result = runner.invoke(_main, ["123.feature.rst", "--edit"])
self.assertEqual([], os.listdir("foo/newsfragments"))
self.assertEqual(1, result.exit_code)
def test_different_directory(self):
"""Ensure non-standard directories are used."""
runner = CliRunner()
config = dedent(
"""\
[tool.towncrier]
directory = "releasenotes"
"""
)
with runner.isolated_filesystem():
setup_simple_project(config, mkdir=False)
os.mkdir("releasenotes")
result = runner.invoke(_main, ["123.feature.rst"])
self.assertEqual(["123.feature.rst"], os.listdir("releasenotes"))
self.assertEqual(0, result.exit_code)
def test_invalid_section(self):
"""Ensure creating a path without a valid section is rejected."""
runner = CliRunner()
with runner.isolated_filesystem():
setup_simple_project()
self.assertEqual([], os.listdir("foo/newsfragments"))
result = runner.invoke(_main, ["123.foobar.rst"])
self.assertEqual([], os.listdir("foo/newsfragments"))
self.assertEqual(type(result.exception), SystemExit, result.exception)
self.assertIn(
"Expected filename '123.foobar.rst' to be of format", result.output
)
def test_file_exists(self):
"""Ensure we don't overwrite existing files."""
runner = CliRunner()
with runner.isolated_filesystem():
setup_simple_project()
self.assertEqual([], os.listdir("foo/newsfragments"))
runner.invoke(_main, ["123.feature.rst"])
result = runner.invoke(_main, ["123.feature.rst"])
self.assertEqual(type(result.exception), SystemExit)
self.assertIn("123.feature.rst already exists", result.output)
|
hawkowl/towncrier
|
src/towncrier/test/test_create.py
|
Python
|
mit
| 4,684
|
[
"Amber"
] |
cd820f7b90b73e60c151fb37b5d8020c2aee533ff0f04b14cc6e8672fa6cfa41
|
"""
vtkDrawing
Convenience methods for creating simple vtk objects
that can be used in renderers.
Call one of the methods with some custom parameters
and out comes a vtkActor that can be given to a
renderer with AddViewProp().
:Authors:
Berend Klein Haneveld
"""
from vtk import vtkActor
from vtk import vtkVectorText
from vtk import vtkLineSource
from vtk import vtkSphereSource
from vtk import vtkRegularPolygonSource
from vtk import vtkDataSetMapper
from vtk import vtkPolyDataMapper
from vtk import vtkFollower
from vtk import vtkAssembly
from vtk import vtkMatrix4x4
from vtk import vtkTransform
from vtk import vtkOutlineSource
from vtk import vtkConeSource
from vtk import vtkParametricTorus
from vtk import vtkParametricFunctionSource
from vtk import vtkTubeFilter
from vtk import vtkAppendPolyData
from vtk import vtkCubeSource
from vtk import vtkTransformFilter
import math
from core.operations import Add
from core.operations import Subtract
from core.operations import Multiply
def TransformWithMatrix(matrix):
"""
	Return a vtkTransform initialized with a copy of the given matrix.
"""
matrixCopy = vtkMatrix4x4()
matrixCopy.DeepCopy(matrix)
transform = vtkTransform()
transform.SetMatrix(matrixCopy)
return transform
def ColorActor(actor, color, opacity=None):
"""
Give the actor a custom color and / or opacity.
"""
if color:
actor.GetProperty().SetColor(color[0], color[1], color[2])
if opacity:
actor.GetProperty().SetOpacity(opacity)
def CreateLine(p1, p2, color=None):
"""
Creates a line between p1 and p2.
"""
lineSource = vtkLineSource()
lineSource.SetPoint1(p1[0], p1[1], p1[2])
lineSource.SetPoint2(p2[0], p2[1], p2[2])
lineMapper = vtkDataSetMapper()
lineMapper.SetInputConnection(lineSource.GetOutputPort())
lineActor = vtkActor()
lineActor.SetMapper(lineMapper)
# Give the actor a custom color
ColorActor(lineActor, color)
return lineActor
def CreateLineBeginAndEnd(p1, p2, length, color=None):
"""
	length is a value between 0 and 0.5 that specifies how long
	each begin and end part is relative to the complete line.
:rtype: list of line actors
"""
point1 = p1
point2 = Add(p1, Multiply(Subtract(p2, p1), length))
point3 = p2
point4 = Add(p2, Multiply(Subtract(p1, p2), length))
line1 = CreateLine(point1, point2, color)
line2 = CreateLine(point3, point4, color)
return [line1, line2]
def CreateSphere(radius, color=None):
sphereSource = vtkSphereSource()
sphereSource.SetRadius(radius)
sphereSource.SetThetaResolution(18)
sphereSource.SetPhiResolution(18)
sphereMapper = vtkPolyDataMapper()
sphereMapper.SetInputConnection(sphereSource.GetOutputPort())
sphereActor = vtkActor()
sphereActor.PickableOff()
sphereActor.SetMapper(sphereMapper)
# Give the actor a custom color
ColorActor(sphereActor, color)
# Also give the sphere object the convenience methods of
# SetCenter() and GetCenter() that misses from the vtkActor
# class but is present in the vtkSphereSource class
def setCenter(x, y, z):
sphereSource.SetCenter(x, y, z)
def getCenter():
return sphereSource.GetCenter()
setattr(sphereActor, "SetCenter", setCenter)
setattr(sphereActor, "GetCenter", getCenter)
return sphereActor
def CreateTextItem(text, scale, camera, color=None):
textSource = vtkVectorText()
textSource.SetText(text)
textMapper = vtkPolyDataMapper()
textMapper.SetInputConnection(textSource.GetOutputPort())
textFollower = vtkFollower()
textFollower.SetMapper(textMapper)
textFollower.SetCamera(camera)
textFollower.SetScale(scale)
# Give the actor a custom color
ColorActor(textFollower, color)
return textFollower
def CreateCircle(radius):
circleSource = vtkRegularPolygonSource()
circleSource.SetNumberOfSides(32)
circleSource.SetRadius(radius)
circleSource.SetGeneratePolygon(False)
circleMapper = vtkPolyDataMapper()
circleMapper.SetInputConnection(circleSource.GetOutputPort())
circle = vtkActor()
circle.PickableOff()
circle.SetMapper(circleMapper)
circle.GetProperty().SetColor(1.0, 0.5, 0.5)
return circle
def CreateSquare(width, color=None, zOffset=0):
halfWidth = width / 2.0
squareSource = vtkOutlineSource()
squareSource.GenerateFacesOff()
squareSource.SetBounds(-halfWidth, halfWidth, -halfWidth, halfWidth, zOffset, zOffset)
squareMapper = vtkPolyDataMapper()
squareMapper.SetInputConnection(squareSource.GetOutputPort())
square = vtkActor()
square.PickableOff()
square.SetMapper(squareMapper)
square.GetProperty().SetColor(1.0, 0.5, 0.5)
ColorActor(square, color)
return square
def CreateTorus(point1, point2, axe):
"""
	Creates a torus that has point1 as its center; point2 defines
	a point on the torus.
"""
direction = map(lambda x, y: x - y, point2, point1)
length = math.sqrt(sum(map(lambda x: x ** 2, direction)))
torus = vtkParametricTorus()
torus.SetRingRadius(length / 1.5)
torus.SetCrossSectionRadius(length / 30.0)
torusSource = vtkParametricFunctionSource()
torusSource.SetParametricFunction(torus)
torusSource.SetScalarModeToPhase()
torusSource.Update()
transform = vtkTransform()
if axe == 0:
transform.RotateY(90)
elif axe == 1:
transform.RotateX(90)
transformFilter = vtkTransformFilter()
transformFilter.SetInputConnection(torusSource.GetOutputPort())
transformFilter.SetTransform(transform)
transformFilter.Update()
torusMapper = vtkPolyDataMapper()
torusMapper.SetInputConnection(transformFilter.GetOutputPort())
torusActor = vtkActor()
torusActor.SetMapper(torusMapper)
return torusActor, transformFilter.GetOutput()
def CreateBoxOnStick(point1, point2, tipRatio=0.3):
"""
	Creates a stick with a box as its tip, from point1 to point2.
	Use tipRatio to set the relative size of the tip.
"""
direction = map(lambda x, y: x - y, point2, point1)
length = math.sqrt(sum(map(lambda x: x ** 2, direction)))
unitDir = map(lambda x: x / length, direction)
shaftDir = map(lambda x: x * (1.0 - tipRatio), unitDir)
tipPos = map(lambda x: x * (1.0 - (tipRatio * 0.5)), unitDir)
lineSource = vtkLineSource()
lineSource.SetPoint1(0, 0, 0)
lineSource.SetPoint2(shaftDir)
tubeFilter = vtkTubeFilter()
tubeFilter.SetInputConnection(lineSource.GetOutputPort())
tubeFilter.SetRadius(0.02)
tubeFilter.SetNumberOfSides(8)
tubeFilter.CappingOn()
cubeSource = vtkCubeSource()
# cubeSource.CappingOn()
cubeSource.SetXLength(tipRatio)
cubeSource.SetYLength(tipRatio)
cubeSource.SetZLength(tipRatio)
cubeSource.SetCenter(tipPos)
polyCombine = vtkAppendPolyData()
polyCombine.AddInputConnection(tubeFilter.GetOutputPort())
polyCombine.AddInputConnection(cubeSource.GetOutputPort())
polyCombine.Update()
polyMapper = vtkDataSetMapper()
polyMapper.SetInputConnection(polyCombine.GetOutputPort())
arrow = vtkActor()
arrow.SetMapper(polyMapper)
arrow.SetScale(length)
arrow.SetPosition(point1)
arrow.GetProperty().SetColor(1.0, 0.0, 1.0)
return arrow, polyCombine.GetOutput()
def CreateArrow(point1, point2, tipRatio=0.3):
"""
	Creates an arrow from point1 to point2. Use tipRatio to set
	the relative size of the arrow tip.
"""
direction = map(lambda x, y: x - y, point2, point1)
length = math.sqrt(sum(map(lambda x: x ** 2, direction)))
unitDir = map(lambda x: x / length, direction)
shaftDir = map(lambda x: x * (1.0 - tipRatio), unitDir)
tipPos = map(lambda x: x * (1.0 - (tipRatio * 0.5)), unitDir)
lineSource = vtkLineSource()
lineSource.SetPoint1(0, 0, 0)
lineSource.SetPoint2(shaftDir)
tubeFilter = vtkTubeFilter()
tubeFilter.SetInputConnection(lineSource.GetOutputPort())
tubeFilter.SetRadius(0.02)
tubeFilter.SetNumberOfSides(8)
tubeFilter.CappingOn()
coneSource = vtkConeSource()
coneSource.CappingOn()
coneSource.SetHeight(tipRatio)
coneSource.SetRadius(.2)
coneSource.SetResolution(16)
coneSource.SetCenter(tipPos)
coneSource.SetDirection(tipPos)
polyCombine = vtkAppendPolyData()
polyCombine.AddInputConnection(tubeFilter.GetOutputPort())
polyCombine.AddInputConnection(coneSource.GetOutputPort())
polyCombine.Update()
polyMapper = vtkDataSetMapper()
polyMapper.SetInputConnection(polyCombine.GetOutputPort())
arrow = vtkActor()
arrow.SetMapper(polyMapper)
arrow.SetScale(length)
arrow.SetPosition(point1)
arrow.GetProperty().SetColor(1.0, 0.0, 1.0)
return arrow, polyCombine.GetOutput()
def CreateOutline(bounds, color=None):
squareSource = vtkOutlineSource()
squareSource.GenerateFacesOff()
squareSource.SetBounds(bounds)
squareMapper = vtkPolyDataMapper()
squareMapper.SetInputConnection(squareSource.GetOutputPort())
square = vtkActor()
square.PickableOff()
square.SetMapper(squareMapper)
square.GetProperty().SetColor(1.0, 1.0, 1.0)
ColorActor(square, color)
return square
def CreateBounds(bounds):
"""
Creates a boundary object to display around a volume.
:rtype: list of actors
"""
originX = bounds[0]
originY = bounds[2]
originZ = bounds[4]
boundX = bounds[1]
boundY = bounds[3]
boundZ = bounds[5]
linePartLength = 0.2
lineActors = []
lineActors += CreateLineBeginAndEnd([originX, originY, originZ], [boundX, originY, originZ], linePartLength)
lineActors += CreateLineBeginAndEnd([originX, originY, originZ], [originX, boundY, originZ], linePartLength)
lineActors += CreateLineBeginAndEnd([originX, originY, originZ], [originX, originY, boundZ], linePartLength)
ColorActor(lineActors[0], [1, 0, 0])
ColorActor(lineActors[2], [0, 1, 0])
ColorActor(lineActors[4], [0, 0, 1])
lineActors += CreateLineBeginAndEnd([boundX, boundY, boundZ], [boundX, boundY, originZ], linePartLength)
lineActors += CreateLineBeginAndEnd([boundX, boundY, boundZ], [originX, boundY, boundZ], linePartLength)
lineActors += CreateLineBeginAndEnd([boundX, boundY, boundZ], [boundX, originY, boundZ], linePartLength)
lineActors += CreateLineBeginAndEnd([boundX, originY, originZ], [boundX, originY, boundZ], linePartLength)
lineActors += CreateLineBeginAndEnd([boundX, originY, originZ], [boundX, boundY, originZ], linePartLength)
lineActors += CreateLineBeginAndEnd([originX, boundY, originZ], [originX, boundY, boundZ], linePartLength)
lineActors += CreateLineBeginAndEnd([originX, boundY, originZ], [boundX, boundY, originZ], linePartLength)
lineActors += CreateLineBeginAndEnd([originX, originY, boundZ], [originX, boundY, boundZ], linePartLength)
lineActors += CreateLineBeginAndEnd([originX, originY, boundZ], [boundX, originY, boundZ], linePartLength)
for lineActor in lineActors:
ColorActor(lineActor, color=None, opacity=0.5)
mean = reduce(lambda x, y: x + y, bounds) / 3.0
sphereActor = CreateSphere(mean / 25.0)
sphereActor.SetPosition(originX, originY, originZ)
dataGrid = vtkAssembly()
for lineActor in lineActors:
dataGrid.AddPart(lineActor)
return [dataGrid, sphereActor]
def CreateOrientationGrid(bounds, camera):
	# NOTE: the orientation grid is currently disabled; this early return makes
	# the rest of the function unreachable.
	return []
originX = bounds[0]
originY = bounds[2]
originZ = bounds[4]
boundX = bounds[1] * 1.2
boundY = bounds[3] * 1.2
boundZ = bounds[5] * 1.2
lineActorsX = []
lineActorsY = []
lineActorsZ = []
lineText = []
# Create the main axes
lineActorsX.append(CreateLine([0, 0, 0], [boundX, 0, 0]))
lineActorsX.append(CreateLine([0, 0, 0], [originX, 0, 0]))
lineActorsY.append(CreateLine([0, 0, 0], [0, boundY, 0]))
lineActorsY.append(CreateLine([0, 0, 0], [0, originY, 0]))
lineActorsZ.append(CreateLine([0, 0, 0], [0, 0, boundZ]))
lineActorsZ.append(CreateLine([0, 0, 0], [0, 0, originZ]))
# Create the nudges on the X axis
subdivSize = boundX / 10
subdivSize = ClosestToMeasurement(subdivSize)
smallHandleSize = subdivSize / 5.0
bigHandleSize = 2 * smallHandleSize
for index in range(1, int(boundX / subdivSize)):
handleSize = smallHandleSize if index % 5 != 0 else bigHandleSize
lineActorsX.append(CreateLine([index * subdivSize, 0, 0], [index * subdivSize, handleSize, 0]))
lineActorsX.append(CreateLine([index * subdivSize, 0, 0], [index * subdivSize, 0, handleSize]))
if index > 0 and index % 5 == 0:
textItem = CreateTextItem(str(index * subdivSize), 0.4 * subdivSize, camera)
textItem.SetPosition([index * subdivSize, -handleSize, -handleSize])
ColorActor(textItem, color=[0.6, 0.6, 0.6])
lineText.append(textItem)
textItemX = CreateTextItem("X", 0.5 * subdivSize, camera)
textItemX.SetPosition([boundX, 0, 0])
# Create the nudges on the Y axis
subdivSize = boundY / 10
subdivSize = ClosestToMeasurement(subdivSize)
smallHandleSize = subdivSize / 5.0
for index in range(1, int(boundY / subdivSize)):
handleSize = smallHandleSize if index % 5 != 0 else bigHandleSize
lineActorsY.append(CreateLine([0, index * subdivSize, 0], [handleSize, index * subdivSize, 0]))
lineActorsY.append(CreateLine([0, index * subdivSize, 0], [0, index * subdivSize, handleSize]))
if index > 0 and index % 5 == 0:
textItem = CreateTextItem(str(index * subdivSize), 0.4 * subdivSize, camera)
textItem.SetPosition([-smallHandleSize, index * subdivSize, -smallHandleSize])
ColorActor(textItem, color=[0.6, 0.6, 0.6])
lineText.append(textItem)
textItemY = CreateTextItem("Y", 0.5 * subdivSize, camera)
textItemY.SetPosition([0, boundY, 0])
# Create the nudges on the Z axis
subdivSize = boundZ / 10
subdivSize = ClosestToMeasurement(subdivSize)
smallHandleSize = subdivSize / 5.0
for index in range(1, int(boundZ / subdivSize)):
handleSize = smallHandleSize if index % 5 != 0 else bigHandleSize
lineActorsZ.append(CreateLine([0, 0, index * subdivSize], [handleSize, 0, index * subdivSize]))
lineActorsZ.append(CreateLine([0, 0, index * subdivSize], [0, handleSize, index * subdivSize]))
if index > 0 and index % 5 == 0:
textItem = CreateTextItem(str(index * subdivSize), 0.4 * subdivSize, camera)
textItem.SetPosition([-handleSize, -handleSize, index * subdivSize])
ColorActor(textItem, color=[0.6, 0.6, 0.6])
lineText.append(textItem)
textItemZ = CreateTextItem("Z", 0.5 * subdivSize, camera)
textItemZ.SetPosition([0, 0, boundZ])
# Color the axis: R, G and B
for lineActor in lineActorsX:
ColorActor(lineActor, [1, 0, 0])
for lineActor in lineActorsY:
ColorActor(lineActor, [0, 1, 0])
for lineActor in lineActorsZ:
ColorActor(lineActor, [0, 0, 1])
# Add the lines into one big assembly
dataGrid = vtkAssembly()
for lineActor in (lineActorsX + lineActorsY + lineActorsZ):
dataGrid.AddPart(lineActor)
return [dataGrid, textItemX, textItemY, textItemZ] + lineText
def ClosestToMeasurement(number):
# gridNudges describes the possible values for indicator intervals for the grid
gridNudges = [1, 5, 10, 50, 100, 500, 1000, 5000, 10000]
# Calculate diff
diff = map(lambda x: abs(x - number), gridNudges)
index = diff.index(min(diff))
return gridNudges[index]
|
berendkleinhaneveld/Registrationshop
|
core/vtkDrawing.py
|
Python
|
mit
| 14,609
|
[
"VTK"
] |
d20a26abb0e16dfaf4bca6e7e3ba2cc894de3d4fac9a780fe406ee12e752f5ef
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
import BasePixelTransfer
class BaseSpatialFilter:
def __init__(self):
pass
def Mean(self, img, size):
kernel = np.ones((size, size), np.int32)
dImg = cv2.filter2D(img, -1, kernel)
return dImg
def Median(self, img, size):
dImg = cv2.medianBlur(img, size)
return dImg
def GenerateGaussian(self, size, sigma, flag=True):
kernel = np.zeros((size, size), np.float64)
radius = (size - 1) / 2
for x in xrange(-radius, radius + 1):
for y in xrange(-radius, radius + 1):
kernel[x + radius, y + radius] = \
np.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2)) / (2 * np.pi * sigma ** 2)
if flag == True:
beishu = np.sum(kernel)
kernel /= beishu
return kernel
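    # GenerateGaussian above samples the 2-D Gaussian
    #   G(x, y) = exp(-(x**2 + y**2) / (2 * sigma**2)) / (2 * pi * sigma**2)
    # on an integer grid; with flag=True the weights are rescaled to sum to 1.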
def Gaussian(self, img, size, sigma):
kernel = self.GenerateGaussian(size, sigma)
dImg = cv2.filter2D(img, -1, kernel)
return dImg
def SobelDemo(self, img):
dImg = self.Sobel(img)
cv2.namedWindow("lena")
cv2.imshow("lena", img)
cv2.namedWindow("sobel")
cv2.imshow("sobel", dImg)
cv2.waitKey(0)
def Sobel(self, img):
karr = np.array([-1, -2 , -1, 0, 0, 0, 1, 2, 1])
kernel1 = karr.reshape(3,3)
kernel2 = kernel1.transpose()
img1 = cv2.filter2D(img, -1, kernel1)
img2 = cv2.filter2D(img, -1, kernel2)
dImg = img1 + img2
return dImg
def LaplacianDemo(self, img):
dImg = self.Laplacian(img)
cv2.namedWindow("lena")
cv2.imshow("lena", img)
cv2.namedWindow("laplace")
cv2.imshow("laplace", dImg)
cv2.waitKey(0)
def Laplacian(self,img):
karr = np.array([0, 1, 0, 1, -4, 1, 0, 1, 0])
kernel1 = karr.reshape(3, 3)
kernel2 = kernel1.transpose()
img1 = cv2.filter2D(img, -1, kernel1)
img2 = cv2.filter2D(img, -1, kernel2)
dImg = img1 + img2
return dImg
def LoGDemo(self, img, size, sigma):
dImg = self.LoG(img, size, sigma)
cv2.namedWindow("lena")
cv2.imshow("lena", img)
cv2.namedWindow("LoG")
cv2.imshow("LoG", dImg)
cv2.waitKey(0)
def GenerateLoG(self, size, sigma):
kernel = np.zeros((size, size), np.float64)
radius = (size - 1) / 2
for x in xrange(-radius, radius + 1):
for y in xrange(-radius, radius + 1):
kernel[x + radius, y + radius] = \
np.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2)) * (x ** 2 + y ** 2 - 2 * sigma ** 2) \
/ (sigma ** 4) #2 ** (size - 2)
beishu = 1.0 / np.sum(kernel)
kernel = beishu * kernel
kernel[radius, radius] -= 1
return beishu * kernel
def LoG(self, img, size, sigma):
kernel = self.GenerateLoG(size, sigma)
dImg = cv2.filter2D(img, -1, kernel)
return dImg
def DoGDemo(self, img, size, sigma1, sigma2):
dImg = self.DoG(img, size, sigma1, sigma2)
cv2.namedWindow("lena")
cv2.imshow("lena", img)
cv2.namedWindow("DoG")
cv2.imshow("DoG", dImg)
cv2.waitKey(0)
def DoG(self, img, size, sigma1, sigma2):
img = np.float64(img)
if sigma1 == 0:
img1 = img
else:
kernel1 = self.GenerateGaussian(5, sigma1, True)
img1 = cv2.filter2D(img, -1, kernel1)
if sigma2 == 0:
img2 = img
else:
kernel2 = self.GenerateGaussian(5, sigma2, True)
img2 = cv2.filter2D(img, -1, kernel2)
dImg = img1 - img2
return dImg
def DoGCornerDetectDemo(self, img, size, sigmaList, threv):
dImg = self.DoGCornerDetect(img, size, sigmaList, threv)
cv2.namedWindow("lena")
cv2.imshow("lena", img)
cv2.namedWindow("Corner")
cv2.imshow("Corner", dImg)
cv2.waitKey(0)
def DoGCornerDetect(self, img, size, sigmaList, threv):
if len(sigmaList) != 6:
return
dImg = img.copy() / 2
radius = 1
zRadius = 1
dogImg = np.zeros((img.shape[0], img.shape[1], len(sigmaList) / 2), np.float64)
for i in xrange(0, len(sigmaList) / 2 ):
dogImg[:, :, i] = self.DoG(img, size, sigmaList[i * 2], sigmaList[i * 2 + 1])
for x in xrange(radius, dogImg.shape[0] - radius):
for y in xrange(radius, dogImg.shape[1] - radius):
if dogImg[x,y,zRadius] >= np.max(dogImg[x-radius:x+radius+1, y-radius:y+radius+1, [0,2]]) or \
dogImg[x, y, zRadius] <= np.min(dogImg[x - radius:x + radius + 1, y - radius:y + radius + 1, \
[0, 2]]):
if threv < dogImg[x, y, zRadius] or dogImg[x, y, zRadius] < -threv:
dImg[x, y] = 255
return dImg
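# Usage sketch (not part of the original module; the image path is a placeholder):
#
#   if __name__ == '__main__':
#       img = cv2.imread('lena.png', cv2.IMREAD_GRAYSCALE)
#       f = BaseSpatialFilter()
#       blurred = f.Gaussian(img, 5, 1.0)
#       edges = f.Sobel(img)
#       f.DoGDemo(img, 5, 1.0, 2.0)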
|
artzers/NGImageProcessor
|
BaseSpatialFilter.py
|
Python
|
mit
| 5,014
|
[
"Gaussian"
] |
5464f428c4c6b3561ac733885310e9b1d5e8cbc822a22a021b1bf4aa834e063c
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The actual print service dialog
"""
import cgi
import datetime
import os
from PyQt4 import QtCore, QtGui
from lxml import html
from openlp.core.lib import Settings, UiStrings, Registry, translate, get_text_file_string
from openlp.core.ui.printservicedialog import Ui_PrintServiceDialog, ZoomSize
from openlp.core.utils import AppLocation
DEFAULT_CSS = """/*
Edit this file to customize the service order print. Note, that not all CSS
properties are supported. See:
http://doc.trolltech.com/4.7/richtext-html-subset.html#css-properties
*/
.serviceTitle {
font-weight: 600;
font-size: x-large;
color: black;
}
.item {
color: black;
}
.itemTitle {
font-weight: 600;
font-size: large;
}
.itemText {
margin-top: 10px;
}
.itemFooter {
font-size: 8px;
}
.itemNotes {}
.itemNotesTitle {
font-weight: bold;
font-size: 12px;
}
.itemNotesText {
font-size: 11px;
}
.media {}
.mediaTitle {
font-weight: bold;
font-size: 11px;
}
.mediaText {}
.imageList {}
.customNotes {
margin-top: 10px;
}
.customNotesTitle {
font-weight: bold;
font-size: 11px;
}
.customNotesText {
font-size: 11px;
}
.newPage {
page-break-before: always;
}
"""
class PrintServiceForm(QtGui.QDialog, Ui_PrintServiceDialog):
"""
The :class:`~openlp.core.ui.printserviceform.PrintServiceForm` class displays a dialog for printing the service.
"""
def __init__(self):
"""
Constructor
"""
super(PrintServiceForm, self).__init__(Registry().get('main_window'))
self.printer = QtGui.QPrinter()
self.print_dialog = QtGui.QPrintDialog(self.printer, self)
self.document = QtGui.QTextDocument()
self.zoom = 0
self.setupUi(self)
# Load the settings for the dialog.
settings = Settings()
settings.beginGroup('advanced')
self.slide_text_check_box.setChecked(settings.value('print slide text'))
self.page_break_after_text.setChecked(settings.value('add page break'))
if not self.slide_text_check_box.isChecked():
self.page_break_after_text.setDisabled(True)
self.meta_data_check_box.setChecked(settings.value('print file meta data'))
self.notes_check_box.setChecked(settings.value('print notes'))
self.zoom_combo_box.setCurrentIndex(settings.value('display size'))
settings.endGroup()
# Signals
self.print_button.triggered.connect(self.print_service_order)
self.zoom_out_button.clicked.connect(self.zoom_out)
self.zoom_in_button.clicked.connect(self.zoom_in)
self.zoom_original_button.clicked.connect(self.zoom_original)
self.preview_widget.paintRequested.connect(self.paint_requested)
self.zoom_combo_box.currentIndexChanged.connect(self.display_size_changed)
self.plain_copy.triggered.connect(self.copy_text)
self.html_copy.triggered.connect(self.copy_html_text)
self.slide_text_check_box.stateChanged.connect(self.on_slide_text_check_box_changed)
self.update_preview_text()
def toggle_options(self, checked):
"""
Toggle various options
"""
self.options_widget.setVisible(checked)
if checked:
left = self.options_button.pos().x()
top = self.toolbar.height()
self.options_widget.move(left, top)
self.title_line_edit.setFocus()
else:
self.save_options()
self.update_preview_text()
def update_preview_text(self):
"""
Creates the html text and updates the html of *self.document*.
"""
html_data = self._add_element('html')
self._add_element('head', parent=html_data)
self._add_element('title', self.title_line_edit.text(), html_data.head)
css_path = os.path.join(AppLocation.get_data_path(), 'service_print.css')
custom_css = get_text_file_string(css_path)
if not custom_css:
custom_css = DEFAULT_CSS
self._add_element('style', custom_css, html_data.head, attribute=('type', 'text/css'))
self._add_element('body', parent=html_data)
self._add_element('h1', cgi.escape(self.title_line_edit.text()), html_data.body, classId='serviceTitle')
for index, item in enumerate(self.service_manager.service_items):
self._add_preview_item(html_data.body, item['service_item'], index)
# Add the custom service notes:
if self.footer_text_edit.toPlainText():
div = self._add_element('div', parent=html_data.body, classId='customNotes')
self._add_element(
'span', translate('OpenLP.ServiceManager', 'Custom Service Notes: '), div, classId='customNotesTitle')
self._add_element('span', cgi.escape(self.footer_text_edit.toPlainText()), div, classId='customNotesText')
self.document.setHtml(html.tostring(html_data).decode())
self.preview_widget.updatePreview()
def _add_preview_item(self, body, item, index):
"""
Add a preview item
"""
div = self._add_element('div', classId='item', parent=body)
# Add the title of the service item.
item_title = self._add_element('h2', parent=div, classId='itemTitle')
self._add_element('img', parent=item_title, attribute=('src', item.icon))
self._add_element('span', ' ' + cgi.escape(item.get_display_title()), item_title)
if self.slide_text_check_box.isChecked():
# Add the text of the service item.
if item.is_text():
verse_def = None
for slide in item.get_frames():
if not verse_def or verse_def != slide['verseTag']:
text_div = self._add_element('div', parent=div, classId='itemText')
else:
self._add_element('br', parent=text_div)
self._add_element('span', slide['html'], text_div)
verse_def = slide['verseTag']
# Break the page before the div element.
if index != 0 and self.page_break_after_text.isChecked():
div.set('class', 'item newPage')
# Add the image names of the service item.
elif item.is_image():
ol = self._add_element('ol', parent=div, classId='imageList')
for slide in range(len(item.get_frames())):
self._add_element('li', item.get_frame_title(slide), ol)
# add footer
foot_text = item.foot_text
foot_text = foot_text.partition('<br>')[2]
if foot_text:
foot_text = cgi.escape(foot_text.replace('<br>', '\n'))
self._add_element('div', foot_text.replace('\n', '<br>'), parent=div, classId='itemFooter')
# Add service items' notes.
if self.notes_check_box.isChecked():
if item.notes:
p = self._add_element('div', classId='itemNotes', parent=div)
self._add_element('span', translate('OpenLP.ServiceManager', 'Notes: '), p, classId='itemNotesTitle')
self._add_element('span', cgi.escape(item.notes).replace('\n', '<br>'), p, classId='itemNotesText')
# Add play length of media files.
if item.is_media() and self.meta_data_check_box.isChecked():
tme = item.media_length
if item.end_time > 0:
tme = item.end_time - item.start_time
title = self._add_element('div', classId='media', parent=div)
self._add_element(
'span', translate('OpenLP.ServiceManager', 'Playing time: '), title, classId='mediaTitle')
self._add_element('span', str(datetime.timedelta(seconds=tme)), title, classId='mediaText')
def _add_element(self, tag, text=None, parent=None, classId=None, attribute=None):
"""
        Creates an html element. If ``text`` is given, the element's text is
        set, and if a ``parent`` is given, the element is appended to it.
``tag``
            The html tag, e. g. ``u'span'``.
``text``
The text for the tag. Defaults to ``None``.
``parent``
The parent element. Defaults to ``None``.
``classId``
Value for the class attribute
``attribute``
Tuple name/value pair to add as an optional attribute
"""
if text is not None:
element = html.fragment_fromstring(str(text), create_parent=tag)
else:
element = html.Element(tag)
if parent is not None:
parent.append(element)
if classId is not None:
element.set('class', classId)
if attribute is not None:
element.set(attribute[0], attribute[1])
return element
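    # Usage sketch (illustrative only): the notes block built in _add_preview_item
    # uses this helper roughly as follows:
    #
    #   div = self._add_element('div', classId='itemNotes', parent=body)
    #   self._add_element('span', 'Notes: ', div, classId='itemNotesTitle')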
def paint_requested(self, printer):
"""
Paint the preview of the *self.document*.
``printer``
A *QPrinter* object.
"""
self.document.print_(printer)
def display_size_changed(self, display):
"""
The Zoom Combo box has changed so set up the size.
"""
if display == ZoomSize.Page:
self.preview_widget.fitInView()
elif display == ZoomSize.Width:
self.preview_widget.fitToWidth()
elif display == ZoomSize.OneHundred:
self.preview_widget.fitToWidth()
self.preview_widget.zoomIn(1)
elif display == ZoomSize.SeventyFive:
self.preview_widget.fitToWidth()
self.preview_widget.zoomIn(0.75)
elif display == ZoomSize.Fifty:
self.preview_widget.fitToWidth()
self.preview_widget.zoomIn(0.5)
elif display == ZoomSize.TwentyFive:
self.preview_widget.fitToWidth()
self.preview_widget.zoomIn(0.25)
settings = Settings()
settings.beginGroup('advanced')
settings.setValue('display size', display)
settings.endGroup()
def copy_text(self):
"""
Copies the display text to the clipboard as plain text
"""
self.update_song_usage()
cursor = QtGui.QTextCursor(self.document)
cursor.select(QtGui.QTextCursor.Document)
clipboard_text = cursor.selectedText()
# We now have the unprocessed unicode service text in the cursor
# So we replace u2028 with \n and u2029 with \n\n and a few others
clipboard_text = clipboard_text.replace('\u2028', '\n')
clipboard_text = clipboard_text.replace('\u2029', '\n\n')
clipboard_text = clipboard_text.replace('\u2018', '\'')
clipboard_text = clipboard_text.replace('\u2019', '\'')
clipboard_text = clipboard_text.replace('\u201c', '"')
clipboard_text = clipboard_text.replace('\u201d', '"')
clipboard_text = clipboard_text.replace('\u2026', '...')
clipboard_text = clipboard_text.replace('\u2013', '-')
clipboard_text = clipboard_text.replace('\u2014', '-')
# remove the icon from the text
clipboard_text = clipboard_text.replace('\ufffc\xa0', '')
# and put it all on the clipboard
self.main_window.clipboard.setText(clipboard_text)
def copy_html_text(self):
"""
Copies the display text to the clipboard as Html
"""
self.update_song_usage()
self.main_window.clipboard.setText(self.document.toHtml())
def print_service_order(self):
"""
Called, when the *print_button* is clicked. Opens the *print_dialog*.
"""
if not self.print_dialog.exec_():
return
self.update_song_usage()
# Print the document.
self.document.print_(self.printer)
def zoom_in(self):
"""
Called when *zoom_in_button* is clicked.
"""
self.preview_widget.zoomIn()
self.zoom -= 0.1
def zoom_out(self):
"""
Called when *zoom_out_button* is clicked.
"""
self.preview_widget.zoomOut()
self.zoom += 0.1
def zoom_original(self):
"""
Called when the zoom original button is clicked.
"""
self.preview_widget.zoomIn(1 + self.zoom)
self.zoom = 0
def update_text_format(self, value):
"""
Called when html copy check box is selected.
"""
if value == QtCore.Qt.Checked:
self.copyTextButton.setText(UiStrings().CopyToHtml)
else:
self.copyTextButton.setText(UiStrings().CopyToText)
def on_slide_text_check_box_changed(self, state):
"""
Disable or enable the ``page_break_after_text`` checkbox, as it should only
be enabled when the ``slide_text_check_box`` is checked.
"""
self.page_break_after_text.setDisabled(state == QtCore.Qt.Unchecked)
def save_options(self):
"""
Save the settings and close the dialog.
"""
# Save the settings for this dialog.
settings = Settings()
settings.beginGroup('advanced')
settings.setValue('print slide text', self.slide_text_check_box.isChecked())
settings.setValue('add page break', self.page_break_after_text.isChecked())
settings.setValue('print file meta data', self.meta_data_check_box.isChecked())
settings.setValue('print notes', self.notes_check_box.isChecked())
settings.endGroup()
def update_song_usage(self):
"""
Update the song usage
"""
# Only continue when we include the song's text.
if not self.slide_text_check_box.isChecked():
return
for item in self.service_manager.service_items:
# Trigger Audit requests
Registry().register_function('print_service_started', [item['service_item']])
def _get_service_manager(self):
"""
Adds the service manager to the class dynamically
"""
if not hasattr(self, '_service_manager'):
self._service_manager = Registry().get('service_manager')
return self._service_manager
service_manager = property(_get_service_manager)
def _get_main_window(self):
"""
Adds the main window to the class dynamically
"""
if not hasattr(self, '_main_window'):
self._main_window = Registry().get('main_window')
return self._main_window
main_window = property(_get_main_window)
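# Editor's sketch of the lazy Registry-property pattern used by
# _get_service_manager/_get_main_window above. The Registry below is a hypothetical
# stand-in, not OpenLP's implementation; the point is the resolve-once-and-cache
# behaviour of the property:
#
#     class Registry(object):
#         _components = {'service_manager': object()}
#         @classmethod
#         def get(cls, key):
#             return cls._components[key]
#
#     class Consumer(object):
#         def _get_service_manager(self):
#             if not hasattr(self, '_service_manager'):
#                 self._service_manager = Registry.get('service_manager')
#             return self._service_manager
#         service_manager = property(_get_service_manager)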
|
marmyshev/item_title
|
openlp/core/ui/printserviceform.py
|
Python
|
gpl-2.0
| 16,598
|
[
"Brian"
] |
6eb934ed7ca3d0f98fe14395fddddb2b198e2f04b2d03b42757f59120812467a
|
#!/usr/bin/env
"""
GOA_Winds_NARR_model_prep.py
Retrieve NARR winds for two locations:
GorePoint - 58deg 58min N, 150deg 56min W
and Globec3 59.273701N, 148.9653W
Filter NARR winds with a triangular filter (1/4, 1/2, 1/4) and output every 3hrs
Provide U, V
Save in EPIC NetCDF standard
"""
#System Stack
import datetime
import sys
#Science Stack
import numpy as np
from netCDF4 import Dataset
# User Stack
import general_utilities.haversine as sphered
from utilities import ncutilities as ncutil
# Visual Stack
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, shiftgrid
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2014, 01, 13)
__modified__ = datetime.datetime(2014, 01, 13)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NARR','GLOBEC3', 'Gorept','3hr filtered', 'U,V','Winds', 'Gulf of Alaska'
"""------------------------General Modules-------------------------------------------"""
def from_netcdf(infile):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
ncdata = ncutil.ncreadfile_dic(nchandle, params)
ncutil.ncclose(nchandle)
return (ncdata, params)
def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
print "Parameters available: "
print params
ncdata = ncutil.ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind)
ncutil.ncclose(nchandle)
return ncdata
def latlon_grid(infile):
nchandle = ncutil.ncopen(infile)
lat_lon = ncutil.get_geocoords(nchandle)
ncutil.ncclose(nchandle)
return (lat_lon)
def write2epic( file_name, stationid, time, lat_lon, data ):
ncinstance = ncutil.EPIC_NC(savefile=file_name)
ncinstance.file_create()
ncinstance.sbeglobal_atts()
ncinstance.PMELglobal_atts(Station_Name=stationid, file_name=( __file__.split('/')[-1]) )
ncinstance.dimension_init(len_time=len(time[0]))
ncinstance.variable_init()
ncinstance.add_coord_data(time1=time[0], time2=time[1], latitude=lat_lon[0], longitude=-1 * lat_lon[1], \
depth_level=10. )
ncinstance.add_data('WU_422', data[0])
ncinstance.add_data('WV_423', data[1])
ncinstance.close()
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
""" Ingest EPIC date or NCEP Date and provide python serial date"""
if file_flag == 'EPIC':
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
try: #if input is an array
python_time = [None] * len(file_time)
for i, val in enumerate(file_time):
pyday = file_time[i] - offset
pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time[i] = (pyday + pyfrac)
except:
pyday = file_time - offset
pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time = (pyday + pyfrac)
elif file_flag == 'NARR':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
elif file_flag == 'NCEP':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
else:
print "time flag not recognized"
sys.exit()
return np.array(python_time)
def pydate2EPIC(file_time):
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
time1 = np.floor(file_time) + offset #truncate to get day and add 2440000 for true julian day
time2 = ( file_time - np.floor(file_time) ) * (1000. * 60. * 60.* 24.) #milliseconds since 0000GMT
return(time1, time2)
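# Editor's note, a hedged worked example of the two conversions above: NARR/NCEP
# times are hours since 1800-01-01, so
#   date2pydate(0.0, file_flag='NARR')  == datetime.datetime(1800, 1, 1).toordinal()
#   date2pydate(24.0, file_flag='NARR') == datetime.datetime(1800, 1, 2).toordinal()
# pydate2EPIC() then splits such a python ordinal day into the pair
# (EPIC true Julian day, milliseconds since 00:00 GMT) written out by write2epic().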
def pythondate2str(pdate):
(year,month,day) = datetime.datetime.fromordinal(int(pdate)).strftime('%Y-%b-%d').split('-')
delta_t = pdate - int(pdate)
dhour = str(int(np.floor(24 * (delta_t))))
dmin = str(int(np.floor(60 * ((24 * (delta_t)) - np.floor(24 * (delta_t))))))
dsec = str(int(np.floor(60 * ((60 * ((24 * (delta_t)) - np.floor(24 * (delta_t)))) - \
np.floor(60 * ((24 * (delta_t)) - np.floor(24 * (delta_t))))))))
#add zeros to time
if len(dhour) == 1:
dhour = '0' + dhour
if len(dmin) == 1:
dmin = '0' + dmin
if len(dsec) == 1:
dsec = '0' + dsec
return year + '-' + month + '-' + day + ' ' + dhour+':'+dmin+':'+dsec
"---"
def rotate_coord(angle_rot, mag, dir):
""" converts math coords to along/cross shelf.
+ onshore / along coast with land to right (right handed)
- offshore / along coast with land to left
Todo: convert met standard for winds (left handed coordinate system
"""
dir = dir - angle_rot
along = mag * np.sin(np.deg2rad(dir))
cross = mag * np.cos(np.deg2rad(dir))
return (along, cross)
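# Editor's note, a small worked example: with the 135 degree rotation used below,
# a 10 m/s wind whose math-convention direction is exactly 135 degrees maps
# entirely onto the "cross" component:
#   rotate_coord(135., 10., 135.)  ->  (along=0.0, cross=10.0)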
def triangle_smoothing(data_in):
weights=np.array([0.25,0.5,0.25])
filtered_data = np.convolve(data_in,np.array(weights),'same') #edge effects
return filtered_data
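# Editor's note, a hedged example of the (1/4, 1/2, 1/4) filter on a unit impulse
# ('same' mode keeps the series length but smears the endpoints):
#   triangle_smoothing(np.array([0., 0., 1., 0., 0.]))
#   -> array([0.  , 0.25, 0.5 , 0.25, 0.  ])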
"""------------------------- Topo Modules -------------------------------------------"""
def etopo5_data():
""" read in etopo5 topography/bathymetry. """
file = '/Users/bell/Data_Local/MapGrids/etopo5.nc'
etopodata = Dataset(file)
topoin = etopodata.variables['bath'][:]
lons = etopodata.variables['X'][:]
lats = etopodata.variables['Y'][:]
etopodata.close()
topoin,lons = shiftgrid(0.,topoin,lons,start=False) # -360 -> 0
lons, lats = np.meshgrid(lons, lats)
return(topoin, lats, lons)
"""------------------------- Main Modules -------------------------------------------"""
### list of files
NARR = '/Users/bell/Data_Local/Reanalysis_Files/NARR/3hourly/'
infile = [NARR + 'uwnd.10m.2003.nc']
### Grab grid points for future slicing - assume grid is same in all model output
lat_lon = latlon_grid(infile[0])
station_name = ['Chiniak Trough','Chiniak Trough']
sta_lat = [57.33333,57.33333]
sta_long = [151.33333,151.33333]
#Find NARR nearest point to moorings - haversine formula
# NARR data is -180->180 (positive east), Moorings are usually expressed +W for FOCI
globec_pt = sphered.nearest_point([sta_lat[0],-1 * sta_long[0]],lat_lon['lat'],lat_lon['lon'], '2d')
gorept_pt = sphered.nearest_point([sta_lat[1],-1 * sta_long[1]],lat_lon['lat'],lat_lon['lon'], '2d')
globec_modelpt = [lat_lon['lat'][globec_pt[3],globec_pt[4]],lat_lon['lon'][globec_pt[3],globec_pt[4]]]
gorept_modelpt = [lat_lon['lat'][gorept_pt[3],gorept_pt[4]],lat_lon['lon'][gorept_pt[3],gorept_pt[4]]]
print "Globec nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[0], sta_long[0], globec_modelpt[0], globec_modelpt[1])
print "GorePt nearest point to %s, %s which is lat:%s , lon:%s" \
% (sta_lat[1], sta_long[1], gorept_modelpt[0], gorept_modelpt[1])
#loop over all requested data
#years = arange(1984,2014,1)
#years = [1984, 1987, 1989, 1991, 1994, 2001, 2002, 2003, 2004, 2005, 2006, 2011, 2013]
years = [2001,2002]
for yy in years:
# retrieve only these location's data
# uwnd
infile = NARR + 'uwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
globec3_data = from_netcdf_1dsplice(infile, None, globec_pt[3], globec_pt[4])
gorept_data = from_netcdf_1dsplice(infile, None, gorept_pt[3], gorept_pt[4])
#filter data
globec3u_f = triangle_smoothing(globec3_data['uwnd'])
goreptu_f = triangle_smoothing(gorept_data['uwnd'])
globec3u = globec3_data['uwnd']
goreptu = gorept_data['uwnd']
# retrieve only these location's data
# vwnd
infile = NARR + 'vwnd.10m.'+ str(yy) + '.nc'
print "Working on file " + infile
globec3_data = from_netcdf_1dsplice(infile, None, globec_pt[3], globec_pt[4])
gorept_data = from_netcdf_1dsplice(infile, None, gorept_pt[3], gorept_pt[4])
#filter data
globec3v_f = triangle_smoothing(globec3_data['vwnd'])
goreptv_f = triangle_smoothing(gorept_data['vwnd'])
globec3v = globec3_data['vwnd']
goreptv = gorept_data['vwnd']
#rotate to shore (Along/Across)
NARR_wind_mag = np.sqrt(globec3u**2. + globec3v**2.)
NARR_wind_dir_math = np.rad2deg(np.arctan2(globec3v, globec3u))
(NARRalong, NARRcross) = rotate_coord(135., NARR_wind_mag, NARR_wind_dir_math)
#convert to EPIC time
pydate = date2pydate(globec3_data['time'], file_flag='NARR')
epic_time, epic_time1 = pydate2EPIC(pydate)
# output u,v wind components from model grid points
save_to_nc = True
if save_to_nc:
# write to NetCDF
outfile = 'data/NARR_5720N15120W_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
write2epic( outfile, station_name[1], [epic_time, epic_time1], globec_modelpt, [globec3u_f, globec3v_f])
outfile = 'data/NARR_5720N15120W_' + str(yy) + '.nc'
print "Writing to Epic NetCDF " + outfile
write2epic( outfile, station_name[0], [epic_time, epic_time1], gorept_modelpt, [goreptu_f, goreptv_f])
output2screen = False
if output2screen:
print"Date/Time, Across (m/s), Along(m/s)\n"
for i,v in enumerate(pydate):
print "{0}, {1}, {2}".format(pythondate2str(v), NARRcross[i],NARRalong[i])
plot_geoloc = True
if plot_geoloc:
(topoin, elats, elons) = etopo5_data()
fig = plt.figure()
ax = plt.subplot(111)
m = Basemap(resolution='i',projection='merc', llcrnrlat=55, \
urcrnrlat=62,llcrnrlon=-155,urcrnrlon=-145, lat_ts=45)
# Mooring Data
x_moor, y_moor = m([-1. * sta_long[0], -1. * sta_long[1]],sta_lat)
x_close, y_close = m([globec_modelpt[1],gorept_modelpt[1]], [globec_modelpt[0],gorept_modelpt[0]])
#ETOPO 5 contour data
ex, ey = m(elons, elats)
CS = m.contourf(ex,ey,topoin, levels=range(250,5000,250), cmap='gray_r', alpha=.75) #colors='black'
CS = m.contour(ex,ey,topoin, levels=range(250,5000,250), linewidths=0.2, colors='black', alpha=.75) #
CS = m.contour(ex,ey,topoin, levels=[-1000, -200, -100], linestyles='--', linewidths=0.2, colors='black', alpha=.75) #
#plot points
m.scatter(x_close,y_close,20,marker='+',color='b')
m.scatter(x_moor,y_moor,20,marker='o',color='g')
m.drawcountries(linewidth=0.5)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(55,62,2.),labels=[1,0,0,0],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw parallels
m.drawmeridians(np.arange(-155,-145,2.),labels=[0,0,0,1],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw meridians
#m.fillcontinents(color='black')
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0], DefaultSize[1]) )
plt.savefig('images/ChiniakTrough_region.png', bbox_inches='tight', dpi = (100))
plt.close()
|
shaunwbell/FOCI_Analysis
|
ReanalysisRetreival_orig/GOA_Winds_Mordy/chiniaktrough_NARR_model_prep.py
|
Python
|
mit
| 11,651
|
[
"NetCDF"
] |
e57cdc95d416c15d2567f28ddb84f7f1801a04ec4363ec2740dc5c88f6ea0447
|
#!/usr/bin/env python
"""Hierarchical Cache Simulator."""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import textwrap
from functools import reduce
import sys
try:
    from collections.abc import Iterable
except ImportError:  # Python 2 and Python < 3.3
    from collections import Iterable
from cachesim import backend
if sys.version_info[0] < 3:
range = xrange
def is_power2(num):
"""Return True if num is a power of two."""
return num > 0 and (num & (num - 1)) == 0
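# Editor's note: the bit trick works because a power of two has exactly one set
# bit, so num & (num - 1) clears it to zero. For example:
#   is_power2(64) -> True    (0b1000000 & 0b0111111 == 0)
#   is_power2(96) -> False   (0b1100000 & 0b1011111 == 0b1000000)
#   is_power2(0)  -> False   (explicitly excluded by num > 0)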
class CacheSimulator(object):
"""
High-level interface to the Cache Simulator.
This is the only class that needs to be directly interfaced to.
"""
def __init__(self, first_level, main_memory):
"""
Create interface to interact with cache simulator backend.
:param first_level: first cache level object.
:param main_memory: main memory object.
"""
assert isinstance(first_level, Cache), \
"first_level needs to be a Cache object."
assert isinstance(main_memory, MainMemory), \
"main_memory needs to be a MainMemory object"
self.first_level = first_level
for l in self.levels(with_mem=False): # iterating to last level
self.last_level = l
self.main_memory = main_memory
@classmethod
def from_dict(cls, d):
"""Create cache hierarchy from dictionary."""
main_memory = MainMemory()
caches = {}
referred_caches = set()
# First pass, create all named caches and collect references
for name, conf in d.items():
caches[name] = Cache(name=name,
**{k: v for k, v in conf.items()
if k not in ['store_to', 'load_from', 'victims_to']})
if 'store_to' in conf:
referred_caches.add(conf['store_to'])
if 'load_from' in conf:
referred_caches.add(conf['load_from'])
if 'victims_to' in conf:
referred_caches.add(conf['victims_to'])
# Second pass, connect caches
for name, conf in d.items():
if 'store_to' in conf and conf['store_to'] is not None:
caches[name].set_store_to(caches[conf['store_to']])
if 'load_from' in conf and conf['load_from'] is not None:
caches[name].set_load_from(caches[conf['load_from']])
if 'victims_to' in conf and conf['victims_to'] is not None:
caches[name].set_victims_to(caches[conf['victims_to']])
# Find first level (not target of any load_from or store_to)
first_level = set(d.keys()) - referred_caches
assert len(first_level) == 1, "Unable to find first cache level."
first_level = caches[list(first_level)[0]]
# Find last level caches (has no load_from or store_to target)
last_level_load = c = first_level
while c is not None:
last_level_load = c
c = c.load_from
assert last_level_load is not None, "Unable to find last cache level."
last_level_store = c = first_level
while c is not None:
last_level_store = c
c = c.store_to
assert last_level_store is not None, "Unable to find last cache level."
# Set main memory connections
main_memory.load_to(last_level_load)
main_memory.store_from(last_level_store)
return cls(first_level, main_memory), caches, main_memory
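# Editor's sketch of a dictionary accepted by from_dict(); the sizes are purely
# illustrative:
#   {'L1': {'sets': 64,  'ways': 8, 'cl_size': 64,
#           'load_from': 'L2', 'store_to': 'L2'},
#    'L2': {'sets': 512, 'ways': 8, 'cl_size': 64,
#           'load_from': None, 'store_to': None}}
# 'L1' is picked as the first level because no other cache refers to it, and
# 'L2' ends up wired to MainMemory as the last level.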
def reset_stats(self):
"""
Reset statistics in all cache levels.
Use this after warming up the caches to get a steady state result.
"""
for c in self.levels(with_mem=False):
c.reset_stats()
def force_write_back(self):
"""Write all pending dirty lines back."""
# force_write_back() acts recursively by itself, but multiple write-back first-level
# caches are conceivable. Better safe than sorry:
for c in self.levels(with_mem=False):
c.force_write_back()
def load(self, addr, length=1):
"""
Load one or more addresses.
:param addr: byte address of load location
:param length: All address from addr until addr+length (exclusive) are
loaded (default: 1)
"""
if addr is None:
return
elif not isinstance(addr, Iterable):
self.first_level.load(addr, length=length)
else:
self.first_level.iterload(addr, length=length)
def store(self, addr, length=1, non_temporal=False):
"""
Store one or more addresses.
:param addr: byte address of store location
:param length: All address from addr until addr+length (exclusive) are
stored (default: 1)
:param non_temporal: if True, no write-allocate will be issued, but cacheline will be zeroed
"""
if non_temporal:
raise ValueError("non_temporal stores are not yet supported")
if addr is None:
return
elif not isinstance(addr, Iterable):
self.first_level.store(addr, length=length)
else:
self.first_level.iterstore(addr, length=length)
def loadstore(self, addrs, length=1):
"""
Load and store address in order given.
:param addrs: iterable of address tuples: [(loads, stores), ...]
:param length: will load and store all bytes between addr and
addr+length (for each address)
"""
if not isinstance(addrs, Iterable):
raise ValueError("addr must be iteratable")
self.first_level.loadstore(addrs, length=length)
def stats(self):
"""Collect all stats from all cache levels."""
for c in self.levels():
yield c.stats()
def print_stats(self, header=True, file=sys.stdout):
"""Pretty print stats table."""
if header:
print("CACHE {:*^18} {:*^18} {:*^18} {:*^18} {:*^18}".format(
"HIT", "MISS", "LOAD", "STORE", "EVICT"), file=file)
for s in self.stats():
print("{name:>5} {HIT_count:>6} ({HIT_byte:>8}B) {MISS_count:>6} ({MISS_byte:>8}B) "
"{LOAD_count:>6} ({LOAD_byte:>8}B) {STORE_count:>6} "
"({STORE_byte:>8}B) {EVICT_count:>6} ({EVICT_byte:>8}B)".format(
**s),
file=file)
def levels(self, with_mem=True):
"""Return cache levels, optionally including main memory."""
p = self.first_level
while p is not None:
yield p
# FIXME bad hack to include victim caches, need a more general solution, probably
# involving recursive tree walking
if p.victims_to is not None and p.victims_to != p.load_from:
yield p.victims_to
if p.store_to is not None and p.store_to != p.load_from and p.store_to != p.victims_to:
yield p.store_to
p = p.load_from
if with_mem:
yield self.main_memory
def count_invalid_entries(self):
"""Sum of all invalid entry counts from cache levels."""
return sum([c.count_invalid_entries() for c in self.levels(with_mem=False)])
def mark_all_invalid(self):
"""Mark all entries invalid and reset stats."""
for c in self.levels(with_mem=False):
c.mark_all_invalid()
self.reset_stats()
# def draw_array(self, start, width, height, block=1):
# """Return image representation of cache states."""
# length = (width*height)//block
# canvas = Image.new("RGB", (width, height))
# # FIXME: switch to palette "P" with ImagePalette
#
# for h in range(height):
# for w in range(width):
# addr = start+h*(width*block)+w*block
#
# l1 = self.first_level
# l2 = self.first_level.parent
# l3 = self.first_level.parent.parent
# if l1.contains(addr):
# canvas.putpixel((w,h), (0,0,255))
# elif l2.contains(addr):
# canvas.putpixel((w,h), (255,0,0))
# elif l3.contains(addr):
# canvas.putpixel((w,h), (0,255,0))
# else:
# canvas.putpixel((w,h), (255,255,255))
#
# return canvas
def __repr__(self, recursion=True):
"""Return string representation of object."""
first_level_repr = self.first_level.__repr__(recursion=recursion)
main_memory_repr = self.main_memory.__repr__(recursion=recursion)
return 'CacheSimulator({}, {})'.format(first_level_repr, main_memory_repr)
def get_backend(cache):
"""Return backend of *cache* unless *cache* is None, then None is returned."""
if cache is not None:
return cache.backend
return None
class Cache(object):
"""Cache level object."""
replacement_policy_enum = {"FIFO": 0, "LRU": 1, "MRU": 2, "RR": 3}
def __init__(self, name, sets, ways, cl_size,
replacement_policy="LRU",
write_back=True,
write_allocate=True,
write_combining=False,
subblock_size=None,
load_from=None, store_to=None, victims_to=None,
swap_on_load=False):
"""Create one cache level out of given configuration.
:param sets: total number of sets; if 1, the cache is fully associative
:param ways: total number of ways; if 1, the cache is direct mapped
:param cl_size: number of bytes that can be addressed individually
:param replacement_policy: FIFO, LRU (default), MRU or RR
:param write_back: if true (default), write back will be done on evict.
Otherwise write-through is used
:param write_allocate: if true (default), a load will be issued on a
write miss
:param write_combining: if true, this cache will combine writes and
issue them on evicts (default is false)
:param subblock_size: the minimum blocksize that write-combining can
handle
:param load_from: the cache level to forward a load in case of a load
miss or write-allocate, if None, assumed to be main
memory
:param store_to: the cache level to forward a store to in case of
eviction of dirty lines, if None, assumed to be main
memory
:param victims_to: the cache level to forward any evicted lines to
(dirty or not)
:param swap_on_load: if true, lines will be swapped between this and the
higher cache level (default is false).
Currently not supported.
The total cache size is the product of sets*ways*cl_size.
Internally all addresses are converted to cacheline indices.
Instantiation has to happen from the last level cache to the first level
cache, since each level requires a reference to the next level towards
main memory.
"""
assert load_from is None or isinstance(load_from, Cache), \
"load_from needs to be None or a Cache object."
assert store_to is None or isinstance(store_to, Cache), \
"store_to needs to be None or a Cache object."
assert victims_to is None or isinstance(victims_to, Cache), \
"victims_to needs to be None or a Cache object."
assert is_power2(cl_size), \
"cl_size needs to be a power of two."
assert store_to is None or store_to.cl_size >= cl_size, \
"cl_size may only increase towards main memory."
assert load_from is None or load_from.cl_size >= cl_size, \
"cl_size may only increase towards main memory."
assert replacement_policy in self.replacement_policy_enum, \
"Unsupported replacement strategy, we only support: " + \
', '.join(self.replacement_policy_enum)
assert (write_back, write_allocate) in [(False, False), (True, True), (True, False)], \
"Unsupported write policy, we only support write-through and non-write-allocate, " \
"write-back and write-allocate, and write-back and non-write-allocate."
assert write_combining and write_back and not write_allocate or not write_combining, \
"Write combining may only be used in a cache with write-back and non-write-allocate"
assert subblock_size is None or cl_size % subblock_size == 0, \
"subblock_size needs to be a devisor of cl_size or None."
# TODO check that ways only increase from higher to lower _exclusive_ cache
# otherwise swap won't be a valid procedure to ensure exclusiveness
# TODO check that cl_size is the same for exclusive and victim caches
self.name = name
self.replacement_policy = replacement_policy
self.replacement_policy_id = self.replacement_policy_enum[replacement_policy]
self.load_from = load_from
self.store_to = store_to
self.victims_to = victims_to
self.swap_on_load = swap_on_load
if subblock_size is None:
subblock_size = cl_size
self.backend = backend.Cache(
name=name, sets=sets, ways=ways, cl_size=cl_size,
replacement_policy_id=self.replacement_policy_id,
write_back=write_back, write_allocate=write_allocate,
write_combining=write_combining, subblock_size=subblock_size,
load_from=get_backend(load_from), store_to=get_backend(store_to),
victims_to=get_backend(victims_to),
swap_on_load=swap_on_load)
def get_cl_start(self, addr):
"""Return first address belonging to the same cacheline as *addr*."""
return addr >> self.backend.cl_bits << self.backend.cl_bits
def get_cl_end(self, addr):
"""Return last address belonging to the same cacheline as *addr*."""
return self.get_cl_start(addr) + self.backend.cl_size - 1
def set_load_from(self, load_from):
"""Update load_from in Cache and backend."""
assert load_from is None or isinstance(load_from, Cache), \
"load_from needs to be None or a Cache object."
assert load_from is None or load_from.cl_size >= self.cl_size, \
"cl_size may only increase towards main memory."
self.load_from = load_from
self.backend.load_from = get_backend(load_from)
def set_store_to(self, store_to):
"""Update store_to in Cache and backend."""
assert store_to is None or isinstance(store_to, Cache), \
"store_to needs to be None or a Cache object."
assert store_to is None or store_to.cl_size >= self.cl_size, \
"cl_size may only increase towards main memory."
self.store_to = store_to
self.backend.store_to = get_backend(store_to)
def set_victims_to(self, victims_to):
"""Update victims_to in Cache and backend."""
assert victims_to is None or isinstance(victims_to, Cache), \
"store_to needs to be None or a Cache object."
assert victims_to is None or victims_to.cl_size == self.cl_size, \
"cl_size may only increase towards main memory."
self.victims_to = victims_to
self.backend.victims_to = get_backend(victims_to)
def __getattr__(self, key):
"""Return cache attribute, preferably to backend."""
if "backend" in self.__dict__:
return getattr(self.backend, key)
else:
raise AttributeError("'{}' object has no attribute '{}'".format(self.__class__, key))
def stats(self):
"""Return dictionay with all stats at this level."""
assert self.backend.LOAD_count >= 0, "LOAD_count < 0"
assert self.backend.LOAD_byte >= 0, "LOAD_byte < 0"
assert self.backend.STORE_count >= 0, "STORE_count < 0"
assert self.backend.STORE_byte >= 0, "STORE_byte < 0"
assert self.backend.HIT_count >= 0, "HIT_count < 0"
assert self.backend.HIT_byte >= 0, "HIT_byte < 0"
assert self.backend.MISS_count >= 0, "MISS_count < 0"
assert self.backend.MISS_byte >= 0, "MISS_byte < 0"
assert self.backend.EVICT_count >= 0, "EVICT_count < 0"
assert self.backend.EVICT_byte >= 0, "EVICT_byte < 0"
return {'name': self.name,
'LOAD_count': self.backend.LOAD_count,
'LOAD_byte': self.backend.LOAD_byte,
'STORE_count': self.backend.STORE_count,
'STORE_byte': self.backend.STORE_byte,
'HIT_count': self.backend.HIT_count,
'HIT_byte': self.backend.HIT_byte,
'MISS_count': self.backend.MISS_count,
'MISS_byte': self.backend.MISS_byte,
'EVICT_count': self.backend.EVICT_count,
'EVICT_byte': self.backend.EVICT_byte}
def size(self):
"""Return total cache size."""
return self.sets * self.ways * self.cl_size
def __repr__(self, recursion=False):
"""Return string representation of object."""
if recursion:
load_from_repr, store_to_repr, victims_to_repr = map(
lambda c: c.__repr__(recursion=True) if c is not None else 'None',
[self.load_from, self.store_to, self.victims_to])
else:
load_from_repr = self.load_from.name if self.load_from is not None else 'None'
store_to_repr = self.store_to.name if self.store_to is not None else 'None'
victims_to_repr = self.victims_to.name if self.victims_to is not None else 'None'
return ('Cache(name={!r}, sets={!r}, ways={!r}, cl_size={!r}, replacement_policy={!r}, '
'write_back={!r}, write_allocate={!r}, write_combining={!r}, load_from={}, '
'store_to={}, victims_to={}, swap_on_load={!r})').format(
self.name, self.sets, self.ways, self.cl_size, self.replacement_policy,
self.write_back, self.write_allocate, self.write_combining, load_from_repr,
store_to_repr, victims_to_repr, self.swap_on_load)
class MainMemory(object):
"""Main memory object. Last level of cache hierarchy, able to hit on all requests."""
def __init__(self, name=None, last_level_load=None, last_level_store=None):
"""Create one cache level out of given configuration."""
self.name = "MEM" if name is None else name
if last_level_load is not None:
self.load_to(last_level_load)
else:
self.last_level_load = None
if last_level_store is not None:
self.store_from(last_level_store)
else:
self.last_level_store = None
def reset_stats(self):
"""Dummy, no stats need to be reset in main memory."""
# since all stats in main memory are derived from the last level cache, there is nothing to
# reset
pass
def load_to(self, last_level_load):
"""Set level where to load from."""
assert isinstance(last_level_load, Cache), \
"last_level needs to be a Cache object."
assert last_level_load.load_from is None, \
"last_level_load must be a last level cache (.load_from is None)."
self.last_level_load = last_level_load
def store_from(self, last_level_store):
"""Set level where to store to."""
assert isinstance(last_level_store, Cache), \
"last_level needs to be a Cache object."
assert last_level_store.store_to is None, \
"last_level_store must be a last level cache (.store_to is None)."
self.last_level_store = last_level_store
def __getattr__(self, key):
"""Return cache attribute, preferably to backend."""
try:
return self.stats()[key]
except KeyError:
raise AttributeError
def stats(self):
"""Return dictionay with all stats at this level."""
load_count = self.last_level_load.MISS_count
load_byte = self.last_level_load.MISS_byte
if self.last_level_load.victims_to is not None:
# If there is a victim cache between last_level and memory, subtract all victim hits
load_count -= self.last_level_load.victims_to.HIT_count
load_byte -= self.last_level_load.victims_to.HIT_byte
return {'name': self.name,
'LOAD_count': load_count,
'LOAD_byte': load_byte,
'HIT_count': load_count,
'HIT_byte': load_byte,
'STORE_count': self.last_level_store.EVICT_count,
'STORE_byte': self.last_level_store.EVICT_byte,
'EVICT_count': 0,
'EVICT_byte': 0,
'MISS_count': 0,
'MISS_byte': 0}
def __repr__(self, recursion=False):
"""Return string representation of object."""
if recursion:
last_level_load_repr, last_level_store_repr = map(
lambda c: c.__repr__(recursion=True) if c is not None else 'None',
[self.last_level_load, self.last_level_store])
else:
last_level_load_repr, last_level_store_repr = map(
lambda c: c.name if c is not None else 'None',
[self.last_level_load, self.last_level_store])
return 'MainMemory(last_level_load={}, last_level_store={})'.format(
last_level_load_repr, last_level_store_repr)
class CacheVisualizer(object):
"""Visualize cache state by generation of VTK files."""
def __init__(self, cs, dims, start_address=0, element_size=8, filename_base=None):
"""
Create interface to interact with cache visualizer.
:param cs: CacheSimulator object.
:param dims: dimensions at which to visualize the data, e.g. [10, 15]
describes a 2-D array of 10 rows and 15 columns of elements of
element_size bytes each.
:param start_address: starting address of the array.
:param element_size: size of each element in bytes.
:param filename_base: base name of the VTK files written for Paraview.
"""
assert isinstance(cs, CacheSimulator), \
"cs needs to be a CacheSimulator object."
ndim = len(dims)
assert ndim < 3, "Currently dump and view supported up to 3-D arrays only"
self.dims = dims
self.npts = reduce(int.__mul__, self.dims, 1)
self.cs = cs
self.startAddress = start_address
self.element_size = element_size
self.filename_base = filename_base
self.count = 0
def dump_state(self):
vtk_str = textwrap.dedent("""\
# vtk DataFile Version 4.0
CACHESIM VTK output
ASCII
DATASET STRUCTURED_POINTS
""")
# dimension string needs to be reversed and padded to 3 dimensions (using 1s)
dim_str = " ".join([str(d+1) for d in reversed((self.dims + [1, 1, 1])[:3])])
vtk_str += textwrap.dedent("""\
DIMENSIONS {}
ORIGIN 0 0 0
SPACING 1 1 1
CELL_DATA {}
FIELD DATA 1
""").format(dim_str, self.npts)
ctr = 1
data = []
for c in self.cs.levels(with_mem=False):
address = [0] * self.npts
cached_addresses = {x - self.startAddress for x in c.backend.cached}
# Filtering elements outside of scope and scaling address to element indices
cached_elements = {x // self.element_size for x in cached_addresses
if 0 <= x < self.npts * self.element_size}
for a in cached_elements:
address[a] = 1
data.append(address)
ctr += 1
total_levels = (ctr - 1)
vtk_str += "\nData_arr {} {} double\n".format(total_levels, self.npts)
for i in range(self.npts):
vtk_str += " ".join([str(d[i]) for d in data])
vtk_str += "\n"
if self.filename_base is None:
file = sys.stdout
else:
file = open("{}_{}.vtk".format(self.filename_base, self.count), 'w')
file.write(vtk_str)
file.flush()
if file != sys.stdout:
file.close()
self.count += 1
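# Editor's addition: a minimal, hedged usage sketch of the classes defined above,
# modelled on a common three-level configuration; the sizes are illustrative, not a
# recommendation. It only runs when this module is executed directly.
if __name__ == '__main__':
    mem = MainMemory()
    l3 = Cache("L3", 20480, 16, 64, "LRU")                           # ~20 MB
    mem.load_to(l3)
    mem.store_from(l3)
    l2 = Cache("L2", 512, 8, 64, "LRU", store_to=l3, load_from=l3)   # 256 KB
    l1 = Cache("L1", 64, 8, 64, "LRU", store_to=l2, load_from=l2)    # 32 KB
    cs = CacheSimulator(l1, mem)
    cs.load(2342, length=27)     # touch 27 bytes starting at address 2342
    cs.store(512, length=42)     # store 42 bytes starting at address 512
    cs.force_write_back()
    cs.print_stats()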
|
RRZE-HPC/pycachesim
|
cachesim/cache.py
|
Python
|
agpl-3.0
| 24,617
|
[
"ParaView",
"VTK"
] |
b6057b618035b9e683f9df501c11b1f6bf7eb4cfa3161232d4a24de87377fbaa
|
########################################################################################
## This file is a part of YAP package of scripts. https://github.com/shpakoo/YAP
## Distributed under the MIT license: http://www.opensource.org/licenses/mit-license.php
## Copyright (c) 2011-2013 Sebastian Szpakowski
########################################################################################
#################################################
## A library of experimental "steps" or program wrappers to construct pipelines
## Pipeline steps orchestration, grid management and output handling.
#################################################
import sys, tempfile, shlex, glob, os, stat, hashlib, time, datetime, re, curses
from threading import *
from subprocess import *
from MothurCommandInfoWrapper import *
from StepsLibrary import *
from collections import defaultdict
from collections import deque
from random import *
from Queue import *
##threading redefines enumerate() with no arguments. as a kludge, we drop it here
globals().pop('enumerate',None)
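# Editor's note: "from threading import *" shadows the builtin enumerate() with
# threading.enumerate(), which lists live Thread objects and takes no arguments;
# dropping it from globals() restores the builtin for ordinary
#   for i, item in enumerate(some_list): ...
# style loops used elsewhere in the pipeline.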
_author="Sebastian Szpakowski"
_date="2012/09/20"
_version="Version X"
#################################################
## Classes
##
class GroupSplit(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("GroupSplit")
self.start()
def performStep(self):
pattern=[]
fasta = self.find("fasta")[0]
groups = self.find("group")[0]
mapping = defaultdict(str)
fasta = "%s/%s" % (self.stepdir, fasta)
groups = "%s/%s" % (self.stepdir, groups)
for read, group in GeneralPurposeParser(groups, sep="\t"):
mapping[read] = group
samples = defaultdict(list)
for cur in set(mapping.values()):
fn= "%s/%s.fasta" % (self.stepdir, cur)
f=open(fn, 'w')
counter=0
for head, seq in FastaParser(fasta):
if mapping[head] == cur:
f.write(">%s\n%s\n" % (head, seq))
counter+=1
self.message( "%s\t%s" % (cur, counter))
f.close()
class TCOFFEE(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("TCOFFEE")
self.start()
def performStep(self):
pattern=[]
files = self.find("fasta")
tasks = list()
argstring = ""
for arg, val in self.arguments.items():
if arg == "pattern":
pattern = val.strip().split(",")
else:
argstring = "%s %s %s " % (argstring, arg, val)
template = "t_coffee "
for f in files:
if len(pattern)>0:
for k in pattern:
if f.find(k)>-1:
command = "%s %s %s " % (template, f, argstring)
self.message(command)
task = GridTask(template="pick", name=self.stepname, command=command, cpu=4, dependson=list(), cwd = self.stepdir)
task.wait()
else:
command = "%s %s %s " % (template, f, argstring)
self.message(command)
task = GridTask(template="pick", name=self.stepname, command=command, cpu=4, dependson=list(), cwd = self.stepdir)
task.wait()
class CLUSTALW2(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CLUSTALW2")
self.start()
def performStep(self):
pattern = []
files = self.find("fasta")
tasks = list()
argstring = ""
for arg, val in self.arguments.items():
if arg == "pattern":
pattern = val.strip().split(",")
else:
argstring = "%s %s %s " % (argstring, arg, val)
template = "%sclustalw2 %s " % (binpath, argstring)
for f in files:
if len(pattern)>0:
for k in pattern:
if f.find(k)>-1:
command = "%s -INFILE=%s" % (template, f)
self.message(command)
task = GridTask(template="pick", name=self.stepname, command=command, cpu=1, dependson=list(), cwd = self.stepdir)
tasks.append(task)
else:
command = "%s -INFILE=%s" % (template, f)
self.message(command)
task = GridTask(template="pick", name=self.stepname, command=command, cpu=1, dependson=list(), cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
class Bowtie1 (DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("BOWTIE1aligner")
self.counter=0
self.start()
def performStep(self):
tasks = list()
m1 = self.find("mate1")
m2 = self.find("mate2")
m3 = self.find("fasta")
cpus = 1
argstring = ""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
if arg =="-p":
cpus = val
tasks = list()
self.message("processing %s file(s)..." % (len(m1)+len(m3)))
for f in m1:
name = ".".join(f.strip().split(".")[:-1])
if "%s.mate2" % (name) in m2:
k = "bowtie %s -1 %s.mate1 -2 %s.mate2 > %s.bowtie1alignment" % (argstring, name, name, name)
#self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=cpus, dependson=list(), cwd = self.stepdir)
tasks.append(task)
else:
self.message("skipping: %s" % (name))
for f in m3:
name = ".".join(f.strip().split(".")[:-1])
k = "bowtie %s %s > %s.bowtie1alignment" % (argstring, f, name)
#self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=cpus, dependson=list(), cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
### remove spaces from header (i.e. keep the first token only)
### and cut sequences into 50-base fragments (overlapping by 25)
class TilingFasta(DefaultStep):
def __init__(self, INS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
#self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("TilingFasta")
self.start()
def performStep(self):
tasks = list()
f = self.find("fasta")
for file in f:
input = FastaParser("%s/%s" % (self.stepdir, file))
output = open("%s/%s.tile.fasta"% (self.stepdir, file), "w")
for head, seq in input:
head = head.split()[0]
counter=1
while len(seq)>50:
tmphead = "%s:%s-%s" % (head, counter, counter+100)
tmpseq = seq[:50]
seq = seq[25:]
counter+=25
output.write(">%s\n%s\n" % (tmphead, tmpseq))
output.close()
class Bowtie2 (DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("BOWTIE2aligner")
self.counter=0
self.start()
def performStep(self):
tasks = list()
m1 = self.find("mate1")
m2 = self.find("mate2")
m3 = self.find("fasta")
cpus = 1
argstring = ""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
if arg =="-p":
cpus = val
tasks = list()
jobs = len(m1)
counter=0
for f in m1:
counter+=1
name = ".".join(f.strip().split(".")[:-1])
if "%s.mate2" % (name) in m2:
k = "~/bin/bowtie2 %s -q -1 %s.mate1 -2 %s.mate2 -S %s.sam" % (argstring, name, name, name)
if counter==1:
self.message(k)
elif counter==2:
self.message("processing %s file(s)..." % (jobs))
task = GridTask(template="pick", name=self.stepname, command=k, cpu=cpus, dependson=list(), cwd = self.stepdir)
tasks.append(task)
else:
self.message("skipping: %s" % (name))
jobs = len(m3)
counter=0
for f in m3:
counter+=1
name = ".".join(f.strip().split(".")[:-1])
k = "~/bin/bowtie2 %s -f -U %s -S %s.sam" % (argstring, f, name)
if counter==1:
self.message(k)
elif counter==2:
self.message("processing %s file(s)..." % (jobs))
task = GridTask(template="pick", name=self.stepname, command=k, cpu=cpus, dependson=list(), cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
class AwkCommand(DefaultStep):
def __init__(self, INS, ARGS, PREV,):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("AWK")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
oldtype = self.getInputValue("type")
newtype = self.getInputValue("newtype")
files = self.find(oldtype)
awk = self.getInputValue("awk")
postprocessing = ""
if self.getInputValue("sort") != None:
postprocessing = "%s | sort" % (postprocessing)
if self.getInputValue("uniq") != None:
postprocessing = "%s | uniq" % (postprocessing)
if self.getInputValue("postprocess") != None:
postprocessing = "%s | %s " % (postprocessing, self.getInputValue("postprocess"))
counter=0
for f in files:
counter+=1
newname = f[0:-len(oldtype)]
newname = "%s%s" % (newname, newtype)
k = "awk '%s' %s %s > %s" % (awk, f, postprocessing, newname)
if counter==1:
self.message(k)
elif counter==2:
self.message("processing %s file(s)..." % (len(files)))
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
class ContaminantRemoval(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("decontaminate")
self.start()
def performStep(self):
tasks = list()
m1 = self.find("fasta")
m1.extend(self.find("mate1"))
m1.extend(self.find("mate2"))
filter = self.find("bowtie1alignment")
filter.extend(self.find("filter"))
### index a filename using the filename sans the step id (0) and extension (-1)
#filters = {".".join(key.strip().split(".")[1:-1]) : key for key in filter}
filters = dict()
for key in filter:
filters[".".join(key.strip().split(".")[1:-1])] = key
argstring = ""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
tasks = list()
missing = 0
# if len(filter)>1:
# self.message("too many (%s) filters..." % (len(filter)))
# self.failed = True
for file in m1:
### find appropriate filter
name = ".".join(file.strip().split(".")[1:-1])
self.message("%s -> %s" % (name, filters[name]))
if name in filters.keys():
k = "%spython %sMateFilter.py %s -i %s -f %s " % (binpath, scriptspath, argstring, file, filters[name])
if len(m1)==1:
self.message(k)
task = GridTask(template="pick", name="%s" % (self.stepname), command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
else:
missing +=1
if missing>0:
self.message("%s missing filters observed..." % (missing))
self.failed = True
for task in tasks:
task.wait()
class SingletonsFishOut(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("fishing")
self.start()
def performStep(self):
singletons = self.find("singletons")
assembled = self.find("assembled")
fasta = self.find("fasta")
tasks = list()
if len (singletons) !=0 and len(assembled) !=0:
singletons = singletons[0]
assembled = assembled[0]
for f in fasta:
if f.find("contigs")==-1:
k = "%spython %sMateFilter.py -i %s -k %s -t fasta -s singletons " % (binpath, scriptspath, f, singletons )
self.message(k)
task = GridTask(template="pick", name="%s" % (self.stepname), command=k, cpu=1, cwd = self.stepdir)
task.wait()
k = "%spython %sMateFilter.py -i %s -k %s -t fasta -s assembled " % (binpath, scriptspath, f, assembled )
self.message(k)
task = GridTask(template="pick", name="%s" % (self.stepname), command=k, cpu=1, cwd = self.stepdir)
task.wait()
class fastx_quality_stats(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("fastx_qstats")
self.start()
def performStep(self):
tasks = list()
m1 = self.find("mate1")
m1.extend(self.find("mate2"))
argstring = ""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
self.message(m1)
for file in m1:
k = "fastx_quality_stats %s -i %s -o %s.fastx_stats" % (argstring, file, file)
self.message(k)
task = GridTask(template="pick", name="%s" % (self.stepname), command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
class fastq_quality_filter(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("fastq_qfilter")
self.start()
def performStep(self):
tasks = list()
m1 = self.find("mate1")
m1.extend(self.find("mate2"))
argstring = ""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
tasks = list()
self.message("processing %s files..." % len(m1))
for file in m1:
suffix = file.split(".")[-1]
prefix = ".".join(file.split(".")[:-1])
k = "fastq_quality_filter %s -i %s -o %s.q.%s" % (argstring, file, prefix, suffix)
if len(m1)<10:
self.message(k)
task = GridTask(template="pick", name="%s" % (self.stepname), command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
class fastq2fasta(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("fastq2fasta")
self.start()
def performStep(self):
tasks = list()
m1 = self.find("fastq")
if len(m1)==0:
m1.extend(self.find("mate1"))
m1.extend(self.find("mate2"))
argstring = ""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
tasks = list()
self.message("processing %s files..." % len(m1))
for file in m1:
suffix = file.split(".")[-1]
prefix = ".".join(file.split(".")[:-1])
if suffix=="fastq":
k = "%sfastq_to_fasta %s -i %s -o %s.fasta" % (binpath, argstring, file, prefix)
else:
k = "%sfastq_to_fasta %s -i %s -o %s.fasta.%s" % (binpath, argstring, file, prefix, suffix)
if len(m1)<10:
self.message(k)
task = GridTask(template="pick", name="%s" % (self.stepname), command=k, cpu=1, cwd = self.stepdir, debug=True)
tasks.append(task)
for task in tasks:
task.wait()
class mateInterweave(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("mateInterweave")
self.start()
def performStep(self):
tasks = list()
m1 = self.find("mate1")
m2 = self.find("mate2")
tasks = list()
self.message("processing %s/%s files..." % (len(m1), len(m2)))
for f in m1:
f = ".".join(f.strip().split(".")[:-1])
if "%s.mate2" %( f) in m2:
argstring = ""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
argstring = "%s -f %s.mate1,%s.mate2" % (argstring, f, f)
# if len(cluster)==2:
# argstring = "%s -c %s " % (argstring, ",".join(cluster))
#
k = "%spython %sinterweaveMates.py %s" % (binpath, scriptspath, argstring)
if len(m1)<10:
self.message(k)
task = GridTask(template="pick", name="%s" % (self.stepname), command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
class MateMerge(DefaultStep):
def __init__(self, INS, ARGS, PREV, prefix="files"):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("MATE_cat")
#self.nodeCPUs=nodeCPUs
self.prefix = prefix
self.start()
def performStep(self):
m1 = self.find("mate1")
m2 = self.find("mate2")
k = "cat *.fasta.mate1 *.fasta.mate2 > %s.mates.fasta" % (self.prefix)
self.message(k)
task = GridTask(template="pick", name="cat", command=k, cpu=1, cwd = self.stepdir)
task.wait()
class CLC_Assemble(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CLC_Assemble")
self.start()
def performStep(self):
tasks = list()
x= self.find("properties")
m1 = self.find("fasta")
cpus=1
template="pick"
if len(x)!=1 or len(m1)!=1:
self.failed=True
else:
m1 = m1[0]
prefix = ".".join(m1.split(".")[:-1])
argstring=""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
if arg=="--cpus":
cpus=val
done = False
while not done:
k = "/usr/local/packages/clc-ngs-cell/clc_novo_assemble -o %s.contigs.fasta %s -q %s" % (prefix, argstring, m1)
self.message(k)
task = GridTask(template="pick", name="%s" % (self.stepname), command=k, cpu=cpus, cwd = self.stepdir, debug=True)
task.wait()
for file in glob.glob("%s/*.e*" % (self.stepdir)):
#self.message(file)
contents = "\n".join(loadLines("%s" % (file)))
if contents.find("No more available licenses")>-1:
self.message("No more available licenses, retrying in a bit...")
command = "rm %s" % (file)
p = Popen(shlex.split(command), close_fds=True)
p.wait()
time.sleep(60)
else:
done = True
class CLC_Assemble_Ref(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CLC_Assemble_Ref")
self.start()
def performStep(self):
tasks = list()
x= self.find("properties")
cpus=1
template="pick"
if len(x)!=1:
self.failed=True
else:
fastas = self.find("fasta")
self.message(fastas)
contigs = list()
reads = list()
if len(fastas)==2:
for f in fastas:
if f.find("contig")>-1:
contigs.append(f)
else:
reads.append(f)
if len(contigs)==1 and len(reads)==1:
contigs = contigs[0]
reads =reads[0]
prefix = ".".join(reads.split(".")[:-1])
argstring=""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
if arg=="--cpus":
cpus=val
### clean reads
done = False
while not done:
k = "/usr/local/packages/clc-ngs-cell/clc_ref_assemble_long %s -o %s.clean.cas -q %s -d %s" % (argstring, prefix, reads, contigs)
self.message(k)
task = GridTask(template="pick", name="%s" % (self.stepname), command=k, cpu=cpus, cwd = self.stepdir, debug=True)
task.wait()
for file in glob.glob("%s/*.e*" % (self.stepdir)):
#self.message(file)
contents = "\n".join(loadLines("%s" % (file)))
if contents.find("No more available licenses")>-1:
self.message("No more available licenses, retrying in a bit...")
command = "rm %s" % (file)
p = Popen(shlex.split(command), close_fds=True)
p.wait()
time.sleep(60)
else:
done = True
else:
self.failed=True
else:
self.failed=True
class CLC_Assemble_Info(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CLC_Assemble_Info")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
for f in self.find("cas"):
k = "/usr/local/packages/clc-ngs-cell/assembly_info %s > %s.clcassemblystats" % (f,f)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
task.wait()
class ContigCoverageUpdate(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("ContigCovUp")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
table = self.find("clcassemblystats")
fasta = self.find("fasta")
tasks = list()
for f in fasta:
#self.message(f)
if f.find("contigs")>-1:
for t in table:
if ".".join(f.split(".")[1:-3]) in t:
k = "%spython %sContigCoverageUpdate.py %s %s" % (binpath, scriptspath, t, f)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
tasks.append(task)
for t in tasks:
t.wait()
#self.failed=True
class ORFCoverage(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("ORFCoverage")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
table = self.find("clcassemblytable")[0]
fastas = self.find("fasta")
fasta= ""
for f in fastas:
if f.find("orf")>-1:
fasta = f
id = fasta.strip().split(".")[1]
k = "%spython %sORFCoverage.py -o %s -a %s -e %s.orfs.weight" % (binpath, scriptspath, fasta, table, id)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
tasks.append(task)
for t in tasks:
t.wait()
class ORFCoverageNorm(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("ORFCoverageNorm")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
weight = self.find("weight")
k = "%sR CMD BATCH %sORFweights.r" % (binpath, scriptspath)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
tasks.append(task)
for t in tasks:
t.wait()
class PROKModify(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("ProkModify")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
w = self.find("normweight")[0]
p = self.find("txt")[0]
id = p.strip().split(".")[1]
k = "%spython %sORFCoverageModifyProk.py -p %s -w %s -o %s.jcvinorm" % (binpath, scriptspath, p, w, id)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
tasks.append(task)
for t in tasks:
t.wait()
class CLC_Assemble_Table(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("CLC_Assemble_Table")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
x = self.find ("fasta")
tasks = list()
for f in self.find("cas"):
k = "/usr/local/packages/clc-ngs-cell/assembly_table -n -p -s %s > %s.clcassemblytable" % (f,f)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
task.wait()
k = "awk ' $5 == -1 && $4 == -1 {print $2}' %s.clcassemblytable > %s.singletons" % (f,f)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
tasks.append(task)
k = "awk ' $5 > -1 && $4 > -1 {print $2}' %s.clcassemblytable > %s.assembled" % (f,f)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
class FastaSummaryRPlots(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FastaSummary")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
self.find("fasta")
k = "%sR CMD BATCH %sStatFastaFiles.R" % (binpath, scriptspath)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
task.wait()
class ClearcutTree(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("ClearcutTree")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
x = self.find("fasta")
for fasta in x:
k = "%sclearcut --alignment --DNA --in=%s --out=%s.tre" % (binpath, fasta, fasta)
self.message(k)
task = GridTask(template="himem.q", name=self.stepname, command=k, cwd = self.stepdir)
task.wait()
class SQA(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("SolexaQA")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
files= list()
for type in ("mate1", "mate2", "fastq"):
tmp = self.find(type)
if tmp!=None:
files.extend(tmp)
argstring = ""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
tasks=list()
for f in files:
k = "perl %sSolexaQA.pl %s %s" % (sqapath, f, argstring)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
class SQAtrim(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("DynamicTrim")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
files= list()
for type in ("mate1", "mate2", "fastq"):
tmp = self.find(type)
if tmp!=None:
files.extend(tmp)
argstring = ""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
jobs = len(files)
counter=0
tasks=list()
for f in files:
counter+=1
tokens = f.strip().split(".")
newf = "%s.trimmed.%s" % ( ".".join(tokens[:-1]), tokens[-1])
k = "perl %sDynamicTrim.pl %s %s; mv %s.trimmed %s" % (sqapath, f, argstring, f, newf)
if counter==1:
self.message(k)
elif counter==2:
self.message("processing %s files..." % (jobs))
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
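# A minimal sketch of how a trimming step might be wired into a pipeline,
# assuming an upstream step object (here called "reads") that provides
# mate1/mate2/fastq files; the flag names below are illustrative only and
# are simply forwarded to the SolexaQA scripts:
#
#   trim   = SQAtrim({"-h": "20"}, [reads])     # quality cutoff for DynamicTrim.pl (assumed flag)
#   lenfil = SQAlenfil({"-l": "50"}, [trim])    # length cutoff for LengthSort.pl (assumed flag)
#
# Each step calls self.start() in __init__, so constructing the objects
# appears to be enough to schedule the work on the grid.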
class SQAlenfil(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("LengthSort")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
M1 = list()
M2 = list()
pairedindex = defaultdict(list)
singlefiles = list()
for tp in ("mate1", "mate2"):
tmp = self.find(tp)
if tmp!=None:
if tp.endswith("1"):
M1.extend(tmp)
elif tp.endswith("2"):
M2.extend(tmp)
for f1 in M1:
core1 = f1.strip().split(".")[1:-1]
for f2 in M2:
core2 = f2.strip().split(".")[1:-1]
if core1==core2:
pairedindex[f1].append(f2)
for type in ("fastq"):
tmp = self.find(type)
if tmp!=None:
singlefiles.extend(tmp)
argstring = ""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
tasks=list()
for f1, f2s in pairedindex.items():
if len(f2s)!=1:
singlefiles.append(f1)
singlefiles.extend(f2s)
else:
newf1 = "%s.len.mate1" % (".".join(f1.strip().split(".")[:-1]))
newf2 = "%s.len.mate2" % (".".join(f2s[0].strip().split(".")[:-1]))
k = "perl %sLengthSort.pl %s %s %s; mv %s.paired1 %s; mv %s.paired2 %s" % (sqapath, f1, f2s[0], argstring, f1, newf1, f1, newf2)
#k = "perl %sLengthSort.pl %s %s %s" % (sqapath, f1, f2s[0], argstring)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
tasks.append(task)
for f in singlefiles:
newf = "%s.len.fastq" % (".".join(f.strip().split(".")[:-1]))
k = "perl %sLengthSort.pl %s %s; mv %s.single %s" % (sqapath, f, argstring, f, newd)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
class GuessFastQEncoding(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FastQEnc")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
x = self.find("mate1")
k = "%spython %sFastQEncoding.py %s > %s.offset" % (binpath, scriptspath, x[0], x[0])
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cwd = self.stepdir)
task.wait()
otpt = ""
for line in loadLines("%s/%s.offset" % (self.stepdir, x[0])):
otpt = "%s%s" % (otpt, line.strip())
self.message("%s -> %s" % (x[0], otpt))
self.setOutputValue("-Q", otpt)
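# The detected offset is stored as an output value under the key "-Q".
# A hypothetical downstream consumer (names assumed, not part of this file)
# could splice it into its command line as a quality-offset flag:
#
#   enc = GuessFastQEncoding({}, [imported_reads])
#   # ... later, an alignment step reads the "-Q" output value and appends
#   # "-Q 33" or "-Q 64" to its invocation.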
class MascotReportLifter(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("MascotLifter")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
one_at_a_time.acquire()
try:
file = self.getInputValue("file")
id = self.getInputValue("id")
script = os.environ["YAP_MASCOT_AUTOMATION_JS"] #"/usr/local/projects/GATES/jshankar/YAPCOPY/sszpakow/ANNOTATION/MascotAutomaton.js"
self.message("caching and reporting on %s in file %s" % (id, file))
#"/usr/local/projects/GATES/jshankar/YAPCOPY/sszpakow/YAP/bin/phantomjs
k = "%s %s %s %s" (os.environ["YAP_MASCOT_AUTOMATION_PHANTOM_JS"],\
script, file, id)
self.message(k)
#task = GridTask(template="default", name=self.stepname, command=k, cwd = self.stepdir, debug=True)
#task.wait()
p = Popen(shlex.split(k), stdout = PIPE, stderr = PIPE, close_fds=True, cwd=self.stepdir)
out, err = p.communicate()
with open("%s/%s_%s.out.log" % (self.stepdir,\
file.strip().split("/")[-1], id ), "w") as f:
f.write(out)
f.write("\n")
with open("%s/%s_%s.err.log" % (self.stepdir,\
file.strip().split("/")[-1], id ), "w") as f:
f.write(err)
f.write("\n")
if err.find("'waitFor()' timeout")>-1 or out.find("'waitFor()' timeout"):
self.message("Time out detected! %s - %s" % (id, file) )
#self.failed=True
finally:
one_at_a_time.release()
class SED_replace(DefaultStep):
def __init__(self, INS, ARGS, PREV,):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("SED_replace")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
for t in self.getInputValue("types").strip().split(","):
files = self.find(t)
old = self.getInputValue("old")
new = self.getInputValue("new")
for f in files:
newname = f[0:-len(t)]
newname = "%str.%s" % (newname, t)
k = "sed 's/%s/%s/g' %s > %s" % (old, new, f, newname)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
class FileMiniImport(DefaultStep):
def __init__(self, INS, ARGS):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
#self.setPrevious(PREV)
self.setName("FILE_mini_input")
self.start()
def performStep(self):
lines = self.getInputValue("lines")
if lines == None:
lines = 100000
for type in self.inputs.keys():
files = self.inputs[type]
for file in files:
pool_open_files.acquire()
file = file.split("~")
if len(file)>1:
file, newname = file
tmp = file.strip().split("/")[-1]
k = "head -n %s %s" % (lines, file)
outname = "%s.%s" % (newname, type)
else:
file = file[0]
tmp = file.strip().split("/")[-1]
k ="head -n %s %s " % (lines, file, tmp)
outname = "imported.%s" % (tmp)
p = Popen(shlex.split(k), stdout=PIPE, stderr=PIPE, cwd=self.stepdir, close_fds=True)
self.message(k)
out,err = p.communicate()
p.wait()
o = open("%s/%s" % (self.stepdir, outname), "w")
o.write(out)
o.close()
pool_open_files.release()
class FileSplit(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#ARGS = {"types": TYPES}
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FILE_split")
#self.nodeCPUs=nodeCPUs
self.chunk = 0
self.start()
def performStep(self):
tasks = list()
self.chunk = self.getInputValue("chunk")
for t in self.getInputValue("types").strip().split(","):
files = self.find(t)
for f in files:
k = "split -a 5 -l %s %s %s.split. " % (self.chunk, f, f)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
### rename the files
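# Example of the rename performed below (derived from the suffix reshuffle):
#   sample.fasta.split.aaaaa  ->  sample.aaaaa.split.fasta
# i.e. the original type extension is moved back to the end, presumably so
# that subsequent self.find("fasta") calls pick up the split chunks.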
for file in glob.glob("%s/*.split.*" % (self.stepdir)):
newfile = file.strip().split(".")
suffix = newfile[-3:]
suffix.reverse()
newfile = "%s.%s" % ( ".".join(newfile[:-3]), ".".join(suffix))
#self.message("%s -> %s" % (file, newfile) )
command = "mv %s %s" % (file, newfile)
p = Popen(shlex.split(command), close_fds=True)
p.wait()
class FastaSplit(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FASTA_split")
self.start()
def performStep(self):
tasks = list()
chunk = self.getInputValue("chunk")
for t in self.getInputValue("types").strip().split(","):
files = self.find(t)
for f in files:
k = "{0}python ~/scripts/python/FastaSplitter.py -f {1} -c {2}".format(binpath, f, chunk)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
class FastaSort(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FASTA_sort")
self.start()
def performStep(self):
tasks = list()
for t in self.getInputValue("types").strip().split(","):
files = self.find(t)
for f in files:
k = "{0}python ~/scripts/python/FastaSort.py {1}".format(binpath, f)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
class BLAST(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("BLAST")
self.start()
def performStep(self):
tasks = list()
cpus = 0
arguments = ""
mode = "blastn"
for arg, val in self.arguments.items():
if arg == "mode":
mode = val
else:
if arg == "-num_threads":
cpus = val
arguments = "{0} {1} {2} ".format(arguments, arg, val)
if cpus ==0:
cpus = 4
arguments = "{0} -num_threads 4 ".format(arguments)
for f in self.find("fasta"):
k = "/usr/local/packages/ncbi-blast+/bin/{0} {1} -query {2} -outfmt \"6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore\" -out {2}.blast6".format(mode, arguments, f)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=cpus, cwd = self.stepdir, debug=True)
tasks.append(task)
for task in tasks:
task.wait()
#self.failed = True
class FileTypeTrim(DefaultStep):
def __init__(self, ARGS, PREV):
DefaultStep.__init__(self)
#self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FILE_typetrim")
#self.nodeCPUs=nodeCPUs
self.start()
def performStep(self):
tasks = list()
for input in self.arguments.keys():
files = self.find(input)
for file in files:
outname = "%s" % (file[:-len(input)])
outname = outname.strip(".")
k = "cp %s %s" % (file, outname)
self.message(k)
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
class Flash (DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("Flash")
self.counter=0
self.start()
def performStep(self):
tasks = list()
m1 = self.find("mate1")
m2 = self.find("mate2")
argstring = ""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
tasks = list()
jobs = len(m1)
counter=0
for f in m1:
counter+=1
name = ".".join(f.strip().split(".")[:-1])
if "%s.mate2" % (name) in m2:
k = "%sflash %s.mate1 %s.mate2 %s -o %s; mv %s.notCombined_1.fastq %s.notC.mate1; mv %s.notCombined_2.fastq %s.notC.mate2" % (binpath, name, name, argstring, name, name, name, name, name)
if counter==1:
self.message(k)
elif counter==2:
self.message("processing %s file(s)..." % (jobs))
task = GridTask(template="pick", name=self.stepname, command=k, cpu=1, dependson=list(), cwd = self.stepdir)
tasks.append(task)
else:
self.message("skipping: %s" % (name))
for task in tasks:
task.wait()
class PrimerClipper(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("PrimerClipper")
self.start()
def performStep(self):
tasks = list()
m1 = self.find("fasta")
argstring = ""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
tasks = list()
self.message("processing %s files..." % len(m1))
for file in m1:
suffix = file.split(".")[-1]
prefix = ".".join(file.split(".")[:-1])
k = "%spython %sPrimerClipper.py %s -i %s" % (binpath, scriptspath, argstring, file)
if len(m1)<10:
self.message(k)
task = GridTask(template="pick", name="%s" % (self.stepname), command=k, cpu=1, cwd = self.stepdir, debug=True)
tasks.append(task)
for task in tasks:
task.wait()
if len(m1)==0:
self.message("No files for clipping...")
class FastaHeadHash(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("FastaHeadHash")
self.start()
def performStep(self):
tasks = list()
m1 = self.find("fasta")
prefix = self.arguments.pop("--prefix","")
id_gen = self.arguments.pop("--id-gen","iid")
if id_gen == "iid":
assert prefix
argstring = ""
for arg, val in self.arguments.items():
argstring = "%s %s %s " % (argstring, arg, val)
tasks = list()
self.message("processing %s files..." % len(m1))
for (i_file,file) in enumerate(m1):
if id_gen == "iid" and len(m1) > 1:
this_prefix = "{}x{}".format(prefix,i_file)
else:
this_prefix = prefix
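# Example of the prefix handling above: with --prefix "S1" and three input
# fasta files under the "iid" scheme, the per-file prefixes become "S1x0",
# "S1x1" and "S1x2", keeping renamed sequence IDs unique across files;
# with a single file (or any other id-gen scheme) the prefix is used unchanged.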
k = "%spython %sFastaUniversalRenamer.py %s --fasta %s --prefix '%s' --id-gen '%s'" % \
(binpath, scriptspath, argstring, file, this_prefix, id_gen)
self.message(k)
task = GridTask(template="pick", name="%s" % (self.stepname), command=k, cpu=1, cwd = self.stepdir)
tasks.append(task)
for task in tasks:
task.wait()
class OtuTable(DefaultStep):
def __init__(self, INS, ARGS, PREV):
DefaultStep.__init__(self)
self.setInputs(INS)
self.setArguments(ARGS)
self.setPrevious(PREV)
self.setName("OtuTable")
self.start()
def performStep(self):
listfile = self.find("list")[0]
group = self.find("group")[0]
k = "%spython %sOTUtableMaker.py -l %s -g %s " % (binpath, scriptspath, listfile, group)
self.message(k)
task = GridTask(template="pick", name="%s" % (self.stepname), command=k, cpu=1, cwd = self.stepdir)
task.wait()
#################################################
## FUNCTIONS
#################################################
def getQ(file):
k = "%spython %sFastQEncoding.py %s" % (binpath, scriptspath, file)
p = Popen(shlex.split(k), stdout=PIPE, stderr=PIPE, close_fds=True)
out,err = p.communicate()
p.wait()
return "%s" % (out.strip())
#################################################
## ARGS
#################################################
one_at_a_time = BoundedSemaphore(value=2, verbose=False)
sqapath = os.path.join(os.environ["YAP_DEPS"],"solexaQA-current/")
#################################################
## Finish
#################################################
| andreyto/YAP | StepsLibrary_EXP.py | Python | mit | 52,486 | ["BLAST", "Bowtie"] | b769e52549a2af6f2fc04b36f783c3027bc8403499cf0568d232097e372846dc |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
Module to provide lightweight definitions of functionals and
SuperFunctionals
"""
import re
import os
import math
from psi4 import core
from psi4.driver.qcdb import interface_dftd3 as dftd3
## ==> Functionals <== ##
def build_s_x_functional(name):
# Call this first
fun = core.Functional.build_base('S_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('S_X')
# Tab in, trailing newlines
fun.set_description(' Slater LSDA Exchange\n')
# Tab in, trailing newlines
fun.set_citation(' J.C. Slater, Phys. Rev., 81(3):385-390, 1951\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(False)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# Custom parameters
# => End User-Customization <= #
return fun
def build_b88_x_functional(name):
# Call this first
fun = core.Functional.build_base('B88_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('B88_X')
# Tab in, trailing newlines
fun.set_description(' Becke88 GGA Exchange\n')
# Tab in, trailing newlines
fun.set_citation(' A.D. Becke, Phys. Rev. A, 38(6):3098-3100, 1988\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(True)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# Custom parameters
fun.set_parameter('B88_d', 0.0042)
fun.set_parameter('B88_a', 1.0000)
# => End User-Customization <= #
return fun
def build_b86b_x_functional(name):
# Call this first
fun = core.Functional.build_base('B86B_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('B86B_X')
# Tab in, trailing newlines
fun.set_description(' Becke86B GGA Exchange\n')
# Tab in, trailing newlines
fun.set_citation(' A. D. Becke, J. Chem. Phys. 85:7184, 1986.\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(True)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# => End User-Customization <= #
return fun
def build_pw86_x_functional(name):
# Call this first
fun = core.Functional.build_base('PW86_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('PW86_X')
# Tab in, trailing newlines
fun.set_description(' Perdew-Wang 1986 (PW86) GGA Exchange\n')
# Tab in, trailing newlines
fun.set_citation(' J. P. Perdew and W. Yue, Phys. Rev. B 33:8800(R), 1986.\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(True)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# => End User-Customization <= #
return fun
def build_b3_x_functional(name):
# Call this first
fun = core.Functional.build_base('B88_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('B3_X')
# Tab in, trailing newlines
fun.set_description(' Becke88 GGA Exchange (B3LYP weighting)\n')
# Tab in, trailing newlines
fun.set_citation(' P.J. Stephens et. al., J. Phys. Chem., 98, 11623-11627, 1994\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(True)
fun.set_meta(False)
fun.set_alpha(0.8)
fun.set_omega(0.0)
# Custom parameters
fun.set_parameter('B88_d', 0.0042)
fun.set_parameter('B88_a', 0.9000)
# => End User-Customization <= #
return fun
def build_pbe_x_functional(name):
# Call this first
fun = core.Functional.build_base('PBE_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('PBE_X')
# Tab in, trailing newlines
fun.set_description(' PBE GGA Exchange Hole (Parameter Free)\n')
# Tab in, trailing newlines
fun.set_citation(' J.P. Perdew et. al., Phys. Rev. Lett., 77(18), 3865-3868, 1996\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(True)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# Custom parameters
fun.set_parameter('PBE_kp', 0.804)
fun.set_parameter('PBE_mu', 0.2195149727645171)
# => End User-Customization <= #
return fun
def build_revpbe_x_functional(name):
# Call this first
fun = core.Functional.build_base('PBE_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('revPBE_X')
# Tab in, trailing newlines
fun.set_description(' Revised PBE GGA Exchange Hole (Parameter Free)\n')
# Tab in, trailing newlines
fun.set_citation(' Zhang et. al., Phys. Rev. Lett., 80(4), 890, 1998\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(True)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# Custom parameters
fun.set_parameter('PBE_kp', 1.245)
fun.set_parameter('PBE_mu', 0.2195149727645171)
# => End User-Customization <= #
return fun
def build_rpbe_x_functional(name):
# Call this first
fun = core.Functional.build_base('RPBE_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('RPBE_X')
# Tab in, trailing newlines
fun.set_description(' RPBE GGA Exchange Hole (Parameter Free)\n')
# Tab in, trailing newlines
fun.set_citation(' Hammer et. al. Phys. Rev. B, 59(2), 7413-7421, 1999\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(True)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# Custom parameters
fun.set_parameter('PBE_kp', 0.804)
fun.set_parameter('PBE_mu', 0.2195149727645171)
# => End User-Customization <= #
return fun
def build_sogga_x_functional(name):
# Call this first
fun = core.Functional.build_base('SOGGA_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('SOGGA_X')
# Tab in, trailing newlines
fun.set_description(' Second Order GGA Exchange Hole (Parameter Free)\n')
# Tab in, trailing newlines
fun.set_citation(' Zhao et. al., J. Chem. Phys., 128(18), 184109, 2008\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(True)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# Custom parameters
fun.set_parameter('PBE_kp', 0.55208138)
fun.set_parameter('PBE_mu', 10.0 / 81.0)
# => End User-Customization <= #
return fun
def build_pbesol_x_functional(name):
# Call this first
fun = core.Functional.build_base('PBE_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('PBEsol_X')
# Tab in, trailing newlines
fun.set_description(' PBEsol GGA Exchange Hole (Parameter Free)\n')
# Tab in, trailing newlines
fun.set_citation(' J.P. Perdew et. al., Phys. Rev. Lett., 77(18), 3865-3868, 1996\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(True)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# Custom parameters
fun.set_parameter('PBE_kp', 0.804)
fun.set_parameter('PBE_mu', 10.0 / 81.0)
# => End User-Customization <= #
return fun
def build_pw91_x_functional(name):
# Call this first
fun = core.Functional.build_base('PW91_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('PW91_X')
# Tab in, trailing newlines
fun.set_description(' PW91 Parameterized GGA Exchange\n')
# Tab in, trailing newlines
fun.set_citation(' J.P. Perdew et. al., Phys. Rev. B., 46(11), 6671-6687, 1992\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(True)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# Custom parameters
k01 = math.pow(6.0 * math.pi * math.pi, 1.0 / 3.0)
k02 = k01 * k01
k04 = k02 * k02
fun.set_parameter('PW91_a1', 0.19645 / (2.0 * k01))
fun.set_parameter('PW91_a2', 7.79560 / (2.0 * k01))
fun.set_parameter('PW91_a3', 0.27430 / (4.0 * k02))
fun.set_parameter('PW91_a4', 0.15080 / (4.0 * k02))
fun.set_parameter('PW91_a5', 100.000 / (4.0 * k02))
fun.set_parameter('PW91_a6', 0.00400 / (16.0 * k04))
# => End User-Customization <= #
return fun
def build_b97_x_functional(name):
# Call this first
fun = core.Functional.build_base('B97_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('B97_X')
# Tab in, trailing newlines
fun.set_description(' B97 Parameterized GGA Exchange\n')
# Tab in, trailing newlines
fun.set_citation(' A.D. Becke, J. Chem. Phys., 107(20), 8554-8560, 1997\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(True)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# Custom parameters
fun.set_parameter('B97_gamma', 0.004)
# => End User-Customization <= #
return fun
def build_vwn5_c_functional(name):
# Call this first
fun = core.Functional.build_base('VWN5_C')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('VWN5_C')
# Tab in, trailing newlines
fun.set_description(' VWN5 LSDA Correlation, QMC Parameters, VWN5 Spin Polarization\n')
# Tab in, trailing newlines
fun.set_citation(' S.H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys., 58, 1200-1211, 1980\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(False)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# Custom parameters
fun.set_parameter('EcP_2', -0.10498)
fun.set_parameter('EcP_3', 3.72744)
fun.set_parameter('EcP_4', 12.9352)
fun.set_parameter('EcF_2', -0.32500)
fun.set_parameter('EcF_3', 7.06042)
fun.set_parameter('EcF_4', 18.0578)
fun.set_parameter('Ac_2', -0.00475840)
fun.set_parameter('Ac_3', 1.13107)
fun.set_parameter('Ac_4', 13.0045)
# => End User-Customization <= #
return fun
def build_vwn5rpa_c_functional(name):
# Call this first
fun = core.Functional.build_base('VWN5_C')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('VWN5RPA_C')
# Tab in, trailing newlines
fun.set_description(' VWN5 LSDA Correlation, RPA Parameters, VWN5 Spin Polarization\n')
# Tab in, trailing newlines
fun.set_citation(' S.H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys., 58, 1200-1211, 1980\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(False)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# Custom parameters
fun.set_parameter('EcP_2', -0.409286)
fun.set_parameter('EcP_3', 13.0720)
fun.set_parameter('EcP_4', 42.7198)
fun.set_parameter('EcF_2', -0.743294)
fun.set_parameter('EcF_3', 20.1231)
fun.set_parameter('EcF_4', 101.578)
fun.set_parameter('Ac_2', -0.228344)
fun.set_parameter('Ac_3', 1.06835)
fun.set_parameter('Ac_4', 11.4813)
# => End User-Customization <= #
return fun
def build_vwn3_c_functional(name):
# Call this first
fun = core.Functional.build_base('VWN3_C')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('VWN3_C')
# Tab in, trailing newlines
fun.set_description(' VWN3 LSDA Correlation, QMC Parameters, VWN1 Spin Polarization\n')
# Tab in, trailing newlines
fun.set_citation(' S.H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys., 58, 1200-1211, 1980\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(False)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# Custom parameters
fun.set_parameter('EcP_2', -0.10498)
fun.set_parameter('EcP_3', 3.72744)
fun.set_parameter('EcP_4', 12.9352)
fun.set_parameter('EcF_2', -0.32500)
fun.set_parameter('EcF_3', 7.06042)
fun.set_parameter('EcF_4', 18.0578)
# => End User-Customization <= #
return fun
def build_vwn3rpa_c_functional(name):
# Call this first
fun = core.Functional.build_base('VWN3_C')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('VWN3RPA_C')
# Tab in, trailing newlines
fun.set_description(' VWN3 LSDA Correlation, RPA Parameters, VWN1 Spin Polarization\n')
# Tab in, trailing newlines
fun.set_citation(' S.H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys., 58, 1200-1211, 1980\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(False)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.0)
# Custom parameters
fun.set_parameter('EcP_2', -0.409286)
fun.set_parameter('EcP_3', 13.0720)
fun.set_parameter('EcP_4', 42.7198)
fun.set_parameter('EcF_2', -0.743294)
fun.set_parameter('EcF_3', 20.1231)
fun.set_parameter('EcF_4', 101.578)
# => End User-Customization <= #
return fun
def build_ws_x_functional(name):
# Call this first
fun = core.Functional.build_base('wS_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('wS_X')
# Tab in, trailing newlines
fun.set_description(' Slater Short-Range LSDA Exchange\n')
# Tab in, trailing newlines
fun.set_citation(' Adamson et. al., J. Comput. Chem., 20(9), 921-927, 1999\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(False)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.3)
# Custom parameters
# => End User-Customization <= #
return fun
def build_wpbe_x_functional(name):
# Call this first
fun = core.Functional.build_base('wPBE_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('wPBE_X')
# Tab in, trailing newlines
fun.set_description(' PBE Short-Range GGA Exchange (HJS Formalism)\n')
# Tab in, trailing newlines
fun.set_citation(' Henderson et. al., J. Chem. Phys., 128, 194105, 2008\n Weintraub, Henderson, and Scuseria, J. Chem. Theory. Comput., 5, 754 (2009)\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(True)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.3)
# Custom parameters
fun.set_parameter('A', 0.7572110)
fun.set_parameter('B', -0.1063640)
fun.set_parameter('C', -0.1186490)
fun.set_parameter('D', 0.6096500)
fun.set_parameter('E', -0.0477963)
fun.set_parameter('Ha0', 0.0000000)
fun.set_parameter('Ha1', 0.0000000)
fun.set_parameter('Ha2', 0.0159941)
fun.set_parameter('Ha3', 0.0852995)
fun.set_parameter('Ha4', -0.1603680)
fun.set_parameter('Ha5', 0.1526450)
fun.set_parameter('Ha6', -0.0971263)
fun.set_parameter('Ha7', 0.0422061)
fun.set_parameter('Hb0', 1.0000000)
fun.set_parameter('Hb1', 5.3331900)
fun.set_parameter('Hb2', -12.478000)
fun.set_parameter('Hb3', 11.098800)
fun.set_parameter('Hb4', -5.1101300)
fun.set_parameter('Hb5', 1.7146800)
fun.set_parameter('Hb6', -0.6103800)
fun.set_parameter('Hb7', 0.3075550)
fun.set_parameter('Hb8', -0.0770547)
fun.set_parameter('Hb9', 0.0334840)
# => End User-Customization <= #
return fun
def build_wpbesol_x_functional(name):
# Call this first
fun = core.Functional.build_base('wPBE_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('wPBEsol_X')
# Tab in, trailing newlines
fun.set_description(' PBEsol Short-Range GGA Exchange (HJS Formalism)\n')
# Tab in, trailing newlines
fun.set_citation(' Henderson et. al., J. Chem. Phys., 128, 194105, 2008\n Weintraub, Henderson, and Scuseria, J. Chem. Theory. Comput., 5, 754 (2009)\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(True)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.3)
# Custom parameters
fun.set_parameter('A', 0.7572110)
fun.set_parameter('B', -0.1063640)
fun.set_parameter('C', -0.1186490)
fun.set_parameter('D', 0.6096500)
fun.set_parameter('E', -0.0477963)
fun.set_parameter('Ha0', 0.0000000)
fun.set_parameter('Ha1', 0.0000000)
fun.set_parameter('Ha2', 0.0047333)
fun.set_parameter('Ha3', 0.0403304)
fun.set_parameter('Ha4', -0.0574615)
fun.set_parameter('Ha5', 0.0435395)
fun.set_parameter('Ha6', -0.0216251)
fun.set_parameter('Ha7', 0.0063721)
fun.set_parameter('Hb0', 1.00000)
fun.set_parameter('Hb1', 8.52056)
fun.set_parameter('Hb2', -13.9885)
fun.set_parameter('Hb3', 9.28583)
fun.set_parameter('Hb4', -3.27287)
fun.set_parameter('Hb5', 0.843499)
fun.set_parameter('Hb6', -0.235543)
fun.set_parameter('Hb7', 0.0847074)
fun.set_parameter('Hb8', -0.0171561)
fun.set_parameter('Hb9', 0.0050552)
# => End User-Customization <= #
return fun
def build_wb88_x_functional(name):
# Call this first
fun = core.Functional.build_base('wB88_X')
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name('wB88_X')
# Tab in, trailing newlines
fun.set_description(' B88 Short-Range GGA Exchange (HJS Formalism)\n')
# Tab in, trailing newlines
fun.set_citation(' Henderson et. al., J. Chem. Phys., 128, 194105, 2008\n Weintraub, Henderson, and Scuseria, J. Chem. Theory. Comput., 5, 754 (2009)\n')
# These should be set by build_base, but prove that you know what's up
fun.set_gga(True)
fun.set_meta(False)
fun.set_alpha(1.0)
fun.set_omega(0.3)
# Custom parameters
fun.set_parameter('A', 0.7572110)
fun.set_parameter('B', -0.1063640)
fun.set_parameter('C', -0.1186490)
fun.set_parameter('D', 0.6096500)
fun.set_parameter('E', -0.0477963)
fun.set_parameter('Ha0', 0.0000000)
fun.set_parameter('Ha1', 0.0000000)
fun.set_parameter('Ha2', 0.0253933)
fun.set_parameter('Ha3', -0.0673075)
fun.set_parameter('Ha4', 0.0891476)
fun.set_parameter('Ha5', -0.0454168)
fun.set_parameter('Ha6', -0.0076581)
fun.set_parameter('Ha7', 0.0142506)
fun.set_parameter('Hb0', 1.00000)
fun.set_parameter('Hb1', -2.65060)
fun.set_parameter('Hb2', 3.91108)
fun.set_parameter('Hb3', -3.31509)
fun.set_parameter('Hb4', 1.54485)
fun.set_parameter('Hb5', -0.198386)
fun.set_parameter('Hb6', -0.136112)
fun.set_parameter('Hb7', 0.0647862)
fun.set_parameter('Hb8', 0.0159586)
fun.set_parameter('Hb9', -2.45066E-4)
# => End User-Customization <= #
return fun
def build_hf_x_functional(name):
# Call this first
fun = core.Functional.build_base('HF_X')
# => End User-Customization <= #
return fun
def build_primitive_functional(name):
# Call this first
key = name.upper()
if (key[0] == 'W'):
key = 'w' + key[1:]
fun = core.Functional.build_base(key)
# => User-Customization <= #
# No spaces, keep it short and according to convention
fun.set_name(key)
# Tab in, trailing newlines
fun.set_description(fun.description())
# Tab in, trailing newlines
fun.set_citation(fun.citation())
# These should be set by build_base, but prove that you know what's up
fun.set_gga(fun.is_gga())
fun.set_meta(fun.is_meta())
fun.set_alpha(fun.alpha())
fun.set_omega(fun.omega())
# Custom parameters
# Always built-in for this functional
# => End User-Customization <= #
return fun
# Functional lookup table
functionals = {
's_x' : build_s_x_functional,
'b88_x' : build_b88_x_functional,
'b86b_x' : build_b86b_x_functional,
'pw86_x' : build_pw86_x_functional,
'b3_x' : build_b3_x_functional,
'pbe_x' : build_pbe_x_functional,
'revpbe_x' : build_revpbe_x_functional,
'rpbe_x' : build_rpbe_x_functional,
'sogga_x' : build_sogga_x_functional,
'pbesol_x' : build_pbesol_x_functional,
'pw91_x' : build_pw91_x_functional,
'b97_x' : build_b97_x_functional,
'ws_x' : build_ws_x_functional,
'wb97_x' : build_primitive_functional,
'wpbe_x' : build_wpbe_x_functional,
'wpbesol_x' : build_wpbesol_x_functional,
'wb88_x' : build_wb88_x_functional,
'ft97b_x' : build_primitive_functional,
'm_x' : build_primitive_functional,
'lyp_c' : build_primitive_functional,
'pz81_c' : build_primitive_functional,
'p86_c' : build_primitive_functional,
'vwn5rpa_c' : build_vwn5rpa_c_functional,
'vwn5_c' : build_vwn5_c_functional,
'vwn3rpa_c' : build_vwn3rpa_c_functional,
'vwn3_c' : build_vwn3_c_functional,
'pw91_c' : build_primitive_functional,
'pw92_c' : build_primitive_functional,
'pbe_c' : build_primitive_functional,
'ft97_c' : build_primitive_functional,
'b_c' : build_primitive_functional,
'm_c' : build_primitive_functional,
'pbea_c' : build_primitive_functional,
'pw92a_c' : build_primitive_functional,
'wpbe_c' : build_primitive_functional,
'wpw92_c' : build_primitive_functional,
'hf_x' : build_hf_x_functional,
}
def build_functional(alias):
name = alias.lower()
return functionals[name](name)
def functional_list():
val = []
for key in functionals.keys():
val.append(functionals[key](key))
return val
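# A minimal sketch of how the lookup table above is used; the names are the
# keys of the "functionals" dict, matched case-insensitively by
# build_functional(). The name() accessor is assumed to mirror set_name():
#
#   >>> fun = build_functional('B88_X')               # Becke88 GGA exchange
#   >>> fun.name()                                    # doctest-style illustration
#   'B88_X'
#   >>> len(functional_list()) == len(functionals)
#   True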
## ==> SuperFunctionals <== ##
def build_ws_x_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wS_X')
# Tab in, trailing newlines
sup.set_description(' Slater Short-Range LSDA Exchange\n')
# Tab in, trailing newlines
sup.set_citation(' Adamson et. al., J. Comput. Chem., 20(9), 921-927, 1999\n')
# Add member functionals
sup.add_x_functional(build_functional('wS_X'))
# Set GKS up after adding functionals
sup.set_x_omega(0.3)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
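# All superfunctional builders in this section share this signature and
# return a (SuperFunctional, dispersion) tuple; the second element is False
# when no empirical dispersion correction is attached. Illustrative call
# (grid size and derivative level are arbitrary, name() assumed as above):
#
#   sup, disp = build_ws_x_superfunctional('ws_x', 2000, 1)
#   assert disp is False and sup.name() == 'wS_X'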
def build_wpbe_x_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wPBE_X')
# Tab in, trailing newlines
sup.set_description(' PBE Short-Range GGA Exchange (HJS Model)\n')
# Tab in, trailing newlines
sup.set_citation(' Henderson et. al., J. Chem. Phys., 128, 194105, 2008\n Weintraub, Henderson, and Scuseria, J. Chem. Theory. Comput., 5, 754 (2009)\n')
# Add member functionals
sup.add_x_functional(build_functional('wPBE_X'))
# Set GKS up after adding functionals
sup.set_x_omega(0.3)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_wpbesol_x_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wPBEsol_X')
# Tab in, trailing newlines
sup.set_description(' PBEsol Short-Range GGA Exchange (HJS Model)\n')
# Tab in, trailing newlines
sup.set_citation(' Henderson et. al., J. Chem. Phys., 128, 194105, 2008\n Weintraub, Henderson, and Scuseria, J. Chem. Theory. Comput., 5, 754 (2009)\n')
# Add member functionals
sup.add_x_functional(build_functional('wPBEsol_X'))
# Set GKS up after adding functionals
sup.set_x_omega(0.3)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_wpw92_c_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wPW92_C')
# Tab in, trailing newlines
sup.set_description(' Short-Range PW92 Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' TODO\n')
# Add member functionals
sup.add_c_functional(build_functional('wPW92_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.3)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_wpbe_c_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wPBE_C')
# Tab in, trailing newlines
sup.set_description(' Short-Range PBE Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' TODO\n')
# Add member functionals
sup.add_c_functional(build_functional('wPBE_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.5)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_wpbe2_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wPBE2')
# Tab in, trailing newlines
sup.set_description(' Double-Hybrid PBE LRC Functional\n')
# Tab in, trailing newlines
sup.set_citation(' TODO\n')
# Add member functionals
sup.add_x_functional(build_functional('wPBE_X'))
sup.add_c_functional(build_functional('wPBE_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.3)
sup.set_c_omega(0.5)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_wb88_x_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wB88_X')
# Tab in, trailing newlines
sup.set_description(' B88 Short-Range GGA Exchange (HJS Model)\n')
# Tab in, trailing newlines
sup.set_citation(' Henderson et. al., J. Chem. Phys., 128, 194105, 2008\n Weintraub, Henderson, and Scuseria, J. Chem. Theory. Comput., 5, 754 (2009)\n')
# Add member functionals
sup.add_x_functional(build_functional('wB88_X'))
# Set GKS up after adding functionals
sup.set_x_omega(0.3)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_svwn_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('SVWN')
# Tab in, trailing newlines
sup.set_description(' SVWN3 (RPA) LSDA Functional\n')
# Tab in, trailing newlines
sup.set_citation(' Adamson et. al., J. Comput. Chem., 20(9), 921-927, 1999\n')
# Add member functionals
sup.add_x_functional(build_functional('S_X'))
sup.add_c_functional(build_functional('VWN3RPA_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_blyp_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('BLYP')
# Tab in, trailing newlines
sup.set_description(' BLYP GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' P.J. Stephens et. al., J. Phys. Chem., 98, 11623-11627, 1994\n B. Miehlich et. al., Chem. Phys. Lett., 157(3), 200-206 1989\n')
# Add member functionals
sup.add_x_functional(build_functional('B88_X'))
sup.add_c_functional(build_functional('LYP_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_b86bpbe_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('B86BPBE')
# Tab in, trailing newlines
sup.set_description(' B86BPBE GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' A. D. Becke, J. Chem. Phys. 85:7184, 1986.\n')
# Add member functionals
sup.add_x_functional(build_functional('B86B_X'))
sup.add_c_functional(build_functional('PBE_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_pw86pbe_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('PW86PBE')
# Tab in, trailing newlines
sup.set_description(' PW86PBE GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' J. P. Perdew and W. Yue, Phys. Rev. B 33:8800(R), 1986.\n')
# Add member functionals
sup.add_x_functional(build_functional('PW86_X'))
sup.add_c_functional(build_functional('PBE_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_pw91_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('PW91')
# Tab in, trailing newlines
sup.set_description(' PW91 GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' J.P. Perdew et. al., Phys. Rev. B., 46(11), 6671-6687, 1992\n')
# Add member functionals
sup.add_x_functional(build_functional('PW91_X'))
sup.add_c_functional(build_functional('PW91_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_bp86_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('BP86')
# Tab in, trailing newlines
sup.set_description(' BP86 GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' Null\n')
# Add member functionals
sup.add_x_functional(build_functional('B88_X'))
sup.add_c_functional(build_functional('P86_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_ft97_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('FT97')
# Tab in, trailing newlines
sup.set_description(' FT97 GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' M. Filatov and W. Theil, Int. J. Quant. Chem., 62, 603-616, 1997\n')
# Add member functionals
sup.add_x_functional(build_functional('FT97B_X'))
sup.add_c_functional(build_functional('FT97_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_pbe_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('PBE')
# Tab in, trailing newlines
sup.set_description(' PBE GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' J.P. Perdew et. al., Phys. Rev. Lett., 77(18), 3865-3868, 1996\n')
# Add member functionals
sup.add_x_functional(build_functional('PBE_X'))
sup.add_c_functional(build_functional('PBE_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_pbe0_superfunctional(name, npoints, deriv):
sup = build_pbe_superfunctional(name, npoints, deriv)[0]
sup.set_name('PBE0')
sup.set_description(' PBE0 Hybrid GGA Exchange-Correlation Functional\n')
sup.set_citation(' Adamo et. al., J. Chem. Phys., 110(13), 6158, 1999\n')
sup.set_x_alpha(0.25)
return (sup, False)
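# PBE0 reuses the PBE superfunctional and only switches on 25% exact
# exchange, i.e. the standard PBE0 mixing
#
#   E_xc[PBE0] = 0.25*E_x[HF] + 0.75*E_x[PBE] + E_c[PBE]
#
# which is what set_x_alpha(0.25) encodes here.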
def build_sogga_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('SOGGA')
# Tab in, trailing newlines
sup.set_description(' Second Order GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' Zhao et. al., J. Chem. Phys., 128(18), 184109, 2008\n')
# Add member functionals
sup.add_x_functional(build_functional('SOGGA_X'))
C = build_functional('PBE_C')
C.set_parameter('bet', 0.037526)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_b3lyp_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('B3LYP')
# Tab in, trailing newlines
sup.set_description(' B3LYP Hybrid-GGA Exchange-Correlation Functional (VWN1-RPA)\n')
# Tab in, trailing newlines
sup.set_citation(' P.J. Stephens et. al., J. Phys. Chem., 98, 11623-11627, 1994\n')
# Add member functionals
b3 = build_functional('B3_X')
b3.set_alpha(1.0)
sup.add_x_functional(b3)
lyp = build_functional('LYP_C')
lyp.set_alpha(0.81)
vwn = build_functional('VWN3RPA_C')
vwn.set_alpha(0.19)
sup.add_c_functional(vwn)
sup.add_c_functional(lyp)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.2)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
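# The weights set above reproduce the conventional B3LYP mixture (with the
# VWN3/RPA local correlation variant):
#
#   E_xc = 0.20*E_x[HF] + 0.80*E_x[Slater] + 0.72*dE_x[B88]
#          + 0.19*E_c[VWN3RPA] + 0.81*E_c[LYP]
#
# x_alpha = 0.2 supplies the exact-exchange fraction, and the 0.19/0.81
# correlation split is applied through set_alpha() on the two correlation
# functionals.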
def build_hf_x_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('HF_X')
# Tab in, trailing newlines
sup.set_description(' Hartree-Fock Exchange Functional\n')
# Tab in, trailing newlines
sup.set_citation(' \n')
# Add member functionals
hf_x = build_functional('hf_x')
hf_x.set_alpha(1.0)
sup.add_x_functional(hf_x)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(1.0)
sup.set_c_alpha(0.0)
sup.allocate()
return (sup, False)
def build_b3lyp5_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('B3LYP5')
# Tab in, trailing newlines
sup.set_description(' B3LYP5 Hybrid-GGA Exchange-Correlation Functional (VWN5)\n')
# Tab in, trailing newlines
sup.set_citation(' P.J. Stephens et. al., J. Phys. Chem., 98, 11623-11627, 1994\n')
# Add member functionals
b3 = build_functional('B3_X')
b3.set_alpha(1.0)
sup.add_x_functional(b3)
lyp = build_functional('LYP_C')
lyp.set_alpha(0.81)
vwn = build_functional('VWN5_C')
vwn.set_alpha(0.19)
sup.add_c_functional(lyp)
sup.add_c_functional(vwn)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.2)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_b970_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('B97-0')
# Tab in, trailing newlines
sup.set_description(' B97-0 Hybrid-GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' A.D. Becke, J. Chem. Phys., 107(20), 8554-8560, 1997\n')
# Add member functionals
X = build_functional('B97_X')
X.set_name('B97-0_X')
X.set_alpha(1.0 / 0.8057)
X.set_parameter('B97_gamma', 0.004)
X.set_parameter('B97_a0', 0.8094)
X.set_parameter('B97_a1', 0.5073)
X.set_parameter('B97_a2', 0.7481)
C = build_functional('B_C')
C.set_name('B97-0_C')
C.set_parameter('B97_os_gamma', 0.006)
C.set_parameter('B97_os_a0', 0.9454)
C.set_parameter('B97_os_a1', 0.7471)
C.set_parameter('B97_os_a2', -4.5961)
C.set_parameter('B97_ss_gamma', 0.2)
C.set_parameter('B97_ss_a0', 0.1737)
C.set_parameter('B97_ss_a1', 2.3487)
C.set_parameter('B97_ss_a2', -2.4868)
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.1943)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_b971_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('B97-1')
# Tab in, trailing newlines
sup.set_description(' B97-1 Hybrid-GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' F.A. Hamprecht et. al., J. Chem. Phys., 109(15), 6264-6271, 1998\n')
# Add member functionals
X = build_functional('B97_X')
X.set_name('B97-1_X')
X.set_alpha(1.0 / 0.79)
X.set_parameter('B97_gamma', 0.004)
X.set_parameter('B97_a0', 0.789518)
X.set_parameter('B97_a1', 0.573805)
X.set_parameter('B97_a2', 0.660975)
C = build_functional('B_C')
C.set_name('B97-1_C')
C.set_parameter('B97_os_gamma', 0.006)
C.set_parameter('B97_os_a0', 0.955689)
C.set_parameter('B97_os_a1', 0.788552)
C.set_parameter('B97_os_a2', -5.47869)
C.set_parameter('B97_ss_gamma', 0.2)
C.set_parameter('B97_ss_a0', 0.0820011)
C.set_parameter('B97_ss_a1', 2.71681)
C.set_parameter('B97_ss_a2', -2.87103)
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.21)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_b972_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('B97-2')
# Tab in, trailing newlines
sup.set_description(' B97-2 Hybrid-GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' P.J. Wilson et. al., J. Chem. Phys., 115(20), 9233-9242, 2001\n')
# Add member functionals
X = build_functional('B97_X')
X.set_name('B97-2_X')
X.set_alpha(1.0 / 0.79)
X.set_parameter('B97_gamma', 0.004)
X.set_parameter('B97_a0', 0.827642)
X.set_parameter('B97_a1', 0.047840)
X.set_parameter('B97_a2', 1.761250)
C = build_functional('B_C')
C.set_name('B97-2_C')
C.set_parameter('B97_os_gamma', 0.006)
C.set_parameter('B97_os_a0', 0.999849)
C.set_parameter('B97_os_a1', 1.40626)
C.set_parameter('B97_os_a2', -7.44060)
C.set_parameter('B97_ss_gamma', 0.2)
C.set_parameter('B97_ss_a0', 0.585808)
C.set_parameter('B97_ss_a1', -0.691682)
C.set_parameter('B97_ss_a2', 0.394796)
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.21)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_b97d_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('B97-D2P4')
# Tab in, trailing newlines
sup.set_description(' B97-D Pure-GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' S. Grimme, J. Comput. Chem., 27, 1787-1799, 2006\n')
# Add member functionals
X = build_functional('B97_X')
X.set_name('B97-D_X')
X.set_alpha(1.0)
X.set_parameter('B97_gamma', 0.004)
X.set_parameter('B97_a0', 1.08662)
X.set_parameter('B97_a1', -0.52127)
X.set_parameter('B97_a2', 3.25429)
C = build_functional('B_C')
C.set_name('B97-D_C')
C.set_parameter('B97_os_gamma', 0.006)
C.set_parameter('B97_os_a0', 0.69041)
C.set_parameter('B97_os_a1', 6.30270)
C.set_parameter('B97_os_a2', -14.9712)
C.set_parameter('B97_ss_gamma', 0.2)
C.set_parameter('B97_ss_a0', 0.22340)
C.set_parameter('B97_ss_a1', -1.56208)
C.set_parameter('B97_ss_a2', 1.94293)
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, ('b97-d', 'd2p4'))
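# Unlike the other builders in this section, B97-D returns a non-False
# second element, ('b97-d', 'd2p4'), signalling that an empirical -D2
# dispersion correction should be attached by the caller (presumably via
# the dftd3 interface imported at the top of this module).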
def build_hcth_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('HCTH')
# Tab in, trailing newlines
sup.set_description(' HCTH Pure-GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' F.A. Hamprecht et. al., J. Chem. Phys., 109(15), 6264-6271\n')
# Add member functionals
X = build_functional('B97_X')
X.set_name('HCTH_X')
X.set_alpha(1.0)
X.set_parameter('B97_gamma', 0.004)
X.set_parameter('B97_a0', 1.09320)
X.set_parameter('B97_a1', -0.744056)
X.set_parameter('B97_a2', 5.59920)
X.set_parameter('B97_a3', -6.78549)
X.set_parameter('B97_a4', 4.49357)
C = build_functional('B_C')
C.set_name('HCTH_C')
C.set_parameter('B97_os_gamma', 0.006)
C.set_parameter('B97_os_a0', 0.729974)
C.set_parameter('B97_os_a1', 3.35287)
C.set_parameter('B97_os_a2', -11.5430)
C.set_parameter('B97_os_a3', 8.08564)
C.set_parameter('B97_os_a4', -4.47857)
C.set_parameter('B97_ss_gamma', 0.2)
C.set_parameter('B97_ss_a0', 0.222601)
C.set_parameter('B97_ss_a1', -0.0338622)
C.set_parameter('B97_ss_a2', -0.0125170)
C.set_parameter('B97_ss_a3', -0.802496)
C.set_parameter('B97_ss_a4', 1.55396)
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_hcth120_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('HCTH120')
# Tab in, trailing newlines
sup.set_description(' HCTH120 Pure-GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' A.D. Boese, et. al., J. Chem. Phys., 112(4), 1670-1678, 2000\n')
# Add member functionals
X = build_functional('B97_X')
X.set_name('HCTH120_X')
X.set_alpha(1.0)
X.set_parameter('B97_gamma', 0.004)
X.set_parameter('B97_a0', 1.09163)
X.set_parameter('B97_a1', -0.747215)
X.set_parameter('B97_a2', 5.07833)
X.set_parameter('B97_a3', -4.10746)
X.set_parameter('B97_a4', 1.17173)
C = build_functional('B_C')
C.set_name('HCTH120_C')
C.set_parameter('B97_os_gamma', 0.006)
C.set_parameter('B97_os_a0', 0.514730)
C.set_parameter('B97_os_a1', 6.92982)
C.set_parameter('B97_os_a2', -24.7073)
C.set_parameter('B97_os_a3', 23.1098)
C.set_parameter('B97_os_a4', -11.3234)
C.set_parameter('B97_ss_gamma', 0.2)
C.set_parameter('B97_ss_a0', 0.489508)
C.set_parameter('B97_ss_a1', -0.260699)
C.set_parameter('B97_ss_a2', 0.432917)
C.set_parameter('B97_ss_a3', -1.99247)
C.set_parameter('B97_ss_a4', 2.48531)
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_hcth147_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('HCTH147')
# Tab in, trailing newlines
sup.set_description(' HCTH147 Pure-GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' A.D. Boese, et. al., J. Chem. Phys., 112(4), 1670-1678, 2000\n')
# Add member functionals
X = build_functional('B97_X')
X.set_name('HCTH147_X')
X.set_alpha(1.0)
X.set_parameter('B97_gamma', 0.004)
X.set_parameter('B97_a0', 1.09025)
X.set_parameter('B97_a1', -0.799194)
X.set_parameter('B97_a2', 5.57212)
X.set_parameter('B97_a3', -5.86760)
X.set_parameter('B97_a4', 3.04544)
C = build_functional('B_C')
C.set_name('HCTH147_C')
C.set_parameter('B97_os_gamma', 0.006)
C.set_parameter('B97_os_a0', 0.542352)
C.set_parameter('B97_os_a1', 7.01464)
C.set_parameter('B97_os_a2', -28.3822)
C.set_parameter('B97_os_a3', 35.0329)
C.set_parameter('B97_os_a4', -20.4284)
C.set_parameter('B97_ss_gamma', 0.2)
C.set_parameter('B97_ss_a0', 0.562576)
C.set_parameter('B97_ss_a1', 0.0171436)
C.set_parameter('B97_ss_a2', -1.30636)
C.set_parameter('B97_ss_a3', 1.05747)
C.set_parameter('B97_ss_a4', 0.885429)
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_hcth407_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('HCTH407')
# Tab in, trailing newlines
sup.set_description(' HCTH407 Pure-GGA Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' A.D. Boese and N.C. Handy, J. Chem. Phys., 114(13), 5497-5503, 2001\n')
# Add member functionals
X = build_functional('B97_X')
X.set_name('HCTH407_X')
X.set_alpha(1.0)
X.set_parameter('B97_gamma', 0.004)
X.set_parameter('B97_a0', 1.08184)
X.set_parameter('B97_a1', -0.518339)
X.set_parameter('B97_a2', 3.42562)
X.set_parameter('B97_a3', -2.62901)
X.set_parameter('B97_a4', 2.28855)
C = build_functional('B_C')
C.set_name('HCTH407_C')
C.set_parameter('B97_os_gamma', 0.006)
C.set_parameter('B97_os_a0', 0.589076)
C.set_parameter('B97_os_a1', 4.42374)
C.set_parameter('B97_os_a2', -19.2218)
C.set_parameter('B97_os_a3', 42.5721)
C.set_parameter('B97_os_a4', -42.0052)
C.set_parameter('B97_ss_gamma', 0.2)
C.set_parameter('B97_ss_a0', 1.18777)
C.set_parameter('B97_ss_a1', -2.40292)
C.set_parameter('B97_ss_a2', 5.61741)
C.set_parameter('B97_ss_a3', -9.17923)
C.set_parameter('B97_ss_a4', 6.24798)
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_wsvwn_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wSVWN')
# Tab in, trailing newlines
sup.set_description(' LSDA SR-XC Functional\n')
# Tab in, trailing newlines
sup.set_citation(' Adamson et. al., J. Comput. Chem., 20(9), 921-927, 1999\n')
# Add member functionals
sup.add_x_functional(build_functional('wS_X'))
sup.add_c_functional(build_functional('VWN3RPA_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.3)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_wpbe_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wPBE')
# Tab in, trailing newlines
sup.set_description(' PBE SR-XC Functional (HJS Model)\n')
# Tab in, trailing newlines
sup.set_citation(' Henderson et. al., J. Chem. Phys., 128, 194105, 2008\n Weintraub, Henderson, and Scuseria, J. Chem. Theory. Comput., 5, 754 (2009)\n')
# Add member functionals
sup.add_x_functional(build_functional('wPBE_X'))
sup.add_c_functional(build_functional('PBE_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.4)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_wpbe0_superfunctional(name, npoints, deriv):
sup = build_wpbe_superfunctional(name, npoints, deriv)[0]
sup.set_name('wPBE0')
sup.set_description(' PBE0 SR-XC Functional (HJS Model)\n')
sup.set_x_omega(0.3)
sup.set_x_alpha(0.25)
return (sup, False)
def build_wpbesol_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wPBEsol')
# Tab in, trailing newlines
sup.set_description(' PBEsol SR-XC Functional (HJS Model)\n')
# Tab in, trailing newlines
sup.set_citation(' Henderson et. al., J. Chem. Phys., 128, 194105, 2008\n Weintraub, Henderson, and Scuseria, J. Chem. Theory. Comput., 5, 754 (2009)\n')
# Add member functionals
sup.add_x_functional(build_functional('wPBEsol_X'))
sup.add_c_functional(build_functional('PBE_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.4)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_wpbesol0_superfunctional(name, npoints, deriv):
sup = build_wpbesol_superfunctional(name, npoints, deriv)[0]
sup.set_name('wPBEsol0')
sup.set_description(' PBEsol0 SR-XC Functional (HJS Model)\n')
sup.set_x_omega(0.3)
sup.set_x_alpha(0.25)
return (sup, False)
def build_wblyp_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wBLYP')
# Tab in, trailing newlines
sup.set_description(' BLYP SR-XC Functional (HJS Model)\n')
# Tab in, trailing newlines
sup.set_citation(' Henderson et. al., J. Chem. Phys., 128, 194105, 2008\n Weintraub, Henderson, and Scuseria, J. Chem. Theory. Comput., 5, 754 (2009)\n')
# Add member functionals
sup.add_x_functional(build_functional('wB88_X'))
sup.add_c_functional(build_functional('LYP_C'))
# Set GKS up after adding functionals
sup.set_x_omega(0.3)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_wb97_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wB97')
# Tab in, trailing newlines
sup.set_description(' Parameterized LRC B97 GGA XC Functional\n')
# Tab in, trailing newlines
sup.set_citation(' J.-D. Chai and M. Head-Gordon, J. Chem. Phys., 128, 084106, 2008\n')
# Add member functionals
X = build_functional('wB97_X')
X.set_name('wB97_X')
X.set_alpha(1.0)
X.set_parameter('B97_gamma', 0.004)
X.set_parameter('B97_a0', 1.0)
X.set_parameter('B97_a1', 1.13116E0)
X.set_parameter('B97_a2', -2.74915E0)
X.set_parameter('B97_a3', 1.20900E1)
X.set_parameter('B97_a4', -5.71642E0)
C = build_functional('B_C')
C.set_name('wB97_C')
C.set_parameter('B97_os_gamma', 0.006)
C.set_parameter('B97_os_a0', 1.0)
C.set_parameter('B97_os_a1', 3.99051E0)
C.set_parameter('B97_os_a2', -1.70066E1)
C.set_parameter('B97_os_a3', 1.07292E0)
C.set_parameter('B97_os_a4', 8.88211E0)
C.set_parameter('B97_ss_gamma', 0.2)
C.set_parameter('B97_ss_a0', 1.0)
C.set_parameter('B97_ss_a1', -2.55352E0)
C.set_parameter('B97_ss_a2', 1.18926E1)
C.set_parameter('B97_ss_a3', -2.69452E1)
C.set_parameter('B97_ss_a4', 1.70927E1)
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.4)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_wb97x_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wB97X')
# Tab in, trailing newlines
sup.set_description(' Parameterized Hybrid LRC B97 GGA XC Functional\n')
# Tab in, trailing newlines
sup.set_citation(' J.-D. Chai and M. Head-Gordon, J. Chem. Phys., 128, 084106, 2008\n')
# Add member functionals
X = build_functional('wB97_X')
X.set_name('wB97X_X')
X.set_alpha(1.0 / (1.0 - 0.157706))
X.set_parameter('B97_gamma', 0.004)
X.set_parameter('B97_a0', 8.42294E-1)
X.set_parameter('B97_a1', 7.26479E-1)
X.set_parameter('B97_a2', 1.04760E0)
X.set_parameter('B97_a3', -5.70635E0)
X.set_parameter('B97_a4', 1.32794E1)
C = build_functional('B_C')
C.set_name('wB97X_C')
C.set_parameter('B97_os_gamma', 0.006)
C.set_parameter('B97_os_a0', 1.0)
C.set_parameter('B97_os_a1', 2.37031E0)
C.set_parameter('B97_os_a2', -1.13995E1)
C.set_parameter('B97_os_a3', 6.58405E0)
C.set_parameter('B97_os_a4', -3.78132E0)
C.set_parameter('B97_ss_gamma', 0.2)
C.set_parameter('B97_ss_a0', 1.0)
C.set_parameter('B97_ss_a1', -4.33879E0)
C.set_parameter('B97_ss_a2', 1.82308E1)
C.set_parameter('B97_ss_a3', -3.17430E1)
C.set_parameter('B97_ss_a4', 1.72901E1)
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.4)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.157706)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_wb97xd_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wB97X-D')
# Tab in, trailing newlines
sup.set_description(' Parameterized Hybrid LRC B97 GGA XC Functional with Dispersion\n')
# Tab in, trailing newlines
sup.set_citation(' J.-D. Chai and M. Head-Gordon, Phys. Chem. Chem. Phys., 10, 6615-6620, 2008\n')
# Add member functionals
alpha = 2.22036E-1
omega = 0.2
X = build_functional('wB97_X')
X.set_name('wB97X_X')
X.set_alpha(1.0 / (1.0 - alpha))
X.set_parameter('B97_gamma', 0.004)
X.set_parameter('B97_a0', 7.77964E-1) # Table 1: c_{x\sigma,0}
X.set_parameter('B97_a1', 6.61160E-1) # Table 1: c_{x\sigma,1}
X.set_parameter('B97_a2', 5.74541E-1) # Table 1: c_{x\sigma,2}
X.set_parameter('B97_a3', -5.25671E0) # Table 1: c_{x\sigma,3}
X.set_parameter('B97_a4', 1.16386E1) # Table 1: c_{x\sigma,4}
C = build_functional('B_C')
C.set_name('wB97X_C')
C.set_parameter('B97_os_gamma', 0.006)
C.set_parameter('B97_os_a0', 1.0) # Table 1: c_{c\alpha\beta,0}
C.set_parameter('B97_os_a1', 1.79413E0) # Table 1: c_{c\alpha\beta,1}
C.set_parameter('B97_os_a2', -1.20477E1) # Table 1: c_{c\alpha\beta,2}
C.set_parameter('B97_os_a3', 1.40847E1) # Table 1: c_{c\alpha\beta,3}
C.set_parameter('B97_os_a4', -8.50809E0) # Table 1: c_{c\alpha\beta,4}
C.set_parameter('B97_ss_gamma', 0.2)
C.set_parameter('B97_ss_a0', 1.0) # Table 1: c_{c\sigma\sigma,0}
C.set_parameter('B97_ss_a1', -6.90539E0) # Table 1: c_{c\sigma\sigma,1}
C.set_parameter('B97_ss_a2', 3.13343E1) # Table 1: c_{c\sigma\sigma,2}
C.set_parameter('B97_ss_a3', -5.10533E1) # Table 1: c_{c\sigma\sigma,3}
C.set_parameter('B97_ss_a4', 2.64423E1) # Table 1: c_{c\sigma\sigma,4}
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(omega) # Table 1: omega
sup.set_c_omega(0.0)
sup.set_x_alpha(alpha) # Table 1: c_x
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, ('wB97', '-CHG'))
def build_m05_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('M05')
# Tab in, trailing newlines
sup.set_description(' Heavily Parameterized Hybrid Meta-GGA XC Functional\n')
# Tab in, trailing newlines
sup.set_citation(' Zhao et. al., J. Chem. Phys., 123, 161103, 2005\n')
# Add member functionals
X = build_functional('M_X')
X.set_name('M05_X')
X.set_alpha(1.0)
# LSDA Exchange type is Slater, no parameters
# GGA Exchange type is PBE, special parameters because Truhlar is lazy
C1 = 3.36116E-3
C2 = 4.49267E-3
K0 = 3.0 / 2.0 * math.pow(3.0 / (math.pi * 4.0), 1.0 / 3.0)
k0 = math.pow(6.0 * math.pi * math.pi, 1.0 / 3.0)
kp = C1 / (C2 * K0)
mu = 4.0 * k0 * k0 * kp * C2
X.set_parameter('PBE_kp', kp) # Different effective kp
X.set_parameter('PBE_mu', mu) # Different effective mu
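# The kp and mu above enter the usual PBE exchange enhancement factor
# F(s) = 1 + kp - kp / (1 + mu*s^2 / kp); they are rederived from the C1, C2
# constants so the GGA part reproduces the modified-PBE form used in M05
# rather than the original PBE values.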
# Meta Exchange type is insane mess of w power series expansion
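# Here w is the Minnesota kinetic-energy-density variable,
# w = (t - 1) / (t + 1) with t = tau_LSDA / tau, and the Meta_a* values below
# are the coefficients of the power series in w (Zhao et al., 2005).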
X.set_parameter('Meta_a0', 1.0)
X.set_parameter('Meta_a1', 0.08151)
X.set_parameter('Meta_a2', -0.43956)
X.set_parameter('Meta_a3', -3.22422)
X.set_parameter('Meta_a4', 2.01819)
X.set_parameter('Meta_a5', 8.79431)
X.set_parameter('Meta_a6', -0.00295)
X.set_parameter('Meta_a7', 9.82029)
X.set_parameter('Meta_a8', -4.82351)
X.set_parameter('Meta_a9', -48.17574)
X.set_parameter('Meta_a10', 3.64802)
X.set_parameter('Meta_a11', 34.02248)
C = build_functional('M_C')
C.set_name('M05_C')
# LSDA Correlation type is PW92, no parameters
# GGA Correlation type is B97
C.set_parameter('B97_os_gamma', 0.0031 * 2.0)
C.set_parameter('B97_os_a0', 1.0)
C.set_parameter('B97_os_a1', 3.78569)
C.set_parameter('B97_os_a2', -14.15261)
C.set_parameter('B97_os_a3', -7.46589)
C.set_parameter('B97_os_a4', 17.94491)
C.set_parameter('B97_ss_gamma', 0.06)
C.set_parameter('B97_ss_a0', 1.0)
C.set_parameter('B97_ss_a1', 3.77344)
C.set_parameter('B97_ss_a2', -26.04463)
C.set_parameter('B97_ss_a3', 30.69913)
C.set_parameter('B97_ss_a4', -9.22695)
# Meta Correlation type is Becke metric, no parameters
# Add the functionals in
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.28) # Hartree-Fock exact exchange
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_m05_2x_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('M05-2X')
# Tab in, trailing newlines
sup.set_description(' Heavily Parameterized Hybrid Meta-GGA XC Functional\n')
# Tab in, trailing newlines
sup.set_citation(' Zhao et. al., J. Chem. Theory Comput., 2, 364, 2006\n')
# Add member functionals
X = build_functional('M_X')
X.set_name('M05_2X_X')
X.set_alpha(1.0)
# LSDA Exchange type is Slater, no parameters
# GGA Exchange type is PBE, special parameters because Truhlar is lazy
C1 = 3.36116E-3
C2 = 4.49267E-3
K0 = 3.0 / 2.0 * math.pow(3.0 / (math.pi * 4.0), 1.0 / 3.0)
k0 = math.pow(6.0 * math.pi * math.pi, 1.0 / 3.0)
kp = C1 / (C2 * K0)
mu = 4.0 * k0 * k0 * kp * C2
X.set_parameter('PBE_kp', kp)
X.set_parameter('PBE_mu', mu)
# Meta Exchange type is insane mess of w power series expansion
X.set_parameter('Meta_a0', 1.0)
X.set_parameter('Meta_a1', -0.56833)
X.set_parameter('Meta_a2', -1.30057)
X.set_parameter('Meta_a3', 5.50070)
X.set_parameter('Meta_a4', 9.06402)
X.set_parameter('Meta_a5', -32.21075)
X.set_parameter('Meta_a6', -23.73298)
X.set_parameter('Meta_a7', 70.22996)
X.set_parameter('Meta_a8', 29.88614)
X.set_parameter('Meta_a9', -60.25778)
X.set_parameter('Meta_a10', -13.22205)
X.set_parameter('Meta_a11', 15.23694)
C = build_functional('M_C')
C.set_name('M05_2X_C')
# LSDA Correlation type is PW92, no parameters
# GGA Correlation type is B97
C.set_parameter('B97_os_gamma', 0.0031 * 2.0)
C.set_parameter('B97_os_a0', 1.00000)
C.set_parameter('B97_os_a1', 1.09297)
C.set_parameter('B97_os_a2', -3.79171)
C.set_parameter('B97_os_a3', 2.82810)
C.set_parameter('B97_os_a4', -10.58909)
C.set_parameter('B97_ss_gamma', 0.06)
C.set_parameter('B97_ss_a0', 1.00000)
C.set_parameter('B97_ss_a1', -3.05430)
C.set_parameter('B97_ss_a2', 7.61854)
C.set_parameter('B97_ss_a3', 1.47665)
C.set_parameter('B97_ss_a4', -11.92365)
# Meta Correlation type is Becke metric, no parameters
# Add the functionals in
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.56) # Hartree-Fock exact exchange
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_dldf_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('dlDF')
# Tab in, trailing newlines
sup.set_description(' Dispersionless Hybrid Meta-GGA XC Functional\n')
# Tab in, trailing newlines
sup.set_citation(' Pernal et. al., Phys. Rev. Lett., 103, 263201, 2009\n')
# Add member functionals
X = build_functional('M_X')
X.set_name('dlDF_X')
X.set_alpha(1.0)
# LSDA Exchange type is Slater, no parameters
# GGA Exchange type is PBE
kp = 4.8827323
mu = 0.3511128
X.set_parameter('PBE_kp', kp)
X.set_parameter('PBE_mu', mu)
# Meta Exchange is a reparametrized truncation of Truhlar's functional
X.set_parameter('Meta_a0', 1.0)
X.set_parameter('Meta_a1', -0.1637571)
X.set_parameter('Meta_a2', -0.1880028)
X.set_parameter('Meta_a3', -0.4490609)
X.set_parameter('Meta_a4', -0.0082359)
C = build_functional('M_C')
C.set_name('dlDF_C')
# LSDA Correlation type is PW92, no parameters
# GGA Correlation type is B97
C.set_parameter('B97_os_gamma', 0.0031 * 2.0)
C.set_parameter('B97_os_a0', 1.00000)
C.set_parameter('B97_os_a1', 5.9515308)
C.set_parameter('B97_os_a2', -11.1602877)
C.set_parameter('B97_ss_gamma', 0.06)
C.set_parameter('B97_ss_a0', 1.00000)
C.set_parameter('B97_ss_a1', -2.5960897)
C.set_parameter('B97_ss_a2', 2.2233793)
# Meta Correlation type is Becke metric, no parameters
# Add the functionals in
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.6144129) # Hartree-Fock exact exchange
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_dldfd09_superfunctional(name, npoints, deriv):
sup, disp = build_dldf_superfunctional(name, npoints, deriv)
sup.set_name('dlDF+D09')
return (sup, ('dlDF', '-DAS2009'))
def build_dldfd10_superfunctional(name, npoints, deriv):
sup, disp = build_dldf_superfunctional(name, npoints, deriv)
sup.set_name('dlDF+D')
return (sup, ('dlDF', '-DAS2010'))
def build_hfd_superfunctional(name, npoints, deriv):
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
sup.set_name('HF+D')
sup.set_x_alpha(1.0)
sup.allocate()
return (sup, ('HF', '-DAS2010'))
def build_b2plyp_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('B2PLYP')
# Tab in, trailing newlines
sup.set_description(' B2PLYP Double Hybrid Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' S. Grimme, J. Chem. Phys., 124, 034108, 2006\n')
# Add member functionals
becke = build_functional('B88_X')
becke.set_alpha(1.0)
sup.add_x_functional(becke)
lyp = build_functional('LYP_C')
lyp.set_alpha(1.0)
sup.add_c_functional(lyp)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.53)
sup.set_c_alpha(0.27)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_wb97x_2tqz_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wB97X-2(TQZ)')
# Tab in, trailing newlines
sup.set_description(' Double Hybrid LRC B97 GGA XC Functional (TQZ parametrization)\n')
# Tab in, trailing newlines
sup.set_citation(' J.-D. Chai and M. Head-Gordon, J. Chem. Phys., 131, 174105, 2009\n')
# Add member functionals
X = build_functional('wB97_X')
X.set_name('wB97X_X')
X.set_alpha(1.0 / (1.0 - 0.636158))
X.set_parameter('B97_gamma', 0.004)
X.set_parameter('B97_a0', 3.15503E-1)
X.set_parameter('B97_a1', 1.04772E0)
X.set_parameter('B97_a2', -2.33506E0)
X.set_parameter('B97_a3', 3.19909E0)
C = build_functional('B_C')
C.set_name('wB97X_C')
C.set_parameter('B97_os_gamma', 0.006)
C.set_parameter('B97_os_a0', 5.18198E-1)
C.set_parameter('B97_os_a1', -5.85956E-1)
C.set_parameter('B97_os_a2', 4.27080E0)
C.set_parameter('B97_os_a3', -6.48897E0)
C.set_parameter('B97_ss_gamma', 0.2)
C.set_parameter('B97_ss_a0', 9.08460E-1)
C.set_parameter('B97_ss_a1', -2.80936E0)
C.set_parameter('B97_ss_a2', 6.02676E0)
C.set_parameter('B97_ss_a3', -4.56981E0)
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.3)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.636158)
sup.set_c_alpha(1.0)
sup.set_c_os_alpha(0.447105)
sup.set_c_ss_alpha(0.529319)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_wb97x_2lp_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('wB97X-2(LP)')
# Tab in, trailing newlines
sup.set_description(' Double Hybrid LRC B97 GGA XC Functional (Large Pople parametrization)\n')
# Tab in, trailing newlines
sup.set_citation(' J.-D. Chai and M. Head-Gordon, J. Chem. Phys., 131, 174105, 2009\n')
# Add member functionals
X = build_functional('wB97_X')
X.set_name('wB97X_X')
X.set_alpha(1.0 / (1.0 - 0.678792))
X.set_parameter('B97_gamma', 0.004)
X.set_parameter('B97_a0', 2.51767E-1)
X.set_parameter('B97_a1', 1.57375E0)
X.set_parameter('B97_a2', -5.26624E0)
X.set_parameter('B97_a3', 6.74313E0)
C = build_functional('B_C')
C.set_name('wB97X_C')
C.set_parameter('B97_os_gamma', 0.006)
C.set_parameter('B97_os_a0', 5.53261E-1)
C.set_parameter('B97_os_a1', -1.16626E0)
C.set_parameter('B97_os_a2', 6.84409E0)
C.set_parameter('B97_os_a3', -8.90640E0)
C.set_parameter('B97_ss_gamma', 0.2)
C.set_parameter('B97_ss_a0', 1.15698E0)
C.set_parameter('B97_ss_a1', -3.31669E0)
C.set_parameter('B97_ss_a2', 6.27265E0)
C.set_parameter('B97_ss_a3', -4.51464E0)
sup.add_x_functional(X)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.3)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.678792)
sup.set_c_alpha(1.0)
sup.set_c_os_alpha(0.477992)
sup.set_c_ss_alpha(0.581569)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_pbe0_2_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('PBE0-2')
# Tab in, trailing newlines
sup.set_description('    PBE0-2 Double Hybrid Exchange-Correlation Functional\n')
# Tab in, trailing newlines
sup.set_citation(' J. Chai, Chem. Phys. Lett., 538, 121-125, 2012\n')
# Add member functionals
X = build_functional('PBE_X')
X.set_alpha(1.0)
sup.add_x_functional(X)
C = build_functional('PBE_C')
C.set_alpha(1.0)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.793701)
sup.set_c_alpha(0.5)
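# Note: 0.793701 is 2**(-1.0/3.0), the exact-exchange fraction prescribed by the
# PBE0-2 construction; half of the correlation (c_alpha = 0.5) comes from the
# PT2-like term.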
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_dsd_blyp_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('DSD-BLYP')
# Tab in, trailing newlines
sup.set_description(' DSD-BLYP Dispersion-corrected SCS Double Hybrid XC Functional\n')
# Tab in, trailing newlines
sup.set_citation(' S. Kozuch, Phys. Chem. Chem. Phys., 13, 20104, 2011\n')
# Add member functionals
X = build_functional('B88_X')
X.set_alpha(1.0)
sup.add_x_functional(X)
C = build_functional('LYP_C')
C.set_alpha(0.55)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.71)
sup.set_c_alpha(1.0)
sup.set_c_os_alpha(0.46)
sup.set_c_ss_alpha(0.43)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_dsd_pbep86_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('DSD-PBEP86')
# Tab in, trailing newlines
sup.set_description(' DSD-PBEP86 Dispersion-corrected SCS Double Hybrid XC Functional (opt. for -D2)\n')
# Tab in, trailing newlines
sup.set_citation(' S. Kozuch, Phys. Chem. Chem. Phys., 13, 20104, 2011\n')
# Add member functionals
X = build_functional('PBE_X')
X.set_alpha(1.0)
sup.add_x_functional(X)
C = build_functional('P86_C')
C.set_alpha(0.45)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.68)
sup.set_c_alpha(1.0)
sup.set_c_ss_alpha(0.23)
sup.set_c_os_alpha(0.51)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_dsd_pbepbe_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('DSD-PBEPBE')
# Tab in, trailing newlines
sup.set_description(' DSD-PBEPBE Dispersion-corrected SCS Double Hybrid XC Functional\n')
# Tab in, trailing newlines
sup.set_citation(' S. Kozuch, Phys. Chem. Chem. Phys., 13, 20104, 2011\n')
# Add member functionals
X = build_functional('PBE_X')
X.set_alpha(1.0)
sup.add_x_functional(X)
C = build_functional('PBE_C')
C.set_alpha(0.51)
sup.add_c_functional(C)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.66)
sup.set_c_alpha(1.0)
sup.set_c_ss_alpha(0.12)
sup.set_c_os_alpha(0.53)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_primitive_superfunctional(name, npoints, deriv):
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
key = name.upper()
fun = build_functional(key)
# No spaces, keep it short and according to convention
sup.set_name(key)
# Tab in, trailing newlines
sup.set_description(fun.description())
# Tab in, trailing newlines
sup.set_citation(fun.citation())
# Add member functionals
if (key[-1] == 'X'):
sup.add_x_functional(fun)
else:
sup.add_c_functional(fun)
# Set GKS up after adding functionals
sup.set_x_omega(0.0)
sup.set_c_omega(0.0)
sup.set_x_alpha(0.0)
sup.set_c_alpha(0.0)
# => End User-Customization <= #
# Call this last
sup.allocate()
return (sup, False)
def build_hf_superfunctional(name, npoints, deriv):
# Special "functional" that is simply Hartree Fock
# Call this first
sup = core.SuperFunctional.blank()
sup.set_max_points(npoints)
sup.set_deriv(deriv)
# => User-Customization <= #
# No spaces, keep it short and according to convention
sup.set_name('HF')
# Tab in, trailing newlines
sup.set_description('    Hartree-Fock as Roothaan prescribed\n')
# Tab in, trailing newlines
sup.set_citation(' \n')
# 100% exact exchange
sup.set_x_alpha(1.0)
# Zero out other GKS
sup.set_c_omega(0.0)
sup.set_x_omega(0.0)
sup.set_c_alpha(0.0)
# Don't allocate; there are no member functionals
return (sup, False)
# Superfunctional lookup table
superfunctionals = {
'hf' : build_hf_superfunctional,
'hf+d' : build_hfd_superfunctional,
's_x' : build_primitive_superfunctional,
'b88_x' : build_primitive_superfunctional,
'b3_x' : build_primitive_superfunctional,
'pbe_x' : build_primitive_superfunctional,
'rpbe_x' : build_primitive_superfunctional,
'sogga_x' : build_primitive_superfunctional,
'pbesol_x' : build_primitive_superfunctional,
'pw91_x' : build_primitive_superfunctional,
'ws_x' : build_ws_x_superfunctional,
'wpbe_x' : build_wpbe_x_superfunctional,
'wpbesol_x' : build_wpbesol_x_superfunctional,
'wb88_x' : build_wb88_x_superfunctional,
'lyp_c' : build_primitive_superfunctional,
'ft97b_x' : build_primitive_superfunctional,
'pz81_c' : build_primitive_superfunctional,
'p86_c' : build_primitive_superfunctional,
'pw91_c' : build_primitive_superfunctional,
'pw92_c' : build_primitive_superfunctional,
'pbe_c' : build_primitive_superfunctional,
'ft97_c' : build_primitive_superfunctional,
'vwn5rpa_c' : build_primitive_superfunctional,
'vwn5_c' : build_primitive_superfunctional,
'vwn3rpa_c' : build_primitive_superfunctional,
'vwn3_c' : build_primitive_superfunctional,
'svwn' : build_svwn_superfunctional,
'blyp' : build_blyp_superfunctional,
'b86bpbe' : build_b86bpbe_superfunctional,
'pw86pbe' : build_pw86pbe_superfunctional,
'bp86' : build_bp86_superfunctional,
'pw91' : build_pw91_superfunctional,
'pbe' : build_pbe_superfunctional,
'ft97' : build_ft97_superfunctional,
'b3lyp' : build_b3lyp_superfunctional,
'b3lyp5' : build_b3lyp5_superfunctional,
'hf_x' : build_hf_x_superfunctional,
'pbe0' : build_pbe0_superfunctional,
'b97-0' : build_b970_superfunctional,
'b97-1' : build_b971_superfunctional,
'b97-2' : build_b972_superfunctional,
'b97-d' : build_b97d_superfunctional,
'hcth' : build_hcth_superfunctional,
'hcth120' : build_hcth120_superfunctional,
'hcth147' : build_hcth147_superfunctional,
'hcth407' : build_hcth407_superfunctional,
'wsvwn' : build_wsvwn_superfunctional,
'wpbe' : build_wpbe_superfunctional,
'wpbe0' : build_wpbe0_superfunctional,
'wpbesol' : build_wpbesol_superfunctional,
'wpbesol0' : build_wpbesol0_superfunctional,
'wblyp' : build_wblyp_superfunctional,
'wb97' : build_wb97_superfunctional,
'wb97x' : build_wb97x_superfunctional,
'wb97x-d' : build_wb97xd_superfunctional,
'm05' : build_m05_superfunctional,
'm05-2x' : build_m05_2x_superfunctional,
'dldf' : build_dldf_superfunctional,
'dldf+d09' : build_dldfd09_superfunctional,
'dldf+d' : build_dldfd10_superfunctional,
'sogga' : build_sogga_superfunctional,
'b2plyp' : build_b2plyp_superfunctional,
#'wb97x-2(tqz)' : build_wb97x_2tqz_superfunctional, # removed 26 Feb 2014 pending better handling of SS/OS DH coeff
#'wb97x-2(lp)' : build_wb97x_2lp_superfunctional, # removed 26 Feb 2014 pending better handling of SS/OS DH coeff
'pbe0-2' : build_pbe0_2_superfunctional,
#'dsd-blyp' : build_dsd_blyp_superfunctional, # -D variants still need to be added # removed 26 Feb 2014 pending better handling of SS/OS DH coeff
#'dsd-pbep86' : build_dsd_pbep86_superfunctional, # removed 26 Feb 2014 pending better handling of SS/OS DH coeff
#'dsd-pbepbe' : build_dsd_pbepbe_superfunctional, # removed 26 Feb 2014 pending better handling of SS/OS DH coeff
'pbea_c' : build_primitive_superfunctional,
'pw92a_c' : build_primitive_superfunctional,
'wpbe_c' : build_wpbe_c_superfunctional,
'wpw92_c' : build_wpw92_c_superfunctional,
'wpbe2' : build_wpbe2_superfunctional,
}
## Build up the list of functionals we can compute
# Add in plain values
superfunctional_list = []
for key in superfunctionals.keys():
sup = superfunctionals[key](key, 1, 1)[0]
superfunctional_list.append(sup)
# Figure out what Grimme functionals we have
p4_funcs = set(superfunctionals.keys())
p4_funcs -= set(['b97-d'])
for dashlvl, dash_coeffs in dftd3.dashcoeff.items():
func_list = (set(dash_coeffs.keys()) & p4_funcs)
for func in func_list:
sup = superfunctionals[func](func, 1, 1)[0]
sup.set_name(sup.name() + '-' + dashlvl.upper())
superfunctional_list.append(sup)
if dashlvl == 'd2p4':
# -D2 overide
sup = superfunctionals[func](func, 1, 1)[0]
sup.set_name(sup.name() + '-D2')
superfunctional_list.append(sup)
# -D override
sup = superfunctionals[func](func, 1, 1)[0]
sup.set_name(sup.name() + '-D')
superfunctional_list.append(sup)
if dashlvl == 'd3zero':
sup = superfunctionals[func](func, 1, 1)[0]
sup.set_name(sup.name() + '-D3')
superfunctional_list.append(sup)
if dashlvl == 'd3mzero':
sup = superfunctionals[func](func, 1, 1)[0]
sup.set_name(sup.name() + '-D3M')
superfunctional_list.append(sup)
# B97D is an odd one
for dashlvl in dftd3.full_dash_keys:
if dashlvl == 'd2p4': continue
sup = superfunctionals['b97-d']('b97-d', 1, 1)[0]
sup.set_name('B97-' + dashlvl.upper())
superfunctional_list.append(sup)
# wPBE, grr need a new scheme
for dashlvl in ['d3', 'd3m', 'd3zero', 'd3mzero', 'd3bj', 'd3mbj']:
sup = superfunctionals['wpbe']('wpbe', 1, 1)[0]
sup.set_name(sup.name() + '-' + dashlvl.upper())
superfunctional_list.append(sup)
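# Illustrative sketch (the exact names depend on which dashlvl keys are present
# in dftd3.dashcoeff): the loops above add dispersion-suffixed variants such as
# 'B3LYP-D2P4', 'B3LYP-D2', 'B3LYP-D3ZERO', 'B3LYP-D3', 'B97-D3BJ' and
# 'wPBE-D3BJ' to superfunctional_list, alongside the plain functionals.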
def build_superfunctional(alias):
name = alias.lower()
npoints = core.get_option("SCF", "DFT_BLOCK_MAX_POINTS")
deriv = 1 # Default depth for now
# Grab out superfunctional
if name in ["gen", ""]:
sup = (core.get_option("DFT_CUSTOM_FUNCTIONAL"), False)
if not isinstance(sup[0], core.SuperFunctional):
raise KeyError("SCF: Custom Functional requested, but nothing provided in DFT_CUSTOM_FUNCTIONAL")
elif name in superfunctionals.keys():
sup = superfunctionals[name](name, npoints, deriv)
elif any(name.endswith(al) for al in dftd3.full_dash_keys):
# Odd hack for b97-d
if 'b97-d' in name:
name = name.replace('b97', 'b97-d')
dashparam = [x for x in dftd3.full_dash_keys if name.endswith(x)]
if len(dashparam) > 1:
raise Exception("Dashparam %s is ambiguous." % name)
else:
dashparam = dashparam[0]
base_name = name.replace('-' + dashparam, '')
if dashparam in ['d2', 'd']:
dashparam = 'd2p4'
if dashparam == 'd3':
dashparam = 'd3zero'
if dashparam == 'd3m':
dashparam = 'd3mzero'
if base_name not in superfunctionals.keys():
raise KeyError("SCF: Functional (%s) with base (%s) not found!" % (alias, base_name))
func = superfunctionals[base_name](base_name, npoints, deriv)[0]
base_name = base_name.replace('wpbe', 'lcwpbe')
sup = (func, (base_name, dashparam))
else:
raise KeyError("SCF: Functional (%s) not found!" % alias)
# Set options
if core.has_option_changed("SCF", "DFT_OMEGA") and sup[0].is_x_lrc():
sup[0].set_x_omega(core.get_option("SCF", "DFT_OMEGA"))
if core.has_option_changed("SCF", "DFT_OMEGA_C") and sup[0].is_c_lrc():
sup[0].set_c_omega(core.get_option("SCF", "DFT_OMEGA_C"))
if core.has_option_changed("SCF", "DFT_ALPHA"):
sup[0].set_x_alpha(core.get_option("SCF", "DFT_ALPHA"))
if core.has_option_changed("SCF", "DFT_ALPHA_C"):
sup[0].set_c_alpha(core.get_option("SCF", "DFT_ALPHA_C"))
# Check SCF_TYPE
if sup[0].is_x_lrc() and (core.get_option("SCF", "SCF_TYPE") not in ["DIRECT", "DF", "OUT_OF_CORE", "PK"]):
raise KeyError("SCF: SCF_TYPE (%s) not supported for range-separated functionals."
% core.get_option("SCF", "SCF_TYPE"))
return sup
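# Minimal usage sketch (assumes the usual Psi4 option setup so that
# DFT_BLOCK_MAX_POINTS, and DFT_CUSTOM_FUNCTIONAL for 'gen', are available):
#
#   sup, dispersion = build_superfunctional('b3lyp-d3')
#   # sup is a core.SuperFunctional; dispersion is either False or a
#   # (base_name, dashlvl) tuple, here ('b3lyp', 'd3zero'), handed to the
#   # dispersion-correction handling downstream.
#   core.print_out(sup.name() + '\n')   # 'B3LYP'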
def test_ccl_functional(functional, ccl_functional):
check = True
if (not os.path.exists('data_pt_%s.html' % (ccl_functional))):
os.system('wget ftp://ftp.dl.ac.uk/qcg/dft_library/data_pt_%s.html' % ccl_functional)
fh = open('data_pt_%s.html' % (ccl_functional))
lines = fh.readlines()
fh.close()
points = []
point = {}
rho_line = re.compile(r'^\s*rhoa=\s*(-?\d+\.\d+E[+-]\d+)\s*rhob=\s*(-?\d+\.\d+E[+-]\d+)\s*sigmaaa=\s*(-?\d+\.\d+E[+-]\d+)\s*sigmaab=\s*(-?\d+\.\d+E[+-]\d+)\s*sigmabb=\s*(-?\d+\.\d+E[+-]\d+)\s*')
val_line = re.compile(r'^\s*(\w*)\s*=\s*(-?\d+\.\d+E[+-]\d+)')
aliases = { 'zk' : 'v',
'vrhoa' : 'v_rho_a',
'vrhob' : 'v_rho_b',
'vsigmaaa' : 'v_gamma_aa',
'vsigmaab' : 'v_gamma_ab',
'vsigmabb' : 'v_gamma_bb',
'v2rhoa2' : 'v_rho_a_rho_a',
'v2rhoab' : 'v_rho_a_rho_b',
'v2rhob2' : 'v_rho_b_rho_b',
'v2rhoasigmaaa' : 'v_rho_a_gamma_aa',
'v2rhoasigmaab' : 'v_rho_a_gamma_ab',
'v2rhoasigmabb' : 'v_rho_a_gamma_bb',
'v2rhobsigmaaa' : 'v_rho_b_gamma_aa',
'v2rhobsigmaab' : 'v_rho_b_gamma_ab',
'v2rhobsigmabb' : 'v_rho_b_gamma_bb',
'v2sigmaaa2' : 'v_gamma_aa_gamma_aa',
'v2sigmaaaab' : 'v_gamma_aa_gamma_ab',
'v2sigmaaabb' : 'v_gamma_aa_gamma_bb',
'v2sigmaab2' : 'v_gamma_ab_gamma_ab',
'v2sigmaabbb' : 'v_gamma_ab_gamma_bb',
'v2sigmabb2' : 'v_gamma_bb_gamma_bb',
}
for line in lines:
mobj = re.match(rho_line, line)
if (mobj):
if len(point):
points.append(point)
point = {}
point['rho_a'] = float(mobj.group(1))
point['rho_b'] = float(mobj.group(2))
point['gamma_aa'] = float(mobj.group(3))
point['gamma_ab'] = float(mobj.group(4))
point['gamma_bb'] = float(mobj.group(5))
continue
mobj = re.match(val_line, line)
if (mobj):
point[aliases[mobj.group(1)]] = float(mobj.group(2))
points.append(point)
N = len(points)
rho_a = core.Vector(N)
rho_b = core.Vector(N)
gamma_aa = core.Vector(N)
gamma_ab = core.Vector(N)
gamma_bb = core.Vector(N)
tau_a = core.Vector(N)
tau_b = core.Vector(N)
index = 0
for point in points:
rho_a[index] = point['rho_a']
rho_b[index] = point['rho_b']
gamma_aa[index] = point['gamma_aa']
gamma_ab[index] = point['gamma_ab']
gamma_bb[index] = point['gamma_bb']
index = index + 1
# build_superfunctional() now takes only the functional name; npoints and deriv
# are determined internally (e.g. from DFT_BLOCK_MAX_POINTS).
super = build_superfunctional(functional)[0]
super.test_functional(rho_a, rho_b, gamma_aa, gamma_ab, gamma_bb, tau_a, tau_b)
v = super.value('V')
v_rho_a = super.value('V_RHO_A')
v_rho_b = super.value('V_RHO_B')
v_gamma_aa = super.value('V_GAMMA_AA')
v_gamma_ab = super.value('V_GAMMA_AB')
v_gamma_bb = super.value('V_GAMMA_BB')
if not v_gamma_aa:
v_gamma_aa = tau_a
v_gamma_ab = tau_a
v_gamma_bb = tau_a
tasks = ['v', 'v_rho_a', 'v_rho_b', 'v_gamma_aa', 'v_gamma_ab', 'v_gamma_bb']
mapping = {
'v': v,
'v_rho_a': v_rho_a,
'v_rho_b': v_rho_b,
'v_gamma_aa': v_gamma_aa,
'v_gamma_ab': v_gamma_ab,
'v_gamma_bb': v_gamma_bb,
}
super.print_detail(3)
index = 0
for point in points:
core.print_out('rho_a= %11.3E, rho_b= %11.3E, gamma_aa= %11.3E, gamma_ab= %11.3E, gamma_bb= %11.3E\n' % (rho_a[index], rho_b[index], gamma_aa[index], gamma_ab[index], gamma_bb[index]))
for task in tasks:
v_ref = point[task]
v_obs = mapping[task][index]
delta = v_obs - v_ref
if (v_ref == 0.0):
epsilon = 0.0
else:
epsilon = abs(delta / v_ref)
if (epsilon < 1.0E-11):
passed = 'PASSED'
else:
passed = 'FAILED'
check = False
core.print_out('\t%-15s %24.16E %24.16E %24.16E %24.16E %6s\n' % (task, v_ref, v_obs, delta, epsilon, passed))
index = index + 1
core.print_out('\n')
return check
|
kannon92/psi4
|
psi4/driver/procedures/dft_functional.py
|
Python
|
gpl-2.0
| 96,317
|
[
"Psi4"
] |
810d3e566581f541ce5d0a1c7869284ab62487c8bdbaec0b4a6da7fbcc41bed1
|
# slicer imports
from __main__ import vtk, qt, ctk, slicer
# vmtk includes
import SlicerVmtkCommonLib
#
# Vesselness Filtering using VMTK based Tools
#
class VesselnessFiltering:
def __init__( self, parent ):
parent.title = "Vesselness Filtering"
parent.categories = ["Vascular Modeling Toolkit", ]
parent.contributors = ["Daniel Haehn (Boston Children's Hospital)", "Luca Antiga (Orobix)", "Steve Pieper (Isomics)"]
parent.helpText = """Compute a vesselness-filtered volume from an input scalar volume using VMTK-based tools. A seed fiducial placed in the largest vessel can be used to detect the filter parameters automatically."""
parent.acknowledgementText = """This module was developed for the Vascular Modeling Toolkit (VMTK) integration in 3D Slicer."""
self.parent = parent
class VesselnessFilteringWidget:
def __init__( self, parent=None ):
if not parent:
self.parent = slicer.qMRMLWidget()
self.parent.setLayout( qt.QVBoxLayout() )
self.parent.setMRMLScene( slicer.mrmlScene )
else:
self.parent = parent
self.layout = self.parent.layout()
# this flag is 1 if there is an update in progress
self.__updating = 1
# the pointer to the logic
self.__logic = None
if not parent:
self.setup()
self.__inputVolumeNodeSelector.setMRMLScene( slicer.mrmlScene )
self.__seedFiducialsNodeSelector.setMRMLScene( slicer.mrmlScene )
self.__outputVolumeNodeSelector.setMRMLScene( slicer.mrmlScene )
self.__previewVolumeNodeSelector.setMRMLScene( slicer.mrmlScene )
# after setup, be ready for events
self.__updating = 0
self.parent.show()
# register default slots
self.parent.connect( 'mrmlSceneChanged(vtkMRMLScene*)', self.onMRMLSceneChanged )
def GetLogic( self ):
'''
'''
if not self.__logic:
self.__logic = SlicerVmtkCommonLib.VesselnessFilteringLogic()
return self.__logic
def setup( self ):
# check if the SlicerVmtk module is installed properly
# self.__vmtkInstalled = SlicerVmtkCommonLib.Helper.CheckIfVmtkIsInstalled()
# Helper.Debug("VMTK found: " + self.__vmtkInstalled)
#
# the I/O panel
#
ioCollapsibleButton = ctk.ctkCollapsibleButton()
ioCollapsibleButton.text = "Input/Output"
self.layout.addWidget( ioCollapsibleButton )
ioFormLayout = qt.QFormLayout( ioCollapsibleButton )
# inputVolume selector
self.__inputVolumeNodeSelector = slicer.qMRMLNodeComboBox()
self.__inputVolumeNodeSelector.objectName = 'inputVolumeNodeSelector'
self.__inputVolumeNodeSelector.toolTip = "Select the input volume."
self.__inputVolumeNodeSelector.nodeTypes = ['vtkMRMLScalarVolumeNode']
self.__inputVolumeNodeSelector.noneEnabled = False
self.__inputVolumeNodeSelector.addEnabled = False
self.__inputVolumeNodeSelector.removeEnabled = False
self.__inputVolumeNodeSelector.addAttribute( "vtkMRMLScalarVolumeNode", "LabelMap", "0" )
ioFormLayout.addRow( "Input Volume:", self.__inputVolumeNodeSelector )
self.parent.connect( 'mrmlSceneChanged(vtkMRMLScene*)',
self.__inputVolumeNodeSelector, 'setMRMLScene(vtkMRMLScene*)' )
self.__inputVolumeNodeSelector.connect( 'currentNodeChanged(vtkMRMLNode*)', self.onInputVolumeChanged )
# seed selector
self.__seedFiducialsNodeSelector = slicer.qMRMLNodeComboBox()
self.__seedFiducialsNodeSelector.objectName = 'seedFiducialsNodeSelector'
self.__seedFiducialsNodeSelector.toolTip = "Select a fiducial to use as a Seed to detect the maximal diameter."
self.__seedFiducialsNodeSelector.nodeTypes = ['vtkMRMLMarkupsFiducialNode']
self.__seedFiducialsNodeSelector.baseName = "DiameterSeed"
self.__seedFiducialsNodeSelector.noneEnabled = False
self.__seedFiducialsNodeSelector.addEnabled = False
self.__seedFiducialsNodeSelector.removeEnabled = False
ioFormLayout.addRow( "Seed in largest Vessel:", self.__seedFiducialsNodeSelector )
self.parent.connect( 'mrmlSceneChanged(vtkMRMLScene*)',
self.__seedFiducialsNodeSelector, 'setMRMLScene(vtkMRMLScene*)' )
self.__seedFiducialsNodeSelector.connect( 'currentNodeChanged(vtkMRMLNode*)', self.onSeedChanged )
self.__ioAdvancedToggle = qt.QCheckBox( "Show Advanced Properties" )
self.__ioAdvancedToggle.setChecked( False )
ioFormLayout.addRow( self.__ioAdvancedToggle )
#
# I/O advanced panel
#
self.__ioAdvancedPanel = qt.QFrame( ioCollapsibleButton )
self.__ioAdvancedPanel.hide()
self.__ioAdvancedPanel.setFrameStyle( 6 )
ioFormLayout.addRow( self.__ioAdvancedPanel )
self.__ioAdvancedToggle.connect( "clicked()", self.onIOAdvancedToggle )
ioAdvancedFormLayout = qt.QFormLayout( self.__ioAdvancedPanel )
# lock button
self.__detectPushButton = qt.QPushButton()
self.__detectPushButton.text = "Detect parameters automatically"
self.__detectPushButton.checkable = True
self.__detectPushButton.checked = True
# self.__unLockPushButton.connect("clicked()", self.calculateParameters())
ioAdvancedFormLayout.addRow( self.__detectPushButton )
# outputVolume selector
self.__outputVolumeNodeSelector = slicer.qMRMLNodeComboBox()
self.__outputVolumeNodeSelector.toolTip = "Select the output labelmap."
self.__outputVolumeNodeSelector.nodeTypes = ['vtkMRMLScalarVolumeNode']
self.__outputVolumeNodeSelector.baseName = "VesselnessFiltered"
self.__outputVolumeNodeSelector.noneEnabled = False
self.__outputVolumeNodeSelector.addEnabled = True
self.__outputVolumeNodeSelector.selectNodeUponCreation = True
self.__outputVolumeNodeSelector.removeEnabled = True
ioAdvancedFormLayout.addRow( "Output Volume:", self.__outputVolumeNodeSelector )
self.parent.connect( 'mrmlSceneChanged(vtkMRMLScene*)',
self.__outputVolumeNodeSelector, 'setMRMLScene(vtkMRMLScene*)' )
# previewVolume selector
self.__previewVolumeNodeSelector = slicer.qMRMLNodeComboBox()
self.__previewVolumeNodeSelector.toolTip = "Select the preview volume."
self.__previewVolumeNodeSelector.nodeTypes = ['vtkMRMLScalarVolumeNode']
self.__previewVolumeNodeSelector.baseName = "VesselnessPreview"
self.__previewVolumeNodeSelector.noneEnabled = False
self.__previewVolumeNodeSelector.addEnabled = True
self.__previewVolumeNodeSelector.selectNodeUponCreation = True
self.__previewVolumeNodeSelector.removeEnabled = True
ioAdvancedFormLayout.addRow( "Preview Volume:", self.__previewVolumeNodeSelector )
self.parent.connect( 'mrmlSceneChanged(vtkMRMLScene*)',
self.__previewVolumeNodeSelector, 'setMRMLScene(vtkMRMLScene*)' )
self.__minimumDiameterSpinBox = qt.QSpinBox()
self.__minimumDiameterSpinBox.minimum = 0
self.__minimumDiameterSpinBox.maximum = 1000
self.__minimumDiameterSpinBox.singleStep = 1
self.__minimumDiameterSpinBox.toolTip = "Specify the minimum Diameter manually."
ioAdvancedFormLayout.addRow( "Minimum Diameter [vx]:", self.__minimumDiameterSpinBox )
self.__maximumDiameterSpinBox = qt.QSpinBox()
self.__maximumDiameterSpinBox.minimum = 0
self.__maximumDiameterSpinBox.maximum = 1000
self.__maximumDiameterSpinBox.singleStep = 1
self.__maximumDiameterSpinBox.toolTip = "Specify the maximum Diameter manually."
ioAdvancedFormLayout.addRow( "Maximum Diameter [vx]:", self.__maximumDiameterSpinBox )
# add empty row
ioAdvancedFormLayout.addRow( "", qt.QWidget() )
# alpha slider
alphaLabel = qt.QLabel()
alphaLabel.text = "more Tubes <-> more Plates" + SlicerVmtkCommonLib.Helper.CreateSpace( 16 )
alphaLabel.setAlignment( 4 )
alphaLabel.toolTip = "A lower value detects tubes rather than plate-like structures."
ioAdvancedFormLayout.addRow( alphaLabel )
self.__alphaSlider = ctk.ctkSliderWidget()
self.__alphaSlider.decimals = 1
self.__alphaSlider.minimum = 0.1
self.__alphaSlider.maximum = 500
self.__alphaSlider.singleStep = 0.1
self.__alphaSlider.toolTip = alphaLabel.toolTip
ioAdvancedFormLayout.addRow( self.__alphaSlider )
# beta slider
betaLabel = qt.QLabel()
betaLabel.text = "more Blobs <-> more Tubes" + SlicerVmtkCommonLib.Helper.CreateSpace( 16 )
betaLabel.setAlignment( 4 )
betaLabel.toolTip = "A higher value detects tubes rather than blobs."
ioAdvancedFormLayout.addRow( betaLabel )
self.__betaSlider = ctk.ctkSliderWidget()
self.__betaSlider.decimals = 1
self.__betaSlider.minimum = 0.1
self.__betaSlider.maximum = 500
self.__betaSlider.singleStep = 0.1
self.__betaSlider.toolTip = betaLabel.toolTip
ioAdvancedFormLayout.addRow( self.__betaSlider )
# contrast slider
contrastLabel = qt.QLabel()
contrastLabel.text = "low Input Contrast <-> high Input Contrast" + SlicerVmtkCommonLib.Helper.CreateSpace( 14 )
contrastLabel.setAlignment( 4 )
contrastLabel.toolTip = "If the intensity contrast between vessels and background in the input image is high, choose a high value; otherwise choose a low value."
ioAdvancedFormLayout.addRow( contrastLabel )
self.__contrastSlider = ctk.ctkSliderWidget()
self.__contrastSlider.decimals = 0
self.__contrastSlider.minimum = 0
self.__contrastSlider.maximum = 500
self.__contrastSlider.singleStep = 10
self.__contrastSlider.toolTip = contrastLabel.toolTip
ioAdvancedFormLayout.addRow( self.__contrastSlider )
#
# Reset, preview and apply buttons
#
self.__buttonBox = qt.QDialogButtonBox()
self.__resetButton = self.__buttonBox.addButton( self.__buttonBox.RestoreDefaults )
self.__resetButton.toolTip = "Click to reset all input elements to default."
self.__previewButton = self.__buttonBox.addButton( self.__buttonBox.Discard )
self.__previewButton.setIcon( qt.QIcon() )
self.__previewButton.text = "Preview.."
self.__previewButton.toolTip = "Click to refresh the preview."
self.__startButton = self.__buttonBox.addButton( self.__buttonBox.Apply )
self.__startButton.setIcon( qt.QIcon() )
self.__startButton.text = "Start!"
self.__startButton.enabled = False
self.__startButton.toolTip = "Click to start the filtering."
self.layout.addWidget( self.__buttonBox )
self.__resetButton.connect( "clicked()", self.restoreDefaults )
self.__previewButton.connect( "clicked()", self.onRefreshButtonClicked )
self.__startButton.connect( "clicked()", self.onStartButtonClicked )
# be ready for events
self.__updating = 0
# set default values
self.restoreDefaults()
# compress the layout
self.layout.addStretch( 1 )
def onMRMLSceneChanged( self ):
'''
'''
SlicerVmtkCommonLib.Helper.Debug( "onMRMLSceneChanged" )
self.restoreDefaults()
def onInputVolumeChanged( self ):
'''
'''
if not self.__updating:
self.__updating = 1
SlicerVmtkCommonLib.Helper.Debug( "onInputVolumeChanged" )
# do nothing right now
self.__updating = 0
def onSeedChanged( self ):
'''
'''
if not self.__updating:
self.__updating = 1
# nothing yet
self.__updating = 0
def onStartButtonClicked( self ):
'''
'''
if self.__detectPushButton.checked:
self.restoreDefaults()
self.calculateParameters()
self.__startButton.enabled = True
# this is no preview
self.start( False )
def onRefreshButtonClicked( self ):
'''
'''
if self.__detectPushButton.checked:
self.restoreDefaults()
self.calculateParameters()
# calculate the preview
self.start( True )
# activate startButton
self.__startButton.enabled = True
def calculateParameters( self ):
'''
'''
SlicerVmtkCommonLib.Helper.Debug( "calculateParameters" )
# first we need the nodes
currentVolumeNode = self.__inputVolumeNodeSelector.currentNode()
currentSeedsNode = self.__seedFiducialsNodeSelector.currentNode()
if not currentVolumeNode:
# we need a input volume node
SlicerVmtkCommonLib.Helper.Debug( "calculateParameters: Have no valid volume node" )
return False
if not currentSeedsNode:
# we need a seeds node
SlicerVmtkCommonLib.Helper.Debug( "calculateParameters: Have no valid fiducial node" )
return False
image = currentVolumeNode.GetImageData()
currentCoordinatesRAS = [0, 0, 0]
# grab the current coordinates
n = currentSeedsNode.GetNumberOfFiducials()
currentSeedsNode.GetNthFiducialPosition(n-1,currentCoordinatesRAS)
seed = SlicerVmtkCommonLib.Helper.ConvertRAStoIJK( currentVolumeNode, currentCoordinatesRAS )
# we detect the diameter in IJK space (image has spacing 1,1,1) with IJK coordinates
detectedDiameter = self.GetLogic().getDiameter( image, int( seed[0] ), int( seed[1] ), int( seed[2] ) )
SlicerVmtkCommonLib.Helper.Debug( "Diameter detected: " + str( detectedDiameter ) )
contrastMeasure = self.GetLogic().calculateContrastMeasure( image, int( seed[0] ), int( seed[1] ), int( seed[2] ), detectedDiameter )
SlicerVmtkCommonLib.Helper.Debug( "Contrast measure: " + str( contrastMeasure ) )
self.__maximumDiameterSpinBox.value = detectedDiameter
self.__contrastSlider.value = contrastMeasure
return True
def onIOAdvancedToggle( self ):
'''
Show the I/O Advanced panel
'''
# re-calculate parameter
self.calculateParameters()
if self.__ioAdvancedToggle.checked:
self.__ioAdvancedPanel.show()
else:
self.__ioAdvancedPanel.hide()
def restoreDefaults( self ):
'''
'''
if not self.__updating:
self.__updating = 1
SlicerVmtkCommonLib.Helper.Debug( "restoreDefaults" )
self.__detectPushButton.checked = True
self.__minimumDiameterSpinBox.value = 1
self.__maximumDiameterSpinBox.value = 7
self.__alphaSlider.value = 0.3
self.__betaSlider.value = 500
self.__contrastSlider.value = 100
self.__startButton.enabled = False
self.__updating = 0
# if a volume is selected, the threshold slider values have to match it
self.onInputVolumeChanged()
def start( self, preview=False ):
'''
'''
SlicerVmtkCommonLib.Helper.Debug( "Starting Vesselness Filtering.." )
# first we need the nodes
currentVolumeNode = self.__inputVolumeNodeSelector.currentNode()
currentSeedsNode = self.__seedFiducialsNodeSelector.currentNode()
if preview:
# if previewMode, get the node selector of the preview volume
currentOutputVolumeNodeSelector = self.__previewVolumeNodeSelector
else:
currentOutputVolumeNodeSelector = self.__outputVolumeNodeSelector
currentOutputVolumeNode = currentOutputVolumeNodeSelector.currentNode()
if not currentVolumeNode:
# we need a input volume node
return 0
if not currentOutputVolumeNode or currentOutputVolumeNode.GetID() == currentVolumeNode.GetID():
# we need an output volume node
newVolumeDisplayNode = slicer.mrmlScene.CreateNodeByClass( "vtkMRMLScalarVolumeDisplayNode" )
newVolumeDisplayNode.SetDefaultColorMap()
newVolumeDisplayNode.SetScene( slicer.mrmlScene )
slicer.mrmlScene.AddNode( newVolumeDisplayNode )
newVolumeNode = slicer.mrmlScene.CreateNodeByClass( "vtkMRMLScalarVolumeNode" )
newVolumeNode.SetScene( slicer.mrmlScene )
newVolumeNode.SetName( slicer.mrmlScene.GetUniqueNameByString( currentOutputVolumeNodeSelector.baseName ) )
newVolumeNode.SetAndObserveDisplayNodeID( newVolumeDisplayNode.GetID() )
slicer.mrmlScene.AddNode( newVolumeNode )
currentOutputVolumeNode = newVolumeNode
currentOutputVolumeNodeSelector.setCurrentNode( currentOutputVolumeNode )
if preview and not currentSeedsNode:
# we need a seedsNode for preview
SlicerVmtkCommonLib.Helper.Info( "A seed point is required to use the preview mode." )
return 0
# we get the fiducial coordinates
if currentSeedsNode:
currentCoordinatesRAS = [0, 0, 0]
# grab the current coordinates
n = currentSeedsNode.GetNumberOfFiducials()
currentSeedsNode.GetNthFiducialPosition(n-1,currentCoordinatesRAS)
inputImage = currentVolumeNode.GetImageData()
#
# vesselness parameters
#
# we need to convert diameter to mm, we use the minimum spacing to multiply the voxel value
minimumDiameter = self.__minimumDiameterSpinBox.value * min( currentVolumeNode.GetSpacing() )
maximumDiameter = self.__maximumDiameterSpinBox.value * min( currentVolumeNode.GetSpacing() )
SlicerVmtkCommonLib.Helper.Debug( minimumDiameter )
SlicerVmtkCommonLib.Helper.Debug( maximumDiameter )
alpha = self.__alphaSlider.value
beta = self.__betaSlider.value
contrastMeasure = self.__contrastSlider.value
#
# end of vesselness parameters
#
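# In the underlying Frangi-type vesselness measure, alpha governs the
# discrimination between tube-like and plate-like structures, beta the
# discrimination between tubes and blobs, and the contrast value plays the role
# of the structureness (noise/contrast) cutoff (cf. Frangi et al., 1998); the
# slider tooltips in setup() indicate which direction each parameter pulls.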
# this image will later hold the inputImage
image = vtk.vtkImageData()
# this image will later hold the outputImage
outImage = vtk.vtkImageData()
# if we are in previewMode, we have to cut the ROI first for speed
if preview:
# we extract the ROI of currentVolumeNode and save it to currentOutputVolumeNode
# we work in RAS space
SlicerVmtkCommonLib.Helper.extractROI( currentVolumeNode.GetID(), currentOutputVolumeNode.GetID(), currentCoordinatesRAS, self.__maximumDiameterSpinBox.value )
# get the newly cropped imageData
image.DeepCopy( currentOutputVolumeNode.GetImageData() )
image.Update()
else:
# there was no ROI extraction, so just clone the inputImage
image.DeepCopy( inputImage )
image.Update()
# attach the spacing and origin to get accurate vesselness computation
image.SetSpacing( currentVolumeNode.GetSpacing() )
image.SetOrigin( currentVolumeNode.GetOrigin() )
# we now compute the vesselness in RAS space, image has spacing and origin attached, the diameters are converted to mm
# we use RAS space to support anisotropic datasets
outImage.DeepCopy( self.GetLogic().performFrangiVesselness( image, minimumDiameter, maximumDiameter, 5, alpha, beta, contrastMeasure ) )
outImage.Update()
# let's remove spacing and origin attached to outImage
outImage.SetSpacing( 1, 1, 1 )
outImage.SetOrigin( 0, 0, 0 )
# we only want to copy the orientation from input to output when we are not in preview mode
if not preview:
currentOutputVolumeNode.CopyOrientation( currentVolumeNode )
# we set the outImage which has spacing 1,1,1. The ijkToRas matrix of the node will take care of that
currentOutputVolumeNode.SetAndObserveImageData( outImage )
# for preview: show the inputVolume as background and the outputVolume as foreground in the slice viewers
# note: that's the only way we can have the preview as an overlay of the original volume
# for not preview: show the outputVolume as background and the inputVolume as foreground in the slice viewers
if preview:
fgVolumeID = currentOutputVolumeNode.GetID()
bgVolumeID = currentVolumeNode.GetID()
else:
bgVolumeID = currentOutputVolumeNode.GetID()
fgVolumeID = currentVolumeNode.GetID()
selectionNode = slicer.app.applicationLogic().GetSelectionNode()
selectionNode.SetReferenceActiveVolumeID( bgVolumeID )
selectionNode.SetReferenceSecondaryVolumeID( fgVolumeID )
slicer.app.applicationLogic().PropagateVolumeSelection()
# renew auto window/level for the output
currentOutputVolumeNode.GetDisplayNode().AutoWindowLevelOff()
currentOutputVolumeNode.GetDisplayNode().AutoWindowLevelOn()
# show foreground volume
numberOfCompositeNodes = slicer.mrmlScene.GetNumberOfNodesByClass( 'vtkMRMLSliceCompositeNode' )
for n in xrange( numberOfCompositeNodes ):
compositeNode = slicer.mrmlScene.GetNthNodeByClass( n, 'vtkMRMLSliceCompositeNode' )
if compositeNode:
if preview:
# the preview is the foreground volume, so we want to show it fully
compositeNode.SetForegroundOpacity( 1.0 )
else:
# now the background volume is the vesselness output, we want to show it fully
compositeNode.SetForegroundOpacity( 0.0 )
# fit slice to all sliceviewers
slicer.app.applicationLogic().FitSliceToAll()
# jump all sliceViewers to the fiducial point, if one was used
if currentSeedsNode:
numberOfSliceNodes = slicer.mrmlScene.GetNumberOfNodesByClass( 'vtkMRMLSliceNode' )
for n in xrange( numberOfSliceNodes ):
sliceNode = slicer.mrmlScene.GetNthNodeByClass( n, "vtkMRMLSliceNode" )
if sliceNode:
sliceNode.JumpSliceByOffsetting( currentCoordinatesRAS[0], currentCoordinatesRAS[1], currentCoordinatesRAS[2] )
SlicerVmtkCommonLib.Helper.Debug( "End of Vesselness Filtering.." )
class Slicelet( object ):
"""A slicer slicelet is a module widget that comes up in stand alone mode
implemented as a python class.
This class provides common wrapper functionality used by all slicer slicelets.
"""
# TODO: put this in a SliceletLib
# TODO: parse command line args
def __init__( self, widgetClass=None ):
self.parent = qt.QFrame()
self.parent.setLayout( qt.QVBoxLayout() )
# TODO: should have way to pop up python interactor
self.buttons = qt.QFrame()
self.buttons.setLayout( qt.QHBoxLayout() )
self.parent.layout().addWidget( self.buttons )
self.addDataButton = qt.QPushButton( "Add Data" )
self.buttons.layout().addWidget( self.addDataButton )
self.addDataButton.connect( "clicked()", slicer.app.ioManager().openAddDataDialog )
self.loadSceneButton = qt.QPushButton( "Load Scene" )
self.buttons.layout().addWidget( self.loadSceneButton )
self.loadSceneButton.connect( "clicked()", slicer.app.ioManager().openLoadSceneDialog )
if widgetClass:
self.widget = widgetClass( self.parent )
self.widget.setup()
self.parent.show()
class VesselnessFilteringSlicelet( Slicelet ):
""" Creates the interface when module is run as a stand alone gui app.
"""
def __init__( self ):
super( VesselnessFilteringSlicelet, self ).__init__( VesselnessFilteringWidget )
if __name__ == "__main__":
# TODO: need a way to access and parse command line arguments
# TODO: ideally command line args should handle --xml
import sys
print( sys.argv )
slicelet = VesselnessFilteringSlicelet()
|
jcfr/SlicerExtension-VMTK
|
PythonModules/VesselnessFiltering.py
|
Python
|
apache-2.0
| 22,448
|
[
"VTK"
] |
70a8f96cf50e34f98de6839130dcb09c732e495ab83a8e595498983d2f123087
|
from brian.stdunits import *
from brian.units import *
F = 1
N_SUBPOP = 2
INTERCO_RATE = 0
INTERCO_STRENGTH = 0
PARAMETERS = {
'Common':
{'simu_dt' : 0.05*msecond,
'simu_length' : 2000*msecond,
'N_subpop' : N_SUBPOP,
'N_mitral' : N_SUBPOP*50*F,
'inter_conn_rate' : {},
'inter_conn_strength' : {}
},
'Input':
{'tau_Ein' : 3*msecond,
'g_Ein0' : 1*siemens*meter**-2,
'sigma_Ein' : 0.35*siemens*meter**-2
},
'InputOscillation':
{'f' : 2*Hz,
'C' : 1 # Must be set to 1 for oscillation
},
'Mitral':
{'C_m' : 0.08*farad*meter**-2,
'g_L' : 0.87*siemens*meter**-2,
'E_L' : -64.5*mvolt,
'V_r' : -74*mvolt,
'V_t' : -62*mvolt,
't_refract' : 0.2*msecond
},
'Granule':
{'C_m' : 0.01*farad*meter**-2,
'g_L' : 0.83*siemens*meter**-2,
'E_L' : -70*mvolt,
'g_SD' : 1*siemens*meter**-2,
'g_DS' : 300*siemens*meter**-2
},
'Synapse':
{'V_E' : 0*mvolt,
'V_act_E' : 0*mvolt,
'g_E' : 1.4*siemens*meter**-2/F,
'sigma_E' : 0.01*mvolt,
'alpha_E' : 10*msecond**-1,
'beta_E' : 1./3*msecond**-1,
'V_I' : -80*mvolt,
'V_act_I' : -66.4*mvolt,
'g_I' : 10*siemens*meter**-2,
'sigma_I' : 0.4*mvolt,
'alpha_I' : 5*msecond**-1,
'beta_I' : 1./10*msecond**-1
},
}
|
neuro-lyon/multiglom-model
|
src/paramsets/std_gamma_1glom.py
|
Python
|
mit
| 1,370
|
[
"Brian"
] |
02e967a7e8edbdf294bd758c8ab892f595a3c1e8ae94c1a4d7b9c6084a4c22be
|
"""
This is the boilerplate default configuration file.
Changes and additions to settings should be done in the config module
located in the application root rather than this config.
"""
config = {
# webapp2 sessions
'webapp2_extras.sessions': {'secret_key': '_PUT_KEY_HERE_YOUR_SECRET_KEY_'},
# webapp2 authentication
'webapp2_extras.auth': {'user_model': 'boilerplate.models.User',
'cookie_name': 'session_name'},
# jinja2 templates
'webapp2_extras.jinja2': {'template_path': ['templates', 'boilerplate/templates', 'admin/templates'],
'environment_args': {'extensions': ['jinja2.ext.i18n']}},
# application name
'app_name': "AirShare",
# the default language code for the application.
# should match whatever language the site uses when i18n is disabled
'app_lang': 'en',
# Locale code = <language>_<territory> (ie 'en_US')
# to pick locale codes see http://cldr.unicode.org/index/cldr-spec/picking-the-right-language-code
# also see http://www.sil.org/iso639-3/codes.asp
# Language codes defined under iso 639-1 http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# Territory codes defined under iso 3166-1 alpha-2 http://en.wikipedia.org/wiki/ISO_3166-1
# disable i18n if locales array is empty or None
'locales': ['en_US', 'es_ES', 'it_IT', 'zh_CN', 'id_ID', 'fr_FR', 'de_DE', 'ru_RU', 'pt_BR', 'cs_CZ','vi_VN'],
# contact page email settings
'contact_sender': "PUT_SENDER_EMAIL_HERE",
'contact_recipient': "eugenewong@berkeley.edu",
# Password AES Encryption Parameters
# aes_key must be only 16 (*AES-128*), 24 (*AES-192*), or 32 (*AES-256*) bytes (characters) long.
'aes_key': "12_24_32_BYTES_KEY_FOR_PASSWORDS",
'salt': "_PUT_SALT_HERE_TO_SHA512_PASSWORDS_",
# get your own consumer key and consumer secret by registering at https://dev.twitter.com/apps
# callback url must be: http://[YOUR DOMAIN]/login/twitter/complete
'twitter_consumer_key': 'PUT_YOUR_TWITTER_CONSUMER_KEY_HERE',
'twitter_consumer_secret': 'PUT_YOUR_TWITTER_CONSUMER_SECRET_HERE',
#Facebook Login
# get your own consumer key and consumer secret by registering at https://developers.facebook.com/apps
# Very important: set site_url to your domain in the application settings on the Facebook app settings page
# callback url must be: http://[YOUR DOMAIN]/login/facebook/complete
'fb_api_key': 'PUT_YOUR_FACEBOOK_PUBLIC_KEY_HERE',
'fb_secret': 'PUT_YOUR_FACEBOOK_PUBLIC_KEY_HERE',
#Linkedin Login
# Get your own api key and secret from https://www.linkedin.com/secure/developer
'linkedin_api': 'PUT_YOUR_LINKEDIN_PUBLIC_KEY_HERE',
'linkedin_secret': 'PUT_YOUR_LINKEDIN_PUBLIC_KEY_HERE',
# Github login
# Register apps here: https://github.com/settings/applications/new
'github_server': 'github.com',
'github_redirect_uri': 'http://www.example.com/social_login/github/complete',
'github_client_id': 'PUT_YOUR_GITHUB_CLIENT_ID_HERE',
'github_client_secret': 'PUT_YOUR_GITHUB_CLIENT_SECRET_HERE',
# get your own recaptcha keys by registering at http://www.google.com/recaptcha/
'captcha_public_key': "6Lf3HusSAAAAAFEpGIbj8PHdyenVEyllOSVGW5Mo",
'captcha_private_key': "6Lf3HusSAAAAAFrghYJQcSxjiPAa0iqnhHclHnPO",
# Leave blank "google_analytics_domain" if you only want Analytics code
'google_analytics_domain': "YOUR_PRIMARY_DOMAIN (e.g. google.com)",
'google_analytics_code': "UA-XXXXX-X",
# add status codes and templates used to catch and display errors
# if a status code is not listed here it will use the default app engine
# stacktrace error page or browser error page
'error_templates': {
403: 'errors/default_error.html',
404: 'errors/default_error.html',
500: 'errors/default_error.html',
},
# Enable Federated login (OpenID and OAuth)
# Google App Engine Settings must be set to Authentication Options: Federated Login
'enable_federated_login': True,
# jinja2 base layout template
'base_layout': 'base.html',
# send error emails to developers
'send_mail_developer': True,
# fellas' list
#'developers': (
# ('Santa Klauss', 'snowypal@northpole.com'),
#),
# If true, it will write in datastore a log of every email sent
'log_email': True,
# If true, it will write in datastore a log of every visit
'log_visit': True,
# ----> ADD MORE CONFIGURATION OPTIONS HERE <----
} # end config
|
eugenewong/AirShare
|
boilerplate/config.py
|
Python
|
apache-2.0
| 4,560
|
[
"VisIt"
] |
47aafc5f533730f5aca0d873a81062e33f343adba484c56eb0731c6f750c2282
|
import re, sys, pprint
import unittest
import setup_path
from lib.asmlib.assembler import *
from lib.asmlib.asm_common_types import *
from lib.asmlib.asmparser import *
from lib.commonlib.utils import unpack_word, bytes2word, unpack_bytes
class TestAssembler(unittest.TestCase):
""" It's quite hard to do extensive tests on this level,
because we have to get deep into the implementation
details.
Therefore, these tests try to sample the "sanity checks"
of assembly. The real "heavy" testing is done by running
assembled code on the simulator and watching for expected
results.
"""
def setUp(self):
self.asm = Assembler()
def assemble(self, txt):
return self.asm.assemble(txt)
# Digs into the guts of Assembler to pull the symbol table
# created by the first pass.
#
# Since this test module is developed in sync with Assembler,
# this makes sense for more scrupulous inspection.
#
def symtab(self, txt):
symtab, addr_imf = self.asm._compute_addresses(self.asm._parse(txt))
return symtab
def addr_imf(self, txt):
symtab, addr_imf = self.asm._compute_addresses(self.asm._parse(txt))
return addr_imf
def test_firstpass_symbol_table(self):
txt1 = r'''
.segment text
lab1: add $r1, $r2, $v1
lab2: add $r2, $r3, $r4
'''
# note: SegAddr are namedtuples, so they can be just
# compared with normal tuples
#
self.assertEqual(self.symtab(txt1),
{ 'lab1': ('text', 0),
'lab2': ('text', 4)})
txt2 = r'''
.segment text
lab1: add $r1, $r2, $r3
.word 1, 2, 3, 4, 5
lab2: add $r2, $r3, $r4
.alloc 50
gaga:
'''
self.assertEqual(self.symtab(txt2),
{ 'lab1': ('text', 0),
'lab2': ('text', 24),
'gaga': ('text', 80)})
txt3 = r'''
.segment text
lab1: add $r1, $r2, $r3
.byte 1, 2, 3, 4, 5
lab2: add $r2, $r3, $r4
add $r2, $r3, $r4
.byte 0xFF
joe: nop
nop
kwa: nop
.string "hello"
jay: nop
'''
self.assertEqual(self.symtab(txt3),
{ 'lab1': ('text', 0),
'lab2': ('text', 12),
'joe': ('text', 24),
'kwa': ('text', 32),
'jay': ('text', 44),})
txt4 = r'''
.segment text
add $r1, $r2, $r3
lab1: .byte 1, 2, 4, 6, 7, 8
lab2: add $r1, $r2, $r3
.segment data
lab3: add $r1, $r2, $r3
lab4:
lab5: add $r1, $r2, $r3
'''
self.assertEqual(self.symtab(txt4),
{ 'lab1': ('text', 4),
'lab2': ('text', 12),
'lab3': ('data', 0),
'lab4': ('data', 4),
'lab5': ('data', 4)})
txt5 = r'''
.segment data
ko: add $r4, $r4, 23
dddd:
br: .string "h\n\\\"ello"
'''
self.assertEqual(self.symtab(txt5),
{ 'ko': ('data', 0),
'dddd': ('data', 4),
'br': ('data', 4)})
# note here that the 0-termination of a string is taken into
# account.
#
txt51 = r'''
.segment text
pow: .string "abcd"
jack: nop
'''
self.assertEqual(self.symtab(txt51),
{ 'pow': ('text', 0),
'jack': ('text', 8)})
txt6 = r'''
.segment wow
add $r0, $r0, $zero
call 12
li $r6, 0x45678919
bt: nop
'''
self.assertEqual(self.symtab(txt6),
{'bt': ('wow', 16)})
def test_firstpass_addr_imf(self):
txt11 = r'''
.segment wow
add $r0, $r0, $r0
call 12
li $r6, 0x45678919
bt: nop
'''
aimf = self.addr_imf(txt11)
self.assertEqual(aimf[0][0], ('wow', 0))
self.assertEqual(type(aimf[0][1]), Instruction)
self.assertEqual(aimf[3][0], ('wow', 16))
self.assertEqual(type(aimf[3][1]), Instruction)
txt12 = r'''
.segment text
add $r1, $r2, $r3
lab1: .byte 1, 2, 4, 6, 7, 8
lab2: add $r1, $r2, $r3
.segment data
lab3: add $r1, $r2, $r3
lab4:
lab5: add $r1, $r2, $r3
'''
aimf = self.addr_imf(txt12)
self.assertEqual(aimf[1][0], ('text', 4))
self.assertEqual(type(aimf[1][1]), Directive)
self.assertEqual(aimf[4][0], ('data', 4))
self.assertEqual(type(aimf[4][1]), Instruction)
def test_assemble_basic_export_and_segment(self):
txt = r'''
.segment text
.global jj
.global nb
and $r2, $r0, $r2 # clear r2
jj: lw $r17, 20($v1)
.segment data
.byte 0x14, 0x18, 0x01, 8, 9
nb: .word 0x56899001
'''
obj = self.assemble(txt)
# export table
self.assertEqual(obj.export_table[0],
('jj', ('text', 4)))
self.assertEqual(obj.export_table[1],
('nb', ('data', 8)))
# import and reloc tables should be empty
self.assertEqual(obj.import_table, [])
self.assertEqual(obj.reloc_table, [])
self.assertEqual(len(obj.seg_data), 2)
text_seg = obj.seg_data['text']
data_seg = obj.seg_data['data']
# check the correct encoding of instructions in the text
# segment
#
self.assertEqual(bytes2word(text_seg[0:4]),
9 << 26 | 2 << 21 | 2 << 11)
self.assertEqual(bytes2word(text_seg[4:8]),
0xF << 26 | 17 << 21 | 3 << 16 | 20)
# check the correct placement of data in the data segment
#
self.assertEqual(data_seg[0:5], list(unpack_bytes(b'\x14\x18\x01\x08\x09')))
self.assertEqual(data_seg[8:12], list(unpack_bytes(b'\x01\x90\x89\x56')))
def test_assemble_memref_define(self):
txt = r'''
.segment text
.define DEF, 0x20
lw $r3, DEF($r4)
'''
obj = self.assemble(txt)
text_seg = obj.seg_data['text']
self.assertEqual(bytes2word(text_seg[0:4]),
0xF << 26 | 3 << 21 | 4 << 16 | 0x20)
def test_assemble_basic_import(self):
txt = r'''
.segment text
call georgia
jr $r29
li $r11, california
.alloc 256
call california
sw $r5, 0($r5)
'''
obj = self.assemble(txt)
# export and reloc tables should be empty
self.assertEqual(obj.export_table, [])
self.assertEqual(obj.reloc_table, [])
# import table
self.assertEqual(obj.import_table[0],
('georgia', ImportType.CALL, ('text', 0)))
self.assertEqual(obj.import_table[1],
('california', ImportType.LI, ('text', 8)))
self.assertEqual(obj.import_table[2],
('california', ImportType.CALL, ('text', 16 + 256)))
# see what was actually assembled into the first CALL
# since the constant is imported, 0 is placed in the
# off26 field
#
text_seg = obj.seg_data['text']
self.assertEqual(bytes2word(text_seg[0:4]),
0x1D << 26)
def test_assemble_basic_reloc(self):
txt = r'''
.segment text
rip1: nop
call rip1
jr $r29
li $r11, rip2
.alloc 256
call rip3
sw $r5, 0($r5)
rip2: nop
rip3: nop
'''
obj = self.assemble(txt)
# export and import tables should be empty
self.assertEqual(obj.export_table, [])
self.assertEqual(obj.import_table, [])
# reloc table
self.assertEqual(obj.reloc_table[0],
('text', RelocType.CALL, ('text', 4)))
self.assertEqual(obj.reloc_table[1],
('text', RelocType.LI, ('text', 12)))
self.assertEqual(obj.reloc_table[2],
('text', RelocType.CALL, ('text', 276)))
# make sure that the assembled instructions are correct.
#
text_seg = obj.seg_data['text']
# the first part of LI is the LUI, which gets nothing from
# the offset, since it's too small
# the second part is the ORI, which gets the offset in its
# constant field
#
self.assertEqual(bytes2word(text_seg[12:16]),
0x6 << 26 | 11 << 21)
self.assertEqual(bytes2word(text_seg[16:20]),
0x2A << 26 | 11 << 21 | 11 << 16 | 284)
# check call's instruction too
self.assertEqual(bytes2word(text_seg[276:280]),
0x1D << 26 | (288 // 4))
class TestAssemblerErrors(unittest.TestCase):
def setUp(self):
self.asm = Assembler()
def assemble(self, txt):
return self.asm.assemble(txt)
def assert_str_contains(self, str, what):
self.failUnless(str.find(what) > -1, '"%s" contains "%s"' % (str, what))
def assert_error_at_line(self, msg, lineno):
self.assert_str_contains(msg, 'lineno %s' % lineno)
def assert_assembly_error(self, txt, msg=None, lineno=None):
try:
self.assemble(txt)
except AssemblyError:
err = sys.exc_info()[1]
err_msg = str(err)
if msg:
self.assert_str_contains(err_msg, msg)
if lineno:
self.assert_str_contains(err_msg, 'line %s' % lineno)
else:
self.fail('AssemblyError not raised')
def test_label_duplicate_error(self):
msg = 'duplicated'
txt = r'''
.segment text
lbl: add $r1, $r2, $r3
lbl: add $r2, $r5, $r4
'''
self.assert_assembly_error(txt, msg, 4)
txt = r'''
.segment text
lbl: add $r1, $r2, $r3
lab_5: .alloc 4
lbl6: add $r2, $r5, $r4
.segment data
lab_4: .word 0x56664412
lab_5: add $r0, $r0, $r0
'''
self.assert_assembly_error(txt, msg, 8)
def test_unknown_instruction_error(self):
txt = r''' .segment text
jafa $r1, $r1, $r2
'''
self.assert_assembly_error(txt, 'unknown instruction', 2)
txt = r''' .segment text
bnez r12, lab
lab: jafa $r1, $r1, $r2
'''
self.assert_assembly_error(txt, 'unknown instruction', 3)
def test_segment_directive_error(self):
seg_msg = 'segment must be defined before'
txt = r'''add $r4, $r4, 2'''
self.assert_assembly_error(txt, seg_msg, 1)
txt = r'''bla: .segment joe'''
self.assert_assembly_error(txt, seg_msg, 1)
txt = r'''.alloc 4'''
self.assert_assembly_error(txt, seg_msg, 1)
seg_arg_msg = 'argument(s) expected'
txt = '.segment'
self.assert_assembly_error(txt, seg_arg_msg, 1)
txt = '.segment a, b'
self.assert_assembly_error(txt, seg_arg_msg, 1)
txt = '.segment 456'
self.assert_assembly_error(txt, 'unexpected type', 1)
def test_define_directive_error(self):
txt = r'''
.segment text
.define joe, moe
'''
self.assert_assembly_error(txt, 'unexpected type', 3)
txt = r''' .segment text
.define 0x7, PQA
'''
self.assert_assembly_error(txt, 'unexpected type', 2)
def test_global_directive_error(self):
txt = r'''
.segment text
.global 12
'''
self.assert_assembly_error(txt, 'unexpected type', 3)
txt = r'''
.segment text
ax: nop
.global brap
'''
self.assert_assembly_error(txt, 'unknown label', 4)
def test_byte_directive_error(self):
txt = r'''
.segment s
.byte 5, 6, 9, ak
'''
self.assert_assembly_error(txt, 'argument 4 not a valid', 3)
txt = r''' .segment t
.byte 5, 9, 256, 4, 5, 6
'''
self.assert_assembly_error(txt, 'argument 3 not a valid', 2)
def test_word_directive_error(self):
txt = r'''
.segment s
.word k5, 6, 9, ak
'''
self.assert_assembly_error(txt, 'argument 1 not a valid', 3)
txt = r''' .segment t
.word 5, 9, 256, 4, 5, 699799799799799
'''
self.assert_assembly_error(txt, 'argument 6 not a valid', 2)
if __name__ == '__main__':
unittest.main()
|
8l/luz-cpu
|
luz_asm_sim/tests_unit/test_assembler.py
|
Python
|
unlicense
| 14,535
|
[
"MOE"
] |
3ee2997eab1d58eff471a33c3942a1f99ac2f723a3b6eb6be175fefe9cd4d9c8
|
# -*- coding: utf-8 -*-
# This script can also be called directly to build and install the pymoose
# module.
#
# Alternatively, you can use cmake build system which provides finer control
# over the build. This script is called by cmake to install the python module.
#
# This script is compatible with python2.7 and python3+. Therefore use of
# super() is commented out.
#
# NOTES:
# * Python2
# - Update setuptools using `python2 -m pip install setuptools --upgrade --user'.
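#
# A typical invocation might look like the following (an illustrative sketch,
# not taken from the original instructions; the custom flags are the build_ext
# options defined further below in this file):
#
#   python setup.py build_ext --with-boost   # build against Boost instead of GSL
#   python setup.py build_ext --debug        # build pymoose in debugging mode
#   python setup.py install --user           # install the built module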
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2019-, Dilawar Singh"
__maintainer__ = ""
__email__ = ""
import os
import sys
import multiprocessing
import subprocess
import datetime
try:
cmakeVersion = subprocess.call(["cmake", "--version"],
stdout=subprocess.PIPE)
except Exception as e:
print(e)
print("[ERROR] cmake is not found. Please install cmake.")
quit(-1)
# See https://docs.python.org/3/library/distutils.html
# setuptools is preferred over distutils. And we are supporting python3 only.
from setuptools import setup, Extension, Command
from setuptools.command.build_ext import build_ext as _build_ext
import subprocess
# Global variables.
sdir_ = os.path.dirname(os.path.realpath(__file__))
stamp = datetime.datetime.now().strftime('%Y%m%d')
builddir_ = os.path.join(sdir_, '_temp__build')
if not os.path.exists(builddir_):
os.makedirs(builddir_)
numCores_ = multiprocessing.cpu_count()
version_ = '3.3.0.dev%s' % stamp
# importlib is available only for python3. Since we build wheels, prefer .so
# extension. This way a wheel built by any python3.x will work with any python3.
class CMakeExtension(Extension):
def __init__(self, name, **kwargs):
# don't invoke the original build_ext for this special extension
import tempfile
# Create a temp file to create a dummy target. This build raises an
# exception because sources are empty. With python3 we can fix it by
# passing `optional=True` to the argument. With python2 there is no
# way around it.
f = tempfile.NamedTemporaryFile(suffix='.cpp', delete=False)
f.write(b'int main() { return 1; }')
Extension.__init__(self, name, sources=[f.name], **kwargs)
f.close()
class TestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
print("[INFO ] Running tests... ")
os.chdir(builddir_)
self.spawn(["ctest", "--output-on-failure", '-j%d'%numCores_])
os.chdir(sdir_)
class build_ext(_build_ext):
user_options = [
('with-boost', None, 'Use Boost Libraries (OFF)')
, ('with-gsl', None, 'Use GNU Scientific Library (ON)')
, ('with-gsl-static', None, 'Use GNU Scientific Library (static library) (OFF)')
, ('debug', None, 'Build moose in debugging mode (OFF)')
, ('no-build', None, 'DO NOT BUILD. (for debugging/development)')
] + _build_ext.user_options
def initialize_options(self):
# Initialize options.
self.with_boost = 0
self.with_gsl = 1
self.with_gsl_static = 0
self.debug = 0
self.no_build = 0
self.cmake_options = {}
# super().initialize_options()
_build_ext.initialize_options(self)
def finalize_options(self):
# Finalize options.
# super().finalize_options()
_build_ext.finalize_options(self)
self.cmake_options['PYTHON_EXECUTABLE'] = os.path.realpath(sys.executable)
self.cmake_options['VERSION_MOOSE'] = version_
if self.with_boost:
self.cmake_options['WITH_BOOST'] = 'ON'
self.cmake_options['WITH_GSL'] = 'OFF'
else:
if self.with_gsl_static:
self.cmake_options['GSL_USE_STATIC_LIBRARIES'] = 'ON'
if self.debug:
self.cmake_options['CMAKE_BUILD_TYPE'] = 'Debug'
else:
self.cmake_options['CMAKE_BUILD_TYPE'] = 'Release'
def run(self):
if self.no_build:
return
for ext in self.extensions:
self.build_cmake(ext)
# super().run()
_build_ext.run(self)
def build_cmake(self, ext):
global numCores_
global sdir_
print("\n==========================================================\n")
print("[INFO ] Building pymoose in %s ..." % builddir_)
cmake_args = []
for k, v in self.cmake_options.items():
cmake_args.append('-D%s=%s' % (k,v))
os.chdir(str(builddir_))
self.spawn(['cmake', str(sdir_)] + cmake_args)
if not self.dry_run:
self.spawn(['make', '-j%d'%numCores_])
os.chdir(str(sdir_))
with open(os.path.join(sdir_, "README.md")) as f:
readme = f.read()
setup(
name="pymoose",
version=version_,
description= 'Python scripting interface of MOOSE Simulator (https://moose.ncbs.res.in)',
long_description=readme,
long_description_content_type='text/markdown',
author='MOOSERes',
author_email='bhalla@ncbs.res.in',
maintainer='Dilawar Singh',
maintainer_email='',
url='http://moose.ncbs.res.in',
packages=[
'rdesigneur', 'moose', 'moose.SBML', 'moose.genesis', 'moose.neuroml',
'moose.neuroml2', 'moose.chemUtil', 'moose.chemMerge'
],
package_dir={
'rdesigneur': os.path.join(sdir_, 'python', 'rdesigneur'),
'moose': os.path.join(sdir_, 'python', 'moose')
},
package_data={
'moose': [
'_moose.so'
, os.path.join('neuroml2','schema','NeuroMLCoreDimensions.xml')
, os.path.join('chemUtil', 'rainbow2.pkl')
]
},
# python2 specific version here as well.
install_requires=['numpy', 'matplotlib','vpython'],
extras_require={'dev' : [ 'coverage', 'pytest', 'pytest-cov' ]},
ext_modules=[CMakeExtension('dummy', optional=True)],
cmdclass={'build_ext': build_ext, 'test': TestCommand},
)
|
BhallaLab/moose-core
|
setup.py
|
Python
|
gpl-3.0
| 6,058
|
[
"MOOSE"
] |
35831ca9af6618160a1bbdeb9dcad6af9ce3450c8282c651e4365402d862a584
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import sys
import platform
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
import builtins
if hasattr(builtins, '__NUMPY_SETUP__'):
del builtins.__NUMPY_SETUP__
import importlib
import numpy
importlib.reload(numpy)
self.include_dirs.append(numpy.get_include())
extra_link_args = []
if sys.platform.startswith('win') and platform.machine().endswith('64'):
extra_link_args.append('-Wl,--allow-multiple-definition')
long_desc = """
Official docs: [http://pymatgen.org](http://pymatgen.org/)
Pymatgen (Python Materials Genomics) is a robust, open-source Python library
for materials analysis. These are some of the main features:
1. Highly flexible classes for the representation of Element, Site, Molecule,
Structure objects.
2. Extensive input/output support, including support for
[VASP](http://cms.mpi.univie.ac.at/vasp/), [ABINIT](http://www.abinit.org/),
CIF, Gaussian, XYZ, and many other file formats.
3. Powerful analysis tools, including generation of phase diagrams, Pourbaix
diagrams, diffusion analyses, reactions, etc.
4. Electronic structure analyses, such as density of states and band structure.
5. Integration with the Materials Project REST API.
Pymatgen is free to use. However, we also welcome your help to improve this
library by making your own contributions. These contributions can be in the
form of additional tools or modules you develop, or feature requests and bug
reports. Please report any bugs and issues at pymatgen's [Github page]
(https://github.com/materialsproject/pymatgen). For help with any pymatgen
issues, please use the [Discourse page](https://pymatgen.discourse.group).
Why use pymatgen?
=================
There are many materials analysis codes out there, both commercial and free,
but pymatgen offers several advantages:
1. **It is (fairly) robust.** Pymatgen is used by thousands of researchers,
and is the analysis code powering the [Materials Project](https://www.materialsproject.org).
The analysis it produces survives rigorous scrutiny every single day. Bugs
tend to be found and corrected quickly. Pymatgen also uses
[CircleCI](https://circleci.com) and [Appveyor](https://www.appveyor.com/)
for continuous integration on the Linux and Windows platforms,
respectively, which ensures that every commit passes a comprehensive suite
of unittests.
2. **It is well documented.** Fairly comprehensive documentation has been
written to help you get to grips with it quickly.
3. **It is open.** You are free to use and contribute to pymatgen. It also means
that pymatgen is continuously being improved. We will attribute any code you
contribute to any publication you specify. Contributing to pymatgen means
your research becomes more visible, which translates to greater impact.
4. **It is fast.** Many of the core numerical methods in pymatgen have been
optimized by vectorizing in numpy/scipy. This means that coordinate
manipulations are extremely fast and are in fact comparable to codes
written in other languages. Pymatgen also comes with a complete system for
handling periodic boundary conditions.
5. **It will be around.** Pymatgen is not a pet research project. It is used in
the well-established Materials Project. It is also actively being developed
and maintained by the [Materials Virtual Lab](https://www.materialsvirtuallab.org),
the ABINIT group and many other research groups.
With effect from version 2019.1.1, pymatgen only supports Python 3.x. Users
who require Python 2.7 should install pymatgen v2018.x.
"""
setup(
name="pymatgen",
packages=find_packages(),
version="2019.5.8",
cmdclass={'build_ext': build_ext},
setup_requires=['numpy>=1.14.3', 'setuptools>=18.0'],
install_requires=["numpy>=1.14.3", "requests", "ruamel.yaml>=0.15.6",
"monty>=1.0.6", "scipy>=1.0.1", "pydispatcher>=2.0.5",
"tabulate", "spglib>=1.9.9.44", "networkx>=2.1",
"matplotlib>=1.5", "palettable>=2.1.1", "sympy", "pandas"],
extras_require={
"provenance": ["pybtex"],
"ase": ["ase>=3.3"],
"vis": ["vtk>=6.0.0"],
"abinit": ["apscheduler==2.1.0", "netcdf4"]},
package_data={"pymatgen.core": ["*.json"],
"pymatgen.analysis": ["*.yaml", "*.json"],
"pymatgen.analysis.cost": ["*.csv"],
"pymatgen.analysis.chemenv.coordination_environments.coordination_geometries_files": ["*.txt", "*.json"],
"pymatgen.analysis.chemenv.coordination_environments.strategy_files": ["*.json"],
"pymatgen.analysis.hhi": ["*.csv"],
"pymatgen.analysis.magnetism": ["*.json", "*.yaml"],
"pymatgen.analysis.structure_prediction": ["data/*.json", "*.yaml"],
"pymatgen.io.vasp": ["*.yaml"],
"pymatgen.io.lammps": ["templates/*.*"],
"pymatgen.io.feff": ["*.yaml"],
"pymatgen.symmetry": ["*.yaml", "*.json", "*.sqlite"],
"pymatgen.entries": ["*.yaml"],
"pymatgen.vis": ["ElementColorSchemes.yaml"],
"pymatgen.command_line": ["OxideTersoffPotentials"],
"pymatgen.analysis.defects": ["*.json"],
"pymatgen.analysis.diffraction": ["*.json"],
"pymatgen.util": ["structures/*.json"]},
author="Pymatgen Development Team",
author_email="ongsp@eng.ucsd.edu",
maintainer="Shyue Ping Ong, Matthew Horton",
maintainer_email="ongsp@eng.ucsd.edu, mkhorton@lbl.gov",
url="http://www.pymatgen.org",
license="MIT",
description="Python Materials Genomics is a robust materials "
"analysis code that defines core object representations for "
"structures and molecules with support for many electronic "
"structure codes. It is currently the core analysis code "
"powering the Materials Project "
"(https://www.materialsproject.org).",
long_description=long_desc,
long_description_content_type='text/markdown',
keywords=["VASP", "gaussian", "ABINIT", "nwchem", "qchem", "materials", "science",
"project", "electronic", "structure", "analysis", "phase", "diagrams",
"crystal"],
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Software Development :: Libraries :: Python Modules"
],
ext_modules=[Extension("pymatgen.optimization.linear_assignment",
["pymatgen/optimization/linear_assignment.c"],
extra_link_args=extra_link_args),
Extension("pymatgen.util.coord_cython",
["pymatgen/util/coord_cython.c"],
extra_link_args=extra_link_args)],
entry_points={
'console_scripts': [
'pmg = pymatgen.cli.pmg:main',
'feff_input_generation = pymatgen.cli.feff_input_generation:main',
'feff_plot_cross_section = pymatgen.cli.feff_plot_cross_section:main',
'feff_plot_dos = pymatgen.cli.feff_plot_dos:main',
'gaussian_analyzer = pymatgen.cli.gaussian_analyzer:main',
'get_environment = pymatgen.cli.get_environment:main',
]
}
)
|
dongsenfo/pymatgen
|
setup.py
|
Python
|
mit
| 8,279
|
[
"ABINIT",
"ASE",
"CRYSTAL",
"FEFF",
"Gaussian",
"LAMMPS",
"NWChem",
"VASP",
"VTK",
"pymatgen"
] |
3666553ba4446254145a9e705f3731d9117755ce8f3312de7d9a4bcde18623a7
|
############### usage: reads in a photometry file and, for n stars, creates x and y average displacements and integrates them over m time steps to create the actual Gaussian profile ###############
###############
from numpy import *
import sys
import os
#from scipy import *
from scipy import fftpack
class Chdir:
def __init__( self, newPath ):
self.savedPath = os.getcwd()
os.chdir(newPath)
def __del__( self ):
os.chdir( self.savedPath )
def g2d(coords, TENSIG, beta, PSF, nx, ny, c, pixscale):
data = zeros((nx,ny),float)
for i in (coords):
for j in xrange(c[0]-TENSIG,c[0]+TENSIG+1):
for k in xrange(c[1]-TENSIG,c[1]+TENSIG+1):
g=10.0/(2.0*3.1415*PSF*PSF/(pixscale*pixscale))*exp(-((j-i[0])*(j-i[0])+(k-i[1])*(k-i[1]))/(2.0*PSF*PSF/(pixscale*pixscale)))
data[j][k] +=g
return data
def m2d(coords, TENSIG, nx,ny,c, beta, PSF, pixscale):
data = zeros((nx,ny),float)
for i in (coords):
for j in xrange(c[0]-TENSIG,c[0]+TENSIG+1):
for k in xrange(c[1]-TENSIG,c[1]+TENSIG+1):
m=10.0/(2.0*3.1415*PSF*PSF/(pixscale*pixscale))*\
pow((1.0+(((j-i[0])*(j-i[0])+(k-i[1])*(k-i[1]))/(PSF*PSF/(pixscale*pixscale)))),-beta)
data[j][k] +=m
return data
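# Descriptive note on the two profile builders above (added for clarity):
# g2d accumulates, for every input coordinate, a 2-D Gaussian
#     g(j, k) = 10 / (2*pi*(PSF/pixscale)**2)
#               * exp(-((j - x)**2 + (k - y)**2) / (2*(PSF/pixscale)**2))
# while m2d accumulates a Moffat profile with the same normalization
#     m(j, k) = 10 / (2*pi*(PSF/pixscale)**2)
#               * (1 + ((j - x)**2 + (k - y)**2) / (PSF/pixscale)**2)**(-beta)
# Both are evaluated only within +/- TENSIG pixels of the centre c for speed.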
def mkfits(par, coords1, coords2, newdir, date, PSF, TENSIG, pixscalex, pixscaley,cadence, x, y, nx, ny, c, exposure):
from pyfits import *
fitsobj = HDUList()
# create Primary HDU with minimal header keywords
hdu = PrimaryHDU()
# add a 10x5 array of zeros
h=hdu.header
h.update('RA', '%s' %par['ra'])
h.update('Dec', '%s' %par['dec'])
h.update('DATE', '%s' %date)#par['date'])
h.update('TELESC', '%s'%par['scope'])
h.update('CAMERA', '%s'%par['camera'])
h.update ('IMTYPE', 'LI', 'LI for Lucky Imaging, HSP for high speed photometry.' )
h.update ('CCDSCLX', '%s' %pixscalex,'arcsec/pixel')
h.update ('CCDSCLY', '%s' %pixscaley,'arcsec/pixel')
profile=par['profile'] #'g' for gaussian, 'm' for moffat
beta = float(par['beta'])
if profile=='g':
data1 = g2d(coords1, TENSIG, beta, PSF, nx, ny, c, pixscalex)
data2 = g2d(coords2, TENSIG, beta, PSF, nx, ny, c, pixscalex)
hdu.data=concatenate((data1,data2))
h.update('PROFILE', 'gaussian')
h.update('PSF', '%s' %PSF, 'arcseconds')
elif profile == 'm':
data1 = m2d(coords1, TENSIG, nx,ny,c, beta, PSF, pixscalex)
data2 = m2d(coords2, TENSIG, nx,ny,c, beta, PSF, pixscalex)
hdu.data=concatenate((data1,data2))
h.update('PROFILE', 'moffat')
h.update('PSFALPHA', '%s' %(par['alpha']))
h.update('PSFBETA', '%s' %par['beta'])
h.update('DISPLACE', '%s/coord_list.dat' %newdir, 'photometry file for x and y position')
h.update('CADENCE', '%f' %cadence, 'frequency of position update in hz')
h.update('INTEGRAT', '%s' % par['nsteps'], 'number of integrations')
exposure =(float(par['nsteps'])*exposure)
h.update('EXPOSURE', '%f' %exposure, 'exposure in seconds')
h.update ('NSTARS' , '1', 'number of stars used')
# save to a file, the writeto method will make sure the required
# keywords are conforming to the data
notes1 = 'if IMTYPE is LI the coordinate refers to the location of the brightest pixel within a restricted area (typically 25 pix radius) centered on the position of the target at the previous time step. one star is used. coordinate file format is #file x y brightest-pixel-counts ----------'
notes2 = 'if IMTYPE is HSP sextractor and iraf photometry phot package are used to derive x and y position. more than one star can be used. coordinate file format is #image-index-in-spool \[x1 y1 flux1 normalized-flux1]*number of stars -----'
notes =par['notes']
h.update('REDUCTN', '%s' %(notes1+notes2))
h.update('NOTES', '%s' %(par['notes']))
fitsobj.append(hdu)
fname = '%s/psf_%s_%3.1fs.fits'%(newdir,profile,exposure)
print 'writing fits file to %s'%fname
if os.path.isfile(fname):
strg = "rm %s"%fname
os.system(strg)
fitsobj.writeto(fname)
###################################################main#######################
def centan(outpath,dispfile, par, nstars, nameroot, newdir):
from pyfits import open as pfopen
from pylab import *
if os.path.isfile(dispfile) == 0:
print "no strehl analysis file ",dispfile,". run analysis first!"
return -1
f=open(dispfile,'r')
allcoordslist=[]
skip = int(par['nskip'])
nsteps = int(par['nsteps'])
##### HEADER INFO #####
firstfits = '%s/unspooled/%s_%05d.fits' %(outpath,nameroot,skip)
image=pfopen(firstfits)
header=image[0].header
image.close()
if 'HBIN' in header:
pixscalex = float(par['ps'])*float(header['HBIN'])
pixscaley = float(par['ps'])*float(header['VBIN'])
elif 'CCDXBIN' in header:
pixscalex = float(par['ps'])*float(header['CCDXBIN'])
pixscaley = float(par['ps'])*float(header['CCDYBIN'])
if 'EXPOSURE' in header:
exposure = float(header['EXPOSURE'])
elif 'EXPTIME' in header:
exposure = float(header['EXPTIME'])
else:
print "no exposure lenght recognizable key!"
return -1
if 'KCT' in header:
cadence = float(header['KCT'])
else:
cadence = 1.0/exposure
if 'FRAME' in header:
date = header['FRAME']
elif 'DATE' in header:
date = header['DATE']
PSFg=float(par['psf'])
PSFm=float(par['alpha'])
nx,ny=100,100
c=(50,50)
profile=par['profile'] #'g' for gaussian, 'm' for moffat
if profile=='g':
PSF = PSFg
elif profile == 'm':
PSF=PSFm
else:
print "unknown profile"
return -1
TENSIG=min(int(PSF/pixscalex*5),c[0])
x,y=arange(nx),arange(ny)
for i in f:
if i.startswith('#'): continue
i=i.split()
allcoordslist.append([i[0], float(i[1]), float(i[2]),
float(i[3]), float(i[4]), float(i[5]),
float(i[6]), float(i[7]), float(i[8])])
allcoords=sorted(allcoordslist,key=lambda list:list[0])
if skip>0:
allcoords=allcoords[skip:]
coordfile = "%s/coord_list.dat"%(newdir)
f= open(coordfile,'w')
print >> f ,"#fname dx(pix) dy(pix) dx(arcsec) dy(arcsec) flux(counts, aperture) x(pix,aperture) y(pix, aperture) x(pix, maxflux), y(pix, maxflux) nrightest pixel(counts)"
x0, y0 = allcoords[0][6],allcoords[0][7]
for l in allcoords:
dx=float(l[6])-float(x0)
dy=float(l[7])-float(y0)
print >>f, l[0],dx,dy,dx*pixscalex,dy*pixscaley,l[8],\
l[6],l[7],l[4],l[5],l[3]
#print zip(*allcoordslist)[4]
mux = []
muy = []
for i in xrange(nstars):
dx=array(zip(*allcoords)[6])
dy=array(zip(*allcoords)[7])
mux.append(array(dx[:nsteps]-dx[0]+c[1]))
muy.append(array(dy[:nsteps]-dy[0]+c[0]))
mx= mean(mux,0)
my= mean(muy,0)
xindex = arange(len(dx))
plt.figure()
#fname = '%s/%s/%s_dx.png'%(LIDIR,par['fits'],par['fits'])
#savefig(fname,dpi=None, facecolor='w', edgecolor='w',
# orientation='portrait', papertype=None, format=None,
# transparent=False, bbox_inches=None, pad_inches=0.1)
subplot(2,1,1)
plt.xlabel('time (seconds)')
plt.ylabel('displacement (arcseconds)')
plt.ylabel('dx (arcseconds)')
plot (xindex*cadence,(dx-dx[0])*pixscalex, 'o-',label='x')
subplot(2,1,2)
plt.ylabel('dy (arcseconds)')
plot (xindex*cadence,(dy-dy[0])*pixscaley, 'o-',label='y')
legend(loc=1, ncol=1, shadow=True)
fname = '%s/%s_dxdy.png'%(newdir,nameroot)
savefig(fname,dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1)
plt.figure()
plt.xlabel('dx (arcseconds)')
plt.ylabel('dx (arcseconds)')
#fname = '%s/%s/%s_dx.png'%(LIDIR,par['fits'],par['fits'])
#savefig(fname,dpi=None, facecolor='w', edgecolor='w',
# orientation='portrait', papertype=None, format=None,
# transparent=False, bbox_inches=None, pad_inches=0.1)
plot ((dx-dx[0])*pixscalex,(dy-dy[0])*pixscaley, 'o')
# legend(loc=1, ncol=1, shadow=True)
fname = '%s/%s_dxvsdy.png'%(newdir,nameroot)
savefig(fname,dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1)
plt.figure()
xfft=fft((dx-dx[0])*pixscalex)
yfft=fft((dy-dy[0])*pixscaley)
nxfft=len(xfft)
nyfft=len(yfft)
powerx = abs(xfft[1:(nxfft/2)])**2
powery = abs(yfft[1:(nyfft/2)])**2
nyquist=1./2
freqx=array(range(nxfft/2))/(nxfft/2.0)*nyquist
freqy=array(range(nyfft/2))/(nyfft/2.0)*nyquist
periodx=1./freqx
periody=1./freqy
plt.xlabel('period of x and y oscillations [seconds]')
plt.ylabel('power')
plot(periodx[1:len(periodx)/2], powerx[0:len(powerx)/2], 'o-',label='x')
plot(periody[1:len(periody)/2], powery[0:len(powery)/2], 'o-',label='y')
# plt.xlim(0,max(periodx)/2)
# xaxis((0,40))
fname = '%s/%s_fft.png'%(newdir,nameroot)
# show()
savefig(fname,dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1)
coords1 = array([ zeros(2,float) for i in xrange(nsteps) ]).reshape(nsteps,2)
coords2 = array([ ones(2,float)*50 for i in xrange(nsteps) ]).reshape(nsteps,2)
for i in range(nsteps):
coords1[i][0] = mx[i]
coords1[i][1] = my[i]
# coords2[i][0] *=c[0]
# coords2[i][1] *=c[1]
mkfits(par, coords1, coords2,newdir,date, PSF, TENSIG, pixscalex, pixscaley, cadence,x, y,nx, ny, c, exposure)
strg = 'cp %s/unspooled/%s_%05d.fits %s'%(outpath, nameroot,skip,newdir)
os.system(strg)
# os.chdir(olddir)
# os.system(strg)
# strg = 'tar -czvf %s.tgz %s_displacement'%(newdir,nameroot)
# print strg
# os.system(strg)
return 1
if __name__ == '__main__':
if len(sys.argv) != 2 or sys.argv[1].startswith('-h') or sys.argv[1] == 'h':
print """Usage. Requires:
**name of parameter file containing:**
Directory containing images
#'y' for using displacement, 'n' for just integration
'disp' : 'y',
#target coordinates (optional)
'ra' : '',\
'dec' : '',
'profile' : 'm',\
'alpha' : 1.4,\
'beta' : 3.0,\
'psf' : 0.7,\
#number of steps to use in the psf reconstruction
'nsteps' : 100,\
#number of steps images to skip
'nskip':0,\
#telescope
'scope' : 'FTN'
dark method
"""
sys.exit()
##### DECLARE VARIABLES #####
from mymkdir import mymkdir
par = readconfig(sys.argv[1])
print par
olddir = '%s/%s/' %(LIDIR,par['spool'][0])
newdir = '%s/%s/%s_displacement' %(LIDIR,par['spool'][0],par['spool'][0])
if mymkdir(newdir)!=0:
sys.exit(0)
# strg = 'mkdir %s'%newdir
# os.system(strg)
dispfile = "%s/%s/strehl_list.dat"%(LIDIR,par['spool'][0])
centan(doutpath,dispfile, par, 1,nameroot, newdir)
|
fedhere/getlucky
|
LIpipe/psf.py
|
Python
|
mit
| 11,707
|
[
"Gaussian"
] |
ad66c126e0e6dc742c96f252d43e9072e1950be5dc177e9fc98f68a9c06da876
|
"""Package for learning complete games from data
The API of this individual module is still unstable and may change as
improvements or refinements are made.
There are two general game types in this module: learned games and deviation
games. Learned games vary by the method, but generally expose methods for
computing payoffs and may other features. Deviation games use learned games and
different functions to compute deviation payoffs via various methods.
"""
import warnings
import numpy as np
from numpy.lib import recfunctions
import sklearn
from sklearn import gaussian_process as gp
from gameanalysis import gamereader
from gameanalysis import paygame
from gameanalysis import restrict
from gameanalysis import rsgame
from gameanalysis import utils
class _DevRegressionGame(rsgame._CompleteGame): # pylint: disable=protected-access
"""A game regression model that learns deviation payoffs
This model functions as a game, but doesn't have a default way of computing
deviation payoffs. It must be wrapped with another game that uses payoff
data to compute deviation payoffs.
"""
def __init__( # pylint: disable=too-many-arguments
self, game, regressors, offset, scale, min_payoffs, max_payoffs,
rest):
super().__init__(game.role_names, game.strat_names,
game.num_role_players)
self._regressors = regressors
self._offset = offset
self._offset.setflags(write=False)
self._scale = scale
self._scale.setflags(write=False)
self._min_payoffs = min_payoffs
self._min_payoffs.setflags(write=False)
self._max_payoffs = max_payoffs
self._max_payoffs.setflags(write=False)
self._rest = rest
self._rest.setflags(write=False)
def deviation_payoffs(self, _, **_kw): # pylint: disable=arguments-differ
raise ValueError(
"regression games don't define deviation payoffs and must be "
'used as a model for a deviation game')
def get_payoffs(self, profiles):
utils.check(
self.is_profile(profiles).all(), 'must pass valid profiles')
payoffs = np.zeros(profiles.shape)
for i, (off, scale, reg) in enumerate(zip(
self._offset, self._scale, self._regressors)):
mask = profiles[..., i] > 0
profs = profiles[mask]
profs[:, i] -= 1
if profs.size:
payoffs[mask, i] = reg.predict(restrict.translate(
profs, self._rest)).ravel() * scale + off
return payoffs
def get_dev_payoffs(self, dev_profs):
"""Compute the payoff for deviating
This implementation is more efficient than the default since we don't
need to compute the payoff for non deviators."""
prof_view = np.rollaxis(restrict.translate(dev_profs.reshape(
(-1, self.num_roles, self.num_strats)), self._rest), 1, 0)
payoffs = np.empty(dev_profs.shape[:-2] + (self.num_strats,))
pay_view = payoffs.reshape((-1, self.num_strats)).T
for pays, profs, reg in zip(
pay_view, utils.repeat(prof_view, self.num_role_strats),
self._regressors):
np.copyto(pays, reg.predict(profs))
return payoffs * self._scale + self._offset
def max_strat_payoffs(self):
return self._max_payoffs.view()
def min_strat_payoffs(self):
return self._min_payoffs.view()
def restrict(self, restriction):
base = rsgame.empty_copy(self).restrict(restriction)
new_rest = self._rest.copy()
new_rest[new_rest] = restriction
regs = tuple(reg for reg, m in zip(self._regressors, restriction) if m)
return _DevRegressionGame(
base, regs, self._offset[restriction], self._scale[restriction],
self._min_payoffs[restriction], self._max_payoffs[restriction],
new_rest)
def _add_constant(self, constant):
off = np.broadcast_to(constant, self.num_roles).repeat(
self.num_role_strats)
return _DevRegressionGame(
self, self._regressors, self._offset + off, self._scale,
self._min_payoffs + off, self._max_payoffs + off, self._rest)
def _multiply_constant(self, constant):
mul = np.broadcast_to(constant, self.num_roles).repeat(
self.num_role_strats)
return _DevRegressionGame(
self, self._regressors, self._offset * mul, self._scale * mul,
self._min_payoffs * mul, self._max_payoffs * mul, self._rest)
def _add_game(self, _):
return NotImplemented
def __eq__(self, othr):
# pylint: disable-msg=protected-access
return (super().__eq__(othr) and
self._regressors == othr._regressors and
np.allclose(self._offset, othr._offset) and
np.allclose(self._scale, othr._scale) and
np.all(self._rest == othr._rest))
def __hash__(self):
return hash((super().__hash__(), self._rest.tobytes()))
def _dev_profpay(game):
"""Iterate over deviation profiles and payoffs"""
sgame = paygame.samplegame_copy(game)
profiles = sgame.flat_profiles()
payoffs = sgame.flat_payoffs()
for i, pays in enumerate(payoffs.T):
mask = (profiles[:, i] > 0) & ~np.isnan(pays)
utils.check(
mask.any(), "couldn't find deviation data for a strategy")
profs = profiles[mask]
profs[:, i] -= 1
yield i, profs, pays[mask]
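# Note on the generator above: for each strategy index i it yields
# (i, profs, pays), where profs are the observed profiles that contain a
# player on strategy i with that deviating player removed (profs[:, i] -= 1),
# and pays are the corresponding payoffs to strategy i with NaN entries
# filtered out.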
def nngame_train( # pylint: disable=too-many-arguments,too-many-locals
game, epochs=100, layer_sizes=(32, 32), dropout=0.2, verbosity=0,
optimizer='sgd', loss='mean_squared_error'):
"""Train a neural network regression model
This mostly exists as a proof of concept; individual testing should be done
to make sure it is working sufficiently. This API will likely change to
support more general architectures and training.
"""
utils.check(layer_sizes, 'must have at least one layer')
utils.check(0 <= dropout < 1, 'dropout must be a valid probability')
# This is for delayed importing of tensorflow
from keras import models, layers
model = models.Sequential()
lay_iter = iter(layer_sizes)
model.add(layers.Dense(
next(lay_iter), input_shape=[game.num_strats], activation='relu'))
for units in lay_iter:
model.add(layers.Dense(units, activation='relu'))
if dropout:
model.add(layers.Dropout(dropout))
model.add(layers.Dense(1, activation='sigmoid'))
regs = []
offsets = np.empty(game.num_strats)
scales = np.empty(game.num_strats)
for i, profs, pays in _dev_profpay(game):
# XXX Payoff normalization specific to sigmoid. If we accept alternate
# models, we need a way to compute how to potentially normalize
# payoffs.
min_pay = pays.min()
offsets[i] = min_pay
max_pay = pays.max()
scale = 1 if np.isclose(max_pay, min_pay) else max_pay - min_pay
scales[i] = scale
reg = models.clone_model(model)
reg.compile(optimizer=optimizer, loss=loss)
reg.fit(profs, (pays - min_pay) / scale, epochs=epochs,
verbose=verbosity)
regs.append(reg)
return _DevRegressionGame(
game, tuple(regs), offsets, scales, game.min_strat_payoffs(),
game.max_strat_payoffs(), np.ones(game.num_strats, bool))
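# Illustrative usage of nngame_train (a hedged sketch, not from the original
# module; `data_game` and `profile_array` are placeholders for a game with
# sample payoff data and an array of valid profiles):
#
#     learned = nngame_train(data_game, epochs=50, layer_sizes=(64, 32))
#     pays = learned.get_payoffs(profile_array)
#
# The returned object is a _DevRegressionGame, so deviation_payoffs is not
# defined on it directly; it must be used as the model for a deviation game.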
def sklgame_train(game, estimator):
"""Create a regression game from an arbitrary sklearn estimator
Parameters
----------
game : RsGame
The game to learn, must have at least one payoff per strategy.
estimator : sklearn estimator
An estimator that supports clone, fit, and predict via the standard
scikit-learn estimator API.
"""
regs = []
for _, profs, pays in _dev_profpay(game):
reg = sklearn.base.clone(estimator)
reg.fit(profs, pays)
regs.append(reg)
return _DevRegressionGame(
game, tuple(regs), np.zeros(game.num_strats), np.ones(game.num_strats),
game.min_strat_payoffs(), game.max_strat_payoffs(),
np.ones(game.num_strats, bool))
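# Illustrative usage of sklgame_train (a hedged sketch; RandomForestRegressor
# is just one example of an estimator that supports clone/fit/predict, and
# `data_game` / `profile_array` are placeholders):
#
#     from sklearn.ensemble import RandomForestRegressor
#     learned = sklgame_train(data_game, RandomForestRegressor(n_estimators=50))
#     pays = learned.get_payoffs(profile_array)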
class _RbfGpGame(rsgame._CompleteGame): # pylint: disable=too-many-instance-attributes,protected-access
"""A regression game using RBF Gaussian processes
This regression game has a built-in deviation payoff based on a
continuous approximation of the multinomial distribution.
"""
def __init__( # pylint: disable=too-many-locals,too-many-arguments
self, role_names, strat_names, num_role_players, offset, coefs,
lengths, sizes, profiles, alpha):
super().__init__(role_names, strat_names, num_role_players)
self._offset = offset
self._offset.setflags(write=False)
self._coefs = coefs
self._coefs.setflags(write=False)
self._lengths = lengths
self._lengths.setflags(write=False)
self._sizes = sizes
self._sizes.setflags(write=False)
self._size_starts = np.insert(self._sizes[:-1].cumsum(), 0, 0)
self._size_starts.setflags(write=False)
self._profiles = profiles
self._profiles.setflags(write=False)
self._alpha = alpha
self._alpha.setflags(write=False)
# Useful member
self._dev_players = np.repeat(
self.num_role_players - np.eye(self.num_roles, dtype=int),
self.num_role_strats, 0)
self._dev_players.setflags(write=False)
# Compute min and max payoffs
# TODO These are pretty conservative, and could maybe be made more
# accurate
sdp = self._dev_players.repeat(self.num_role_strats, 1)
max_rbf = np.einsum('ij,ij,ij->i', sdp, sdp, 1 / self._lengths)
minw = np.exp(-max_rbf / 2) # pylint: disable=invalid-unary-operand-type
mask = self._alpha > 0
pos = np.add.reduceat(self._alpha * mask, self._size_starts)
neg = np.add.reduceat(self._alpha * ~mask, self._size_starts)
self._min_payoffs = self._coefs * (pos * minw + neg) + self._offset
self._min_payoffs.setflags(write=False)
self._max_payoffs = self._coefs * (pos + neg * minw) + self._offset
self._max_payoffs.setflags(write=False)
def get_payoffs(self, profiles):
utils.check(
self.is_profile(profiles).all(), 'must pass valid profiles')
dev_profiles = np.repeat(
profiles[..., None, :] - np.eye(self.num_strats, dtype=int),
self._sizes, -2)
vec = ((dev_profiles - self._profiles) /
self._lengths.repeat(self._sizes, 0))
rbf = np.einsum('...ij,...ij->...i', vec, vec)
payoffs = self._offset + self._coefs * np.add.reduceat(
np.exp(-rbf / 2) * self._alpha, self._size_starts, -1) # pylint: disable=invalid-unary-operand-type
payoffs[profiles == 0] = 0
return payoffs
def get_dev_payoffs(self, dev_profs, *, jacobian=False): # pylint: disable=arguments-differ
dev_profiles = dev_profs.repeat(
np.add.reduceat(self._sizes, self.role_starts), -2)
vec = ((dev_profiles - self._profiles) /
self._lengths.repeat(self._sizes, 0))
rbf = np.einsum('...ij,...ij->...i', vec, vec)
exp = np.exp(-rbf / 2) * self._alpha # pylint: disable=invalid-unary-operand-type
payoffs = self._offset + self._coefs * np.add.reduceat(
exp, self._size_starts, -1)
if not jacobian:
return payoffs
jac = -(self._coefs[:, None] / self._lengths *
np.add.reduceat(exp[:, None] * vec, self._size_starts, 0))
return payoffs, jac
def max_strat_payoffs(self):
return self._max_payoffs.view()
def min_strat_payoffs(self):
return self._min_payoffs.view()
def deviation_payoffs(self, mixture, *, jacobian=False, **_): # pylint: disable=too-many-locals
players = self._dev_players.repeat(self.num_role_strats, 1)
avg_prof = players * mixture
diag = 1 / (self._lengths ** 2 + avg_prof)
diag_sizes = diag.repeat(self._sizes, 0)
diff = self._profiles - avg_prof.repeat(self._sizes, 0)
det = 1 / (1 - self._dev_players * np.add.reduceat(
mixture ** 2 * diag, self.role_starts, 1))
det_sizes = det.repeat(self._sizes, 0)
cov_diag = np.einsum('ij,ij,ij->i', diff, diff, diag_sizes)
cov_outer = np.add.reduceat(
mixture * diag_sizes * diff, self.role_starts, 1)
sec_term = np.einsum(
'ij,ij,ij,ij->i', self._dev_players.repeat(self._sizes, 0),
det_sizes, cov_outer, cov_outer)
exp = np.exp(-(cov_diag + sec_term) / 2)
coef = self._lengths.prod(1) * np.sqrt(diag.prod(1) * det.prod(1))
avg = np.add.reduceat(self._alpha * exp, self._size_starts)
payoffs = self._coefs * coef * avg + self._offset
if not jacobian:
return payoffs
beta = 1 - players * mixture * diag
jac_coef = (
((beta ** 2 - 1) * det.repeat(self.num_role_strats, 1) +
players * diag) * avg[:, None])
delta = np.repeat(cov_outer * det_sizes, self.num_role_strats, 1)
jac_exp = -self._alpha[:, None] * exp[:, None] * (
(delta * beta.repeat(self._sizes, 0) - diff * diag_sizes - 1) ** 2
- (delta - 1) ** 2)
jac_avg = (players * np.add.reduceat(jac_exp, self._size_starts, 0))
jac = -self._coefs[:, None] * coef[:, None] * (jac_coef + jac_avg) / 2
return payoffs, jac
# TODO Add function that creates sample game which draws payoffs from the
# gp distribution
def restrict(self, restriction):
restriction = np.asarray(restriction, bool)
base = rsgame.empty_copy(self).restrict(restriction)
size_mask = restriction.repeat(self._sizes)
sizes = self._sizes[restriction]
profiles = self._profiles[size_mask]
lengths = self._lengths[restriction]
zeros = (profiles[:, ~restriction] /
lengths[:, ~restriction].repeat(sizes, 0))
removed = np.exp(-np.einsum('ij,ij->i', zeros, zeros) / 2) # pylint: disable=invalid-unary-operand-type
uprofs, inds = np.unique(
recfunctions.merge_arrays([
np.arange(restriction.sum()).repeat(sizes).view([('s', int)]),
utils.axis_to_elem(profiles[:, restriction])], flatten=True),
return_inverse=True)
new_alpha = np.bincount(inds, removed * self._alpha[size_mask])
new_sizes = np.diff(np.concatenate([
[-1], np.flatnonzero(np.diff(uprofs['s'])),
[new_alpha.size - 1]]))
return _RbfGpGame(
base.role_names, base.strat_names, base.num_role_players,
self._offset[restriction], self._coefs[restriction],
lengths[:, restriction], new_sizes, uprofs['axis'], new_alpha)
def _add_constant(self, constant):
off = np.broadcast_to(constant, self.num_roles).repeat(
self.num_role_strats)
return _RbfGpGame(
self.role_names, self.strat_names, self.num_role_players,
self._offset + off, self._coefs, self._lengths, self._sizes,
self._profiles, self._alpha)
def _multiply_constant(self, constant):
mul = np.broadcast_to(constant, self.num_roles).repeat(
self.num_role_strats)
return _RbfGpGame(
self.role_names, self.strat_names, self.num_role_players,
self._offset * mul, self._coefs * mul, self._lengths, self._sizes,
self._profiles, self._alpha)
def _add_game(self, _):
return NotImplemented
def to_json(self):
base = super().to_json()
base['offsets'] = self.payoff_to_json(self._offset)
base['coefs'] = self.payoff_to_json(self._coefs)
lengths = {}
for role, strats, lens in zip(
self.role_names, self.strat_names,
np.split(self._lengths, self.role_starts[1:])):
lengths[role] = {s: self.payoff_to_json(l)
for s, l in zip(strats, lens)}
base['lengths'] = lengths
profs = {}
for role, strats, data in zip(
self.role_names, self.strat_names,
np.split(np.split(self._profiles, self._size_starts[1:]),
self.role_starts[1:])):
profs[role] = {strat: [self.profile_to_json(p) for p in dat]
for strat, dat in zip(strats, data)}
base['profiles'] = profs
alphas = {}
for role, strats, alphs in zip(
self.role_names, self.strat_names,
np.split(np.split(self._alpha, self._size_starts[1:]),
self.role_starts[1:])):
alphas[role] = {s: a.tolist() for s, a in zip(strats, alphs)}
base['alphas'] = alphas
base['type'] = 'rbf.1'
return base
def __eq__(self, othr):
# pylint: disable-msg=protected-access
return (super().__eq__(othr) and
np.allclose(self._offset, othr._offset) and
np.allclose(self._coefs, othr._coefs) and
np.allclose(self._lengths, othr._lengths) and
np.all(self._sizes == othr._sizes) and
utils.allclose_perm(
np.concatenate([
np.arange(self.num_strats).repeat(
self._sizes)[:, None],
self._profiles, self._alpha[:, None]], 1),
np.concatenate([
np.arange(othr.num_strats).repeat(
othr._sizes)[:, None],
othr._profiles, othr._alpha[:, None]], 1)))
@utils.memoize
def __hash__(self):
hprofs = np.sort(utils.axis_to_elem(np.concatenate([
np.arange(self.num_strats).repeat(self._sizes)[:, None],
self._profiles], 1))).tobytes()
return hash((super().__hash__(), hprofs))
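# Illustrative sketch, not part of the class above: per strategy the model
# stores an offset, an RBF output scale (coef), per-dimension length scales
# and dual weights alpha, so a payoff prediction at a single opponent profile
# is roughly offset + coef * sum_i alpha_i * exp(-0.5 * ||(x - x_i) / l||^2).
# The class above evaluates the expectation of this quantity under a mixture
# in closed form; all names below are local to this example.
def _example_rbf_payoff(profile, offset, coef, lengths, train_profiles, alpha):
    """Evaluate the RBF prediction at one opponent profile (toy, numpy-only)."""
    diffs = (np.asarray(profile) - np.asarray(train_profiles)) / np.asarray(lengths)
    kernel = np.exp(-0.5 * np.einsum('ij,ij->i', diffs, diffs))
    return offset + coef * float(np.dot(np.asarray(alpha), kernel))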
def rbfgame_train(game, num_restarts=3): # pylint: disable=too-many-locals
"""Train a regression game with an RBF Gaussian process
This model is somewhat well tested and has a few added benefits over
standard regression models due to the nature of its functional form.
Parameters
----------
game : RsGame
The game to learn. Must have at least one payoff per strategy.
num_restarts : int, optional
The number of random restarts to make with the optimizer. Higher
numbers will give a better fit (in expectation), but will take
longer.
"""
dev_players = np.maximum(game.num_role_players - np.eye(
game.num_roles, dtype=int), 1).repeat(
game.num_role_strats, 0).repeat(game.num_role_strats, 1)
bounds = np.insert(dev_players[..., None], 0, 1, 2)
# TODO Add an alpha that is smaller for points near the edge of the
# simplex, accounting for the importance of minimizing error at the
# extrema.
means = np.empty(game.num_strats)
coefs = np.empty(game.num_strats)
lengths = np.empty((game.num_strats, game.num_strats))
profiles = []
alpha = []
sizes = []
for (strat, profs, pays), bound in zip(_dev_profpay(game), bounds):
pay_mean = pays.mean()
pays -= pay_mean
reg = gp.GaussianProcessRegressor(
1.0 * gp.kernels.RBF(bound.mean(1), bound) +
gp.kernels.WhiteKernel(1), n_restarts_optimizer=num_restarts,
copy_X_train=False)
reg.fit(profs, pays)
means[strat] = pay_mean
coefs[strat] = reg.kernel_.k1.k1.constant_value
lengths[strat] = reg.kernel_.k1.k2.length_scale
uprofs, inds = np.unique(
utils.axis_to_elem(profs), return_inverse=True)
profiles.append(utils.axis_from_elem(uprofs))
alpha.append(np.bincount(inds, reg.alpha_))
sizes.append(uprofs.size)
if np.any(lengths[..., None] == bounds):
warnings.warn(
'some lengths were at their bounds, this may indicate a poor '
'fit')
return _RbfGpGame(
game.role_names, game.strat_names, game.num_role_players, means, coefs,
lengths, np.array(sizes), np.concatenate(profiles),
np.concatenate(alpha))
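# Illustrative sketch, not used by the library: how the per-strategy fit in
# rbfgame_train maps onto scikit-learn. The data and bounds are toy values;
# the attribute paths (kernel_.k1.k1 / kernel_.k1.k2, alpha_) follow the
# ConstantKernel * RBF + WhiteKernel composition used above.
def _example_single_strategy_fit():
    from sklearn import gaussian_process as sk_gp
    profs = np.array([[2, 0], [1, 1], [0, 2]], float)  # toy deviation profiles
    pays = np.array([1.0, 0.5, 0.0])                    # toy payoffs
    kernel = (1.0 * sk_gp.kernels.RBF([1.0, 1.0], [(1e-3, 2.0)] * 2) +
              sk_gp.kernels.WhiteKernel(1))
    reg = sk_gp.GaussianProcessRegressor(kernel, n_restarts_optimizer=1)
    reg.fit(profs, pays - pays.mean())
    coef = reg.kernel_.k1.k1.constant_value   # RBF output scale
    lengths = reg.kernel_.k1.k2.length_scale  # per-dimension length scales
    alpha = reg.alpha_                        # dual weights, one per profile
    return coef, lengths, alpha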
def rbfgame_json(json):
"""Read an rbf game from json"""
utils.check(json['type'].split('.', 1)[0] == 'rbf', 'incorrect type')
base = rsgame.empty_json(json)
offsets = base.payoff_from_json(json['offsets'])
coefs = base.payoff_from_json(json['coefs'])
lengths = np.empty((base.num_strats,) * 2)
for role, strats in json['lengths'].items():
for strat, pay in strats.items():
ind = base.role_strat_index(role, strat)
base.payoff_from_json(pay, lengths[ind])
profiles = [None] * base.num_strats
for role, strats in json['profiles'].items():
for strat, profs in strats.items():
ind = base.role_strat_index(role, strat)
profiles[ind] = np.stack([
base.profile_from_json(p, verify=False) for p in profs])
alphas = [None] * base.num_strats
for role, strats in json['alphas'].items():
for strat, alph in strats.items():
ind = base.role_strat_index(role, strat)
alphas[ind] = np.array(alph)
sizes = np.fromiter( # pragma: no branch
(a.size for a in alphas), int, base.num_strats)
return _RbfGpGame(
base.role_names, base.strat_names, base.num_role_players, offsets,
coefs, lengths, sizes, np.concatenate(profiles),
np.concatenate(alphas))
class _DeviationGame(rsgame._CompleteGame): # pylint: disable=abstract-method,protected-access
"""A game that adds deviation payoffs"""
def __init__(self, model_game):
super().__init__(model_game.role_names, model_game.strat_names,
model_game.num_role_players)
utils.check(
model_game.is_complete(),
'deviation models must be complete games')
self.model = model_game
def get_payoffs(self, profiles):
return self.model.get_payoffs(profiles)
def profiles(self):
return self.model.profiles()
def payoffs(self):
return self.model.payoffs()
def max_strat_payoffs(self):
return self.model.max_strat_payoffs()
def min_strat_payoffs(self):
return self.model.min_strat_payoffs()
def to_json(self):
base = super().to_json()
base['model'] = self.model.to_json()
return base
def __eq__(self, othr):
return (super().__eq__(othr) and
self.model == othr.model)
@utils.memoize
def __hash__(self):
return hash((super().__hash__(), self.model))
class _SampleDeviationGame(_DeviationGame):
"""Deviation payoffs by sampling from mixture
This model produces unbiased deviation payoff estimates, but they're noisy
and random and take a while to compute. This is accurate in the limit as
`num_samples` goes to infinity.
Parameters
----------
model : DevRegressionGame
A payoff model
num_samples : int, optional
The number of samples to use for each deviation estimate. Higher means
lower variance but higher computation time.
"""
def __init__(self, model, num_samples=100):
super().__init__(model)
utils.check(num_samples > 0, 'num samples must be greater than 0')
# TODO It might be interesting to play with a sample schedule, i.e.
# change the number of samples based off of the query number to
# deviation payoffs (i.e. reduce variance as we get close to
# convergence)
self.num_samples = num_samples
def deviation_payoffs(self, mixture, *, jacobian=False, **_):
"""Compute the deivation payoffs
The method computes the jacobian as if we were importance sampling the
results, i.e. the function is really always sample according to mixture
m', but then importance sample to get the actual result."""
profs = self.random_role_deviation_profiles(self.num_samples, mixture)
payoffs = self.model.get_dev_payoffs(profs)
dev_pays = payoffs.mean(0)
if not jacobian:
return dev_pays
supp = mixture > 0
weights = np.zeros(profs.shape)
weights[..., supp] = profs[..., supp] / mixture[supp]
jac = np.einsum('ij,ijk->jk', payoffs, weights.repeat(
self.num_role_strats, 1)) / self.num_samples
return dev_pays, jac
def restrict(self, restriction):
return _SampleDeviationGame(
self.model.restrict(restriction), self.num_samples)
def _add_constant(self, constant):
return _SampleDeviationGame(self.model + constant, self.num_samples)
def _multiply_constant(self, constant):
return _SampleDeviationGame(self.model * constant, self.num_samples)
def _add_game(self, othr):
try:
assert self.num_samples == othr.num_samples
return _SampleDeviationGame(
self.model + othr.model, self.num_samples)
except (AttributeError, AssertionError):
return NotImplemented
def to_json(self):
base = super().to_json()
base['samples'] = self.num_samples
base['type'] = 'sample.1'
return base
def __eq__(self, othr):
return (super().__eq__(othr) and
self.num_samples == othr.num_samples)
@utils.memoize
def __hash__(self):
return hash((super().__hash__(), self.num_samples))
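# Illustrative sketch, standalone: the Monte Carlo estimator that
# _SampleDeviationGame uses, written for a single role and an arbitrary
# payoff oracle `payoff_fn(opponent_profile) -> float`. The mixture must sum
# to one; all names are local to this example.
def _example_sampled_dev_payoff(payoff_fn, mixture, num_opponents, num_samples=100):
    """Average payoff_fn over opponent profiles drawn i.i.d. from the mixture."""
    rng = np.random.default_rng(0)
    profiles = rng.multinomial(num_opponents, mixture, size=num_samples)
    return np.mean([payoff_fn(prof) for prof in profiles])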
def sample(game, num_samples=100):
"""Create a sample game from a model
Parameters
----------
game : RsGame
If this is a payoff model it will be used to take samples; if this is
an existing deviation game, then its underlying model will be used.
num_samples : int, optional
The number of samples to take.
"""
try:
return _SampleDeviationGame(game.model, num_samples=num_samples)
except AttributeError:
return _SampleDeviationGame(game, num_samples=num_samples)
def sample_json(json):
"""Read sample game from json"""
utils.check(
json['type'].split('.', 1)[0] == 'sample', 'incorrect type')
return _SampleDeviationGame(
gamereader.loadj(json['model']), num_samples=json['samples'])
class _PointDeviationGame(_DeviationGame):
"""Deviation payoffs by point approximation
This model computes payoffs by finding the deviation payoffs from the point
estimate of the mixture. It's fast but biased. This is accurate in the
limit as the number of players goes to infinity.
For this to work, the underlying implementation of get_dev_payoffs must
support floating point profiles, which only really makes sense for
regression games. For deviation payoffs to have a jacobian, the underlying
model must also support a jacobian for get_dev_payoffs.
Parameters
----------
model : DevRegressionGame
A payoff model
"""
def __init__(self, model):
super().__init__(model)
self._dev_players = np.repeat(self.num_role_players - np.eye(
self.num_roles, dtype=int), self.num_role_strats, 1)
def deviation_payoffs(self, mixture, *, jacobian=False, **_):
if not jacobian:
return self.model.get_dev_payoffs(self._dev_players * mixture)
dev, jac = self.model.get_dev_payoffs(
self._dev_players * mixture, jacobian=True)
jac *= self._dev_players.repeat(self.num_role_strats, 0)
return dev, jac
def restrict(self, restriction):
return _PointDeviationGame(self.model.restrict(restriction))
def _add_constant(self, constant):
return _PointDeviationGame(self.model + constant)
def _multiply_constant(self, constant):
return _PointDeviationGame(self.model * constant)
def _add_game(self, othr):
try:
assert isinstance(othr, _PointDeviationGame)
return _PointDeviationGame(self.model + othr.model)
except (AttributeError, AssertionError):
return NotImplemented
def to_json(self):
base = super().to_json()
base['type'] = 'point.1'
return base
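# Illustrative sketch, standalone: the point approximation queries the payoff
# model once at the expected opponent profile instead of averaging over
# samples. `dev_payoff_fn` stands in for a regression model that accepts
# fractional profiles; names are local to this example (single role shown).
def _example_point_dev_payoff(dev_payoff_fn, mixture, num_players):
    expected_opponents = (num_players - 1) * np.asarray(mixture)
    return dev_payoff_fn(expected_opponents)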
def point(game):
"""Create a point game from a model
Parameters
----------
game : RsGame
If this is a payoff model it will be used to take samples; if this is
an existing deviation game, then its underlying model will be used.
"""
try:
return _PointDeviationGame(game.model)
except AttributeError:
return _PointDeviationGame(game)
def point_json(json):
"""Read point game from json"""
utils.check(
json['type'].split('.', 1)[0] == 'point', 'incorrect type')
return _PointDeviationGame(gamereader.loadj(json['model']))
class _NeighborDeviationGame(_DeviationGame):
"""Create a neighbor game from a model
This takes a normalized weighted estimate of the deviation payoffs by
finding all profiles within `num_neighbors` of the maximum probability
profile for the mixture and weighting them accordingly. This is biased, but
accurate in the limit as `num_neighbors` approaches `num_players`. It also
produces discontinuities every time the maximum probability profile
switches.
Parameters
----------
game : RsGame
If this is a payoff model it will be used to take samples; if this is
an existing deviation game, then its underlying model will be used.
num_neighbors : int, optional
The number of deviations to take.
"""
def __init__(self, model, num_neighbors=2):
super().__init__(model)
utils.check(num_neighbors >= 0, 'num neighbors must be nonnegative')
self.num_neighbors = num_neighbors
def deviation_payoffs(self, mixture, *, jacobian=False, **_):
# TODO This is not smooth because there are discontinuities when the
# maximum probability profile jumps at the boundary. If we wanted to
# make it smooth, one option would be to compute the smoother
# interpolation between this and lower probability profiles. All we
# need to ensure smoothness is that the weight at profile
# discontinuities is 0.
profiles = self.nearby_profiles(
self.max_prob_prof(mixture), self.num_neighbors)
payoffs = self.get_payoffs(profiles)
game = paygame.game_replace(self, profiles, payoffs)
return game.deviation_payoffs(mixture, ignore_incomplete=True,
jacobian=jacobian)
def restrict(self, restriction):
return _NeighborDeviationGame(
self.model.restrict(restriction), self.num_neighbors)
def _add_constant(self, constant):
return _NeighborDeviationGame(self.model + constant, self.num_neighbors)
def _multiply_constant(self, constant):
return _NeighborDeviationGame(self.model * constant, self.num_neighbors)
def _add_game(self, othr):
try:
assert self.num_neighbors == othr.num_neighbors
return _NeighborDeviationGame(
self.model + othr.model, self.num_neighbors)
except (AttributeError, AssertionError):
return NotImplemented
def to_json(self):
base = super().to_json()
base['neighbors'] = self.num_neighbors
base['type'] = 'neighbor.2'
return base
def __eq__(self, othr):
return super().__eq__(othr) and self.num_neighbors == othr.num_neighbors
@utils.memoize
def __hash__(self):
return hash((super().__hash__(), self.num_neighbors))
def neighbor(game, num_neighbors=2):
"""Create a neighbor game from a model
Parameters
----------
game : RsGame
If this is a payoff model it will be used to take samples; if this is
an existing deviation game, then its underlying model will be used.
num_neighbors : int, optional
The number of deviations to explore out.
"""
try:
return _NeighborDeviationGame(game.model, num_neighbors=num_neighbors)
except AttributeError:
return _NeighborDeviationGame(game, num_neighbors=num_neighbors)
def neighbor_json(json):
"""Read neighbor game from json"""
utils.check(
json['type'].split('.', 1)[0] == 'neighbor', 'incorrect type')
return _NeighborDeviationGame(
gamereader.loadj(json['model']),
num_neighbors=json.get('neighbors', json.get('devs', None)))
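# Illustrative sketch, standalone: the serialized games above all tag their
# json with a '<name>.<version>' type string, so a reader can dispatch on the
# prefix before the first dot. The mapping below is local to this example and
# only covers the readers defined in this module.
def _example_dispatch_reader(json_game):
    readers = {
        'rbf': rbfgame_json,
        'sample': sample_json,
        'point': point_json,
        'neighbor': neighbor_json,
    }
    return readers[json_game['type'].split('.', 1)[0]](json_game)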
|
egtaonline/GameAnalysis
|
gameanalysis/learning.py
|
Python
|
apache-2.0
| 32,808
|
[
"Gaussian"
] |
cca450aafab483253d4f4250b0071a2119fffcea7b3aa3779eabde40af4bee53
|
#!/usr/bin/env python
import datetime
import json
import unittest
from unittest.mock import MagicMock, PropertyMock, patch
from data.variable import Variable
from data.variable_list import VariableList
from oceannavigator import DatasetConfig, create_app
app = create_app(testing=True)
# Note that patches are applied in bottom-up order
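# Illustrative sketch (not part of the suite): stacked @patch decorators are
# applied bottom-up, so the decorator closest to the function supplies the
# first mock argument (after self in a test method). The targets below are
# stdlib names used purely for demonstration.
def _patch_order_example():
    from unittest.mock import patch

    @patch("os.getcwd")   # applied last  -> second mock argument
    @patch("os.getpid")   # applied first -> first mock argument
    def check(mock_getpid, mock_getcwd):
        mock_getpid.return_value = 1
        mock_getcwd.return_value = "/tmp"
        return mock_getpid(), mock_getcwd()

    return check()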
class TestAPIv1(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
with open("tests/testdata/endpoints.json") as endpoints:
self.apiLinks = json.load(endpoints)
with open("tests/testdata/datasetconfigpatch.json") as dataPatch:
self.patch_dataset_config_ret_val = json.load(dataPatch)
self.patch_data_vars_ret_val = VariableList(
[
Variable(
"votemper",
"Water temperature at CMC",
"Kelvins",
sorted(["deptht", "time_counter", "y", "x"]),
)
]
)
def __get_response_data(self, resp):
return json.loads(resp.get_data(as_text=True))
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_variables_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
res = self.app.get("/api/v1.0/variables/?dataset=giops&3d_only")
self.assertEqual(res.status_code, 200)
resp_data = self.__get_response_data(res)
self.assertEqual(len(resp_data), 1)
self.assertEqual(resp_data[0]["id"], "votemper")
self.assertEqual(resp_data[0]["scale"], [-5, 30])
self.assertEqual(resp_data[0]["value"], "Temperature")
res = self.app.get("/api/v1.0/variables/?dataset=giops")
self.assertEqual(res.status_code, 200)
resp_data = self.__get_response_data(res)
self.assertEqual(len(resp_data), 1)
res = self.app.get("/api/v1.0/variables/?dataset=giops&vectors_only")
self.assertEqual(res.status_code, 200)
resp_data = self.__get_response_data(res)
self.assertEqual(len(resp_data), 0)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_latest_timestamp")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_depth_endpoint(
self, patch_get_data_vars, patch_get_latest_timestamp, patch_get_dataset_config
):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_latest_timestamp.return_value = 2034072000
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
res = self.app.get("/api/v1.0/depth/?dataset=giops&variable=votemper")
self.assertEqual(res.status_code, 200)
res_data = self.__get_response_data(res)
self.assertEqual(len(res_data), 51)
self.assertEqual(res_data[0]["id"], "bottom")
self.assertEqual(res_data[0]["value"], "Bottom")
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
@patch("data.sqlite_database.SQLiteDatabase.get_timestamps")
def test_timestamps_endpoint_sqlite(
self,
patch_get_all_timestamps,
patch_get_data_variables,
patch_get_dataset_config,
):
patch_get_all_timestamps.return_value = sorted([2031436800, 2034072000])
patch_get_data_variables.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
res = self.app.get(
"/api/v1.0/timestamps/?dataset=nemo_sqlite3&variable=votemper"
)
self.assertEqual(res.status_code, 200)
res_data = self.__get_response_data(res)
self.assertEqual(len(res_data), 2)
self.assertEqual(res_data[0]["id"], 2031436800)
self.assertEqual(res_data[0]["value"], "2014-05-17T00:00:00+00:00")
@patch.object(DatasetConfig, "_get_dataset_config")
def test_timestamps_endpoint_xarray(self, patch_get_dataset_config):
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
res = self.app.get("/api/v1.0/timestamps/?dataset=giops&variable=votemper")
self.assertEqual(res.status_code, 200)
res_data = self.__get_response_data(res)
self.assertEqual(len(res_data), 2)
self.assertEqual(res_data[0]["id"], 2031436800)
self.assertEqual(res_data[0]["value"], "2014-05-17T00:00:00+00:00")
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_scale_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
res = self.app.get("/api/v1.0/scale/giops/votemper/-5,30.png")
self.assertEqual(res.status_code, 200)
@patch.object(DatasetConfig, "get_datasets")
@patch.object(DatasetConfig, "_get_dataset_config")
def test_datasets_endpoint(self, patch_get_dataset_config, patch_get_datasets):
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
patch_get_datasets.return_value = ["giops"]
res = self.app.get("/api/v1.0/datasets/")
self.assertEqual(res.status_code, 200)
def test_colors_endpoint(self):
res = self.app.get("/api/v1.0/colors/")
self.assertEqual(res.status_code, 200)
def test_colormaps_endpoint(self):
res = self.app.get("/api/v1.0/colormaps/")
self.assertEqual(res.status_code, 200)
res_data = self.__get_response_data(res)
self.assertIn({"id": "temperature", "value": "Temperature"}, res_data)
def test_colormaps_image_endpoint(self):
res = self.app.get("/api/v1.0/colormaps.png")
self.assertEqual(res.status_code, 200)
@patch.object(DatasetConfig, "_get_dataset_config")
def test_quantum_query(self, patch_get_dataset_config):
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
res = self.app.get("/api/v1.0/quantum/?dataset=giops")
self.assertEqual(res.status_code, 200)
res_data = self.__get_response_data(res)
self.assertEqual(res_data, "day")
def test_api_info(self):
res = self.app.get("/api/")
self.assertEqual(res.status_code, 400)
res = self.app.get("/api/v1.0/")
self.assertEqual(res.status_code, 400)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_range_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
res = self.app.get(self.apiLinks["range"])
self.assertEqual(res.status_code, 200)
# OverflowError: signed integer is greater than maximum
@unittest.skip("Skipping api/data.. problem with timestamp conversion")
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_data_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
res = self.app.get(
"/api/v1.0/data/giops_real/votemper/2212704000/0/60,-29.json"
)
self.assertEqual(res.status_code, 200)
def test_class4_models_endpoint(self):
res = self.app.get(
"/api/v1.0/class4/models/class4_20190102_GIOPS_CONCEPTS_2.3_profile/"
)
self.assertEqual(res.status_code, 200)
# RuntimeError: Opening a dataset via sqlite requires the 'variable' keyword argument.
@unittest.skip("Skipping api/stats.. needs re-write")
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_stats_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
res = self.app.get(self.apiLinks["stats"])
self.assertEqual(res.status_code, 200)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.netcdf_data.NetCDFData._get_xarray_data_variables")
def test_subset_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
res = self.app.get(self.apiLinks["subset"])
self.assertEqual(res.status_code, 200)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.netcdf_data.NetCDFData._get_xarray_data_variables")
def test_plot_map_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
# map (area)
res = self.app.get(self.apiLinks["plot_map"])
self.assertEqual(res.status_code, 200)
res = self.app.get(self.apiLinks["plot_map_csv"])
self.assertEqual(res.status_code, 200)
res = self.app.get(self.apiLinks["plot_map_quiver_len_mag"])
self.assertEqual(res.status_code, 200)
res = self.app.get(self.apiLinks["plot_map_quiver_no_mag"])
self.assertEqual(res.status_code, 200)
res = self.app.get(self.apiLinks["plot_map_quiver_color_mag"])
self.assertEqual(res.status_code, 200)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.netcdf_data.NetCDFData._get_xarray_data_variables")
def test_plot_transect_endpoint(
self, patch_get_data_vars, patch_get_dataset_config
):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
# transect (line)
res = self.app.get(self.apiLinks["plot_transect"])
self.assertEqual(res.status_code, 200)
res = self.app.get(self.apiLinks["plot_transect_depth_limit"])
self.assertEqual(res.status_code, 200)
res = self.app.get(self.apiLinks["plot_transect_csv"])
self.assertEqual(res.status_code, 200)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_plot_timeseries_endpoint(
self, patch_get_data_vars, patch_get_dataset_config
):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
# timeseries (point, virtual mooring)
res = self.app.get(self.apiLinks["plot_timeseries"])
self.assertEqual(res.status_code, 200)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_plot_timeseries_endpoint_all_depths(
self, patch_get_data_vars, patch_get_dataset_config
):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
# timeseries (point, virtual mooring)
res = self.app.get(self.apiLinks["plot_timeseries_all_depths"])
self.assertEqual(res.status_code, 200)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_plot_timeseries_endpoint_bottom_depth(
self, patch_get_data_vars, patch_get_dataset_config
):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
# timeseries (point, virtual mooring)
res = self.app.get(self.apiLinks["plot_timeseries_bottom"])
self.assertEqual(res.status_code, 200)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_plot_ts_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
# ts (point, T/S Plot)
res = self.app.get(self.apiLinks["plot_ts"])
self.assertEqual(res.status_code, 200)
@unittest.skip("Skipping api/plot/sound.. returning error")
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_plot_sound_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
# sound (point, Speed of Sound)
# IndexError: list index out of range
res = self.app.get(self.apiLinks["plot_sound"])
self.assertEqual(res.status_code, 200)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.netcdf_data.NetCDFData._get_xarray_data_variables")
def test_plot_profile_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
# profile (point, profile)
res = self.app.get(self.apiLinks["plot_profile"])
self.assertEqual(res.status_code, 200)
res = self.app.get(self.apiLinks["plot_profile_multi_variable"])
self.assertEqual(res.status_code, 200)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_plot_hovmoller_endpoint(
self, patch_get_data_vars, patch_get_dataset_config
):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
# hovmoller (line, Hovmöller Diagram)
res = self.app.get(self.apiLinks["plot_hovmoller"])
self.assertEqual(res.status_code, 200)
res = self.app.get(self.apiLinks["plot_hovmoller_bottom"])
self.assertEqual(res.status_code, 200)
@unittest.skip("Skipping api/plot/observation.. returning error")
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_plot_observation_endpoint(
self, patch_get_data_vars, patch_get_dataset_config
):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
# observation (point, Observation)
# returns RuntimeError: Opening a dataset via sqlite requires the 'timestamp' keyword argument.
res = self.app.get(self.apiLinks["plot_observation"])
self.assertEqual(res.status_code, 200)
@unittest.skip("Skipping api/plot/stickplot.. explaination in definition..")
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_plot_stick_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
# stick (point, Stick Plot) returns NameError: name 'timestamp' is not defined
# or RuntimeError: Error finding timestamp(s) in database.
res = self.app.get(self.apiLinks["plot_stick"])
self.assertEqual(res.status_code, 200)
def test_query_endpoint(self):
# response for each type of query
res = []
res.append(self.app.get("/api/v1.0/points/"))
res.append(self.app.get("/api/v1.0/lines/"))
res.append(self.app.get("/api/v1.0/areas/"))
res.append(self.app.get("/api/v1.0/class4/"))
for i in range(4):
self.assertEqual(res[i].status_code, 200)
@unittest.skip("IndexError: list index out of range")
def test_query_id_endpoint(self):
res = []
res.append(self.app.get("/api/v1.0/areas/2015_VME_Closures.json"))
res.append(
self.app.get(
"/api/v1.0/class4/class4_20200102_GIOPS_CONCEPTS_3.0_profile.json"
)
)
for i in range(len(res)):
self.assertEqual(res[i].status_code, 200)
@unittest.skip("IndexError: list index out of range")
def test_query_file_endpoint(self):
res = []
# points
res.append(
self.app.get(
"/api/v1.0/points/EPSG:3857/9784/-15938038,1751325,4803914,12220141/NL-AZMP_Stations.json"
)
)
# lines
res.append(
self.app.get(
"/api/v1.0/lines/EPSG:3857/9784/-15938038,1751325,4803914,12220141/AZMP%20Transects.json"
)
)
# areas
res.append(
self.app.get(
"/api/v1.0/areas/EPSG:3857/9784/-15938038,1751325,4803914,12220141/AZMP_NL_Region_Analysis_Areas.json"
)
)
# class4
res.append(
self.app.get(
"/api/v1.0/class4/EPSG:3857/9784/-15938038,1751325,4803914,12220141/class4_20200101_GIOPS_CONCEPTS_3.0_profile.json"
)
)
for i in range(len(res)):
self.assertEqual(res[i].status_code, 200)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.netcdf_data.NetCDFData._get_xarray_data_variables")
def test_tile_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
res = self.app.get(
"/api/v1.0/tiles/gaussian/25/10/EPSG:3857/giops_real/votemper/2212704000/0/-5,30/6/50/40.png"
)
self.assertEqual(res.status_code, 200)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_topo_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
res = self.app.get("/api/v1.0/tiles/topo/false/EPSG:3857/6/52/41.png")
self.assertEqual(res.status_code, 200)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_bath_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
res = self.app.get("/api/v1.0/tiles/bath/EPSG:3857/6/56/41.png")
self.assertEqual(res.status_code, 200)
@patch.object(DatasetConfig, "_get_dataset_config")
@patch("data.sqlite_database.SQLiteDatabase.get_data_variables")
def test_mbt_endpoint(self, patch_get_data_vars, patch_get_dataset_config):
patch_get_data_vars.return_value = self.patch_data_vars_ret_val
patch_get_dataset_config.return_value = self.patch_dataset_config_ret_val
res = self.app.get("/api/v1.0/mbt/EPSG:3857/lands/7/105/77")
self.assertEqual(res.status_code, 200)
@patch("data.observational.queries.get_datatypes")
def test_observation_datatypes(self, patch_get_datatypes):
patch_get_datatypes.return_value = [PropertyMock(key="mykey")]
patch_get_datatypes.return_value[0].name = "myname"
res = self.app.get(self.apiLinks["observation_datatypes"])
self.assertEqual(res.status_code, 200)
data = self.__get_response_data(res)
self.assertDictEqual(data[0], {"id": "mykey", "value": "myname"})
@patch("data.observational.db.session")
@patch("data.observational.queries.get_meta_keys")
def test_observation_meta_keys(self, patch_get_meta_keys, patch_session):
patch_get_meta_keys.return_value = ["this is a test"]
res = self.app.get(self.apiLinks["observation_meta_keys"])
self.assertEqual(res.status_code, 200)
patch_get_meta_keys.assert_called_with(patch_session, ["platform_type"])
data = self.__get_response_data(res)
self.assertEqual(data[0], "this is a test")
@patch("data.observational.db.session")
@patch("data.observational.queries.get_meta_values")
def test_observation_meta_values(self, patch_get_meta_values, patch_session):
patch_get_meta_values.return_value = ["this is a test"]
res = self.app.get(self.apiLinks["observation_meta_values"])
self.assertEqual(res.status_code, 200)
patch_get_meta_values.assert_called_with(
patch_session, ["platform_type"], "key"
)
data = self.__get_response_data(res)
self.assertEqual(data[0], "this is a test")
@patch("data.observational.db.session")
@patch("data.observational.queries.get_platform_tracks")
def test_observation_track(self, patch_get_platform_tracks, patch_session):
typ = PropertyMock()
typ.name = "none"
patch_get_platform_tracks.return_value = [
[0, typ, 0, 0],
[0, typ, 1, 1],
[1, typ, 0, 0],
]
res = self.app.get(self.apiLinks["observation_track"])
self.assertEqual(res.status_code, 200)
patch_get_platform_tracks.assert_called_with(
patch_session, "day", platform_types=["none"]
)
data = self.__get_response_data(res)
self.assertEqual(len(data["features"]), 1)
self.assertIn([0, 0], data["features"][0]["geometry"]["coordinates"])
@patch("data.observational.db.session")
@patch("data.observational.queries.get_stations")
def test_observation_point(self, patch_get_stations, patch_session):
platform_type = PropertyMock()
platform_type.name = "platform_type"
station = PropertyMock(
platform=PropertyMock(type=platform_type),
latitude=0,
longitude=0,
id=0,
)
station.name = "myname"
patch_get_stations.return_value = [station]
res = self.app.get(self.apiLinks["observation_point"])
self.assertEqual(res.status_code, 200)
patch_get_stations.assert_called_with(
session=patch_session, platform_types=["none"]
)
data = self.__get_response_data(res)
self.assertEqual(len(data["features"]), 1)
self.assertEqual([0, 0], data["features"][0]["geometry"]["coordinates"])
@patch("data.observational.db.session.query")
def test_observation_variables(self, patch_query):
query_return = MagicMock()
filter_return = MagicMock()
order_return = MagicMock()
patch_query.return_value = query_return
query_return.filter = MagicMock(return_value=filter_return)
filter_return.order_by = MagicMock(return_value=order_return)
variable0 = PropertyMock()
variable0.name = "variable0"
variable1 = PropertyMock()
variable1.name = "variable1"
order_return.all = MagicMock(return_value=[variable0, variable1])
res = self.app.get(self.apiLinks["observation_variables"])
self.assertEqual(res.status_code, 200)
data = self.__get_response_data(res)
self.assertEqual(len(data), 2)
self.assertDictEqual(data[0], {"id": 0, "value": "variable0"})
self.assertDictEqual(data[1], {"id": 1, "value": "variable1"})
@patch("data.observational.db.session.query")
def test_observation_tracktimerange(self, patch_query):
query_return = MagicMock()
filter_return = MagicMock()
patch_query.return_value = query_return
query_return.filter = MagicMock(return_value=filter_return)
filter_return.one = MagicMock(
return_value=[
datetime.datetime(2010, 1, 1),
datetime.datetime(2020, 1, 1),
]
)
res = self.app.get(self.apiLinks["observation_tracktimerange"])
self.assertEqual(res.status_code, 200)
data = self.__get_response_data(res)
self.assertEqual(data["min"], "2010-01-01T00:00:00")
self.assertEqual(data["max"], "2020-01-01T00:00:00")
@patch("data.observational.db.session.query")
def test_observation_meta(self, patch_query):
query_return = MagicMock()
patch_query.return_value = query_return
platform = PropertyMock(
attrs={
"attr0": "attribute0",
"attr1": "attribute1",
},
type=PropertyMock(),
)
platform.type.name = "platform_type"
query_return.get = MagicMock()
query_return.get.return_value = platform
res = self.app.get(
self.apiLinks["observation_meta"],
query_string={
"type": "platform",
"id": 123,
},
)
data = self.__get_response_data(res)
query_return.get.assert_called_with("123")
self.assertDictEqual(
data,
{
"Platform Type": "platform_type",
"attr0": "attribute0",
"attr1": "attribute1",
},
)
if __name__ == "__main__":
unittest.main()
|
DFO-Ocean-Navigator/Ocean-Data-Map-Project
|
tests/test_api_v_1_0.py
|
Python
|
gpl-3.0
| 26,272
|
[
"Gaussian"
] |
6e5a895a007d6ca253768dcde9317f7a8e3cd00ba157d03ae249820dcc2f71fd
|
########################################################################
# $Id$
########################################################################
""" The TimeLeft utility allows to calculate the amount of CPU time
left for a given batch system slot. This is essential for the 'Filling
Mode' where several VO jobs may be executed in the same allocated slot.
The prerequisites for the utility to run are:
- Plugin for extracting information from local batch system
- Scale factor for the local site.
With this information the utility can calculate in normalized units the
CPU time remaining for a given slot.
"""
__RCSID__ = "$Id$"
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.Utilities.Subprocess import shellCall
import DIRAC
import os
class TimeLeft:
#############################################################################
def __init__( self ):
""" Standard constructor
"""
self.log = gLogger.getSubLogger( 'TimeLeft' )
# This is the ratio SpecInt published by the site over 250 (the reference used for Matching)
self.scaleFactor = gConfig.getValue( '/LocalSite/CPUScalingFactor', 0.0 )
if not self.scaleFactor:
self.log.warn( '/LocalSite/CPUScalingFactor not defined for site %s' % DIRAC.siteName() )
self.normFactor = gConfig.getValue( '/LocalSite/CPUNormalizationFactor', 0.0 )
if not self.normFactor:
self.log.warn( '/LocalSite/CPUNormalizationFactor not defined for site %s' % DIRAC.siteName() )
self.cpuMargin = gConfig.getValue( '/LocalSite/CPUMargin', 10 ) #percent
result = self.__getBatchSystemPlugin()
if result['OK']:
self.batchPlugin = result['Value']
else:
self.batchPlugin = None
self.batchError = result['Message']
def getScaledCPU( self ):
"""Returns the current CPU Time spend (according to batch system) scaled according
to /LocalSite/CPUScalingFactor
"""
#Quit if no scale factor available
if not self.scaleFactor:
return S_OK( 0.0 )
#Quit if Plugin is not available
if not self.batchPlugin:
return S_OK( 0.0 )
resourceDict = self.batchPlugin.getResourceUsage()
if 'Value' in resourceDict and resourceDict['Value']['CPU']:
return S_OK( resourceDict['Value']['CPU'] * self.scaleFactor )
return S_OK( 0.0 )
#############################################################################
def getTimeLeft( self, cpuConsumed = 0.0 ):
"""Returns the CPU Time Left for supported batch systems. The CPUConsumed
is the current raw total CPU.
"""
#Quit if no scale factor available
if not self.scaleFactor:
return S_ERROR( '/LocalSite/CPUScalingFactor not defined for site %s' % DIRAC.siteName() )
if not self.batchPlugin:
return S_ERROR( self.batchError )
resourceDict = self.batchPlugin.getResourceUsage()
if not resourceDict['OK']:
self.log.warn( 'Could not determine timeleft for batch system at site %s' % DIRAC.siteName() )
return resourceDict
resources = resourceDict['Value']
self.log.verbose( resources )
if not resources['CPULimit'] or not resources['WallClockLimit']:
return S_ERROR( 'No CPU / WallClock limits obtained' )
cpu = float( resources['CPU'] )
cpuFactor = 100 * float( resources['CPU'] ) / float( resources['CPULimit'] )
cpuRemaining = 100 - cpuFactor
cpuLimit = float( resources['CPULimit'] )
wcFactor = 100 * float( resources['WallClock'] ) / float( resources['WallClockLimit'] )
wcRemaining = 100 - wcFactor
wcLimit = float( resources['WallClockLimit'] )
self.log.verbose( 'Used CPU is %.02f, Used WallClock is %.02f.' % ( cpuFactor, wcFactor ) )
self.log.verbose( 'Remaining WallClock %.02f, Remaining CPU %.02f, margin %s' %
( wcRemaining, cpuRemaining, self.cpuMargin ) )
timeLeft = None
if wcRemaining > cpuRemaining and ( wcRemaining - cpuRemaining ) > self.cpuMargin:
self.log.verbose( 'Remaining WallClock %.02f > Remaining CPU %.02f and difference > margin %s' %
( wcRemaining, cpuRemaining, self.cpuMargin ) )
timeLeft = True
else:
if cpuRemaining > self.cpuMargin and wcRemaining > self.cpuMargin:
self.log.verbose( 'Remaining WallClock %.02f and Remaining CPU %.02f both > margin %s' %
( wcRemaining, cpuRemaining, self.cpuMargin ) )
timeLeft = True
else:
self.log.verbose( 'Remaining CPU %.02f < margin %s and WallClock %.02f < margin %s so no time left' %
( cpuRemaining, self.cpuMargin, wcRemaining, self.cpuMargin ) )
if timeLeft:
if cpu and cpuConsumed > 3600. and self.normFactor:
# If there has been more than 1 hour of consumed CPU and
# there is a Normalization set for the current CPU
# use that value to renormalize the values returned by the batch system
cpuWork = cpuConsumed * self.normFactor
timeLeft = ( cpuLimit - cpu ) * cpuWork / cpu
else:
# In some cases cpuFactor might be 0
# timeLeft = float(cpuConsumed*self.scaleFactor*cpuRemaining/cpuFactor)
# We need time left in the same units used by the Matching
timeLeft = float( cpuRemaining * cpuLimit / 100 * self.scaleFactor )
self.log.verbose( 'Remaining CPU in normalized units is: %.02f' % timeLeft )
return S_OK( timeLeft )
else:
return S_ERROR( 'No time left for slot' )
#############################################################################
def __getBatchSystemPlugin( self ):
"""Using the name of the batch system plugin, will return an instance
of the plugin class.
"""
batchSystems = {'LSF':'LSB_JOBID', 'PBS':'PBS_JOBID', 'BQS':'QSUB_REQNAME', 'SGE':'SGE_TASK_ID'} #more to be added later
name = None
for batchSystem, envVar in batchSystems.items():
if os.environ.has_key( envVar ):
name = batchSystem
break
if name is None:
self.log.warn( 'Batch system type for site %s is not currently supported' % DIRAC.siteName() )
return S_ERROR( 'Current batch system is not supported' )
self.log.debug( 'Creating plugin for %s batch system' % ( name ) )
try:
batchSystemName = "%sTimeLeft" % ( name )
batchPlugin = __import__( 'DIRAC.Core.Utilities.TimeLeft.%s' %
batchSystemName, globals(), locals(), [batchSystemName] )
except Exception, x:
msg = 'Could not import DIRAC.Core.Utilities.TimeLeft.%s' % ( batchSystemName )
self.log.warn( x )
self.log.warn( msg )
return S_ERROR( msg )
try:
batchStr = 'batchPlugin.%s()' % ( batchSystemName )
batchInstance = eval( batchStr )
except Exception, x:
msg = 'Could not instantiate %s()' % ( batchSystemName )
self.log.warn( x )
self.log.warn( msg )
return S_ERROR( msg )
return S_OK( batchInstance )
#############################################################################
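# Illustrative sketch, not used by the class above: the two normalization
# branches of getTimeLeft() reduced to plain arithmetic. Variable names are
# local to this example.
def exampleTimeLeft( cpu, cpuLimit, cpuConsumed, scaleFactor, normFactor ):
  """Return the CPU time left for a slot in normalized units."""
  if cpu and cpuConsumed > 3600. and normFactor:
    # Renormalize using the consumption rate observed for this job
    cpuWork = cpuConsumed * normFactor
    return ( cpuLimit - cpu ) * cpuWork / cpu
  # Otherwise fall back to the site scale factor; this is equivalent to
  # ( cpuLimit - cpu ) * scaleFactor
  cpuRemaining = 100.0 - 100.0 * float( cpu ) / float( cpuLimit )
  return cpuRemaining * cpuLimit / 100 * scaleFactor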
def runCommand( cmd, timeout = 120 ):
"""Wrapper around shellCall to return S_OK(stdout) or S_ERROR(message)
"""
result = shellCall( timeout, cmd )
if not result['OK']:
return result
status = result['Value'][0]
stdout = result['Value'][1]
stderr = result['Value'][2]
if status:
gLogger.warn( 'Status %s while executing %s' % ( status, cmd ) )
gLogger.warn( stderr )
if stdout:
return S_ERROR( stdout )
if stderr:
return S_ERROR( stderr )
return S_ERROR( 'Status %s while executing %s' % ( status, cmd ) )
else:
return S_OK( stdout )
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
|
sposs/DIRAC
|
Core/Utilities/TimeLeft/TimeLeft.py
|
Python
|
gpl-3.0
| 7,741
|
[
"DIRAC"
] |
0c5e24e60eff4f041906c2113a4a6ed3ecd79333d51ae5f0161c2d56bd9ba1cc
|
# coding=utf-8
# Copyright 2021 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compact implementation of a Soft Actor-Critic agent in JAX.
Based on agent described in
"Soft Actor-Critic Algorithms and Applications"
by Tuomas Haarnoja et al.
https://arxiv.org/abs/1812.05905
"""
import functools
import math
import operator
import time
from typing import Any, Mapping, Tuple
from absl import logging
from dopamine.jax import continuous_networks
from dopamine.jax import losses
from dopamine.jax.agents.dqn import dqn_agent
# pylint: disable=unused-import
# This enables (experimental) networks for SAC from pixels.
# Note, that the full name import is required to avoid a naming
# collision with the short name import (continuous_networks) above.
import dopamine.labs.sac_from_pixels.continuous_networks
# pylint: enable=unused-import
from dopamine.replay_memory import circular_replay_buffer
import flax
from flax import linen as nn
import gin
import jax
import jax.numpy as jnp
import numpy as onp
import optax
import tensorflow as tf
try:
logging.warning(
('Setting tf to CPU only, to avoid OOM. '
'See https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html '
'for more information.'))
tf.config.set_visible_devices([], 'GPU')
except tf.errors.NotFoundError:
logging.info(
('Unable to modify visible devices. '
'If you don\'t have a GPU, this is expected.'))
gin.constant('sac_agent.IMAGE_DTYPE', onp.uint8)
gin.constant('sac_agent.STATE_DTYPE', onp.float32)
@functools.partial(jax.jit, static_argnums=(0, 1, 2))
def train(network_def: nn.Module,
optim: optax.GradientTransformation,
alpha_optim: optax.GradientTransformation,
optimizer_state: jnp.ndarray,
alpha_optimizer_state: jnp.ndarray,
network_params: flax.core.FrozenDict,
target_params: flax.core.FrozenDict,
log_alpha: jnp.ndarray,
key: jnp.ndarray,
states: jnp.ndarray,
actions: jnp.ndarray,
next_states: jnp.ndarray,
rewards: jnp.ndarray,
terminals: jnp.ndarray,
cumulative_gamma: float,
target_entropy: float,
reward_scale_factor: float) -> Mapping[str, Any]:
"""Run the training step.
Returns a mapping of updated values and losses.
Args:
network_def: The SAC network definition.
optim: The SAC optimizer (which also wraps the SAC parameters).
alpha_optim: The optimizer for alpha.
optimizer_state: The SAC optimizer state.
alpha_optimizer_state: The alpha optimizer state.
network_params: Parameters for SAC's online network.
target_params: The parameters for SAC's target network.
log_alpha: Parameters for alpha network.
key: An rng key to use for random action selection.
states: A batch of states.
actions: A batch of actions.
next_states: A batch of next states.
rewards: A batch of rewards.
terminals: A batch of terminals.
cumulative_gamma: The discount factor to use.
target_entropy: The target entropy for the agent.
reward_scale_factor: A factor by which to scale rewards.
Returns:
A mapping from string keys to values, including updated optimizers and
training statistics.
"""
# Keep a frozen copy of the online parameters for use in loss_fn without
# applying gradients, so actor gradients do not update the critic.
frozen_params = network_params
batch_size = states.shape[0]
actions = jnp.reshape(actions, (batch_size, -1)) # Flatten
def loss_fn(
params: flax.core.FrozenDict, log_alpha: flax.core.FrozenDict,
state: jnp.ndarray, action: jnp.ndarray, reward: jnp.ndarray,
next_state: jnp.ndarray, terminal: jnp.ndarray,
rng: jnp.ndarray) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:
"""Calculates the loss for one transition.
Args:
params: Parameters for the SAC network.
log_alpha: SAC's log_alpha parameter.
state: A single state vector.
action: A single action vector.
reward: A reward scalar.
next_state: A next state vector.
terminal: A terminal scalar.
rng: An RNG key to use for sampling actions.
Returns:
A tuple containing 1) the combined SAC loss and 2) a mapping containing
statistics from the loss step.
"""
rng1, rng2 = jax.random.split(rng, 2)
# J_Q(\theta) from equation (5) in paper.
q_value_1, q_value_2 = network_def.apply(
params, state, action, method=network_def.critic)
q_value_1 = jnp.squeeze(q_value_1)
q_value_2 = jnp.squeeze(q_value_2)
target_outputs = network_def.apply(target_params, next_state, rng1, True)
target_q_value_1, target_q_value_2 = target_outputs.critic
target_q_value = jnp.squeeze(
jnp.minimum(target_q_value_1, target_q_value_2))
alpha_value = jnp.exp(log_alpha)
log_prob = target_outputs.actor.log_probability
target = reward_scale_factor * reward + cumulative_gamma * (
target_q_value - alpha_value * log_prob) * (1. - terminal)
target = jax.lax.stop_gradient(target)
critic_loss_1 = losses.mse_loss(q_value_1, target)
critic_loss_2 = losses.mse_loss(q_value_2, target)
critic_loss = jnp.mean(critic_loss_1 + critic_loss_2)
# J_{\pi}(\phi) from equation (9) in paper.
mean_action, sampled_action, action_log_prob = network_def.apply(
params, state, rng2, method=network_def.actor)
# We use frozen_params so that gradients can flow back to the actor without
# being used to update the critic.
q_value_no_grad_1, q_value_no_grad_2 = network_def.apply(
frozen_params, state, sampled_action, method=network_def.critic)
no_grad_q_value = jnp.squeeze(
jnp.minimum(q_value_no_grad_1, q_value_no_grad_2))
alpha_value = jnp.exp(jax.lax.stop_gradient(log_alpha))
policy_loss = jnp.mean(alpha_value * action_log_prob - no_grad_q_value)
# J(\alpha) from equation (18) in paper.
entropy_diff = -action_log_prob - target_entropy
alpha_loss = jnp.mean(log_alpha * jax.lax.stop_gradient(entropy_diff))
# Giving a smaller weight to the critic empirically gives better results
combined_loss = 0.5 * critic_loss + 1.0 * policy_loss + 1.0 * alpha_loss
return combined_loss, {
'critic_loss': critic_loss,
'policy_loss': policy_loss,
'alpha_loss': alpha_loss,
'critic_value_1': q_value_1,
'critic_value_2': q_value_2,
'target_value_1': target_q_value_1,
'target_value_2': target_q_value_2,
'mean_action': mean_action
}
grad_fn = jax.vmap(
jax.value_and_grad(loss_fn, argnums=(0, 1), has_aux=True),
in_axes=(None, None, 0, 0, 0, 0, 0, 0))
rng = jnp.stack(jax.random.split(key, num=batch_size))
(_, aux_vars), gradients = grad_fn(network_params, log_alpha, states, actions,
rewards, next_states, terminals, rng)
# This calculates the mean gradient/aux_vars using the individual
# gradients/aux_vars from each item in the batch.
gradients = jax.tree_map(functools.partial(jnp.mean, axis=0), gradients)
aux_vars = jax.tree_map(functools.partial(jnp.mean, axis=0), aux_vars)
network_gradient, alpha_gradient = gradients
# Apply gradients to all the optimizers.
updates, optimizer_state = optim.update(network_gradient, optimizer_state,
params=network_params)
network_params = optax.apply_updates(network_params, updates)
alpha_updates, alpha_optimizer_state = alpha_optim.update(
alpha_gradient, alpha_optimizer_state, params=log_alpha)
log_alpha = optax.apply_updates(log_alpha, alpha_updates)
# Compile everything in a dict.
returns = {
'network_params': network_params,
'log_alpha': log_alpha,
'optimizer_state': optimizer_state,
'alpha_optimizer_state': alpha_optimizer_state,
'Losses/Critic': aux_vars['critic_loss'],
'Losses/Actor': aux_vars['policy_loss'],
'Losses/Alpha': aux_vars['alpha_loss'],
'Values/CriticValues1': jnp.mean(aux_vars['critic_value_1']),
'Values/CriticValues2': jnp.mean(aux_vars['critic_value_2']),
'Values/TargetValues1': jnp.mean(aux_vars['target_value_1']),
'Values/TargetValues2': jnp.mean(aux_vars['target_value_2']),
'Values/Alpha': jnp.exp(log_alpha),
}
for i, a in enumerate(aux_vars['mean_action']):
returns.update({f'Values/MeanActions{i}': a})
return returns
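# Illustrative sketch, standalone: the critic regression target computed
# inside loss_fn above, written out for a single transition with scalar
# inputs. Names are local to this example.
def _example_critic_target(reward, terminal, target_q1, target_q2, log_prob,
                           log_alpha, reward_scale, gamma):
  """y = s*r + gamma * (min(Q1', Q2') - alpha * log pi(a'|s')) * (1 - terminal)."""
  alpha = jnp.exp(log_alpha)
  soft_value = jnp.minimum(target_q1, target_q2) - alpha * log_prob
  return reward_scale * reward + gamma * soft_value * (1. - terminal)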
@functools.partial(jax.jit, static_argnums=0)
def select_action(network_def, params, state, rng, eval_mode=False):
"""Sample an action to take from the current policy network.
This obtains a mean and variance from the input policy network, and samples an
action using a Gaussian distribution.
Args:
network_def: Linen Module to use for inference.
params: Linen params (frozen dict) to use for inference.
state: input state to use for inference.
rng: Jax random number generator.
eval_mode: bool, whether in eval mode.
Returns:
rng: Jax random number generator.
action: jnp.ndarray, the selected action.
"""
rng, rng2 = jax.random.split(rng)
greedy_a, sampled_a, _ = network_def.apply(
params, state, rng2, method=network_def.actor)
return rng, jnp.where(eval_mode, greedy_a, sampled_a)
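# Illustrative sketch, standalone: the soft ("Polyak") target update applied
# per parameter tensor by _maybe_sync_weights further below. tau corresponds
# to target_smoothing_coefficient; tau = 1 reproduces a hard update.
def _example_soft_update(target_param, online_param, tau):
  return tau * online_param + (1. - tau) * target_param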
@gin.configurable
class SACAgent(dqn_agent.JaxDQNAgent):
"""A JAX implementation of the SAC agent."""
def __init__(self,
action_shape,
action_limits,
observation_shape,
action_dtype=jnp.float32,
observation_dtype=jnp.float32,
reward_scale_factor=1.0,
stack_size=1,
network=continuous_networks.SACNetwork,
num_layers=2,
hidden_units=256,
gamma=0.99,
update_horizon=1,
min_replay_history=20000,
update_period=1,
target_update_type='soft',
target_update_period=1000,
target_smoothing_coefficient=0.005,
target_entropy=None,
eval_mode=False,
optimizer='adam',
summary_writer=None,
summary_writing_frequency=500,
allow_partial_reload=False,
seed=None):
r"""Initializes the agent and constructs the necessary components.
Args:
action_shape: int or tuple, dimensionality of the action space.
action_limits: pair of lower and higher bounds for actions.
observation_shape: tuple of ints describing the observation shape.
action_dtype: jnp.dtype, specifies the type of the actions.
observation_dtype: jnp.dtype, specifies the type of the observations.
reward_scale_factor: float, factor by which to scale rewards.
stack_size: int, number of frames to use in state stack.
network: Jax network to use for training.
num_layers: int, number of layers in the network.
hidden_units: int, number of hidden units in the network.
gamma: float, discount factor with the usual RL meaning.
update_horizon: int, horizon at which updates are performed, the 'n' in
n-step update.
min_replay_history: int, number of transitions that should be experienced
before the agent begins training its value function.
update_period: int, period between DQN updates.
target_update_type: str, if 'hard', will perform a hard update of the
target network every target_update_period training steps; if 'soft',
will use target_smoothing_coefficient to update the target network at
every training step.
target_update_period: int, frequency with which to update target network
when in 'hard' mode.
target_smoothing_coefficient: float, smoothing coefficient for target
network updates (\tau in paper) when in 'soft' mode.
target_entropy: float or None, the target entropy for training alpha. If
None, it will default to half the negative of the number of action
dimensions.
eval_mode: bool, True for evaluation and False for training.
optimizer: str, name of optimizer to use.
summary_writer: SummaryWriter object for outputting training statistics.
summary_writing_frequency: int, frequency with which summaries will be
written. Lower values will result in slower training.
allow_partial_reload: bool, whether we allow reloading a partial agent
(for instance, only the network parameters).
seed: int, a seed for SAC's internal RNG, used for initialization and
sampling actions.
"""
assert isinstance(observation_shape, tuple)
# If we're performing hard updates, we force the smoothing coefficient to 1.
if target_update_type == 'hard':
target_smoothing_coefficient = 1.0
if isinstance(action_shape, int):
action_shape = (action_shape,)
# If target_entropy is None, set to default value.
if target_entropy is None:
action_dim = functools.reduce(operator.mul, action_shape, 1.0)
target_entropy = -0.5 * action_dim
seed = int(time.time() * 1e6) if seed is None else seed
logging.info('Creating %s agent with the following parameters:',
self.__class__.__name__)
logging.info('\t action_shape: %s', action_shape)
logging.info('\t action_dtype: %s', action_dtype)
logging.info('\t action_limits: %s', action_limits)
logging.info('\t observation_shape: %s', observation_shape)
logging.info('\t observation_dtype: %s', observation_dtype)
logging.info('\t reward_scale_factor: %f', reward_scale_factor)
logging.info('\t num_layers: %d', num_layers)
logging.info('\t hidden_units: %d', hidden_units)
logging.info('\t gamma: %f', gamma)
logging.info('\t update_horizon: %f', update_horizon)
logging.info('\t min_replay_history: %d', min_replay_history)
logging.info('\t update_period: %d', update_period)
logging.info('\t target_update_type: %s', target_update_type)
logging.info('\t target_update_period: %d', target_update_period)
logging.info('\t target_smoothing_coefficient: %f',
target_smoothing_coefficient)
logging.info('\t target_entropy: %f', target_entropy)
logging.info('\t optimizer: %s', optimizer)
logging.info('\t seed: %d', seed)
self.action_shape = action_shape
self.action_dtype = action_dtype
self.observation_shape = tuple(observation_shape)
self.observation_dtype = observation_dtype
self.reward_scale_factor = reward_scale_factor
self.stack_size = stack_size
self.action_limits = action_limits
action_limits = tuple(tuple(x.reshape(-1)) for x in action_limits)
self.network_def = network(action_shape, num_layers, hidden_units,
action_limits)
self.gamma = gamma
self.update_horizon = update_horizon
self.cumulative_gamma = math.pow(gamma, update_horizon)
self.min_replay_history = min_replay_history
self.update_period = update_period
self.target_update_type = target_update_type
self.target_update_period = target_update_period
self.target_smoothing_coefficient = target_smoothing_coefficient
self.target_entropy = target_entropy
self.eval_mode = eval_mode
self.training_steps = 0
self.summary_writer = summary_writer
self.summary_writing_frequency = summary_writing_frequency
self.allow_partial_reload = allow_partial_reload
self._rng = jax.random.PRNGKey(seed)
state_shape = self.observation_shape + (stack_size,)
self.state = onp.zeros(state_shape)
self._replay = self._build_replay_buffer()
self._optimizer_name = optimizer
self._build_networks_and_optimizer()
# Variables to be initialized by the agent once it interacts with the
# environment.
self._observation = None
self._last_observation = None
def _build_networks_and_optimizer(self):
self._rng, init_key = jax.random.split(self._rng)
# We can reuse init_key safely for the action selection key
# since it is only used for shape inference during initialization.
self.network_params = self.network_def.init(init_key, self.state, init_key)
self.network_optimizer = dqn_agent.create_optimizer(self._optimizer_name)
self.optimizer_state = self.network_optimizer.init(self.network_params)
# TODO(joshgreaves): Find a way to just copy the critic params
self.target_params = self.network_params
# \alpha network
self.log_alpha = jnp.zeros(1)
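# Added note: log_alpha parameterizes the entropy temperature; optimizing the
# log keeps alpha = exp(log_alpha) positive without any explicit constraint.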
self.alpha_optimizer = dqn_agent.create_optimizer(self._optimizer_name)
self.alpha_optimizer_state = self.alpha_optimizer.init(self.log_alpha)
def _build_replay_buffer(self):
"""Creates the replay buffer used by the agent."""
return circular_replay_buffer.OutOfGraphReplayBuffer(
observation_shape=self.observation_shape,
stack_size=self.stack_size,
update_horizon=self.update_horizon,
gamma=self.gamma,
observation_dtype=self.observation_dtype,
action_shape=self.action_shape,
action_dtype=self.action_dtype)
def _maybe_sync_weights(self):
"""Syncs the target weights with the online weights."""
if (self.target_update_type == 'hard' and
self.training_steps % self.target_update_period != 0):
return
def _sync_weights(target_p, online_p):
return (self.target_smoothing_coefficient * online_p +
(1 - self.target_smoothing_coefficient) * target_p)
self.target_params = jax.tree_multimap(_sync_weights, self.target_params,
self.network_params)
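# Illustration (added comment; the numeric value is an assumption, not a
# default from this file): with target_smoothing_coefficient tau = 0.005, each
# sync performs Polyak averaging, target <- 0.005 * online + 0.995 * target.
# In 'hard' mode tau was forced to 1.0 in __init__, so the target is copied
# outright, but only on steps where
# training_steps % target_update_period == 0.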
def begin_episode(self, observation):
"""Returns the agent's first action for this episode.
Args:
observation: numpy array, the environment's initial observation.
Returns:
np.ndarray, the selected action.
"""
self._reset_state()
self._record_observation(observation)
if not self.eval_mode:
self._train_step()
if self._replay.add_count > self.min_replay_history:
self._rng, self.action = select_action(self.network_def,
self.network_params, self.state,
self._rng, self.eval_mode)
else:
self._rng, action_rng = jax.random.split(self._rng)
self.action = jax.random.uniform(action_rng, self.action_shape,
self.action_dtype, self.action_limits[0],
self.action_limits[1])
self.action = onp.asarray(self.action)
return self.action
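# Added note: until the replay buffer holds more than min_replay_history
# transitions, actions are sampled uniformly between action_limits[0] and
# action_limits[1] as a warm-up exploration phase; afterwards they come from
# the learned policy via select_action. The same logic is repeated in step().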
def step(self, reward, observation):
"""Records the most recent transition and returns the agent's next action.
We store the observation of the last time step since we want to store it
with the reward.
Args:
reward: float, the reward received from the agent's most recent action.
observation: numpy array, the most recent observation.
Returns:
np.ndarray, the selected action.
"""
self._last_observation = self._observation
self._record_observation(observation)
if not self.eval_mode:
self._store_transition(self._last_observation, self.action, reward, False)
self._train_step()
if self._replay.add_count > self.min_replay_history:
self._rng, self.action = select_action(self.network_def,
self.network_params, self.state,
self._rng, self.eval_mode)
else:
self._rng, action_rng = jax.random.split(self._rng)
self.action = jax.random.uniform(action_rng, self.action_shape,
self.action_dtype, self.action_limits[0],
self.action_limits[1])
self.action = onp.asarray(self.action)
return self.action
def _train_step(self):
"""Runs a single training step.
Runs training if both:
(1) A minimum number of frames have been added to the replay buffer.
(2) `training_steps` is a multiple of `update_period`.
Also syncs weights from the online network to the target network when the
training step count is a multiple of the target update period.
"""
if self._replay.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sample_from_replay_buffer()
self._rng, key = jax.random.split(self._rng)
train_returns = train(
self.network_def, self.network_optimizer, self.alpha_optimizer,
self.optimizer_state, self.alpha_optimizer_state,
self.network_params, self.target_params, self.log_alpha,
key, self.replay_elements['state'],
self.replay_elements['action'], self.replay_elements['next_state'],
self.replay_elements['reward'], self.replay_elements['terminal'],
self.cumulative_gamma, self.target_entropy,
self.reward_scale_factor)
self.network_params = train_returns['network_params']
self.optimizer_state = train_returns['optimizer_state']
self.log_alpha = train_returns['log_alpha']
self.alpha_optimizer_state = train_returns['alpha_optimizer_state']
if (self.summary_writer is not None and
self.training_steps > 0 and
self.training_steps % self.summary_writing_frequency == 0):
for k in train_returns:
if k.startswith('Losses') or k.startswith('Values'):
self.summary_writer.scalar(k, train_returns[k],
self.training_steps)
self.summary_writer.flush()
self._maybe_sync_weights()
self.training_steps += 1
def bundle_and_checkpoint(self, checkpoint_dir, iteration_number):
"""Returns a self-contained bundle of the agent's state.
This is used for checkpointing. It will return a dictionary containing all
non-TensorFlow objects (to be saved into a file by the caller), and it saves
all TensorFlow objects into a checkpoint file.
Args:
checkpoint_dir: str, directory where TensorFlow objects will be saved.
iteration_number: int, iteration number to use for naming the checkpoint
file.
Returns:
A dict containing additional Python objects to be checkpointed by the
experiment. If the checkpoint directory does not exist, returns None.
"""
if not tf.io.gfile.exists(checkpoint_dir):
return None
# Checkpoint the out-of-graph replay buffer.
self._replay.save(checkpoint_dir, iteration_number)
bundle_dictionary = {
'state': self.state,
'training_steps': self.training_steps,
'network_params': self.network_params,
'optimizer_state': self.optimizer_state,
'target_params': self.target_params,
'log_alpha': self.log_alpha,
'alpha_optimizer_state': self.alpha_optimizer_state,
}
return bundle_dictionary
def unbundle(self, checkpoint_dir, iteration_number, bundle_dictionary):
"""Restores the agent from a checkpoint.
Restores the agent's Python objects to those specified in bundle_dictionary,
and restores the TensorFlow objects to those specified in the
checkpoint_dir. If the checkpoint_dir does not exist, will not reset the
agent's state.
Args:
checkpoint_dir: str, path to the checkpoint saved.
iteration_number: int, checkpoint version, used when restoring the replay
buffer.
bundle_dictionary: dict, containing additional Python objects owned by the
agent.
Returns:
bool, True if unbundling was successful.
"""
try:
# self._replay.load() will throw a NotFoundError if it does not find all
# the necessary files.
self._replay.load(checkpoint_dir, iteration_number)
except tf.errors.NotFoundError:
if not self.allow_partial_reload:
# If we don't allow partial reloads, we will return False.
return False
logging.warning('Unable to reload replay buffer!')
if bundle_dictionary is not None:
self.state = bundle_dictionary['state']
self.training_steps = bundle_dictionary['training_steps']
self.network_params = bundle_dictionary['network_params']
self.network_optimizer = dqn_agent.create_optimizer(self._optimizer_name)
self.optimizer_state = bundle_dictionary['optimizer_state']
self.target_params = bundle_dictionary['target_params']
self.log_alpha = bundle_dictionary['log_alpha']
self.alpha_optimizer = dqn_agent.create_optimizer(self._optimizer_name)
self.alpha_optimizer_state = bundle_dictionary['alpha_optimizer_state']
elif not self.allow_partial_reload:
return False
else:
logging.warning("Unable to reload the agent's parameters!")
return True
|
google/dopamine
|
dopamine/jax/agents/sac/sac_agent.py
|
Python
|
apache-2.0
| 25,381
|
[
"Gaussian"
] |
a85b75fb07a6daa5a5febb89639c6aaceca69c916c83e66726a4269add359d9b
|
#!/usr/bin/python
import time
import os
import sys
import argparse
#TODO: Rename this script, it's horrible!
# Copyright(C) 2014 David Ream
# Released under Biopython license. http://www.biopython.org/DIST/LICENSE
# Do not remove this comment
#########################################################################################################################################
# The globals below are command line arguments for some of the scripts that this pipeline calls. They are not  #
# important enough, at least at this time, to justify exposing them as command line arguments of this master   #
# script; that can be revised later, once everything is running to our satisfaction. Most (or all) of them     #
# will likely be removed, since leaving them editable may tempt someone to break what already works well.      #
#########################################################################################################################################
# removed from regulondb_dl_parse.py as a command line param for this master script
regulon_url = 'http://regulondb.ccg.unam.mx/menu/download/datasets/files/OperonSet.txt'
regulon_outfolder = './regulonDB/'
# the following two require additional code to work; fix later
regulon_download = 'True'
regulon_experimental_only = 'True'
# removed from format_db.py as a command line param for this master script
format_protein = 'True'
BLAST_database_folder = './db/'
# removed from make_operon_query.py as a command line param for this script
reference_organism = 'NC_000913'
operon_query_outfile = './operon_query.fa'
# removed from blast_script.py as a command line param for this script
blast_outfolder = './blast_result/'
# This exists to make the main function easier to read. It contains code to run the argument parser, and does nothing else.
def parser_code():
parser = argparse.ArgumentParser(description='The purpose of this script is to run the full software suite that we have developed to study operons, using as few inputs as possible so that it is as easy to use as possible.')
parser.add_argument("-i", "--infile", dest="infile", metavar="FILE", default='./regulonDB/operon_names_and_genes.txt',
help="Input file for the operon query step of the pipeline.")
parser.add_argument("-I", "--infolder", dest="infolder", metavar="DIRECTORY", default='./genomes/',
help="Folder containing all genbank files for use by the program.")
parser.add_argument("-o", "--outfolder", dest="outfolder", metavar="DIRECTORY", default='./regulonDB/',
help="Folder where results will be stored.")
parser.add_argument("-f", "--filter", dest="filter", metavar="FILE", default='./phylo_order.txt',
help="File restrictiong which accession numbers this script will process. If no file is provided, filtering is not performed.")
parser.add_argument("-n", "--num_proc", dest="num_proc", metavar="INT", default = os.sysconf("SC_NPROCESSORS_CONF"), type=int,
help="Number of processors that you want this script to run on. The default is every CPU that the system has.")
parser.add_argument("-m", "--min_genes", dest="min_genes", metavar="INT", default = 5, type=int,
help="Minum number of genes that an operon must contain before it can be considered for further analysis. The default is 5 because that is what we are currently using in the study.")
parser.add_argument("-g", "--max_gap", dest="max_gap", metavar="INT", default = 500, type=int,
help="Size in nucleotides of the maximum gap allowed between genes to be considered neighboring. The default is 500.")
parser.add_argument("-e", "--eval", dest="eval", default='1e-10', metavar="FLOAT", type=float,
help="eval for the BLAST search.")
return parser.parse_args()
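# Example invocation (illustrative only; the values mirror the script's
# defaults except the processor count, which is just an example):
#   ./main.py -I ./genomes/ -f ./phylo_order.txt -n 4 -m 5 -g 500 -e 1e-10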
def check_options(parsed_args):
if os.path.isdir(parsed_args.infolder):
infolder = parsed_args.infolder
else:
print "The folder %s does not exist." % parsed_args.infolder
sys.exit()
# if the directory that the user specifies does not exist, then the program makes it for them.
if not os.path.isdir(parsed_args.outfolder):
os.makedirs(parsed_args.outfolder)
if parsed_args.outfolder[-1] != '/':
outfolder = parsed_args.outfolder + '/'
else:
outfolder = parsed_args.outfolder
if parsed_args.filter == 'NONE' or os.path.exists(parsed_args.filter):
filter_file = parsed_args.filter
else:
print "The file %s does not exist." % parsed_args.filter
sys.exit()
# section of code that determines the number of CPU cores that will be used by the program
if parsed_args.num_proc > os.sysconf("SC_NPROCESSORS_CONF"):
num_proc = os.sysconf("SC_NPROCESSORS_CONF")
elif parsed_args.num_proc < 1:
num_proc = 1
else:
num_proc = int(parsed_args.num_proc)
if parsed_args.min_genes <= 0:
min_genes = 1
else:
min_genes = parsed_args.min_genes
# validate the input for the maximum allowed gap
try:
max_gap = int(parsed_args.max_gap)
if max_gap <= 0:
print "The gap that you entered %s is a negative number, please enter a positive integer." % parsed_args.max_gap
sys.exit()
else:
pass
except ValueError:
print "The gap that you entered %s is not an integer, please enter a positive integer." % parsed_args.max_gap
sys.exit()
#e_val = float(parsed_args.eval)
e_val = parsed_args.eval
return infolder, outfolder, filter_file, num_proc, min_genes, max_gap, e_val
def main():
start = time.time()
parsed_args = parser_code()
infolder, outfolder, filter_file, num_proc, min_genes, max_gap, e_val = check_options(parsed_args)
#print infolder, outfolder, filter_file, num_proc, regulon_download, regulon_url, regulon_experimental_only, min_genes
# Stage 1: Get operon set and parse into something that we can use
cmd1 = "./regulondb_dl_parse.py -f %s -i %s -o %s -n %i -u %s -m %i" % (filter_file, infolder, regulon_outfolder, num_proc, regulon_url, min_genes)
# print "cmd1", cmd1
os.system(cmd1)
#Stage 2: Create BLAST searchable databases. (I am limiting this to protein databases right now since that is what we do in the paper)
cmd2 = "./format_db.py -f %s -i %s -o %s -n %i" % (filter_file, infolder, BLAST_database_folder, num_proc)
# Set the database formatting option [Protein or DNA], even though we don't use it
if format_protein == 'True':
pass
else:
cmd2 = cmd2 + ' -d'
#print cmd2
os.system(cmd2)
#Stage 3: make the operon query fasta file(s)
operon_file = regulon_outfolder + 'operon_names_and_genes.txt'
cmd3 = "./make_operon_query.py -i %s -o %s -p %s -n %i -r %s" % (infolder, operon_query_outfile, operon_file, num_proc, refrence_organism)
#print cmd3
os.system(cmd3)
#Stage 4: run BLAST with the query that we made in stage 3, using the databases that we used in stage 2.
# TODO: add e-value filtering here; going with the default since I'm low on time. I will fix it in the next few days.
cmd4 = "./blast_script.py -d %s -o %s -f %s -n %i -q %s -e %f" % (BLAST_database_folder, blast_outfolder, filter_file, num_proc, operon_query_outfile, e_val)
print cmd4
os.system(cmd4)
# Stage 5: Parse the BLAST result and sort it by gene block
# I'm just trying to get this out the door; everything works as it should, but I am saving time. The final
# version will implement some ability to control this program's behavior.
cmd5 = "./blast_parse.py -f %s -n %i" % (filter_file, num_proc)
#print cmd5
os.system(cmd5)
# Stage 6: filter out spurious results and report the gene blocks that best represent the original.
cmd6 = "./filter_operon_blast_results.py -n %i -g %i" % (num_proc, max_gap)
print cmd6
os.system(cmd6)
print time.time() - start
if __name__ == '__main__':
main()
|
schaefce/gene_block_evolution
|
main.py
|
Python
|
gpl-3.0
| 8,377
|
[
"BLAST",
"Biopython"
] |
ec72de5ea41f1fa331691bc8af6566948a5fbc9fdbe35653f74054b84cc11b7f
|
# Author: Varun Hiremath <varun@debian.org>
# Enthought library imports.
from traits.api import Instance, Enum
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
# Local imports
from mayavi.filters.filter_base import FilterBase
from mayavi.core.pipeline_info import PipelineInfo
######################################################################
# `ExtractVectorComponents` class.
######################################################################
class ExtractVectorComponents(FilterBase):
""" This wraps the TVTK ExtractVectorComponents filter and allows
one to select any of the three components of an input vector data
attribute."""
# The version of this class. Used for persistence.
__version__ = 0
# The actual TVTK filter that this class manages.
filter = Instance(tvtk.ExtractVectorComponents, args=(), allow_none=False)
# The Vector Component to be extracted
component = Enum('x-component', 'y-component', 'z-component',
desc='component of the vector to be extracted')
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['vectors'])
output_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
view = View(Group(Item(name='component')),
resizable=True
)
######################################################################
# `Filter` interface.
######################################################################
def update_pipeline(self):
# Do nothing if there is no input.
inputs = self.inputs
if len(inputs) == 0:
return
fil = self.filter
self.configure_connection(fil, inputs[0])
fil.update()
self._component_changed(self.component)
######################################################################
# Non-public interface.
######################################################################
def _component_changed(self, value):
# Obtain output from the TVTK ExtractVectorComponents filter
# corresponding to the selected vector component
if len(self.inputs) == 0:
return
if value == 'x-component':
self._set_outputs([self.filter.vx_component])
elif value == 'y-component':
self._set_outputs([self.filter.vy_component])
elif value == 'z-component':
self._set_outputs([self.filter.vz_component])
self.render()
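# Minimal usage sketch (added for illustration; not part of the original
# module). It assumes you already have a Mayavi Engine instance and a
# vector-valued pipeline object; 'engine' and 'vector_source' below are
# placeholders, not names defined in this file:
#
#   evc = ExtractVectorComponents()
#   engine.add_filter(evc, vector_source)
#   evc.component = 'y-component'   # downstream modules now see the y component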
|
dmsurti/mayavi
|
mayavi/filters/extract_vector_components.py
|
Python
|
bsd-3-clause
| 2,661
|
[
"Mayavi"
] |
717e77f296b2d8e15d527b8991f399afbd4772ea76571f7b5015fb5ce38a148e
|
#!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://pythonhosted.org/setuptools/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import DistutilsArgError, DistutilsOptionError, \
DistutilsError, DistutilsPlatformError
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import platform
import textwrap
import warnings
import site
import struct
import contextlib
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from setuptools.compat import (iteritems, maxsize, basestring, unicode,
reraise, PY2, PY3)
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
sys_executable = os.environ.get('__PYVENV_LAUNCHER__',
os.path.normpath(sys.executable))
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def is_64bit():
return struct.calcsize("P") == 8
def samefile(p1, p2):
both_exist = os.path.exists(p1) and os.path.exists(p2)
use_samefile = hasattr(os.path, 'samefile') and both_exist
if use_samefile:
return os.path.samefile(p1, p2)
norm_p1 = os.path.normpath(os.path.normcase(p1))
norm_p2 = os.path.normpath(os.path.normcase(p2))
return norm_p1 == norm_p2
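# Added example: samefile('/tmp/a/../b', '/tmp/b') is True on POSIX; when
# os.path.samefile is unavailable or either path does not exist, the fallback
# compares the normalized (normcase + normpath) forms instead.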
if PY2:
def _to_ascii(s):
return s
def isascii(s):
try:
unicode(s, 'ascii')
return True
except UnicodeError:
return False
else:
def _to_ascii(s):
return s.encode('ascii')
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=', 'S', "list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l',
"allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('no-find-links', None,
"Don't load find-links defined in packages being installed")
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'editable',
'no-deps', 'local-snapshots-ok', 'version'
]
if site.ENABLE_USER_SITE:
help_msg = "install in user site-package '%s'" % site.USER_SITE
user_options.append(('user', None, help_msg))
boolean_options.append('user')
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
if site.ENABLE_USER_SITE:
whereami = os.path.abspath(__file__)
self.user = whereami.startswith(site.USER_SITE)
else:
self.user = 0
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
self.version = None
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_base = None
self.install_platbase = None
if site.ENABLE_USER_SITE:
self.install_userbase = site.USER_BASE
self.install_usersite = site.USER_SITE
else:
self.install_userbase = None
self.install_usersite = None
self.no_find_links = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.site_dirs = None
self.installed_projects = {}
self.sitepy_installed = False
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
for filename in blockers:
if os.path.exists(filename) or os.path.islink(filename):
log.info("Deleting %s", filename)
if not self.dry_run:
if (os.path.isdir(filename) and
not os.path.islink(filename)):
rmtree(filename)
else:
os.unlink(filename)
def finalize_options(self):
if self.version:
print('setuptools %s' % get_distribution('setuptools').version)
sys.exit()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
# fix the install_dir if "--user" was used
# XXX: duplicate of the code in the setup command
if self.user and site.ENABLE_USER_SITE:
self.create_home_path()
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
if os.name == 'posix':
self.select_scheme("unix_user")
else:
self.select_scheme(os.name + "_user")
self.expand_basedirs()
self.expand_dirs()
self._expand('install_dir', 'script_dir', 'build_directory',
'site_dirs')
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options(
'install_lib', ('install_dir', 'install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options(
'install_scripts', ('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Should this be moved to the if statement below? It's not used
# elsewhere
normpath = map(normalize_path, sys.path)
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in
self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d + " (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable:
self.check_site_dir()
self.index_url = self.index_url or "https://pypi.python.org/simple"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path=self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path + sys.path)
if self.find_links is not None:
if isinstance(self.find_links, basestring):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path + sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize', 'optimize'))
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2):
raise ValueError
except ValueError:
raise DistutilsOptionError("--optimize must be 0, 1, or 2")
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data', ])
def run(self):
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except:
pid = random.randint(0, maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
pass
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir, 'easy-install.pth')
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname() + '.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists:
os.unlink(testfile)
open(testfile, 'w').close()
os.unlink(testfile)
except (OSError, IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir
raise DistutilsError(self.no_default_version_msg())
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
PYTHONPATH = os.environ.get('PYTHONPATH', '').split(os.pathsep)
if instdir not in map(normalize_path, [_f for _f in PYTHONPATH if _f]):
# only PYTHONPATH dirs need a site.py, so pretend it's there
self.sitepy_installed = True
elif self.multi_version and not os.path.exists(pth_file):
self.sitepy_installed = True # don't need site.py in this case
self.pth_file = None # and don't create a .pth file
self.install_dir = instdir
def cant_write_to_target(self):
template = """can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
"""
msg = template % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += """
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
"""
else:
msg += """
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://pythonhosted.org/setuptools/easy_install.html
Please make the appropriate changes for your system and try again.
"""
raise DistutilsError(msg)
def check_pth_processing(self):
"""Empirically verify whether .pth files are supported in inst. dir"""
instdir = self.install_dir
log.info("Checking .pth file support in %s", instdir)
pth_file = self.pseudo_tempname() + ".pth"
ok_file = pth_file + '.ok'
ok_exists = os.path.exists(ok_file)
try:
if ok_exists:
os.unlink(ok_file)
dirname = os.path.dirname(ok_file)
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(pth_file, 'w')
except (OSError, IOError):
self.cant_write_to_target()
else:
try:
f.write("import os; f = open(%r, 'w'); f.write('OK'); "
"f.close()\n" % (ok_file,))
f.close()
f = None
executable = sys.executable
if os.name == 'nt':
dirname, basename = os.path.split(executable)
alt = os.path.join(dirname, 'pythonw.exe')
if (basename.lower() == 'python.exe' and
os.path.exists(alt)):
# use pythonw.exe to avoid opening a console window
executable = alt
from distutils.spawn import spawn
spawn([executable, '-E', '-c', 'pass'], 0)
if os.path.exists(ok_file):
log.info(
"TEST PASSED: %s appears to support .pth files",
instdir
)
return True
finally:
if f:
f.close()
if os.path.exists(ok_file):
os.unlink(ok_file)
if os.path.exists(pth_file):
os.unlink(pth_file)
if not self.multi_version:
log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/' + script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base, filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self, spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
def easy_install(self, spec, deps=False):
tmpdir = tempfile.mkdtemp(prefix="easy_install-")
download = None
if not self.editable:
self.install_site_py()
try:
if not isinstance(spec, Requirement):
if URL_SCHEME(spec):
# It's a url, download it to tmpdir and process
self.not_editable(spec)
download = self.package_index.download(spec, tmpdir)
return self.install_item(None, download, tmpdir, deps,
True)
elif os.path.exists(spec):
# Existing file or directory, just process it directly
self.not_editable(spec)
return self.install_item(None, spec, tmpdir, deps, True)
else:
spec = parse_requirement_arg(spec)
self.check_editable(spec)
dist = self.package_index.fetch_distribution(
spec, tmpdir, self.upgrade, self.editable,
not self.always_copy, self.local_index
)
if dist is None:
msg = "Could not find suitable distribution for %r" % spec
if self.always_copy:
msg += " (--always-copy skips system and development eggs)"
raise DistutilsError(msg)
elif dist.precedence == DEVELOP_DIST:
# .egg-info dists don't need installing, just process deps
self.process_distribution(spec, dist, deps, "Using")
return dist
else:
return self.install_item(spec, dist.location, tmpdir, deps)
finally:
if os.path.exists(tmpdir):
rmtree(tmpdir)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
# Installation is also needed if the file is in tmpdir or is not an egg
install_needed = install_needed or self.always_copy
install_needed = install_needed or os.path.dirname(download) == tmpdir
install_needed = install_needed or not download.endswith('.egg')
install_needed = install_needed or (
self.always_copy_from is not None and
os.path.dirname(normalize_path(download)) ==
normalize_path(self.always_copy_from)
)
if spec and not install_needed:
# at this point, we know it's a local .egg, we just don't know if
# it's already installed.
for dist in self.local_index[spec.project_name]:
if dist.location == download:
break
else:
install_needed = True # it's not in the local index
log.info("Processing %s", os.path.basename(download))
if install_needed:
dists = self.install_eggs(spec, download, tmpdir)
for dist in dists:
self.process_distribution(spec, dist, deps)
else:
dists = [self.egg_distribution(download)]
self.process_distribution(spec, dists[0], deps, "Using")
if spec is not None:
for dist in dists:
if dist in spec:
return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
self.update_pth(dist)
self.package_index.add(dist)
if dist in self.local_index[dist.key]:
self.local_index.remove(dist)
self.local_index.add(dist)
self.install_egg_scripts(dist)
self.installed_projects[dist.key] = dist
log.info(self.installation_report(requirement, dist, *info))
if (dist.has_metadata('dependency_links.txt') and
not self.no_find_links):
self.package_index.add_find_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not deps and not self.always_copy:
return
elif requirement is not None and dist.key != requirement.key:
log.warn("Skipping dependencies for %s", dist)
return # XXX this is not the distribution we were looking for
elif requirement is None or dist not in requirement:
# if we wound up with a different version, resolve what we've got
distreq = dist.as_requirement()
requirement = requirement or distreq
requirement = Requirement(
distreq.project_name, distreq.specs, requirement.extras
)
log.info("Processing dependencies for %s", requirement)
try:
distros = WorkingSet([]).resolve(
[requirement], self.local_index, self.easy_install
)
except DistributionNotFound:
e = sys.exc_info()[1]
raise DistutilsError(
"Could not find required distribution %s" % e.args
)
except VersionConflict:
e = sys.exc_info()[1]
raise DistutilsError(
"Installed distribution %s conflicts with requirement %s"
% e.args
)
if self.always_copy or self.always_copy_from:
# Force all the relevant distros to be copied or activated
for dist in distros:
if dist.key not in self.installed_projects:
self.easy_install(dist.as_requirement())
log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
def maybe_move(self, spec, dist_filename, setup_base):
dst = os.path.join(self.build_directory, spec.key)
if os.path.exists(dst):
msg = ("%r already exists in %s; build directory %s will not be "
"kept")
log.warn(msg, spec.key, self.build_directory, setup_base)
return setup_base
if os.path.isdir(dist_filename):
setup_base = dist_filename
else:
if os.path.dirname(dist_filename) == setup_base:
os.unlink(dist_filename) # get it out of the tmp dir
contents = os.listdir(setup_base)
if len(contents) == 1:
dist_filename = os.path.join(setup_base, contents[0])
if os.path.isdir(dist_filename):
# if the only thing there is a directory, move it instead
setup_base = dist_filename
ensure_directory(dst)
shutil.move(setup_base, dst)
return dst
def install_wrapper_scripts(self, dist):
if not self.exclude_scripts:
for args in get_script_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
if is_script:
script_text = (get_script_header(script_text) +
self._load_template(dev_path) % locals())
self.write_script(script_name, _to_ascii(script_text), 'b')
@staticmethod
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://bitbucket.org/pypa/setuptools/issue/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir, x) for x in blockers]
)
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
f = open(target, "w" + mode)
f.write(contents)
f.close()
chmod(target, 0o777 - mask)
def install_eggs(self, spec, dist_filename, tmpdir):
# .egg dirs or files are already built, so just return them
if dist_filename.lower().endswith('.egg'):
return [self.install_egg(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.exe'):
return [self.install_exe(dist_filename, tmpdir)]
# Anything else, try to extract and build
setup_base = tmpdir
if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
unpack_archive(dist_filename, tmpdir, self.unpack_progress)
elif os.path.isdir(dist_filename):
setup_base = os.path.abspath(dist_filename)
if (setup_base.startswith(tmpdir) # something we downloaded
and self.build_directory and spec is not None):
setup_base = self.maybe_move(spec, dist_filename, setup_base)
# Find the setup.py file
setup_script = os.path.join(setup_base, 'setup.py')
if not os.path.exists(setup_script):
setups = glob(os.path.join(setup_base, '*', 'setup.py'))
if not setups:
raise DistutilsError(
"Couldn't find a setup script in %s" %
os.path.abspath(dist_filename)
)
if len(setups) > 1:
raise DistutilsError(
"Multiple setup scripts in %s" %
os.path.abspath(dist_filename)
)
setup_script = setups[0]
# Now run it, and return the result
if self.editable:
log.info(self.report_editable(spec, setup_script))
return []
else:
return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
def install_egg(self, egg_path, tmpdir):
destination = os.path.join(self.install_dir,
os.path.basename(egg_path))
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
dist = self.egg_distribution(egg_path)
if not samefile(egg_path, destination):
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(os.unlink, (destination,), "Removing " +
destination)
try:
new_dist_is_zipped = False
if os.path.isdir(egg_path):
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copytree, "Copying"
elif self.should_unzip(dist):
self.mkpath(destination)
f, m = self.unpack_and_compile, "Extracting"
else:
new_dist_is_zipped = True
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copy2, "Copying"
self.execute(f, (egg_path, destination),
(m + " %s to %s") %
(os.path.basename(egg_path),
os.path.dirname(destination)))
update_dist_caches(destination,
fix_zipimporter_caches=new_dist_is_zipped)
except:
update_dist_caches(destination, fix_zipimporter_caches=False)
raise
self.add_output(destination)
return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(
None,
project_name=cfg.get('metadata', 'name'),
version=cfg.get('metadata', 'version'), platform=get_platform(),
)
# Convert the .exe to an unpacked egg
egg_path = dist.location = os.path.join(tmpdir, dist.egg_name() +
'.egg')
egg_tmp = egg_path + '.tmp'
_egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf, 'w')
f.write('Metadata-Version: 1.0\n')
for k, v in cfg.items('metadata'):
if k != 'target_version':
f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
f.close()
script_dir = os.path.join(_egg_info, 'scripts')
self.delete_blockers( # delete entry-point scripts to avoid duping
[os.path.join(script_dir, args[0]) for args in
get_script_args(dist)]
)
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src, dst):
s = src.lower()
for old, new in prefixes:
if s.startswith(old):
src = new + src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old != 'SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile)
stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(
os.path.join(egg_tmp, 'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level', 'native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
if not os.path.exists(txt):
f = open(txt, 'w')
f.write('\n'.join(locals()[name]) + '\n')
f.close()
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += """
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
"""
if self.install_dir not in map(normalize_path, sys.path):
msg += """
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
"""
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return """\nExtracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""" % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose > 2:
v = 'v' * (self.verbose - 1)
args.insert(0, '-' + v)
elif self.verbose < 2:
args.insert(0, '-q')
if self.dry_run:
args.insert(0, '-n')
log.info(
"Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit:
v = sys.exc_info()[1]
raise DistutilsError("Setup script exited with %s" % (v.args[0],))
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
self._set_fetcher_options(os.path.dirname(setup_script))
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives:
continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key == 'setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir, 'setuptools.pth')
if os.path.islink(filename):
os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location) + '\n')
f.close()
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []
to_chmod = []
def pf(src, dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src, dst)
return not self.dry_run and dst or None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
chmod(f, mode)
def byte_compile(self, to_compile):
if sys.dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
def no_default_version_msg(self):
template = """bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again."""
return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
def install_site_py(self):
"""Make sure there's a site.py in the target dir, if needed"""
if self.sitepy_installed:
return # already did it, or don't need to
sitepy = os.path.join(self.install_dir, "site.py")
source = resource_string("setuptools", "site-patch.py")
current = ""
if os.path.exists(sitepy):
log.debug("Checking existing site.py in %s", self.install_dir)
f = open(sitepy, 'rb')
current = f.read()
# we want str, not bytes
if PY3:
current = current.decode()
f.close()
if not current.startswith('def __boot():'):
raise DistutilsError(
"%s is not a setuptools-generated site.py; please"
" remove it." % sitepy
)
if current != source:
log.info("Creating %s", sitepy)
if not self.dry_run:
ensure_directory(sitepy)
f = open(sitepy, 'wb')
f.write(source)
f.close()
self.byte_compile([sitepy])
self.sitepy_installed = True
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in iteritems(self.config_vars):
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0o700)" % path)
os.makedirs(path, 0o700)
INSTALL_SCHEMES = dict(
posix=dict(
install_dir='$base/lib/python$py_version_short/site-packages',
script_dir='$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir='$base/Lib/site-packages',
script_dir='$base/Scripts',
)
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix:
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
for attr, val in scheme.items():
if getattr(self, attr, None) is None:
setattr(self, attr, val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
def get_site_dirs():
# return a list of 'site' dirs
sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
'').split(os.pathsep) if _f]
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.extend([os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python")])
else:
sitedirs.extend(
[prefix, os.path.join(prefix, "lib", "site-packages")]
)
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
lib_paths = get_path('purelib'), get_path('platlib')
for site_lib in lib_paths:
if site_lib not in sitedirs:
sitedirs.append(site_lib)
if site.ENABLE_USER_SITE:
sitedirs.append(site.USER_SITE)
sitedirs = list(map(normalize_path, sitedirs))
return sitedirs
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth', 'setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname, name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a ConfigParser.RawConfigParser, or None
"""
f = open(dist_filename, 'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended - 12)
from setuptools.compat import StringIO, ConfigParser
import struct
tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended - (12 + cfglen))
cfg = ConfigParser.RawConfigParser(
{'version': '', 'target_version': ''})
try:
part = f.read(cfglen)
# part is in bytes, but we need to read up to the first null
# byte.
if sys.version_info >= (2, 6):
null_byte = bytes([0])
else:
null_byte = chr(0)
config = part.split(null_byte, 1)[0]
# Now the config is in bytes, but for RawConfigParser, it should
# be text, so decode it.
config = config.decode(sys.getfilesystemencoding())
cfg.readfp(StringIO(config))
except ConfigParser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close()
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [
('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
('PLATLIB/', ''),
('SCRIPTS/', 'EGG-INFO/scripts/'),
('DATA/lib/site-packages', ''),
]
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts) == 3 and parts[2] == 'PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
break
if len(parts) != 2 or not name.endswith('.pth'):
continue
if name.endswith('-nspkg.pth'):
continue
if parts[0].upper() in ('PURELIB', 'PLATLIB'):
contents = z.read(name)
if PY3:
contents = contents.decode()
for pth in yield_lines(contents):
pth = pth.strip().replace('\\', '/')
if not pth.startswith('import'):
prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
finally:
z.close()
prefixes = [(x.lower(), y) for x, y in prefixes]
prefixes.sort()
prefixes.reverse()
return prefixes
def parse_requirement_arg(spec):
try:
return Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" % (spec,)
)
class PthDistributions(Environment):
"""A .pth file with Distribution paths in it"""
dirty = False
def __init__(self, filename, sitedirs=()):
self.filename = filename
self.sitedirs = list(map(normalize_path, sitedirs))
self.basedir = normalize_path(os.path.dirname(self.filename))
self._load()
Environment.__init__(self, [], None, None)
for path in yield_lines(self.paths):
list(map(self.add, find_distributions(path, True)))
def _load(self):
self.paths = []
saw_import = False
seen = dict.fromkeys(self.sitedirs)
if os.path.isfile(self.filename):
f = open(self.filename, 'rt')
for line in f:
if line.startswith('import'):
saw_import = True
continue
path = line.rstrip()
self.paths.append(path)
if not path.strip() or path.strip().startswith('#'):
continue
# skip non-existent paths, in case somebody deleted a package
# manually, and duplicate paths as well
path = self.paths[-1] = normalize_path(
os.path.join(self.basedir, path)
)
if not os.path.exists(path) or path in seen:
self.paths.pop() # skip it
self.dirty = True # we cleaned up, so we're dirty now :)
continue
seen[path] = 1
f.close()
if self.paths and not saw_import:
self.dirty = True # ensure anything we touch has import wrappers
while self.paths and not self.paths[-1].strip():
self.paths.pop()
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
data = '\n'.join(map(self.make_relative, self.paths))
if data:
log.debug("Saving %s", self.filename)
data = (
"import sys; sys.__plen = len(sys.path)\n"
"%s\n"
"import sys; new=sys.path[sys.__plen:];"
" del sys.path[sys.__plen:];"
" p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
" sys.__egginsert = p+len(new)\n"
) % data
if os.path.islink(self.filename):
os.unlink(self.filename)
f = open(self.filename, 'wt')
f.write(data)
f.close()
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False
def add(self, dist):
"""Add `dist` to the distribution map"""
new_path = (
dist.location not in self.paths and (
dist.location not in self.sitedirs or
# account for '.' being in PYTHONPATH
dist.location == os.getcwd()
)
)
if new_path:
self.paths.append(dist.location)
self.dirty = True
Environment.add(self, dist)
def remove(self, dist):
"""Remove `dist` from the distribution map"""
while dist.location in self.paths:
self.paths.remove(dist.location)
self.dirty = True
Environment.remove(self, dist)
def make_relative(self, path):
npath, last = os.path.split(normalize_path(path))
baselen = len(self.basedir)
parts = [last]
sep = os.altsep == '/' and '/' or os.sep
while len(npath) >= baselen:
if npath == self.basedir:
parts.append(os.curdir)
parts.reverse()
return sep.join(parts)
npath, last = os.path.split(npath)
parts.append(last)
else:
return path
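# Illustrative (hedged) examples of make_relative on a POSIX system, assuming
# basedir is '/usr/lib/python2.7/site-packages' (both paths are made up):
#   make_relative('/usr/lib/python2.7/site-packages/foo/bar.egg')
#       -> './foo/bar.egg'
#   make_relative('/opt/other/baz.egg')   # outside basedir, returned unchanged
#       -> '/opt/other/baz.egg'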
def _first_line_re():
"""
Return a regular expression based on first_line_re suitable for matching
strings.
"""
if isinstance(first_line_re.pattern, str):
return first_line_re
# first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
return re.compile(first_line_re.pattern.decode())
def get_script_header(script_text, executable=sys_executable, wininst=False):
"""Create a #! line, getting options (if any) from script_text"""
first = (script_text + '\n').splitlines()[0]
match = _first_line_re().match(first)
options = ''
if match:
options = match.group(1) or ''
if options:
options = ' ' + options
if wininst:
executable = "python.exe"
else:
executable = nt_quote_arg(executable)
hdr = "#!%(executable)s%(options)s\n" % locals()
if not isascii(hdr):
# Non-ascii path to sys.executable, use -x to prevent warnings
if options:
if options.strip().startswith('-'):
options = ' -x' + options.strip()[1:]
# else: punt, we can't do it, let the warning happen anyway
else:
options = ' -x'
executable = fix_jython_executable(executable, options)
hdr = "#!%(executable)s%(options)s\n" % locals()
return hdr
def auto_chmod(func, arg, exc):
if func is os.remove and os.name == 'nt':
chmod(arg, stat.S_IWRITE)
return func(arg)
et, ev, _ = sys.exc_info()
reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
"""
Fix any globally cached `dist_path` related data
`dist_path` should be a path of a newly installed egg distribution (zipped
or unzipped).
sys.path_importer_cache contains finder objects that have been cached when
importing data from the original distribution. Any such finders need to be
cleared since the replacement distribution might be packaged differently,
e.g. a zipped egg distribution might get replaced with an unzipped egg
folder or vice versa. Having the old finders cached may then cause Python
to attempt loading modules from the replacement distribution using an
incorrect loader.
zipimport.zipimporter objects are Python loaders charged with importing
data packaged inside zip archives. If stale loaders referencing the
original distribution are left behind, they can fail to load modules from
the replacement distribution. E.g. if an old zipimport.zipimporter instance
is used to load data from a new zipped egg archive, it may cause the
operation to attempt to locate the requested data in the wrong location -
one indicated by the original distribution's zip archive directory
information. Such an operation may then fail outright, e.g. report having
read a 'bad local file header', or even worse, it may fail silently &
return invalid data.
zipimport._zip_directory_cache contains cached zip archive directory
information for all existing zipimport.zipimporter instances and all such
instances connected to the same archive share the same cached directory
information.
If asked, and the underlying Python implementation allows it, we can fix
all existing zipimport.zipimporter instances instead of having to track
them down and remove them one by one, by updating their shared cached zip
archive directory information. This, of course, assumes that the
replacement distribution is packaged as a zipped egg.
If not asked to fix existing zipimport.zipimporter instances, we still do
our best to clear any remaining zipimport.zipimporter related cached data
that might somehow later get used when attempting to load data from the new
distribution and thus cause such load operations to fail. Note that when
tracking down such remaining stale data, we can not catch every conceivable
usage from here, and we clear only those that we know of and have found to
cause problems if left alive. Any remaining caches should be updated by
whoever is in charge of maintaining them, i.e. they should be ready to
handle us replacing their zip archives with new distributions at runtime.
"""
# There are several other known sources of stale zipimport.zipimporter
# instances that we do not clear here, but might if ever given a reason to
# do so:
# * Global setuptools pkg_resources.working_set (a.k.a. 'master working
# set') may contain distributions which may in turn contain their
# zipimport.zipimporter loaders.
# * Several zipimport.zipimporter loaders held by local variables further
# up the function call stack when running the setuptools installation.
# * Already loaded modules may have their __loader__ attribute set to the
# exact loader instance used when importing them. Python 3.4 docs state
# that this information is intended mostly for introspection and so is
# not expected to cause us problems.
normalized_path = normalize_path(dist_path)
_uncache(normalized_path, sys.path_importer_cache)
if fix_zipimporter_caches:
_replace_zip_directory_cache_data(normalized_path)
else:
# Here, even though we do not want to fix existing and now stale
# zipimporter cache information, we still want to remove it. Related to
# Python's zip archive directory information cache, we clear each of
# its stale entries in two phases:
# 1. Clear the entry so attempting to access zip archive information
# via any existing stale zipimport.zipimporter instances fails.
# 2. Remove the entry from the cache so any newly constructed
# zipimport.zipimporter instances do not end up using old stale
# zip archive directory information.
# This whole stale data removal step does not seem strictly necessary,
# but has been left in because it was done before we started replacing
# the zip archive directory information cache content if possible, and
# there are no relevant unit tests that we can depend on to tell us if
# this is really needed.
_remove_and_clear_zip_directory_cache_data(normalized_path)
def _collect_zipimporter_cache_entries(normalized_path, cache):
"""
Return zipimporter cache entry keys related to a given normalized path.
Alternative path spellings (e.g. those using different character case or
those using alternative path separators) related to the same path are
included. Any sub-path entries are included as well, i.e. those
corresponding to zip archives embedded in other zip archives.
"""
result = []
prefix_len = len(normalized_path)
for p in cache:
np = normalize_path(p)
if (np.startswith(normalized_path) and
np[prefix_len:prefix_len + 1] in (os.sep, '')):
result.append(p)
return result
def _update_zipimporter_cache(normalized_path, cache, updater=None):
"""
Update zipimporter cache data for a given normalized path.
Any sub-path entries are processed as well, i.e. those corresponding to zip
archives embedded in other zip archives.
Given updater is a callable taking a cache entry key and the original entry
(after already removing the entry from the cache), and expected to update
the entry and possibly return a new one to be inserted in its place.
Returning None indicates that the entry should not be replaced with a new
one. If no updater is given, the cache entries are simply removed without
any additional processing, the same as if the updater simply returned None.
"""
for p in _collect_zipimporter_cache_entries(normalized_path, cache):
# N.B. pypy's custom zipimport._zip_directory_cache implementation does
# not support the complete dict interface:
# * Does not support item assignment, which means this function can be
# used on it only for removing existing cache entries.
# * Does not support the dict.pop() method, forcing us to use the
# get/del patterns instead. For more detailed information see the
# following links:
# https://bitbucket.org/pypa/setuptools/issue/202/more-robust-zipimporter-cache-invalidation#comment-10495960
# https://bitbucket.org/pypy/pypy/src/dd07756a34a41f674c0cacfbc8ae1d4cc9ea2ae4/pypy/module/zipimport/interp_zipimport.py#cl-99
old_entry = cache[p]
del cache[p]
new_entry = updater and updater(p, old_entry)
if new_entry is not None:
cache[p] = new_entry
def _uncache(normalized_path, cache):
_update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
old_entry.clear()
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=clear_and_remove_cached_zip_archive_directory_data)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances lying around, attempting to use them
# will fail due to not having their zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
_replace_zip_directory_cache_data = \
_remove_and_clear_zip_directory_cache_data
else:
def _replace_zip_directory_cache_data(normalized_path):
def replace_cached_zip_archive_directory_data(path, old_entry):
# N.B. In theory, we could load the zip directory information just
# once for all updated path spellings, and then copy it locally and
# update its contained path strings to contain the correct
# spelling, but that seems like a way too invasive move (this cache
# structure is not officially documented anywhere and could in
# theory change with new Python releases) for no significant
# benefit.
old_entry.clear()
zipimport.zipimporter(path)
old_entry.update(zipimport._zip_directory_cache[path])
return old_entry
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
"Is this string a valid Python script?"
try:
compile(text, filename, 'exec')
except (SyntaxError, TypeError):
return False
else:
return True
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
fp = open(executable)
magic = fp.read(2)
fp.close()
except (OSError, IOError):
return executable
return magic == '#!'
def nt_quote_arg(arg):
"""Quote a command line argument according to Windows parsing rules"""
result = []
needquote = False
nb = 0
needquote = (" " in arg) or ("\t" in arg)
if needquote:
result.append('"')
for c in arg:
if c == '\\':
nb += 1
elif c == '"':
# double preceding backslashes, then add a \"
result.append('\\' * (nb * 2) + '\\"')
nb = 0
else:
if nb:
result.append('\\' * nb)
nb = 0
result.append(c)
if nb:
result.append('\\' * nb)
if needquote:
result.append('\\' * nb) # double the trailing backslashes
result.append('"')
return ''.join(result)
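# Illustrative (hedged) examples of the quoting rules implemented above,
# shown as Python string literals:
#   nt_quote_arg('C:\\Program Files\\python.exe')
#       -> '"C:\\Program Files\\python.exe"'     (quoted because of the space)
#   nt_quote_arg('say "hi"')
#       -> '"say \\"hi\\""'                      (quoted; embedded quotes escaped)
#   nt_quote_arg('plain.exe')
#       -> 'plain.exe'                           (no whitespace, left untouched)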
def is_python_script(script_text, filename):
"""Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
"""
if filename.endswith('.py') or filename.endswith('.pyw'):
return True # extension says it's Python
if is_python(script_text, filename):
return True # it's syntactically valid Python
if script_text.startswith('#!'):
# It begins with a '#!' line, so check if 'python' is in it somewhere
return 'python' in script_text.splitlines()[0].lower()
return False # Not any Python I can recognize
try:
from os import chmod as _chmod
except ImportError:
# Jython compatibility
def _chmod(*args):
pass
def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
except os.error:
e = sys.exc_info()[1]
log.debug("chmod failed: %s", e)
def fix_jython_executable(executable, options):
if sys.platform.startswith('java') and is_sh(executable):
# The Jython workaround is not needed on Linux systems.
import java
if java.lang.System.getProperty("os.name") == "Linux":
return executable
# Work around Jython's sys.executable being a .sh (an invalid
# shebang line interpreter)
if options:
# Can't apply the workaround, leave it broken
log.warn(
"WARNING: Unable to adapt shebang line for Jython,"
" the following script is NOT executable\n"
" see http://bugs.jython.org/issue1112 for"
" more information.")
else:
return '/usr/bin/env %s' % executable
return executable
class ScriptWriter(object):
"""
Encapsulates behavior around writing entry point scripts for console and
gui apps.
"""
template = textwrap.dedent("""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
__requires__ = %(spec)r
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point(%(spec)r, %(group)r, %(name)r)()
)
""").lstrip()
@classmethod
def get_script_args(cls, dist, executable=sys_executable, wininst=False):
"""
Yield write_script() argument tuples for a distribution's entrypoints
"""
gen_class = cls.get_writer(wininst)
spec = str(dist.as_requirement())
header = get_script_header("", executable, wininst)
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
script_text = gen_class.template % locals()
for res in gen_class._get_script_args(type_, name, header,
script_text):
yield res
@classmethod
def get_writer(cls, force_windows):
if force_windows or sys.platform == 'win32':
return WindowsScriptWriter.get_writer()
return cls
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
# Simply write the stub with no extension.
yield (name, header + script_text)
class WindowsScriptWriter(ScriptWriter):
@classmethod
def get_writer(cls):
"""
Get a script writer suitable for Windows
"""
writer_lookup = dict(
executable=WindowsExecutableLauncherWriter,
natural=cls,
)
# for compatibility, use the executable launcher by default
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
return writer_lookup[launcher]
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"For Windows, add a .py extension"
ext = dict(console='.pya', gui='.pyw')[type_]
if ext not in os.environ['PATHEXT'].lower().split(';'):
warnings.warn("%s not listed in PATHEXT; scripts will not be "
"recognized as executables." % ext, UserWarning)
old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
old.remove(ext)
header = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield name + ext, header + script_text, 't', blockers
@staticmethod
def _adjust_header(type_, orig_header):
"""
Make sure 'pythonw' is used for gui and 'python' is used for
console (regardless of what sys.executable is).
"""
pattern = 'pythonw.exe'
repl = 'python.exe'
if type_ == 'gui':
pattern, repl = repl, pattern
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
new_header = pattern_ob.sub(string=orig_header, repl=repl)
clean_header = new_header[2:-1].strip('"')
if sys.platform == 'win32' and not os.path.exists(clean_header):
# the adjusted version doesn't exist, so return the original
return orig_header
return new_header
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"""
For Windows, add a .py extension and an .exe launcher
"""
if type_ == 'gui':
launcher_type = 'gui'
ext = '-script.pyw'
old = ['.pyw']
else:
launcher_type = 'cli'
ext = '-script.py'
old = ['.py', '.pyc', '.pyo']
hdr = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield (name + ext, hdr + script_text, 't', blockers)
yield (
name + '.exe', get_win_launcher(launcher_type),
'b' # write in binary mode
)
if not is_64bit():
# install a manifest for the launcher to prevent Windows
# from detecting it as an installer (which it will for
# launchers like easy_install.exe). Consider only
# adding a manifest for launchers detected as installers.
# See Distribute #143 for details.
m_name = name + '.exe.manifest'
yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
get_script_args = ScriptWriter.get_script_args
def get_win_launcher(type):
"""
Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string.
"""
launcher_fn = '%s.exe' % type
if platform.machine().lower() == 'arm':
launcher_fn = launcher_fn.replace(".", "-arm.")
if is_64bit():
launcher_fn = launcher_fn.replace(".", "-64.")
else:
launcher_fn = launcher_fn.replace(".", "-32.")
return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
if PY2:
return manifest % vars()
else:
return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
"""Recursively delete a directory tree.
This code is taken from the Python 2.4 version of 'shutil', because
the 2.3 version doesn't really work right.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def current_umask():
tmp = os.umask(0o022)
os.umask(tmp)
return tmp
def bootstrap():
# This function is called when setuptools*.egg is run using /bin/sh
import setuptools
argv0 = os.path.dirname(setuptools.__path__[0])
sys.argv[0] = argv0
sys.argv.append(argv0)
main()
def main(argv=None, **kw):
from setuptools import setup
from setuptools.dist import Distribution
class DistributionWithoutHelpCommands(Distribution):
common_usage = ""
def _show_help(self, *args, **kw):
with _patch_usage():
Distribution._show_help(self, *args, **kw)
if argv is None:
argv = sys.argv[1:]
with _patch_usage():
setup(
script_args=['-q', 'easy_install', '-v'] + argv,
script_name=sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands, **kw
)
@contextlib.contextmanager
def _patch_usage():
import distutils.core
USAGE = textwrap.dedent("""
usage: %(script)s [options] requirement_or_url ...
or: %(script)s --help
""").lstrip()
def gen_usage(script_name):
return USAGE % dict(
script=os.path.basename(script_name),
)
saved = distutils.core.gen_usage
distutils.core.gen_usage = gen_usage
try:
yield
finally:
distutils.core.gen_usage = saved
|
CollinsIchigo/hdx_2
|
venv/lib/python2.7/site-packages/setuptools/command/easy_install.py
|
Python
|
mit
| 82,867
|
[
"VisIt"
] |
8d811b1dd63e27c15fc7cb9f38a5dad74adf81641a45361ef3288fd5f104c2af
|
# Copyright (c) 2017 Elias Riedel Gårding
# Licensed under the MIT License
from .node import Node
from utilities import hamming_distance, memoized
from distributions import gaussian
import numpy as np
import scipy.stats as stats
from scipy.integrate import quad
from scipy.optimize import minimize
from queue import PriorityQueue
from numbers import Number
import warnings
class StackDecoder:
"""Decodes a convolutional code transmitted over a BSC.
Designed so that decoding incrementally by calling decode on a prefix
of the received code sequence is efficient."""
def __init__(self, code, p=None, SNR=None, PAM=None, bias_mode='R'):
"""If p is given, assumes BSC(p). If SNR is given, assumes AWGN(SNR)."""
self.code = code
if p is not None:
self.p = p
self.compute_metric_increment = BSC_metric_increment(code.n, p)
elif SNR is not None:
self.SNR = SNR
self.compute_metric_increment = AWGN_2PAM_metric_increment(SNR) \
if PAM is None else AWGN_PAM_metric_increment(PAM, SNR)
else:
raise ValueError("p or SNR must be given")
if bias_mode == 'R':
self.bias = self.code.rate()
elif bias_mode == 'E0':
self.bias = self.E0(1)
elif isinstance(bias_mode, Number):
self.bias = bias_mode
else:
raise ValueError(
"{} is not 'R', 'E0' or a number".format(bias_mode))
self.bias_sanity_check()
self.nodes = PriorityQueue()
root = StackDecoder.Node(self.code)
root.metric = 0
self.nodes.put(root)
# The first node in each layer
self.first_nodes = [root]
def extend(self, node, received):
for child in node.extend():
child.metric = node.metric + self.compute_metric_increment(
self.bias, received, child.codeword)
self.nodes.put(child)
def decode_node(self, received_sequence):
"""Returns the node corresponding to the decoded path."""
# Run until we reach the first full-length path
while True:
node = self.nodes.get()
depth = len(self.first_nodes) - 1 # Max depth among explored nodes
if node.depth == depth + 1:
self.first_nodes.append(node)
if node.depth == len(received_sequence):
# Add it back to the queue so it can be extended in the future
self.nodes.put(node)
return node
self.extend(node, received_sequence[node.depth])
def decode(self, received_sequence):
"""Returns the decoded bit sequence."""
return self.decode_node(received_sequence).input_history()
def decode_block(self, received_sequence):
"""Returns the last block of the decoded bit sequence."""
return self.decode_node(received_sequence).input_block
def E0(self, rho, simple_bound=False):
# Compared with (3b) of the tree code paper, 1 = log 2 and the summation
# have been simplified away
if hasattr(self, 'p') or simple_bound:
if hasattr(self, 'p'):
p = self.p
else:
# Lower bound on E0 (slicing, i.e. convert AWGN → BSC at a loss)
# Bit flip (sign crossover) if noise is larger than 1
p = gaussian(1 / self.SNR).sf(1)
return rho - (1 + rho) * np.log2(
p**(1/(1+rho)) + (1 - p)**(1/(1+rho)))
else:
# From an example in the tree code paper
w = gaussian(1 / self.SNR).pdf
return 1 + rho - np.log2(quad(lambda z:
( w(z - 1)**(1/(1+rho)) + w(z + 1)**(1/(1+rho)) )**(1+rho),
-np.inf, np.inf)[0])
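# For reference: the BSC branch above is Gallager's E0 for a uniform input
# distribution, written in bits,
#
#     E0(rho) = rho - (1 + rho) * log2( p**(1/(1+rho)) + (1-p)**(1/(1+rho)) ),
#
# and the AWGN branch evaluates the corresponding integral over the channel
# output density w.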
@memoized
def EJ(self):
# Function to maximize
f = lambda rho: rho / (1 + rho) * (
self.E0(rho) + self.bias - (1 + rho) * self.code.rate())
return -minimize(lambda rho: -f(rho), 0.5, bounds=[[0,1]]).fun[0]
def bias_sanity_check(self):
E0 = self.E0(1)
if self.bias > E0:
warnings.warn((
"Bias {:.4f} is larger than E0 = {:.4f}. "
+ "Expect high decoding time complexity."
).format(self.bias, E0), RuntimeWarning)
class Node(Node):
"""A node with a comparison operator for use in a min-priority queue."""
def __lt__(self, other):
return self.metric > other.metric
def BSC_metric_increment(n, p):
def metric_increment(bias, received, codeword):
# Binary codewords
assert all(z in [0,1] for z in received.flatten())
d = hamming_distance(received, codeword)
return d * np.log2(p) + (n - d) * np.log2(1 - p) \
+ (1 - bias) * n
return metric_increment
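# The increment above is the usual Fano metric for sequential (stack)
# decoding: for an n-bit branch it is
#
#     sum_i [ log2 P(z_i | c_i) - log2 P(z_i) ] - n * B,
#
# which on a BSC(p) with P(z_i) = 1/2 reduces to
#
#     d * log2(p) + (n - d) * log2(1 - p) + (1 - B) * n,
#
# where d is the Hamming distance between the received branch and the
# codeword and B is the bias (typically the code rate R).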
def AWGN_2PAM_metric_increment(SNR):
def metric_increment(bias, received, codeword):
# Real-valued codewords
assert all(isinstance(z, float) for z in received.flatten())
# log2( w(zi|ci) / p(zi) )
log_term = lambda z, c: \
1 - SNR / (2 * np.log(2)) * (z - (-1)**c)**2 \
- np.log2(np.exp(-SNR/2 * (z - 1)**2)
+ np.exp(-SNR/2 * (z + 1)**2))
return sum(log_term(z, c) - bias
for z, c in zip(received, codeword))
return metric_increment
def AWGN_PAM_metric_increment(PAM, SNR):
return lambda bias, received, codeword: \
PAM.metric_increment(SNR, bias, received, codeword)
|
eliasrg/SURF2017
|
code/separate/coding/convolutional/stack.py
|
Python
|
mit
| 5,605
|
[
"Gaussian"
] |
bd78b89cb6c881e474a3c32052778e8dc23303e6199f75c1ac86a3e7a60da944
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffycoretools(RPackage):
"""Various wrapper functions that have been written to streamline
the more common analyses that a core Biostatistician might see."""
homepage = "https://www.bioconductor.org/packages/affycoretools/"
url = "https://git.bioconductor.org/packages/affycoretools"
version('1.48.0', git='https://git.bioconductor.org/packages/affycoretools', commit='e0d52e34eead1ac45d3e60c59efd940e4889eb99')
depends_on('r@3.4.0:3.4.9', when='@1.48.0')
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-affy', type=('build', 'run'))
depends_on('r-limma', type=('build', 'run'))
depends_on('r-gostats', type=('build', 'run'))
depends_on('r-gcrma', type=('build', 'run'))
depends_on('r-xtable', type=('build', 'run'))
depends_on('r-annotationdbi', type=('build', 'run'))
depends_on('r-ggplot2', type=('build', 'run'))
depends_on('r-gplots', type=('build', 'run'))
depends_on('r-oligoclasses', type=('build', 'run'))
depends_on('r-reportingtools', type=('build', 'run'))
depends_on('r-hwriter', type=('build', 'run'))
depends_on('r-lattice', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-edger', type=('build', 'run'))
depends_on('r-rsqlite', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
|
lgarren/spack
|
var/spack/repos/builtin/packages/r-affycoretools/package.py
|
Python
|
lgpl-2.1
| 2,632
|
[
"Bioconductor"
] |
87549b9e0cf462419ffed07f6f63df28b9d4b21d52ba96daa34c9d3ac0f496a5
|
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import multivariate_normal
from skimage import filters
from toybox.symmetry.operators import propagate
from toybox.symmetry.parsers import parse_hermann_mauguin
from toybox.tools import check_points, check_point, equivalent
class Points:
def __init__(self,
starting_points=None,
symmetry=1,
auto_zero=True,
):
"""A series of points related by symmetry.
Use this class to define where the diffraction points should be in relation
to one another.
Parameters
----------
starting_points : array_like
(`n_points`, 3)
Initial points, in the format (x, y, intensity)
symmetry : :obj:`int` or :obj:`str`, optional
Symmetry to apply to the points. Defaults to ``1``, i.e. no symmetry.
auto_zero : bool
If True, automatically appends (0., 0., None) to the starting_points.
"""
if starting_points is not None:
starting_points = check_points(starting_points)
self.starting_points = np.array(starting_points)
else:
self.starting_points = None
if auto_zero:
self.append_point((0., 0.))
self.symmetry = symmetry
def append_point(self, point):
"""Adds a point to the pattern.
Parameters
----------
point : array_like
(x, y, [intensity])
Coordinates of the point to add.
"""
point = check_point(point)
if self.starting_points is None:
self.starting_points = np.array(point).reshape(1, -1)
else:
self.starting_points = np.vstack((self.starting_points, point))
return self
@property
def points(self):
""":class:`numpy.ndarray` The points in the array, generated by
propagating the starting points through the specified symmetry.
"""
operations = parse_hermann_mauguin(self.symmetry)
points = propagate(self.starting_points, *operations)
return points
@points.setter
def points(self, points):
self.starting_points = check_points(points)
@property
def positions(self):
""":class:`numpy.ndarray` The positions of all the :attr:`points`."""
return self.points[:, :2].astype(float)
@property
def intensities(self):
""":class:`numpy.ndarray` The intensities of all the :attr:`points`."""
return self.points[:, 2]
@intensities.setter
def intensities(self, intensities):
self.starting_points[:, 2] = intensities
def to_shape(self, shape, scale=1.0):
"""Scales and translates the points into a bounding box of size `shape`.
Parameters
----------
shape : :obj:`tuple` of :obj:`int`
The shape of the bounding box.
scale : :obj:`float`, optional
All the new points will fit within an ellipse centred in the bounding
box whose semi-axes are `scale` times half of `shape`.
Defaults to 1.0.
Returns
-------
:class:`numpy.ndarray`
(n_points, 2)
The transformed points.
"""
offset = np.array(shape)/2
scale_factor = scale * offset
distance = np.nanmax(np.sqrt(np.sum(np.square(self.positions), axis=1)))
return (self.positions/distance) * scale_factor + offset
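# In other words: the positions are normalised by the largest distance from
# the origin, scaled per axis by scale * shape / 2 and translated by
# shape / 2, so the farthest starting point lands on an ellipse of semi-axes
# scale * shape / 2 centred in the bounding box.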
def __repr__(self):
return "Array\n-----\nSymmetry: {}\n{}".format(self.symmetry,
self.points)
def __eq__(self, other):
if equivalent(self.points, other.points):
return True
else:
return False
class Pattern(np.ndarray):
"""A class representing a toy pattern.
Subclassing np.ndarray, this class simply extends the functionality of the
array.
"""
@classmethod
def from_points(cls, points, shape=(100, 100), scale=1.0, blur=1.):
"""Creates a pattern from a set of points.
Currently only Gaussian peaks are implemented.
Parameters
----------
points : Points, array_like
Positions and intensities of the points in the array.
shape : tuple of int
Shape of the final array.
scale : float
Maximum extent of the points. Should be less than 1.
blur : float
Level of Gaussian blur to apply to the pattern.
Returns
-------
Pattern
An array simulating a diffraction pattern.
"""
if not isinstance(points, Points):
points = Points(points)
positions = points.to_shape(shape, scale)
dat = np.zeros(shape)
x, y = np.mgrid[0: shape[0], 0: shape[1]]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x
pos[:, :, 1] = y
for position, intensity in zip(positions, points.intensities):
dat += intensity * multivariate_normal.pdf(pos, mean=position, cov=1)
dat = filters.gaussian(dat, sigma=blur)
return dat.view(cls)
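# Illustrative usage (hedged sketch; the coordinates are arbitrary and the
# exact accepted point formats depend on toybox.tools.check_points):
#   pts = Points([(1.0, 0.0, 1.0), (0.0, 1.0, 0.5)], auto_zero=False)
#   pattern = Pattern.from_points(pts, shape=(128, 128), scale=0.8, blur=2.0)
#   pattern.plot(colorbar=True)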
def plot(self, colorbar=False, cmap='gray'):
"""Plots the pattern using :mod:`matplotlib`.
Parameters
----------
colorbar : bool
If `True`, the plot is produced with a color scale bar.
cmap : str
Set the color map of the plot. Can be any :mod:`matplotlib` color
map name.
"""
plt.imshow(self, interpolation='none', cmap=cmap)
if colorbar:
plt.colorbar()
|
bm424/diffraction-toybox
|
toybox/toys/core.py
|
Python
|
mit
| 5,671
|
[
"Gaussian"
] |
413c8eb0f655b9a63c2df15214bbb5ad61cf890980e0a91a01dd89d9d787bd9b
|
@mfunction("p")
def multivariateGaussian(X=None, mu=None, Sigma2=None):
# MULTIVARIATEGAUSSIAN Computes the probability density function of the
# multivariate gaussian distribution.
# p = MULTIVARIATEGAUSSIAN(X, mu, Sigma2) Computes the probability
# density function of the examples X under the multivariate gaussian
# distribution with parameters mu and Sigma2. If Sigma2 is a matrix, it is
# treated as the covariance matrix. If Sigma2 is a vector, it is treated
# as the \sigma^2 values of the variances in each dimension (a diagonal
# covariance matrix)
#
k = length(mu)
if (size(Sigma2, 2) == 1) or (size(Sigma2, 1) == 1):
Sigma2 = diag(Sigma2)
end
X = bsxfun(minus, X, mu(mslice[:]).cT)
p = (2 * pi) ** (-k / 2) * det(Sigma2) ** (-0.5) * exp(-0.5 * sum(bsxfun(times, X * pinv(Sigma2), X), 2))
end
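# A hedged, idiomatic NumPy sketch of the same computation (added for
# illustration only; it is not part of the original OMPC-translated file and
# the helper name is hypothetical):
def multivariate_gaussian_np(X, mu, Sigma2):
    import numpy as np  # local import so the OMPC code above is unaffected
    k = np.size(mu)
    Sigma2 = np.asarray(Sigma2, dtype=float)
    if Sigma2.ndim == 1 or 1 in Sigma2.shape:
        # A variance vector is treated as a diagonal covariance matrix.
        Sigma2 = np.diag(np.ravel(Sigma2))
    Xc = np.asarray(X, dtype=float) - np.ravel(mu)  # centre the examples
    quad_form = np.sum(Xc.dot(np.linalg.pinv(Sigma2)) * Xc, axis=1)
    return ((2 * np.pi) ** (-k / 2.0)
            * np.linalg.det(Sigma2) ** (-0.5)
            * np.exp(-0.5 * quad_form))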
|
gedman4b/MachineLearning
|
coursera/AnomolyDetection and RecommenderSystems/multivariateGaussian.py
|
Python
|
gpl-3.0
| 911
|
[
"Gaussian"
] |
556724a2a59ef16ec52a67fb9386cd021f791bd7a69efa6faf5d668fed58fb8c
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a parser for the Mozilla Firefox history."""
import sqlite3
from plaso.events import time_events
from plaso.lib import event
from plaso.lib import eventdata
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
# Check the SQLite version, bail out early if too old.
if sqlite3.sqlite_version_info < (3, 7, 8):
raise ImportWarning(
'FirefoxHistoryParser requires at least SQLite version 3.7.8.')
class FirefoxPlacesBookmarkAnnotation(time_events.TimestampEvent):
"""Convenience class for a Firefox bookmark annotation event."""
DATA_TYPE = 'firefox:places:bookmark_annotation'
def __init__(self, timestamp, usage, row_id, title, url, content):
"""Initializes the event object.
Args:
timestamp: The timestamp value.
usage: Timestamp description string.
row_id: The identifier of the corresponding row.
title: The title of the bookmark folder.
url: The bookmarked URL.
content: The content of the annotation.
"""
super(FirefoxPlacesBookmarkAnnotation, self).__init__(
timestamp, usage)
self.offset = row_id
self.title = title
self.url = url
self.content = content
class FirefoxPlacesBookmarkFolder(time_events.TimestampEvent):
"""Convenience class for a Firefox bookmark folder event."""
DATA_TYPE = 'firefox:places:bookmark_folder'
def __init__(self, timestamp, usage, row_id, title):
"""Initializes the event object.
Args:
timestamp: The timestamp value.
usage: Timestamp description string.
row_id: The identifier of the corresponding row.
title: The title of the bookmark folder.
"""
super(FirefoxPlacesBookmarkFolder, self).__init__(
timestamp, usage)
self.offset = row_id
self.title = title
class FirefoxPlacesBookmark(time_events.TimestampEvent):
"""Convenience class for a Firefox bookmark event."""
DATA_TYPE = 'firefox:places:bookmark'
# TODO: move to formatter.
_TYPES = {
1: 'URL',
2: 'Folder',
3: 'Separator',
}
# Unknown bookmark types fall back to 'N/A' (see the lookup in __init__).
# pylint: disable=redefined-builtin
def __init__(self, timestamp, usage, row_id, type, title, url, places_title,
hostname, visit_count):
"""Initializes the event object.
Args:
timestamp: The timestamp value.
usage: Timestamp description string.
row_id: The identifier of the corresponding row.
type: Integer value containing the bookmark type.
title: The title of the bookmark folder.
url: The bookmarked URL.
places_title: The places title.
hostname: The hostname.
visit_count: The visit count.
"""
super(FirefoxPlacesBookmark, self).__init__(timestamp, usage)
self.offset = row_id
self.type = self._TYPES.get(type, 'N/A')
self.title = title
self.url = url
self.places_title = places_title
self.host = hostname
self.visit_count = visit_count
class FirefoxPlacesPageVisitedEvent(event.EventObject):
"""Convenience class for a Firefox page visited event."""
DATA_TYPE = 'firefox:places:page_visited'
def __init__(self, timestamp, row_id, url, title, hostname, visit_count,
visit_type, extra):
"""Initializes the event object.
Args:
timestamp: The timestamp time value. The timestamp contains the
number of microseconds since Jan 1, 1970 00:00:00 UTC.
row_id: The identifier of the corresponding row.
url: The URL of the visited page.
title: The title of the visited page.
hostname: The visited hostname.
visit_count: The visit count.
visit_type: The transition type for the event.
extra: A list containing extra event data (TODO refactor).
"""
super(FirefoxPlacesPageVisitedEvent, self).__init__()
self.timestamp = timestamp
self.timestamp_desc = eventdata.EventTimestamp.PAGE_VISITED
self.offset = row_id
self.url = url
self.title = title
self.host = hostname
self.visit_count = visit_count
self.visit_type = visit_type
if extra:
self.extra = extra
class FirefoxDownload(time_events.TimestampEvent):
"""Convenience class for a Firefox download event."""
DATA_TYPE = 'firefox:downloads:download'
def __init__(self, timestamp, usage, row_id, name, url, referrer, full_path,
temporary_location, received_bytes, total_bytes, mime_type):
"""Initializes the event object.
Args:
timestamp: The timestamp value.
usage: Timestamp description string.
row_id: The identifier of the corresponding row.
name: The name of the download.
url: The source URL of the download.
referrer: The referrer URL of the download.
full_path: The full path of the target of the download.
temporary_location: The temporary location of the download.
received_bytes: The number of bytes received.
total_bytes: The total number of bytes of the download.
mime_type: The mime type of the download.
"""
super(FirefoxDownload, self).__init__(timestamp, usage)
self.offset = row_id
self.name = name
self.url = url
self.referrer = referrer
self.full_path = full_path
self.temporary_location = temporary_location
self.received_bytes = received_bytes
self.total_bytes = total_bytes
self.mime_type = mime_type
class FirefoxHistoryPlugin(interface.SQLitePlugin):
"""Parses a Firefox history file.
The Firefox history is stored in a SQLite database file named
places.sqlite.
"""
NAME = 'firefox_history'
DESCRIPTION = u'Parser for Firefox history SQLite database files.'
# Define the needed queries.
QUERIES = [
(('SELECT moz_historyvisits.id, moz_places.url, moz_places.title, '
'moz_places.visit_count, moz_historyvisits.visit_date, '
'moz_historyvisits.from_visit, moz_places.rev_host, '
'moz_places.hidden, moz_places.typed, moz_historyvisits.visit_type '
'FROM moz_places, moz_historyvisits '
'WHERE moz_places.id = moz_historyvisits.place_id'),
'ParsePageVisitedRow'),
(('SELECT moz_bookmarks.type, moz_bookmarks.title AS bookmark_title, '
'moz_bookmarks.dateAdded, moz_bookmarks.lastModified, '
'moz_places.url, moz_places.title AS places_title, '
'moz_places.rev_host, moz_places.visit_count, moz_bookmarks.id '
'FROM moz_places, moz_bookmarks WHERE moz_bookmarks.fk = moz_places.id '
'AND moz_bookmarks.type <> 3'),
'ParseBookmarkRow'),
(('SELECT moz_items_annos.content, moz_items_annos.dateAdded, '
'moz_items_annos.lastModified, moz_bookmarks.title, '
'moz_places.url, moz_places.rev_host, moz_items_annos.id '
'FROM moz_items_annos, moz_bookmarks, moz_places '
'WHERE moz_items_annos.item_id = moz_bookmarks.id '
'AND moz_bookmarks.fk = moz_places.id'),
'ParseBookmarkAnnotationRow'),
(('SELECT moz_bookmarks.id, moz_bookmarks.title,'
'moz_bookmarks.dateAdded, moz_bookmarks.lastModified '
'FROM moz_bookmarks WHERE moz_bookmarks.type = 2'),
'ParseBookmarkFolderRow')]
# The required tables.
REQUIRED_TABLES = frozenset([
'moz_places', 'moz_historyvisits', 'moz_bookmarks', 'moz_items_annos'])
# Cache queries.
URL_CACHE_QUERY = (
'SELECT h.id AS id, p.url, p.rev_host FROM moz_places p, '
'moz_historyvisits h WHERE p.id = h.place_id')
def ParseBookmarkAnnotationRow(
self, parser_context, row, query=None, **unused_kwargs):
"""Parses a bookmark annotation row.
Args:
parser_context: A parser context object (instance of ParserContext).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
if row['dateAdded']:
event_object = FirefoxPlacesBookmarkAnnotation(
row['dateAdded'], eventdata.EventTimestamp.ADDED_TIME,
row['id'], row['title'], row['url'], row['content'])
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
if row['lastModified']:
event_object = FirefoxPlacesBookmarkAnnotation(
row['lastModified'], eventdata.EventTimestamp.MODIFICATION_TIME,
row['id'], row['title'], row['url'], row['content'])
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
def ParseBookmarkFolderRow(
self, parser_context, row, query=None, **unused_kwargs):
"""Parses a bookmark folder row.
Args:
parser_context: A parser context object (instance of ParserContext).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
if not row['title']:
title = 'N/A'
else:
title = row['title']
if row['dateAdded']:
event_object = FirefoxPlacesBookmarkFolder(
row['dateAdded'], eventdata.EventTimestamp.ADDED_TIME,
row['id'], title)
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
if row['lastModified']:
event_object = FirefoxPlacesBookmarkFolder(
row['lastModified'], eventdata.EventTimestamp.MODIFICATION_TIME,
row['id'], title)
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
def ParseBookmarkRow(self, parser_context, row, query=None, **unused_kwargs):
"""Parses a bookmark row.
Args:
parser_context: A parser context object (instance of ParserContext).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
if row['dateAdded']:
event_object = FirefoxPlacesBookmark(
row['dateAdded'], eventdata.EventTimestamp.ADDED_TIME,
row['id'], row['type'], row['bookmark_title'], row['url'],
row['places_title'], getattr(row, 'rev_host', 'N/A'),
row['visit_count'])
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
if row['lastModified']:
event_object = FirefoxPlacesBookmark(
row['lastModified'], eventdata.EventTimestamp.MODIFICATION_TIME,
row['id'], row['type'], row['bookmark_title'], row['url'],
row['places_title'], getattr(row, 'rev_host', 'N/A'),
row['visit_count'])
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
def ParsePageVisitedRow(
self, parser_context, row, query=None, cache=None, database=None,
**unused_kwargs):
"""Parses a page visited row.
Args:
parser_context: A parser context object (instance of ParserContext).
row: The row resulting from the query.
query: Optional query string. The default is None.
cache: A cache object (instance of SQLiteCache).
database: A database object (instance of SQLiteDatabase).
"""
# TODO: make extra conditional formatting.
extras = []
if row['from_visit']:
extras.append(u'visited from: {0}'.format(
self._GetUrl(row['from_visit'], cache, database)))
if row['hidden'] == '1':
extras.append('(url hidden)')
if row['typed'] == '1':
extras.append('(directly typed)')
else:
extras.append('(URL not typed directly)')
if row['visit_date']:
event_object = FirefoxPlacesPageVisitedEvent(
row['visit_date'], row['id'], row['url'], row['title'],
self._ReverseHostname(row['rev_host']), row['visit_count'],
row['visit_type'], extras)
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
def _ReverseHostname(self, hostname):
"""Reverses the hostname and strips the leading dot.
The hostname entry is reversed:
moc.elgoog.www.
Should be:
www.google.com
Args:
hostname: The reversed hostname.
Returns:
Reversed string without a leading dot.
"""
if not hostname:
return ''
if len(hostname) > 1:
if hostname[-1] == '.':
return hostname[::-1][1:]
else:
return hostname[::-1][0:]
return hostname
def _GetUrl(self, url_id, cache, database):
"""Return an URL from a reference to an entry in the from_visit table."""
url_cache_results = cache.GetResults('url')
if not url_cache_results:
cursor = database.cursor
result_set = cursor.execute(self.URL_CACHE_QUERY)
cache.CacheQueryResults(
result_set, 'url', 'id', ('url', 'rev_host'))
url_cache_results = cache.GetResults('url')
url, reverse_host = url_cache_results.get(url_id, [u'', u''])
if not url:
return u''
hostname = self._ReverseHostname(reverse_host)
return u'{:s} ({:s})'.format(url, hostname)
class FirefoxDownloadsPlugin(interface.SQLitePlugin):
"""Parses a Firefox downloads file.
The Firefox downloads history is stored in a SQLite database file named
downloads.sqlite.
"""
NAME = 'firefox_downloads'
DESCRIPTION = u'Parser for Firefox downloads SQLite database files.'
# Define the needed queries.
QUERIES = [
(('SELECT moz_downloads.id, moz_downloads.name, moz_downloads.source, '
'moz_downloads.target, moz_downloads.tempPath, '
'moz_downloads.startTime, moz_downloads.endTime, moz_downloads.state, '
'moz_downloads.referrer, moz_downloads.currBytes, '
'moz_downloads.maxBytes, moz_downloads.mimeType '
'FROM moz_downloads'),
'ParseDownloadsRow')]
# The required tables.
REQUIRED_TABLES = frozenset(['moz_downloads'])
def ParseDownloadsRow(self, parser_context, row, query=None, **unused_kwargs):
"""Parses a downloads row.
Args:
parser_context: A parser context object (instance of ParserContext).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
if row['startTime']:
event_object = FirefoxDownload(
row['startTime'], eventdata.EventTimestamp.START_TIME,
row['id'], row['name'], row['source'], row['referrer'], row['target'],
row['tempPath'], row['currBytes'], row['maxBytes'], row['mimeType'])
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
if row['endTime']:
event_object = FirefoxDownload(
row['endTime'], eventdata.EventTimestamp.END_TIME,
row['id'], row['name'], row['source'], row['referrer'], row['target'],
row['tempPath'], row['currBytes'], row['maxBytes'], row['mimeType'])
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
sqlite.SQLiteParser.RegisterPlugin(FirefoxHistoryPlugin)
sqlite.SQLiteParser.RegisterPlugin(FirefoxDownloadsPlugin)
|
cvandeplas/plaso
|
plaso/parsers/sqlite_plugins/firefox.py
|
Python
|
apache-2.0
| 15,505
|
[
"VisIt"
] |
9eb089da11e59f3dd373ee02324545cb7ea9866fe802f529a2e201dd31330d5d
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals
from __future__ import absolute_import
from pymatgen.analysis.elasticity.tensors import Tensor, \
TensorCollection, get_uvec
from pymatgen.analysis.elasticity.stress import Stress
from pymatgen.analysis.elasticity.strain import Strain
from pymatgen.core.units import Unit
from scipy.misc import factorial
from scipy.integrate import quad
from scipy.optimize import root
from monty.serialization import loadfn
from collections import OrderedDict
import numpy as np
import warnings
import itertools
import os
import sympy as sp
"""
This module provides a class used to describe the elastic tensor,
including methods used to fit the elastic tensor from linear response
stress-strain data
"""
__author__ = "Maarten de Jong, Joseph Montoya"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = ("Ian Winter, Shyam Dwaraknath, "
"Mark Asta, Anubhav Jain")
__version__ = "1.0"
__maintainer__ = "Joseph Montoya"
__email__ = "montoyjh@lbl.gov"
__status__ = "Development"
__date__ = "March 22, 2012"
class NthOrderElasticTensor(Tensor):
"""
An object representing an nth-order tensor expansion
of the stress-strain constitutive equations
"""
GPa_to_eV_A3 = Unit("GPa").get_conversion_factor(Unit("eV ang^-3"))
def __new__(cls, input_array, check_rank=None, tol=1e-4):
obj = super(NthOrderElasticTensor, cls).__new__(
cls, input_array, check_rank=check_rank)
if obj.rank % 2 != 0:
raise ValueError("ElasticTensor must have even rank")
if not obj.is_voigt_symmetric(tol):
warnings.warn("Input elastic tensor does not satisfy "
"standard voigt symmetries")
return obj.view(cls)
@property
def order(self):
"""
Order of the elastic tensor
"""
return self.rank // 2
def calculate_stress(self, strain):
"""
Calculates a given elastic tensor's contribution to the
stress using Einstein summation
Args:
strain (3x3 array-like): matrix corresponding to strain
"""
strain = np.array(strain)
if strain.shape == (6,):
strain = Strain.from_voigt(strain)
assert strain.shape == (3, 3), "Strain must be 3x3 or voigt-notation"
stress_matrix = self.einsum_sequence([strain]*(self.order - 1)) \
/ factorial(self.order - 1)
return Stress(stress_matrix)
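# For the common rank-4 (second-order) case this is just Hooke's law,
# sigma_ij = C_ijkl * eps_kl; for an order-N expansion the contribution is
# C : eps^(N-1) / (N-1)!.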
def energy_density(self, strain, convert_GPa_to_eV=True):
"""
Calculates the elastic energy density due to a strain
"""
e_density = np.sum(self.calculate_stress(strain)*strain) / self.order
if convert_GPa_to_eV:
e_density *= self.GPa_to_eV_A3 # Conversion factor for GPa to eV/A^3
return e_density
@classmethod
def from_diff_fit(cls, strains, stresses, eq_stress=None,
order=2, tol=1e-10):
return cls(diff_fit(strains, stresses, eq_stress, order, tol)[order-2])
class ElasticTensor(NthOrderElasticTensor):
"""
This class extends Tensor to describe the 3x3x3x3
second-order elastic tensor, C_{ijkl}, with various
methods for estimating other properties derived from
the second order elastic tensor
"""
def __new__(cls, input_array, tol=1e-4):
"""
Create an ElasticTensor object. The constructor throws an error if
the shape of the input_array argument is not 3x3x3x3, i.e. in true
tensor notation. Issues a warning if the input_array argument does
not satisfy standard symmetries. Note that the constructor uses
__new__ rather than __init__ according to the standard method of
subclassing numpy ndarrays.
Args:
input_array (3x3x3x3 array-like): the 3x3x3x3 array-like
representing the elastic tensor
tol (float): tolerance for initial symmetry test of tensor
"""
obj = super(ElasticTensor, cls).__new__(cls, input_array,
check_rank=4, tol=tol)
return obj.view(cls)
@property
def compliance_tensor(self):
"""
returns the Voigt-notation compliance tensor,
which is the matrix inverse of the
Voigt-notation elastic tensor
"""
s_voigt = np.linalg.inv(self.voigt)
return ComplianceTensor.from_voigt(s_voigt)
@property
def k_voigt(self):
"""
returns the K_v bulk modulus
"""
return self.voigt[:3, :3].mean()
@property
def g_voigt(self):
"""
returns the G_v shear modulus
"""
return (2. * self.voigt[:3, :3].trace() -
np.triu(self.voigt[:3, :3]).sum() +
3 * self.voigt[3:, 3:].trace()) / 15.
@property
def k_reuss(self):
"""
returns the K_r bulk modulus
"""
return 1. / self.compliance_tensor.voigt[:3, :3].sum()
@property
def g_reuss(self):
"""
returns the G_r shear modulus
"""
return 15. / (8. * self.compliance_tensor.voigt[:3, :3].trace() -
4. * np.triu(self.compliance_tensor.voigt[:3, :3]).sum() +
3. * self.compliance_tensor.voigt[3:, 3:].trace())
@property
def k_vrh(self):
"""
returns the K_vrh (Voigt-Reuss-Hill) average bulk modulus
"""
return 0.5 * (self.k_voigt + self.k_reuss)
@property
def g_vrh(self):
"""
returns the G_vrh (Voigt-Reuss-Hill) average shear modulus
"""
return 0.5 * (self.g_voigt + self.g_reuss)
@property
def y_mod(self):
"""
Calculates Young's modulus (in SI units) using the
Voigt-Reuss-Hill averages of bulk and shear moduli
"""
return 9.e9 * self.k_vrh * self.g_vrh / (3. * self.k_vrh + self.g_vrh)
def directional_poisson_ratio(self, n, m, tol=1e-8):
"""
Calculates the poisson ratio for a specific direction
relative to a second, orthogonal direction
Args:
n (3-d vector): principal direction
m (3-d vector): secondary direction orthogonal to n
tol (float): tolerance for testing of orthogonality
"""
n, m = get_uvec(n), get_uvec(m)
if not np.abs(np.dot(n, m)) < tol:
raise ValueError("n and m must be orthogonal")
v = self.compliance_tensor.einsum_sequence([n]*2 + [m]*2)
v *= -1 / self.compliance_tensor.einsum_sequence([n]*4)
return v
def directional_elastic_mod(self, n):
"""
Calculates directional elastic modulus for a specific vector
"""
n = get_uvec(n)
return self.einsum_sequence([n]*4)
def trans_v(self, structure):
"""
Calculates transverse sound velocity (in SI units) using the
Voigt-Reuss-Hill average bulk modulus
Args:
structure: pymatgen structure object
Returns: transverse sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
natoms = structure.composition.num_atoms
weight = structure.composition.weight
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
return (1e9 * self.g_vrh / mass_density) ** 0.5
def long_v(self, structure):
"""
Calculates longitudinal sound velocity (in SI units)
using the Voigt-Reuss-Hill average bulk modulus
Args:
structure: pymatgen structure object
Returns: longitudinal sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
natoms = structure.composition.num_atoms
weight = structure.composition.weight
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
return (1e9 * (self.k_vrh + 4./3. * self.g_vrh) / mass_density) ** 0.5
def snyder_ac(self, structure):
"""
Calculates Snyder's acoustic sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's acoustic sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
natoms = structure.composition.num_atoms
num_density = 1e30 * nsites / volume
tot_mass = sum([e.atomic_mass for e in structure.species])
avg_mass = 1.6605e-27 * tot_mass / natoms
return 0.38483*avg_mass * \
((self.long_v(structure) + 2.*self.trans_v(structure))/3.) ** 3.\
/ (300.*num_density ** (-2./3.) * nsites ** (1./3.))
def snyder_opt(self, structure):
"""
Calculates Snyder's optical sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's optical sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
num_density = 1e30 * nsites / volume
return 1.66914e-23 * \
(self.long_v(structure) + 2.*self.trans_v(structure))/3. \
/ num_density ** (-2./3.) * (1 - nsites ** (-1./3.))
def snyder_total(self, structure):
"""
Calculates Snyder's total sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's total sound velocity (in SI units)
"""
return self.snyder_ac(structure) + self.snyder_opt(structure)
def clarke_thermalcond(self, structure):
"""
Calculates Clarke's thermal conductivity (in SI units)
Args:
structure: pymatgen structure object
Returns: Clarke's thermal conductivity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
tot_mass = sum([e.atomic_mass for e in structure.species])
natoms = structure.composition.num_atoms
weight = structure.composition.weight
avg_mass = 1.6605e-27 * tot_mass / natoms
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
return 0.87 * 1.3806e-23 * avg_mass**(-2./3.) \
* mass_density**(1./6.) * self.y_mod**0.5
def cahill_thermalcond(self, structure):
"""
Calculates Cahill's thermal conductivity (in SI units)
Args:
structure: pymatgen structure object
Returns: Cahill's thermal conductivity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
num_density = 1e30 * nsites / volume
return 1.3806e-23 / 2.48 * num_density**(2./3.) \
* (self.long_v(structure) + 2 * self.trans_v(structure))
def debye_temperature(self, structure):
"""
Calculates the debye temperature (in SI units)
Args:
structure: pymatgen structure object
Returns: debye temperature (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
tot_mass = sum([e.atomic_mass for e in structure.species])
natoms = structure.composition.num_atoms
weight = structure.composition.weight
avg_mass = 1.6605e-27 * tot_mass / natoms
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
return 2.589e-11 * avg_mass**(-1./3.) * mass_density**(-1./6.) \
* self.y_mod**0.5
def debye_temperature_gibbs(self, structure):
"""
        Calculates the debye temperature according to the GIBBS
formulation (in SI units)
Args:
structure: pymatgen structure object
Returns: debye temperature (in SI units)
"""
volume = structure.volume
tot_mass = sum([e.atomic_mass for e in structure.species])
natoms = structure.composition.num_atoms
avg_mass = 1.6605e-27 * tot_mass / natoms
t = self.homogeneous_poisson
f = (3.*(2.*(2./3.*(1. + t)/(1. - 2.*t))**1.5 +
(1./3.*(1. + t)/(1. - t))**1.5)**-1) ** (1./3.)
return 2.9772e-11 * avg_mass**(-1./2.) * (volume / natoms) ** (-1./6.) \
* f * self.k_vrh ** 0.5
def debye_temperature_from_sound_velocities(self, structure):
"""
Estimates Debye temperature from sound velocities
"""
v0 = (structure.volume * 1e-30 / structure.num_sites)
vl, vt = self.long_v(structure), self.trans_v(structure)
vm = 3**(1./3.) * (1 / vl**3 + 2 / vt**3)**(-1./3.)
td = 1.05457e-34 / 1.38065e-23 * vm * (6 * np.pi**2 / v0) ** (1./3.)
return td
@property
def universal_anisotropy(self):
"""
returns the universal anisotropy value
"""
return 5. * self.g_voigt / self.g_reuss + \
self.k_voigt / self.k_reuss - 6.
@property
def homogeneous_poisson(self):
"""
returns the homogeneous poisson ratio
"""
return (1. - 2. / 3. * self.g_vrh / self.k_vrh) / \
(2. + 2. / 3. * self.g_vrh / self.k_vrh)
def green_kristoffel(self, u):
"""
Returns the Green-Kristoffel tensor for a second-order tensor
"""
return self.einsum_sequence([u, u], "ijkl,i,l")
@property
def property_dict(self):
"""
returns a dictionary of properties derived from the elastic tensor
"""
props = ["k_voigt", "k_reuss", "k_vrh", "g_voigt", "g_reuss", "g_vrh",
"universal_anisotropy", "homogeneous_poisson", "y_mod"]
return {prop: getattr(self, prop) for prop in props}
def get_structure_property_dict(self, structure, include_base_props=True):
"""
returns a dictionary of properties derived from the elastic tensor
and an associated structure
"""
s_props = ["trans_v", "long_v", "snyder_ac", "snyder_opt",
"snyder_total", "clarke_thermalcond", "cahill_thermalcond",
"debye_temperature", "debye_temperature_gibbs"]
sp_dict = {prop: getattr(self, prop)(structure) for prop in s_props}
sp_dict["structure"] = structure
if include_base_props:
sp_dict.update(self.property_dict)
return sp_dict
@classmethod
def from_pseudoinverse(cls, strains, stresses):
"""
Class method to fit an elastic tensor from stress/strain
data. Method uses Moore-Penrose pseudoinverse to invert
the s = C*e equation with elastic tensor, stress, and
strain in voigt notation
Args:
stresses (Nx3x3 array-like): list or array of stresses
strains (Nx3x3 array-like): list or array of strains
"""
# convert the stress/strain to Nx6 arrays of voigt-notation
warnings.warn("Pseudoinverse fitting of Strain/Stress lists may yield "
"questionable results from vasp data, use with caution.")
stresses = np.array([Stress(stress).voigt for stress in stresses])
with warnings.catch_warnings(record=True):
strains = np.array([Strain(strain).voigt for strain in strains])
voigt_fit = np.transpose(np.dot(np.linalg.pinv(strains), stresses))
return cls.from_voigt(voigt_fit)
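    # Illustrative sketch (hypothetical inputs, not part of the original module):
    # given matched lists of 3x3 `strains` and `stresses` (e.g. from a set of
    # deformed-cell calculations), a quick pseudoinverse fit might look like:
    #
    #     et = ElasticTensor.from_pseudoinverse(strains, stresses)
    #     print(et.k_vrh, et.g_vrh)   # Voigt-Reuss-Hill bulk and shear moduli (GPa)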
@classmethod
def from_independent_strains(cls, strains, stresses, eq_stress=None,
vasp=False, tol=1e-10):
"""
Constructs the elastic tensor least-squares fit of independent strains
Args:
strains (list of Strains): list of strain objects to fit
stresses (list of Stresses): list of stress objects to use in fit
corresponding to the list of strains
eq_stress (Stress): equilibrium stress to use in fitting
vasp (boolean): flag for whether the stress tensor should be
converted based on vasp units/convention for stress
tol (float): tolerance for removing near-zero elements of the
resulting tensor
"""
strain_states = [tuple(ss) for ss in np.eye(6)]
ss_dict = get_strain_state_dict(strains, stresses, eq_stress=eq_stress)
if not set(strain_states) <= set(ss_dict.keys()):
raise ValueError("Missing independent strain states: "
"{}".format(set(strain_states) - set(ss_dict)))
if len(set(ss_dict.keys()) - set(strain_states)) > 0:
warnings.warn("Extra strain states in strain-stress pairs "
"are neglected in independent strain fitting")
c_ij = np.zeros((6, 6))
for i in range(6):
istrains = ss_dict[strain_states[i]]["strains"]
istresses = ss_dict[strain_states[i]]["stresses"]
for j in range(6):
c_ij[i, j] = np.polyfit(istrains[:, i], istresses[:, j], 1)[0]
if vasp:
c_ij *= -0.1 # Convert units/sign convention of vasp stress tensor
c = cls.from_voigt(c_ij)
c = c.zeroed(tol)
return c
class ComplianceTensor(Tensor):
"""
This class represents the compliance tensor, and exists
primarily to keep the voigt-conversion scheme consistent
since the compliance tensor has a unique vscale
"""
def __new__(cls, s_array):
vscale = np.ones((6, 6))
vscale[3:] *= 2
vscale[:, 3:] *= 2
obj = super(ComplianceTensor, cls).__new__(cls, s_array, vscale=vscale)
return obj.view(cls)
class ElasticTensorExpansion(TensorCollection):
"""
This class is a sequence of elastic tensors corresponding
to an elastic tensor expansion, which can be used to
calculate stress and energy density and inherits all
of the list-based properties of TensorCollection
(e. g. symmetrization, voigt conversion, etc.)
"""
def __init__(self, c_list):
"""
Initialization method for ElasticTensorExpansion
Args:
c_list (list or tuple): sequence of Tensor inputs
or tensors from which the elastic tensor
expansion is constructed.
"""
c_list = [NthOrderElasticTensor(c, check_rank=4+i*2)
for i, c in enumerate(c_list)]
super(ElasticTensorExpansion, self).__init__(c_list)
@classmethod
def from_diff_fit(cls, strains, stresses, eq_stress=None,
tol=1e-10, order=3):
"""
Generates an elastic tensor expansion via the fitting function
defined below in diff_fit
"""
c_list = diff_fit(strains, stresses, eq_stress, order, tol)
return cls(c_list)
@property
def order(self):
"""
Order of the elastic tensor expansion, i. e. the order of the
highest included set of elastic constants
"""
return self[-1].order
def calculate_stress(self, strain):
"""
        Calculates a given elastic tensor's contribution to the
stress using Einstein summation
"""
return sum([c.calculate_stress(strain) for c in self])
def energy_density(self, strain, convert_GPa_to_eV=True):
"""
Calculates the elastic energy density due to a strain
"""
return sum([c.energy_density(strain, convert_GPa_to_eV)
for c in self])
def get_ggt(self, n, u):
"""
Gets the Generalized Gruneisen tensor for a given
third-order elastic tensor expansion.
Args:
n (3x1 array-like): normal mode direction
u (3x1 array-like): polarization direction
"""
gk = self[0].einsum_sequence([n, u, n, u])
result = -(2*gk*np.outer(u, u) + self[0].einsum_sequence([n, n])
+ self[1].einsum_sequence([n, u, n, u])) / (2*gk)
return result
    def get_tgt(self, temperature=None, structure=None, quad=None):
"""
        Gets the thermodynamic Gruneisen tensor (TGT) via an
integration of the GGT weighted by the directional heat
capacity.
See refs:
R. N. Thurston and K. Brugger, Phys. Rev. 113, A1604 (1964).
K. Brugger Phys. Rev. 137, A1826 (1965).
Args:
temperature (float): Temperature in kelvin, if not specified
will return non-cv-normalized value
            structure (Structure): Structure to be used in directional heat
capacity determination, only necessary if temperature
is specified
quad (dict): quadrature for integration, should be
dictionary with "points" and "weights" keys defaults
to quadpy.sphere.Lebedev(19) as read from file
"""
if temperature and not structure:
raise ValueError("If using temperature input, you must also "
"include structure")
if not quad:
quad = loadfn(os.path.join(os.path.dirname(__file__),
"quad_data.json"))
points = quad['points']
weights = quad['weights']
num, denom, c = np.zeros((3, 3)), 0, 1
for p, w in zip(points, weights):
gk = ElasticTensor(self[0]).green_kristoffel(p)
rho_wsquareds, us = np.linalg.eigh(gk)
us = [u / np.linalg.norm(u) for u in np.transpose(us)]
for u in us:
# TODO: this should be benchmarked
if temperature:
c = self.get_heat_capacity(temperature, structure, p, u)
num += c*self.get_ggt(p, u) * w
denom += c * w
return num / denom
def get_gruneisen_parameter(self, temperature=None, structure=None,
quad=None):
"""
Gets the single average gruneisen parameter from the TGT.
Args:
temperature (float): Temperature in kelvin, if not specified
will return non-cv-normalized value
            structure (Structure): Structure to be used in directional heat
capacity determination, only necessary if temperature
is specified
quad (dict): quadrature for integration, should be
dictionary with "points" and "weights" keys defaults
to quadpy.sphere.Lebedev(19) as read from file
"""
return np.trace(self.get_tgt(temperature, structure, quad)) / 3.
def get_heat_capacity(self, temperature, structure, n, u, cutoff=1e2):
"""
Gets the directional heat capacity for a higher order tensor
expansion as a function of direction and polarization.
Args:
temperature (float): Temperature in kelvin
            structure (Structure): Structure to be used in directional heat
capacity determination
n (3x1 array-like): direction for Cv determination
u (3x1 array-like): polarization direction, note that
no attempt for verification of eigenvectors is made
            cutoff (float): cutoff for hbar*omega / (k*T) above which
                the phonon contribution is treated as zero
"""
k = 1.38065e-23
kt = k*temperature
hbar_w = 1.05457e-34*self.omega(structure, n, u)
if hbar_w > kt * cutoff:
return 0.0
c = k * (hbar_w / kt) ** 2
c *= np.exp(hbar_w / kt) / (np.exp(hbar_w / kt) - 1)**2
return c * 6.022e23
def omega(self, structure, n, u):
"""
Finds directional frequency contribution to the heat
capacity from direction and polarization
Args:
structure (Structure): Structure to be used in directional heat
capacity determination
n (3x1 array-like): direction for Cv determination
u (3x1 array-like): polarization direction, note that
no attempt for verification of eigenvectors is made
"""
l0 = np.dot(np.sum(structure.lattice.matrix, axis=0), n)
        l0 *= 1e-10  # convert from angstroms to meters
weight = structure.composition.weight * 1.66054e-27 # in kg
vol = structure.volume * 1e-30 # in m^3
vel = (1e9 * self[0].einsum_sequence([n, u, n, u])
/ (weight / vol)) ** 0.5
return vel / l0
def thermal_expansion_coeff(self, structure, temperature, mode="debye"):
"""
Gets thermal expansion coefficient from third-order constants.
Args:
            temperature (float): Temperature in kelvin
            structure (Structure): Structure to be used in the directional
                heat capacity determination
mode (string): mode for finding average heat-capacity,
current supported modes are 'debye' and 'dulong-petit'
"""
soec = ElasticTensor(self[0])
v0 = (structure.volume * 1e-30 / structure.num_sites)
if mode == "debye":
vl, vt = soec.long_v(structure), soec.trans_v(structure)
vm = 3**(1./3.) * (1 / vl**3 + 2 / vt**3)**(-1./3.)
td = 1.05457e-34 / 1.38065e-23 * vm * (6 * np.pi**2 / v0) ** (1./3.)
t_ratio = temperature / td
integrand = lambda x: (x**4 * np.exp(x)) / (np.exp(x) - 1)**2
cv = 3 * 8.314 * t_ratio**3 * quad(integrand, 0, t_ratio**-1)[0]
elif mode == "dulong-petit":
cv = 3 * 8.314
else:
raise ValueError("Mode must be debye or dulong-petit")
alpha = self.get_tgt() * cv / (soec.k_vrh * 1e9 * v0 * 6.022e23)
return alpha
def get_compliance_expansion(self):
"""
Gets a compliance tensor expansion from the elastic
tensor expansion.
"""
# TODO: this might have a general form
if not self.order <= 4:
raise ValueError("Compliance tensor expansion only "
"supported for fourth-order and lower")
ce_exp = [ElasticTensor(self[0]).compliance_tensor]
einstring = "ijpq,pqrsuv,rskl,uvmn->ijklmn"
ce_exp.append(np.einsum(einstring, -ce_exp[-1], self[1],
ce_exp[-1], ce_exp[-1]))
if self.order == 4:
# Four terms in the Fourth-Order compliance tensor
einstring_1 = "pqab,cdij,efkl,ghmn,abcdefgh"
tensors_1 = [ce_exp[0]]*4 + [self[-1]]
temp = -np.einsum(einstring_1, *tensors_1)
einstring_2 = "pqab,abcdef,cdijmn,efkl"
einstring_3 = "pqab,abcdef,efklmn,cdij"
einstring_4 = "pqab,abcdef,cdijkl,efmn"
for es in [einstring_2, einstring_3, einstring_4]:
temp -= np.einsum(es, ce_exp[0], self[-2], ce_exp[1], ce_exp[0])
ce_exp.append(temp)
return TensorCollection(ce_exp)
def get_strain_from_stress(self, stress):
"""
Gets the strain from a stress state according
to the compliance expansion corresponding to the
tensor expansion.
"""
compl_exp = self.get_compliance_expansion()
strain = 0
for n, compl in enumerate(compl_exp):
strain += compl.einsum_sequence([stress]*(n+1)) / factorial(n+1)
return strain
def get_effective_ecs(self, strain, order=2):
"""
Returns the effective elastic constants
from the elastic tensor expansion.
Args:
strain (Strain or 3x3 array-like): strain condition
under which to calculate the effective constants
order (int): order of the ecs to be returned
"""
ec_sum = 0
for n, ecs in enumerate(self[order-2:]):
ec_sum += ecs.einsum_sequence([strain] * n) / factorial(n)
return ec_sum
def get_wallace_tensor(self, tau):
"""
Gets the Wallace Tensor for determining yield strength
criteria.
Args:
tau (3x3 array-like): stress at which to evaluate
the wallace tensor
"""
b = 0.5 * (np.einsum("ml,kn->klmn", tau, np.eye(3)) +
np.einsum("km,ln->klmn", tau, np.eye(3)) +
np.einsum("nl,km->klmn", tau, np.eye(3)) +
np.einsum("kn,lm->klmn", tau, np.eye(3)) +
-2*np.einsum("kl,mn->klmn", tau, np.eye(3)))
strain = self.get_strain_from_stress(tau)
b += self.get_effective_ecs(strain)
return b
def get_symmetric_wallace_tensor(self, tau):
"""
Gets the symmetrized wallace tensor for determining
yield strength criteria.
Args:
tau (3x3 array-like): stress at which to evaluate
the wallace tensor.
"""
wallace = self.get_wallace_tensor(tau)
return Tensor(0.5 * (wallace + np.transpose(wallace, [2, 3, 0, 1])))
def get_stability_criteria(self, s, n):
"""
Gets the stability criteria from the symmetric
Wallace tensor from an input vector and stress
value.
Args:
s (float): Stress value at which to evaluate
the stability criteria
n (3x1 array-like): direction of the applied
stress
"""
n = get_uvec(n)
stress = s * np.outer(n, n)
sym_wallace = self.get_symmetric_wallace_tensor(stress)
return np.linalg.det(sym_wallace.voigt)
def get_yield_stress(self, n):
"""
Gets the yield stress for a given direction
Args:
n (3x1 array-like): direction for which to find the
yield stress
"""
# TODO: root finding could be more robust
comp = root(self.get_stability_criteria, -1, args=n)
tens = root(self.get_stability_criteria, 1, args=n)
return (comp.x, tens.x)
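    # Illustrative sketch (hypothetical inputs, not part of the original module):
    # a third-order expansion fit from strain/stress data could be probed for a
    # directional yield stress along z roughly as follows:
    #
    #     exp = ElasticTensorExpansion.from_diff_fit(strains, stresses, order=3)
    #     compressive, tensile = exp.get_yield_stress([0, 0, 1])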
#TODO: abstract this for other tensor fitting procedures
def diff_fit(strains, stresses, eq_stress=None, order=2, tol=1e-10):
"""
nth order elastic constant fitting function based on
central-difference derivatives with respect to distinct
strain states. The algorithm is summarized as follows:
1. Identify distinct strain states as sets of indices
for which nonzero strain values exist, typically
[(0), (1), (2), (3), (4), (5), (0, 1) etc.]
2. For each strain state, find and sort strains and
stresses by strain value.
3. Find first, second .. nth derivatives of each stress
with respect to scalar variable corresponding to
the smallest perturbation in the strain.
4. Use the pseudoinverse of a matrix-vector expression
corresponding to the parameterized stress-strain
relationship and multiply that matrix by the respective
calculated first or second derivatives from the
previous step.
5. Place the calculated nth-order elastic
constants appropriately.
Args:
order (int): order of the elastic tensor set to return
strains (nx3x3 array-like): Array of 3x3 strains
to use in fitting of ECs
stresses (nx3x3 array-like): Array of 3x3 stresses
to use in fitting ECs. These should be PK2 stresses.
eq_stress (3x3 array-like): stress corresponding to
equilibrium strain (i. e. "0" strain state).
If not specified, function will try to find
the state in the list of provided stresses
and strains. If not found, defaults to 0.
tol (float): value for which strains below
are ignored in identifying strain states.
Returns:
Set of tensors corresponding to nth order expansion of
the stress/strain relation
"""
strain_state_dict = get_strain_state_dict(
strains, stresses, eq_stress=eq_stress, tol=tol,
add_eq=True, sort=True)
# Collect derivative data
c_list = []
dei_dsi = np.zeros((order - 1, 6, len(strain_state_dict)))
for n, (strain_state, data) in enumerate(strain_state_dict.items()):
hvec = data["strains"][:, strain_state.index(1)]
for i in range(1, order):
coef = get_diff_coeff(hvec, i)
dei_dsi[i-1, :, n] = np.dot(coef, data["stresses"])
m, absent = generate_pseudo(list(strain_state_dict.keys()), order)
for i in range(1, order):
cvec, carr = get_symbol_list(i+1)
svec = np.ravel(dei_dsi[i-1].T)
cmap = dict(zip(cvec, np.dot(m[i-1], svec)))
c_list.append(v_subs(carr, cmap))
return [Tensor.from_voigt(c) for c in c_list]
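# Illustrative sketch (hypothetical inputs, not part of the original module):
# for a third-order fit, diff_fit returns [C2, C3], which can seed an expansion:
#
#     c2, c3 = diff_fit(strains, stresses, eq_stress=eq_stress, order=3)
#     exp = ElasticTensorExpansion([c2, c3])
#     exp.energy_density(Strain.from_voigt([0.01, 0, 0, 0, 0, 0]))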
def find_eq_stress(strains, stresses, tol=1e-10):
"""
Finds stress corresponding to zero strain state in stress-strain list
Args:
strains (Nx3x3 array-like): array corresponding to strains
stresses (Nx3x3 array-like): array corresponding to stresses
tol (float): tolerance to find zero strain state
"""
stress_array = np.array(stresses)
strain_array = np.array(strains)
eq_stress = stress_array[np.all(abs(strain_array)<tol, axis=(1,2))]
if eq_stress.size != 0:
all_same = (abs(eq_stress - eq_stress[0]) < 1e-8).all()
if len(eq_stress) > 1 and not all_same:
raise ValueError("Multiple stresses found for equilibrium strain"
" state, please specify equilibrium stress or "
" remove extraneous stresses.")
eq_stress = eq_stress[0]
else:
warnings.warn("No eq state found, returning zero voigt stress")
eq_stress = Stress(np.zeros((3, 3)))
return eq_stress
def get_strain_state_dict(strains, stresses, eq_stress=None,
tol=1e-10, add_eq=True, sort=True):
"""
Creates a dictionary of voigt-notation stress-strain sets
keyed by "strain state", i. e. a tuple corresponding to
    the non-zero entries expressed as ratios to the lowest nonzero value,
    e.g. [0, 0.1, 0, 0.2, 0, 0] -> (0, 1, 0, 2, 0, 0).
    This allows strains to be collected into stencils so as to
    evaluate parameterized finite-difference derivatives
Args:
strains (Nx3x3 array-like): strain matrices
stresses (Nx3x3 array-like): stress matrices
eq_stress (Nx3x3 array-like): equilibrium stress
tol (float): tolerance for sorting strain states
add_eq (bool): flag for whether to add eq_strain
to stress-strain sets for each strain state
sort (bool): flag for whether to sort strain states
Returns:
OrderedDict with strain state keys and dictionaries
with stress-strain data corresponding to strain state
"""
# Recast stress/strains
vstrains = np.array([Strain(s).zeroed(tol).voigt for s in strains])
vstresses = np.array([Stress(s).zeroed(tol).voigt for s in stresses])
# Collect independent strain states:
independent = set([tuple(np.nonzero(vstrain)[0].tolist())
for vstrain in vstrains])
strain_state_dict = OrderedDict()
if add_eq:
if eq_stress is not None:
veq_stress = Stress(eq_stress).voigt
else:
veq_stress = find_eq_stress(strains, stresses).voigt
for n, ind in enumerate(independent):
# match strains with templates
template = np.zeros(6, dtype=bool)
np.put(template, ind, True)
template = np.tile(template, [vstresses.shape[0], 1])
mode = (template == (np.abs(vstrains) > 1e-10)).all(axis=1)
mstresses = vstresses[mode]
mstrains = vstrains[mode]
if add_eq:
# add zero strain state
mstrains = np.vstack([mstrains, np.zeros(6)])
mstresses = np.vstack([mstresses, veq_stress])
# sort strains/stresses by strain values
if sort:
mstresses = mstresses[mstrains[:, ind[0]].argsort()]
mstrains = mstrains[mstrains[:, ind[0]].argsort()]
# Get "strain state", i.e. ratio of each value to minimum strain
strain_state = mstrains[-1] / np.min(np.take(mstrains[-1], ind))
strain_state = tuple(strain_state)
strain_state_dict[strain_state] = {"strains": mstrains,
"stresses": mstresses}
return strain_state_dict
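# Illustrative sketch (hypothetical inputs, not part of the original module):
# a voigt strain of [0, 0.1, 0, 0.2, 0, 0] is keyed as the strain state
# (0, 1, 0, 2, 0, 0), i.e. each entry divided by the smallest nonzero value:
#
#     ss_dict = get_strain_state_dict(strains, stresses)
#     for state, data in ss_dict.items():
#         print(state, data["strains"].shape, data["stresses"].shape)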
def generate_pseudo(strain_states, order=3):
"""
Generates the pseudoinverse for a given set of strains.
Args:
strain_states (6xN array like): a list of voigt-notation
"strain-states", i. e. perturbed indices of the strain
as a function of the smallest strain e. g. (0, 1, 0, 0, 1, 0)
order (int): order of pseudoinverse to calculate
Returns:
mis: pseudo inverses for each order tensor, these can
be multiplied by the central difference derivative
of the stress with respect to the strain state
absent_syms: symbols of the tensor absent from the PI
expression
"""
s = sp.Symbol('s')
nstates = len(strain_states)
ni = np.array(strain_states)*s
mis, absent_syms = [], []
for degree in range(2, order + 1):
cvec, carr = get_symbol_list(degree)
sarr = np.zeros((nstates, 6), dtype=object)
for n, strain_v in enumerate(ni):
# Get expressions
exps = carr.copy()
for i in range(degree - 1):
exps = np.dot(exps, strain_v)
exps /= np.math.factorial(degree - 1)
sarr[n] = [sp.diff(exp, s, degree - 1) for exp in exps]
svec = sarr.ravel()
present_syms = set.union(*[exp.atoms(sp.Symbol) for exp in svec])
absent_syms += [set(cvec) - present_syms]
m = np.zeros((6*nstates, len(cvec)))
for n, c in enumerate(cvec):
m[:, n] = v_diff(svec, c)
mis.append(np.linalg.pinv(m))
return mis, absent_syms
def get_symbol_list(rank, dim=6):
"""
Returns a symbolic representation of the voigt-notation
tensor that places identical symbols for entries related
by index transposition, i. e. C_1121 = C_1211 etc.
Args:
dim (int): dimension of matrix/tensor, e. g. 6 for
voigt notation and 3 for standard
rank (int): rank of tensor, e. g. 3 for third-order ECs
Returns:
c_vec (array): array representing distinct indices
c_arr (array): array representing tensor with equivalent
indices assigned as above
"""
indices = list(
itertools.combinations_with_replacement(range(dim), r=rank))
c_vec = np.zeros(len(indices), dtype=object)
c_arr = np.zeros([dim]*rank, dtype=object)
for n, idx in enumerate(indices):
c_vec[n] = sp.Symbol('c_'+''.join([str(i) for i in idx]))
for perm in itertools.permutations(idx):
c_arr[perm] = c_vec[n]
return c_vec, c_arr
def subs(entry, cmap):
"""
Sympy substitution function, primarily for the purposes
of numpy vectorization
Args:
entry (symbol or exp): sympy expr to undergo subs
cmap (dict): map for symbols to values to use in subs
Returns:
Evaluated expression with substitution
"""
return entry.subs(cmap)
# Vectorized functions
v_subs = np.vectorize(subs)
v_diff = np.vectorize(sp.diff)
def get_diff_coeff(hvec, n=1):
"""
    Helper function to find difference coefficients of a
derivative on an arbitrary mesh.
Args:
hvec (1D array-like): sampling stencil
n (int): degree of derivative to find
"""
    hvec = np.array(hvec, dtype=float)  # np.float alias was removed in newer NumPy
acc = len(hvec)
exp = np.column_stack([np.arange(acc)]*acc)
a = np.vstack([hvec] * acc) ** exp
b = np.zeros(acc)
b[n] = factorial(n)
return np.linalg.solve(a, b)
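# Illustrative sketch (not part of the original module): for an evenly spaced
# three-point stencil the solver reproduces the familiar central-difference
# weights, e.g.
#
#     get_diff_coeff([-0.01, 0.0, 0.01], n=1)   # ~ [-50., 0., 50.]
#
# so that f'(0) ~ (f(0.01) - f(-0.01)) / 0.02.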
|
setten/pymatgen
|
pymatgen/analysis/elasticity/elastic.py
|
Python
|
mit
| 40,064
|
[
"VASP",
"pymatgen"
] |
ba9778fcca8896bdfbc86ca8a7d28ae998ae30c879d7c123a8635ca9ae6ae64c
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_networkprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of NetworkProfile Avi RESTful Object
description:
- This module is used to configure NetworkProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- User defined description for the object.
name:
description:
- The name of the network profile.
required: true
profile:
description:
- Networkprofileunion settings for networkprofile.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the network profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
  - name: Create a network profile for a UDP application
avi_networkprofile:
controller: ''
username: ''
password: ''
name: System-UDP-Fast-Path
profile:
type: PROTOCOL_TYPE_UDP_FAST_PATH
udp_fast_path_profile:
per_pkt_loadbalance: false
session_idle_timeout: 10
snat: true
tenant_ref: admin
'''
RETURN = '''
obj:
description: NetworkProfile (api/networkprofile) object
returned: success, changed
type: dict
'''
from pkg_resources import parse_version
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.avi import avi_common_argument_spec
HAS_AVI = True
try:
import avi.sdk
sdk_version = getattr(avi.sdk, '__version__', None)
if ((sdk_version is None) or (sdk_version and
(parse_version(sdk_version) < parse_version('16.3.5.post1')))):
# It allows the __version__ to be '' as that value is used in development builds
raise ImportError
from avi.sdk.utils.ansible_utils import avi_ansible_api
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
description=dict(type='str',),
name=dict(type='str', required=True),
profile=dict(type='dict', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'networkprofile',
set([]))
if __name__ == '__main__':
main()
|
0x46616c6b/ansible
|
lib/ansible/modules/network/avi/avi_networkprofile.py
|
Python
|
gpl-3.0
| 4,061
|
[
"VisIt"
] |
c37c51b1bdf06aab281ef47d2c908dd1ac288cc98944f167db3475dbb7d3c3e7
|
"""
Global average annual temperature plot
======================================
Produces a time-series plot of North American temperature forecasts for 2 different emission scenarios.
Constraining data to a limited spatial area also features in this example.
The data used comes from the HadGEM2-AO model simulations for the A1B and E1 scenarios, both of which
were derived using the IMAGE Integrated Assessment Model (Johns et al. 2010; Lowe et al. 2009).
References
----------
Johns T.C., et al. (2010) Climate change under aggressive mitigation: The ENSEMBLES multi-model
experiment. Climate Dynamics (submitted)
Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F. Royer, and P. van der Linden, 2009.
New Study For Climate Modeling, Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21.
.. seealso::
Further details on the aggregation functionality being used in this example can be found in
:ref:`cube-statistics`.
"""
import numpy as np
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
import iris.quickplot as qplt
import iris.analysis.cartography
import matplotlib.dates as mdates
def main():
# Load data into three Cubes, one for each set of NetCDF files.
e1 = iris.load_cube(iris.sample_data_path('E1_north_america.nc'))
a1b = iris.load_cube(iris.sample_data_path('A1B_north_america.nc'))
# load in the global pre-industrial mean temperature, and limit the domain
# to the same North American region that e1 and a1b are at.
north_america = iris.Constraint(longitude=lambda v: 225 <= v <= 315,
latitude=lambda v: 15 <= v <= 60)
pre_industrial = iris.load_cube(iris.sample_data_path('pre-industrial.pp'),
north_america)
# Generate area-weights array. As e1 and a1b are on the same grid we can
# do this just once and re-use. This method requires bounds on lat/lon
# coords, so let's add some in sensible locations using the "guess_bounds"
# method.
e1.coord('latitude').guess_bounds()
e1.coord('longitude').guess_bounds()
e1_grid_areas = iris.analysis.cartography.area_weights(e1)
pre_industrial.coord('latitude').guess_bounds()
pre_industrial.coord('longitude').guess_bounds()
pre_grid_areas = iris.analysis.cartography.area_weights(pre_industrial)
# Perform the area-weighted mean for each of the datasets using the
# computed grid-box areas.
pre_industrial_mean = pre_industrial.collapsed(['latitude', 'longitude'],
iris.analysis.MEAN,
weights=pre_grid_areas)
e1_mean = e1.collapsed(['latitude', 'longitude'],
iris.analysis.MEAN,
weights=e1_grid_areas)
a1b_mean = a1b.collapsed(['latitude', 'longitude'],
iris.analysis.MEAN,
weights=e1_grid_areas)
# Show ticks 30 years apart
plt.gca().xaxis.set_major_locator(mdates.YearLocator(30))
# Plot the datasets
qplt.plot(e1_mean, label='E1 scenario', lw=1.5, color='blue')
qplt.plot(a1b_mean, label='A1B-Image scenario', lw=1.5, color='red')
# Draw a horizontal line showing the pre-industrial mean
plt.axhline(y=pre_industrial_mean.data, color='gray', linestyle='dashed',
label='pre-industrial', lw=1.5)
    # Establish where a1b and e1 have the same data, i.e. the observations
common = np.where(a1b_mean.data == e1_mean.data)[0]
observed = a1b_mean[common]
# Plot the observed data
qplt.plot(observed, label='observed', color='black', lw=1.5)
# Add a legend and title
plt.legend(loc="upper left")
plt.title('North American mean air temperature', fontsize=18)
plt.xlabel('Time / year')
plt.grid()
iplt.show()
if __name__ == '__main__':
main()
|
scollis/iris
|
docs/iris/example_code/graphics/COP_1d_plot.py
|
Python
|
gpl-3.0
| 3,947
|
[
"NetCDF"
] |
197ae2f98a53977fcba2f8838cfd0fdd8155d6d0dc856037cce6e7cf41f25c90
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for Student's Profile Page.
"""
from contextlib import contextmanager
from datetime import datetime
from nose.plugins.attrib import attr
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.lms.account_settings import AccountSettingsPage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.pages.lms.learner_profile import LearnerProfilePage
from common.test.acceptance.tests.helpers import AcceptanceTest, EventsTestMixin
class LearnerProfileTestMixin(EventsTestMixin):
"""
Mixin with helper methods for testing learner profile pages.
"""
PRIVACY_PUBLIC = u'all_users'
PRIVACY_PRIVATE = u'private'
PUBLIC_PROFILE_FIELDS = ['username', 'country', 'language_proficiencies', 'bio']
PRIVATE_PROFILE_FIELDS = ['username']
PUBLIC_PROFILE_EDITABLE_FIELDS = ['country', 'language_proficiencies', 'bio']
USER_SETTINGS_CHANGED_EVENT_NAME = u"edx.user.settings.changed"
def log_in_as_unique_user(self):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(self.browser, username=username).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def set_public_profile_fields_data(self, profile_page):
"""
Fill in the public profile fields of a user.
"""
profile_page.value_for_dropdown_field('language_proficiencies', 'English', focus_out=True)
profile_page.value_for_dropdown_field('country', 'United Arab Emirates', focus_out=True)
profile_page.set_value_for_textarea_field('bio', 'Nothing Special')
# Waits here for text to appear/save on bio field
profile_page.wait_for_ajax()
def visit_profile_page(self, username, privacy=None):
"""
Visit a user's profile page and if a privacy is specified and
is different from the displayed value, then set the privacy to that value.
"""
profile_page = LearnerProfilePage(self.browser, username)
# Change the privacy if requested by loading the page and
# changing the drop down
if privacy is not None:
profile_page.visit()
# Change the privacy setting if it is not the desired one already
profile_page.privacy = privacy
# Verify the current setting is as expected
if privacy == self.PRIVACY_PUBLIC:
self.assertEqual(profile_page.privacy, 'all_users')
else:
self.assertEqual(profile_page.privacy, 'private')
if privacy == self.PRIVACY_PUBLIC:
self.set_public_profile_fields_data(profile_page)
# Reset event tracking so that the tests only see events from
# loading the profile page.
self.start_time = datetime.now() # pylint: disable=attribute-defined-outside-init
# Load the page
profile_page.visit()
return profile_page
def set_birth_year(self, birth_year):
"""
Set birth year for the current user to the specified value.
"""
account_settings_page = AccountSettingsPage(self.browser)
account_settings_page.visit()
account_settings_page.wait_for_page()
self.assertEqual(
account_settings_page.value_for_dropdown_field('year_of_birth', str(birth_year), focus_out=True),
str(birth_year)
)
def verify_profile_page_is_public(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently public.
"""
self.assertEqual(profile_page.visible_fields, self.PUBLIC_PROFILE_FIELDS)
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.editable_fields, self.PUBLIC_PROFILE_EDITABLE_FIELDS)
else:
self.assertEqual(profile_page.editable_fields, [])
def verify_profile_page_is_private(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently private.
"""
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.visible_fields, self.PRIVATE_PROFILE_FIELDS)
def verify_profile_page_view_event(self, requesting_username, profile_user_id, visibility=None):
"""
Verifies that the correct view event was captured for the profile page.
"""
actual_events = self.wait_for_events(
start_time=self.start_time,
event_filter={'event_type': 'edx.user.settings.viewed', 'username': requesting_username},
number_of_matches=1)
self.assert_events_match(
[
{
'username': requesting_username,
'event': {
'user_id': int(profile_user_id),
'page': 'profile',
'visibility': unicode(visibility)
}
}
],
actual_events
)
@contextmanager
def verify_pref_change_event_during(self, username, user_id, setting, **kwargs):
"""Assert that a single setting changed event is emitted for the user_api_userpreference table."""
expected_event = {
'username': username,
'event': {
'setting': setting,
'user_id': int(user_id),
'table': 'user_api_userpreference',
'truncated': []
}
}
expected_event['event'].update(kwargs)
event_filter = {
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'username': username,
}
with self.assert_events_match_during(event_filter=event_filter, expected_events=[expected_event]):
yield
def initialize_different_user(self, privacy=None, birth_year=None):
"""
Initialize the profile page for a different test user
"""
username, user_id = self.log_in_as_unique_user()
# Set the privacy for the new user
if privacy is None:
privacy = self.PRIVACY_PUBLIC
self.visit_profile_page(username, privacy=privacy)
# Set the user's year of birth
if birth_year:
self.set_birth_year(birth_year)
# Log the user out
LogoutPage(self.browser).visit()
return username, user_id
@attr(shard=4)
class OwnLearnerProfilePageTest(LearnerProfileTestMixin, AcceptanceTest):
"""
Tests that verify a student's own profile page.
"""
def verify_profile_forced_private_message(self, username, birth_year, message=None):
"""
Verify age limit messages for a user.
"""
if birth_year is None:
birth_year = ""
self.set_birth_year(birth_year=birth_year)
profile_page = self.visit_profile_page(username)
self.assertTrue(profile_page.privacy_field_visible)
if message:
self.assertTrue(profile_page.age_limit_message_present)
else:
self.assertFalse(profile_page.age_limit_message_present)
self.assertIn(message, profile_page.profile_forced_private_message)
def test_profile_defaults_to_public(self):
"""
Scenario: Verify that a new user's profile defaults to public.
Given that I am a new user.
When I go to my profile page.
Then I see that the profile visibility is set to public.
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
self.verify_profile_page_is_public(profile_page)
def assert_default_image_has_public_access(self, profile_page):
"""
Assert that profile image has public access.
"""
self.assertTrue(profile_page.profile_has_default_image)
self.assertTrue(profile_page.profile_has_image_with_public_access())
def test_make_profile_public(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my private profile page
And I set the profile visibility to public
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as public
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=self.PRIVACY_PRIVATE, new=self.PRIVACY_PUBLIC
):
profile_page.privacy = self.PRIVACY_PUBLIC
# Reload the page and verify that the profile is now public
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_public(profile_page)
def test_make_profile_private(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my public profile page
And I set the profile visibility to private
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as private
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=None, new=self.PRIVACY_PRIVATE
):
profile_page.privacy = self.PRIVACY_PRIVATE
# Reload the page and verify that the profile is now private
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_private(profile_page)
def test_dashboard_learner_profile_link(self):
"""
Scenario: Verify that my profile link is present on dashboard page and we can navigate to correct page.
Given that I am a registered user.
When I go to Dashboard page.
And I click on username dropdown.
Then I see Profile link in the dropdown menu.
When I click on Profile link.
Then I will be navigated to Profile page.
"""
username, __ = self.log_in_as_unique_user()
dashboard_page = DashboardPage(self.browser)
dashboard_page.visit()
dashboard_page.click_username_dropdown()
self.assertIn('Profile', dashboard_page.username_dropdown_link_text)
dashboard_page.click_my_profile_link()
my_profile_page = LearnerProfilePage(self.browser, username)
my_profile_page.wait_for_page()
def test_fields_on_my_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at her own private profile.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to private.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_page_is_private(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_fields_on_my_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at her own public profile.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see all the profile fields are shown.
And `location`, `language` and `about me` fields are editable.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.verify_profile_page_is_public(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PUBLIC)
def _test_dropdown_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a dropdown field.
"""
profile_page.value_for_dropdown_field(field_id, new_value, focus_out=True)
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
self.browser.refresh()
profile_page.wait_for_page()
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
def _test_textarea_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a textarea field.
"""
profile_page.set_value_for_textarea_field(field_id, new_value)
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
self.browser.refresh()
profile_page.wait_for_page()
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
def test_country_field(self):
"""
Test behaviour of `Country` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set country value to `Pakistan`.
Then displayed country should be `Pakistan` and country field mode should be `display`
And I reload the page.
Then displayed country should be `Pakistan` and country field mode should be `display`
And I make `country` field editable
Then `country` field mode should be `edit`
And `country` field icon should be visible.
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_dropdown_field(profile_page, 'country', 'Pakistan', 'Pakistan', 'display')
profile_page.make_field_editable('country')
self.assertEqual(profile_page.mode_for_field('country'), 'edit')
def test_language_field(self):
"""
Test behaviour of `Language` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set language value to `Urdu`.
Then displayed language should be `Urdu` and language field mode should be `display`
And I reload the page.
Then displayed language should be `Urdu` and language field mode should be `display`
Then I set empty value for language.
Then displayed language should be `Add language` and language field mode should be `placeholder`
And I reload the page.
Then displayed language should be `Add language` and language field mode should be `placeholder`
And I make `language` field editable
Then `language` field mode should be `edit`
And `language` field icon should be visible.
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_dropdown_field(profile_page, 'language_proficiencies', 'Urdu', 'Urdu', 'display')
self._test_dropdown_field(profile_page, 'language_proficiencies', '', 'Add language', 'placeholder')
profile_page.make_field_editable('language_proficiencies')
self.assertTrue(profile_page.mode_for_field('language_proficiencies'), 'edit')
def test_about_me_field(self):
"""
Test behaviour of `About Me` field.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public and set default values for public fields.
Then I set about me value to `ThisIsIt`.
Then displayed about me should be `ThisIsIt` and about me field mode should be `display`
And I reload the page.
Then displayed about me should be `ThisIsIt` and about me field mode should be `display`
Then I set empty value for about me.
Then displayed about me should be `Tell other edX learners a little about yourself: where you live,
what your interests are, why you're taking courses on edX, or what you hope to learn.` and about me
field mode should be `placeholder`
And I reload the page.
Then displayed about me should be `Tell other edX learners a little about yourself: where you live,
what your interests are, why you're taking courses on edX, or what you hope to learn.` and about me
field mode should be `placeholder`
And I make `about me` field editable
Then `about me` field mode should be `edit`
"""
placeholder_value = (
"Tell other learners a little about yourself: where you live, what your interests are, "
"why you're taking courses, or what you hope to learn."
)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self._test_textarea_field(profile_page, 'bio', 'ThisIsIt', 'ThisIsIt', 'display')
self._test_textarea_field(profile_page, 'bio', '', placeholder_value, 'placeholder')
profile_page.make_field_editable('bio')
self.assertTrue(profile_page.mode_for_field('bio'), 'edit')
def test_birth_year_not_set(self):
"""
Verify message if birth year is not set.
Given that I am a registered user.
And birth year is not set for the user.
And I visit my profile page.
Then I should see a message that the profile is private until the year of birth is set.
"""
username, user_id = self.log_in_as_unique_user()
message = "You must specify your birth year before you can share your full profile."
self.verify_profile_forced_private_message(username, birth_year=None, message=message)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_is_under_age(self):
"""
Verify message if user is under age.
Given that I am a registered user.
And birth year is set so that age is less than 13.
And I visit my profile page.
Then I should see a message that the profile is private as I am under thirteen.
"""
username, user_id = self.log_in_as_unique_user()
under_age_birth_year = datetime.now().year - 10
self.verify_profile_forced_private_message(
username,
birth_year=under_age_birth_year,
message='You must be over 13 to share a full profile.'
)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_can_only_see_default_image_for_private_profile(self):
"""
Scenario: Default profile image behaves correctly for under age user.
Given that I am on my profile page with private access
And I can see default image
When I move my cursor to the image
Then i cannot see the upload/remove image text
And i cannot upload/remove the image.
"""
year_of_birth = datetime.now().year - 5
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_forced_private_message(
username,
year_of_birth,
message='You must be over 13 to share a full profile.'
)
self.assertTrue(profile_page.profile_has_default_image)
self.assertFalse(profile_page.profile_has_image_with_private_access())
def test_user_can_see_default_image_for_public_profile(self):
"""
Scenario: Default profile image behaves correctly for public profile.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
And i am able to upload new image
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
def test_user_can_upload_the_profile_image_with_success(self):
"""
Scenario: Upload profile image works correctly.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
When i upload new image via file uploader
Then i can see the changed image
And i can also see the latest image after reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
profile_page.visit()
self.assertTrue(profile_page.image_upload_success)
def test_user_can_see_error_for_exceeding_max_file_size_limit(self):
"""
Scenario: Upload profile image does not work for > 1MB image file.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
When i upload new > 1MB image via file uploader
Then i can see the error message for file size limit
And i can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='larger_image.jpg')
self.assertEqual(profile_page.profile_image_message, "The file must be smaller than 1 MB in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_file_size_below_the_min_limit(self):
"""
Scenario: Upload profile image does not work for < 100 Bytes image file.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
When i upload new < 100 Bytes image via file uploader
Then i can see the error message for minimum file size limit
And i can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='list-icon-visited.png')
self.assertEqual(profile_page.profile_image_message, "The file must be at least 100 bytes in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_wrong_file_type(self):
"""
Scenario: Upload profile image does not work for wrong file types.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then i can see the upload/remove image text
When i upload new csv file via file uploader
Then i can see the error message for wrong/unsupported file type
And i can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='generic_csv.csv')
self.assertEqual(
profile_page.profile_image_message,
"The file must be one of the following types: .gif, .png, .jpeg, .jpg."
)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_remove_profile_image(self):
"""
Scenario: Remove profile image works correctly.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then I can see the upload/remove image text
When I click on the remove image link
Then I can see the default image
And I can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
self.assertTrue(profile_page.remove_profile_image())
self.assertTrue(profile_page.profile_has_default_image)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
def test_user_cannot_remove_default_image(self):
"""
Scenario: Remove profile image does not work for default images.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then I can see only the upload image text
And I cannot see the remove image text
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
self.assertFalse(profile_page.remove_link_present)
def test_eventing_after_multiple_uploads(self):
"""
Scenario: An event is fired when a user with a profile image uploads another image
Given that I am on my profile page with public access
And I upload a new image via file uploader
When I upload another image via the file uploader
Then two upload events have been emitted
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg', wait_for_upload_button=False)
@attr(shard=4)
class DifferentUserLearnerProfilePageTest(LearnerProfileTestMixin, AcceptanceTest):
"""
Tests that verify viewing the profile page of a different user.
"""
def test_different_user_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's private profile.
Given that I am a registered user.
And I visit a different user's private profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
different_username, different_user_id = self.initialize_different_user(privacy=self.PRIVACY_PRIVATE)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_under_age(self):
"""
Scenario: Verify that an under age user's profile is private to others.
Given that I am a registered user.
And I visit an under age user's profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see that only the private fields are shown.
"""
under_age_birth_year = datetime.now().year - 10
different_username, different_user_id = self.initialize_different_user(
privacy=self.PRIVACY_PUBLIC,
birth_year=under_age_birth_year
)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's public profile.
Given that I am a registered user.
And I visit a different user's public profile page.
Then I shouldn't see the profile visibility selector dropdown.
And all the profile fields are shown.
And the `location`, `language` and `about me` fields are not editable.
"""
different_username, different_user_id = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.wait_for_public_fields()
self.verify_profile_page_is_public(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PUBLIC)
def test_badge_share_modal(self):
username = 'testcert'
AutoAuthPage(self.browser, username=username).visit()
profile_page = self.visit_profile_page(username)
profile_page.display_accomplishments()
badge = profile_page.badges[0]
badge.display_modal()
badge.close_modal()
@attr('a11y')
class LearnerProfileA11yTest(LearnerProfileTestMixin, AcceptanceTest):
"""
Class to test learner profile accessibility.
"""
def test_editable_learner_profile_a11y(self):
"""
Test the accessibility of the editable version of the profile page
(user viewing her own public profile).
"""
username, _ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('language_proficiencies')
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('bio')
profile_page.a11y_audit.check_for_accessibility_errors()
def test_read_only_learner_profile_a11y(self):
"""
Test the accessibility of the read-only version of a public profile page
(user viewing someone else's profile page).
"""
# initialize_different_user should cause country, language, and bio to be filled out (since
# privacy is public). It doesn't appear that this is happening, although the method
# works in regular bokchoy tests. Perhaps a problem with phantomjs? So this test is currently
# only looking at a read-only profile page with a username.
different_username, _ = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.a11y_audit.check_for_accessibility_errors()
def test_badges_accessibility(self):
"""
Test the accessibility of the badge listings and sharing modal.
"""
username = 'testcert'
AutoAuthPage(self.browser, username=username).visit()
profile_page = self.visit_profile_page(username)
profile_page.display_accomplishments()
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.badges[0].display_modal()
profile_page.a11y_audit.check_for_accessibility_errors()
|
Lektorium-LLC/edx-platform
|
common/test/acceptance/tests/lms/test_learner_profile.py
|
Python
|
agpl-3.0
| 34,750
|
[
"VisIt"
] |
2ae0a479e973f600acf08714722b577d1087ebedf98083a8e7d2a469ce9f1250
|
def agts(queue):
queue.add('C5H12.agts.py',
walltime=25,
ncpus=8,
creates=['C5H12-gpaw.csv'])
if __name__ == "__main__":
from ase.optimize.test.C5H12 import *
|
qsnake/gpaw
|
doc/devel/ase_optimize/C5H12.agts.py
|
Python
|
gpl-3.0
| 210
|
[
"ASE",
"GPAW"
] |
4911238f104b5b0e693fc6c3fce22ff1e911c09d0bffd5eaa918184bd137c0ff
|
""" DIRAC Transformation DB
Transformation database is used to collect and serve the necessary information
in order to automate the task of job preparation for high level transformations.
This class is typically used as a base class for more specific data processing
databases
"""
import re
import time
import threading
import json
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.List import stringListToString, intListToString, breakListIntoChunks
from DIRAC.Core.Utilities.Shifter import setupShifterProxyInEnv
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities.Subprocess import pythonCall
from DIRAC.DataManagementSystem.Client.MetaQuery import MetaQuery
__RCSID__ = "$Id$"
MAX_ERROR_COUNT = 10
#############################################################################
class TransformationDB(DB):
""" TransformationDB class
"""
def __init__(self, dbname=None, dbconfig=None, dbIn=None):
""" The standard constructor takes the database name (dbname) and the name of the
configuration section (dbconfig)
"""
if not dbname:
dbname = 'TransformationDB'
if not dbconfig:
dbconfig = 'Transformation/TransformationDB'
if not dbIn:
DB.__init__(self, dbname, dbconfig)
self.lock = threading.Lock()
self.filters = []
res = self.__updateFilters()
if not res['OK']:
gLogger.fatal("Failed to create filters")
self.allowedStatusForTasks = ('Unused', 'ProbInFC')
self.TRANSPARAMS = ['TransformationID',
'TransformationName',
'Description',
'LongDescription',
'CreationDate',
'LastUpdate',
'AuthorDN',
'AuthorGroup',
'Type',
'Plugin',
'AgentType',
'Status',
'FileMask',
'TransformationGroup',
'GroupSize',
'InheritedFrom',
'Body',
'MaxNumberOfTasks',
'EventsPerTask',
'TransformationFamily']
self.mutable = ['TransformationName',
'Description',
'LongDescription',
'AgentType',
'Status',
'MaxNumberOfTasks',
'TransformationFamily',
'Body'] # for the moment include TransformationFamily
self.TRANSFILEPARAMS = ['TransformationID',
'FileID',
'Status',
'TaskID',
'TargetSE',
'UsedSE',
'ErrorCount',
'LastUpdate',
'InsertedTime']
self.TRANSFILETASKPARAMS = ['TransformationID',
'FileID',
'TaskID']
self.TASKSPARAMS = ['TaskID',
'TransformationID',
'ExternalStatus',
'ExternalID',
'TargetSE',
'CreationTime',
'LastUpdateTime']
self.ADDITIONALPARAMETERS = ['TransformationID',
'ParameterName',
'ParameterValue',
'ParameterType'
]
# This is here to ensure full compatibility between different versions of the MySQL DB schema
self.isTransformationTasksInnoDB = True
res = self._query("SELECT Engine FROM INFORMATION_SCHEMA.TABLES WHERE table_name = 'TransformationTasks'")
if not res['OK']:
raise RuntimeError(res['Message'])
else:
engine = res['Value'][0][0]
if engine.lower() != 'innodb':
self.isTransformationTasksInnoDB = False
def getName(self):
""" Get the database name
"""
return self.dbName
###########################################################################
#
# These methods manipulate the Transformations table
#
def addTransformation(self, transName, description, longDescription, authorDN, authorGroup, transType,
plugin, agentType, fileMask,
transformationGroup='General',
groupSize=1,
inheritedFrom=0,
body='',
maxTasks=0,
eventsPerTask=0,
addFiles=True,
connection=False):
""" Add new transformation definition including its input streams
"""
connection = self.__getConnection(connection)
res = self._getTransformationID(transName, connection=connection)
if res['OK']:
return S_ERROR("Transformation with name %s already exists with TransformationID = %d" % (transName,
res['Value']))
elif res['Message'] != "Transformation does not exist":
return res
self.lock.acquire()
res = self._escapeString(body)
if not res['OK']:
return S_ERROR("Failed to parse the transformation body")
body = res['Value']
req = "INSERT INTO Transformations (TransformationName,Description,LongDescription, \
CreationDate,LastUpdate,AuthorDN,AuthorGroup,Type,Plugin,AgentType,\
FileMask,Status,TransformationGroup,GroupSize,\
InheritedFrom,Body,MaxNumberOfTasks,EventsPerTask)\
VALUES ('%s','%s','%s',\
UTC_TIMESTAMP(),UTC_TIMESTAMP(),'%s','%s','%s','%s','%s',\
'%s','New','%s',%d,\
%d,%s,%d,%d);" % \
(transName, description, longDescription,
authorDN, authorGroup, transType, plugin, agentType,
fileMask, transformationGroup, groupSize,
inheritedFrom, body, maxTasks, eventsPerTask)
res = self._update(req, connection)
if not res['OK']:
self.lock.release()
return res
transID = res['lastRowId']
self.lock.release()
# If the transformation has an input data specification
if fileMask:
self.filters.append((transID, json.loads(fileMask)))
if inheritedFrom:
res = self._getTransformationID(inheritedFrom, connection=connection)
if not res['OK']:
gLogger.error("Failed to get ID for parent transformation, now deleting", res['Message'])
return self.deleteTransformation(transID, connection=connection)
originalID = res['Value']
# FIXME: this is not the right place to change status information, and in general this whole block should not be here
res = self.setTransformationParameter(originalID, 'Status', 'Completing',
author=authorDN, connection=connection)
if not res['OK']:
gLogger.error("Failed to update parent transformation status: now deleting", res['Message'])
return self.deleteTransformation(transID, connection=connection)
res = self.setTransformationParameter(originalID, 'AgentType', 'Automatic',
author=authorDN, connection=connection)
if not res['OK']:
gLogger.error("Failed to update parent transformation agent type, now deleting", res['Message'])
return self.deleteTransformation(transID, connection=connection)
message = 'Creation of the derived transformation (%d)' % transID
self.__updateTransformationLogging(originalID, message, authorDN, connection=connection)
res = self.getTransformationFiles(condDict={'TransformationID': originalID}, connection=connection)
if not res['OK']:
gLogger.error("Could not get transformation files, now deleting", res['Message'])
return self.deleteTransformation(transID, connection=connection)
if res['Records']:
res = self.__insertExistingTransformationFiles(transID, res['Records'], connection=connection)
if not res['OK']:
gLogger.error("Could not insert files, now deleting", res['Message'])
return self.deleteTransformation(transID, connection=connection)
### Add files to the DataFiles table ##################
catalog = FileCatalog()
if addFiles and fileMask:
mqDict = json.loads(fileMask)
res = catalog.findFilesByMetadata(mqDict)
if not res['OK']:
gLogger.error("Failed to find files to be added to the transformation", res['Message'])
return res
filesToAdd = res['Value']
gLogger.notice('filesToAdd', filesToAdd)
if filesToAdd:
connection = self.__getConnection(connection)
res = self.__addDataFiles(filesToAdd, connection=connection)
if not res['OK']:
return res
lfnFileIDs = res['Value']
# Add the files to the transformations
fileIDs = []
for lfn in filesToAdd:
if lfn in lfnFileIDs:
fileIDs.append(lfnFileIDs[lfn])
res = self.__addFilesToTransformation(transID, fileIDs, connection=connection)
if not res['OK']:
gLogger.error("Failed to add files to transformation", "%s %s" % (transID, res['Message']))
message = "Created transformation %d" % transID
self.__updateTransformationLogging(transID, message, authorDN, connection=connection)
return S_OK(transID)
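# Illustrative usage sketch for addTransformation() above (not part of the original module;
# the credentials, group and metadata query below are hypothetical placeholders):
#
#   db = TransformationDB()
#   res = db.addTransformation('MyDerivedProduction',        # transName
#                              'short description',          # description
#                              'longer description',         # longDescription
#                              '/DC=example/CN=some.user',   # authorDN (hypothetical)
#                              'example_group',              # authorGroup (hypothetical)
#                              'MCSimulation',               # transType
#                              'Standard',                   # plugin
#                              'Automatic',                  # agentType
#                              '{"DataType": "SIM"}')        # fileMask, a JSON metadata query
#   if res['OK']:
#       transID = res['Value']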
def getTransformations(self, condDict=None, older=None, newer=None, timeStamp='LastUpdate',
orderAttribute=None, limit=None, extraParams=False, offset=None, connection=False):
""" Get parameters of all the Transformations with support for the web standard structure """
connection = self.__getConnection(connection)
req = "SELECT %s FROM Transformations %s" % (intListToString(self.TRANSPARAMS),
self.buildCondition(condDict, older, newer, timeStamp,
orderAttribute, limit, offset=offset))
res = self._query(req, connection)
if not res['OK']:
return res
if condDict is None:
condDict = {}
webList = []
resultList = []
for row in res['Value']:
# Prepare the structure for the web
rList = [str(item) if not isinstance(item, (long, int)) else item for item in row]
transDict = dict(zip(self.TRANSPARAMS, row))
webList.append(rList)
if extraParams:
res = self.__getAdditionalParameters(transDict['TransformationID'], connection=connection)
if not res['OK']:
return res
transDict.update(res['Value'])
resultList.append(transDict)
result = S_OK(resultList)
result['Records'] = webList
result['ParameterNames'] = self.TRANSPARAMS
return result
def getTransformation(self, transName, extraParams=False, connection=False):
"""Get Transformation definition and parameters of Transformation identified by TransformationID
"""
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.getTransformations(condDict={'TransformationID': transID}, extraParams=extraParams,
connection=connection)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR("Transformation %s did not exist" % transName)
return S_OK(res['Value'][0])
def getTransformationParameters(self, transName, parameters, connection=False):
""" Get the requested parameters for a supplied transformation """
if isinstance(parameters, basestring):
parameters = [parameters]
extraParams = bool(set(parameters) - set(self.TRANSPARAMS))
res = self.getTransformation(transName, extraParams=extraParams, connection=connection)
if not res['OK']:
return res
transParams = res['Value']
paramDict = {}
for reqParam in parameters:
if reqParam not in transParams:
return S_ERROR("Parameter %s not defined for transformation %s" % (reqParam, transName))
paramDict[reqParam] = transParams[reqParam]
if len(paramDict) == 1:
return S_OK(paramDict[reqParam])
return S_OK(paramDict)
def getTransformationWithStatus(self, status, connection=False):
""" Gets a list of the transformations with the supplied status """
req = "SELECT TransformationID FROM Transformations WHERE Status = '%s';" % status
res = self._query(req, conn=connection)
if not res['OK']:
return res
transIDs = [tupleIn[0] for tupleIn in res['Value']]
return S_OK(transIDs)
def getTableDistinctAttributeValues(self, table, attributes, selectDict, older=None, newer=None,
timeStamp=None, connection=False):
tableFields = {'Transformations': self.TRANSPARAMS,
'TransformationTasks': self.TASKSPARAMS,
'TransformationFiles': self.TRANSFILEPARAMS}
possibleFields = tableFields.get(table, [])
return self.__getTableDistinctAttributeValues(table, possibleFields, attributes, selectDict, older, newer,
timeStamp, connection=connection)
def __getTableDistinctAttributeValues(self, table, possible, attributes, selectDict, older, newer,
timeStamp, connection=False):
connection = self.__getConnection(connection)
attributeValues = {}
for attribute in attributes:
if possible and (attribute not in possible):
return S_ERROR('Requested attribute (%s) does not exist in table %s' % (attribute, table))
res = self.getDistinctAttributeValues(table, attribute, condDict=selectDict, older=older, newer=newer,
timeStamp=timeStamp, connection=connection)
if not res['OK']:
return S_ERROR('Failed to serve values for attribute %s in table %s' % (attribute, table))
attributeValues[attribute] = res['Value']
return S_OK(attributeValues)
def __updateTransformationParameter(self, transID, paramName, paramValue, connection=False):
if paramName not in self.mutable:
return S_ERROR("Can not update the '%s' transformation parameter" % paramName)
if paramName == 'Body':
res = self._escapeString(paramValue)
if not res['OK']:
return S_ERROR("Failed to parse parameter value")
paramValue = res['Value']
req = "UPDATE Transformations SET %s=%s, LastUpdate=UTC_TIMESTAMP() WHERE TransformationID=%d" % (paramName,
paramValue,
transID)
return self._update(req, connection)
req = "UPDATE Transformations SET %s='%s', LastUpdate=UTC_TIMESTAMP() WHERE TransformationID=%d" % (paramName,
paramValue,
transID)
return self._update(req, connection)
def _getTransformationID(self, transName, connection=False):
""" Method returns ID of transformation with the name=<name> """
try:
transName = long(transName)
cmd = "SELECT TransformationID from Transformations WHERE TransformationID=%d;" % transName
except ValueError:
if not isinstance(transName, basestring):
return S_ERROR("Transformation should be ID or name")
cmd = "SELECT TransformationID from Transformations WHERE TransformationName='%s';" % transName
res = self._query(cmd, connection)
if not res['OK']:
gLogger.error("Failed to obtain transformation ID for transformation", "%s: %s" % (transName, res['Message']))
return res
elif not res['Value']:
gLogger.verbose("Transformation %s does not exist" % (transName))
return S_ERROR("Transformation does not exist")
return S_OK(res['Value'][0][0])
def __deleteTransformation(self, transID, connection=False):
req = "DELETE FROM Transformations WHERE TransformationID=%d;" % transID
return self._update(req, connection)
def __updateFilters(self, connection=False):
""" Get filters for all defined input streams in all the transformations.
If transID argument is given, get filters only for this transformation.
"""
resultList = []
req = "SELECT TransformationID,FileMask FROM Transformations;"
res = self._query(req, connection)
if not res['OK']:
return res
for transID, mask in res['Value']:
if mask:
resultList.append((transID, json.loads(mask)))
self.filters = resultList
return S_OK(resultList)
def __filterFile(self, lfn, filters=None):
"""Pass the input file through a supplied filter or those currently active """
result = []
if filters:
for transID, refilter in filters:
if refilter.search(lfn):
result.append(transID)
else:
for transID, refilter in self.filters:
if refilter.search(lfn):
result.append(transID)
return result
###########################################################################
#
# These methods manipulate the AdditionalParameters tables
#
def setTransformationParameter(self, transName, paramName, paramValue, author='', connection=False):
""" Add a parameter for the supplied transformations """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
message = ''
if paramName in self.TRANSPARAMS:
res = self.__updateTransformationParameter(transID, paramName, paramValue, connection=connection)
if res['OK']:
pv = self._escapeString(paramValue)
if not pv['OK']:
return S_ERROR("Failed to parse parameter value")
paramValue = pv['Value']
message = '%s updated to %s' % (paramName, paramValue)
else:
res = self.__addAdditionalTransformationParameter(transID, paramName, paramValue, connection=connection)
if res['OK']:
message = 'Added additional parameter %s' % paramName
if message:
self.__updateTransformationLogging(transID, message, author, connection=connection)
return res
def getAdditionalParameters(self, transName, connection=False):
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
return self.__getAdditionalParameters(transID, connection=connection)
def deleteTransformationParameter(self, transName, paramName, author='', connection=False):
""" Delete a parameter from the additional parameters table """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if paramName in self.TRANSPARAMS:
return S_ERROR("Can not delete core transformation parameter")
res = self.__deleteTransformationParameters(transID, parameters=[paramName], connection=connection)
if not res['OK']:
return res
self.__updateTransformationLogging(transID, 'Removed additional parameter %s' % paramName, author,
connection=connection)
return res
def __addAdditionalTransformationParameter(self, transID, paramName, paramValue, connection=False):
req = "DELETE FROM AdditionalParameters WHERE TransformationID=%d AND ParameterName='%s'" % (transID, paramName)
res = self._update(req, connection)
if not res['OK']:
return res
res = self._escapeString(paramValue)
if not res['OK']:
return S_ERROR("Failed to parse parameter value")
paramValue = res['Value']
paramType = 'StringType'
if isinstance(paramValue, (long, int)):
paramType = 'IntType'
req = "INSERT INTO AdditionalParameters (%s) VALUES (%s,'%s',%s,'%s');" % (', '.join(self.ADDITIONALPARAMETERS),
transID, paramName,
paramValue, paramType)
return self._update(req, connection)
def __getAdditionalParameters(self, transID, connection=False):
req = "SELECT %s FROM AdditionalParameters WHERE TransformationID = %d" % (', '.join(self.ADDITIONALPARAMETERS),
transID)
res = self._query(req, connection)
if not res['OK']:
return res
paramDict = {}
for _transID, parameterName, parameterValue, parameterType in res['Value']:
if parameterType in ('IntType', 'LongType'):
parameterValue = int(parameterValue)
paramDict[parameterName] = parameterValue
return S_OK(paramDict)
def __deleteTransformationParameters(self, transID, parameters=None, connection=False):
""" Remove the parameters associated to a transformation """
if parameters is None:
parameters = []
req = "DELETE FROM AdditionalParameters WHERE TransformationID=%d" % transID
if parameters:
req = "%s AND ParameterName IN (%s);" % (req, stringListToString(parameters))
return self._update(req, connection)
###########################################################################
#
# These methods manipulate the TransformationFiles table
#
def addFilesToTransformation(self, transName, lfns, connection=False):
""" Add a list of LFNs to the transformation directly """
gLogger.info("TransformationDB.addFilesToTransformation:"
" Attempting to add %s files to transformations: %s" % (len(lfns), transName))
if not lfns:
return S_ERROR('Zero length LFN list')
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
# Add missing files if necessary (__addDataFiles does the job)
res = self.__addDataFiles(lfns, connection=connection)
if not res['OK']:
return res
fileIDs = dict((fileID, lfn) for lfn, fileID in res['Value'].iteritems())
# Attach files to transformation
successful = {}
if fileIDs:
res = self.__addFilesToTransformation(transID, fileIDs.keys(), connection=connection)
if not res['OK']:
return res
for fileID in fileIDs:
lfn = fileIDs[fileID]
successful[lfn] = "Added" if fileID in res['Value'] else "Present"
resDict = {'Successful': successful, 'Failed': {}}
return S_OK(resDict)
def getTransformationFiles(self, condDict=None, older=None, newer=None, timeStamp='LastUpdate',
orderAttribute=None, limit=None, offset=None, connection=False):
""" Get files for the supplied transformations with support for the web standard structure """
connection = self.__getConnection(connection)
req = "SELECT %s FROM TransformationFiles" % (intListToString(self.TRANSFILEPARAMS))
originalFileIDs = {}
if condDict is None:
condDict = {}
if condDict or older or newer:
lfns = condDict.pop('LFN', None)
if lfns:
if isinstance(lfns, basestring):
lfns = [lfns]
res = self.__getFileIDsForLfns(lfns, connection=connection)
if not res['OK']:
return res
originalFileIDs = res['Value'][0]
condDict['FileID'] = originalFileIDs.keys()
for val in condDict.itervalues():
if not val:
return S_OK([])
req = "%s %s" % (req, self.buildCondition(condDict, older, newer, timeStamp, orderAttribute, limit,
offset=offset))
res = self._query(req, connection)
if not res['OK']:
return res
transFiles = res['Value']
fileIDs = [int(row[1]) for row in transFiles]
webList = []
resultList = []
if not fileIDs:
originalFileIDs = {}
else:
if not originalFileIDs:
res = self.__getLfnsForFileIDs(fileIDs, connection=connection)
if not res['OK']:
return res
originalFileIDs = res['Value'][1]
for row in transFiles:
lfn = originalFileIDs[row[1]]
# Prepare the structure for the web
fDict = {'LFN': lfn}
fDict.update(dict(zip(self.TRANSFILEPARAMS, row)))
# Note: the line below is returning "None" if the item is None... This seems to work but is ugly...
rList = [lfn] + [str(item) if not isinstance(item, (long, int)) else item for item in row]
webList.append(rList)
resultList.append(fDict)
result = S_OK(resultList)
result['Records'] = webList
result['ParameterNames'] = ['LFN'] + self.TRANSFILEPARAMS
return result
def getFileSummary(self, lfns, connection=False):
""" Get file status summary in all the transformations """
connection = self.__getConnection(connection)
condDict = {'LFN': lfns}
res = self.getTransformationFiles(condDict=condDict, connection=connection)
if not res['OK']:
return res
resDict = {}
for fileDict in res['Value']:
resDict.setdefault(fileDict['LFN'], {})[fileDict['TransformationID']] = fileDict
failedDict = dict.fromkeys(set(lfns) - set(resDict), 'Did not exist in the Transformation database')
return S_OK({'Successful': resDict, 'Failed': failedDict})
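# Return-shape sketch for getFileSummary() above (illustrative only; the LFNs and the
# transformation ID are hypothetical):
#
#   {'Successful': {'/grid/data/a': {42: {...file record in transformation 42...}}},
#    'Failed':     {'/grid/data/missing': 'Did not exist in the Transformation database'}}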
def setFileStatusForTransformation(self, transID, fileStatusDict=None, connection=False):
""" Set file status for the given transformation, based on
fileStatusDict {fileID_A: ('statusA',errorA), fileID_B: ('statusB',errorB), ...}
The ErrorCount is incremented if errorA flag is True
"""
if not fileStatusDict:
return S_OK()
# Building the request with "ON DUPLICATE KEY UPDATE"
reqBase = "INSERT INTO TransformationFiles (TransformationID, FileID, Status, ErrorCount, LastUpdate) VALUES "
# Get fileID and status for each case: error and no error
statusFileDict = {}
for fileID, (status, error) in fileStatusDict.iteritems():
statusFileDict.setdefault(error, []).append((fileID, status))
for error, fileIDStatusList in statusFileDict.iteritems():
req = reqBase + ','.join("(%d, %d, '%s', 0, UTC_TIMESTAMP())" %
(transID, fileID, status) for fileID, status in fileIDStatusList)
if error:
# Increment the error counter when we requested
req += " ON DUPLICATE KEY UPDATE Status=VALUES(Status),ErrorCount=ErrorCount+1,LastUpdate=VALUES(LastUpdate)"
else:
req += " ON DUPLICATE KEY UPDATE Status=VALUES(Status),LastUpdate=VALUES(LastUpdate)"
result = self._update(req, connection)
if not result['OK']:
return result
return S_OK()
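# A minimal sketch (added for clarity, not in the original source) of the fileStatusDict
# expected by setFileStatusForTransformation(): keys are FileIDs, values are
# (status, incrementErrorCount) pairs. The IDs below are hypothetical.
#
#   fileStatusDict = {
#       101: ('Processed', False),   # plain status update
#       102: ('Problematic', True),  # status update and ErrorCount incremented
#   }
#   db.setFileStatusForTransformation(transID, fileStatusDict=fileStatusDict)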
def getTransformationStats(self, transName, connection=False):
""" Get number of files in Transformation Table for each status """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.getCounters('TransformationFiles', ['TransformationID', 'Status'], {'TransformationID': transID})
if not res['OK']:
return res
statusDict = dict((attrDict['Status'], count)
for attrDict, count in res['Value'] if '-' not in attrDict['Status'])
statusDict['Total'] = sum(statusDict.values())
return S_OK(statusDict)
def getTransformationFilesCount(self, transName, field, selection=None, connection=False):
""" Get the number of files in the TransformationFiles table grouped by the supplied field """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
if selection is None:
selection = {}
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
selection['TransformationID'] = transID
if field not in self.TRANSFILEPARAMS:
return S_ERROR("Supplied field not in TransformationFiles table")
res = self.getCounters('TransformationFiles', ['TransformationID', field], selection)
if not res['OK']:
return res
countDict = dict((attrDict[field], count) for attrDict, count in res['Value'])
countDict['Total'] = sum(countDict.values())
return S_OK(countDict)
def __addFilesToTransformation(self, transID, fileIDs, connection=False):
req = "SELECT FileID from TransformationFiles"
req = req + " WHERE TransformationID = %d AND FileID IN (%s);" % (transID, intListToString(fileIDs))
res = self._query(req, connection)
if not res['OK']:
return res
for tupleIn in res['Value']:
fileIDs.remove(tupleIn[0])
if not fileIDs:
return S_OK([])
req = "INSERT INTO TransformationFiles (TransformationID,FileID,LastUpdate,InsertedTime) VALUES"
for fileID in fileIDs:
req = "%s (%d,%d,UTC_TIMESTAMP(),UTC_TIMESTAMP())," % (req, transID, fileID)
req = req.rstrip(',')
res = self._update(req, connection)
if not res['OK']:
return res
return S_OK(fileIDs)
def __insertExistingTransformationFiles(self, transID, fileTuplesList, connection=False):
""" Inserting already transformation files in TransformationFiles table (e.g. for deriving transformations)
"""
gLogger.info("Inserting %d files in TransformationFiles" % len(fileTuplesList))
# splitting in various chunks, in case it is too big
for fileTuples in breakListIntoChunks(fileTuplesList, 10000):
gLogger.verbose("Adding first %d files in TransformationFiles (out of %d)" % (len(fileTuples),
len(fileTuplesList)))
req = "INSERT INTO TransformationFiles (TransformationID,Status,TaskID,FileID,TargetSE,UsedSE,LastUpdate) VALUES"
candidates = False
for ft in fileTuples:
_lfn, originalID, fileID, status, taskID, targetSE, usedSE, _errorCount, _lastUpdate, _insertTime = ft[:10]
if status not in ('Removed', ):
candidates = True
if not re.search('-', status):
status = "%s-inherited" % status
if taskID:
# Remap the TaskID so that both the parent transformation ID and the original TaskID stay recoverable;
# readable up to 999,999 tasks, since that field is an int(11) in the DB, not a string
taskID = 1000000 * int(originalID) + int(taskID)
req = "%s (%d,'%s','%d',%d,'%s','%s',UTC_TIMESTAMP())," % (req, transID, status, taskID,
fileID, targetSE, usedSE)
if not candidates:
continue
req = req.rstrip(",")
res = self._update(req, connection)
if not res['OK']:
return res
return S_OK()
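# Worked example of the TaskID remapping above (added for clarity): when deriving a
# transformation, a file attached to task 345 of parent transformation 12 is re-inserted
# with TaskID = 1000000 * 12 + 345 = 12000345, so both the parent transformation and the
# original task can still be recovered from the stored value.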
def __assignTransformationFile(self, transID, taskID, se, fileIDs, connection=False):
""" Make necessary updates to the TransformationFiles table for the newly created task
"""
req = "UPDATE TransformationFiles SET TaskID='%d',UsedSE='%s',Status='Assigned',LastUpdate=UTC_TIMESTAMP()"
req = (req + " WHERE TransformationID = %d AND FileID IN (%s);") % (taskID, se, transID, intListToString(fileIDs))
res = self._update(req, connection)
if not res['OK']:
gLogger.error("Failed to assign file to task", res['Message'])
fileTuples = []
for fileID in fileIDs:
fileTuples.append(("(%d,%d,%d)" % (transID, fileID, taskID)))
req = "INSERT INTO TransformationFileTasks (TransformationID,FileID,TaskID) VALUES %s" % ','.join(fileTuples)
res = self._update(req, connection)
if not res['OK']:
gLogger.error("Failed to assign file to task", res['Message'])
return res
def __setTransformationFileStatus(self, fileIDs, status, connection=False):
req = "UPDATE TransformationFiles SET Status = '%s' WHERE FileID IN (%s);" % (status, intListToString(fileIDs))
res = self._update(req, connection)
if not res['OK']:
gLogger.error("Failed to update file status", res['Message'])
return res
def __setTransformationFileUsedSE(self, fileIDs, usedSE, connection=False):
req = "UPDATE TransformationFiles SET UsedSE = '%s' WHERE FileID IN (%s);" % (usedSE, intListToString(fileIDs))
res = self._update(req, connection)
if not res['OK']:
gLogger.error("Failed to update file usedSE", res['Message'])
return res
def __resetTransformationFile(self, transID, taskID, connection=False):
req = "UPDATE TransformationFiles SET TaskID=NULL, UsedSE='Unknown', Status='Unused'\
WHERE TransformationID = %d AND TaskID=%d;" % (transID, taskID)
res = self._update(req, connection)
if not res['OK']:
gLogger.error("Failed to reset transformation file", res['Message'])
return res
def __deleteTransformationFiles(self, transID, connection=False):
""" Remove the files associated to a transformation """
req = "DELETE FROM TransformationFiles WHERE TransformationID = %d;" % transID
res = self._update(req, connection)
if not res['OK']:
gLogger.error("Failed to delete transformation files", res['Message'])
return res
###########################################################################
#
# These methods manipulate the TransformationFileTasks table
#
def __deleteTransformationFileTask(self, transID, taskID, connection=False):
''' Delete the file/task associations for the given task of the given transformation
from the TransformationFileTasks table
'''
req = "DELETE FROM TransformationFileTasks WHERE TransformationID=%d AND TaskID=%d" % (transID, taskID)
return self._update(req, connection)
def __deleteTransformationFileTasks(self, transID, connection=False):
''' Remove all associations between files, tasks and a transformation '''
req = "DELETE FROM TransformationFileTasks WHERE TransformationID = %d;" % transID
res = self._update(req, connection)
if not res['OK']:
gLogger.error("Failed to delete transformation files/task history", res['Message'])
return res
###########################################################################
#
# These methods manipulate the TransformationTasks table
#
def getTransformationTasks(self, condDict=None, older=None, newer=None, timeStamp='CreationTime',
orderAttribute=None, limit=None, inputVector=False,
offset=None, connection=False):
connection = self.__getConnection(connection)
req = "SELECT %s FROM TransformationTasks %s" % (intListToString(self.TASKSPARAMS),
self.buildCondition(condDict, older, newer, timeStamp,
orderAttribute, limit, offset=offset))
res = self._query(req, connection)
if not res['OK']:
return res
if condDict is None:
condDict = {}
webList = []
resultList = []
for row in res['Value']:
# Prepare the structure for the web
rList = [str(item) if not isinstance(item, (long, int)) else item for item in row]
taskDict = dict(zip(self.TASKSPARAMS, row))
webList.append(rList)
if inputVector:
taskDict['InputVector'] = ''
taskID = taskDict['TaskID']
transID = taskDict['TransformationID']
res = self.getTaskInputVector(transID, taskID)
if res['OK']:
if taskID in res['Value']:
taskDict['InputVector'] = res['Value'][taskID]
else:
return res
resultList.append(taskDict)
result = S_OK(resultList)
result['Records'] = webList
result['ParameterNames'] = self.TASKSPARAMS
return result
def getTasksForSubmission(self, transName, numTasks=1, site='', statusList=None,
older=None, newer=None, connection=False):
""" Select tasks with the given status (and site) for submission """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
if statusList is None:
statusList = ['Created']
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
condDict = {"TransformationID": transID}
if statusList:
condDict["ExternalStatus"] = statusList
if site:
numTasks = 0
res = self.getTransformationTasks(condDict=condDict, older=older, newer=newer,
timeStamp='CreationTime', orderAttribute=None, limit=numTasks,
inputVector=True, connection=connection)
if not res['OK']:
return res
tasks = res['Value']
# Now prepare the tasks
resultDict = {}
for taskDict in tasks:
if len(resultDict) >= numTasks:
break
taskDict['Status'] = taskDict.pop('ExternalStatus')
taskDict['InputData'] = taskDict.pop('InputVector')
taskDict.pop('LastUpdateTime')
taskDict.pop('CreationTime')
taskDict.pop('ExternalID')
taskID = taskDict['TaskID']
resultDict[taskID] = taskDict
if site:
resultDict[taskID]['Site'] = site
return S_OK(resultDict)
def deleteTasks(self, transName, taskIDbottom, taskIDtop, author='', connection=False):
""" Delete tasks with taskID range in transformation """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
for taskID in range(taskIDbottom, taskIDtop + 1):
res = self.__removeTransformationTask(transID, taskID, connection=connection)
if not res['OK']:
return res
message = "Deleted tasks from %d to %d" % (taskIDbottom, taskIDtop)
self.__updateTransformationLogging(transID, message, author, connection=connection)
return res
def reserveTask(self, transName, taskID, connection=False):
""" Reserve the taskID from transformation for submission """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__checkUpdate("TransformationTasks", "ExternalStatus", "Reserved", {"TransformationID": transID,
"TaskID": taskID},
connection=connection)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR('Failed to set Reserved status for job %d - already Reserved' % int(taskID))
# The job is reserved, update the time stamp
res = self.setTaskStatus(transID, taskID, 'Reserved', connection=connection)
if not res['OK']:
return S_ERROR('Failed to set Reserved status for job %d - failed to update the time stamp' % int(taskID))
return S_OK()
def setTaskStatusAndWmsID(self, transName, taskID, status, taskWmsID, connection=False):
""" Set status and ExternalID for job with taskID in production with transformationID
"""
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
# Set ID first in order to be sure there is no status set without the ID being set
res = self.__setTaskParameterValue(transID, taskID, 'ExternalID', taskWmsID, connection=connection)
if not res['OK']:
return res
return self.__setTaskParameterValue(transID, taskID, 'ExternalStatus', status, connection=connection)
def setTaskStatus(self, transName, taskID, status, connection=False):
""" Set status for job with taskID in production with transformationID """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if not isinstance(taskID, list):
taskIDList = [taskID]
else:
taskIDList = list(taskID)
for taskID in taskIDList:
res = self.__setTaskParameterValue(transID, taskID, 'ExternalStatus', status, connection=connection)
if not res['OK']:
return res
return S_OK()
def getTransformationTaskStats(self, transName='', connection=False):
""" Returns dictionary with number of jobs per status for the given production.
"""
connection = self.__getConnection(connection)
if transName:
res = self._getTransformationID(transName, connection=connection)
if not res['OK']:
gLogger.error("Failed to get ID for transformation", res['Message'])
return res
res = self.getCounters('TransformationTasks', ['ExternalStatus'], {'TransformationID': res['Value']},
connection=connection)
else:
res = self.getCounters('TransformationTasks', ['ExternalStatus', 'TransformationID'], {},
connection=connection)
if not res['OK']:
return res
statusDict = {}
total = 0
for attrDict, count in res['Value']:
status = attrDict['ExternalStatus']
statusDict[status] = count
total += count
statusDict['TotalCreated'] = total
return S_OK(statusDict)
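# Return-shape sketch for getTransformationTaskStats() above (illustrative counts and
# status names; the actual ExternalStatus values depend on the deployment):
#
#   {'Created': 10, 'Submitted': 5, 'Done': 85, 'TotalCreated': 100}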
def __setTaskParameterValue(self, transID, taskID, paramName, paramValue, connection=False):
req = "UPDATE TransformationTasks SET %s='%s', LastUpdateTime=UTC_TIMESTAMP()" % (paramName, paramValue)
req = req + " WHERE TransformationID=%d AND TaskID=%d;" % (transID, taskID)
return self._update(req, connection)
def __deleteTransformationTasks(self, transID, connection=False):
""" Delete all the tasks from the TransformationTasks table for transformation with TransformationID
"""
req = "DELETE FROM TransformationTasks WHERE TransformationID=%d" % transID
return self._update(req, connection)
def __deleteTransformationTask(self, transID, taskID, connection=False):
""" Delete the task from the TransformationTasks table for transformation with TransformationID
"""
req = "DELETE FROM TransformationTasks WHERE TransformationID=%d AND TaskID=%d" % (transID, taskID)
return self._update(req, connection)
####################################################################
#
# These methods manipulate the TransformationInputDataQuery table
#
def createTransformationInputDataQuery(self, transName, queryDict, author='', connection=False):
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
return self.__addInputDataQuery(transID, queryDict, author=author, connection=connection)
def __addInputDataQuery(self, transID, queryDict, author='', connection=False):
res = self.getTransformationInputDataQuery(transID, connection=connection)
if res['OK']:
return S_ERROR("Input data query already exists for transformation")
if res['Message'] != 'No InputDataQuery found for transformation':
return res
for parameterName in sorted(queryDict):
parameterValue = queryDict[parameterName]
if not parameterValue:
continue
parameterType = 'String'
if isinstance(parameterValue, (list, tuple)):
if isinstance(parameterValue[0], (long, int)):
parameterType = 'Integer'
parameterValue = [str(x) for x in parameterValue]
parameterValue = ';;;'.join(parameterValue)
else:
if isinstance(parameterValue, (long, int)):
parameterType = 'Integer'
parameterValue = str(parameterValue)
if isinstance(parameterValue, dict):
parameterType = 'Dict'
parameterValue = str(parameterValue)
res = self.insertFields('TransformationInputDataQuery', ['TransformationID', 'ParameterName',
'ParameterValue', 'ParameterType'],
[transID, parameterName, parameterValue, parameterType], conn=connection)
if not res['OK']:
message = 'Failed to add input data query'
self.deleteTransformationInputDataQuery(transID, connection=connection)
break
else:
message = 'Added input data query'
self.__updateTransformationLogging(transID, message, author, connection=connection)
return res
def deleteTransformationInputDataQuery(self, transName, author='', connection=False):
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "DELETE FROM TransformationInputDataQuery WHERE TransformationID=%d;" % transID
res = self._update(req, connection)
if not res['OK']:
return res
if res['Value']:
# Add information to the transformation logging
message = 'Deleted input data query'
self.__updateTransformationLogging(transID, message, author, connection=connection)
return res
def getTransformationInputDataQuery(self, transName, connection=False):
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "SELECT ParameterName,ParameterValue,ParameterType FROM TransformationInputDataQuery"
req = req + " WHERE TransformationID=%d;" % transID
res = self._query(req, connection)
if not res['OK']:
return res
queryDict = {}
for parameterName, parameterValue, parameterType in res['Value']:
if re.search(';;;', str(parameterValue)):
parameterValue = parameterValue.split(';;;')
if parameterType == 'Integer':
parameterValue = [int(x) for x in parameterValue]
elif parameterType == 'Integer':
parameterValue = int(parameterValue)
elif parameterType == 'Dict':
parameterValue = eval(parameterValue)
queryDict[parameterName] = parameterValue
if not queryDict:
return S_ERROR("No InputDataQuery found for transformation")
return S_OK(queryDict)
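# Illustrative round trip (not in the original source): a query created with
# createTransformationInputDataQuery(transName, {'RunNumber': [1, 2, 3], 'DataType': 'SIM'})
# is stored as one row per parameter, with list values joined by ';;;' and typed as
# 'Integer' or 'String'; getTransformationInputDataQuery() then splits and casts them back,
# returning {'RunNumber': [1, 2, 3], 'DataType': 'SIM'}.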
###########################################################################
#
# These methods manipulate the TaskInputs table
#
def getTaskInputVector(self, transName, taskID, connection=False):
""" Get input vector for the given task """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if not isinstance(taskID, list):
taskIDList = [taskID]
else:
taskIDList = list(taskID)
taskString = ','.join(["'%s'" % x for x in taskIDList])
req = "SELECT TaskID,InputVector FROM TaskInputs WHERE TaskID in (%s) AND TransformationID='%d';" % (taskString,
transID)
res = self._query(req)
inputVectorDict = {}
if not res['OK']:
return res
elif res['Value']:
for row in res['Value']:
inputVectorDict[row[0]] = row[1]
return S_OK(inputVectorDict)
def __insertTaskInputs(self, transID, taskID, lfns, connection=False):
vector = str.join(';', lfns)
fields = ['TransformationID', 'TaskID', 'InputVector']
values = [transID, taskID, vector]
res = self.insertFields('TaskInputs', fields, values, connection)
if not res['OK']:
gLogger.error("Failed to add input vector to task %d" % taskID)
return res
def __deleteTransformationTaskInputs(self, transID, taskID=0, connection=False):
""" Delete all the tasks inputs from the TaskInputs table for transformation with TransformationID
"""
req = "DELETE FROM TaskInputs WHERE TransformationID=%d" % transID
if taskID:
req = "%s AND TaskID=%d" % (req, int(taskID))
return self._update(req, connection)
###########################################################################
#
# These methods manipulate the TransformationLog table
#
def __updateTransformationLogging(self, transName, message, authorDN, connection=False):
""" Update the Transformation log table with any modifications
"""
if not authorDN:
res = getProxyInfo(False, False)
if res['OK']:
authorDN = res['Value']['subject']
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "INSERT INTO TransformationLog (TransformationID,Message,Author,MessageDate)"
req = req + " VALUES (%s,'%s','%s',UTC_TIMESTAMP());" % (transID, message, authorDN)
return self._update(req, connection)
def getTransformationLogging(self, transName, connection=False):
""" Get logging info from the TransformationLog table
"""
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "SELECT TransformationID, Message, Author, MessageDate FROM TransformationLog"
req = req + " WHERE TransformationID=%s ORDER BY MessageDate;" % (transID)
res = self._query(req)
if not res['OK']:
return res
transList = []
for transID, message, authorDN, messageDate in res['Value']:
transDict = {}
transDict['TransformationID'] = transID
transDict['Message'] = message
transDict['AuthorDN'] = authorDN
transDict['MessageDate'] = messageDate
transList.append(transDict)
return S_OK(transList)
def __deleteTransformationLog(self, transID, connection=False):
""" Remove the entries in the transformation log for a transformation
"""
req = "DELETE FROM TransformationLog WHERE TransformationID=%d;" % transID
return self._update(req, connection)
###########################################################################
#
# These methods manipulate the DataFiles table
#
def __getAllFileIDs(self, connection=False):
""" Get all the fileIDs for the supplied list of lfns
"""
req = "SELECT LFN,FileID FROM DataFiles;"
res = self._query(req, connection)
if not res['OK']:
return res
fids = {}
lfns = {}
for lfn, fileID in res['Value']:
fids[fileID] = lfn
lfns[lfn] = fileID
return S_OK((fids, lfns))
def __getFileIDsForLfns(self, lfns, connection=False):
""" Get file IDs for the given list of lfns
warning: if a file is not present in the table, no error is reported for it
"""
req = "SELECT LFN,FileID FROM DataFiles WHERE LFN in (%s);" % (stringListToString(lfns))
res = self._query(req, connection)
if not res['OK']:
return res
lfns = dict(res['Value'])
# Reverse dictionary
fids = dict((fileID, lfn) for lfn, fileID in lfns.iteritems())
return S_OK((fids, lfns))
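# A small sketch of the return value above (added for clarity; the LFNs and IDs are
# hypothetical): for lfns = ['/grid/data/a', '/grid/data/b'] with FileIDs 7 and 8,
# __getFileIDsForLfns returns
#   S_OK(({7: '/grid/data/a', 8: '/grid/data/b'},      # fids: FileID -> LFN
#         {'/grid/data/a': 7, '/grid/data/b': 8}))     # lfns: LFN -> FileID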
def __getLfnsForFileIDs(self, fileIDs, connection=False):
""" Get lfns for the given list of fileIDs
"""
req = "SELECT LFN,FileID FROM DataFiles WHERE FileID in (%s);" % stringListToString(fileIDs)
res = self._query(req, connection)
if not res['OK']:
return res
fids = dict(res['Value'])
# Reverse dictionary
lfns = dict((fileID, lfn) for lfn, fileID in fids.iteritems())
return S_OK((fids, lfns))
def __addDataFiles(self, lfns, connection=False):
""" Add a file to the DataFiles table and retrieve the FileIDs
"""
res = self.__getFileIDsForLfns(lfns, connection=connection)
if not res['OK']:
return res
# Insert only files not found, and assume the LFN is unique in the table
lfnFileIDs = res['Value'][1]
for lfn in set(lfns) - set(lfnFileIDs):
req = "INSERT INTO DataFiles (LFN,Status) VALUES ('%s','New');" % lfn
res = self._update(req, connection)
# If the LFN is duplicate we get an error and ignore it
if res['OK']:
lfnFileIDs[lfn] = res['lastRowId']
return S_OK(lfnFileIDs)
def __setDataFileStatus(self, fileIDs, status, connection=False):
""" Set the status of the supplied files
"""
req = "UPDATE DataFiles SET Status = '%s' WHERE FileID IN (%s);" % (status, intListToString(fileIDs))
return self._update(req, connection)
###########################################################################
#
# These methods manipulate multiple tables
#
def addTaskForTransformation(self, transID, lfns=None, se='Unknown', connection=False):
""" Create a new task with the supplied files for a transformation.
"""
res = self._getConnectionTransID(connection, transID)
if not res['OK']:
return res
if lfns is None:
lfns = []
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
# Be sure that all the supplied LFNs are known to the database for the supplied transformation
fileIDs = []
if lfns:
res = self.getTransformationFiles(condDict={'TransformationID': transID, 'LFN': lfns}, connection=connection)
if not res['OK']:
return res
foundLfns = set()
for fileDict in res['Value']:
fileIDs.append(fileDict['FileID'])
lfn = fileDict['LFN']
if fileDict['Status'] in self.allowedStatusForTasks:
foundLfns.add(lfn)
else:
gLogger.error("Supplied file not in %s status but %s" % (self.allowedStatusForTasks, fileDict['Status']), lfn)
unavailableLfns = set(lfns) - foundLfns
if unavailableLfns:
gLogger.error("Supplied files not found for transformation", sorted(unavailableLfns))
return S_ERROR("Not all supplied files available in the transformation database")
# Insert the task into the jobs table and retrieve the taskID
self.lock.acquire()
req = "INSERT INTO TransformationTasks(TransformationID, ExternalStatus, ExternalID, TargetSE,"
req = req + " CreationTime, LastUpdateTime)"
req = req + " VALUES (%s,'%s','%d','%s', UTC_TIMESTAMP(), UTC_TIMESTAMP());" % (transID, 'Created', 0, se)
res = self._update(req, connection)
if not res['OK']:
self.lock.release()
gLogger.error("Failed to publish task for transformation", res['Message'])
return res
# With InnoDB, TaskID is computed by a trigger, which sets the local variable @last (per connection)
# @last is the last inserted TaskID; with multi-row inserts it will be the first newly inserted TaskID.
# The trigger TaskID_Generator must be present with the InnoDB schema (defined in TransformationDB.sql)
if self.isTransformationTasksInnoDB:
res = self._query("SELECT @last;", connection)
else:
res = self._query("SELECT LAST_INSERT_ID();", connection)
self.lock.release()
if not res['OK']:
return res
taskID = int(res['Value'][0][0])
gLogger.verbose("Published task %d for transformation %d." % (taskID, transID))
# If we have input data then update their status, and taskID in the transformation table
if lfns:
res = self.__insertTaskInputs(transID, taskID, lfns, connection=connection)
if not res['OK']:
self.__removeTransformationTask(transID, taskID, connection=connection)
return res
res = self.__assignTransformationFile(transID, taskID, se, fileIDs, connection=connection)
if not res['OK']:
self.__removeTransformationTask(transID, taskID, connection=connection)
return res
return S_OK(taskID)
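# Hedged usage sketch for addTaskForTransformation() above (illustrative only; the
# transformation ID, LFNs and SE name are hypothetical, and the files must already be
# attached to the transformation in an allowed status such as 'Unused'):
#
#   res = db.addTaskForTransformation(1234, lfns=['/grid/data/a', '/grid/data/b'], se='EXAMPLE-SE')
#   if res['OK']:
#       taskID = res['Value']   # the new TaskID, also recorded in TaskInputs and TransformationFiles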
def extendTransformation(self, transName, nTasks, author='', connection=False):
""" Extend SIMULATION type transformation by nTasks number of tasks
"""
connection = self.__getConnection(connection)
res = self.getTransformation(transName, connection=connection)
if not res['OK']:
gLogger.error("Failed to get transformation details", res['Message'])
return res
transType = res['Value']['Type']
transID = res['Value']['TransformationID']
extendableProds = Operations().getValue('Transformations/ExtendableTransfTypes', ['Simulation', 'MCSimulation'])
if transType.lower() not in [ep.lower() for ep in extendableProds]:
return S_ERROR('Can not extend non-SIMULATION type production')
taskIDs = []
for _task in range(nTasks):
res = self.addTaskForTransformation(transID, connection=connection)
if not res['OK']:
return res
taskIDs.append(res['Value'])
# Add information to the transformation logging
message = 'Transformation extended by %d tasks' % nTasks
self.__updateTransformationLogging(transName, message, author, connection=connection)
return S_OK(taskIDs)
def cleanTransformation(self, transName, author='', connection=False):
""" Clean the transformation specified by name or id """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__deleteTransformationFileTasks(transID, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformationFiles(transID, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformationTaskInputs(transID, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformationTasks(transID, connection=connection)
if not res['OK']:
return res
self.__updateTransformationLogging(transID, "Transformation Cleaned", author, connection=connection)
return S_OK(transID)
def deleteTransformation(self, transName, author='', connection=False):
""" Remove the transformation specified by name or id """
res = self._getConnectionTransID(connection, transName)
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.cleanTransformation(transID, author=author, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformationLog(transID, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformationParameters(transID, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformation(transID, connection=connection)
if not res['OK']:
return res
res = self.__updateFilters()
if not res['OK']:
return res
return S_OK()
def __removeTransformationTask(self, transID, taskID, connection=False):
res = self.__deleteTransformationTaskInputs(transID, taskID, connection=connection)
if not res['OK']:
return res
res = self.__deleteTransformationFileTask(transID, taskID, connection=connection)
if not res['OK']:
return res
res = self.__resetTransformationFile(transID, taskID, connection=connection)
if not res['OK']:
return res
return self.__deleteTransformationTask(transID, taskID, connection=connection)
def __checkUpdate(self, table, param, paramValue, selectDict=None, connection=False):
""" Check whether the update will perform an update """
req = "UPDATE %s SET %s = '%s'" % (table, param, paramValue)
if selectDict:
req = "%s %s" % (req, self.buildCondition(selectDict))
return self._update(req, connection)
def __getConnection(self, connection):
if connection:
return connection
res = self._getConnection()
if res['OK']:
return res['Value']
gLogger.warn("Failed to get MySQL connection", res['Message'])
return connection
def _getConnectionTransID(self, connection, transName):
connection = self.__getConnection(connection)
res = self._getTransformationID(transName, connection=connection)
if not res['OK']:
gLogger.error("Failed to get ID for transformation", res['Message'])
return res
transID = res['Value']
resDict = {'Connection': connection, 'TransformationID': transID}
return S_OK(resDict)
####################################################################################
#
# This part should correspond to the DIRAC Standard File Catalog interface
#
####################################################################################
def exists(self, lfns, connection=False):
""" Check the presence of the lfn in the TransformationDB DataFiles table
"""
gLogger.info("TransformationDB.exists: Attempting to determine existence of %s files." % len(lfns))
res = self.__getFileIDsForLfns(lfns, connection=connection)
if not res['OK']:
return res
fileIDs = res['Value'][0]
failed = {}
successful = {}
fileIDsValues = set(fileIDs.values())
for lfn in lfns:
successful[lfn] = (lfn in fileIDsValues)
resDict = {'Successful': successful, 'Failed': failed}
return S_OK(resDict)
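# Illustration (LFNs are made up) of the structure returned by exists():
#   {'Successful': {'/some/lfn/known.file': True,
#                   '/some/lfn/unknown.file': False},
#    'Failed': {}}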
def addFile(self, fileDicts, force=False, connection=False):
""" Add the supplied lfn to the Transformations and to the DataFiles table if it passes the filter
"""
gLogger.info("TransformationDB.addFile: Attempting to add %s files." % len(fileDicts))
successful = {}
failed = {}
# Determine which files pass the filters and are to be added to transformations
transFiles = {}
filesToAdd = []
catalog = FileCatalog()
for lfn in fileDicts:
gLogger.info("addFile: Attempting to add file %s" % lfn)
res = catalog.getFileUserMetadata(lfn)
if not res['OK']:
gLogger.error("Failed to getFileUserMetadata for file", "%s: %s" % (lfn, res['Message']))
failed[lfn] = res['Message']
continue
else:
metadatadict = res['Value']
gLogger.info('Filter file with metadata', metadatadict)
transIDs = self._filterFileByMetadata(metadatadict)
gLogger.info('Transformations passing the filter: %s' % transIDs)
if not (transIDs or force): # not clear how force should be used here
successful[lfn] = False # keep False (was True): an LFN matching no transformation must not be reported as added
else:
filesToAdd.append(lfn)
for trans in transIDs:
if trans not in transFiles:
transFiles[trans] = []
transFiles[trans].append(lfn)
# Add the files to the transformations
gLogger.info('Files to add to transformations:', filesToAdd)
if filesToAdd:
for transID, lfns in transFiles.iteritems():
res = self.addFilesToTransformation(transID, lfns)
if not res['OK']:
gLogger.error("Failed to add files to transformation", "%s %s" % (transID, res['Message']))
return res
else:
for lfn in lfns:
successful[lfn] = True
res = S_OK({'Successful': successful, 'Failed': failed})
return res
def removeFile(self, lfns, connection=False):
""" Remove file specified by lfn from the ProcessingDB
"""
gLogger.info("TransformationDB.removeFile: Attempting to remove %s files." % len(lfns))
failed = {}
successful = {}
connection = self.__getConnection(connection)
if not lfns:
return S_ERROR("No LFNs supplied")
res = self.__getFileIDsForLfns(lfns, connection=connection)
if not res['OK']:
return res
fileIDs, lfnFilesIDs = res['Value']
for lfn in lfns:
if lfn not in lfnFilesIDs:
successful[lfn] = 'File does not exist'
if fileIDs:
res = self.__setTransformationFileStatus(fileIDs.keys(), 'Deleted', connection=connection)
if not res['OK']:
return res
res = self.__setDataFileStatus(fileIDs.keys(), 'Deleted', connection=connection)
if not res['OK']:
return S_ERROR("TransformationDB.removeFile: Failed to remove files.")
for lfn in lfnFilesIDs:
if lfn not in failed:
successful[lfn] = True
resDict = {'Successful': successful, 'Failed': failed}
return S_OK(resDict)
def addDirectory(self, path, force=False):
""" Adds all the files stored in a given directory in file catalog """
gLogger.info("TransformationDB.addDirectory: Attempting to populate %s." % path)
res = pythonCall(30, self.__addDirectory, path, force)
if not res['OK']:
gLogger.error("Failed to invoke addDirectory with shifter proxy")
return res
return res['Value']
def __addDirectory(self, path, force):
res = setupShifterProxyInEnv("ProductionManager")
if not res['OK']:
return S_OK("Failed to setup shifter proxy")
catalog = FileCatalog()
start = time.time()
res = catalog.listDirectory(path)
if not res['OK']:
gLogger.error("TransformationDB.addDirectory: Failed to get files. %s" % res['Message'])
return res
if path not in res['Value']['Successful']:
gLogger.error("TransformationDB.addDirectory: Failed to get files.")
return res
gLogger.info("TransformationDB.addDirectory: Obtained %s files in %s seconds." % (path, time.time() - start))
successful = []
failed = []
for lfn in res['Value']['Successful'][path]["Files"]:
res = self.addFile({lfn: {}}, force=force)
if not res['OK'] or lfn not in res['Value']['Successful']:
failed.append(lfn)
else:
successful.append(lfn)
return {"OK": True, "Value": len(res['Value']['Successful']), "Successful": successful, "Failed": failed}
def setMetadata(self, path, usermetadatadict):
""" It can be applied to a file or to a directory (path). For a file, add the file to Transformations if the updated metadata dictionary passes the filter.
For a directory, add the files contained in the directory to the Transformations if the the updated metadata dictionary passes the filter.
"""
gLogger.info("setMetadata: Attempting to set metadata %s to: %s" % (usermetadatadict, path))
transFiles = {}
filesToAdd = []
catalog = FileCatalog()
res = catalog.isFile(path)
if res['OK']:
isFile = res['Value']['Successful'][path]
else:
gLogger.error("Failed isFile %s: %s" % (path, res['Message']))
return res
res = catalog.isDirectory(path)
if res['OK']:
isDirectory = res['Value']['Successful'][path]
else:
gLogger.error("Failed isDirectory %s: %s" % (path, res['Message']))
return res
if isFile:
res = catalog.getFileUserMetadata(path)
elif isDirectory:
res = catalog.getDirectoryUserMetadata(path)
if not res['OK']:
gLogger.error("Failed to get User Metadata %s: %s" % (path, res['Message']))
return res
else:
metadatadict = res['Value']
metadatadict.update(usermetadatadict)
gLogger.info('Filter file with metadata:', metadatadict)
transIDs = self._filterFileByMetadata(metadatadict)
gLogger.info('Transformations passing the filter: %s' % transIDs)
if not transIDs:
return S_OK()
elif isFile:
filesToAdd.append(path)
elif isDirectory:
res = catalog.findFilesByMetadata(metadatadict, path)
if not res['OK']:
gLogger.error("Failed to findFilesByMetadata %s: %s" % (path, res['Message']))
return res
filesToAdd.extend(res['Value'])
for trans in transIDs:
transFiles.setdefault(trans, []).extend(filesToAdd)
# Add the files to the transformations
gLogger.info('Files to add to transformations:', filesToAdd)
if filesToAdd:
for transID, lfns in transFiles.iteritems():
res = self.addFilesToTransformation(transID, lfns)
if not res['OK']:
gLogger.error("Failed to add files to transformation", "%s %s" % (transID, res['Message']))
return res
return S_OK()
def _filterFileByMetadata(self, metadatadict):
"""Pass the input metadatadict through those currently active"""
transIDs = []
queries = self.filters
catalog = FileCatalog()
gLogger.info('Filter file by queries', queries)
res = catalog.getMetadataFields()
if not res['OK']:
gLogger.error("Error in getMetadataFields: %s" % res['Message'])
return res
if not res['Value']:
gLogger.error("Error: no metadata fields defined")
return res
typeDict = res['Value']['FileMetaFields']
typeDict.update(res['Value']['DirectoryMetaFields'])
for transID, query in queries:
mq = MetaQuery(query, typeDict)
gLogger.info("Apply query %s to metadata %s" % (mq.getMetaQuery(), metadatadict))
res = mq.applyQuery(metadatadict)
if not res['OK']:
gLogger.error("Error in applying query: %s" % res['Message'])
return res
elif res['Value']:
gLogger.info("Apply query result is True")
transIDs.append(transID)
else:
gLogger.info("Apply query result is False")
return transIDs
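# Hedged illustration: self.filters is iterated as (transID, query) pairs, so with a filter
# such as (42, {'DataType': 'SIM'}) a metadata dictionary like {'DataType': 'SIM', 'Run': 1234}
# would satisfy the MetaQuery and 42 would be appended to the returned transIDs list. The exact
# query syntax is whatever MetaQuery accepts; the values here are made up.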
|
andresailer/DIRAC
|
TransformationSystem/DB/TransformationDB.py
|
Python
|
gpl-3.0
| 69,831
|
[
"DIRAC"
] |
b71c272ee2e9025103eb98a13d6268ffa55ea5704d4cf2db1b2e9a7a07001c7c
|
"""
Generates cut-site matrix for use in CENTIPEDE
Usage:
python cutsite_matrix.py something.bam annotation.bed
"""
import pysam
import numpy as np
def get_cutsite():
pass
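# Layout of the per-interval vector built in main() below: for a BED interval padded to
# length diff = end - start, `a` holds 2*diff counters; indices [0, diff) count
# forward-strand alignment start positions and indices [diff, 2*diff) count reverse-strand
# starts offset by --shift. One tab-separated row is printed per BED line.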
def main():
import optparse
p = optparse.OptionParser(__doc__)
p.add_option("-D", "--debug", action="store_true", dest="D", help="debug")
p.add_option("-S", "--stam", action="store_true", dest="S",
help="DNAseI is generated from STAM's group")
p.add_option("-s", "--shift", action="store", dest="shift",
help="Amount to shift the negative strand", default = 36)
options, args = p.parse_args()
options.shift = int(options.shift)
bamfile = pysam.Samfile(args[0], 'rb')
PWM_bed = open(args[1], 'rU')
debug = 0
for line in PWM_bed:
line = line.split('\t')
chrom = line[0]
start = int(line[1]) - 100 - 1
end = int(line[2]) + 100
diff = end-start
a = np.zeros(2*(diff), dtype=np.int)
try:
for alignment in bamfile.fetch(chrom, start, end):
if alignment.pos-start < 0: pass
else:
if alignment.is_reverse:
try:
a[alignment.pos-start+diff+options.shift] += 1
except IndexError:
pass
else:
a[alignment.pos-start] += 1
except ValueError:
pass
print("\t".join(map(str,a)))
if options.D:
debug += 1
if debug >= 400: break
if __name__ == '__main__':
main()
|
jeffhsu3/genda
|
scripts/PWM/cutsite_matrix.py
|
Python
|
bsd-3-clause
| 1,609
|
[
"pysam"
] |
54c55e94300d318c1d0fedb74a8ac0a898b76c27851b493df44e2eec9886158f
|
"""An NNTP client class based on:
- RFC 977: Network News Transfer Protocol
- RFC 2980: Common NNTP Extensions
- RFC 3977: Network News Transfer Protocol (version 2)
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last)
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last))
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'rb') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Incompatible changes from the 2.x nntplib:
# - all commands are encoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (POST, IHAVE)
# - all responses are decoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (ARTICLE, HEAD, BODY)
# - the `file` argument to various methods is keyword-only
#
# - NNTP.date() returns a datetime object
# - NNTP.newgroups() and NNTP.newnews() take a datetime (or date) object,
# rather than a pair of (date, time) strings.
# - NNTP.newgroups() and NNTP.list() return a list of GroupInfo named tuples
# - NNTP.descriptions() returns a dict mapping group names to descriptions
# - NNTP.xover() returns a list of dicts mapping field names (header or metadata)
# to field values; each dict representing a message overview.
# - NNTP.article(), NNTP.head() and NNTP.body() return a (response, ArticleInfo)
# tuple.
# - the "internal" methods have been marked private (they now start with
# an underscore)
# Other changes from the 2.x/3.1 nntplib:
# - automatic querying of capabilities at connect
# - New method NNTP.getcapabilities()
# - New method NNTP.over()
# - New helper function decode_header()
# - NNTP.post() and NNTP.ihave() accept file objects, bytes-like objects and
# arbitrary iterables yielding lines.
# - An extensive test suite :-)
# TODO:
# - return structured data (GroupInfo etc.) everywhere
# - support HDR
# Imports
import re
import socket
import collections
import datetime
import warnings
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from email.header import decode_header as _email_decode_header
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["NNTP",
"NNTPReplyError", "NNTPTemporaryError", "NNTPPermanentError",
"NNTPProtocolError", "NNTPDataError",
"decode_header",
]
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
"""Base class for all nntplib exceptions"""
def __init__(self, *args):
Exception.__init__(self, *args)
try:
self.response = args[0]
except IndexError:
self.response = 'No response given'
class NNTPReplyError(NNTPError):
"""Unexpected [123]xx reply"""
pass
class NNTPTemporaryError(NNTPError):
"""4xx errors"""
pass
class NNTPPermanentError(NNTPError):
"""5xx errors"""
pass
class NNTPProtocolError(NNTPError):
"""Response does not begin with [1-5]"""
pass
class NNTPDataError(NNTPError):
"""Error in response data"""
pass
# Standard port used by NNTP servers
NNTP_PORT = 119
NNTP_SSL_PORT = 563
# Response numbers that are followed by additional text (e.g. article)
_LONGRESP = {
'100', # HELP
'101', # CAPABILITIES
'211', # LISTGROUP (also not multi-line with GROUP)
'215', # LIST
'220', # ARTICLE
'221', # HEAD, XHDR
'222', # BODY
'224', # OVER, XOVER
'225', # HDR
'230', # NEWNEWS
'231', # NEWGROUPS
'282', # XGTITLE
}
# Default decoded value for LIST OVERVIEW.FMT if not supported
_DEFAULT_OVERVIEW_FMT = [
"subject", "from", "date", "message-id", "references", ":bytes", ":lines"]
# Alternative names allowed in LIST OVERVIEW.FMT response
_OVERVIEW_FMT_ALTERNATIVES = {
'bytes': ':bytes',
'lines': ':lines',
}
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
_CRLF = b'\r\n'
GroupInfo = collections.namedtuple('GroupInfo',
['group', 'last', 'first', 'flag'])
ArticleInfo = collections.namedtuple('ArticleInfo',
['number', 'message_id', 'lines'])
# Helper function(s)
def decode_header(header_str):
"""Takes an unicode string representing a munged header value
and decodes it as a (possibly non-ASCII) readable value."""
parts = []
for v, enc in _email_decode_header(header_str):
if isinstance(v, bytes):
parts.append(v.decode(enc or 'ascii'))
else:
parts.append(v)
return ' '.join(parts)
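# Hedged usage sketch (the encoded-word below is illustrative only):
#
#   >>> decode_header('=?utf-8?q?D=C3=A9j=C3=A0_vu?=')
#   'Déjà vu'
#
# Plain (unencoded) parts pass through unchanged and the parts are joined with spaces.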
def _parse_overview_fmt(lines):
"""Parse a list of string representing the response to LIST OVERVIEW.FMT
and return a list of header/metadata names.
Raises NNTPDataError if the response is not compliant
(cf. RFC 3977, section 8.4)."""
fmt = []
for line in lines:
if line[0] == ':':
# Metadata name (e.g. ":bytes")
name, _, suffix = line[1:].partition(':')
name = ':' + name
else:
# Header name (e.g. "Subject:" or "Xref:full")
name, _, suffix = line.partition(':')
name = name.lower()
name = _OVERVIEW_FMT_ALTERNATIVES.get(name, name)
# Should we do something with the suffix?
fmt.append(name)
defaults = _DEFAULT_OVERVIEW_FMT
if len(fmt) < len(defaults):
raise NNTPDataError("LIST OVERVIEW.FMT response too short")
if fmt[:len(defaults)] != defaults:
raise NNTPDataError("LIST OVERVIEW.FMT redefines default fields")
return fmt
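# Hedged illustration: a compliant LIST OVERVIEW.FMT response such as
#   ['Subject:', 'From:', 'Date:', 'Message-ID:', 'References:', ':bytes', ':lines', 'Xref:full']
# parses to
#   ['subject', 'from', 'date', 'message-id', 'references', ':bytes', ':lines', 'xref']
# A response shorter than the seven default fields raises NNTPDataError.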
def _parse_overview(lines, fmt, data_process_func=None):
"""Parse the response to a OVER or XOVER command according to the
overview format `fmt`."""
n_defaults = len(_DEFAULT_OVERVIEW_FMT)
overview = []
for line in lines:
fields = {}
article_number, *tokens = line.split('\t')
article_number = int(article_number)
for i, token in enumerate(tokens):
if i >= len(fmt):
# XXX should we raise an error? Some servers might not
# support LIST OVERVIEW.FMT and still return additional
# headers.
continue
field_name = fmt[i]
is_metadata = field_name.startswith(':')
if i >= n_defaults and not is_metadata:
# Non-default header names are included in full in the response
# (unless the field is totally empty)
h = field_name + ": "
if token and token[:len(h)].lower() != h:
raise NNTPDataError("OVER/XOVER response doesn't include "
"names of additional headers")
token = token[len(h):] if token else None
fields[fmt[i]] = token
overview.append((article_number, fields))
return overview
def _parse_datetime(date_str, time_str=None):
"""Parse a pair of (date, time) strings, and return a datetime object.
If only the date is given, it is assumed to be date and time
concatenated together (e.g. response to the DATE command).
"""
if time_str is None:
time_str = date_str[-6:]
date_str = date_str[:-6]
hours = int(time_str[:2])
minutes = int(time_str[2:4])
seconds = int(time_str[4:])
year = int(date_str[:-4])
month = int(date_str[-4:-2])
day = int(date_str[-2:])
# RFC 3977 doesn't say how to interpret 2-char years. Assume that
# there are no dates before 1970 on Usenet.
if year < 70:
year += 2000
elif year < 100:
year += 1900
return datetime.datetime(year, month, day, hours, minutes, seconds)
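# Worked example: the DATE-style payload '20130623135624' parses to
# datetime.datetime(2013, 6, 23, 13, 56, 24); a legacy two-digit year such as '990623'
# (with time '135624') is mapped to 1999, and '130623' to 2013.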
def _unparse_datetime(dt, legacy=False):
"""Format a date or datetime object as a pair of (date, time) strings
in the format required by the NEWNEWS and NEWGROUPS commands. If a
date object is passed, the time is assumed to be midnight (00h00).
The returned representation depends on the legacy flag:
* if legacy is False (the default):
date has the YYYYMMDD format and time the HHMMSS format
* if legacy is True:
date has the YYMMDD format and time the HHMMSS format.
RFC 3977 compliant servers should understand both formats; therefore,
legacy is only needed when talking to old servers.
"""
if not isinstance(dt, datetime.datetime):
time_str = "000000"
else:
time_str = "{0.hour:02d}{0.minute:02d}{0.second:02d}".format(dt)
y = dt.year
if legacy:
y = y % 100
date_str = "{0:02d}{1.month:02d}{1.day:02d}".format(y, dt)
else:
date_str = "{0:04d}{1.month:02d}{1.day:02d}".format(y, dt)
return date_str, time_str
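# Worked example: datetime.date(2015, 7, 6) becomes ('20150706', '000000') by default and
# ('150706', '000000') with legacy=True; a full datetime keeps its own HHMMSS time part.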
if _have_ssl:
def _encrypt_on(sock, context):
"""Wrap a socket in SSL/TLS. Arguments:
- sock: Socket to wrap
- context: SSL context to use for the encrypted connection
Returns:
- sock: New, encrypted socket.
"""
# Generate a default SSL context if none was passed.
if context is None:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
# SSLv2 considered harmful.
context.options |= ssl.OP_NO_SSLv2
return context.wrap_socket(sock)
# The classes themselves
class _NNTPBase:
# UTF-8 is the character set for all NNTP commands and responses: they
# are automatically encoded (when sending) and decoded (when receiving)
# by this class.
# However, some multi-line data blocks can contain arbitrary bytes (for
# example, latin-1 or utf-16 data in the body of a message). Commands
# taking (POST, IHAVE) or returning (HEAD, BODY, ARTICLE) raw message
# data will therefore only accept and produce bytes objects.
# Furthermore, since there could be non-compliant servers out there,
# we use 'surrogateescape' as the error handler for fault tolerance
# and easy round-tripping. This could be useful for some applications
# (e.g. NNTP gateways).
encoding = 'utf-8'
errors = 'surrogateescape'
def __init__(self, file, host,
readermode=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Initialize an instance. Arguments:
- file: file-like object (open for read/write in binary mode)
- host: hostname of the server
- readermode: if true, send 'mode reader' command after
connecting.
- timeout: timeout (in seconds) used for socket connections
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.file = file
self.debugging = 0
self.welcome = self._getresp()
# 'MODE READER' is sometimes necessary to enable 'reader' mode.
# However, the order in which 'MODE READER' and 'AUTHINFO' need to
# arrive differs between some NNTP servers. If _setreadermode() fails
# with an authorization failed error, it will set this to True;
# the login() routine will interpret that as a request to try again
# after performing its normal function.
self.readermode_afterauth = False
if readermode:
self._setreadermode()
# RFC 4642 2.2.2: Both the client and the server MUST know if there is
# a TLS session active. A client MUST NOT attempt to start a TLS
# session if a TLS session is already active.
self.tls_on = False
# Inquire about capabilities (RFC 3977).
self._caps = None
self.getcapabilities()
# Log in and encryption setup order is left to subclasses.
self.authenticated = False
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
if it 201, posting is not allowed."""
if self.debugging: print('*welcome*', repr(self.welcome))
return self.welcome
def getcapabilities(self):
"""Get the server capabilities, as read by __init__().
If the CAPABILITIES command is not supported, an empty dict is
returned."""
if self._caps is None:
self.nntp_version = 1
self.nntp_implementation = None
try:
resp, caps = self.capabilities()
except NNTPPermanentError:
# Server doesn't support capabilities
self._caps = {}
else:
self._caps = caps
if 'VERSION' in caps:
# The server can advertise several supported versions,
# choose the highest.
self.nntp_version = max(map(int, caps['VERSION']))
if 'IMPLEMENTATION' in caps:
self.nntp_implementation = ' '.join(caps['IMPLEMENTATION'])
return self._caps
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
def _putline(self, line):
"""Internal: send one line to the server, appending CRLF.
The `line` must be a bytes-like object."""
line = line + _CRLF
if self.debugging > 1: print('*put*', repr(line))
self.file.write(line)
self.file.flush()
def _putcmd(self, line):
"""Internal: send one command to the server (through _putline()).
The `line` must be a unicode string."""
if self.debugging: print('*cmd*', repr(line))
line = line.encode(self.encoding, self.errors)
self._putline(line)
def _getline(self, strip_crlf=True):
"""Internal: return one line from the server, stripping _CRLF.
Raise EOFError if the connection is closed.
Returns a bytes object."""
line = self.file.readline()
if self.debugging > 1:
print('*get*', repr(line))
if not line: raise EOFError
if strip_crlf:
if line[-2:] == _CRLF:
line = line[:-2]
elif line[-1:] in _CRLF:
line = line[:-1]
return line
def _getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error.
Returns a unicode string."""
resp = self._getline()
if self.debugging: print('*resp*', repr(resp))
resp = resp.decode(self.encoding, self.errors)
c = resp[:1]
if c == '4':
raise NNTPTemporaryError(resp)
if c == '5':
raise NNTPPermanentError(resp)
if c not in '123':
raise NNTPProtocolError(resp)
return resp
def _getlongresp(self, file=None):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error.
Returns a (response, lines) tuple where `response` is a unicode
string and `lines` is a list of bytes objects.
If `file` is a file-like object, it must be open in binary mode.
"""
openedFile = None
try:
# If a string was passed then open a file with that name
if isinstance(file, (str, bytes)):
openedFile = file = open(file, "wb")
resp = self._getresp()
if resp[:3] not in _LONGRESP:
raise NNTPReplyError(resp)
lines = []
if file is not None:
# XXX lines = None instead?
terminators = (b'.' + _CRLF, b'.\n')
while 1:
line = self._getline(False)
if line in terminators:
break
if line.startswith(b'..'):
line = line[1:]
file.write(line)
else:
terminator = b'.'
while 1:
line = self._getline()
if line == terminator:
break
if line.startswith(b'..'):
line = line[1:]
lines.append(line)
finally:
# If this method created the file, then it must close it
if openedFile:
openedFile.close()
return resp, lines
def _shortcmd(self, line):
"""Internal: send a command and get the response.
Same return value as _getresp()."""
self._putcmd(line)
return self._getresp()
def _longcmd(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same return value as _getlongresp()."""
self._putcmd(line)
return self._getlongresp(file)
def _longcmdstring(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same as _longcmd() and _getlongresp(), except that the returned `lines`
are unicode strings rather than bytes objects.
"""
self._putcmd(line)
resp, list = self._getlongresp(file)
return resp, [line.decode(self.encoding, self.errors)
for line in list]
def _getoverviewfmt(self):
"""Internal: get the overview format. Queries the server if not
already done, else returns the cached value."""
try:
return self._cachedoverviewfmt
except AttributeError:
pass
try:
resp, lines = self._longcmdstring("LIST OVERVIEW.FMT")
except NNTPPermanentError:
# Not supported by server?
fmt = _DEFAULT_OVERVIEW_FMT[:]
else:
fmt = _parse_overview_fmt(lines)
self._cachedoverviewfmt = fmt
return fmt
def _grouplist(self, lines):
# Parse lines into "group last first flag"
return [GroupInfo(*line.split()) for line in lines]
def capabilities(self):
"""Process a CAPABILITIES command. Not supported by all servers.
Return:
- resp: server response if successful
- caps: a dictionary mapping capability names to lists of tokens
(for example {'VERSION': ['2'], 'OVER': [], 'LIST': ['ACTIVE', 'HEADERS']})
"""
caps = {}
resp, lines = self._longcmdstring("CAPABILITIES")
for line in lines:
name, *tokens = line.split()
caps[name] = tokens
return resp, caps
def newgroups(self, date, *, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of newsgroup names
"""
if not isinstance(date, (datetime.date, datetime.datetime)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWGROUPS {0} {1}'.format(date_str, time_str)
resp, lines = self._longcmdstring(cmd, file)
return resp, self._grouplist(lines)
def newnews(self, group, date, *, file=None):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of message ids
"""
if not isinstance(date, (datetime.date, datetime.datetime)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWNEWS {0} {1} {2}'.format(group, date_str, time_str)
return self._longcmdstring(cmd, file)
def list(self, group_pattern=None, *, file=None):
"""Process a LIST or LIST ACTIVE command. Arguments:
- group_pattern: a pattern indicating which groups to query
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)
"""
if group_pattern is not None:
command = 'LIST ACTIVE ' + group_pattern
else:
command = 'LIST'
resp, lines = self._longcmdstring(command, file)
return resp, self._grouplist(lines)
def _getdescriptions(self, group_pattern, return_all):
line_pat = re.compile('^(?P<group>[^ \t]+)[ \t]+(.*)$')
# Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
resp, lines = self._longcmdstring('LIST NEWSGROUPS ' + group_pattern)
if not resp.startswith('215'):
# Now the deprecated XGTITLE. This either raises an error
# or succeeds with the same output structure as LIST
# NEWSGROUPS.
resp, lines = self._longcmdstring('XGTITLE ' + group_pattern)
groups = {}
for raw_line in lines:
match = line_pat.search(raw_line.strip())
if match:
name, desc = match.group(1, 2)
if not return_all:
return desc
groups[name] = desc
if return_all:
return resp, groups
else:
# Nothing found
return ''
def description(self, group):
"""Get a description for a single group. If more than one
group matches ('group' is a pattern), return the first. If no
group matches, return an empty string.
This elides the response code from the server, since it can
only be '215' or '285' (for xgtitle) anyway. If the response
code is needed, use the 'descriptions' method.
NOTE: This neither checks for a wildcard in 'group' nor does
it check whether the group actually exists."""
return self._getdescriptions(group, False)
def descriptions(self, group_pattern):
"""Get descriptions for a range of groups."""
return self._getdescriptions(group_pattern, True)
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles
- first: first article number
- last: last article number
- name: the group name
"""
resp = self._shortcmd('GROUP ' + name)
if not resp.startswith('211'):
raise NNTPReplyError(resp)
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, int(count), int(first), int(last), name
def help(self, *, file=None):
"""Process a HELP command. Argument:
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of strings returned by the server in response to the
HELP command
"""
return self._longcmdstring('HELP', file)
def _statparse(self, resp):
"""Internal: parse the response line of a STAT, NEXT, LAST,
ARTICLE, HEAD or BODY command."""
if not resp.startswith('22'):
raise NNTPReplyError(resp)
words = resp.split()
art_num = int(words[1])
message_id = words[2]
return resp, art_num, message_id
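# Hedged illustration (response text modelled on RFC 3977 examples): a reply such as
# '223 3000234 <45223423@example.com> retrieved' parses to
# (resp, 3000234, '<45223423@example.com>').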
def _statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self._shortcmd(line)
return self._statparse(resp)
def stat(self, message_spec=None):
"""Process a STAT command. Argument:
- message_spec: article number or message id (if not specified,
the current article is selected)
Returns:
- resp: server response if successful
- art_num: the article number
- message_id: the message id
"""
if message_spec:
return self._statcmd('STAT {0}'.format(message_spec))
else:
return self._statcmd('STAT')
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self._statcmd('NEXT')
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self._statcmd('LAST')
def _artcmd(self, line, file=None):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, lines = self._longcmd(line, file)
resp, art_num, message_id = self._statparse(resp)
return resp, ArticleInfo(art_num, message_id, lines)
def head(self, message_spec=None, *, file=None):
"""Process a HEAD command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the headers in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of header lines)
"""
if message_spec is not None:
cmd = 'HEAD {0}'.format(message_spec)
else:
cmd = 'HEAD'
return self._artcmd(cmd, file)
def body(self, message_spec=None, *, file=None):
"""Process a BODY command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the body in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of body lines)
"""
if message_spec is not None:
cmd = 'BODY {0}'.format(message_spec)
else:
cmd = 'BODY'
return self._artcmd(cmd, file)
def article(self, message_spec=None, *, file=None):
"""Process an ARTICLE command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the article in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of article lines)
"""
if message_spec is not None:
cmd = 'ARTICLE {0}'.format(message_spec)
else:
cmd = 'ARTICLE'
return self._artcmd(cmd, file)
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful
"""
return self._shortcmd('SLAVE')
def xhdr(self, hdr, str, *, file=None):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (nr, value) strings
"""
pat = re.compile('^([0-9]+) ?(.*)\n?')
resp, lines = self._longcmdstring('XHDR {0} {1}'.format(hdr, str), file)
def remove_number(line):
m = pat.match(line)
return m.group(1, 2) if m else line
return resp, [remove_number(line) for line in lines]
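# Hedged illustration (values made up): with lines like '3000234 I am just a test article',
# the returned list contains ('3000234', 'I am just a test article'); lines that do not
# match the pattern are returned unchanged.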
def xover(self, start, end, *, file=None):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of dicts containing the response fields
"""
resp, lines = self._longcmdstring('XOVER {0}-{1}'.format(start, end),
file)
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
def over(self, message_spec, *, file=None):
"""Process an OVER command. If the command isn't supported, fall
back to XOVER. Arguments:
- message_spec:
- either a message id, indicating the article to fetch
information about
- or a (start, end) tuple, indicating a range of article numbers;
if end is None, information up to the newest message will be
retrieved
- or None, indicating the current article number must be used
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of dicts containing the response fields
NOTE: the "message id" form isn't supported by XOVER
"""
cmd = 'OVER' if 'OVER' in self._caps else 'XOVER'
if isinstance(message_spec, (tuple, list)):
start, end = message_spec
cmd += ' {0}-{1}'.format(start, end or '')
elif message_spec is not None:
cmd = cmd + ' ' + message_spec
resp, lines = self._longcmdstring(cmd, file)
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
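# Hedged usage sketch (article numbers and message-id are made up):
#   s.over((3000234, 3000240))         # sends 'OVER 3000234-3000240' (or XOVER)
#   s.over((3000234, None))            # sends 'OVER 3000234-' (up to the newest article)
#   s.over('<45223423@example.com>')   # message-id form; requires real OVER support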
def xgtitle(self, group, *, file=None):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
warnings.warn("The XGTITLE extension is not actively used, "
"use descriptions() instead",
PendingDeprecationWarning, 2)
line_pat = re.compile('^([^ \t]+)[ \t]+(.*)$')
resp, raw_lines = self._longcmdstring('XGTITLE ' + group, file)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self, id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article
"""
warnings.warn("The XPATH extension is not actively used",
PendingDeprecationWarning, 2)
resp = self._shortcmd('XPATH {0}'.format(id))
if not resp.startswith('223'):
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp)
else:
return resp, path
def date(self):
"""Process the DATE command.
Returns:
- resp: server response if successful
- date: datetime object
"""
resp = self._shortcmd("DATE")
if not resp.startswith('111'):
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
date = elem[1]
if len(date) != 14:
raise NNTPDataError(resp)
return resp, _parse_datetime(date, None)
def _post(self, command, f):
resp = self._shortcmd(command)
# Raises a specific exception if posting is not allowed
if not resp.startswith('3'):
raise NNTPReplyError(resp)
if isinstance(f, (bytes, bytearray)):
f = f.splitlines()
# We don't use _putline() because:
# - we don't want additional CRLF if the file or iterable is already
# in the right format
# - we don't want a spurious flush() after each line is written
for line in f:
if not line.endswith(_CRLF):
line = line.rstrip(b"\r\n") + _CRLF
if line.startswith(b'.'):
line = b'.' + line
self.file.write(line)
self.file.write(b".\r\n")
self.file.flush()
return self._getresp()
def post(self, data):
"""Process a POST command. Arguments:
- data: bytes object, iterable or file containing the article
Returns:
- resp: server response if successful"""
return self._post('POST', data)
def ihave(self, message_id, data):
"""Process an IHAVE command. Arguments:
- message_id: message-id of the article
- data: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
return self._post('IHAVE {0}'.format(message_id), data)
def _close(self):
self.file.close()
del self.file
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
try:
resp = self._shortcmd('QUIT')
finally:
self._close()
return resp
def login(self, user=None, password=None, usenetrc=True):
if self.authenticated:
raise ValueError("Already logged in.")
if not user and not usenetrc:
raise ValueError(
"At least one of `user` and `usenetrc` must be specified")
# If no login/password was specified but netrc was requested,
# try to get them from ~/.netrc
# Presume that if .netrc has an entry, NNRP authentication is required.
try:
if usenetrc and not user:
import netrc
credentials = netrc.netrc()
auth = credentials.authenticators(self.host)
if auth:
user = auth[0]
password = auth[2]
except IOError:
pass
# Perform NNTP authentication if needed.
if not user:
return
resp = self._shortcmd('authinfo user ' + user)
if resp.startswith('381'):
if not password:
raise NNTPReplyError(resp)
else:
resp = self._shortcmd('authinfo pass ' + password)
if not resp.startswith('281'):
raise NNTPPermanentError(resp)
# Attempt to send mode reader if it was requested after login.
if self.readermode_afterauth:
self._setreadermode()
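# Hedged illustration of a ~/.netrc entry the block above would pick up
# (hostname and credentials are made up):
#   machine news.example.com login alice password s3cret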
def _setreadermode(self):
try:
self.welcome = self._shortcmd('mode reader')
except NNTPPermanentError:
# Error 5xx, probably 'not implemented'
pass
except NNTPTemporaryError as e:
if e.response.startswith('480'):
# Need authorization before 'mode reader'
self.readermode_afterauth = True
else:
raise
if _have_ssl:
def starttls(self, context=None):
"""Process a STARTTLS command. Arguments:
- context: SSL context to use for the encrypted connection
"""
# Per RFC 4642, STARTTLS MUST NOT be sent after authentication or if
# a TLS session already exists.
if self.tls_on:
raise ValueError("TLS is already enabled.")
if self.authenticated:
raise ValueError("TLS cannot be started after authentication.")
resp = self._shortcmd('STARTTLS')
if resp.startswith('382'):
self.file.close()
self.sock = _encrypt_on(self.sock, context)
self.file = self.sock.makefile("rwb")
self.tls_on = True
# Capabilities may change after TLS starts up, so ask for them
# again.
self._caps = None
self.getcapabilities()
else:
raise NNTPError("TLS failed to start.")
class NNTP(_NNTPBase):
def __init__(self, host, port=NNTP_PORT, user=None, password=None,
readermode=None, usenetrc=False,
timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Initialize an instance. Arguments:
- host: hostname to connect to
- port: port to connect to (default the standard NNTP port)
- user: username to authenticate with
- password: password to use with username
- readermode: if true, send 'mode reader' command after
connecting.
- usenetrc: allow loading username and password from ~/.netrc file
if not specified explicitly
- timeout: timeout (in seconds) used for socket connections
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.port = port
self.sock = socket.create_connection((host, port), timeout)
file = self.sock.makefile("rwb")
_NNTPBase.__init__(self, file, host,
readermode, timeout)
if user or usenetrc:
self.login(user, password, usenetrc)
def _close(self):
try:
_NNTPBase._close(self)
finally:
self.sock.close()
if _have_ssl:
class NNTP_SSL(_NNTPBase):
def __init__(self, host, port=NNTP_SSL_PORT,
user=None, password=None, ssl_context=None,
readermode=None, usenetrc=False,
timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""This works identically to NNTP.__init__, except for the change
in default port and the `ssl_context` argument for SSL connections.
"""
self.sock = socket.create_connection((host, port), timeout)
self.sock = _encrypt_on(self.sock, ssl_context)
file = self.sock.makefile("rwb")
_NNTPBase.__init__(self, file, host,
readermode=readermode, timeout=timeout)
if user or usenetrc:
self.login(user, password, usenetrc)
def _close(self):
try:
_NNTPBase._close(self)
finally:
self.sock.close()
__all__.append("NNTP_SSL")
# Test retrieval when run as a script.
if __name__ == '__main__':
import argparse
from email.utils import parsedate
parser = argparse.ArgumentParser(description="""\
nntplib built-in demo - display the latest articles in a newsgroup""")
parser.add_argument('-g', '--group', default='gmane.comp.python.general',
help='group to fetch messages from (default: %(default)s)')
parser.add_argument('-s', '--server', default='news.gmane.org',
help='NNTP server hostname (default: %(default)s)')
parser.add_argument('-p', '--port', default=-1, type=int,
help='NNTP port number (default: %s / %s)' % (NNTP_PORT, NNTP_SSL_PORT))
parser.add_argument('-n', '--nb-articles', default=10, type=int,
help='number of articles to fetch (default: %(default)s)')
parser.add_argument('-S', '--ssl', action='store_true', default=False,
help='use NNTP over SSL')
args = parser.parse_args()
port = args.port
if not args.ssl:
if port == -1:
port = NNTP_PORT
s = NNTP(host=args.server, port=port)
else:
if port == -1:
port = NNTP_SSL_PORT
s = NNTP_SSL(host=args.server, port=port)
caps = s.getcapabilities()
if 'STARTTLS' in caps:
s.starttls()
resp, count, first, last, name = s.group(args.group)
print('Group', name, 'has', count, 'articles, range', first, 'to', last)
def cut(s, lim):
if len(s) > lim:
s = s[:lim - 4] + "..."
return s
first = str(int(last) - args.nb_articles + 1)
resp, overviews = s.xover(first, last)
for artnum, over in overviews:
author = decode_header(over['from']).split('<', 1)[0]
subject = decode_header(over['subject'])
lines = int(over[':lines'])
print("{:7} {:20} {:42} ({})".format(
artnum, cut(author, 20), cut(subject, 42), lines)
)
s.quit()
|
invisiblek/python-for-android
|
python3-alpha/python3-src/Lib/nntplib.py
|
Python
|
apache-2.0
| 41,473
|
[
"Brian"
] |
451bb6f4770b4afd8bfacec3fad1f0e4233534977b3f3fd714a3fd1d97813e95
|
"""Tests for items views."""
import copy
import json
import os
import tempfile
import textwrap
from uuid import uuid4
from mock import patch
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.conf import settings
from contentstore.tests.utils import CourseTestCase, mock_requests_get
from cache_toolbox.core import del_cached_content
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
from opaque_keys.edx.keys import UsageKey
from xmodule.video_module import transcripts_utils
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class BaseTranscripts(CourseTestCase):
"""Base test class for transcripts tests."""
def clear_subs_content(self):
"""Remove, if transcripts content exists."""
for youtube_id in self.get_youtube_ids().values():
filename = 'subs_{0}.srt.sjson'.format(youtube_id)
content_location = StaticContent.compute_location(self.course.id, filename)
try:
content = contentstore().find(content_location)
contentstore().delete(content.get_id())
except NotFoundError:
pass
def setUp(self):
"""Create initial data."""
super(BaseTranscripts, self).setUp()
# Add video module
data = {
'parent_locator': unicode(self.course.location),
'category': 'video',
'type': 'video'
}
resp = self.client.ajax_post('/xblock/', data)
self.assertEqual(resp.status_code, 200)
self.video_usage_key = self._get_usage_key(resp)
self.item = modulestore().get_item(self.video_usage_key)
# hI10vDNYz4M - valid Youtube ID with transcripts.
# JMD_ifUUfsU, AKqURZnYqpk, DYpADpL7jAY - valid Youtube IDs without transcripts.
self.item.data = '<video youtube="0.75:JMD_ifUUfsU,1.0:hI10vDNYz4M,1.25:AKqURZnYqpk,1.50:DYpADpL7jAY" />'
modulestore().update_item(self.item, self.user.id)
self.item = modulestore().get_item(self.video_usage_key)
# Remove all transcripts for current module.
self.clear_subs_content()
def _get_usage_key(self, resp):
""" Returns the usage key from the response returned by a create operation. """
usage_key_string = json.loads(resp.content).get('locator')
return UsageKey.from_string(usage_key_string)
def get_youtube_ids(self):
"""Return youtube speeds and ids."""
item = modulestore().get_item(self.video_usage_key)
return {
0.75: item.youtube_id_0_75,
1: item.youtube_id_1_0,
1.25: item.youtube_id_1_25,
1.5: item.youtube_id_1_5
}
class TestUploadTranscripts(BaseTranscripts):
"""Tests for '/transcripts/upload' url."""
def setUp(self):
"""Create initial data."""
super(TestUploadTranscripts, self).setUp()
self.good_srt_file = tempfile.NamedTemporaryFile(suffix='.srt')
self.good_srt_file.write(textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Elephant's Dream
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
"""))
self.good_srt_file.seek(0)
self.bad_data_srt_file = tempfile.NamedTemporaryFile(suffix='.srt')
self.bad_data_srt_file.write('Some BAD data')
self.bad_data_srt_file.seek(0)
self.bad_name_srt_file = tempfile.NamedTemporaryFile(suffix='.BAD')
self.bad_name_srt_file.write(textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Elephant's Dream
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
"""))
self.bad_name_srt_file.seek(0)
self.ufeff_srt_file = tempfile.NamedTemporaryFile(suffix='.srt')
def test_success_video_module_source_subs_uploading(self):
self.item.data = textwrap.dedent("""
<video youtube="">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""")
modulestore().update_item(self.item, self.user.id)
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
resp = self.client.post(link, {
'locator': self.video_usage_key,
'transcript-file': self.good_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 200)
self.assertEqual(json.loads(resp.content).get('status'), 'Success')
item = modulestore().get_item(self.video_usage_key)
self.assertEqual(item.sub, filename)
content_location = StaticContent.compute_location(
self.course.id, 'subs_{0}.srt.sjson'.format(filename))
self.assertTrue(contentstore().find(content_location))
def test_fail_data_without_id(self):
link = reverse('upload_transcripts')
resp = self.client.post(link, {'transcript-file': self.good_srt_file})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), 'POST data without "locator" form data.')
def test_fail_data_without_file(self):
link = reverse('upload_transcripts')
resp = self.client.post(link, {'locator': self.video_usage_key})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), 'POST data without "file" form data.')
def test_fail_data_with_bad_locator(self):
# Test for raising `InvalidLocationError` exception.
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
resp = self.client.post(link, {
'locator': 'BAD_LOCATOR',
'transcript-file': self.good_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
# Test for raising `ItemNotFoundError` exception.
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
resp = self.client.post(link, {
'locator': '{0}_{1}'.format(self.video_usage_key, 'BAD_LOCATOR'),
'transcript-file': self.good_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
def test_fail_for_non_video_module(self):
# non_video module: setup
data = {
'parent_locator': unicode(self.course.location),
'category': 'non_video',
'type': 'non_video'
}
resp = self.client.ajax_post('/xblock/', data)
usage_key = self._get_usage_key(resp)
item = modulestore().get_item(usage_key)
item.data = '<non_video youtube="0.75:JMD_ifUUfsU,1.0:hI10vDNYz4M" />'
modulestore().update_item(item, self.user.id)
# non_video module: testing
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
resp = self.client.post(link, {
'locator': unicode(usage_key),
'transcript-file': self.good_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), 'Transcripts are supported only for "video" modules.')
def test_fail_bad_xml(self):
self.item.data = '<<<video youtube="0.75:JMD_ifUUfsU,1.25:AKqURZnYqpk,1.50:DYpADpL7jAY" />'
modulestore().update_item(self.item, self.user.id)
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.good_srt_file.name))[0]
resp = self.client.post(link, {
'locator': unicode(self.video_usage_key),
'transcript-file': self.good_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 400)
# incorrect xml produces incorrect item category error
self.assertEqual(json.loads(resp.content).get('status'), 'Transcripts are supported only for "video" modules.')
def test_fail_bad_data_srt_file(self):
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.bad_data_srt_file.name))[0]
resp = self.client.post(link, {
'locator': unicode(self.video_usage_key),
'transcript-file': self.bad_data_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), 'Something wrong with SubRip transcripts file during parsing.')
def test_fail_bad_name_srt_file(self):
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.bad_name_srt_file.name))[0]
resp = self.client.post(link, {
'locator': unicode(self.video_usage_key),
'transcript-file': self.bad_name_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), 'We support only SubRip (*.srt) transcripts format.')
def test_undefined_file_extension(self):
srt_file = tempfile.NamedTemporaryFile(suffix='')
srt_file.write(textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Elephant's Dream
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
"""))
srt_file.seek(0)
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(srt_file.name))[0]
resp = self.client.post(link, {
'locator': self.video_usage_key,
'transcript-file': srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), 'Undefined file extension.')
def test_subs_uploading_with_byte_order_mark(self):
"""
Test uploading subs containing BOM(Byte Order Mark), e.g. U+FEFF
"""
filedata = textwrap.dedent("""
1
00:00:10,500 --> 00:00:13,000
Test ufeff characters
2
00:00:15,000 --> 00:00:18,000
At the left we can see...
""").encode('utf-8-sig')
# Verify that ufeff character is in filedata.
self.assertIn("ufeff", filedata)
self.ufeff_srt_file.write(filedata)
self.ufeff_srt_file.seek(0)
link = reverse('upload_transcripts')
filename = os.path.splitext(os.path.basename(self.ufeff_srt_file.name))[0]
resp = self.client.post(link, {
'locator': self.video_usage_key,
'transcript-file': self.ufeff_srt_file,
'video_list': json.dumps([{
'type': 'html5',
'video': filename,
'mode': 'mp4',
}])
})
self.assertEqual(resp.status_code, 200)
content_location = StaticContent.compute_location(
self.course.id, 'subs_{0}.srt.sjson'.format(filename))
self.assertTrue(contentstore().find(content_location))
subs_text = json.loads(contentstore().find(content_location).data).get('text')
self.assertIn("Test ufeff characters", subs_text)
def tearDown(self):
super(TestUploadTranscripts, self).tearDown()
self.good_srt_file.close()
self.bad_data_srt_file.close()
self.bad_name_srt_file.close()
self.ufeff_srt_file.close()
class TestDownloadTranscripts(BaseTranscripts):
"""Tests for '/transcripts/download' url."""
def save_subs_to_store(self, subs, subs_id):
"""Save transcripts into `StaticContent`."""
filedata = json.dumps(subs, indent=2)
mime_type = 'application/json'
filename = 'subs_{0}.srt.sjson'.format(subs_id)
content_location = StaticContent.compute_location(self.course.id, filename)
content = StaticContent(content_location, filename, mime_type, filedata)
contentstore().save(content)
del_cached_content(content_location)
return content_location
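# Note: the `subs` dictionaries used throughout these tests follow the stored sjson layout:
# parallel 'start'/'end' lists in milliseconds plus a 'text' list, saved as
# subs_<subs_id>.srt.sjson.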
def test_success_download_youtube(self):
self.item.data = '<video youtube="1:JMD_ifUUfsU" />'
modulestore().update_item(self.item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, 'JMD_ifUUfsU')
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': self.video_usage_key, 'subs_id': "JMD_ifUUfsU"})
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content, """0\n00:00:00,100 --> 00:00:00,200\nsubs #1\n\n1\n00:00:00,200 --> 00:00:00,240\nsubs #2\n\n2\n00:00:00,240 --> 00:00:00,380\nsubs #3\n\n""")
def test_success_download_nonyoutube(self):
subs_id = str(uuid4())
self.item.data = textwrap.dedent("""
<video youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""".format(subs_id))
modulestore().update_item(self.item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, subs_id)
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': self.video_usage_key, 'subs_id': subs_id})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp.content,
'0\n00:00:00,100 --> 00:00:00,200\nsubs #1\n\n1\n00:00:00,200 --> '
'00:00:00,240\nsubs #2\n\n2\n00:00:00,240 --> 00:00:00,380\nsubs #3\n\n'
)
transcripts_utils.remove_subs_from_store(subs_id, self.item)
def test_fail_data_without_file(self):
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': ''})
self.assertEqual(resp.status_code, 404)
resp = self.client.get(link, {})
self.assertEqual(resp.status_code, 404)
def test_fail_data_with_bad_locator(self):
# Test for raising `InvalidLocationError` exception.
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': 'BAD_LOCATOR'})
self.assertEqual(resp.status_code, 404)
# Test for raising `ItemNotFoundError` exception.
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': '{0}_{1}'.format(self.video_usage_key, 'BAD_LOCATOR')})
self.assertEqual(resp.status_code, 404)
def test_fail_for_non_video_module(self):
# Video module: setup
data = {
'parent_locator': unicode(self.course.location),
'category': 'videoalpha',
'type': 'videoalpha'
}
resp = self.client.ajax_post('/xblock/', data)
usage_key = self._get_usage_key(resp)
subs_id = str(uuid4())
item = modulestore().get_item(usage_key)
item.data = textwrap.dedent("""
<videoalpha youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</videoalpha>
""".format(subs_id))
modulestore().update_item(item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, subs_id)
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': unicode(usage_key)})
self.assertEqual(resp.status_code, 404)
def test_fail_nonyoutube_subs_dont_exist(self):
self.item.data = textwrap.dedent("""
<video youtube="" sub="UNDEFINED">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""")
modulestore().update_item(self.item, self.user.id)
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': self.video_usage_key})
self.assertEqual(resp.status_code, 404)
def test_empty_youtube_attr_and_sub_attr(self):
self.item.data = textwrap.dedent("""
<video youtube="">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""")
modulestore().update_item(self.item, self.user.id)
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': self.video_usage_key})
self.assertEqual(resp.status_code, 404)
def test_fail_bad_sjson_subs(self):
subs_id = str(uuid4())
self.item.data = textwrap.dedent("""
<video youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""".format(subs_id))
modulestore().update_item(self.item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1'
]
}
self.save_subs_to_store(subs, 'JMD_ifUUfsU')
link = reverse('download_transcripts')
resp = self.client.get(link, {'locator': self.video_usage_key})
self.assertEqual(resp.status_code, 404)
class TestCheckTranscripts(BaseTranscripts):
"""Tests for '/transcripts/check' url."""
def save_subs_to_store(self, subs, subs_id):
"""Save transcripts into `StaticContent`."""
filedata = json.dumps(subs, indent=2)
mime_type = 'application/json'
filename = 'subs_{0}.srt.sjson'.format(subs_id)
content_location = StaticContent.compute_location(self.course.id, filename)
content = StaticContent(content_location, filename, mime_type, filedata)
contentstore().save(content)
del_cached_content(content_location)
return content_location
def test_success_download_nonyoutube(self):
subs_id = str(uuid4())
self.item.data = textwrap.dedent("""
<video youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</video>
""".format(subs_id))
modulestore().update_item(self.item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, subs_id)
data = {
'locator': unicode(self.video_usage_key),
'videos': [{
'type': 'html5',
'video': subs_id,
'mode': 'mp4',
}]
}
link = reverse('check_transcripts')
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 200)
self.assertDictEqual(
json.loads(resp.content),
{
u'status': u'Success',
u'subs': unicode(subs_id),
u'youtube_local': False,
u'is_youtube_mode': False,
u'youtube_server': False,
u'command': u'found',
u'current_item_subs': unicode(subs_id),
u'youtube_diff': True,
u'html5_local': [unicode(subs_id)],
u'html5_equal': False,
}
)
transcripts_utils.remove_subs_from_store(subs_id, self.item)
def test_check_youtube(self):
self.item.data = '<video youtube="1:JMD_ifUUfsU" />'
modulestore().update_item(self.item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, 'JMD_ifUUfsU')
link = reverse('check_transcripts')
data = {
'locator': unicode(self.video_usage_key),
'videos': [{
'type': 'youtube',
'video': 'JMD_ifUUfsU',
'mode': 'youtube',
}]
}
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 200)
self.assertDictEqual(
json.loads(resp.content),
{
u'status': u'Success',
u'subs': u'JMD_ifUUfsU',
u'youtube_local': True,
u'is_youtube_mode': True,
u'youtube_server': False,
u'command': u'found',
u'current_item_subs': None,
u'youtube_diff': True,
u'html5_local': [],
u'html5_equal': False,
}
)
@patch('xmodule.video_module.transcripts_utils.requests.get', side_effect=mock_requests_get)
def test_check_youtube_with_transcript_name(self, mock_get):
"""
Test that the transcripts are fetched correctly when the transcript name is set
"""
self.item.data = '<video youtube="good_id_2" />'
modulestore().update_item(self.item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, 'good_id_2')
link = reverse('check_transcripts')
data = {
'locator': unicode(self.video_usage_key),
'videos': [{
'type': 'youtube',
'video': 'good_id_2',
'mode': 'youtube',
}]
}
resp = self.client.get(link, {'data': json.dumps(data)})
mock_get.assert_any_call(
'http://video.google.com/timedtext',
params={'lang': 'en', 'v': 'good_id_2', 'name': 'Custom'}
)
self.assertEqual(resp.status_code, 200)
self.assertDictEqual(
json.loads(resp.content),
{
u'status': u'Success',
u'subs': u'good_id_2',
u'youtube_local': True,
u'is_youtube_mode': True,
u'youtube_server': True,
u'command': u'replace',
u'current_item_subs': None,
u'youtube_diff': True,
u'html5_local': [],
u'html5_equal': False,
}
)
def test_fail_data_without_id(self):
link = reverse('check_transcripts')
data = {
'locator': '',
'videos': [{
'type': '',
'video': '',
'mode': '',
}]
}
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
def test_fail_data_with_bad_locator(self):
# Test for raising `InvalidLocationError` exception.
link = reverse('check_transcripts')
data = {
'locator': '',
'videos': [{
'type': '',
'video': '',
'mode': '',
}]
}
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
# Test for raising `ItemNotFoundError` exception.
data = {
'locator': '{0}_{1}'.format(self.video_usage_key, 'BAD_LOCATOR'),
'videos': [{
'type': '',
'video': '',
'mode': '',
}]
}
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), "Can't find item by locator.")
def test_fail_for_non_video_module(self):
# Not video module: setup
data = {
'parent_locator': unicode(self.course.location),
'category': 'not_video',
'type': 'not_video'
}
resp = self.client.ajax_post('/xblock/', data)
usage_key = self._get_usage_key(resp)
subs_id = str(uuid4())
item = modulestore().get_item(usage_key)
item.data = textwrap.dedent("""
<not_video youtube="" sub="{}">
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.mp4"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.webm"/>
<source src="http://www.quirksmode.org/html5/videos/big_buck_bunny.ogv"/>
</not_video>
""".format(subs_id))
modulestore().update_item(item, self.user.id)
subs = {
'start': [100, 200, 240],
'end': [200, 240, 380],
'text': [
'subs #1',
'subs #2',
'subs #3'
]
}
self.save_subs_to_store(subs, subs_id)
data = {
'locator': unicode(usage_key),
'videos': [{
'type': '',
'video': '',
'mode': '',
}]
}
link = reverse('check_transcripts')
resp = self.client.get(link, {'data': json.dumps(data)})
self.assertEqual(resp.status_code, 400)
self.assertEqual(json.loads(resp.content).get('status'), 'Transcripts are supported only for "video" modules.')
|
waheedahmed/edx-platform
|
cms/djangoapps/contentstore/views/tests/test_transcripts.py
|
Python
|
agpl-3.0
| 29,107
|
[
"FEFF"
] |
cf62ed5e49d4836011a61e2bb569268798ecf1d33fef3b48ba97d1825ccc3307
|
from math import log
import numpy as np
from matplotlib import pyplot
from spm1d import rft1d
def here_manova1_single_node(Y, GROUP):
### assemble counts:
u = np.unique(GROUP)
nGroups = u.size
nResponses = Y.shape[0]
nComponents = Y.shape[1]
### create design matrix:
X = np.zeros((nResponses, nGroups))
ind0 = 0
for i,uu in enumerate(u):
n = (GROUP==uu).sum()
X[ind0:ind0+n, i] = 1
ind0 += n
### SS for original design:
Y,X = np.matrix(Y), np.matrix(X)
b = np.linalg.pinv(X)*Y
R = Y - X*b
R = R.T*R
### SS for reduced design:
X0 = np.matrix( np.ones(Y.shape[0]) ).T
b0 = np.linalg.pinv(X0)*Y
R0 = Y - X0*b0
R0 = R0.T*R0
### Wilks' lambda:
lam = np.linalg.det(R) / np.linalg.det(R0)
### test statistic:
N,p,k = float(nResponses), float(nComponents), float(nGroups)
x2 = -((N-1) - 0.5*(p+k)) * log(lam)
return x2
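# Note: lam above is Wilks' lambda, Lambda = det(R) / det(R0), the ratio of the
# determinants of the residual SSCP matrices for the full and reduced designs.
# The returned statistic uses Bartlett's approximation,
# x2 = -((N - 1) - (p + k)/2) * ln(Lambda),
# which is approximately chi-square distributed with p*(k-1) degrees of freedom
# (matching df = nComponents * (nGroups - 1) set below).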
def here_manova1(Y, GROUP):
nNodes = Y.shape[1]
X2 = [here_manova1_single_node(Y[:,i,:], GROUP) for i in range(nNodes)]
return np.array(X2)
def here_get_groups(nResponses):
GROUP = []
for i,n in enumerate(nResponses):
GROUP += [i]*n
return np.array(GROUP)
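# Example: here_get_groups((2, 3)) -> array([0, 0, 1, 1, 1]); with the
# nResponses below, the group labels are 0 (x40), 1 (x20) and 2 (x10).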
#(0) Set parameters:
np.random.seed(123456789)
nResponses = 40,20,10
nNodes = 101
nComponents = 2
FWHM = 15.0
W0 = np.eye(nComponents)
nIterations = 200
### derived parameters:
GROUP = here_get_groups(nResponses)
nGroups = len(nResponses)
nTotal = sum(nResponses)
df = nComponents * (nGroups-1)
#(1) Generate Gaussian 1D fields, compute test stat, store field maximum:
X2 = []
generator = rft1d.random.GeneratorMulti1D(nTotal, nNodes, nComponents, FWHM, W0)
for i in range(nIterations):
y = generator.generate_sample()
chi2 = here_manova1(y, GROUP)
X2.append( chi2.max() )
X2 = np.asarray(X2)
#(2) Compute survival function (SF) for the field maximum:
heights = np.linspace(10, 18, 21)
sf = np.array( [ (X2>h).mean() for h in heights] )
sfE = rft1d.chi2.sf(heights, df, nNodes, FWHM) #theoretical
sf0D = rft1d.chi2.sf0d(heights, df) #theoretical (0D)
#(3) Plot results:
pyplot.close('all')
ax = pyplot.axes()
ax.plot(heights, sf, 'o', label='Simulated')
ax.plot(heights, sfE, '-', label='Theoretical')
ax.plot(heights, sf0D, 'r-', label='Theoretical (0D)')
ax.set_xlabel('$u$', size=20)
ax.set_ylabel('$P (\chi^2_\mathrm{max} > u)$', size=20)
ax.legend()
ax.set_title("MANOVA validation (1D)", size=20)
pyplot.show()
|
0todd0000/spm1d
|
spm1d/rft1d/examples/val_max_8_manova1_1d.py
|
Python
|
gpl-3.0
| 2,552
|
[
"Gaussian"
] |
90a29b7049de8c539739fe0d2448f0806a81c34b633ef8415a232ac550f9e867
|
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import sys
try:
from setuptools import setup
except:
from distutils.core import setup
def get_static_files(path):
return [os.path.join(dirpath.replace("luigi/", ""), ext)
for (dirpath, dirnames, filenames) in os.walk(path)
for ext in ["*.html", "*.js", "*.css", "*.png"]]
luigi_package_data = sum(map(get_static_files, ["luigi/static", "luigi/templates"]), [])
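# get_static_files strips the leading "luigi/" from each walked directory and
# joins it with the glob patterns, so luigi_package_data ends up holding
# package-relative entries such as "static/<subdir>/*.js" or "templates/*.html"
# for the package_data argument passed to setup() below.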
readme_note = """\
.. note::
For the latest source, discussion, etc, please visit the
`GitHub repository <https://github.com/spotify/luigi>`_\n\n
"""
with open('README.rst') as fobj:
long_description = readme_note + fobj.read()
install_requires = [
'cached_property',
'pyparsing',
'tornado',
'python-daemon',
]
if os.environ.get('READTHEDOCS', None) == 'True':
install_requires.append('sqlalchemy')
# So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla
setup(
name='luigi',
version='1.2.2',
description='Workflow mgmt + task scheduling + dependency resolution',
long_description=long_description,
author='Erik Bernhardsson',
author_email='erikbern@spotify.com',
url='https://github.com/spotify/luigi',
license='Apache License 2.0',
packages=[
'luigi',
'luigi.contrib',
'luigi.contrib.hdfs',
'luigi.tools'
],
package_data={
'luigi': luigi_package_data
},
entry_points={
'console_scripts': [
'luigi = luigi.cmdline:luigi_run',
'luigid = luigi.cmdline:luigid',
'luigi-grep = luigi.tools.luigi_grep:main',
'luigi-deps = luigi.tools.deps:main',
]
},
install_requires=install_requires,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Monitoring',
],
)
|
kalaidin/luigi
|
setup.py
|
Python
|
apache-2.0
| 2,810
|
[
"VisIt"
] |
353bf7169c0705b316931d6ab7625fe5300567e85ab985afe56b8af9d402ff64
|
#!/usr/bin/env python2
from __future__ import print_function
import os, sys, subprocess, re
import warnings
warnings.filterwarnings("ignore")
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import time, hashlib
import tempfile
import numpy as np
import quod, tcblast
#import Bio.Entrez
import Bio.pairwise2, Bio.SubsMat.MatrixInfo
DEBUG = 0
VERBOSITY = 1
def warn(*msgs):
''' prints warnings so that debug prints are more greppable '''
for l in msgs: print('[WARNING]', l, file=sys.stderr)
def error(*msgs):
''' prints error messages and exits with return code 1 '''
for l in msgs: print('[ERROR]', l, file=sys.stderr)
exit(1)
def info(*msgs):
''' prints info messages '''
for l in msgs: print('[INFO]', l, file=sys.stderr)
def run_pfam(indir, outdir, pfamdb):
if not os.path.isdir(outdir): os.mkdir(outdir)
for fn in os.listdir(indir):
if not fn.endswith('.fa'): continue
outfn = '{}/{}.pfam'.format(outdir, os.path.basename(os.path.splitext(fn)[0]))
if VERBOSITY: redirect = '/dev/stderr'
else: redirect = '/dev/null'
cmd = ['hmmscan', '--cpu', '2', '--noali', '--cut_ga', '-o', redirect, '--domtblout', outfn, pfamdb, '{}/{}'.format(indir, fn)]
subprocess.call(cmd)
s = ''
#for arg in cmd: s += arg + ' '
#info(s)
def parse_pfam(infile, color=None, y=-2.5, size=8):
entities = []
spans = []
with open(infile) as f:
for l in f:
if l.startswith('#'): continue
elif not l.strip(): continue
else:
sl = l.strip().split()
#label = l[181:].strip()
#start = int(l[152:157].strip())
#end = int(l[158:163].strip())
#label = sl[-1]
label = sl[1]
start = int(sl[19])
end = int(sl[20])
dy = 0
for span in spans:
if (span[0] <= start <= span[1]) or (span[0] <= end <= span[1]): dy = 0.3
entities.append(quod.Region([[start, end]], [y-0.15+dy, 0.15], label, style=color, size=size))
spans.append([start, end])
return entities
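# In the hmmscan --domtblout rows parsed above, sl[1] is the accession of the
# matched Pfam profile (used as the region label) and sl[19]/sl[20] are the
# envelope start/end coordinates of the hit on the query sequence.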
def fetch(accessions, email=None, db='protein'):
''' fetches FASTA sequences from locally installed BLAST databases (tcdb or nr), falling back to NCBI efetch for entries not found locally '''
if not accessions: return ''
if db == 'tcdb':
out = ''
for acc in accessions:
try:
if DEBUG: info('Running blastdbcmd')
fa = subprocess.check_output(['blastdbcmd', '-db', 'tcdb', '-target_only', '-entry', acc])
out += fa + '\n'
except ValueError: raise ValueError
return out
else:
if DEBUG: info('Preparing to fetch non-TCDB sequences')
acclist = ''
for x in accessions: acclist += ',' + x
acclist = acclist[1:]
try:
if DEBUG: info('Running blastdbcmd')
p = subprocess.Popen(['blastdbcmd', '-db', 'nr', '-entry', acclist], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
#out = re.sub('>', '\n', out) + '\n'
if err.startswith('BLAST Database error'): raise subprocess.CalledProcessError('Database error', '1')
remotes = ''
for l in err.split('\n'):
if l.strip():
if 'Entry not found' in l: remotes += '%s,' % l.split()[-1]
remotes = remotes[:-1]
#out += subprocess.check_output(['curl', '-d', 'db=%s&id=%s&rettype=fasta&retmode=text' % (db, acclist), 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'])
if remotes:
if DEBUG: info('Could not fetch some sequences locally; fetching from remote')
out += subprocess.check_output(['curl', '-d', 'db=%s&id=%s&rettype=fasta&retmode=text' % (db, remotes), 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'])
#out += subprocess.check_output(['curl', '-d', 'db=protein&id=Q9RBJ2&rettype=fasta&retmode=text', 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'])
return out
except subprocess.CalledProcessError:
info('Could not find nr, falling back to Entrez efetch')
if not email:
if 'ENTREZ_EMAIL' in os.environ: email = os.environ['ENTREZ_EMAIL']
else:
raise TypeError('Missing argument email')
if DEBUG: info('Fetching from remote')
out += subprocess.check_output(['curl', '-d', 'db=%s&id=%s&rettype=fasta&retmode=text' % (db, acclist), 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'])
return out
if DEBUG: info('Done fetching a batch from %s' % db)
def parse_p2report(p2report, minz=15, maxz=None, musthave=None, thispair=None):
''' parses Protocol2 TSV reports '''
if musthave and thispair: error('Arguments musthave and thispair are not mutually compatible')
line = 0
if minz == None: minz = -2**16
if maxz == None: maxz = 2**16
bcs = []
alnregs = {}
stats = {}
for l in p2report.split('\n'):
line += 1
if line == 1:
fams = [re.split('[, :]+', l)[2], re.split('[, :]+', l)[4]]
bcs = {fams[0]:[], fams[1]:[]}
elif line == 2: pass
else:
if not l.strip(): continue
ls = l.split('\t')
z = float(ls[3])
if minz <= z <= maxz:
#bcs.append(ls[:2])
if musthave and ls[0] not in musthave and ls[1] not in musthave: continue
found = 1
if thispair:
found = 0
for pair in thispair:
if ls[:2] != pair and ls[:2][::-1] != pair: continue
else:
found = 1
break
if not found: continue
bcs[fams[0]].append(ls[0])
bcs[fams[1]].append(ls[1])
try: alnregs[ls[0]][ls[1]] = (ls[6], ls[7])
except KeyError: alnregs[ls[0]] = {ls[1]:(ls[6], ls[7])}
try: stats[ls[0]][ls[1]] = ls[2:6]
except KeyError: stats[ls[0]] = {ls[1]:ls[2:6]}
return fams, bcs, alnregs, stats
def seek_initial(p1ds, bcs):
''' Grabs detailed hit information '''
hits = {}
for fam in sorted(bcs):
hits[fam] = {}
for bc in sorted(bcs[fam]): hits[fam][bc] = []
fs = {}
fams = sorted(bcs.keys())
for p1d in p1ds:
if os.path.isfile(p1d):
#fs[bc] = p1d
#this is indeed an xor/xnor case, but that may be wrong for some directory naming schemes
if fams[0] in p1d and fams[1] in p1d:
for bc in fams: fs[bc] = p1d
elif fams[0] in p1d: fs[fams[0]] = p1d
elif fams[1] in p1d: fs[fams[1]] = p1d
else:
for bc in fams: fs[bc] = p1d
elif os.path.isdir(p1d):
for fam in sorted(bcs):
if os.path.isfile('%s/%s.tbl' % (p1d, fam)): fs[fam] = '%s/%s.tbl' % (p1d, fam)
elif os.path.isfile('%s/%s/psiblast.tbl' % (p1d, fam)): fs[fam] = '%s/%s/psiblast.tbl' % (p1d, fam)
else: error('Could not find famXpander results table in %s' % p1d)
else: error('Could not find p1d %s' % p1d)
for bc in sorted(bcs):
with open(fs[bc]) as f:
for l in f:
if not l.strip(): continue
if l.lstrip().startswith('#'): continue
if '\t' not in l: continue
ls = l.split('\t')
#if 'CUU05502' in l: print(ls)
#if '4.D.1.1.1-Q52257' in l: print(ls)
try:
hits[bc][ls[1]].append((float(ls[4]), ls[0], (int(ls[6]), int(ls[7])), (int(ls[9]), int(ls[10]))))
#if 'WP_051443908' in l:
# print(hits[bc][ls[1]])
# exit()
except KeyError: hits[bc][ls[1]] = [(float(ls[4]), ls[0], (int(ls[6]), int(ls[7])), (int(ls[9]), int(ls[10])))]
for fam in sorted(bcs):
for bc in sorted(hits[fam]):
try: hits[fam][bc] = sorted(hits[fam][bc])[0]
except IndexError: error('Could not find any hits for {}/{}: Was psiblast.tbl deleted?'.format(fam, bc))
return hits
def clean_fetch(accs, outdir, force=False, email=None):
''' fetches sequences like fetch(), but skips accessions already cached in outdir and writes one FASTA file per accession '''
if DEBUG: info('Fetching %s' % accs)
if not force:
removeme = []
for acc in accs:
if os.path.isfile(outdir + '/%s.fa' % acc): removeme.append(acc)
for acc in removeme: accs.remove(acc)
if not os.path.isdir(outdir): os.mkdir(outdir)
dlme = []
tcdlme = []
for acc in accs:
if os.path.isfile(outdir + '/%s.fa' % acc): continue
else:
if re.match('[0-9]\.[A-Z]\.[0-9]+\.', acc): tcdlme.append(acc)
else: dlme.append(acc)
allfaa = ''
if dlme:
if VERBOSITY: info('Downloading %d sequence(s)' % len(dlme))
allfaa += fetch(dlme, email=email)
if tcdlme:
if VERBOSITY: info('Loading %d TCDB sequence(s)' % len(tcdlme))
allfaa += fetch(tcdlme, db='tcdb', email=email)
if VERBOSITY: info('Done loading %d TCDB sequence(s)' % len(tcdlme))
with open('%s/allseqs.faa' % outdir, 'w') as f: f.write(allfaa)
with open('%s/allseqs.faa' % outdir) as f:
faa = Bio.SeqIO.parse(f, format='fasta')
for record in faa:
for desc in record.description.split('>'):
name = desc.split()[0]
if name.count('.') < 4: name = name[:name.find('.')]
if name.count('|') == 1: name = name.split('|')[1]
if DEBUG > 1: info('Saving %s' % name)
with open('%s/%s.fa' % (outdir, name), 'w') as f: f.write('>%s\n%s' % (desc, record.seq))
fastas = {}
#for fa in allfaa.split('\n\n'):
# if not fa.strip(): continue
# for acc in accs:
# if acc in fastas: pass
# if fa.startswith('>' + acc):
# fastas[acc] = fa
#for x in sorted(fastas):
# if DEBUG: info('Saving %s' % x)
# f = open(outdir + '/%s.fa' % x, 'w')
# f.write(fastas[x])
# f.close()
def quod_set(seqids, sequences, indir, outdir, dpi=300, force=False, bars=[], prefix='', suffix='', silent=False, pars=[]):
''' generates QUOD plots for batches of sequences '''
if not os.path.isdir(outdir): os.mkdir(outdir)
#wedges = [[[x, 2 * (0.5 - (i % 2))] for i, x in enumerate(span)] for span in bars]
ove = lambda x: int(2 * (0.5 - (x % 2)))
wedges = []
for i, span in enumerate(bars):
wedges.append([])
if 1 <= i <= 2: y = -2
else: y = -2
wedges[-1].append(quod.Wall(spans=[span], y=y, ylim=[0,0.5]))
medges = []
for i, span in enumerate(pars):
medges.append([])
y = 2
medges[-1].append(quod.Wall(spans=[span], y=y, ylim=[0.5,1]))
domains = []
for i, seqid in enumerate(seqids):
if i < 2: color = 'red'
else: color = 'blue'
domains.append(parse_pfam('{}/../pfam/{}.pfam'.format(indir, seqid), color=color))
#Draw A: barred by B
quod.what([sequences[seqids[0]]], force_seq=True, title=seqids[0], imgfmt='png', outdir=outdir, outfile=(seqids[0] + '_' + seqids[1] + '.png'), dpi=dpi, hide=1, entities=wedges[0]+domains[0], silent=True, width=15, height=3)
#Draw B: barred by C
quod.what([sequences[seqids[1]]], force_seq=True, title=seqids[1], imgfmt='png', outdir=outdir, outfile=(seqids[1] + '_' + seqids[2] + '.png'), dpi=dpi, hide=1, entities=wedges[1]+medges[0]+domains[1], silent=True, width=15, height=3)
#Draw C: barred by B
quod.what([sequences[seqids[2]]], force_seq=True, title=seqids[2], imgfmt='png', outdir=outdir, outfile=(seqids[2] + '_' + seqids[1] + '.png'), dpi=dpi, hide=1, color=1, entities=wedges[2]+medges[1]+domains[2], silent=True, width=15, height=3)
#Draw D: barred by C
quod.what([sequences[seqids[3]]], force_seq=True, title=seqids[3], imgfmt='png', outdir=outdir, outfile=(seqids[3] + '_' + seqids[2] + '.png'), dpi=dpi, hide=1, color=1, entities=wedges[3]+domains[3], silent=True, width=15, height=3)
def get_pfam(bc, prefix):
print(prefix)
domaindefs = []
for acc in bc[:4]:
with open('{}/pfam/{}.pfam'.format(prefix, acc)) as f:
for l in f:
if not l.strip(): continue
elif l.startswith('#'): continue
domaindefs.append(l.strip())
return domaindefs
def build_html(bc, indir, blasts, outdir='hvordan_out/html', filename='test.html', lastpair=None, nextpair=None, pfam=None):
''' build an HTML report '''
if not os.path.isdir(outdir): os.mkdir(outdir)
if not os.path.isdir(outdir + '/assets'): os.mkdir(outdir + '/assets')
if pfam is None: pfam = get_pfam(bc, prefix=indir)
if not os.path.isfile(outdir + '/assets/openclose.js'):
f = open(outdir + '/assets/openclose.js', 'w')
f.write('function toggle_section(sectionid, selfid) {\n\tvar section = document.getElementById(sectionid);\n\tvar me = document.getElementById(selfid);\n\t//console.log([section, section.style.display]);\n\tif (section.style.display == \'none\') {\n\t\tsection.style.display = \'block\';\n\t\tme.innerHTML = \'Hide\';\n\t} else { \n\t\tsection.style.display = \'none\'; \n\t\tme.innerHTML = \'Show\';\n\t}\n}')
f.close()
if not os.path.isfile(outdir + '/assets/nice.css'):
f = open(outdir + '/assets/nice.css', 'w')
f.write('body {\n\tfont-family: sans-serif;\n\theight: 100%;\n}\ndiv {\n\tdisplay: block;\n}\ndiv.tcblast {\n\tmax-width: 1500px;\n}\ndiv.fullblast {\n\twidth: 50%;\n\tfloat: left;\n}\ndiv.tabular1 {\n\twidth: 49%;\n\tfloat: left;\n\theight: 100%;\n}\ndiv.tabular2 {\n\twidth: 49%;\n\tfloat: right;\n\theight: 100%;\n}\nimg.bluebarplot {\n\tmax-width: 100%;\n\theight: auto;\n}\n.clear { clear: both; }\n.scrollable {\n\toverflow-y: scroll;\n}\n.resizeable {\n\tresize: vertical;\n\toverflow: auto;\n\tborder: 1px solid gray;\n\tdisplay: block;\n\tpadding-bottom: 1ex;\n}\n.bluebars {\n\theight: 25vh;\n}\n.pairwise {\n\theight: 50vh;\n}\n.whatall {\n\theight: 50vh;\n}\n.whataln {\n\twidth: 100%;\n}\n#seqs {\n\tdisplay: none;\n}\n\n\n\n.summtbl {\n\tfont-family: monospace, courier;\n\tfont-size: 75%;\n}\n.oddrow {\n\tbackground-color: #d8d8d8;\n}\ntd {\n\tpadding-right: 1em;\n}\n.red {\n\tcolor: red;\n}\nimg {\n\tborder: 1pt solid black;\n}\n.monospace {\n\tfont-family: monospace;\n}')
f.close()
#bc := [WP_1234567890, AP_1234567890]
title = 'HVORDAN summary: %s vs %s' % tuple(bc[1:3])
out = '<html><head><title>%s</title>' % title
out += '\n<link rel="stylesheet" type="text/css" href="assets/nice.css"/>'
out += '\n<script src="assets/openclose.js"></script>'
out += '\n</head><body>'
out += '\n<h1>%s</h1>' % title
if lastpair or nextpair:
out += '\n'
if lastpair: out += '<a href="%s_vs_%s.html">◀ %s vs %s</a> ' % (lastpair[1], lastpair[2], lastpair[1], lastpair[2])
if nextpair: out += '<a href="%s_vs_%s.html">%s vs %s ▶</a> ' % (nextpair[1], nextpair[2], nextpair[1], nextpair[2])
out += '<br/>'
out += '\n<h2>Table of contents</h2>'
out += '\n<button class="showhide" id="tocsh" onclick="toggle_section(\'toc\', \'tocsh\')">Hide</button>'
out += '\n<div class="toc" id="toc"> <ol> <li><a href="#summary">Summary</a></li> <li><a href="#tcsummary">TCBLAST Summary</a></li> <li><a href="#pairwise">Pairwise</a></li> <li><a href="#abcd">ABCD hydropathy plots</a></li> <li><a href="#bc">BC hydropathy plot</a></li> <li><a href="sequences">Sequences</a></li> <li><a href="domains">Domains</a></li> </ol> </div>'
#stats
out += '\n<h2>Summary</h2>'
out += '\n<button class="showhide" id="summarysh" onclick="toggle_section(\'summary\', \'summarysh\')">Hide</button>'
out += '\n<div class="whataln" id="summary">'
out += '\nSS Z-score: %s<br/>' % bc[8]
out += '\nGSAT Z-score: %s<br/>' % bc[9]
out += '\nSubject align-length: %s<br/>' % bc[10]
out += '\nTarget align-length: %s<br/>' % bc[11]
out += '\n</div>'
out += '\n<h2>TCBLAST</h2>'
#bluebars
out += '\n<button class="showhide" id="tcblastsh" onclick="toggle_section(\'tcblast\', \'tcblastsh\')">Hide</button>'
out += '\n<div class="tcblast" id="tcblast"><a name="tcsummary"><h3>TCBLAST Summary</h3></a>'
out += '\n<div class="resizeable bluebars"><div class="scrollable tabular1">'
out += '\n<img class="bluebarplot" src="../graphs/TCBLAST_%s.png"/>' % bc[1]
out += '\n</div><div class="scrollable tabular2">'
out += '\n<img class="bluebarplot" src="../graphs/TCBLAST_%s.png"/>' % bc[2]
out += '\n</div></div>'
#pairwise
out += '\n<div class="clear"></div><a name="pairwise"><h3>Pairwise</h3></a><div class="resizeable pairwise"><div class="scrollable tabular1">'
out += '\n%s' % blasts[0][1]
out += '</div><div class="scrollable tabular2">'
out += '\n%s' % blasts[1][1]
out += '\n</div></div></div>'
#abcd bc
out += '\n<div class="clear"></div><a name="abcd"><h3>ABCD Hydropathy plots</h3></a>'
out += '\n<button class="showhide" id="abcdsh" onclick="toggle_section(\'abcd\', \'abcdsh\')">Hide</button>'
out += '\n<div class="whatall" id="abcd">'
out += '\n<div class="tabular1">'
out += '\nA<br/><img class="bluebarplot" id="plota" src="../graphs/%s_%s.png"/><br/>' % (bc[0], bc[1])
out += '\nB<br/><img class="bluebarplot" id="plotb" src="../graphs/%s_%s.png"/><br/>' % (bc[1], bc[2])
out += '\n</div><div class="tabular2">'
out += '\nD<br/><img class="bluebarplot" id="plotd" src="../graphs/%s_%s.png"/><br/>' % (bc[3], bc[2])
out += '\nC<br/><img class="bluebarplot" id="plotc" src="../graphs/%s_%s.png"/><br/>' % (bc[2], bc[1])
out += '\n</div></div>'
out += '\n<div class="clear"></div><br/><a name="bc"><h3>BC hydropathy plot</h3></a>'
out += '\n<button class="showhide" id="bcsh" onclick="toggle_section(\'bc\', \'bcsh\')">Hide</button>'
out += '\n<div class="resizeable whataln" id="bc"><div class="scrollable">'
out += '<img class="bluebarplot" id="plotbc" src="../graphs/%s_vs_%s.png"/><br/>' % (bc[1], bc[2])
out += '\n</div></div>'
#out += '\n<button class="showhide" id="tcblastsh" onclick="toggle_section(\'tcblast\', \'tcblastsh\')">Hide</button>'
out += '\n<br/><div style="height: 10ex"></div>'
#sequences
out += '\n<div class="clear"></div><br/><a name="sequences"><h3>Sequences</h3></a>'
out += '\n<button class="showhide" id="seqsh" onclick="toggle_section(\'sequences\', \'seqsh\')">Hide</button>'
out += '\n<div class="resizeable whataln monospace" id="sequences"><div class="scrollable">'
out += ('\n%s\n%s\n%s\n%s' % tuple(bc[4:8])).replace('\n', '<br/>\n')
out += '\n</div></div>'
#pfam
out += '\n<div class="clear></div><br/><a name="domains"><h3>Domains</h3></a>'
out += '\n<button class="showhide" id="domsh" onclick="toggle_section(\'domains\', \'domsh\')">Hide</button>'
out += '\n<div class="resizeable whataln monospace" id="domains"><div class="scrollable"><pre>'
for domainstr in pfam: out += '\n{}<br/>'.format(domainstr)
out += '\n</pre></div></div>'
out += '\n</body></html>'
f = open(outdir + '/' + filename, 'w')
f.write(out)
f.close()
def get_fulltrans(fams, bcs, abcd):
''' collect A, B, C, and D into one convenient data structure '''
pairs = zip(bcs[fams[0]], bcs[fams[1]])
origs = [abcd[fams[0]], abcd[fams[1]]]
fulltrans = []
for p in pairs:
fulltrans.append(tuple([origs[0][p[0]][1], p[0], p[1], origs[1][p[1]][1]]))
return fulltrans
def blastem(acc, indir, outdir, dpi=300, force=False, seqbank={}, tmcount={}, maxhits=50):
''' generates TCBLAST plots '''
f = open(indir + '/sequences/' + acc + '.fa')
seq= f.read()
f.close()
return tcblast.til_warum(seq, outfile='%s/graphs/TCBLAST_%s.png' % (outdir, acc), title=acc, dpi=dpi, outdir='%s/blasts' % outdir, clobber=force, seqbank=seqbank, tmcount=tmcount, silent=True, maxhits=maxhits)
#fn = outdir + '/' + filename + '.png'
#blasts = tcblast.til_warum(seq, fn, dpi=dpi)
#blasts = [tcblast.til_warum(l[0], args.o + '/images/' + accs[0] + '.png', dpi=args.r, html=2, outdir=args.o + '/hmmtop'), tcblast.til_warum(l[1], args.o + '/images/' + accs[1] + '.png', dpi=args.r, html=2, outdir=args.o + '/hmmtop')]
def identifind(seq1, seq2):
''' obtains qstart, qend, sstart, send '''
#Seq1 = Bio.Seq.Seq(seq1, Bio.Alphabet.ProteinAlphabet())
if seq1.startswith('>'): seq1 = seq1[seq1.find('\n')+1:]
if seq2.startswith('>'): seq2 = seq2[seq2.find('\n')+1:]
seq1 = re.sub('[^ACDEFGHIKLMNPQRSTVWY]', '', seq1.upper())
seq2 = re.sub('[^ACDEFGHIKLMNPQRSTVWY]', '', seq2.upper())
if DEBUG: info('Starting an alignment')
#alns = Bio.pairwise2.align.localds(seq1, seq2, Bio.SubsMat.MatrixInfo.ident, -10, -0.5)
#out = subprocess.check_output(['ggsearch36'])
aln = ggsearch(seq1, seq2)
if DEBUG: info('Finished an alignment')
subjstart = 0
#sngap = re.findall('^-+', aln[0])
#if sngap: sngap = len(sngap[0])
#else: sngap = 0
#scgap = re.findall('-+$', aln[0])
#if scgap: scgap = len(aln[0]) - len(scgap[0]) - 1
#else: scgap = len(aln[0])-1
#tngap = re.findall('^-+', aln[1])
#if tngap: tngap = len(tngap[0])
#else: tngap = 0
#tcgap = re.findall('-+$', aln[1])
#if tcgap: tcgap = len(aln[1]) - len(tcgap[0]) - 1
#else: tcgap = len(aln[1])-1
#if sngap:
# sstart = 0
# tstart = sngap
#else:
# sstart = tngap
# tstart = 0
igap1 = re.findall('^-+', aln[0])
igap2 = re.findall('^-+', aln[1])
tgap1 = re.findall('-+$', aln[0])
tgap2 = re.findall('-+$', aln[1])
#print(seq1)
#print(seq2)
#print(aln[0])
#print(aln[1])
if igap1:
#1 -----CYFQNCPRG
#2 CYFQNCPRGCYFQN
qstart = 0
sstart = len(igap1[0])
elif igap2:
#1 CYFQNCPRGCYFQN
#2 -----CYFQNCPRG
qstart = len(igap2[0])
sstart = 0
else:
#1 CYFQNCPRGCYFQN
#2 CYFQNCPRG-----
qstart = 0
sstart = 0
if tgap1:
#1 CYFQNCPRG-----
#2 CYFQNCPRGCYFQN
qend = len(seq1)-1
send = len(seq2)-1-len(tgap1[0])
elif tgap2:
#1 CYFQNCPRGCYFQN
#2 CYFQNCPRG-----
qend = len(seq1)-1-len(tgap2[0])
send = len(seq2)-1
else:
#1 CYFQNCPRGCYFQN
#2 -----CYFQNCPRG
qend = len(seq1)-1
send = len(seq2)-1
return qstart+1, qend+1, sstart+1, send+1
#I prefer 0-indexing, but pretty much everyone 1-indexes (at least for protein sequences)
def ggsearch(seq1, seq2):
''' runs ssearch36 on the two sequences and returns the aligned sequence strings '''
if not seq1.startswith('>'): seq1 = '>seq1\n' + seq1
if not seq2.startswith('>'): seq2 = '>seq2\n' + seq2
try:
f1 = tempfile.NamedTemporaryFile(delete=False)
f1.write(seq1)
f1.close()
f2 = tempfile.NamedTemporaryFile(delete=False)
f2.write(seq2)
f2.close()
cmd = ['ssearch36', '-a', '-m', '3', f1.name, f2.name]
out = subprocess.check_output(cmd).replace(' ', '-')
finally:
os.remove(f1.name)
os.remove(f2.name)
seqi = 0
alns = []
for l in out.split('\n'):
if l.startswith('>'): seqi += 1
if seqi:
if not l.strip(): seqi = 0
#elif l.startswith('>'): alns.append(l + '\n')
#else: alns[-1] += l + '\n'
elif l.startswith('>'): alns.append('')
else: alns[-1] += l
return alns
def summarize(p1d, p2d, outdir, minz=15, maxz=None, dpi=100, force=False, email=None, musthave=None, thispair=None, fams=None, maxhits=50, pfamdb='./Pfam-A.hmm'):
''' parses the Protocol2 report, selects the best A-B/C-D hits, fetches sequences, runs Pfam, and generates QUOD plots, TCBLAST plots, and HTML reports '''
if thispair is not None:
if len(thispair) % 2: error('Unpaired sequence found')
else:
truepairs = [thispair[i:i+2] for i in range(0, len(thispair), 2)]
else: truepairs = None
if not os.path.isdir(outdir): os.mkdir(outdir)
if VERBOSITY: info('Reading Protocol2 report')
try: f = open(p2d + '/report.tbl')
except IOError:
if os.path.isfile(p2d):
f = open(p2d)
warn('Opening %s as a Protocol2 results table' % p2d)
else:
try:
famvfam = '%s_vs_%s' % tuple(fams)
try:
f = open('%s/%s/report.tbl' % (p2d, famvfam))
info('Could not find report.tbl in %s, falling back on family vs family subdirectory' % p2d)
except IOError:
try: f = open('%s/%s/%s/report.tbl' % (p2d, famvfam, famvfam))
except IOError: error('Could not find a Protocol2 directory for %s and %s' % tuple(fams))
except TypeError: error('Specify families if using Protocol2 root directories')
p2report = f.read()
f.close()
fams, bcs, alnregs, stats = parse_p2report(p2report, minz, maxz, musthave=musthave, thispair=truepairs)
if VERBOSITY: info('Selecting best A-B C-D pairs')
abcd = seek_initial(p1d, bcs)
#for k in abcd:
# for j in abcd[k]:
# print(k, j, abcd[k][j])
fulltrans = get_fulltrans(fams, bcs, abcd)
fetchme = set()
pairstats = {}
#for fam in abcd:
# for bc in abcd[fam]:
# fetchme.add(bc) # B|C
# fetchme.add(abcd[fam][bc][1]) #A|D
# try: pairstats[bc][abcd[fam][bc][1]] = abcd[fam][bc]
# except KeyError: pairstats[bc] = {abcd[fam][bc][1]:abcd[fam][bc]}
for fam in abcd:
for bc in abcd[fam]:
try: pairstats[bc][abcd[fam][bc][1]] = abcd[fam][bc]
except KeyError: pairstats[bc] = {abcd[fam][bc][1]:abcd[fam][bc]}
#if 'WP_051443908' in pairstats:
# print('#'*80)
# print('WP_051443908', pairstats['WP_051443908'])
for pair in fulltrans:
for acc in pair: fetchme.add(acc)
#grab all relevant sequences and store them
if VERBOSITY: info('Retrieving %d sequence(s)' % len(fetchme))
clean_fetch(fetchme, outdir + '/sequences', force=force, email=email)
run_pfam(indir=(outdir + '/sequences'), outdir='{}/pfam'.format(outdir), pfamdb=pfamdb)
if VERBOSITY: info('Done retrieving %d sequences' % len(fetchme))
#prepare correspondences for identifind (marks B, C)
allseqs = []
bars = []
seqs = {}
pars = []
if VERBOSITY: info('Aligning subsequences to sequences (x%d)' % len(fulltrans))
paths = {}
for i, pair in enumerate(fulltrans):
[allseqs.append(x) for x in pair]
if pair[0] not in paths:
paths[pair[0]] = {}
if pair[1] not in paths[pair[0]]: paths[pair[0]] = {}
paths[pair[0]][pair[1]] = pair[2]
if pair[3] not in paths:
paths[pair[3]] = {}
if pair[2] not in paths[pair[3]]: paths[pair[3]] = {}
paths[pair[3]][pair[2]] = pair[1]
#bar A
#bars.append(pairstats[pair[1]][pair[0]][3])
#pars.append(pairstats[pair[1]][pair[0]][2])
bars.append(pairstats[pair[1]][pair[0]][2])
pars.append(pairstats[pair[1]][pair[0]][3])
#bar B, C
try: seqb = seqs[pair[1]]
except KeyError:
with open('%s/sequences/%s.fa' % (outdir, pair[1])) as f: seqb = seqs[pair[1]] = f.read()
try: seqc = seqs[pair[2]]
except KeyError:
with open('%s/sequences/%s.fa' % (outdir, pair[2])) as f: seqc = seqs[pair[2]] = f.read()
if DEBUG: info('Performing 2 subsequence-sequence alignments')
bars.append(identifind(alnregs[pair[1]][pair[2]][0], seqb)[2:4])
bars.append(identifind(alnregs[pair[1]][pair[2]][1], seqc)[2:4])
#bar D
#bars.append(pairstats[pair[2]][pair[3]][3])
#pars.append(pairstats[pair[2]][pair[3]][2])
bars.append(pairstats[pair[2]][pair[3]][2])
pars.append(pairstats[pair[2]][pair[3]][3])
try: subseqs = alnregs[pair[1]][pair[2]]
except KeyError: subseqs = alnregs[pair[2]][pair[1]]
#make graphs for all individual full-lengthers
if VERBOSITY: info('Generating QUOD plots')
for x in allseqs:
try: seqs[x]
except KeyError:
with open('%s/sequences/%s.fa' % (outdir, x)) as f: seqs[x] = f.read()
for i in range(0, len(allseqs), 4):
quod_set(tuple(allseqs[i:i+4]), seqs, outdir + '/sequences', outdir + '/graphs/', dpi=dpi, force=force, bars=bars[i:i+4], silent=not i, pars=pars[i//2:i//2+2])
#make graphs for all pairs of sequences
for s1 in alnregs:
for s2 in alnregs[s1]:
quod.what(alnregs[s1][s2], force_seq=True, labels=[s1,s2], title='%s (red) vs %s (blue)' % (s1,s2), imgfmt='png', outdir=outdir+'/graphs', outfile='%s_vs_%s.png' % (s1,s2), dpi=dpi, hide=1, width=30, height=3)
if VERBOSITY: info('Generating TCBLAST plots')
blasts = {}
tmcount = {}
seqbank = {}
for pair in fulltrans:
#blasts[tuple(pair)] = [blastem(pair[1], indir=outdir, outdir=outdir, dpi=dpi), blastem(pair[2], indir=outdir, outdir=outdir, dpi=dpi, force=force, seqbank=seqbank, tmcount=tmcount, maxhits=maxhits)]
blasts[tuple(pair)] = [blastem(pair[i+1], indir=outdir, outdir=outdir, dpi=dpi, maxhits=maxhits) for i in range(2)]
if fulltrans:
if VERBOSITY: info('Generating %d HTML reports' % len(fulltrans))
for i, pair in enumerate(fulltrans):
pairseqs = []
for seq in pair:
try: pairseqs.append(seqs[seq])
except KeyError:
with open('%s/sequences/%s.fa' % (outdir, seq)) as f: pairseqs.append(f.read())
if i > 0: lastpair = fulltrans[i-1]
else: lastpair = None
if i < (len(fulltrans)-1): nextpair = fulltrans[i+1]
else: nextpair = None
build_html(pair + tuple(pairseqs) + tuple(stats[pair[1]][pair[2]]), indir=outdir, blasts=blasts[tuple(pair)], outdir=(outdir + '/html'), filename='%s_vs_%s.html' % tuple(pair[1:3]), lastpair=lastpair, nextpair=nextpair)
else:
if minz is None: zmin = '-inf'
else: zmin = '%0.1f' % minz
if maxz is None: zmax = '+inf'
else: zmax = '%0.1f' % maxz
info('Generated 0 HTML reports: No significant Protocol2 hits found with Z-scores between %s and %s' % (zmin, zmax))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='HTML Visualization of Reasonable, Decent Alignment Networks')
parser.add_argument('--p1d', metavar='PATH', default=['.'], nargs='+', help='famXpander directories or table(s) (generally psiblast.tbl). Note: Running "cut -f1-12" on psiblast.tbl will greatly improve performance, but compatibility with famXpander/9.X.99/psiblast.tbl directory structures is implemented. Directory traversal is not implemented yet.')
parser.add_argument('--p2d', metavar='PATH', default='.', help='Protocol2 directory or results table (generally results.tbl). If using on root Protocol2 directories, -f is required.')
parser.add_argument('-o', '--outdir', metavar='DIR', default='hvordan_out', help='output directory {default:hvordan_out}')
parser.add_argument('-f', '--fams', metavar='FAMILY', default=None, nargs=2, help='families to inspect. Required if using --p2d on root Protocol2 directories')
parser.add_argument('-z', '--z-min', default=15, type=int, help='minimum Z score {default:15}')
parser.add_argument('-Z', '--z-max', default=None, type=int, help='maximum Z score {default:none}')
parser.add_argument('-c', '--clobber', action='store_true', help='force redownloads/regenerates where applicable')
parser.add_argument('-r', '--dpi', type=int, default=100, help='resolution of graphs {default:100}')
parser.add_argument('-m', '--max-hits', type=int, default=10, help='how many TCBLAST hits to BLAST for. Contributes significantly to execution time for small famXpander results. {default:10}')
if 'ENTREZ_EMAIL' in os.environ:
parser.add_argument('-e', '--email', default=None, help='Working email in case too many requests get sent and the NCBI needs to initiate contact. Defaults to checking $ENTREZ_EMAIL if set. {current value: %s}' % os.environ['ENTREZ_EMAIL'])
else: parser.add_argument('-e', '--email', default=None, help='Working email in case too many requests get sent and the NCBI needs to initiate contact. Defaults to checking $ENTREZ_EMAIL if set. {unset}')
if 'PFAMDB' in os.environ:
parser.add_argument('-d', '--pfamdb', default=os.environ['PFAMDB'], help='Which PFAM database to use. Defaults to checking $PFAMDB if set. (default: {})'.format(os.environ['PFAMDB']))
else: parser.add_argument('-d', '--pfamdb', default='/ResearchData/pfam/pfamdb/Pfam-A.hmm', help='Which PFAM database to use. Defaults to checking $PFAMDB if set. (default: {})'.format('/ResearchData/pfam/pfamdb/Pfam-A.hmm'))
parser.add_argument('-i', metavar='ACC', nargs='+', help='Operate only on pairs containing these accessions')
parser.add_argument('-p', metavar='ACC', nargs='+', help='Operate only on these specific pairs.')
args = parser.parse_args()
if args.p1d == '.' and args.p2d == '.':
parser.print_help()
exit()
summarize(args.p1d, args.p2d, args.outdir, minz=args.z_min, maxz=args.z_max, dpi=args.dpi, force=args.clobber, email=args.email, musthave=args.i, thispair=args.p, fams=args.fams, maxhits=args.max_hits, pfamdb=args.pfamdb)
|
khendarg/hvordan
|
hvordan.py
|
Python
|
bsd-3-clause
| 30,570
|
[
"BLAST"
] |
74d58087108aa778e743760da99a4af5cc30347cf38b6c156299af00bd608070
|
from zope.component import getUtility
from zope.interface import implementer
import Missing
from wsapi4plone.core.interfaces import IFormatQueryResults, IScrubber
@implementer(IFormatQueryResults)
class FormatQueryResults(object):
masking = {
'cmf_uid': None,
'exclude_from_nav': None,
'getIcon': None,
'getId': None,
'getObjSize': 'size',
'is_folderish': 'container',
'meta_type': None,
'portal_type': None, # redundant data, would seem to correspond with 'Type'
}
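# Catalog metadata keys listed above are either renamed in the output
# (e.g. 'getObjSize' -> 'size', 'is_folderish' -> 'container') or, when mapped
# to None, dropped from the per-path dictionaries built in __call__ below.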
def __call__(self, brains):
grey_matter = {}
for brain in brains:
path = brain.getPath()
grey_matter[path] = {}
for neuron in brain.schema():
if brain[neuron] == Missing.Value:
continue
elif neuron in list(self.masking.keys()):
if self.masking[neuron]:
grey_matter[path][self.masking[neuron]] = brain[neuron]
else:
continue
else:
grey_matter[path][neuron] = brain[neuron]
scrubber = getUtility(IScrubber)
jarred_brains = scrubber.dict_scrub(grey_matter)
return jarred_brains
def formatter():
return FormatQueryResults()
|
OpenBfS/dokpool-plone
|
Plone/src/wsapi4plone.core/wsapi4plone/core/utilities/query.py
|
Python
|
gpl-3.0
| 1,327
|
[
"NEURON"
] |
028105d8181c4824df2e7625a27052b36368d965a6b27d855c12994333e86597
|
# -*- coding: utf-8 -*-
"""Wrapper functions and classes around scikit-images AffineTransformation.
Simplifies augmentation of images in machine learning.
Example usage:
img_width = 32 # width of the images
img_height = 32 # height of the images
images = ... # e.g. load via scipy.misc.imload(filename)
# For each image: randomly flip it horizontally (50% chance),
# randomly rotate it between -20 and +20 degrees, randomly translate
# it on the x-axis between -5 and +5 pixel.
ia = ImageAugmenter(img_width, img_height, hflip=True, rotation_deg=20,
translation_x_px=5)
augmented_images = ia.augment_batch(images)
"""
from __future__ import division
from skimage import transform as tf
import skimage
from skimage.filters import gaussian_filter
import numpy as np
import random
import cv2
def rotate(image, angle, center = None, scale = 1.0):
(h, w) = image.shape[:2]
if center is None:
center = (w / 2, h / 2)
# Perform the rotation
M = cv2.getRotationMatrix2D(center, angle, scale)
rotated = cv2.warpAffine(image, M, (w, h))
return rotated
def apply_motion_blur(image, kernel_size, strength = 1.0):
"""Applies motion blur on image
"""
# generating the kernel
kernel_motion_blur = np.zeros((kernel_size, kernel_size))
kernel_motion_blur[int((kernel_size - 1) / 2), :] = np.ones(kernel_size)
kernel_motion_blur = kernel_motion_blur / kernel_size
rotation_kernel = np.random.uniform(0, 360)
kernel_motion_blur = rotate(kernel_motion_blur, rotation_kernel)
#cv2.imshow("kernel", cv2.resize(kernel_motion_blur, (100, 100)))
kernel_motion_blur *= strength
# applying the kernel to the input image
output = cv2.filter2D(image, -1, kernel_motion_blur)
return output
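# The kernel built above is a single one-pixel-wide line through the kernel
# centre, normalized by kernel_size, rotated to a random direction and scaled
# by `strength` before being convolved with the image via cv2.filter2D.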
def is_minmax_tuple(param):
"""Returns whether the parameter is a tuple containing two values.
Used in create_aug_matrices() and probably useless everywhere else.
Args:
param: The parameter to check (whether it is a tuple of length 2).
Returns:
Boolean
"""
return type(param) is tuple and len(param) == 2
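# Example: is_minmax_tuple((5, 20)) -> True, is_minmax_tuple(20) -> False.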
def create_aug_matrices(nb_matrices, img_width_px, img_height_px,
scale_to_percent=1.0, scale_axis_equally=False,
rotation_deg=0, shear_deg=0,
translation_x_px=0, translation_y_px=0,
seed=None):
"""Creates the augmentation matrices that may later be used to transform
images.
This is a wrapper around scikit-image's transform.AffineTransform class.
You can apply those matrices to images using the apply_aug_matrices()
function.
Args:
nb_matrices: How many matrices to return, e.g. 100 returns 100 different
random-generated matrices (= 100 different transformations).
img_width_px: Width of the images that will be transformed later
on (same as the width of each of the matrices).
img_height_px: Height of the images that will be transformed later
on (same as the height of each of the matrices).
scale_to_percent: Same as in ImageAugmenter.__init__().
Up to which percentage the images may be
scaled/zoomed. The negative scaling is automatically derived
from this value. A value of 1.1 allows scaling by any value
between -10% and +10%. You may set min and max values yourself
by using a tuple instead, like (1.1, 1.2) to scale between
+10% and +20%. Default is 1.0 (no scaling).
scale_axis_equally: Same as in ImageAugmenter.__init__().
Whether to always scale both axis (x and y)
in the same way. If set to False, then e.g. the Augmenter
might scale the x-axis by 20% and the y-axis by -5%.
Default is False.
rotation_deg: Same as in ImageAugmenter.__init__().
By how much the image may be rotated around its
center (in degrees). The negative rotation will automatically
be derived from this value. E.g. a value of 20 allows any
rotation between -20 degrees and +20 degrees. You may set min
and max values yourself by using a tuple instead, e.g. (5, 20)
to rotate between +5 and +20 degrees. Default is 0 (no
rotation).
shear_deg: Same as in ImageAugmenter.__init__().
By how much the image may be sheared (in degrees). The
negative value will automatically be derived from this value.
E.g. a value of 20 allows any shear between -20 degrees and
+20 degrees. You may set min and max values yourself by using a
tuple instead, e.g. (5, 20) to shear between +5 and +20
degrees. Default is 0 (no shear).
translation_x_px: Same as in ImageAugmenter.__init__().
By up to how many pixels the image may be
translated (moved) on the x-axis. The negative value will
automatically be derived from this value. E.g. a value of +7
allows any translation between -7 and +7 pixels on the x-axis.
You may set min and max values yourself by using a tuple
instead, e.g. (5, 20) to translate between +5 and +20 pixels.
Default is 0 (no translation on the x-axis).
translation_y_px: Same as in ImageAugmenter.__init__().
See translation_x_px, just for the y-axis.
seed: Seed to use for python's and numpy's random functions.
Returns:
List of augmentation matrices.
"""
assert nb_matrices > 0
assert img_width_px > 0
assert img_height_px > 0
assert is_minmax_tuple(scale_to_percent) or scale_to_percent >= 1.0
assert is_minmax_tuple(rotation_deg) or rotation_deg >= 0
assert is_minmax_tuple(shear_deg) or shear_deg >= 0
assert is_minmax_tuple(translation_x_px) or translation_x_px >= 0
assert is_minmax_tuple(translation_y_px) or translation_y_px >= 0
if seed is not None:
random.seed(seed)
np.random.seed(seed)
result = []
shift_x = int(img_width_px / 2.0)
shift_y = int(img_height_px / 2.0)
# prepare min and max values for
# scaling/zooming (min/max values)
if is_minmax_tuple(scale_to_percent):
scale_x_min = scale_to_percent[0]
scale_x_max = scale_to_percent[1]
else:
scale_x_min = scale_to_percent
scale_x_max = 1.0 - (scale_to_percent - 1.0)
assert scale_x_min > 0.0
#if scale_x_max >= 2.0:
# warnings.warn("Scaling by more than 100 percent (%.2f)." % (scale_x_max,))
scale_y_min = scale_x_min # scale_axis_equally affects the random value generation
scale_y_max = scale_x_max
# rotation (min/max values)
if is_minmax_tuple(rotation_deg):
rotation_deg_min = rotation_deg[0]
rotation_deg_max = rotation_deg[1]
else:
rotation_deg_min = (-1) * int(rotation_deg)
rotation_deg_max = int(rotation_deg)
# shear (min/max values)
if is_minmax_tuple(shear_deg):
shear_deg_min = shear_deg[0]
shear_deg_max = shear_deg[1]
else:
shear_deg_min = (-1) * int(shear_deg)
shear_deg_max = int(shear_deg)
# translation x-axis (min/max values)
if is_minmax_tuple(translation_x_px):
translation_x_px_min = translation_x_px[0]
translation_x_px_max = translation_x_px[1]
else:
translation_x_px_min = (-1) * translation_x_px
translation_x_px_max = translation_x_px
# translation y-axis (min/max values)
if is_minmax_tuple(translation_y_px):
translation_y_px_min = translation_y_px[0]
translation_y_px_max = translation_y_px[1]
else:
translation_y_px_min = (-1) * translation_y_px
translation_y_px_max = translation_y_px
# create nb_matrices randomized affine transformation matrices
for _ in range(nb_matrices):
# generate random values for scaling, rotation, shear, translation
scale_x = random.uniform(scale_x_min, scale_x_max)
scale_y = random.uniform(scale_y_min, scale_y_max)
if not scale_axis_equally:
scale_y = random.uniform(scale_y_min, scale_y_max)
else:
scale_y = scale_x
rotation = np.deg2rad(random.randint(rotation_deg_min, rotation_deg_max))
shear = np.deg2rad(random.randint(shear_deg_min, shear_deg_max))
translation_x = random.randint(translation_x_px_min, translation_x_px_max)
translation_y = random.randint(translation_y_px_min, translation_y_px_max)
# create three affine transformation matrices
# 1st one moves the image to the top left, 2nd one transforms it, 3rd one
# moves it back to the center.
# The movement is necessary, because rotation is applied to the top left
# and not to the image's center (same for scaling and shear).
matrix_to_topleft = tf.SimilarityTransform(translation=[-shift_x, -shift_y])
matrix_transforms = tf.AffineTransform(scale=(scale_x, scale_y),
rotation=rotation, shear=shear,
translation=(translation_x,
translation_y))
matrix_to_center = tf.SimilarityTransform(translation=[shift_x, shift_y])
# Combine the three matrices to one affine transformation (one matrix)
matrix = matrix_to_topleft + matrix_transforms + matrix_to_center
# one matrix is ready, add it to the result
result.append(matrix.inverse)
return result
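# A minimal usage sketch (the parameter values are illustrative, not from the
# original code):
# matrices = create_aug_matrices(100, img_width_px=32, img_height_px=32,
#                                scale_to_percent=1.1, rotation_deg=20,
#                                translation_x_px=5, seed=42)
# returns 100 inverse affine transforms for 32x32 images, which can then be
# passed to apply_aug_matrices(images, matrices) defined below.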
def apply_aug_matrices(images, matrices, transform_channels_equally=True,
channel_is_first_axis=False, random_order=True,
mode="constant", cval=0.0, interpolation_order=1,
seed=None):
"""Augment the given images using the given augmentation matrices.
This function is a wrapper around scikit-image's transform.warp().
It is expected to be called by ImageAugmenter.augment_batch().
The matrices may be generated by create_aug_matrices().
Args:
images: Same as in ImageAugmenter.augment_batch().
Numpy array (dtype: uint8, i.e. values 0-255) with the images.
Expected shape is either (image-index, height, width) for
grayscale images or (image-index, channel, height, width) for
images with channels (e.g. RGB) where the channel has the first
index or (image-index, height, width, channel) for images with
channels, where the channel is the last index.
If your shape is (image-index, channel, width, height) then
you must also set channel_is_first_axis=True in the constructor.
matrices: A list of augmentation matrices as produced by
create_aug_matrices().
transform_channels_equally: Same as in ImageAugmenter.__init__().
Whether to apply the exactly same
transformations to each channel of an image (True). Setting
it to False allows different transformations per channel,
e.g. the red-channel might be rotated by +20 degrees, while
the blue channel (of the same image) might be rotated
by -5 degrees. If you don't have any channels (2D grayscale),
you can simply ignore this setting.
Default is True (transform all equally).
channel_is_first_axis: Same as in ImageAugmenter.__init__().
Whether the channel (e.g. RGB) is the first
axis of each image (True) or the last axis (False).
False matches the scipy and PIL implementation and is the
default. If your images are 2D-grayscale then you can ignore
this setting (as the augmenter will ignore it too).
random_order: Whether to apply the augmentation matrices in a random
order (True, e.g. the 2nd matrix might be applied to the
5th image) or in the given order (False, e.g. the 2nd matrix might
be applied to the 2nd image).
Notice that for multi-channel images (e.g. RGB) this function
will use a different matrix for each channel, unless
transform_channels_equally is set to True.
mode: Parameter used for the transform.warp-function of scikit-image.
Can usually be ignored.
cval: Parameter used for the transform.warp-function of scikit-image.
Defines the fill color for "new" pixels, e.g. for empty areas
after rotations. (0.0 is black, 1.0 is white.)
interpolation_order: Parameter used for the transform.warp-function of
scikit-image. Defines the order of all interpolations used to
generate the new/augmented image. See their documentation for
further details.
seed: Seed to use for python's and numpy's random functions.
"""
# images must be numpy array
assert type(images).__module__ == np.__name__, "Expected numpy array for " \
"parameter 'images'."
# images must have uint8 as dtype (0-255)
assert images.dtype.name == "uint8", "Expected numpy.uint8 as image dtype."
    # 3 axes total (2 per image) for grayscale,
    # 4 axes total (3 per image) for RGB (usually)
assert len(images.shape) in [3, 4], """Expected 'images' parameter to have
either shape (image index, y, x) for greyscale
or (image index, channel, y, x) / (image index, y, x, channel)
for multi-channel (usually color) images."""
if seed:
np.random.seed(seed)
nb_images = images.shape[0]
# estimate number of channels, set to 1 if there is no axis channel,
# otherwise it will usually be 3
has_channels = False
nb_channels = 1
if len(images.shape) == 4:
has_channels = True
if channel_is_first_axis:
nb_channels = images.shape[1] # first axis within each image
else:
nb_channels = images.shape[3] # last axis within each image
# whether to apply the transformations directly to the whole image
# array (True) or for each channel individually (False)
apply_directly = not has_channels or (transform_channels_equally
and not channel_is_first_axis)
# We generate here the order in which the matrices may be applied.
# At the end, order_indices will contain the index of the matrix to use
# for each image, e.g. [15, 2] would mean, that the 15th matrix will be
# applied to the 0th image, the 2nd matrix to the 1st image.
    # If the images have multiple channels (e.g. RGB) and
# transform_channels_equally has been set to False, we will need one
# matrix per channel instead of per image.
# 0 to nb_images, but restart at 0 if index is beyond number of matrices
len_indices = nb_images if apply_directly else nb_images * nb_channels
if random_order:
# Notice: This way to choose random matrices is concise, but can create
# problems if there is a low amount of images and matrices.
        # E.g. suppose that 2 images ought to be transformed by either
# 0px translation on the x-axis or 1px translation. So 50% of all
# matrices translate by 0px and 50% by 1px. The following method
# will randomly choose a combination of the two matrices for the
# two images (matrix 0 for image 0 and matrix 0 for image 1,
# matrix 0 for image 0 and matrix 1 for image 1, ...).
# In 50% of these cases, a different matrix will be chosen for image 0
# and image 1 (matrices 0, 1 or matrices 1, 0). But 50% of these
# "different" matrices (different index) will be the same, as 50%
# translate by 1px and 50% by 0px. As a result, 75% of all augmentations
# will transform both images in the same way.
# The effect decreases if more matrices or images are chosen.
        # randint's upper bound is exclusive: indices are drawn from
        # 0 .. len(matrices) - 1
        order_indices = np.random.randint(0, len(matrices), len_indices)
else:
        # monotonically increasing indices (each +1), but none of them may be
        # higher than or equal to the number of matrices
order_indices = np.arange(0, len_indices) % len(matrices)
    # Fill value for the pre-allocated result array: per channel, a 50/50 blend
    # of mean and max of the first image, averaged over three (assumed
    # channel-last) channels.
    bg = int(sum(
        int((images[0][..., c].mean() * 0.5) + (images[0][..., c].max() * 0.5))
        for c in range(3)
    ) / 3)
result = np.full(images.shape, bg, dtype=np.float32)
matrix_number = 0
# iterate over every image, find out which matrix to apply and then use
# that matrix to augment the image
for img_idx, image in enumerate(images):
if apply_directly:
# we can apply the matrix to the whole numpy array of the image
            # at the same time, so we do that to save time (instead of e.g. three
# steps for three channels as in the else-part)
matrix = matrices[order_indices[matrix_number]]
result[img_idx, ...] = tf.warp(image, matrix, mode=mode, cval=cval,
order=interpolation_order)
matrix_number += 1
else:
            # we can't apply the matrix to the whole image in one step, instead
# we have to apply it to each channel individually. that happens
# if the channel is the first axis of each image (incompatible with
# tf.warp()) or if it was explicitly requested via
# transform_channels_equally=False.
for channel_idx in range(nb_channels):
matrix = matrices[order_indices[matrix_number]]
if channel_is_first_axis:
warped = tf.warp(image[channel_idx], matrix, mode=mode,
cval=cval, order=interpolation_order)
result[img_idx, channel_idx, ...] = warped
else:
warped = tf.warp(image[..., channel_idx], matrix, mode=mode,
cval=cval, order=interpolation_order)
result[img_idx, ..., channel_idx] = warped
if not transform_channels_equally:
matrix_number += 1
if transform_channels_equally:
matrix_number += 1
return result
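# A minimal, hypothetical sketch (the batch shape and parameter values are
# illustrative assumptions) of how create_aug_matrices() and
# apply_aug_matrices() can be combined directly, without the ImageAugmenter
# wrapper defined below.
def _example_direct_matrix_usage():
    # 16 grayscale images of 32x32 pixels, filled with mid-gray
    batch = np.full((16, 32, 32), 128, dtype=np.uint8)
    # one matrix per image, each with a small random rotation/translation
    matrices = create_aug_matrices(16, 32, 32, rotation_deg=10,
                                   translation_x_px=2, translation_y_px=2)
    # warp the batch; tf.warp rescales the uint8 input, so the returned
    # float32 array holds values in the 0.0-1.0 range
    return apply_aug_matrices(batch, matrices, random_order=False)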
class ImageAugmenter(object):
"""Helper class to randomly augment images, usually for neural networks.
Example usage:
img_width = 32 # width of the images
img_height = 32 # height of the images
images = ... # e.g. load via scipy.misc.imload(filename)
# For each image: randomly flip it horizontally (50% chance),
# randomly rotate it between -20 and +20 degrees, randomly translate
# it on the x-axis between -5 and +5 pixel.
        ia = ImageAugmenter(img_width, img_height, hflip=True, rotation_deg=20,
translation_x_px=5)
augmented_images = ia.augment_batch(images)
"""
def __init__(self, img_width_px, img_height_px, channel_is_first_axis=False,
hflip=False, vflip=False,
scale_to_percent=1.0, scale_axis_equally=False,
rotation_deg=0, shear_deg=0,
translation_x_px=0, translation_y_px=0,
transform_channels_equally=True, blur_radius=0, noise_variance=0, motion_blur_radius=0, motion_blur_strength=0):
"""
Args:
img_width_px: The intended width of each image in pixels.
img_height_px: The intended height of each image in pixels.
channel_is_first_axis: Whether the channel (e.g. RGB) is the first
axis of each image (True) or the last axis (False).
False matches the scipy and PIL implementation and is the
default. If your images are 2D-grayscale then you can ignore
this setting (as the augmenter will ignore it too).
hflip: Whether to randomly flip images horizontally (on the y-axis).
You may choose either False (no horizontal flipping),
True (flip with probability 0.5) or use a float
value (probability) between 0.0 and 1.0. Default is False.
vflip: Whether to randomly flip images vertically (on the x-axis).
You may choose either False (no vertical flipping),
True (flip with probability 0.5) or use a float
value (probability) between 0.0 and 1.0. Default is False.
scale_to_percent: Up to which percentage the images may be
scaled/zoomed. The negative scaling is automatically derived
from this value. A value of 1.1 allows scaling by any value
between -10% and +10%. You may set min and max values yourself
by using a tuple instead, like (1.1, 1.2) to scale between
+10% and +20%. Default is 1.0 (no scaling).
scale_axis_equally: Whether to always scale both axis (x and y)
in the same way. If set to False, then e.g. the Augmenter
might scale the x-axis by 20% and the y-axis by -5%.
Default is False.
rotation_deg: By how much the image may be rotated around its
center (in degrees). The negative rotation will automatically
be derived from this value. E.g. a value of 20 allows any
rotation between -20 degrees and +20 degrees. You may set min
and max values yourself by using a tuple instead, e.g. (5, 20)
                to rotate between +5 and +20 degrees. Default is 0 (no
rotation).
shear_deg: By how much the image may be sheared (in degrees). The
negative value will automatically be derived from this value.
E.g. a value of 20 allows any shear between -20 degrees and
+20 degrees. You may set min and max values yourself by using a
                tuple instead, e.g. (5, 20) to shear between +5 and +20
degrees. Default is 0 (no shear).
translation_x_px: By up to how many pixels the image may be
translated (moved) on the x-axis. The negative value will
automatically be derived from this value. E.g. a value of +7
allows any translation between -7 and +7 pixels on the x-axis.
You may set min and max values yourself by using a tuple
                instead, e.g. (5, 20) to translate between +5 and +20 pixels.
Default is 0 (no translation on the x-axis).
translation_y_px: See translation_x_px, just for the y-axis.
            transform_channels_equally: Whether to apply exactly the same
transformations to each channel of an image (True). Setting
it to False allows different transformations per channel,
e.g. the red-channel might be rotated by +20 degrees, while
the blue channel (of the same image) might be rotated
by -5 degrees. If you don't have any channels (2D grayscale),
you can simply ignore this setting.
Default is True (transform all equally).
"""
self.img_width_px = img_width_px
self.img_height_px = img_height_px
self.channel_is_first_axis = channel_is_first_axis
self.hflip_prob = 0.0
        # note: we have to check first for floats, otherwise "hflip == True"
        # will evaluate to True if hflip is 1.0. So choosing 1.0 (100%) would
        # result in hflip_prob being set to 0.5 (50%).
if isinstance(hflip, float):
assert hflip >= 0.0 and hflip <= 1.0
self.hflip_prob = hflip
elif hflip == True:
self.hflip_prob = 0.5
elif hflip == False:
self.hflip_prob = 0.0
else:
raise Exception("Unexpected value for parameter 'hflip'.")
self.vflip_prob = 0.0
if isinstance(vflip, float):
assert vflip >= 0.0 and vflip <= 1.0
self.vflip_prob = vflip
elif vflip == True:
self.vflip_prob = 0.5
elif vflip == False:
self.vflip_prob = 0.0
else:
raise Exception("Unexpected value for parameter 'vflip'.")
self.motion_blur_strength = motion_blur_strength
self.motion_blur_radius = motion_blur_radius
self.blur_radius = blur_radius
self.noise_variance = noise_variance
self.scale_to_percent = scale_to_percent
self.scale_axis_equally = scale_axis_equally
self.rotation_deg = rotation_deg
self.shear_deg = shear_deg
self.translation_x_px = translation_x_px
self.translation_y_px = translation_y_px
self.transform_channels_equally = transform_channels_equally
self.cval = 0.0
self.interpolation_order = 1
self.pregenerated_matrices = None
def pregenerate_matrices(self, nb_matrices, seed=None):
"""Pregenerate/cache augmentation matrices.
If matrices are pregenerated, augment_batch() will reuse them on
each call. The augmentations will not always be the same,
as the order of the matrices will be randomized (when
        they are applied to the images). This does require, though, that you
        pregenerate enough of them (e.g. a couple of thousand).
        Note that generating the augmentation matrices is usually fast, so
        pregenerating them only starts to make sense if you process millions of
        small images or many tens of thousands of big images.
Each call to this method results in pregenerating a new set of matrices,
e.g. to replace a list of matrices that has been used often enough.
Calling this method with nb_matrices set to 0 will remove the
pregenerated matrices and augment_batch() returns to its default
behaviour of generating new matrices on each call.
Args:
nb_matrices: The number of matrices to pregenerate. E.g. a few
thousand. If set to 0, the matrices will be generated again on
each call of augment_batch().
seed: A random seed to use.
"""
assert nb_matrices >= 0
if nb_matrices == 0:
self.pregenerated_matrices = None
else:
matrices = create_aug_matrices(nb_matrices,
self.img_width_px,
self.img_height_px,
scale_to_percent=self.scale_to_percent,
scale_axis_equally=self.scale_axis_equally,
rotation_deg=self.rotation_deg,
shear_deg=self.shear_deg,
translation_x_px=self.translation_x_px,
translation_y_px=self.translation_y_px,
seed=seed)
self.pregenerated_matrices = matrices
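    # Hypothetical usage sketch (names, sizes and parameters are illustrative
    # assumptions, not values required by this class):
    #     augmenter = ImageAugmenter(32, 32, hflip=True, rotation_deg=15)
    #     augmenter.pregenerate_matrices(10000, seed=42)
    #     for batch in batches:  # each batch: uint8 array of shape (N, 32, 32)
    #         augmented = augmenter.augment_batch(batch)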
def augment_batch(self, images, seed=None):
"""Augments a batch of images.
Applies all settings (rotation, shear, translation, ...) that
have been chosen in the constructor.
Args:
images: Numpy array (dtype: uint8, i.e. values 0-255) with the images.
Expected shape is either (image-index, height, width) for
grayscale images or (image-index, channel, height, width) for
images with channels (e.g. RGB) where the channel has the first
index or (image-index, height, width, channel) for images with
channels, where the channel is the last index.
If your shape is (image-index, channel, width, height) then
you must also set channel_is_first_axis=True in the constructor.
seed: Seed to use for python's and numpy's random functions.
                Default is None (don't use a seed).
Returns:
Augmented images as numpy array of dtype float32 (i.e. values
are between 0.0 and 1.0).
"""
shape = images.shape
nb_channels = 0
if len(shape) == 3:
# shape like (image_index, y-axis, x-axis)
assert shape[1] == self.img_height_px
assert shape[2] == self.img_width_px
nb_channels = 1
elif len(shape) == 4:
if not self.channel_is_first_axis:
# shape like (image-index, y-axis, x-axis, channel-index)
assert shape[1] == self.img_height_px
assert shape[2] == self.img_width_px
nb_channels = shape[3]
else:
# shape like (image-index, channel-index, y-axis, x-axis)
assert shape[2] == self.img_height_px
assert shape[3] == self.img_width_px
nb_channels = shape[1]
else:
msg = "Mismatch between images shape %s and " \
"predefined image width/height (%d/%d)."
raise Exception(msg % (str(shape), self.img_width_px, self.img_height_px))
if seed:
random.seed(seed)
np.random.seed(seed)
# --------------------------------
# horizontal and vertical flipping/mirroring
# --------------------------------
# This should be done before applying the affine matrices, as otherwise
# contents of image might already be rotated/translated out of the image.
# It is done with numpy instead of the affine matrices, because
# scikit-image doesn't offer a nice interface to add mirroring/flipping
# to affine transformations. The numpy operations are O(1), so they
# shouldn't have a noticeable effect on runtimes. They also won't suffer
# from interpolation problems.
if self.hflip_prob > 0 or self.vflip_prob > 0:
# TODO this currently ignores the setting in
# transform_channels_equally and will instead always flip all
# channels equally
# if this is simply a view, then the input array gets flipped too
# for some reason
images_flipped = np.copy(images)
#images_flipped = images.view()
if len(shape) == 4 and self.channel_is_first_axis:
# roll channel to the last axis
                # swapaxes doesn't work here, because
# (image index, channel, y, x)
# would be turned into
# (image index, x, y, channel)
# and y needs to come before x
images_flipped = np.rollaxis(images_flipped, 1, 4)
y_p = self.hflip_prob
x_p = self.vflip_prob
for i in range(images.shape[0]):
if y_p > 0 and random.random() < y_p:
images_flipped[i] = np.fliplr(images_flipped[i])
if x_p > 0 and random.random() < x_p:
images_flipped[i] = np.flipud(images_flipped[i])
if len(shape) == 4 and self.channel_is_first_axis:
# roll channel back to the second axis (index 1)
images_flipped = np.rollaxis(images_flipped, 3, 1)
images = images_flipped
# --------------------------------
# if no augmentation has been chosen, stop early
# for improved performance (evade applying matrices)
# --------------------------------
if self.pregenerated_matrices is None \
and self.scale_to_percent == 1.0 and self.rotation_deg == 0 \
and self.shear_deg == 0 \
and self.translation_x_px == 0 and self.translation_y_px == 0:
return np.array(images, dtype=np.float32) / 255
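        # Note: this early return only fires when no matrix-based augmentation
        # (scale/rotation/shear/translation) was configured; in that case the
        # blur/noise/motion-blur steps further below are skipped as well.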
# --------------------------------
# generate transformation matrices
# --------------------------------
if self.pregenerated_matrices is not None:
matrices = self.pregenerated_matrices
else:
# estimate the number of matrices required
if self.transform_channels_equally:
nb_matrices = shape[0]
else:
nb_matrices = shape[0] * nb_channels
# generate matrices
matrices = create_aug_matrices(nb_matrices,
self.img_width_px,
self.img_height_px,
scale_to_percent=self.scale_to_percent,
scale_axis_equally=self.scale_axis_equally,
rotation_deg=self.rotation_deg,
shear_deg=self.shear_deg,
translation_x_px=self.translation_x_px,
translation_y_px=self.translation_y_px,
seed=seed)
# --------------------------------
# apply transformation matrices (i.e. augment images)
# --------------------------------
images = apply_aug_matrices(images, matrices,
transform_channels_equally=self.transform_channels_equally,
channel_is_first_axis=self.channel_is_first_axis,
cval=self.cval, interpolation_order=self.interpolation_order,
seed=seed)
# Adding some blur
if self.blur_radius > 0:
for i in range(0, len(images)):
if random.randint(0, 10) > 7:
random_blur_radius = random.uniform(0, self.blur_radius)
images[i] = skimage.filters.gaussian(images[i],sigma=random_blur_radius)
        # Adding some noise (the generated noise array is 2D, so this step
        # assumes grayscale (height, width) images)
if self.noise_variance > 0:
for i in range(0, len(images)):
variance = random.uniform(0, self.noise_variance)
noise_shape = (int(images[i].shape[0] / 3), int(images[i].shape[1] / 3))
noise = np.zeros(noise_shape, dtype=images[i].dtype)
noise = skimage.util.random_noise(noise, mode='gaussian', seed=None, clip=True, var=variance, mean=0)
noise = cv2.resize(noise, (images[i].shape[1], images[i].shape[0]), interpolation=cv2.INTER_CUBIC)
noise = skimage.filters.gaussian(noise,sigma=2)
images[i] -= noise
if self.motion_blur_radius > 0 and self.motion_blur_strength > 0:
for i in range(0, len(images)):
if (np.random.uniform(0, 1) < 0.25): #probability of camerashake
radius = random.randint(3, self.motion_blur_radius)
if radius % 2 == 0:
radius += 1
images[i] = apply_motion_blur(images[i], radius, self.motion_blur_strength)
return images
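    # Hypothetical post-processing sketch (assumes an ImageAugmenter instance
    # named 'ia' and a uint8 batch named 'batch'): augment_batch() returns
    # float32 images, so converting back to uint8, e.g. for saving to disk,
    # could look like this. The clip is an assumption to guard against the
    # noise/blur steps pushing values slightly outside [0, 1]:
    #     augmented = ia.augment_batch(batch, seed=1)
    #     as_uint8 = (np.clip(augmented, 0.0, 1.0) * 255).astype(np.uint8)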
def plot_image(self, image, nb_repeat=40, show_plot=True):
"""Plot augmented variations of an image.
This method takes an image and plots it by default in 40 differently
augmented versions.
This method is intended to visualize the strength of your chosen
augmentations (so for debugging).
Args:
image: The image to plot.
nb_repeat: How often to plot the image. Each time it is plotted,
the chosen augmentation will be different. (Default: 40).
show_plot: Whether to show the plot. False makes sense if you
don't have a graphical user interface on the machine.
(Default: True)
Returns:
The figure of the plot.
Use figure.savefig() to save the image.
"""
if len(image.shape) == 2:
images = np.resize(image, (nb_repeat, image.shape[0], image.shape[1]))
else:
images = np.resize(image, (nb_repeat, image.shape[0], image.shape[1],
image.shape[2]))
return self.plot_images(images, True, show_plot=show_plot)
def plot_images(self, images, augment, show_plot=True, figure=None):
"""Plot augmented variations of images.
The images will all be shown in the same window.
It is recommended to not plot too many of them (i.e. stay below 100).
This method is intended to visualize the strength of your chosen
augmentations (so for debugging).
Args:
images: A numpy array of images. See augment_batch().
augment: Whether to augment the images (True) or just display
them in the way they are (False).
show_plot: Whether to show the plot. False makes sense if you
don't have a graphical user interface on the machine.
(Default: True)
figure: The figure of the plot in which to draw the images.
Provide the return value of this function (from a prior call)
                to draw in the same plot window again. Choosing 'None' will
create a new figure. (Default is None.)
Returns:
The figure of the plot.
Use figure.savefig() to save the image.
"""
import matplotlib.pyplot as plt
import matplotlib.cm as cm
if augment:
images = self.augment_batch(images)
# (Lists of) Grayscale images have the shape (image index, y, x)
# Multi-Channel images therefore must have 4 or more axes here
if len(images.shape) >= 4:
            # The color-channel is expected to be the last axis by matplotlib,
            # therefore exchange the axes if it's the first one here
if self.channel_is_first_axis:
images = np.rollaxis(images, 1, 4)
nb_cols = 10
nb_rows = 1 + int(images.shape[0] / nb_cols)
if figure is not None:
fig = figure
plt.figure(fig.number)
fig.clear()
else:
fig = plt.figure(figsize=(10, 10))
for i, image in enumerate(images):
image = images[i]
plot_number = i + 1
ax = fig.add_subplot(nb_rows, nb_cols, plot_number, xticklabels=[],
yticklabels=[])
ax.set_axis_off()
# "cmap" should restrict the color map to grayscale, but strangely
# also works well with color images
imgplot = plt.imshow(image, cmap=cm.Greys_r, aspect="equal")
# not showing the plot might be useful e.g. on clusters
if show_plot:
plt.show()
return fig
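# Hypothetical self-test block (an illustrative assumption, not required by the
# module): visualizes a random grayscale image under a few augmentations.
if __name__ == "__main__":
    demo_image = np.random.randint(0, 256, (64, 64)).astype(np.uint8)
    demo_augmenter = ImageAugmenter(64, 64, hflip=True, rotation_deg=15,
                                    translation_x_px=3, translation_y_px=3)
    # plot_image() augments the image nb_repeat times and shows the results
    demo_augmenter.plot_image(demo_image, nb_repeat=20)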
|
Luonic/tf-cnn-lstm-ocr-captcha
|
ImageAugmenter.py
|
Python
|
mit
| 39,211
|
[
"Gaussian"
] |
a7712f6a436f9cdaef387614dbaffaa80ebf697801230f71f8d3df07a2979017
|
from __future__ import annotations
import errno
import os
import matplotlib
from libtbx.phil import parse
import dials.util
from dials.algorithms.refinement.rotation_decomposition import (
solve_r3_rotation_for_angles_given_axes,
)
matplotlib.use("Agg")
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
phil_scope = parse(
"""
output {
directory = .
.type = str
.help = "The directory to store the results"
format = *png pdf
.type = choice
debug = False
.help = "print tables of values that will be plotted"
.type = bool
.expert_level = 1
}
orientation_decomposition
.help = "Options determining how the orientation matrix"
"decomposition is done. The axes about which to decompose"
"the matrix into three rotations are chosen here, as well"
"as whether the rotations are relative to the reference"
"orientation, taken from the static crystal model"
{
e1 = 1. 0. 0.
.type = floats(size = 3)
e2 = 0. 1. 0.
.type = floats(size = 3)
e3 = 0. 0. 1.
.type = floats(size = 3)
relative_to_static_orientation = True
.type = bool
}
"""
)
help_message = """
Generate plots of scan-varying models, including crystal orientation, unit cell
and beam centre, from the input refined.expt
Examples::
dials.plot_scan_varying_model refined.expt
"""
def ensure_directory(path):
"""Make the directory if not already there."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
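# Note: on Python 3, os.makedirs(path, exist_ok=True) expresses the same intent
# as the errno check above.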
class Script:
"""Class to run script."""
def __init__(self):
"""Setup the script."""
from dials.util.options import ArgumentParser
usage = "usage: dials.plot_scan_varying_model [options] refined.expt"
self.parser = ArgumentParser(
usage=usage,
phil=phil_scope,
read_experiments=True,
check_format=False,
epilog=help_message,
)
def run(self, args=None):
"""Run the script."""
from scitbx import matrix
from dials.util.options import flatten_experiments
params, options = self.parser.parse_args(args)
if len(params.input.experiments) == 0:
self.parser.print_help()
return
experiments = flatten_experiments(params.input.experiments)
# Determine output path
self._directory = os.path.join(params.output.directory, "scan-varying_model")
self._directory = os.path.abspath(self._directory)
ensure_directory(self._directory)
self._format = "." + params.output.format
self._debug = params.output.debug
# Decomposition axes
self._e1 = params.orientation_decomposition.e1
self._e2 = params.orientation_decomposition.e2
self._e3 = params.orientation_decomposition.e3
# cell plot
dat = []
for iexp, exp in enumerate(experiments):
crystal = exp.crystal
scan = exp.scan
if crystal.num_scan_points == 0:
print("Ignoring scan-static crystal")
continue
scan_pts = list(range(crystal.num_scan_points))
cells = [crystal.get_unit_cell_at_scan_point(t) for t in scan_pts]
cell_params = [e.parameters() for e in cells]
a, b, c, aa, bb, cc = zip(*cell_params)
start, stop = scan.get_array_range()
phi = [scan.get_angle_from_array_index(t) for t in range(start, stop + 1)]
vol = [e.volume() for e in cells]
cell_dat = {
"phi": phi,
"a": a,
"b": b,
"c": c,
"alpha": aa,
"beta": bb,
"gamma": cc,
"volume": vol,
}
try:
cell_esds = [
crystal.get_cell_parameter_sd_at_scan_point(t) for t in scan_pts
]
sig_a, sig_b, sig_c, sig_aa, sig_bb, sig_cc = zip(*cell_esds)
cell_dat["sig_a"] = sig_a
cell_dat["sig_b"] = sig_b
cell_dat["sig_c"] = sig_c
cell_dat["sig_aa"] = sig_aa
cell_dat["sig_bb"] = sig_bb
cell_dat["sig_cc"] = sig_cc
except RuntimeError:
pass
if self._debug:
print(f"Crystal in Experiment {iexp}")
print("Phi\ta\tb\tc\talpha\tbeta\tgamma\tVolume")
msg = "{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}"
line_dat = zip(phi, a, b, c, aa, bb, cc, vol)
for line in line_dat:
print(msg.format(*line))
dat.append(cell_dat)
if dat:
self.plot_cell(dat)
# orientation plot
dat = []
for iexp, exp in enumerate(experiments):
crystal = exp.crystal
scan = exp.scan
if crystal.num_scan_points == 0:
print("Ignoring scan-static crystal")
continue
scan_pts = list(range(crystal.num_scan_points))
start, stop = scan.get_array_range()
phi = [scan.get_angle_from_array_index(t) for t in range(start, stop + 1)]
Umats = [matrix.sqr(crystal.get_U_at_scan_point(t)) for t in scan_pts]
if params.orientation_decomposition.relative_to_static_orientation:
# factor out static U
Uinv = matrix.sqr(crystal.get_U()).inverse()
Umats = [U * Uinv for U in Umats]
# NB e3 and e1 definitions for the crystal are swapped compared
# with those used inside the solve_r3_rotation_for_angles_given_axes
# method
angles = [
solve_r3_rotation_for_angles_given_axes(
U, self._e3, self._e2, self._e1, deg=True
)
for U in Umats
]
phi3, phi2, phi1 = zip(*angles)
angle_dat = {"phi": phi, "phi3": phi3, "phi2": phi2, "phi1": phi1}
if self._debug:
print(f"Crystal in Experiment {iexp}")
print("Image\tphi3\tphi2\tphi1")
msg = "{0}\t{1}\t{2}\t{3}"
line_dat = zip(phi, phi3, phi2, phi1)
for line in line_dat:
print(msg.format(*line))
dat.append(angle_dat)
if dat:
self.plot_orientation(dat)
# beam centre plot
dat = []
for iexp, exp in enumerate(experiments):
beam = exp.beam
detector = exp.detector
scan = exp.scan
if beam.num_scan_points == 0:
print("Ignoring scan-static beam")
continue
scan_pts = range(beam.num_scan_points)
start, stop = scan.get_array_range()
phi = [scan.get_angle_from_array_index(t) for t in range(start, stop + 1)]
p = detector.get_panel_intersection(beam.get_s0())
if p < 0:
print("Beam does not intersect a panel")
continue
panel = detector[p]
s0_scan_points = [
beam.get_s0_at_scan_point(i) for i in range(beam.num_scan_points)
]
bc_scan_points = [panel.get_beam_centre_px(s0) for s0 in s0_scan_points]
bc_x, bc_y = zip(*bc_scan_points)
dat.append({"phi": phi, "beam_centre_x": bc_x, "beam_centre_y": bc_y})
if dat:
self.plot_beam_centre(dat)
def plot_cell(self, dat):
plt.figure(figsize=(13, 10))
gs = gridspec.GridSpec(4, 2, wspace=0.4, hspace=0.6)
ax = plt.subplot(gs[0, 0])
ax.ticklabel_format(useOffset=False)
for cell in dat:
if "sig_a" in cell:
ax.errorbar(
cell["phi"][0::20], cell["a"][0::20], yerr=cell["sig_a"][0::20]
)
plt.plot(cell["phi"], cell["a"])
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"length $\left(\AA\right)$")
plt.title("a")
ax = plt.subplot(gs[0, 1])
ax.ticklabel_format(useOffset=False)
ymin, ymax = 180.0, 0.0
for cell in dat:
if "sig_aa" in cell:
ax.errorbar(
cell["phi"][0::20], cell["alpha"][0::20], yerr=cell["sig_aa"][0::20]
)
plt.plot(cell["phi"], cell["alpha"])
# choose the widest y range
ymin = min(ymin, min(cell["alpha"]) - 0.1)
ymax = max(ymax, max(cell["alpha"]) + 0.1)
plt.axis(ymin=ymin, ymax=ymax)
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"angle $\left(^\circ\right)$")
plt.title(r"$\alpha$")
ax = plt.subplot(gs[1, 0])
ax.ticklabel_format(useOffset=False)
for cell in dat:
if "sig_b" in cell:
ax.errorbar(
cell["phi"][0::20], cell["b"][0::20], yerr=cell["sig_b"][0::20]
)
plt.plot(cell["phi"], cell["b"])
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"length $\left(\AA\right)$")
plt.title("b")
ax = plt.subplot(gs[1, 1])
ax.ticklabel_format(useOffset=False)
ymin, ymax = 180.0, 0.0
for cell in dat:
if "sig_bb" in cell:
ax.errorbar(
cell["phi"][0::20], cell["beta"][0::20], yerr=cell["sig_bb"][0::20]
)
plt.plot(cell["phi"], cell["beta"])
# choose the widest y range
ymin = min(ymin, min(cell["beta"]) - 0.1)
ymax = max(ymax, max(cell["beta"]) + 0.1)
plt.axis(ymin=ymin, ymax=ymax)
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"angle $\left(^\circ\right)$")
plt.title(r"$\beta$")
ax = plt.subplot(gs[2, 0])
ax.ticklabel_format(useOffset=False)
for cell in dat:
if "sig_c" in cell:
ax.errorbar(
cell["phi"][0::20], cell["c"][0::20], yerr=cell["sig_c"][0::20]
)
plt.plot(cell["phi"], cell["c"])
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"length $\left(\AA\right)$")
plt.title("c")
ax = plt.subplot(gs[2, 1])
ax.ticklabel_format(useOffset=False)
ymin, ymax = 180.0, 0.0
for cell in dat:
if "sig_cc" in cell:
ax.errorbar(
cell["phi"][0::20], cell["gamma"][0::20], yerr=cell["sig_cc"][0::20]
)
plt.plot(cell["phi"], cell["gamma"])
# choose the widest y range
ymin = min(ymin, min(cell["gamma"]) - 0.1)
ymax = max(ymax, max(cell["gamma"]) + 0.1)
plt.axis(ymin=ymin, ymax=ymax)
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"angle $\left(^\circ\right)$")
plt.title(r"$\gamma$")
ax = plt.subplot2grid((4, 2), (3, 0), colspan=2)
ax.ticklabel_format(useOffset=False)
for cell in dat:
plt.plot(cell["phi"], cell["volume"])
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"volume $\left(\AA^3\right)$")
plt.title("Cell volume")
basename = os.path.join(self._directory, "unit_cell")
fullname = basename + self._format
print(f"Saving unit cell plot to {fullname}")
plt.savefig(fullname)
def plot_orientation(self, dat):
plt.figure(figsize=(13, 10))
gs = gridspec.GridSpec(3, 1, wspace=0.4, hspace=0.6)
ax = plt.subplot(gs[0, 0])
ax.ticklabel_format(useOffset=False)
for ori in dat:
plt.plot(ori["phi"], ori["phi1"])
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"angle $\left(^\circ\right)$")
plt.title(r"$\phi_1$")
ax = plt.subplot(gs[1, 0])
ax.ticklabel_format(useOffset=False)
for ori in dat:
plt.plot(ori["phi"], ori["phi2"])
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"angle $\left(^\circ\right)$")
plt.title(r"$\phi_2$")
ax = plt.subplot(gs[2, 0])
ax.ticklabel_format(useOffset=False)
for ori in dat:
plt.plot(ori["phi"], ori["phi3"])
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"angle $\left(^\circ\right)$")
plt.title(r"$\phi_3$")
basename = os.path.join(self._directory, "orientation")
fullname = basename + self._format
print(f"Saving orientation plot to {fullname}")
plt.savefig(fullname)
def plot_beam_centre(self, dat):
plt.figure(figsize=(13, 10))
gs = gridspec.GridSpec(2, 1, wspace=0.4, hspace=0.6)
ax = plt.subplot(gs[0, 0])
ax.ticklabel_format(useOffset=False)
ymin, ymax = 0.0, 0.0
for bc in dat:
plt.plot(bc["phi"], bc["beam_centre_x"])
ymin = max(ymin, min(bc["beam_centre_x"]) - 0.1)
ymax = max(ymax, max(bc["beam_centre_x"]) + 0.1)
plt.axis(ymin=ymin, ymax=ymax)
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"X (pixels)")
plt.title(r"Beam centre X (pixels)")
ax = plt.subplot(gs[1, 0])
ax.ticklabel_format(useOffset=False)
ymin, ymax = 0.0, 0.0
for bc in dat:
plt.plot(bc["phi"], bc["beam_centre_y"])
ymin = max(ymin, min(bc["beam_centre_y"]) - 0.1)
ymax = max(ymax, max(bc["beam_centre_y"]) + 0.1)
plt.axis(ymin=ymin, ymax=ymax)
plt.xlabel(r"rotation angle $\left(^\circ\right)$")
plt.ylabel(r"Y (pixels)")
plt.title(r"Beam centre Y (pixels)")
basename = os.path.join(self._directory, "beam_centre")
fullname = basename + self._format
print(f"Saving beam centre plot to {fullname}")
plt.savefig(fullname)
@dials.util.show_mail_handle_errors()
def run(args=None):
script = Script()
script.run(args)
if __name__ == "__main__":
run()
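# Hypothetical usage sketches (file names and option values are illustrative
# assumptions):
#
#   from the command line, writing PDF plots into ./plots:
#       dials.plot_scan_varying_model refined.expt output.format=pdf output.directory=plots
#
#   or programmatically, bypassing the error-handling decorator:
#       Script().run(["refined.expt", "output.debug=True"])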
|
dials/dials
|
command_line/plot_scan_varying_model.py
|
Python
|
bsd-3-clause
| 14,391
|
[
"CRYSTAL"
] |
9b56423d50693ba8ab2ce88384de766a2ea199f17eb44e06bbd0c4c16f4cefc8
|
__author__ = 'sulantha'
from Utils.PipelineLogger import PipelineLogger
from Utils.DbUtils import DbUtils
import Config.PipelineConfig as pc
from datetime import datetime
import itertools
class DIAN_T1_Helper:
def __init__(self):
self.DBClient = DbUtils()
self.MatchDBClient = DbUtils(database=pc.DIAN_dataMatchDBName)
def getMatchingT1(self, processingItemObj):
modalityID = '{0}{1}{2}{3}{4}{5}{6}'.format(processingItemObj.study, processingItemObj.version,
processingItemObj.subject_rid, processingItemObj.modality,
processingItemObj.scan_date.replace('-', ''),
processingItemObj.s_identifier, processingItemObj.i_identifier)
getFromMatchTableSQL = "SELECT * FROM MatchT1 WHERE MODALITY_ID = '{0}'".format(modalityID)
existingMatchedRec = self.DBClient.executeAllResults(getFromMatchTableSQL)
if len(existingMatchedRec) == 1:
getConvSQL = "SELECT * FROM Conversion WHERE RECORD_ID = '{0}'".format(existingMatchedRec[0][3])
return self.DBClient.executeAllResults(getConvSQL)[0]
else:
if processingItemObj.modality == 'FMRI':
PipelineLogger.log('root', 'error',
'FMRI T1 Matching not implemented. {0} - {1} - {2}'.format(processingItemObj.subject_rid,
processingItemObj.s_identifier.replace(
'S', ''),
processingItemObj.i_identifier.replace(
'I', '')))
return None
else: # By Default, for PET images
date_str = processingItemObj.scan_date.replace('-','')
name_and_Mod = '{0}{1}'.format(processingItemObj.subject_rid, processingItemObj.modality)
visit = processingItemObj.i_identifier.split('x')[0].replace(date_str,'').replace(name_and_Mod, '')
pet_label = '{0}_{1}_{2}'.format(processingItemObj.subject_rid, visit, processingItemObj.modality.lower())
getRecordSQL = "SELECT * FROM PET_MRI_Proc_Match WHERE Label LIKE '{0}'".format(pet_label)
petrecord = self.MatchDBClient.executeAllResults(getRecordSQL)
if not petrecord:
PipelineLogger.log('root', 'error', 'Cannot find PET record : {0} - {1} - {2}'.format(processingItemObj.subject_rid, processingItemObj.s_identifier.replace('S', ''), processingItemObj.i_identifier.replace('I', '')))
return None
mr_name = petrecord[0][5]
if mr_name == '':
### Processed with MR entry not found. Have to switch to date based matching.
PipelineLogger.log('root', 'error',
'Processed with MR entry not found. : {0} - {1} - {2} - Searching based on scan date. +/- 60 days from PET date'.format(
processingItemObj.subject_rid, processingItemObj.modality, visit))
return None
mr_fid = petrecord[0][6]
mr_visit = mr_name.split('_')[1]
matchedT1withScanDescriptions= []
for t1_type in ['MPRAGE', 'IRFSPGR', 'MPR', 'FSPGR']:
mr_DB_iid = '{0}{3}{1}%x{2}'.format(processingItemObj.subject_rid, mr_visit, mr_fid, t1_type)
getScanFromConversionSQL = "SELECT * FROM Conversion WHERE STUDY = '{0}' AND I_IDENTIFIER LIKE '{1}' AND SKIP = 0".format(processingItemObj.study,mr_DB_iid)
t1_conversion = self.DBClient.executeAllResults(getScanFromConversionSQL)
if len(t1_conversion) > 0:
matchedT1withScanDescriptions.append(t1_conversion[0])
if len(matchedT1withScanDescriptions) < 1:
PipelineLogger.log('root', 'error', 'Matched T1s are not in the database. : Subject, visit and FID - {0} {1} {2}'.format(processingItemObj.subject_rid, mr_visit, mr_fid))
return None
else:
if len(matchedT1withScanDescriptions) == 1:
                        ## ONLY ONE MATCHED T1. GOOD. CHECK IF THE T1 is a good scan type and not a bluff !!!
self.addToMatchT1Table(processingItemObj, modalityID, matchedT1withScanDescriptions[0])
return matchedT1withScanDescriptions[0]
else:
                        #### MORE THAN ONE FOUND. Very weird for DIAN.
PipelineLogger.log('root', 'error',
                                           'MORE THAN ONE T1 Match FOUND. Very weird for DIAN. : Subject and visit - {0} {1}'.format(
processingItemObj.subject_rid, mr_visit))
return None
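    # Illustrative note (the concrete values are assumptions; they depend on the
    # study's naming conventions): modalityID simply concatenates
    # study + version + subject + modality + scan date + S/I identifiers,
    # e.g. 'DIAN' + '1.0' + '01234' + 'PIB' + '20150102' + 'S123' + 'I456'.
    # pet_label is '<subject>_<visit>_<modality lowercased>' and is matched
    # against PET_MRI_Proc_Match.Label, while mr_DB_iid is used as a SQL LIKE
    # pattern against Conversion.I_IDENTIFIER.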
def checkProcessed(self, t1Record):
subject_id = t1Record[2]
version = t1Record[11]
s_id = t1Record[6]
i_id = t1Record[7]
checkProcessedSQL = "SELECT * FROM Processing WHERE RID = '{0}' AND VERSION = '{1}' AND S_IDENTIFIER = '{2}' AND I_IDENTIFIER = '{3}'".format(subject_id, version, s_id, i_id)
result = self.DBClient.executeAllResults(checkProcessedSQL)[0]
if len(result) < 1:
PipelineLogger.log('root', 'error', 'Matched T1 is not added to the processing table. {0} - {1} - {2}'.format(subject_id, s_id, i_id))
return False
else:
if result[12] == 1 and result[13] == 1:
return result[8]
else:
                PipelineLogger.log('root', 'error', 'Matched T1 is not processed or QC failed. {0} - {1} - {2}'.format(subject_id, s_id, i_id))
self.startProcessOFT1(result)
return False
def addToMatchT1Table(self, processingItemObj, modalityID, t1Record):
pet_date = datetime.strptime(processingItemObj.scan_date, '%Y-%m-%d')
mri_date =datetime.combine(t1Record[4], datetime.min.time())
date_diff = abs(mri_date - pet_date)
t1ID = '{0}{1}{2}_x_{3}_x_{4}{5}{6}'.format(t1Record[1], t1Record[11], t1Record[2], t1Record[3], t1Record[4].strftime('%Y-%m-%d').replace('-', ''), t1Record[6], t1Record[7])
conversionID = t1Record[0]
sql = "INSERT IGNORE INTO MatchT1 VALUES (Null, '{0}', '{1}', '{2}', {3}, Null)".format(modalityID, t1ID, conversionID, date_diff.days)
self.DBClient.executeNoResult(sql)
def startProcessOFT1(self, processTableEntry):
recordId = processTableEntry[0]
study = processTableEntry[1]
sql = "UPDATE {0}_T1_Pipeline SET SKIP = 0 WHERE PROCESSING_TID = {1}".format(study, recordId)
self.DBClient.executeNoResult(sql)
|
sulantha2006/Processing_Pipeline
|
Pipelines/DIAN_T1/DIAN_T1_Helper.py
|
Python
|
apache-2.0
| 6,987
|
[
"VisIt"
] |
88b378be325cd5a219bdab31a7dd9576b889965fe2df191969cf54b364631c55
|
"""
@name: PyHouse_Install/src/Install/hostname.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2016-2016 by D. Brian Kimmel
@license: MIT License
@note: Created Jan 22, 2016
@Summary: Set up the computer's hostname
Hostname must be set up properly so that X509 certificates will work as they should.
"""
# Import system stuff
import os
class Private(object):
""" This will get information from the file /etc/pyhouse/.private.config
"""
def if_exists(self):
return False
class Hostname(object):
"""
"""
def get_existing(self):
l_host = os.uname()[1]
print(' Hostname: {}'.format(l_host))
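    def set_hostname_sketch(self, p_hostname):
        """ A minimal sketch (an assumption, not part of the original design) of
        how the hostname could be written so that X509 certificates match.
        Writing /etc/hostname requires root privileges; a reboot or a call to
        'hostname <name>' is needed for the change to take effect.
        """
        with open('/etc/hostname', 'w') as l_file:
            l_file.write(p_hostname + '\n')
        print(' Wrote hostname: {}'.format(p_hostname))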
if __name__ == "__main__":
print(' Running hostname.py ...')
l_host = Hostname()
l_host.get_existing()
print(' Finished hostname.py\n')
# ## END DBK
|
DBrianKimmel/PyHouse_Install
|
src/Install/hostname.py
|
Python
|
mit
| 864
|
[
"Brian"
] |
5f1f104fd82a46b32dd795cf1b4499e87105a284c0ad33086f800af24c3c6618
|